xref: /linux/drivers/net/ethernet/microchip/lan743x_main.c (revision 469faa546e7a82be85114e322cec6438790870ff)
1 /* SPDX-License-Identifier: GPL-2.0+ */
2 /* Copyright (C) 2018 Microchip Technology Inc. */
3 
4 #include <linux/module.h>
5 #include <linux/pci.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/crc32.h>
9 #include <linux/microchipphy.h>
10 #include <linux/net_tstamp.h>
11 #include <linux/of_mdio.h>
12 #include <linux/of_net.h>
13 #include <linux/phy.h>
14 #include <linux/phy_fixed.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/iopoll.h>
17 #include <linux/crc16.h>
18 #include <linux/phylink.h>
19 #include "lan743x_main.h"
20 #include "lan743x_ethtool.h"
21 
22 #define MMD_ACCESS_ADDRESS	0
23 #define MMD_ACCESS_WRITE	1
24 #define MMD_ACCESS_READ		2
25 #define MMD_ACCESS_READ_INC	3
26 #define PCS_POWER_STATE_DOWN	0x6
27 #define PCS_POWER_STATE_UP	0x4
28 
29 #define RFE_RD_FIFO_TH_3_DWORDS	0x3
30 
31 static bool pci11x1x_is_a0(struct lan743x_adapter *adapter)
32 {
33 	u32 dev_rev = adapter->csr.id_rev & ID_REV_CHIP_REV_MASK_;
34 	return dev_rev == ID_REV_CHIP_REV_PCI11X1X_A0_;
35 }
36 
/* Determine whether the SGMII interface is in use on a PCI11x1x part
 * and cache the result in adapter->is_sgmii_en.
 */
static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter)
{
	u32 fpga_rev;
	u32 cfg_load;
	u32 hw_cfg;
	u32 strap;
	int ret;

	/* Timeout = 100 (i.e. 1 sec (10 msec * 100)) */
	ret = lan743x_hs_syslock_acquire(adapter, 100);
	if (ret < 0) {
		netif_err(adapter, drv, adapter->netdev,
			  "Sys Lock acquire failed ret:%d\n", ret);
		return;
	}

	/* ETH_SYS_CONFIG_LOAD_STARTED_REG is read under the system lock;
	 * the remaining registers are read after releasing it.
	 */
	cfg_load = lan743x_csr_read(adapter, ETH_SYS_CONFIG_LOAD_STARTED_REG);
	lan743x_hs_syslock_release(adapter);
	hw_cfg = lan743x_csr_read(adapter, HW_CFG);
	strap = lan743x_csr_read(adapter, STRAP_READ);
	if ((pci11x1x_is_a0(adapter) &&
	     (cfg_load & GEN_SYS_LOAD_STARTED_REG_ETH_ ||
	      hw_cfg & HW_CFG_RST_PROTECT_)) ||
	    (strap & STRAP_READ_USE_SGMII_EN_)) {
		/* honour the SGMII enable strap bit */
		if (strap & STRAP_READ_SGMII_EN_)
			adapter->is_sgmii_en = true;
		else
			adapter->is_sgmii_en = false;
	} else {
		/* no usable strap: consult the FPGA (emulation) revision
		 * register; zero means no FPGA, so SGMII stays disabled.
		 */
		fpga_rev = lan743x_csr_read(adapter, FPGA_REV);
		if (fpga_rev) {
			if (fpga_rev & FPGA_SGMII_OP)
				adapter->is_sgmii_en = true;
			else
				adapter->is_sgmii_en = false;
		} else {
			adapter->is_sgmii_en = false;
		}
	}
	netif_dbg(adapter, drv, adapter->netdev,
		  "SGMII I/F %sable\n", adapter->is_sgmii_en ? "En" : "Dis");
}
79 
80 static bool is_pci11x1x_chip(struct lan743x_adapter *adapter)
81 {
82 	struct lan743x_csr *csr = &adapter->csr;
83 	u32 id_rev = csr->id_rev;
84 
85 	if (((id_rev & 0xFFFF0000) == ID_REV_ID_A011_) ||
86 	    ((id_rev & 0xFFFF0000) == ID_REV_ID_A041_)) {
87 		return true;
88 	}
89 	return false;
90 }
91 
92 static void lan743x_pci_cleanup(struct lan743x_adapter *adapter)
93 {
94 	pci_release_selected_regions(adapter->pdev,
95 				     pci_select_bars(adapter->pdev,
96 						     IORESOURCE_MEM));
97 	pci_disable_device(adapter->pdev);
98 }
99 
100 static int lan743x_pci_init(struct lan743x_adapter *adapter,
101 			    struct pci_dev *pdev)
102 {
103 	unsigned long bars = 0;
104 	int ret;
105 
106 	adapter->pdev = pdev;
107 	ret = pci_enable_device_mem(pdev);
108 	if (ret)
109 		goto return_error;
110 
111 	netif_info(adapter, probe, adapter->netdev,
112 		   "PCI: Vendor ID = 0x%04X, Device ID = 0x%04X\n",
113 		   pdev->vendor, pdev->device);
114 	bars = pci_select_bars(pdev, IORESOURCE_MEM);
115 	if (!test_bit(0, &bars))
116 		goto disable_device;
117 
118 	ret = pci_request_selected_regions(pdev, bars, DRIVER_NAME);
119 	if (ret)
120 		goto disable_device;
121 
122 	pci_set_master(pdev);
123 	return 0;
124 
125 disable_device:
126 	pci_disable_device(adapter->pdev);
127 
128 return_error:
129 	return ret;
130 }
131 
132 u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset)
133 {
134 	return ioread32(&adapter->csr.csr_address[offset]);
135 }
136 
137 void lan743x_csr_write(struct lan743x_adapter *adapter, int offset,
138 		       u32 data)
139 {
140 	iowrite32(data, &adapter->csr.csr_address[offset]);
141 }
142 
143 #define LAN743X_CSR_READ_OP(offset)	lan743x_csr_read(adapter, offset)
144 
/* Trigger a light reset by setting HW_CFG.LRST, then poll until the
 * self-clearing bit goes back to zero (100 ms interval, 10 s timeout).
 * Returns 0 on success or a negative errno from the poll helper.
 */
static int lan743x_csr_light_reset(struct lan743x_adapter *adapter)
{
	u32 data;

	data = lan743x_csr_read(adapter, HW_CFG);
	data |= HW_CFG_LRST_;
	lan743x_csr_write(adapter, HW_CFG, data);

	return readx_poll_timeout(LAN743X_CSR_READ_OP, HW_CFG, data,
				  !(data & HW_CFG_LRST_), 100000, 10000000);
}
156 
/* Busy-poll (atomic-context safe) CSR @offset until the bits in
 * @bit_mask match @target_value (0 or 1).  Returns 0 on success or a
 * negative errno on timeout.
 *
 * NOTE(review): @udelay_max is used as the poll interval and
 * @udelay_min * @count as the total timeout; the parameter names do not
 * match that usage - confirm intended.
 */
static int lan743x_csr_wait_for_bit_atomic(struct lan743x_adapter *adapter,
					   int offset, u32 bit_mask,
					   int target_value, int udelay_min,
					   int udelay_max, int count)
{
	u32 data;

	return readx_poll_timeout_atomic(LAN743X_CSR_READ_OP, offset, data,
					 target_value == !!(data & bit_mask),
					 udelay_max, udelay_min * count);
}
168 
/* Sleeping variant of lan743x_csr_wait_for_bit_atomic(): poll CSR
 * @offset until the bits in @bit_mask match @target_value.  Returns 0
 * on success or a negative errno on timeout.
 *
 * NOTE(review): as in the atomic variant, @usleep_max is the poll
 * interval and @usleep_min * @count the timeout - confirm intended.
 */
static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter,
				    int offset, u32 bit_mask,
				    int target_value, int usleep_min,
				    int usleep_max, int count)
{
	u32 data;

	return readx_poll_timeout(LAN743X_CSR_READ_OP, offset, data,
				  target_value == !!(data & bit_mask),
				  usleep_max, usleep_min * count);
}
180 
/* Map BAR0, validate the chip ID, derive per-revision CSR capability
 * flags and perform a light reset.  Returns 0 on success, -ENOMEM if
 * the BAR cannot be mapped, -ENODEV for an unknown chip ID, or the
 * light-reset error code.
 */
static int lan743x_csr_init(struct lan743x_adapter *adapter)
{
	struct lan743x_csr *csr = &adapter->csr;
	resource_size_t bar_start, bar_length;

	bar_start = pci_resource_start(adapter->pdev, 0);
	bar_length = pci_resource_len(adapter->pdev, 0);
	/* devm mapping: unmapped automatically on driver detach */
	csr->csr_address = devm_ioremap(&adapter->pdev->dev,
					bar_start, bar_length);
	if (!csr->csr_address)
		return -ENOMEM;

	csr->id_rev = lan743x_csr_read(adapter, ID_REV);
	csr->fpga_rev = lan743x_csr_read(adapter, FPGA_REV);
	netif_info(adapter, probe, adapter->netdev,
		   "ID_REV = 0x%08X, FPGA_REV = %d.%d\n",
		   csr->id_rev,	FPGA_REV_GET_MAJOR_(csr->fpga_rev),
		   FPGA_REV_GET_MINOR_(csr->fpga_rev));
	if (!ID_REV_IS_VALID_CHIP_ID_(csr->id_rev))
		return -ENODEV;

	/* auto set/clear is assumed supported, then revoked for A0 below */
	csr->flags = LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
	switch (csr->id_rev & ID_REV_CHIP_REV_MASK_) {
	case ID_REV_CHIP_REV_A0_:
		csr->flags |= LAN743X_CSR_FLAG_IS_A0;
		csr->flags &= ~LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
		break;
	case ID_REV_CHIP_REV_B0_:
		csr->flags |= LAN743X_CSR_FLAG_IS_B0;
		break;
	}

	return lan743x_csr_light_reset(adapter);
}
215 
/* Handle the software-generated (SW_GP) test interrupt: mask it, mark
 * it seen and wake the waiter in lan743x_intr_test_isr().
 */
static void lan743x_intr_software_isr(struct lan743x_adapter *adapter)
{
	struct lan743x_intr *intr = &adapter->intr;

	/* disable the interrupt to prevent repeated re-triggering */
	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
	intr->software_isr_flag = true;
	wake_up(&intr->software_isr_wq);
}
225 
/* Per-channel TX interrupt handler, invoked from a vector ISR with the
 * relevant INT_STS bits in @int_sts and the vector's behavior @flags.
 * Schedules NAPI when a TX interrupt-on-completion event is pending;
 * otherwise re-enables the channel interrupt.
 */
static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags)
{
	struct lan743x_tx *tx = context;
	struct lan743x_adapter *adapter = tx->adapter;
	bool enable_flag = true;

	/* NOTE(review): read result is discarded - presumably a dummy
	 * read to flush posted writes; confirm against errata.
	 */
	lan743x_csr_read(adapter, INT_EN_SET);
	if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
		lan743x_csr_write(adapter, INT_EN_CLR,
				  INT_BIT_DMA_TX_(tx->channel_number));
	}

	if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) {
		u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
		u32 dmac_int_sts;
		u32 dmac_int_en;

		/* without STATUS_READ/ENABLE_CHECK, assume IOC is pending */
		if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
			dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
		else
			dmac_int_sts = ioc_bit;
		if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
			dmac_int_en = lan743x_csr_read(adapter,
						       DMAC_INT_EN_SET);
		else
			dmac_int_en = ioc_bit;

		dmac_int_en &= ioc_bit;
		dmac_int_sts &= dmac_int_en;
		if (dmac_int_sts & ioc_bit) {
			napi_schedule(&tx->napi);
			enable_flag = false;/* poll func will enable later */
		}
	}

	if (enable_flag)
		/* enable isr */
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_TX_(tx->channel_number));
}
266 
/* Per-channel RX interrupt handler; mirror of lan743x_tx_isr() keyed on
 * the RX frame-received event.  Schedules NAPI when a frame is pending,
 * otherwise re-enables the channel interrupt.
 */
static void lan743x_rx_isr(void *context, u32 int_sts, u32 flags)
{
	struct lan743x_rx *rx = context;
	struct lan743x_adapter *adapter = rx->adapter;
	bool enable_flag = true;

	if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
		lan743x_csr_write(adapter, INT_EN_CLR,
				  INT_BIT_DMA_RX_(rx->channel_number));
	}

	if (int_sts & INT_BIT_DMA_RX_(rx->channel_number)) {
		u32 rx_frame_bit = DMAC_INT_BIT_RXFRM_(rx->channel_number);
		u32 dmac_int_sts;
		u32 dmac_int_en;

		/* without STATUS_READ/ENABLE_CHECK, assume a frame event */
		if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
			dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
		else
			dmac_int_sts = rx_frame_bit;
		if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
			dmac_int_en = lan743x_csr_read(adapter,
						       DMAC_INT_EN_SET);
		else
			dmac_int_en = rx_frame_bit;

		dmac_int_en &= rx_frame_bit;
		dmac_int_sts &= dmac_int_en;
		if (dmac_int_sts & rx_frame_bit) {
			napi_schedule(&rx->napi);
			enable_flag = false;/* poll funct will enable later */
		}
	}

	if (enable_flag) {
		/* enable isr */
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_RX_(rx->channel_number));
	}
}
307 
/* Vector-0 handler: demultiplex @int_sts to the per-channel RX/TX
 * handlers and the "other" sources (software test, PTP), then mask any
 * status bits nobody claimed so they cannot storm.
 */
static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
{
	struct lan743x_adapter *adapter = context;
	unsigned int channel;

	if (int_sts & INT_BIT_ALL_RX_) {
		for (channel = 0; channel < LAN743X_USED_RX_CHANNELS;
			channel++) {
			u32 int_bit = INT_BIT_DMA_RX_(channel);

			if (int_sts & int_bit) {
				lan743x_rx_isr(&adapter->rx[channel],
					       int_bit, flags);
				int_sts &= ~int_bit;
			}
		}
	}
	if (int_sts & INT_BIT_ALL_TX_) {
		for (channel = 0; channel < adapter->used_tx_channels;
			channel++) {
			u32 int_bit = INT_BIT_DMA_TX_(channel);

			if (int_sts & int_bit) {
				lan743x_tx_isr(&adapter->tx[channel],
					       int_bit, flags);
				int_sts &= ~int_bit;
			}
		}
	}
	if (int_sts & INT_BIT_ALL_OTHER_) {
		if (int_sts & INT_BIT_SW_GP_) {
			lan743x_intr_software_isr(adapter);
			int_sts &= ~INT_BIT_SW_GP_;
		}
		if (int_sts & INT_BIT_1588_) {
			lan743x_ptp_isr(adapter);
			int_sts &= ~INT_BIT_1588_;
		}
	}
	/* disable any remaining unhandled sources */
	if (int_sts)
		lan743x_csr_write(adapter, INT_EN_CLR, int_sts);
}
350 
/* Top-level IRQ entry point shared by every vector.  Reads (or implies)
 * the interrupt status according to the vector's flags, optionally
 * masks the vector/master enables around dispatch, and forwards the
 * masked status to the vector's handler.
 *
 * Returns IRQ_HANDLED if any enabled source in this vector's mask was
 * pending, IRQ_NONE otherwise.
 */
static irqreturn_t lan743x_intr_entry_isr(int irq, void *ptr)
{
	struct lan743x_vector *vector = ptr;
	struct lan743x_adapter *adapter = vector->adapter;
	irqreturn_t result = IRQ_NONE;
	u32 int_enables;
	u32 int_sts;

	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) {
		int_sts = lan743x_csr_read(adapter, INT_STS);
	} else if (vector->flags &
		   (LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C |
		   LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)) {
		/* read-to-clear register acknowledges as it is read */
		int_sts = lan743x_csr_read(adapter, INT_STS_R2C);
	} else {
		/* use mask as implied status */
		int_sts = vector->int_mask | INT_BIT_MAS_;
	}

	/* master bit clear means this device did not interrupt */
	if (!(int_sts & INT_BIT_MAS_))
		goto irq_done;

	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR)
		/* disable vector interrupt */
		lan743x_csr_write(adapter,
				  INT_VEC_EN_CLR,
				  INT_VEC_EN_(vector->vector_index));

	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR)
		/* disable master interrupt */
		lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);

	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) {
		int_enables = lan743x_csr_read(adapter, INT_EN_SET);
	} else {
		/*  use vector mask as implied enable mask */
		int_enables = vector->int_mask;
	}

	/* only dispatch sources that are both enabled and ours */
	int_sts &= int_enables;
	int_sts &= vector->int_mask;
	if (int_sts) {
		if (vector->handler) {
			vector->handler(vector->context,
					int_sts, vector->flags);
		} else {
			/* disable interrupts on this vector */
			lan743x_csr_write(adapter, INT_EN_CLR,
					  vector->int_mask);
		}
		result = IRQ_HANDLED;
	}

	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET)
		/* enable master interrupt */
		lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);

	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET)
		/* enable vector interrupt */
		lan743x_csr_write(adapter,
				  INT_VEC_EN_SET,
				  INT_VEC_EN_(vector->vector_index));
irq_done:
	return result;
}
416 
/* Verify interrupt delivery by firing the software (SW_GP) interrupt
 * and waiting up to 200 ms for lan743x_intr_software_isr() to observe
 * it.  Returns 0 on success, -ENODEV on timeout.
 */
static int lan743x_intr_test_isr(struct lan743x_adapter *adapter)
{
	struct lan743x_intr *intr = &adapter->intr;
	int ret;

	intr->software_isr_flag = false;

	/* enable and activate test interrupt */
	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_SW_GP_);
	lan743x_csr_write(adapter, INT_SET, INT_BIT_SW_GP_);

	ret = wait_event_timeout(intr->software_isr_wq,
				 intr->software_isr_flag,
				 msecs_to_jiffies(200));

	/* disable test interrupt */
	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);

	return ret > 0 ? 0 : -ENODEV;
}
437 
438 static int lan743x_intr_register_isr(struct lan743x_adapter *adapter,
439 				     int vector_index, u32 flags,
440 				     u32 int_mask,
441 				     lan743x_vector_handler handler,
442 				     void *context)
443 {
444 	struct lan743x_vector *vector = &adapter->intr.vector_list
445 					[vector_index];
446 	int ret;
447 
448 	vector->adapter = adapter;
449 	vector->flags = flags;
450 	vector->vector_index = vector_index;
451 	vector->int_mask = int_mask;
452 	vector->handler = handler;
453 	vector->context = context;
454 
455 	ret = request_irq(vector->irq,
456 			  lan743x_intr_entry_isr,
457 			  (flags & LAN743X_VECTOR_FLAG_IRQ_SHARED) ?
458 			  IRQF_SHARED : 0, DRIVER_NAME, vector);
459 	if (ret) {
460 		vector->handler = NULL;
461 		vector->context = NULL;
462 		vector->int_mask = 0;
463 		vector->flags = 0;
464 	}
465 	return ret;
466 }
467 
468 static void lan743x_intr_unregister_isr(struct lan743x_adapter *adapter,
469 					int vector_index)
470 {
471 	struct lan743x_vector *vector = &adapter->intr.vector_list
472 					[vector_index];
473 
474 	free_irq(vector->irq, vector);
475 	vector->handler = NULL;
476 	vector->context = NULL;
477 	vector->int_mask = 0;
478 	vector->flags = 0;
479 }
480 
481 static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter,
482 					 u32 int_mask)
483 {
484 	int index;
485 
486 	for (index = 0; index < adapter->max_vector_count; index++) {
487 		if (adapter->intr.vector_list[index].int_mask & int_mask)
488 			return adapter->intr.vector_list[index].flags;
489 	}
490 	return 0;
491 }
492 
/* Tear down interrupts: mask everything, free every requested IRQ and
 * disable MSI/MSI-X.  Safe to call on a partially set up state (used as
 * the error path of lan743x_intr_open()).
 */
static void lan743x_intr_close(struct lan743x_adapter *adapter)
{
	struct lan743x_intr *intr = &adapter->intr;
	int index = 0;

	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
	/* PCI11x1x has 16 vector enables, earlier parts have 8 */
	if (adapter->is_pci11x1x)
		lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x0000FFFF);
	else
		lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);

	for (index = 0; index < intr->number_of_vectors; index++) {
		if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) {
			lan743x_intr_unregister_isr(adapter, index);
			intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index);
		}
	}

	if (intr->flags & INTR_FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		intr->flags &= ~INTR_FLAG_MSI_ENABLED;
	}

	if (intr->flags & INTR_FLAG_MSIX_ENABLED) {
		pci_disable_msix(adapter->pdev);
		intr->flags &= ~INTR_FLAG_MSIX_ENABLED;
	}
}
521 
/* Set up interrupts for the adapter: try MSI-X first, fall back to MSI,
 * then to the legacy INTx line.  Vector 0 always services the "other"
 * sources plus any DMA channel without a dedicated vector; when enough
 * vectors were granted, TX and RX channels are moved onto their own
 * vectors.  Returns 0 on success or a negative errno; on failure all
 * acquired resources are released via lan743x_intr_close().
 */
static int lan743x_intr_open(struct lan743x_adapter *adapter)
{
	struct msix_entry msix_entries[PCI11X1X_MAX_VECTOR_COUNT];
	struct lan743x_intr *intr = &adapter->intr;
	unsigned int used_tx_channels;
	u32 int_vec_en_auto_clr = 0;
	u8 max_vector_count;
	u32 int_vec_map0 = 0;
	u32 int_vec_map1 = 0;
	int ret = -ENODEV;
	int index = 0;
	u32 flags = 0;

	intr->number_of_vectors = 0;

	/* Try to set up MSIX interrupts */
	max_vector_count = adapter->max_vector_count;
	memset(&msix_entries[0], 0,
	       sizeof(struct msix_entry) * max_vector_count);
	for (index = 0; index < max_vector_count; index++)
		msix_entries[index].entry = index;
	used_tx_channels = adapter->used_tx_channels;
	/* request 1 shared vector + one per TX + one per RX channel */
	ret = pci_enable_msix_range(adapter->pdev,
				    msix_entries, 1,
				    1 + used_tx_channels +
				    LAN743X_USED_RX_CHANNELS);

	if (ret > 0) {
		intr->flags |= INTR_FLAG_MSIX_ENABLED;
		intr->number_of_vectors = ret;
		intr->using_vectors = true;
		for (index = 0; index < intr->number_of_vectors; index++)
			intr->vector_list[index].irq = msix_entries
						       [index].vector;
		netif_info(adapter, ifup, adapter->netdev,
			   "using MSIX interrupts, number of vectors = %d\n",
			   intr->number_of_vectors);
	}

	/* If MSIX failed try to setup using MSI interrupts */
	if (!intr->number_of_vectors) {
		if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
			if (!pci_enable_msi(adapter->pdev)) {
				intr->flags |= INTR_FLAG_MSI_ENABLED;
				intr->number_of_vectors = 1;
				intr->using_vectors = true;
				intr->vector_list[0].irq =
					adapter->pdev->irq;
				netif_info(adapter, ifup, adapter->netdev,
					   "using MSI interrupts, number of vectors = %d\n",
					   intr->number_of_vectors);
			}
		}
	}

	/* If MSIX, and MSI failed, setup using legacy interrupt */
	if (!intr->number_of_vectors) {
		intr->number_of_vectors = 1;
		intr->using_vectors = false;
		intr->vector_list[0].irq = intr->irq;
		netif_info(adapter, ifup, adapter->netdev,
			   "using legacy interrupts\n");
	}

	/* At this point we must have at least one irq */
	lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0xFFFFFFFF);

	/* map all interrupts to vector 0 */
	lan743x_csr_write(adapter, INT_VEC_MAP0, 0x00000000);
	lan743x_csr_write(adapter, INT_VEC_MAP1, 0x00000000);
	lan743x_csr_write(adapter, INT_VEC_MAP2, 0x00000000);
	flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
		LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;

	if (intr->using_vectors) {
		flags |= LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
			 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
	} else {
		/* legacy INTx: gate via the master enable and share line */
		flags |= LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR |
			 LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET |
			 LAN743X_VECTOR_FLAG_IRQ_SHARED;
	}

	/* chips with auto set/clear use read-to-clear semantics instead */
	if (adapter->csr.flags & LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ;
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C;
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK;
		flags |= LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C;
		flags |= LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C;
	}

	init_waitqueue_head(&intr->software_isr_wq);

	/* vector 0 handles everything until per-channel vectors attach */
	ret = lan743x_intr_register_isr(adapter, 0, flags,
					INT_BIT_ALL_RX_ | INT_BIT_ALL_TX_ |
					INT_BIT_ALL_OTHER_,
					lan743x_intr_shared_isr, adapter);
	if (ret)
		goto clean_up;
	intr->flags |= INTR_FLAG_IRQ_REQUESTED(0);

	if (intr->using_vectors)
		lan743x_csr_write(adapter, INT_VEC_EN_SET,
				  INT_VEC_EN_(0));

	/* program interrupt moderation (registers absent on A0) */
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
		lan743x_csr_write(adapter, INT_MOD_CFG0, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG1, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG2, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG3, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG4, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD);
		if (adapter->is_pci11x1x) {
			lan743x_csr_write(adapter, INT_MOD_CFG8, LAN743X_INT_MOD);
			lan743x_csr_write(adapter, INT_MOD_CFG9, LAN743X_INT_MOD);
			lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00007654);
			lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00003210);
		} else {
			lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
			lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
		}
		lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF);
	}

	/* enable interrupts */
	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
	ret = lan743x_intr_test_isr(adapter);
	if (ret)
		goto clean_up;

	/* move TX channels onto dedicated vectors 1..N when available */
	if (intr->number_of_vectors > 1) {
		int number_of_tx_vectors = intr->number_of_vectors - 1;

		if (number_of_tx_vectors > used_tx_channels)
			number_of_tx_vectors = used_tx_channels;
		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;

		if (adapter->csr.flags &
		   LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
		}

		for (index = 0; index < number_of_tx_vectors; index++) {
			u32 int_bit = INT_BIT_DMA_TX_(index);
			int vector = index + 1;

			/* map TX interrupt to vector */
			int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
			lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);

			/* Remove TX interrupt from shared mask */
			intr->vector_list[0].int_mask &= ~int_bit;
			ret = lan743x_intr_register_isr(adapter, vector, flags,
							int_bit, lan743x_tx_isr,
							&adapter->tx[index]);
			if (ret)
				goto clean_up;
			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
			if (!(flags &
			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET))
				lan743x_csr_write(adapter, INT_VEC_EN_SET,
						  INT_VEC_EN_(vector));
		}
	}
	/* NOTE(review): int - unsigned promotes to unsigned, so this test
	 * is also true when number_of_vectors < used_tx_channels; the loop
	 * below then runs zero times because number_of_rx_vectors goes
	 * negative - confirm intended.
	 */
	if ((intr->number_of_vectors - used_tx_channels) > 1) {
		int number_of_rx_vectors = intr->number_of_vectors -
						used_tx_channels - 1;

		if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS)
			number_of_rx_vectors = LAN743X_USED_RX_CHANNELS;

		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;

		if (adapter->csr.flags &
		    LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
		}
		for (index = 0; index < number_of_rx_vectors; index++) {
			int vector = index + 1 + used_tx_channels;
			u32 int_bit = INT_BIT_DMA_RX_(index);

			/* map RX interrupt to vector */
			int_vec_map0 |= INT_VEC_MAP0_RX_VEC_(index, vector);
			lan743x_csr_write(adapter, INT_VEC_MAP0, int_vec_map0);
			if (flags &
			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
				int_vec_en_auto_clr |= INT_VEC_EN_(vector);
				lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
						  int_vec_en_auto_clr);
			}

			/* Remove RX interrupt from shared mask */
			intr->vector_list[0].int_mask &= ~int_bit;
			ret = lan743x_intr_register_isr(adapter, vector, flags,
							int_bit, lan743x_rx_isr,
							&adapter->rx[index]);
			if (ret)
				goto clean_up;
			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);

			lan743x_csr_write(adapter, INT_VEC_EN_SET,
					  INT_VEC_EN_(vector));
		}
	}
	return 0;

clean_up:
	lan743x_intr_close(adapter);
	return ret;
}
754 
755 static int lan743x_dp_write(struct lan743x_adapter *adapter,
756 			    u32 select, u32 addr, u32 length, u32 *buf)
757 {
758 	u32 dp_sel;
759 	int i;
760 
761 	if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL, DP_SEL_DPRDY_,
762 					    1, 40, 100, 100))
763 		return -EIO;
764 	dp_sel = lan743x_csr_read(adapter, DP_SEL);
765 	dp_sel &= ~DP_SEL_MASK_;
766 	dp_sel |= select;
767 	lan743x_csr_write(adapter, DP_SEL, dp_sel);
768 
769 	for (i = 0; i < length; i++) {
770 		lan743x_csr_write(adapter, DP_ADDR, addr + i);
771 		lan743x_csr_write(adapter, DP_DATA_0, buf[i]);
772 		lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
773 		if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL,
774 						    DP_SEL_DPRDY_,
775 						    1, 40, 100, 100))
776 			return -EIO;
777 	}
778 
779 	return 0;
780 }
781 
782 static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
783 {
784 	u32 ret;
785 
786 	ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
787 		MAC_MII_ACC_PHY_ADDR_MASK_;
788 	ret |= (index << MAC_MII_ACC_MIIRINDA_SHIFT_) &
789 		MAC_MII_ACC_MIIRINDA_MASK_;
790 
791 	if (read)
792 		ret |= MAC_MII_ACC_MII_READ_;
793 	else
794 		ret |= MAC_MII_ACC_MII_WRITE_;
795 	ret |= MAC_MII_ACC_MII_BUSY_;
796 
797 	return ret;
798 }
799 
/* Poll MAC_MII_ACC until the BUSY bit clears (up to 1 s).  Returns 0 on
 * success or a negative errno on timeout.
 */
static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter)
{
	u32 data;

	return readx_poll_timeout(LAN743X_CSR_READ_OP, MAC_MII_ACC, data,
				  !(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000);
}
807 
/* mii_bus clause-22 read callback.  Returns the 16-bit register value
 * on success or a negative errno.
 */
static int lan743x_mdiobus_read_c22(struct mii_bus *bus, int phy_id, int index)
{
	struct lan743x_adapter *adapter = bus->priv;
	u32 val, mii_access;
	int ret;

	/* confirm MII not busy */
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	/* set the address, index & direction (read from PHY) */
	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_READ);
	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	/* result is in the low 16 bits of MAC_MII_DATA */
	val = lan743x_csr_read(adapter, MAC_MII_DATA);
	return (int)(val & 0xFFFF);
}
829 
/* mii_bus clause-22 write callback.  Returns 0 on success or a negative
 * errno.
 */
static int lan743x_mdiobus_write_c22(struct mii_bus *bus,
				     int phy_id, int index, u16 regval)
{
	struct lan743x_adapter *adapter = bus->priv;
	u32 val, mii_access;
	int ret;

	/* confirm MII not busy */
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;
	/* data must be loaded before the command word starts the cycle */
	val = (u32)regval;
	lan743x_csr_write(adapter, MAC_MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_WRITE);
	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	return ret;
}
850 
851 static u32 lan743x_mac_mmd_access(int id, int dev_addr, int op)
852 {
853 	u32 ret;
854 
855 	ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
856 		MAC_MII_ACC_PHY_ADDR_MASK_;
857 	ret |= (dev_addr << MAC_MII_ACC_MIIMMD_SHIFT_) &
858 		MAC_MII_ACC_MIIMMD_MASK_;
859 	if (op == MMD_ACCESS_WRITE)
860 		ret |= MAC_MII_ACC_MIICMD_WRITE_;
861 	else if (op == MMD_ACCESS_READ)
862 		ret |= MAC_MII_ACC_MIICMD_READ_;
863 	else if (op == MMD_ACCESS_READ_INC)
864 		ret |= MAC_MII_ACC_MIICMD_READ_INC_;
865 	else
866 		ret |= MAC_MII_ACC_MIICMD_ADDR_;
867 	ret |= (MAC_MII_ACC_MII_BUSY_ | MAC_MII_ACC_MIICL45_);
868 
869 	return ret;
870 }
871 
/* mii_bus clause-45 read callback: load the register address into the
 * MMD, then issue the read.  Returns the 16-bit value on success or a
 * negative errno.
 */
static int lan743x_mdiobus_read_c45(struct mii_bus *bus, int phy_id,
				    int dev_addr, int index)
{
	struct lan743x_adapter *adapter = bus->priv;
	u32 mmd_access;
	int ret;

	/* confirm MII not busy */
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	/* Load Register Address */
	lan743x_csr_write(adapter, MAC_MII_DATA, index);
	mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
					    MMD_ACCESS_ADDRESS);
	lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	/* Read Data */
	mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
					    MMD_ACCESS_READ);
	lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	ret = lan743x_csr_read(adapter, MAC_MII_DATA);
	return (int)(ret & 0xFFFF);
}
904 
/* mii_bus clause-45 write callback: load the register address into the
 * MMD, then write the data.  Returns 0 on success or a negative errno.
 */
static int lan743x_mdiobus_write_c45(struct mii_bus *bus, int phy_id,
				     int dev_addr, int index, u16 regval)
{
	struct lan743x_adapter *adapter = bus->priv;
	u32 mmd_access;
	int ret;

	/* confirm MII not busy */
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	/* Load Register Address */
	lan743x_csr_write(adapter, MAC_MII_DATA, (u32)index);
	mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
					    MMD_ACCESS_ADDRESS);
	lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	/* Write Data */
	lan743x_csr_write(adapter, MAC_MII_DATA, (u32)regval);
	mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
					    MMD_ACCESS_WRITE);
	lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);

	return lan743x_mac_mii_wait_till_not_busy(adapter);
}
934 
/* Poll SGMII_ACC until the busy bit clears (100 us interval, 1 s
 * timeout), logging on timeout.  Returns 0 on success or a negative
 * errno.
 */
static int lan743x_sgmii_wait_till_not_busy(struct lan743x_adapter *adapter)
{
	u32 data;
	int ret;

	ret = readx_poll_timeout(LAN743X_CSR_READ_OP, SGMII_ACC, data,
				 !(data & SGMII_ACC_SGMII_BZY_), 100, 1000000);
	if (ret < 0)
		netif_err(adapter, drv, adapter->netdev,
			  "%s: error %d sgmii wait timeout\n", __func__, ret);

	return ret;
}
948 
/* Read a 16-bit SGMII PCS register @addr from MMD @mmd (0-31).
 * Serialized by adapter->sgmii_rw_lock.  Returns the register value on
 * success or a negative errno.
 */
int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr)
{
	u32 mmd_access;
	int ret;
	u32 val;

	/* the MMD field only holds 5 bits */
	if (mmd > 31) {
		netif_err(adapter, probe, adapter->netdev,
			  "%s mmd should <= 31\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&adapter->sgmii_rw_lock);
	/* Load Register Address */
	mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_;
	mmd_access |= (addr | SGMII_ACC_SGMII_BZY_);
	lan743x_csr_write(adapter, SGMII_ACC, mmd_access);
	ret = lan743x_sgmii_wait_till_not_busy(adapter);
	if (ret < 0)
		goto sgmii_unlock;

	val = lan743x_csr_read(adapter, SGMII_DATA);
	ret = (int)(val & SGMII_DATA_MASK_);

sgmii_unlock:
	mutex_unlock(&adapter->sgmii_rw_lock);

	return ret;
}
978 
/* Write 16-bit value @val to SGMII PCS register @addr in MMD @mmd
 * (0-31).  Serialized by adapter->sgmii_rw_lock.  Returns 0 on success
 * or a negative errno.
 */
static int lan743x_sgmii_write(struct lan743x_adapter *adapter,
			       u8 mmd, u16 addr, u16 val)
{
	u32 mmd_access;
	int ret;

	/* the MMD field only holds 5 bits */
	if (mmd > 31) {
		netif_err(adapter, probe, adapter->netdev,
			  "%s mmd should <= 31\n", __func__);
		return -EINVAL;
	}
	mutex_lock(&adapter->sgmii_rw_lock);
	/* Load Register Data */
	lan743x_csr_write(adapter, SGMII_DATA, (u32)(val & SGMII_DATA_MASK_));
	/* Load Register Address */
	mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_;
	mmd_access |= (addr | SGMII_ACC_SGMII_BZY_ | SGMII_ACC_SGMII_WR_);
	lan743x_csr_write(adapter, SGMII_ACC, mmd_access);
	ret = lan743x_sgmii_wait_till_not_busy(adapter);
	mutex_unlock(&adapter->sgmii_rw_lock);

	return ret;
}
1002 
1003 static int lan743x_get_lsd(int speed, int duplex, u8 mss)
1004 {
1005 	int lsd;
1006 
1007 	switch (speed) {
1008 	case SPEED_2500:
1009 		if (mss == MASTER_SLAVE_STATE_SLAVE)
1010 			lsd = LINK_2500_SLAVE;
1011 		else
1012 			lsd = LINK_2500_MASTER;
1013 		break;
1014 	case SPEED_1000:
1015 		if (mss == MASTER_SLAVE_STATE_SLAVE)
1016 			lsd = LINK_1000_SLAVE;
1017 		else
1018 			lsd = LINK_1000_MASTER;
1019 		break;
1020 	case SPEED_100:
1021 		if (duplex == DUPLEX_FULL)
1022 			lsd = LINK_100FD;
1023 		else
1024 			lsd = LINK_100HD;
1025 		break;
1026 	case SPEED_10:
1027 		if (duplex == DUPLEX_FULL)
1028 			lsd = LINK_10FD;
1029 		else
1030 			lsd = LINK_10HD;
1031 		break;
1032 	default:
1033 		lsd = -EINVAL;
1034 	}
1035 
1036 	return lsd;
1037 }
1038 
/* Program the SERDES MPLL multiplier and baud clock divider for the
 * requested baud rate (VR_MII_BAUD_RATE_1P25GBPS or 3P125GBPS).
 * Returns 0 on success or a negative error code from the SGMII
 * register accessors.
 */
static int lan743x_sgmii_mpll_set(struct lan743x_adapter *adapter,
				  u16 baud)
{
	int mpllctrl0;
	int mpllctrl1;
	int miscctrl1;
	int ret;

	mpllctrl0 = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
				       VR_MII_GEN2_4_MPLL_CTRL0);
	if (mpllctrl0 < 0)
		return mpllctrl0;

	/* clear USE_REFCLK_PAD - do not take the MPLL reference clock
	 * from the external pad
	 */
	mpllctrl0 &= ~VR_MII_MPLL_CTRL0_USE_REFCLK_PAD_;
	if (baud == VR_MII_BAUD_RATE_1P25GBPS) {
		mpllctrl1 = VR_MII_MPLL_MULTIPLIER_100;
		/* mpll_baud_clk/4 */
		miscctrl1 = 0xA;
	} else {
		mpllctrl1 = VR_MII_MPLL_MULTIPLIER_125;
		/* mpll_baud_clk/2 */
		miscctrl1 = 0x5;
	}

	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
				  VR_MII_GEN2_4_MPLL_CTRL0, mpllctrl0);
	if (ret < 0)
		return ret;

	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
				  VR_MII_GEN2_4_MPLL_CTRL1, mpllctrl1);
	if (ret < 0)
		return ret;

	return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
				  VR_MII_GEN2_4_MISC_CTRL1, miscctrl1);
}
1076 
1077 static int lan743x_sgmii_2_5G_mode_set(struct lan743x_adapter *adapter,
1078 				       bool enable)
1079 {
1080 	if (enable)
1081 		return lan743x_sgmii_mpll_set(adapter,
1082 					      VR_MII_BAUD_RATE_3P125GBPS);
1083 	else
1084 		return lan743x_sgmii_mpll_set(adapter,
1085 					      VR_MII_BAUD_RATE_1P25GBPS);
1086 }
1087 
/* Reconfigure the SERDES clocking and SGMII auto-negotiation to match
 * the link speed descriptor cached in adapter->sgmii_lsd.
 *
 * For 2.5G links: force 1000 in BMCR with autoneg disabled, and extend
 * the CL37 link timer via VR_MII_LINK_TIMER_CTRL / CL37_TMR_OVR_RIDE.
 * For all other speeds: enable/restart autoneg and let the PCS follow
 * the MAC automatically (MAC_AUTO_SW).
 *
 * Returns 0 on success or a negative error code.
 */
static int lan743x_serdes_clock_and_aneg_update(struct lan743x_adapter *adapter)
{
	enum lan743x_sgmii_lsd lsd = adapter->sgmii_lsd;
	int mii_ctrl;
	int dgt_ctrl;
	int an_ctrl;
	int ret;

	if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE)
		/* Switch to 2.5 Gbps */
		ret = lan743x_sgmii_2_5G_mode_set(adapter, true);
	else
		/* Switch to 10/100/1000 Mbps clock */
		ret = lan743x_sgmii_2_5G_mode_set(adapter, false);
	if (ret < 0)
		return ret;

	/* Read-modify-write the three PCS control registers */
	mii_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
	if (mii_ctrl < 0)
		return mii_ctrl;

	an_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, VR_MII_AN_CTRL);
	if (an_ctrl < 0)
		return an_ctrl;

	dgt_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
				      VR_MII_DIG_CTRL1);
	if (dgt_ctrl < 0)
		return dgt_ctrl;

	if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE) {
		/* 2.5G: fixed speed, no in-band autoneg */
		mii_ctrl &= ~(BMCR_ANENABLE | BMCR_ANRESTART | BMCR_SPEED100);
		mii_ctrl |= BMCR_SPEED1000;
		dgt_ctrl |= VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_;
		dgt_ctrl &= ~VR_MII_DIG_CTRL1_MAC_AUTO_SW_;
		/* In order for Auto-Negotiation to operate properly at
		 * 2.5 Gbps the 1.6ms link timer values must be adjusted
		 * The VR_MII_LINK_TIMER_CTRL Register must be set to
		 * 16'h7A1 and The CL37_TMR_OVR_RIDE bit of the
		 * VR_MII_DIG_CTRL1 Register set to 1
		 */
		ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
					  VR_MII_LINK_TIMER_CTRL, 0x7A1);
		if (ret < 0)
			return ret;
	} else {
		/* <= 1G: enable and restart SGMII autoneg */
		mii_ctrl |= (BMCR_ANENABLE | BMCR_ANRESTART);
		an_ctrl &= ~VR_MII_AN_CTRL_SGMII_LINK_STS_;
		dgt_ctrl &= ~VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_;
		dgt_ctrl |= VR_MII_DIG_CTRL1_MAC_AUTO_SW_;
	}

	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR,
				  mii_ctrl);
	if (ret < 0)
		return ret;

	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
				  VR_MII_DIG_CTRL1, dgt_ctrl);
	if (ret < 0)
		return ret;

	return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
				  VR_MII_AN_CTRL, an_ctrl);
}
1154 
1155 static int lan743x_pcs_seq_state(struct lan743x_adapter *adapter, u8 state)
1156 {
1157 	u8 wait_cnt = 0;
1158 	u32 dig_sts;
1159 
1160 	do {
1161 		dig_sts = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
1162 					     VR_MII_DIG_STS);
1163 		if (((dig_sts & VR_MII_DIG_STS_PSEQ_STATE_MASK_) >>
1164 		      VR_MII_DIG_STS_PSEQ_STATE_POS_) == state)
1165 			break;
1166 		usleep_range(1000, 2000);
1167 	} while (wait_cnt++ < 10);
1168 
1169 	if (wait_cnt >= 10)
1170 		return -ETIMEDOUT;
1171 
1172 	return 0;
1173 }
1174 
/* Power-cycle the SGMII/1000/2500BASE-X PCS by toggling BMCR_PDOWN and
 * waiting for the power sequencer to confirm each transition.
 * Returns 0 on success or a negative error code.
 */
static int lan743x_pcs_power_reset(struct lan743x_adapter *adapter)
{
	int mii_ctl;
	int ret;

	/* SGMII/1000/2500BASE-X PCS power down */
	mii_ctl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
	if (mii_ctl < 0)
		return mii_ctl;

	mii_ctl |= BMCR_PDOWN;
	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl);
	if (ret < 0)
		return ret;

	ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_DOWN);
	if (ret < 0)
		return ret;

	/* SGMII/1000/2500BASE-X PCS power up */
	mii_ctl &= ~BMCR_PDOWN;
	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl);
	if (ret < 0)
		return ret;

	return lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP);
}
1202 
1203 static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
1204 				    u8 *addr)
1205 {
1206 	u32 addr_lo, addr_hi;
1207 
1208 	addr_lo = addr[0] |
1209 		addr[1] << 8 |
1210 		addr[2] << 16 |
1211 		addr[3] << 24;
1212 	addr_hi = addr[4] |
1213 		addr[5] << 8;
1214 	lan743x_csr_write(adapter, MAC_RX_ADDRL, addr_lo);
1215 	lan743x_csr_write(adapter, MAC_RX_ADDRH, addr_hi);
1216 
1217 	ether_addr_copy(adapter->mac_address, addr);
1218 	netif_info(adapter, drv, adapter->netdev,
1219 		   "MAC address set to %pM\n", addr);
1220 }
1221 
/* One-time MAC initialization: reset counters, disable hardware
 * speed/duplex detection (phylib handles that), and establish a MAC
 * address with the fallback chain:
 *   cached adapter->mac_address -> address already in the RX address
 *   registers -> random locally-administered address.
 * Always returns 0.
 */
static int lan743x_mac_init(struct lan743x_adapter *adapter)
{
	bool mac_address_valid = true;
	struct net_device *netdev;
	u32 mac_addr_hi = 0;
	u32 mac_addr_lo = 0;
	u32 data;

	netdev = adapter->netdev;

	/* disable auto duplex, and speed detection. Phylib does that */
	data = lan743x_csr_read(adapter, MAC_CR);
	data &= ~(MAC_CR_ADD_ | MAC_CR_ASD_);
	data |= MAC_CR_CNTR_RST_;
	lan743x_csr_write(adapter, MAC_CR, data);

	if (!is_valid_ether_addr(adapter->mac_address)) {
		/* no cached address - try the one the hardware (EEPROM/
		 * OTP load, presumably) left in the RX address registers
		 */
		mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH);
		mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL);
		adapter->mac_address[0] = mac_addr_lo & 0xFF;
		adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF;
		adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF;
		adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF;
		adapter->mac_address[4] = mac_addr_hi & 0xFF;
		adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF;

		/* all-ones means the registers were never programmed */
		if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) &&
		    mac_addr_lo == 0xFFFFFFFF) {
			mac_address_valid = false;
		} else if (!is_valid_ether_addr(adapter->mac_address)) {
			mac_address_valid = false;
		}

		if (!mac_address_valid)
			eth_random_addr(adapter->mac_address);
	}
	lan743x_mac_set_address(adapter, adapter->mac_address);
	eth_hw_addr_set(netdev, adapter->mac_address);

	return 0;
}
1263 
1264 static int lan743x_mac_open(struct lan743x_adapter *adapter)
1265 {
1266 	u32 temp;
1267 
1268 	temp = lan743x_csr_read(adapter, MAC_RX);
1269 	lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_);
1270 	temp = lan743x_csr_read(adapter, MAC_TX);
1271 	lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_);
1272 	return 0;
1273 }
1274 
1275 static void lan743x_mac_close(struct lan743x_adapter *adapter)
1276 {
1277 	u32 temp;
1278 
1279 	temp = lan743x_csr_read(adapter, MAC_TX);
1280 	temp &= ~MAC_TX_TXEN_;
1281 	lan743x_csr_write(adapter, MAC_TX, temp);
1282 	lan743x_csr_wait_for_bit(adapter, MAC_TX, MAC_TX_TXD_,
1283 				 1, 1000, 20000, 100);
1284 
1285 	temp = lan743x_csr_read(adapter, MAC_RX);
1286 	temp &= ~MAC_RX_RXEN_;
1287 	lan743x_csr_write(adapter, MAC_RX, temp);
1288 	lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
1289 				 1, 1000, 20000, 100);
1290 }
1291 
1292 void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
1293 				       bool tx_enable, bool rx_enable)
1294 {
1295 	u32 flow_setting = 0;
1296 
1297 	/* set maximum pause time because when fifo space frees
1298 	 * up a zero value pause frame will be sent to release the pause
1299 	 */
1300 	flow_setting = MAC_FLOW_CR_FCPT_MASK_;
1301 	if (tx_enable)
1302 		flow_setting |= MAC_FLOW_CR_TX_FCEN_;
1303 	if (rx_enable)
1304 		flow_setting |= MAC_FLOW_CR_RX_FCEN_;
1305 	lan743x_csr_write(adapter, MAC_FLOW, flow_setting);
1306 }
1307 
/* Update the MAC maximum frame size for a new MTU.  If the receiver is
 * running it is quiesced first (and re-enabled afterwards), since the
 * max-size field is presumably only safe to change while RX is
 * disabled.  Always returns 0.
 */
static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
{
	int enabled = 0;
	u32 mac_rx = 0;

	mac_rx = lan743x_csr_read(adapter, MAC_RX);
	if (mac_rx & MAC_RX_RXEN_) {
		enabled = 1;
		if (mac_rx & MAC_RX_RXD_) {
			/* NOTE(review): writing the value back with RXD set
			 * appears to clear a stale "disabled" status
			 * (write-one-to-clear) before we start the disable
			 * sequence - confirm against the datasheet
			 */
			lan743x_csr_write(adapter, MAC_RX, mac_rx);
			mac_rx &= ~MAC_RX_RXD_;
		}
		/* disable RX and wait for it to report disabled */
		mac_rx &= ~MAC_RX_RXEN_;
		lan743x_csr_write(adapter, MAC_RX, mac_rx);
		lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
					 1, 1000, 20000, 100);
		/* acknowledge/clear the disabled status bit */
		lan743x_csr_write(adapter, MAC_RX, mac_rx | MAC_RX_RXD_);
	}

	/* program the new max frame size (MTU + L2 header + FCS) */
	mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_);
	mac_rx |= (((new_mtu + ETH_HLEN + ETH_FCS_LEN)
		  << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
	lan743x_csr_write(adapter, MAC_RX, mac_rx);

	if (enabled) {
		mac_rx |= MAC_RX_RXEN_;
		lan743x_csr_write(adapter, MAC_RX, mac_rx);
	}
	return 0;
}
1338 
1339 /* PHY */
/* Assert the internal Ethernet PHY reset via PMT_CTL and poll (50ms
 * interval, 1s timeout) until the reset bit self-clears and the device
 * reports ready.  Returns 0 on success or a negative error code.
 */
static int lan743x_hw_reset_phy(struct lan743x_adapter *adapter)
{
	u32 data;

	/* Only called with in probe, and before mdiobus_register */

	data = lan743x_csr_read(adapter, PMT_CTL);
	data |= PMT_CTL_ETH_PHY_RST_;
	lan743x_csr_write(adapter, PMT_CTL, data);

	return readx_poll_timeout(LAN743X_CSR_READ_OP, PMT_CTL, data,
				  (!(data & PMT_CTL_ETH_PHY_RST_) &&
				  (data & PMT_CTL_READY_)),
				  50000, 1000000);
}
1355 
1356 static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
1357 {
1358 	u32 id_rev;
1359 	u32 data;
1360 
1361 	data = lan743x_csr_read(adapter, MAC_CR);
1362 	id_rev = adapter->csr.id_rev & ID_REV_ID_MASK_;
1363 
1364 	if (adapter->is_pci11x1x && adapter->is_sgmii_en)
1365 		adapter->phy_interface = PHY_INTERFACE_MODE_SGMII;
1366 	else if (id_rev == ID_REV_ID_LAN7430_)
1367 		adapter->phy_interface = PHY_INTERFACE_MODE_GMII;
1368 	else if ((id_rev == ID_REV_ID_LAN7431_) && (data & MAC_CR_MII_EN_))
1369 		adapter->phy_interface = PHY_INTERFACE_MODE_MII;
1370 	else
1371 		adapter->phy_interface = PHY_INTERFACE_MODE_RGMII;
1372 
1373 	netif_dbg(adapter, drv, adapter->netdev,
1374 		  "selected phy interface: 0x%X\n", adapter->phy_interface);
1375 }
1376 
1377 static void lan743x_rfe_open(struct lan743x_adapter *adapter)
1378 {
1379 	lan743x_csr_write(adapter, RFE_RSS_CFG,
1380 		RFE_RSS_CFG_UDP_IPV6_EX_ |
1381 		RFE_RSS_CFG_TCP_IPV6_EX_ |
1382 		RFE_RSS_CFG_IPV6_EX_ |
1383 		RFE_RSS_CFG_UDP_IPV6_ |
1384 		RFE_RSS_CFG_TCP_IPV6_ |
1385 		RFE_RSS_CFG_IPV6_ |
1386 		RFE_RSS_CFG_UDP_IPV4_ |
1387 		RFE_RSS_CFG_TCP_IPV4_ |
1388 		RFE_RSS_CFG_IPV4_ |
1389 		RFE_RSS_CFG_VALID_HASH_BITS_ |
1390 		RFE_RSS_CFG_RSS_QUEUE_ENABLE_ |
1391 		RFE_RSS_CFG_RSS_HASH_STORE_ |
1392 		RFE_RSS_CFG_RSS_ENABLE_);
1393 }
1394 
1395 static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter)
1396 {
1397 	u8 *mac_addr;
1398 	u32 mac_addr_hi = 0;
1399 	u32 mac_addr_lo = 0;
1400 
1401 	/* Add mac address to perfect Filter */
1402 	mac_addr = adapter->mac_address;
1403 	mac_addr_lo = ((((u32)(mac_addr[0])) << 0) |
1404 		      (((u32)(mac_addr[1])) << 8) |
1405 		      (((u32)(mac_addr[2])) << 16) |
1406 		      (((u32)(mac_addr[3])) << 24));
1407 	mac_addr_hi = ((((u32)(mac_addr[4])) << 0) |
1408 		      (((u32)(mac_addr[5])) << 8));
1409 
1410 	lan743x_csr_write(adapter, RFE_ADDR_FILT_LO(0), mac_addr_lo);
1411 	lan743x_csr_write(adapter, RFE_ADDR_FILT_HI(0),
1412 			  mac_addr_hi | RFE_ADDR_FILT_HI_VALID_);
1413 }
1414 
/* Rebuild the receive filter configuration from netdev flags and the
 * multicast list.  The first 32 multicast addresses go into perfect
 * filter slots 1..32 (slot 0 holds the unicast address); any further
 * addresses fall back to the 512-bit multicast hash table written via
 * the data port.
 */
static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 hash_table[DP_SEL_VHF_HASH_LEN];
	u32 rfctl;
	u32 data;

	rfctl = lan743x_csr_read(adapter, RFE_CTL);
	rfctl &= ~(RFE_CTL_AU_ | RFE_CTL_AM_ |
		 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
	/* always accept broadcast */
	rfctl |= RFE_CTL_AB_;
	if (netdev->flags & IFF_PROMISC) {
		/* promiscuous: accept all unicast and multicast */
		rfctl |= RFE_CTL_AM_ | RFE_CTL_AU_;
	} else {
		if (netdev->flags & IFF_ALLMULTI)
			rfctl |= RFE_CTL_AM_;
	}

	if (netdev->features & NETIF_F_RXCSUM)
		rfctl |= RFE_CTL_IP_COE_ | RFE_CTL_TCP_UDP_COE_;

	memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32));
	if (netdev_mc_count(netdev)) {
		struct netdev_hw_addr *ha;
		int i;

		rfctl |= RFE_CTL_DA_PERFECT_;
		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				/* invalidate the slot before rewriting it */
				lan743x_csr_write(adapter,
						  RFE_ADDR_FILT_HI(i), 0);
				data = ha->addr[3];
				data = ha->addr[2] | (data << 8);
				data = ha->addr[1] | (data << 8);
				data = ha->addr[0] | (data << 8);
				lan743x_csr_write(adapter,
						  RFE_ADDR_FILT_LO(i), data);
				data = ha->addr[5];
				data = ha->addr[4] | (data << 8);
				data |= RFE_ADDR_FILT_HI_VALID_;
				lan743x_csr_write(adapter,
						  RFE_ADDR_FILT_HI(i), data);
			} else {
				/* overflow: hash on the top 9 CRC bits */
				u32 bitnum = (ether_crc(ETH_ALEN, ha->addr) >>
					     23) & 0x1FF;
				hash_table[bitnum / 32] |= (1 << (bitnum % 32));
				rfctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	lan743x_dp_write(adapter, DP_SEL_RFE_RAM,
			 DP_SEL_VHF_VLAN_LEN,
			 DP_SEL_VHF_HASH_LEN, hash_table);
	lan743x_csr_write(adapter, RFE_CTL, rfctl);
}
1474 
/* Soft-reset the DMA controller and program descriptor spacing,
 * channel arbitration, interrupt coalescing and OBFF thresholds.
 * Returns 0 on success, -EPERM if DEFAULT_DMA_DESCRIPTOR_SPACING is
 * not one of the supported values.
 */
static int lan743x_dmac_init(struct lan743x_adapter *adapter)
{
	u32 data = 0;

	lan743x_csr_write(adapter, DMAC_CMD, DMAC_CMD_SWR_);
	lan743x_csr_wait_for_bit(adapter, DMAC_CMD, DMAC_CMD_SWR_,
				 0, 1000, 20000, 100);
	switch (DEFAULT_DMA_DESCRIPTOR_SPACING) {
	case DMA_DESCRIPTOR_SPACING_16:
		data = DMAC_CFG_MAX_DSPACE_16_;
		break;
	case DMA_DESCRIPTOR_SPACING_32:
		data = DMAC_CFG_MAX_DSPACE_32_;
		break;
	case DMA_DESCRIPTOR_SPACING_64:
		data = DMAC_CFG_MAX_DSPACE_64_;
		break;
	case DMA_DESCRIPTOR_SPACING_128:
		data = DMAC_CFG_MAX_DSPACE_128_;
		break;
	default:
		return -EPERM;
	}
	/* coalescing is only enabled on non-A0 silicon */
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
		data |= DMAC_CFG_COAL_EN_;
	data |= DMAC_CFG_CH_ARB_SEL_RX_HIGH_;
	data |= DMAC_CFG_MAX_READ_REQ_SET_(6);
	lan743x_csr_write(adapter, DMAC_CFG, data);
	data = DMAC_COAL_CFG_TIMER_LIMIT_SET_(1);
	data |= DMAC_COAL_CFG_TIMER_TX_START_;
	data |= DMAC_COAL_CFG_FLUSH_INTS_;
	data |= DMAC_COAL_CFG_INT_EXIT_COAL_;
	data |= DMAC_COAL_CFG_CSR_EXIT_COAL_;
	data |= DMAC_COAL_CFG_TX_THRES_SET_(0x0A);
	data |= DMAC_COAL_CFG_RX_THRES_SET_(0x0C);
	lan743x_csr_write(adapter, DMAC_COAL_CFG, data);
	data = DMAC_OBFF_TX_THRES_SET_(0x08);
	data |= DMAC_OBFF_RX_THRES_SET_(0x0A);
	lan743x_csr_write(adapter, DMAC_OBFF_CFG, data);
	return 0;
}
1516 
1517 static int lan743x_dmac_tx_get_state(struct lan743x_adapter *adapter,
1518 				     int tx_channel)
1519 {
1520 	u32 dmac_cmd = 0;
1521 
1522 	dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
1523 	return DMAC_CHANNEL_STATE_SET((dmac_cmd &
1524 				      DMAC_CMD_START_T_(tx_channel)),
1525 				      (dmac_cmd &
1526 				      DMAC_CMD_STOP_T_(tx_channel)));
1527 }
1528 
1529 static int lan743x_dmac_tx_wait_till_stopped(struct lan743x_adapter *adapter,
1530 					     int tx_channel)
1531 {
1532 	int timeout = 100;
1533 	int result = 0;
1534 
1535 	while (timeout &&
1536 	       ((result = lan743x_dmac_tx_get_state(adapter, tx_channel)) ==
1537 	       DMAC_CHANNEL_STATE_STOP_PENDING)) {
1538 		usleep_range(1000, 20000);
1539 		timeout--;
1540 	}
1541 	if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
1542 		result = -ENODEV;
1543 	return result;
1544 }
1545 
1546 static int lan743x_dmac_rx_get_state(struct lan743x_adapter *adapter,
1547 				     int rx_channel)
1548 {
1549 	u32 dmac_cmd = 0;
1550 
1551 	dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
1552 	return DMAC_CHANNEL_STATE_SET((dmac_cmd &
1553 				      DMAC_CMD_START_R_(rx_channel)),
1554 				      (dmac_cmd &
1555 				      DMAC_CMD_STOP_R_(rx_channel)));
1556 }
1557 
1558 static int lan743x_dmac_rx_wait_till_stopped(struct lan743x_adapter *adapter,
1559 					     int rx_channel)
1560 {
1561 	int timeout = 100;
1562 	int result = 0;
1563 
1564 	while (timeout &&
1565 	       ((result = lan743x_dmac_rx_get_state(adapter, rx_channel)) ==
1566 	       DMAC_CHANNEL_STATE_STOP_PENDING)) {
1567 		usleep_range(1000, 20000);
1568 		timeout--;
1569 	}
1570 	if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
1571 		result = -ENODEV;
1572 	return result;
1573 }
1574 
/* Release one TX descriptor and its buffer bookkeeping: unmap the DMA
 * buffer (page vs single mapping depending on how it was created),
 * deliver or discard any pending TX timestamp, free the skb, and zero
 * both the descriptor and buffer_info.
 * @cleanup: true when tearing the ring down (discard timestamps)
 *           rather than completing normal transmission.
 */
static void lan743x_tx_release_desc(struct lan743x_tx *tx,
				    int descriptor_index, bool cleanup)
{
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_tx_descriptor *descriptor = NULL;
	u32 descriptor_type = 0;
	bool ignore_sync;

	descriptor = &tx->ring_cpu_ptr[descriptor_index];
	buffer_info = &tx->buffer_info[descriptor_index];
	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE))
		goto done;

	/* only DATA descriptors carry mappings/skbs to release;
	 * extension descriptors just get their active flag cleared
	 */
	descriptor_type = le32_to_cpu(descriptor->data0) &
			  TX_DESC_DATA0_DTYPE_MASK_;
	if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_)
		goto clean_up_data_descriptor;
	else
		goto clear_active;

clean_up_data_descriptor:
	if (buffer_info->dma_ptr) {
		if (buffer_info->flags &
		    TX_BUFFER_INFO_FLAG_SKB_FRAGMENT) {
			dma_unmap_page(&tx->adapter->pdev->dev,
				       buffer_info->dma_ptr,
				       buffer_info->buffer_length,
				       DMA_TO_DEVICE);
		} else {
			dma_unmap_single(&tx->adapter->pdev->dev,
					 buffer_info->dma_ptr,
					 buffer_info->buffer_length,
					 DMA_TO_DEVICE);
		}
		buffer_info->dma_ptr = 0;
		buffer_info->buffer_length = 0;
	}
	if (!buffer_info->skb)
		goto clear_active;

	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) {
		dev_kfree_skb_any(buffer_info->skb);
		goto clear_skb;
	}

	if (cleanup) {
		/* tearing down: drop the outstanding timestamp request */
		lan743x_ptp_unrequest_tx_timestamp(tx->adapter);
		dev_kfree_skb_any(buffer_info->skb);
	} else {
		/* normal completion: hand the skb to the PTP code, which
		 * owns it from here on
		 */
		ignore_sync = (buffer_info->flags &
			       TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0;
		lan743x_ptp_tx_timestamp_skb(tx->adapter,
					     buffer_info->skb, ignore_sync);
	}

clear_skb:
	buffer_info->skb = NULL;

clear_active:
	buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE;

done:
	memset(buffer_info, 0, sizeof(*buffer_info));
	memset(descriptor, 0, sizeof(*descriptor));
}
1640 
1641 static int lan743x_tx_next_index(struct lan743x_tx *tx, int index)
1642 {
1643 	return ((++index) % tx->ring_size);
1644 }
1645 
1646 static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx)
1647 {
1648 	while (le32_to_cpu(*tx->head_cpu_ptr) != (tx->last_head)) {
1649 		lan743x_tx_release_desc(tx, tx->last_head, false);
1650 		tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
1651 	}
1652 }
1653 
1654 static void lan743x_tx_release_all_descriptors(struct lan743x_tx *tx)
1655 {
1656 	u32 original_head = 0;
1657 
1658 	original_head = tx->last_head;
1659 	do {
1660 		lan743x_tx_release_desc(tx, tx->last_head, true);
1661 		tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
1662 	} while (tx->last_head != original_head);
1663 	memset(tx->ring_cpu_ptr, 0,
1664 	       sizeof(*tx->ring_cpu_ptr) * (tx->ring_size));
1665 	memset(tx->buffer_info, 0,
1666 	       sizeof(*tx->buffer_info) * (tx->ring_size));
1667 }
1668 
1669 static int lan743x_tx_get_desc_cnt(struct lan743x_tx *tx,
1670 				   struct sk_buff *skb)
1671 {
1672 	int result = 1; /* 1 for the main skb buffer */
1673 	int nr_frags = 0;
1674 
1675 	if (skb_is_gso(skb))
1676 		result++; /* requires an extension descriptor */
1677 	nr_frags = skb_shinfo(skb)->nr_frags;
1678 	result += nr_frags; /* 1 for each fragment buffer */
1679 	return result;
1680 }
1681 
1682 static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx)
1683 {
1684 	int last_head = tx->last_head;
1685 	int last_tail = tx->last_tail;
1686 
1687 	if (last_tail >= last_head)
1688 		return tx->ring_size - last_tail + last_head - 1;
1689 	else
1690 		return last_head - last_tail - 1;
1691 }
1692 
1693 static void lan743x_rx_cfg_b_tstamp_config(struct lan743x_adapter *adapter,
1694 					   int rx_ts_config)
1695 {
1696 	int channel_number;
1697 	int index;
1698 	u32 data;
1699 
1700 	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
1701 		channel_number = adapter->rx[index].channel_number;
1702 		data = lan743x_csr_read(adapter, RX_CFG_B(channel_number));
1703 		data &= RX_CFG_B_TS_MASK_;
1704 		data |= rx_ts_config;
1705 		lan743x_csr_write(adapter, RX_CFG_B(channel_number),
1706 				  data);
1707 	}
1708 }
1709 
1710 int lan743x_rx_set_tstamp_mode(struct lan743x_adapter *adapter,
1711 			       int rx_filter)
1712 {
1713 	u32 data;
1714 
1715 	switch (rx_filter) {
1716 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
1717 			lan743x_rx_cfg_b_tstamp_config(adapter,
1718 						       RX_CFG_B_TS_DESCR_EN_);
1719 			data = lan743x_csr_read(adapter, PTP_RX_TS_CFG);
1720 			data |= PTP_RX_TS_CFG_EVENT_MSGS_;
1721 			lan743x_csr_write(adapter, PTP_RX_TS_CFG, data);
1722 			break;
1723 	case HWTSTAMP_FILTER_NONE:
1724 			lan743x_rx_cfg_b_tstamp_config(adapter,
1725 						       RX_CFG_B_TS_NONE_);
1726 			break;
1727 	case HWTSTAMP_FILTER_ALL:
1728 			lan743x_rx_cfg_b_tstamp_config(adapter,
1729 						       RX_CFG_B_TS_ALL_RX_);
1730 			break;
1731 	default:
1732 			return -ERANGE;
1733 	}
1734 	adapter->rx_tstamp_filter = rx_filter;
1735 	return 0;
1736 }
1737 
1738 void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
1739 				      bool enable_timestamping,
1740 				      bool enable_onestep_sync)
1741 {
1742 	if (enable_timestamping)
1743 		tx->ts_flags |= TX_TS_FLAG_TIMESTAMPING_ENABLED;
1744 	else
1745 		tx->ts_flags &= ~TX_TS_FLAG_TIMESTAMPING_ENABLED;
1746 	if (enable_onestep_sync)
1747 		tx->ts_flags |= TX_TS_FLAG_ONE_STEP_SYNC;
1748 	else
1749 		tx->ts_flags &= ~TX_TS_FLAG_ONE_STEP_SYNC;
1750 }
1751 
/* Begin assembling a TX frame: map the head buffer and fill the first
 * (first-segment) DATA descriptor.  The descriptor's data0 word is
 * staged in tx->frame_data0 and written out by a later frame-assembler
 * step so the hardware never sees a half-built descriptor.
 * Returns 0 on success, -ENOMEM if the DMA mapping fails.
 */
static int lan743x_tx_frame_start(struct lan743x_tx *tx,
				  unsigned char *first_buffer,
				  unsigned int first_buffer_length,
				  unsigned int frame_length,
				  bool time_stamp,
				  bool check_sum)
{
	/* called only from within lan743x_tx_xmit_frame.
	 * assuming tx->ring_lock has already been acquired.
	 */
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_adapter *adapter = tx->adapter;
	struct device *dev = &adapter->pdev->dev;
	dma_addr_t dma_ptr;

	tx->frame_flags |= TX_FRAME_FLAG_IN_PROGRESS;
	tx->frame_first = tx->last_tail;
	tx->frame_tail = tx->frame_first;

	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];
	dma_ptr = dma_map_single(dev, first_buffer, first_buffer_length,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_ptr))
		return -ENOMEM;

	tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
	tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
	tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
		TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);

	buffer_info->skb = NULL;
	buffer_info->dma_ptr = dma_ptr;
	buffer_info->buffer_length = first_buffer_length;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;

	/* first-segment DATA descriptor; FCS appended by hardware */
	tx->frame_data0 = (first_buffer_length &
		TX_DESC_DATA0_BUF_LENGTH_MASK_) |
		TX_DESC_DATA0_DTYPE_DATA_ |
		TX_DESC_DATA0_FS_ |
		TX_DESC_DATA0_FCS_;
	if (time_stamp)
		tx->frame_data0 |= TX_DESC_DATA0_TSE_;

	if (check_sum)
		tx->frame_data0 |= TX_DESC_DATA0_ICE_ |
				   TX_DESC_DATA0_IPE_ |
				   TX_DESC_DATA0_TPE_;

	/* data0 will be programmed in one of other frame assembler functions */
	return 0;
}
1805 
/* Insert an LSO extension descriptor after the current descriptor.
 * The pending descriptor's staged data0 is committed first (marked as
 * last-segment with interrupt-on-completion if there are no fragments
 * to follow), then an extension descriptor carrying the total payload
 * length is staged in tx->frame_data0.
 */
static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
				     unsigned int frame_length,
				     int nr_frags)
{
	/* called only from within lan743x_tx_xmit_frame.
	 * assuming tx->ring_lock has already been acquired.
	 */
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;

	/* wrap up previous descriptor */
	tx->frame_data0 |= TX_DESC_DATA0_EXT_;
	if (nr_frags <= 0) {
		tx->frame_data0 |= TX_DESC_DATA0_LS_;
		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
		tx->frame_last = tx->frame_first;
	}
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);

	/* move to next descriptor */
	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];

	/* add extension descriptor (carries no buffer of its own) */
	tx_descriptor->data1 = 0;
	tx_descriptor->data2 = 0;
	tx_descriptor->data3 = 0;

	buffer_info->skb = NULL;
	buffer_info->dma_ptr = 0;
	buffer_info->buffer_length = 0;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;

	tx->frame_data0 = (frame_length & TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_) |
			  TX_DESC_DATA0_DTYPE_EXT_ |
			  TX_DESC_DATA0_EXT_LSO_;

	/* data0 will be programmed in one of other frame assembler functions */
}
1847 
/* Append one skb page fragment to the in-progress frame: commit the
 * previous descriptor's staged data0, map the fragment, and stage a
 * new DATA descriptor for it.  A zero-length fragment is silently
 * skipped.  On DMA mapping failure every descriptor built so far for
 * this frame is released and frame-assembly state reset.
 * Returns 0 on success, -ENOMEM on mapping failure.
 */
static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
					 const skb_frag_t *fragment,
					 unsigned int frame_length)
{
	/* called only from within lan743x_tx_xmit_frame
	 * assuming tx->ring_lock has already been acquired
	 */
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_adapter *adapter = tx->adapter;
	struct device *dev = &adapter->pdev->dev;
	unsigned int fragment_length = 0;
	dma_addr_t dma_ptr;

	fragment_length = skb_frag_size(fragment);
	if (!fragment_length)
		return 0;

	/* wrap up previous descriptor */
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);

	/* move to next descriptor */
	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];
	dma_ptr = skb_frag_dma_map(dev, fragment,
				   0, fragment_length,
				   DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_ptr)) {
		int desc_index;

		/* cleanup all previously setup descriptors */
		desc_index = tx->frame_first;
		while (desc_index != tx->frame_tail) {
			lan743x_tx_release_desc(tx, desc_index, true);
			desc_index = lan743x_tx_next_index(tx, desc_index);
		}
		dma_wmb();
		tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
		tx->frame_first = 0;
		tx->frame_data0 = 0;
		tx->frame_tail = 0;
		tx->frame_last = 0;
		return -ENOMEM;
	}

	tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
	tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
	tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
			       TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);

	buffer_info->skb = NULL;
	buffer_info->dma_ptr = dma_ptr;
	buffer_info->buffer_length = fragment_length;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
	/* page mapping - released with dma_unmap_page() */
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_SKB_FRAGMENT;

	tx->frame_data0 = (fragment_length & TX_DESC_DATA0_BUF_LENGTH_MASK_) |
			  TX_DESC_DATA0_DTYPE_DATA_ |
			  TX_DESC_DATA0_FCS_;

	/* data0 will be programmed in one of other frame assembler functions */
	return 0;
}
1913 
/* Finish the in-progress frame: mark the final DATA descriptor as
 * last-segment, attach the skb (and timestamp flags) to the last
 * descriptor's buffer_info so it is freed/timestamped on completion,
 * commit the staged data0, then ring the TX tail doorbell to hand the
 * frame to hardware.
 */
static void lan743x_tx_frame_end(struct lan743x_tx *tx,
				 struct sk_buff *skb,
				 bool time_stamp,
				 bool ignore_sync)
{
	/* called only from within lan743x_tx_xmit_frame
	 * assuming tx->ring_lock has already been acquired
	 */
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_adapter *adapter = tx->adapter;
	u32 tx_tail_flags = 0;

	/* wrap up previous descriptor (only if it is a DATA descriptor;
	 * an LSO path may already have set LS/IOC on an earlier one)
	 */
	if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
	    TX_DESC_DATA0_DTYPE_DATA_) {
		tx->frame_data0 |= TX_DESC_DATA0_LS_;
		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
		tx->frame_last = tx->frame_tail;
	}

	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_last];
	buffer_info = &tx->buffer_info[tx->frame_last];
	buffer_info->skb = skb;
	if (time_stamp)
		buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
	if (ignore_sync)
		buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;

	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
	tx->last_tail = tx->frame_tail;

	/* descriptors must be visible to the device before the doorbell */
	dma_wmb();

	if (tx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
		tx_tail_flags |= TX_TAIL_SET_TOP_INT_VEC_EN_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET)
		tx_tail_flags |= TX_TAIL_SET_DMAC_INT_EN_ |
		TX_TAIL_SET_TOP_INT_EN_;

	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
			  tx_tail_flags | tx->frame_tail);
	tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
}
1960 
static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
					 struct sk_buff *skb)
{
	/* Queue one skb onto the TX DMA ring.
	 * Returns NETDEV_TX_OK when the skb was consumed (queued or dropped),
	 * or NETDEV_TX_BUSY when the ring is temporarily full (the queue is
	 * stopped and the stack will retry the skb).
	 */
	int required_number_of_descriptors = 0;
	unsigned int start_frame_length = 0;
	netdev_tx_t retval = NETDEV_TX_OK;
	unsigned int frame_length = 0;
	unsigned int head_length = 0;
	unsigned long irq_flags = 0;
	bool do_timestamp = false;
	bool ignore_sync = false;
	struct netdev_queue *txq;
	int nr_frags = 0;
	bool gso = false;
	int j;

	required_number_of_descriptors = lan743x_tx_get_desc_cnt(tx, skb);

	spin_lock_irqsave(&tx->ring_lock, irq_flags);
	if (required_number_of_descriptors >
		lan743x_tx_get_avail_desc(tx)) {
		if (required_number_of_descriptors > (tx->ring_size - 1)) {
			/* frame can never fit in this ring; drop it */
			dev_kfree_skb_irq(skb);
		} else {
			/* save how many descriptors we needed to restart the queue */
			tx->rqd_descriptors = required_number_of_descriptors;
			retval = NETDEV_TX_BUSY;
			txq = netdev_get_tx_queue(tx->adapter->netdev,
						  tx->channel_number);
			netif_tx_stop_queue(txq);
		}
		goto unlock;
	}

	/* space available, transmit skb  */
	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED) &&
	    (lan743x_ptp_request_tx_timestamp(tx->adapter))) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		do_timestamp = true;
		if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC)
			ignore_sync = true;
	}
	head_length = skb_headlen(skb);
	frame_length = skb_pagelen(skb);
	nr_frags = skb_shinfo(skb)->nr_frags;
	start_frame_length = frame_length;
	gso = skb_is_gso(skb);
	if (gso) {
		/* for GSO the first descriptor carries the segment size,
		 * clamped to a minimum of 8
		 */
		start_frame_length = max(skb_shinfo(skb)->gso_size,
					 (unsigned short)8);
	}

	if (lan743x_tx_frame_start(tx,
				   skb->data, head_length,
				   start_frame_length,
				   do_timestamp,
				   skb->ip_summed == CHECKSUM_PARTIAL)) {
		dev_kfree_skb_irq(skb);
		goto unlock;
	}
	tx->frame_count++;

	if (gso)
		lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);

	if (nr_frags <= 0)
		goto finish;

	/* map and append every page fragment of the skb */
	for (j = 0; j < nr_frags; j++) {
		const skb_frag_t *frag = &(skb_shinfo(skb)->frags[j]);

		if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) {
			/* upon error no need to call
			 *	lan743x_tx_frame_end
			 * frame assembler clean up was performed inside
			 *	lan743x_tx_frame_add_fragment
			 */
			dev_kfree_skb_irq(skb);
			goto unlock;
		}
	}

finish:
	lan743x_tx_frame_end(tx, skb, do_timestamp, ignore_sync);

unlock:
	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
	return retval;
}
2051 
static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
{
	/* NAPI poll for a TX channel: acknowledge the IOC interrupt, reap
	 * completed descriptors, wake the netdev queue once enough space is
	 * back, then complete NAPI and re-enable the interrupt.
	 * Always reports 0 work done.
	 */
	struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi);
	struct lan743x_adapter *adapter = tx->adapter;
	unsigned long irq_flags = 0;
	struct netdev_queue *txq;
	u32 ioc_bit = 0;

	ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
	lan743x_csr_read(adapter, DMAC_INT_STS);
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C)
		lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit);
	spin_lock_irqsave(&tx->ring_lock, irq_flags);

	/* clean up tx ring */
	lan743x_tx_release_completed_descriptors(tx);
	txq = netdev_get_tx_queue(adapter->netdev, tx->channel_number);
	if (netif_tx_queue_stopped(txq)) {
		if (tx->rqd_descriptors) {
			/* only wake once the descriptor count that caused
			 * the stall is actually available
			 */
			if (tx->rqd_descriptors <=
			    lan743x_tx_get_avail_desc(tx)) {
				tx->rqd_descriptors = 0;
				netif_tx_wake_queue(txq);
			}
		} else {
			netif_tx_wake_queue(txq);
		}
	}
	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);

	if (!napi_complete(napi))
		goto done;

	/* enable isr */
	lan743x_csr_write(adapter, INT_EN_SET,
			  INT_BIT_DMA_TX_(tx->channel_number));
	lan743x_csr_read(adapter, INT_STS);

done:
	return 0;
}
2093 
2094 static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx)
2095 {
2096 	if (tx->head_cpu_ptr) {
2097 		dma_free_coherent(&tx->adapter->pdev->dev,
2098 				  sizeof(*tx->head_cpu_ptr), tx->head_cpu_ptr,
2099 				  tx->head_dma_ptr);
2100 		tx->head_cpu_ptr = NULL;
2101 		tx->head_dma_ptr = 0;
2102 	}
2103 	kfree(tx->buffer_info);
2104 	tx->buffer_info = NULL;
2105 
2106 	if (tx->ring_cpu_ptr) {
2107 		dma_free_coherent(&tx->adapter->pdev->dev,
2108 				  tx->ring_allocation_size, tx->ring_cpu_ptr,
2109 				  tx->ring_dma_ptr);
2110 		tx->ring_allocation_size = 0;
2111 		tx->ring_cpu_ptr = NULL;
2112 		tx->ring_dma_ptr = 0;
2113 	}
2114 	tx->ring_size = 0;
2115 }
2116 
2117 static int lan743x_tx_ring_init(struct lan743x_tx *tx)
2118 {
2119 	size_t ring_allocation_size = 0;
2120 	void *cpu_ptr = NULL;
2121 	dma_addr_t dma_ptr;
2122 	int ret = -ENOMEM;
2123 
2124 	tx->ring_size = LAN743X_TX_RING_SIZE;
2125 	if (tx->ring_size & ~TX_CFG_B_TX_RING_LEN_MASK_) {
2126 		ret = -EINVAL;
2127 		goto cleanup;
2128 	}
2129 	if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
2130 				      DMA_BIT_MASK(64))) {
2131 		dev_warn(&tx->adapter->pdev->dev,
2132 			 "lan743x_: No suitable DMA available\n");
2133 		ret = -ENOMEM;
2134 		goto cleanup;
2135 	}
2136 	ring_allocation_size = ALIGN(tx->ring_size *
2137 				     sizeof(struct lan743x_tx_descriptor),
2138 				     PAGE_SIZE);
2139 	dma_ptr = 0;
2140 	cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
2141 				     ring_allocation_size, &dma_ptr, GFP_KERNEL);
2142 	if (!cpu_ptr) {
2143 		ret = -ENOMEM;
2144 		goto cleanup;
2145 	}
2146 
2147 	tx->ring_allocation_size = ring_allocation_size;
2148 	tx->ring_cpu_ptr = (struct lan743x_tx_descriptor *)cpu_ptr;
2149 	tx->ring_dma_ptr = dma_ptr;
2150 
2151 	cpu_ptr = kzalloc_objs(*tx->buffer_info, tx->ring_size);
2152 	if (!cpu_ptr) {
2153 		ret = -ENOMEM;
2154 		goto cleanup;
2155 	}
2156 	tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr;
2157 	dma_ptr = 0;
2158 	cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
2159 				     sizeof(*tx->head_cpu_ptr), &dma_ptr,
2160 				     GFP_KERNEL);
2161 	if (!cpu_ptr) {
2162 		ret = -ENOMEM;
2163 		goto cleanup;
2164 	}
2165 
2166 	tx->head_cpu_ptr = cpu_ptr;
2167 	tx->head_dma_ptr = dma_ptr;
2168 	if (tx->head_dma_ptr & 0x3) {
2169 		ret = -ENOMEM;
2170 		goto cleanup;
2171 	}
2172 
2173 	return 0;
2174 
2175 cleanup:
2176 	lan743x_tx_ring_cleanup(tx);
2177 	return ret;
2178 }
2179 
static void lan743x_tx_close(struct lan743x_tx *tx)
{
	/* Shut down a TX channel: stop DMA, mask interrupts, tear down NAPI,
	 * disable the FIFO, then release skbs and ring memory.
	 */
	struct lan743x_adapter *adapter = tx->adapter;

	/* stop the DMA channel and wait until it is actually idle */
	lan743x_csr_write(adapter,
			  DMAC_CMD,
			  DMAC_CMD_STOP_T_(tx->channel_number));
	lan743x_dmac_tx_wait_till_stopped(adapter, tx->channel_number);

	/* mask this channel's interrupts at DMAC and top level */
	lan743x_csr_write(adapter,
			  DMAC_INT_EN_CLR,
			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));
	lan743x_csr_write(adapter, INT_EN_CLR,
			  INT_BIT_DMA_TX_(tx->channel_number));
	napi_disable(&tx->napi);
	netif_napi_del(&tx->napi);

	/* disable the TX FIFO and wait for its enable bit to clear */
	lan743x_csr_write(adapter, FCT_TX_CTL,
			  FCT_TX_CTL_DIS_(tx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
				 FCT_TX_CTL_EN_(tx->channel_number),
				 0, 1000, 20000, 100);

	/* free any skbs still referenced by descriptors */
	lan743x_tx_release_all_descriptors(tx);

	tx->rqd_descriptors = 0;

	lan743x_tx_ring_cleanup(tx);
}
2209 
static int lan743x_tx_open(struct lan743x_tx *tx)
{
	/* Bring up a TX channel: allocate the ring, reset and enable the TX
	 * FIFO, program the DMAC with ring/writeback addresses, register
	 * NAPI, configure interrupt behavior and start the channel.
	 * Returns 0 on success or a negative errno from ring init.
	 */
	struct lan743x_adapter *adapter = NULL;
	u32 data = 0;
	int ret;

	adapter = tx->adapter;
	ret = lan743x_tx_ring_init(tx);
	if (ret)
		return ret;

	/* initialize fifo */
	lan743x_csr_write(adapter, FCT_TX_CTL,
			  FCT_TX_CTL_RESET_(tx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
				 FCT_TX_CTL_RESET_(tx->channel_number),
				 0, 1000, 20000, 100);

	/* enable fifo */
	lan743x_csr_write(adapter, FCT_TX_CTL,
			  FCT_TX_CTL_EN_(tx->channel_number));

	/* reset tx channel */
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_TX_SWR_(tx->channel_number));
	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
				 DMAC_CMD_TX_SWR_(tx->channel_number),
				 0, 1000, 20000, 100);

	/* Write TX_BASE_ADDR */
	lan743x_csr_write(adapter,
			  TX_BASE_ADDRH(tx->channel_number),
			  DMA_ADDR_HIGH32(tx->ring_dma_ptr));
	lan743x_csr_write(adapter,
			  TX_BASE_ADDRL(tx->channel_number),
			  DMA_ADDR_LOW32(tx->ring_dma_ptr));

	/* Write TX_CFG_B: ring length; wider DMA bursts on post-A0 silicon */
	data = lan743x_csr_read(adapter, TX_CFG_B(tx->channel_number));
	data &= ~TX_CFG_B_TX_RING_LEN_MASK_;
	data |= ((tx->ring_size) & TX_CFG_B_TX_RING_LEN_MASK_);
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
		data |= TX_CFG_B_TDMABL_512_;
	lan743x_csr_write(adapter, TX_CFG_B(tx->channel_number), data);

	/* Write TX_CFG_A: head-pointer writeback and (post-A0) thresholds */
	data = TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ | TX_CFG_A_TX_HP_WB_EN_;
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
		data |= TX_CFG_A_TX_HP_WB_ON_INT_TMR_;
		data |= TX_CFG_A_TX_PF_THRES_SET_(0x10);
		data |= TX_CFG_A_TX_PF_PRI_THRES_SET_(0x04);
		data |= TX_CFG_A_TX_HP_WB_THRES_SET_(0x07);
	}
	lan743x_csr_write(adapter, TX_CFG_A(tx->channel_number), data);

	/* Write TX_HEAD_WRITEBACK_ADDR */
	lan743x_csr_write(adapter,
			  TX_HEAD_WRITEBACK_ADDRH(tx->channel_number),
			  DMA_ADDR_HIGH32(tx->head_dma_ptr));
	lan743x_csr_write(adapter,
			  TX_HEAD_WRITEBACK_ADDRL(tx->channel_number),
			  DMA_ADDR_LOW32(tx->head_dma_ptr));

	/* set last head */
	tx->last_head = lan743x_csr_read(adapter, TX_HEAD(tx->channel_number));

	/* write TX_TAIL */
	tx->last_tail = 0;
	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
			  (u32)(tx->last_tail));
	tx->vector_flags = lan743x_intr_get_vector_flags(adapter,
							 INT_BIT_DMA_TX_
							 (tx->channel_number));
	netif_napi_add_tx_weight(adapter->netdev,
				 &tx->napi, lan743x_tx_napi_poll,
				 NAPI_POLL_WEIGHT);
	napi_enable(&tx->napi);

	/* TX_CFG_C: interrupt auto-clear / read-to-clear behavior derived
	 * from the vector's capabilities
	 */
	data = 0;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
		data |= TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
		data |= TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
		data |= TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
		data |= TX_CFG_C_TX_INT_EN_R2C_;
	lan743x_csr_write(adapter, TX_CFG_C(tx->channel_number), data);

	if (!(tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET))
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_TX_(tx->channel_number));
	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));

	/*  start dmac channel */
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_START_T_(tx->channel_number));
	return 0;
}
2310 
2311 static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
2312 {
2313 	return ((++index) % rx->ring_size);
2314 }
2315 
2316 static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
2317 {
2318 	/* update the tail once per 8 descriptors */
2319 	if ((index & 7) == 7)
2320 		lan743x_csr_write(rx->adapter, RX_TAIL(rx->channel_number),
2321 				  index);
2322 }
2323 
static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
					gfp_t gfp)
{
	/* Allocate and DMA-map a fresh receive skb for ring slot @index and
	 * hand the descriptor back to hardware. If the slot previously held
	 * a mapped buffer, its used area is synced to the CPU and the mapping
	 * is released first; the previous skb itself is NOT freed here - the
	 * caller has saved and will consume it.
	 * @gfp: allocation flags (GFP_KERNEL at init, GFP_ATOMIC in NAPI).
	 * Returns 0 on success or -ENOMEM.
	 */
	struct net_device *netdev = rx->adapter->netdev;
	struct device *dev = &rx->adapter->pdev->dev;
	struct lan743x_rx_buffer_info *buffer_info;
	unsigned int buffer_length, used_length;
	struct lan743x_rx_descriptor *descriptor;
	struct sk_buff *skb;
	dma_addr_t dma_ptr;

	buffer_length = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + RX_HEAD_PADDING;

	descriptor = &rx->ring_cpu_ptr[index];
	buffer_info = &rx->buffer_info[index];
	skb = __netdev_alloc_skb(netdev, buffer_length, gfp);
	if (!skb)
		return -ENOMEM;
	dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_ptr)) {
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}
	if (buffer_info->dma_ptr) {
		/* sync used area of buffer only */
		if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_)
			/* frame length is valid only if LS bit is set.
			 * it's a safe upper bound for the used area in this
			 * buffer.
			 */
			used_length = min(RX_DESC_DATA0_FRAME_LENGTH_GET_
					  (le32_to_cpu(descriptor->data0)),
					  buffer_info->buffer_length);
		else
			used_length = buffer_info->buffer_length;
		dma_sync_single_for_cpu(dev, buffer_info->dma_ptr,
					used_length,
					DMA_FROM_DEVICE);
		/* CPU sync was already done (partially) above, so skip the
		 * full-length sync the unmap would otherwise perform
		 */
		dma_unmap_single_attrs(dev, buffer_info->dma_ptr,
				       buffer_info->buffer_length,
				       DMA_FROM_DEVICE,
				       DMA_ATTR_SKIP_CPU_SYNC);
	}

	/* publish the new buffer; data0 is written last since it carries the
	 * OWN bit that hands the descriptor to hardware
	 */
	buffer_info->skb = skb;
	buffer_info->dma_ptr = dma_ptr;
	buffer_info->buffer_length = buffer_length;
	descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
	descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
	descriptor->data3 = 0;
	descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
			    (buffer_length & RX_DESC_DATA0_BUF_LENGTH_MASK_)));
	lan743x_rx_update_tail(rx, index);

	return 0;
}
2380 
static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index)
{
	/* Re-post the slot's existing, still-mapped buffer to hardware.
	 * Used when a replacement skb could not be allocated, so the slot
	 * keeps its current skb/mapping and the packet data is discarded.
	 */
	struct lan743x_rx_buffer_info *buffer_info;
	struct lan743x_rx_descriptor *descriptor;

	descriptor = &rx->ring_cpu_ptr[index];
	buffer_info = &rx->buffer_info[index];

	descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
	descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
	descriptor->data3 = 0;
	/* data0 is written last: the OWN bit returns the descriptor to
	 * hardware
	 */
	descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
			    ((buffer_info->buffer_length) &
			    RX_DESC_DATA0_BUF_LENGTH_MASK_)));
	lan743x_rx_update_tail(rx, index);
}
2397 
2398 static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index)
2399 {
2400 	struct lan743x_rx_buffer_info *buffer_info;
2401 	struct lan743x_rx_descriptor *descriptor;
2402 
2403 	descriptor = &rx->ring_cpu_ptr[index];
2404 	buffer_info = &rx->buffer_info[index];
2405 
2406 	memset(descriptor, 0, sizeof(*descriptor));
2407 
2408 	if (buffer_info->dma_ptr) {
2409 		dma_unmap_single(&rx->adapter->pdev->dev,
2410 				 buffer_info->dma_ptr,
2411 				 buffer_info->buffer_length,
2412 				 DMA_FROM_DEVICE);
2413 		buffer_info->dma_ptr = 0;
2414 	}
2415 
2416 	if (buffer_info->skb) {
2417 		dev_kfree_skb(buffer_info->skb);
2418 		buffer_info->skb = NULL;
2419 	}
2420 
2421 	memset(buffer_info, 0, sizeof(*buffer_info));
2422 }
2423 
static struct sk_buff *
lan743x_rx_trim_skb(struct sk_buff *skb, int frame_length)
{
	/* Linearize a (possibly frag_list-chained) skb and trim it to the
	 * real frame length minus the trailing FCS.
	 * Returns the trimmed skb, or NULL (skb already freed) if
	 * linearization fails.
	 */
	if (skb_linearize(skb)) {
		dev_kfree_skb_irq(skb);
		return NULL;
	}
	/* strip the hardware-appended FCS; clamp so length never goes
	 * negative
	 */
	frame_length = max_t(int, 0, frame_length - ETH_FCS_LEN);
	if (skb->len > frame_length) {
		/* skb is linear at this point, so adjusting tail/len
		 * directly is valid
		 */
		skb->tail -= skb->len - frame_length;
		skb->len = frame_length;
	}
	return skb;
}
2438 
static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
{
	/* Process one completed RX descriptor (one buffer of a frame).
	 * Multi-buffer frames are assembled on rx->skb_head via frag_list;
	 * the finished frame is handed to NAPI when its last buffer (and
	 * optional timestamp extension descriptor) arrives.
	 * Returns RX_PROCESS_RESULT_BUFFER_RECEIVED when a buffer was
	 * consumed, or RX_PROCESS_RESULT_NOTHING_TO_DO when no completed
	 * work is available.
	 */
	int current_head_index = le32_to_cpu(*rx->head_cpu_ptr);
	struct lan743x_rx_descriptor *descriptor, *desc_ext;
	struct net_device *netdev = rx->adapter->netdev;
	int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
	struct lan743x_rx_buffer_info *buffer_info;
	int frame_length, buffer_length;
	bool is_ice, is_tce, is_icsm;
	int extension_index = -1;
	bool is_last, is_first;
	struct sk_buff *skb;

	/* sanity-check both indices against the ring bounds */
	if (current_head_index < 0 || current_head_index >= rx->ring_size)
		goto done;

	if (rx->last_head < 0 || rx->last_head >= rx->ring_size)
		goto done;

	/* software head caught up with hardware head: nothing pending */
	if (rx->last_head == current_head_index)
		goto done;

	descriptor = &rx->ring_cpu_ptr[rx->last_head];
	/* still owned by hardware - not yet completed */
	if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_OWN_)
		goto done;
	buffer_info = &rx->buffer_info[rx->last_head];

	is_last = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_;
	is_first = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_FS_;

	if (is_last && le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_EXT_) {
		/* extension is expected to follow */
		int index = lan743x_rx_next_index(rx, rx->last_head);

		if (index == current_head_index)
			/* extension not yet available */
			goto done;
		desc_ext = &rx->ring_cpu_ptr[index];
		if (le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_OWN_)
			/* extension not yet available */
			goto done;
		if (!(le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_EXT_))
			goto move_forward;
		extension_index = index;
	}

	/* Only the last buffer in a multi-buffer frame contains the total frame
	 * length. The chip occasionally sends more buffers than strictly
	 * required to reach the total frame length.
	 * Handle this by adding all buffers to the skb in their entirety.
	 * Once the real frame length is known, trim the skb.
	 */
	frame_length =
		RX_DESC_DATA0_FRAME_LENGTH_GET_(le32_to_cpu(descriptor->data0));
	buffer_length = buffer_info->buffer_length;
	/* hardware checksum error/status flags from data1 */
	is_ice = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICE_;
	is_tce = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_TCE_;
	is_icsm = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICSM_;

	netdev_dbg(netdev, "%s%schunk: %d/%d",
		   is_first ? "first " : "      ",
		   is_last  ? "last  " : "      ",
		   frame_length, buffer_length);

	/* save existing skb, allocate new skb and map to dma */
	skb = buffer_info->skb;
	if (lan743x_rx_init_ring_element(rx, rx->last_head, GFP_ATOMIC)) {
		/* failed to allocate next skb.
		 * Memory is very low.
		 * Drop this packet and reuse buffer.
		 */
		lan743x_rx_reuse_ring_element(rx, rx->last_head);
		/* drop packet that was being assembled */
		dev_kfree_skb_irq(rx->skb_head);
		rx->skb_head = NULL;
		goto process_extension;
	}

	/* add buffers to skb via skb->frag_list */
	if (is_first) {
		skb_reserve(skb, RX_HEAD_PADDING);
		skb_put(skb, buffer_length - RX_HEAD_PADDING);
		if (rx->skb_head)
			dev_kfree_skb_irq(rx->skb_head);
		rx->skb_head = skb;
	} else if (rx->skb_head) {
		skb_put(skb, buffer_length);
		if (skb_shinfo(rx->skb_head)->frag_list)
			rx->skb_tail->next = skb;
		else
			skb_shinfo(rx->skb_head)->frag_list = skb;
		rx->skb_tail = skb;
		rx->skb_head->len += skb->len;
		rx->skb_head->data_len += skb->len;
		rx->skb_head->truesize += skb->truesize;
	} else {
		/* packet to assemble has already been dropped because one or
		 * more of its buffers could not be allocated
		 */
		netdev_dbg(netdev, "drop buffer intended for dropped packet");
		dev_kfree_skb_irq(skb);
	}

process_extension:
	if (extension_index >= 0) {
		u32 ts_sec;
		u32 ts_nsec;

		/* the extension descriptor carries the RX hardware timestamp */
		ts_sec = le32_to_cpu(desc_ext->data1);
		ts_nsec = (le32_to_cpu(desc_ext->data2) &
			  RX_DESC_DATA2_TS_NS_MASK_);
		if (rx->skb_head)
			skb_hwtstamps(rx->skb_head)->hwtstamp =
				ktime_set(ts_sec, ts_nsec);
		lan743x_rx_reuse_ring_element(rx, extension_index);
		rx->last_head = extension_index;
		netdev_dbg(netdev, "process extension");
	}

	if (is_last && rx->skb_head)
		rx->skb_head = lan743x_rx_trim_skb(rx->skb_head, frame_length);

	if (is_last && rx->skb_head) {
		rx->skb_head->protocol = eth_type_trans(rx->skb_head,
							rx->adapter->netdev);
		if (rx->adapter->netdev->features & NETIF_F_RXCSUM) {
			/* NOTE(review): checksum status is written to 'skb'
			 * (the last buffer), which for a multi-buffer frame
			 * is a frag_list child rather than rx->skb_head -
			 * confirm this is the intended target
			 */
			if (!is_ice && !is_tce && !is_icsm)
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		netdev_dbg(netdev, "sending %d byte frame to OS",
			   rx->skb_head->len);
		napi_gro_receive(&rx->napi, rx->skb_head);
		rx->skb_head = NULL;
	}

move_forward:
	/* push tail and head forward */
	rx->last_tail = rx->last_head;
	rx->last_head = lan743x_rx_next_index(rx, rx->last_head);
	result = RX_PROCESS_RESULT_BUFFER_RECEIVED;
done:
	return result;
}
2582 
static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
{
	/* NAPI poll for an RX channel: process up to @weight buffers, then
	 * either stay scheduled (budget exhausted or work still pending) or
	 * complete NAPI and re-arm the interrupt - via INT_EN_SET or via
	 * RX_TAIL flag bits, depending on the vector's auto-set capabilities.
	 */
	struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi);
	struct lan743x_adapter *adapter = rx->adapter;
	int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
	u32 rx_tail_flags = 0;
	int count;

	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) {
		/* clear int status bit before reading packet */
		lan743x_csr_write(adapter, DMAC_INT_STS,
				  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	}
	for (count = 0; count < weight; count++) {
		result = lan743x_rx_process_buffer(rx);
		if (result == RX_PROCESS_RESULT_NOTHING_TO_DO)
			break;
	}
	rx->frame_count += count;
	/* budget used or the last call still consumed a buffer: report full
	 * weight so NAPI polls again
	 */
	if (count == weight || result == RX_PROCESS_RESULT_BUFFER_RECEIVED)
		return weight;

	if (!napi_complete_done(napi, count))
		return count;

	/* re-arm interrupts, must write to rx tail on some chip variants */
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
		rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) {
		rx_tail_flags |= RX_TAIL_SET_TOP_INT_EN_;
	} else {
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_RX_(rx->channel_number));
	}

	if (rx_tail_flags)
		lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
				  rx_tail_flags | rx->last_tail);

	return count;
}
2624 
static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx)
{
	/* Free every RX ring resource: posted skbs and their DMA mappings,
	 * the head-writeback word, the bookkeeping array and the descriptor
	 * ring. Safe to call on a partially initialized ring.
	 */
	if (rx->buffer_info && rx->ring_cpu_ptr) {
		int index;

		for (index = 0; index < rx->ring_size; index++)
			lan743x_rx_release_ring_element(rx, index);
	}

	if (rx->head_cpu_ptr) {
		dma_free_coherent(&rx->adapter->pdev->dev,
				  sizeof(*rx->head_cpu_ptr), rx->head_cpu_ptr,
				  rx->head_dma_ptr);
		rx->head_cpu_ptr = NULL;
		rx->head_dma_ptr = 0;
	}

	/* kfree(NULL) is a no-op */
	kfree(rx->buffer_info);
	rx->buffer_info = NULL;

	if (rx->ring_cpu_ptr) {
		dma_free_coherent(&rx->adapter->pdev->dev,
				  rx->ring_allocation_size, rx->ring_cpu_ptr,
				  rx->ring_dma_ptr);
		rx->ring_allocation_size = 0;
		rx->ring_cpu_ptr = NULL;
		rx->ring_dma_ptr = 0;
	}

	rx->ring_size = 0;
	rx->last_head = 0;
}
2657 
2658 static int lan743x_rx_ring_init(struct lan743x_rx *rx)
2659 {
2660 	size_t ring_allocation_size = 0;
2661 	dma_addr_t dma_ptr = 0;
2662 	void *cpu_ptr = NULL;
2663 	int ret = -ENOMEM;
2664 	int index = 0;
2665 
2666 	rx->ring_size = LAN743X_RX_RING_SIZE;
2667 	if (rx->ring_size <= 1) {
2668 		ret = -EINVAL;
2669 		goto cleanup;
2670 	}
2671 	if (rx->ring_size & ~RX_CFG_B_RX_RING_LEN_MASK_) {
2672 		ret = -EINVAL;
2673 		goto cleanup;
2674 	}
2675 	if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
2676 				      DMA_BIT_MASK(64))) {
2677 		dev_warn(&rx->adapter->pdev->dev,
2678 			 "lan743x_: No suitable DMA available\n");
2679 		ret = -ENOMEM;
2680 		goto cleanup;
2681 	}
2682 	ring_allocation_size = ALIGN(rx->ring_size *
2683 				     sizeof(struct lan743x_rx_descriptor),
2684 				     PAGE_SIZE);
2685 	dma_ptr = 0;
2686 	cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
2687 				     ring_allocation_size, &dma_ptr, GFP_KERNEL);
2688 	if (!cpu_ptr) {
2689 		ret = -ENOMEM;
2690 		goto cleanup;
2691 	}
2692 	rx->ring_allocation_size = ring_allocation_size;
2693 	rx->ring_cpu_ptr = (struct lan743x_rx_descriptor *)cpu_ptr;
2694 	rx->ring_dma_ptr = dma_ptr;
2695 
2696 	cpu_ptr = kzalloc_objs(*rx->buffer_info, rx->ring_size);
2697 	if (!cpu_ptr) {
2698 		ret = -ENOMEM;
2699 		goto cleanup;
2700 	}
2701 	rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr;
2702 	dma_ptr = 0;
2703 	cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
2704 				     sizeof(*rx->head_cpu_ptr), &dma_ptr,
2705 				     GFP_KERNEL);
2706 	if (!cpu_ptr) {
2707 		ret = -ENOMEM;
2708 		goto cleanup;
2709 	}
2710 
2711 	rx->head_cpu_ptr = cpu_ptr;
2712 	rx->head_dma_ptr = dma_ptr;
2713 	if (rx->head_dma_ptr & 0x3) {
2714 		ret = -ENOMEM;
2715 		goto cleanup;
2716 	}
2717 
2718 	rx->last_head = 0;
2719 	for (index = 0; index < rx->ring_size; index++) {
2720 		ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL);
2721 		if (ret)
2722 			goto cleanup;
2723 	}
2724 	return 0;
2725 
2726 cleanup:
2727 	netif_warn(rx->adapter, ifup, rx->adapter->netdev,
2728 		   "Error allocating memory for LAN743x\n");
2729 
2730 	lan743x_rx_ring_cleanup(rx);
2731 	return ret;
2732 }
2733 
static void lan743x_rx_close(struct lan743x_rx *rx)
{
	/* Shut down an RX channel: disable the FIFO, stop DMA, mask
	 * interrupts, tear down NAPI and release the ring.
	 */
	struct lan743x_adapter *adapter = rx->adapter;

	/* disable the RX FIFO and wait for its enable bit to clear */
	lan743x_csr_write(adapter, FCT_RX_CTL,
			  FCT_RX_CTL_DIS_(rx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
				 FCT_RX_CTL_EN_(rx->channel_number),
				 0, 1000, 20000, 100);

	/* stop the DMA channel and wait until it is actually idle */
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_STOP_R_(rx->channel_number));
	lan743x_dmac_rx_wait_till_stopped(adapter, rx->channel_number);

	/* mask this channel's interrupts at DMAC and top level */
	lan743x_csr_write(adapter, DMAC_INT_EN_CLR,
			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	lan743x_csr_write(adapter, INT_EN_CLR,
			  INT_BIT_DMA_RX_(rx->channel_number));
	napi_disable(&rx->napi);

	netif_napi_del(&rx->napi);

	lan743x_rx_ring_cleanup(rx);
}
2758 
static int lan743x_rx_open(struct lan743x_rx *rx)
{
	/* Bring up an RX channel: allocate and fill the ring, reset the DMA
	 * channel, program ring/writeback addresses and config registers,
	 * enable NAPI and interrupts, start DMA, and finally reset/enable
	 * the RX FIFO with flow-control thresholds.
	 * Returns 0 on success or a negative errno.
	 */
	struct lan743x_adapter *adapter = rx->adapter;
	u32 data = 0;
	int ret;

	rx->frame_count = 0;
	ret = lan743x_rx_ring_init(rx);
	if (ret)
		goto return_error;

	netif_napi_add(adapter->netdev, &rx->napi, lan743x_rx_napi_poll);

	/* reset the RX DMA channel and wait for the reset bit to clear */
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_RX_SWR_(rx->channel_number));
	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
				 DMAC_CMD_RX_SWR_(rx->channel_number),
				 0, 1000, 20000, 100);

	/* set ring base address */
	lan743x_csr_write(adapter,
			  RX_BASE_ADDRH(rx->channel_number),
			  DMA_ADDR_HIGH32(rx->ring_dma_ptr));
	lan743x_csr_write(adapter,
			  RX_BASE_ADDRL(rx->channel_number),
			  DMA_ADDR_LOW32(rx->ring_dma_ptr));

	/* set rx write back address */
	lan743x_csr_write(adapter,
			  RX_HEAD_WRITEBACK_ADDRH(rx->channel_number),
			  DMA_ADDR_HIGH32(rx->head_dma_ptr));
	lan743x_csr_write(adapter,
			  RX_HEAD_WRITEBACK_ADDRL(rx->channel_number),
			  DMA_ADDR_LOW32(rx->head_dma_ptr));
	data = RX_CFG_A_RX_HP_WB_EN_;
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
		/* post-A0 silicon: enable writeback on interrupt timer and
		 * set prefetch/writeback thresholds
		 */
		data |= (RX_CFG_A_RX_WB_ON_INT_TMR_ |
			RX_CFG_A_RX_WB_THRES_SET_(0x7) |
			RX_CFG_A_RX_PF_THRES_SET_(16) |
			RX_CFG_A_RX_PF_PRI_THRES_SET_(4));
	}

	/* set RX_CFG_A */
	lan743x_csr_write(adapter,
			  RX_CFG_A(rx->channel_number), data);

	/* set RX_CFG_B: head padding mode and ring length */
	data = lan743x_csr_read(adapter, RX_CFG_B(rx->channel_number));
	data &= ~RX_CFG_B_RX_PAD_MASK_;
	if (!RX_HEAD_PADDING)
		data |= RX_CFG_B_RX_PAD_0_;
	else
		data |= RX_CFG_B_RX_PAD_2_;
	data &= ~RX_CFG_B_RX_RING_LEN_MASK_;
	data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_);
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
		data |= RX_CFG_B_RDMABL_512_;

	lan743x_csr_write(adapter, RX_CFG_B(rx->channel_number), data);
	rx->vector_flags = lan743x_intr_get_vector_flags(adapter,
							 INT_BIT_DMA_RX_
							 (rx->channel_number));

	/* set RX_CFG_C: interrupt auto-clear / read-to-clear behavior
	 * derived from the vector's capabilities
	 */
	data = 0;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
		data |= RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
		data |= RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
		data |= RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
		data |= RX_CFG_C_RX_INT_EN_R2C_;
	lan743x_csr_write(adapter, RX_CFG_C(rx->channel_number), data);

	/* all ring slots are posted, so the tail starts one behind slot 0 */
	rx->last_tail = ((u32)(rx->ring_size - 1));
	lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
			  rx->last_tail);
	rx->last_head = lan743x_csr_read(adapter, RX_HEAD(rx->channel_number));
	if (rx->last_head) {
		/* hardware head should be zero right after channel reset */
		ret = -EIO;
		goto napi_delete;
	}

	napi_enable(&rx->napi);

	lan743x_csr_write(adapter, INT_EN_SET,
			  INT_BIT_DMA_RX_(rx->channel_number));
	lan743x_csr_write(adapter, DMAC_INT_STS,
			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_START_R_(rx->channel_number));

	/* initialize fifo */
	lan743x_csr_write(adapter, FCT_RX_CTL,
			  FCT_RX_CTL_RESET_(rx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
				 FCT_RX_CTL_RESET_(rx->channel_number),
				 0, 1000, 20000, 100);
	lan743x_csr_write(adapter, FCT_FLOW(rx->channel_number),
			  FCT_FLOW_CTL_REQ_EN_ |
			  FCT_FLOW_CTL_ON_THRESHOLD_SET_(0x2A) |
			  FCT_FLOW_CTL_OFF_THRESHOLD_SET_(0xA));

	/* enable fifo */
	lan743x_csr_write(adapter, FCT_RX_CTL,
			  FCT_RX_CTL_EN_(rx->channel_number));
	return 0;

napi_delete:
	netif_napi_del(&rx->napi);
	lan743x_rx_ring_cleanup(rx);

return_error:
	return ret;
}
2877 
2878 static int lan743x_phylink_sgmii_config(struct lan743x_adapter *adapter)
2879 {
2880 	u32 sgmii_ctl;
2881 	int ret;
2882 
2883 	ret = lan743x_get_lsd(SPEED_1000, DUPLEX_FULL,
2884 			      MASTER_SLAVE_STATE_MASTER);
2885 	if (ret < 0) {
2886 		netif_err(adapter, drv, adapter->netdev,
2887 			  "error %d link-speed-duplex(LSD) invalid\n", ret);
2888 		return ret;
2889 	}
2890 
2891 	adapter->sgmii_lsd = ret;
2892 	netif_dbg(adapter, drv, adapter->netdev,
2893 		  "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd);
2894 
2895 	/* LINK_STATUS_SOURCE from the External PHY via SGMII */
2896 	sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
2897 	sgmii_ctl &= ~SGMII_CTL_LINK_STATUS_SOURCE_;
2898 	lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
2899 
2900 	ret = lan743x_serdes_clock_and_aneg_update(adapter);
2901 	if (ret < 0) {
2902 		netif_err(adapter, drv, adapter->netdev,
2903 			  "error %d sgmii aneg update failed\n", ret);
2904 		return ret;
2905 	}
2906 
2907 	return lan743x_pcs_power_reset(adapter);
2908 }
2909 
2910 static int lan743x_phylink_1000basex_config(struct lan743x_adapter *adapter)
2911 {
2912 	u32 sgmii_ctl;
2913 	int ret;
2914 
2915 	ret = lan743x_get_lsd(SPEED_1000, DUPLEX_FULL,
2916 			      MASTER_SLAVE_STATE_MASTER);
2917 	if (ret < 0) {
2918 		netif_err(adapter, drv, adapter->netdev,
2919 			  "error %d link-speed-duplex(LSD) invalid\n", ret);
2920 		return ret;
2921 	}
2922 
2923 	adapter->sgmii_lsd = ret;
2924 	netif_dbg(adapter, drv, adapter->netdev,
2925 		  "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd);
2926 
2927 	/* LINK_STATUS_SOURCE from 1000BASE-X PCS link status */
2928 	sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
2929 	sgmii_ctl |= SGMII_CTL_LINK_STATUS_SOURCE_;
2930 	lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
2931 
2932 	ret = lan743x_serdes_clock_and_aneg_update(adapter);
2933 	if (ret < 0) {
2934 		netif_err(adapter, drv, adapter->netdev,
2935 			  "error %d 1000basex aneg update failed\n", ret);
2936 		return ret;
2937 	}
2938 
2939 	return lan743x_pcs_power_reset(adapter);
2940 }
2941 
2942 static int lan743x_phylink_2500basex_config(struct lan743x_adapter *adapter)
2943 {
2944 	u32 sgmii_ctl;
2945 	int ret;
2946 
2947 	ret = lan743x_get_lsd(SPEED_2500, DUPLEX_FULL,
2948 			      MASTER_SLAVE_STATE_MASTER);
2949 	if (ret < 0) {
2950 		netif_err(adapter, drv, adapter->netdev,
2951 			  "error %d link-speed-duplex(LSD) invalid\n", ret);
2952 		return ret;
2953 	}
2954 
2955 	adapter->sgmii_lsd = ret;
2956 	netif_dbg(adapter, drv, adapter->netdev,
2957 		  "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd);
2958 
2959 	/* LINK_STATUS_SOURCE from 2500BASE-X PCS link status */
2960 	sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
2961 	sgmii_ctl |= SGMII_CTL_LINK_STATUS_SOURCE_;
2962 	lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
2963 
2964 	ret = lan743x_serdes_clock_and_aneg_update(adapter);
2965 	if (ret < 0) {
2966 		netif_err(adapter, drv, adapter->netdev,
2967 			  "error %d 2500basex aneg update failed\n", ret);
2968 		return ret;
2969 	}
2970 
2971 	return lan743x_pcs_power_reset(adapter);
2972 }
2973 
2974 static void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable)
2975 {
2976 	u32 mac_cr;
2977 
2978 	mac_cr = lan743x_csr_read(adapter, MAC_CR);
2979 	if (enable)
2980 		mac_cr |= MAC_CR_EEE_EN_;
2981 	else
2982 		mac_cr &= ~MAC_CR_EEE_EN_;
2983 	lan743x_csr_write(adapter, MAC_CR, mac_cr);
2984 }
2985 
2986 static void lan743x_phylink_mac_config(struct phylink_config *config,
2987 				       unsigned int link_an_mode,
2988 				       const struct phylink_link_state *state)
2989 {
2990 	struct net_device *netdev = to_net_dev(config->dev);
2991 	struct lan743x_adapter *adapter = netdev_priv(netdev);
2992 	int ret;
2993 
2994 	switch (state->interface) {
2995 	case PHY_INTERFACE_MODE_2500BASEX:
2996 		ret = lan743x_phylink_2500basex_config(adapter);
2997 		if (ret < 0)
2998 			netif_err(adapter, drv, adapter->netdev,
2999 				  "2500BASEX config failed. Error %d\n", ret);
3000 		else
3001 			netif_dbg(adapter, drv, adapter->netdev,
3002 				  "2500BASEX mode selected and configured\n");
3003 		break;
3004 	case PHY_INTERFACE_MODE_1000BASEX:
3005 		ret = lan743x_phylink_1000basex_config(adapter);
3006 		if (ret < 0)
3007 			netif_err(adapter, drv, adapter->netdev,
3008 				  "1000BASEX config failed. Error %d\n", ret);
3009 		else
3010 			netif_dbg(adapter, drv, adapter->netdev,
3011 				  "1000BASEX mode selected and configured\n");
3012 		break;
3013 	case PHY_INTERFACE_MODE_SGMII:
3014 		ret = lan743x_phylink_sgmii_config(adapter);
3015 		if (ret < 0)
3016 			netif_err(adapter, drv, adapter->netdev,
3017 				  "SGMII config failed. Error %d\n", ret);
3018 		else
3019 			netif_dbg(adapter, drv, adapter->netdev,
3020 				  "SGMII mode selected and configured\n");
3021 		break;
3022 	default:
3023 		netif_dbg(adapter, drv, adapter->netdev,
3024 			  "RGMII/GMII/MII(0x%X) mode enable\n",
3025 			  state->interface);
3026 		break;
3027 	}
3028 }
3029 
3030 static void lan743x_phylink_mac_link_down(struct phylink_config *config,
3031 					  unsigned int link_an_mode,
3032 					  phy_interface_t interface)
3033 {
3034 	struct net_device *netdev = to_net_dev(config->dev);
3035 
3036 	netif_tx_stop_all_queues(netdev);
3037 }
3038 
3039 static void lan743x_phylink_mac_link_up(struct phylink_config *config,
3040 					struct phy_device *phydev,
3041 					unsigned int link_an_mode,
3042 					phy_interface_t interface,
3043 					int speed, int duplex,
3044 					bool tx_pause, bool rx_pause)
3045 {
3046 	struct net_device *netdev = to_net_dev(config->dev);
3047 	struct lan743x_adapter *adapter = netdev_priv(netdev);
3048 	int mac_cr;
3049 	u8 cap;
3050 
3051 	mac_cr = lan743x_csr_read(adapter, MAC_CR);
3052 	/* Pre-initialize register bits.
3053 	 * Resulting value corresponds to SPEED_10
3054 	 */
3055 	mac_cr &= ~(MAC_CR_CFG_H_ | MAC_CR_CFG_L_);
3056 	if (speed == SPEED_2500)
3057 		mac_cr |= MAC_CR_CFG_H_ | MAC_CR_CFG_L_;
3058 	else if (speed == SPEED_1000)
3059 		mac_cr |= MAC_CR_CFG_H_;
3060 	else if (speed == SPEED_100)
3061 		mac_cr |= MAC_CR_CFG_L_;
3062 
3063 	if (duplex == DUPLEX_FULL)
3064 		mac_cr |= MAC_CR_DPX_;
3065 	else
3066 		mac_cr &= ~MAC_CR_DPX_;
3067 
3068 	lan743x_csr_write(adapter, MAC_CR, mac_cr);
3069 
3070 	lan743x_ptp_update_latency(adapter, speed);
3071 
3072 	/* Flow Control operation */
3073 	cap = 0;
3074 	if (tx_pause)
3075 		cap |= FLOW_CTRL_TX;
3076 	if (rx_pause)
3077 		cap |= FLOW_CTRL_RX;
3078 
3079 	lan743x_mac_flow_ctrl_set_enables(adapter,
3080 					  cap & FLOW_CTRL_TX,
3081 					  cap & FLOW_CTRL_RX);
3082 
3083 	netif_tx_wake_all_queues(netdev);
3084 }
3085 
3086 static void lan743x_mac_disable_tx_lpi(struct phylink_config *config)
3087 {
3088 	struct net_device *netdev = to_net_dev(config->dev);
3089 	struct lan743x_adapter *adapter = netdev_priv(netdev);
3090 
3091 	lan743x_mac_eee_enable(adapter, false);
3092 }
3093 
3094 static int lan743x_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
3095 				     bool tx_clk_stop)
3096 {
3097 	struct net_device *netdev = to_net_dev(config->dev);
3098 	struct lan743x_adapter *adapter = netdev_priv(netdev);
3099 
3100 	/* Software should only change this field when Energy Efficient
3101 	 * Ethernet Enable (EEEEN) is cleared. We ensure that by clearing
3102 	 * EEEEN during probe, and phylink itself guarantees that
3103 	 * mac_disable_tx_lpi() will have been previously called.
3104 	 */
3105 	lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, timer);
3106 	lan743x_mac_eee_enable(adapter, true);
3107 
3108 	return 0;
3109 }
3110 
/* phylink MAC operations for the lan743x MAC; the tx_lpi hooks let
 * phylink drive EEE entry/exit through MAC_CR.
 */
static const struct phylink_mac_ops lan743x_phylink_mac_ops = {
	.mac_config = lan743x_phylink_mac_config,
	.mac_link_down = lan743x_phylink_mac_link_down,
	.mac_link_up = lan743x_phylink_mac_link_up,
	.mac_disable_tx_lpi = lan743x_mac_disable_tx_lpi,
	.mac_enable_tx_lpi = lan743x_mac_enable_tx_lpi,
};
3118 
/* lan743x_phylink_create - populate phylink_config and create phylink
 * @adapter: board private structure
 *
 * Fills in pause/speed capabilities and LPI defaults (the default LPI
 * timer is read back from the hardware delay-count register), selects
 * the PHY interface mode, builds the supported-interfaces bitmap to
 * match, and creates the phylink instance stored in adapter->phylink.
 *
 * Return: 0 on success, or the PTR_ERR() of phylink_create() on failure.
 */
static int lan743x_phylink_create(struct lan743x_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct phylink *pl;

	adapter->phylink_config.dev = &netdev->dev;
	adapter->phylink_config.type = PHYLINK_NETDEV;
	adapter->phylink_config.mac_managed_pm = false;

	adapter->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
		MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
	adapter->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD;
	/* Use the current hardware LPI request delay as the default timer */
	adapter->phylink_config.lpi_timer_default =
		lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);

	/* Selects adapter->phy_interface (helper defined elsewhere in file) */
	lan743x_phy_interface_select(adapter);

	switch (adapter->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		/* SGMII SerDes also supports 1000/2500BASE-X modes */
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  adapter->phylink_config.supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
			  adapter->phylink_config.supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
			  adapter->phylink_config.supported_interfaces);
		adapter->phylink_config.mac_capabilities |= MAC_2500FD;
		break;
	case PHY_INTERFACE_MODE_GMII:
		__set_bit(PHY_INTERFACE_MODE_GMII,
			  adapter->phylink_config.supported_interfaces);
		break;
	case PHY_INTERFACE_MODE_MII:
		__set_bit(PHY_INTERFACE_MODE_MII,
			  adapter->phylink_config.supported_interfaces);
		break;
	default:
		phy_interface_set_rgmii(adapter->phylink_config.supported_interfaces);
	}

	/* LPI is allowed on every supported interface mode */
	memcpy(adapter->phylink_config.lpi_interfaces,
	       adapter->phylink_config.supported_interfaces,
	       sizeof(adapter->phylink_config.lpi_interfaces));

	pl = phylink_create(&adapter->phylink_config, NULL,
			    adapter->phy_interface, &lan743x_phylink_mac_ops);

	if (IS_ERR(pl)) {
		netdev_err(netdev, "Could not create phylink (%pe)\n", pl);
		return PTR_ERR(pl);
	}

	adapter->phylink = pl;
	netdev_dbg(netdev, "lan743x phylink created");

	return 0;
}
3175 
3176 static bool lan743x_phy_handle_exists(struct device_node *dn)
3177 {
3178 	dn = of_parse_phandle(dn, "phy-handle", 0);
3179 	of_node_put(dn);
3180 	return dn != NULL;
3181 }
3182 
/* lan743x_phylink_connect - attach phylink to a PHY or fixed link, start it
 * @adapter: board private structure
 *
 * Attach priority: DT "phy-handle" if an OF node exists; otherwise the
 * first PHY found on the internal MDIO bus; otherwise, on LAN7431 or
 * PCI11x1x, a fixed link at the highest full-duplex speed the MAC
 * advertises.
 *
 * Return: 0 on success, -ENXIO when no PHY and no fixed-link fallback
 * applies, or a negative error from the attach/fixed-link calls.
 */
static int lan743x_phylink_connect(struct lan743x_adapter *adapter)
{
	struct device_node *dn = adapter->pdev->dev.of_node;
	struct net_device *dev = adapter->netdev;
	struct phy_device *phydev;
	int ret;

	if (dn)
		ret = phylink_of_phy_connect(adapter->phylink, dn, 0);

	/* Fall back to scanning the MDIO bus when there is no OF node, or
	 * when the OF connect failed and no explicit phy-handle exists.
	 * Note: short-circuit on !dn means ret is only read here after it
	 * was assigned above, so it is never used uninitialized.
	 */
	if (!dn || (ret && !lan743x_phy_handle_exists(dn))) {
		phydev = phy_find_first(adapter->mdiobus);
		if (phydev) {
			/* attach the mac to the phy */
			ret = phylink_connect_phy(adapter->phylink, phydev);
		} else if (((adapter->csr.id_rev & ID_REV_ID_MASK_) ==
			      ID_REV_ID_LAN7431_) || adapter->is_pci11x1x) {
			struct phylink_link_state state;
			unsigned long caps;

			/* Pick the best speed the MAC claims to support */
			caps = adapter->phylink_config.mac_capabilities;
			if (caps & MAC_2500FD) {
				state.speed = SPEED_2500;
				state.duplex = DUPLEX_FULL;
			} else if (caps & MAC_1000FD) {
				state.speed = SPEED_1000;
				state.duplex = DUPLEX_FULL;
			} else {
				state.speed = SPEED_UNKNOWN;
				state.duplex = DUPLEX_UNKNOWN;
			}

			/* NOTE(review): state is only partially initialized
			 * (speed/duplex) — confirm phylink_set_fixed_link()
			 * does not read other fields.
			 */
			ret = phylink_set_fixed_link(adapter->phylink, &state);
			if (ret) {
				netdev_err(dev, "Could not set fixed link\n");
				return ret;
			}
		} else {
			netdev_err(dev, "no PHY found\n");
			return -ENXIO;
		}
	}

	if (ret) {
		netdev_err(dev, "Could not attach PHY (%d)\n", ret);
		return ret;
	}

	phylink_start(adapter->phylink);

	return 0;
}
3235 
/* Stop phylink, then detach the PHY/fixed link; the order is required */
static void lan743x_phylink_disconnect(struct lan743x_adapter *adapter)
{
	phylink_stop(adapter->phylink);
	phylink_disconnect_phy(adapter->phylink);
}
3241 
/* lan743x_netdev_close - ndo_stop: tear down in reverse order of open
 * @netdev: network interface device structure
 *
 * Closes the TX then RX DMA channels, PTP, disconnects phylink, then
 * shuts down the MAC and interrupts. Always returns 0.
 */
static int lan743x_netdev_close(struct net_device *netdev)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	int index;

	for (index = 0; index < adapter->used_tx_channels; index++)
		lan743x_tx_close(&adapter->tx[index]);

	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
		lan743x_rx_close(&adapter->rx[index]);

	lan743x_ptp_close(adapter);

	lan743x_phylink_disconnect(adapter);

	lan743x_mac_close(adapter);

	lan743x_intr_close(adapter);

	return 0;
}
3263 
3264 static int lan743x_netdev_open(struct net_device *netdev)
3265 {
3266 	struct lan743x_adapter *adapter = netdev_priv(netdev);
3267 	int index;
3268 	int ret;
3269 
3270 	ret = lan743x_intr_open(adapter);
3271 	if (ret)
3272 		goto return_error;
3273 
3274 	ret = lan743x_mac_open(adapter);
3275 	if (ret)
3276 		goto close_intr;
3277 
3278 	ret = lan743x_phylink_connect(adapter);
3279 	if (ret)
3280 		goto close_mac;
3281 
3282 	ret = lan743x_ptp_open(adapter);
3283 	if (ret)
3284 		goto close_mac;
3285 
3286 	lan743x_rfe_open(adapter);
3287 
3288 	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
3289 		ret = lan743x_rx_open(&adapter->rx[index]);
3290 		if (ret)
3291 			goto close_rx;
3292 	}
3293 
3294 	for (index = 0; index < adapter->used_tx_channels; index++) {
3295 		ret = lan743x_tx_open(&adapter->tx[index]);
3296 		if (ret)
3297 			goto close_tx;
3298 	}
3299 
3300 	if (netdev->phydev)
3301 		phy_support_eee(netdev->phydev);
3302 
3303 #ifdef CONFIG_PM
3304 	if (adapter->netdev->phydev) {
3305 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
3306 
3307 		phy_ethtool_get_wol(netdev->phydev, &wol);
3308 		adapter->phy_wol_supported = wol.supported;
3309 		adapter->phy_wolopts = wol.wolopts;
3310 	}
3311 #endif
3312 
3313 	return 0;
3314 
3315 close_tx:
3316 	for (index = 0; index < adapter->used_tx_channels; index++) {
3317 		if (adapter->tx[index].ring_cpu_ptr)
3318 			lan743x_tx_close(&adapter->tx[index]);
3319 	}
3320 
3321 close_rx:
3322 	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
3323 		if (adapter->rx[index].ring_cpu_ptr)
3324 			lan743x_rx_close(&adapter->rx[index]);
3325 	}
3326 	lan743x_ptp_close(adapter);
3327 	if (adapter->phylink)
3328 		lan743x_phylink_disconnect(adapter);
3329 
3330 close_mac:
3331 	lan743x_mac_close(adapter);
3332 
3333 close_intr:
3334 	lan743x_intr_close(adapter);
3335 
3336 return_error:
3337 	netif_warn(adapter, ifup, adapter->netdev,
3338 		   "Error opening LAN743x\n");
3339 	return ret;
3340 }
3341 
3342 static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
3343 					     struct net_device *netdev)
3344 {
3345 	struct lan743x_adapter *adapter = netdev_priv(netdev);
3346 	u8 ch = 0;
3347 
3348 	if (adapter->is_pci11x1x)
3349 		ch = skb->queue_mapping % PCI11X1X_USED_TX_CHANNELS;
3350 
3351 	return lan743x_tx_xmit_frame(&adapter->tx[ch], skb);
3352 }
3353 
3354 static int lan743x_netdev_ioctl(struct net_device *netdev,
3355 				struct ifreq *ifr, int cmd)
3356 {
3357 	struct lan743x_adapter *adapter = netdev_priv(netdev);
3358 
3359 	if (!netif_running(netdev))
3360 		return -EINVAL;
3361 
3362 	return phylink_mii_ioctl(adapter->phylink, ifr, cmd);
3363 }
3364 
/* ndo_set_rx_mode: push the current RX filter settings into the RFE */
static void lan743x_netdev_set_multicast(struct net_device *netdev)
{
	struct lan743x_adapter *adapter;

	adapter = netdev_priv(netdev);
	lan743x_rfe_set_multicast(adapter);
}
3371 
3372 static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu)
3373 {
3374 	struct lan743x_adapter *adapter = netdev_priv(netdev);
3375 	int ret = 0;
3376 
3377 	ret = lan743x_mac_set_mtu(adapter, new_mtu);
3378 	if (!ret)
3379 		WRITE_ONCE(netdev->mtu, new_mtu);
3380 	return ret;
3381 }
3382 
3383 static void lan743x_netdev_get_stats64(struct net_device *netdev,
3384 				       struct rtnl_link_stats64 *stats)
3385 {
3386 	struct lan743x_adapter *adapter = netdev_priv(netdev);
3387 
3388 	stats->rx_packets = lan743x_csr_read(adapter, STAT_RX_TOTAL_FRAMES);
3389 	stats->tx_packets = lan743x_csr_read(adapter, STAT_TX_TOTAL_FRAMES);
3390 	stats->rx_bytes = lan743x_csr_read(adapter,
3391 					   STAT_RX_UNICAST_BYTE_COUNT) +
3392 			  lan743x_csr_read(adapter,
3393 					   STAT_RX_BROADCAST_BYTE_COUNT) +
3394 			  lan743x_csr_read(adapter,
3395 					   STAT_RX_MULTICAST_BYTE_COUNT);
3396 	stats->tx_bytes = lan743x_csr_read(adapter,
3397 					   STAT_TX_UNICAST_BYTE_COUNT) +
3398 			  lan743x_csr_read(adapter,
3399 					   STAT_TX_BROADCAST_BYTE_COUNT) +
3400 			  lan743x_csr_read(adapter,
3401 					   STAT_TX_MULTICAST_BYTE_COUNT);
3402 	stats->rx_errors = lan743x_csr_read(adapter, STAT_RX_FCS_ERRORS) +
3403 			   lan743x_csr_read(adapter,
3404 					    STAT_RX_ALIGNMENT_ERRORS) +
3405 			   lan743x_csr_read(adapter, STAT_RX_JABBER_ERRORS) +
3406 			   lan743x_csr_read(adapter,
3407 					    STAT_RX_UNDERSIZE_FRAME_ERRORS) +
3408 			   lan743x_csr_read(adapter,
3409 					    STAT_RX_OVERSIZE_FRAME_ERRORS);
3410 	stats->tx_errors = lan743x_csr_read(adapter, STAT_TX_FCS_ERRORS) +
3411 			   lan743x_csr_read(adapter,
3412 					    STAT_TX_EXCESS_DEFERRAL_ERRORS) +
3413 			   lan743x_csr_read(adapter, STAT_TX_CARRIER_ERRORS);
3414 	stats->rx_dropped = lan743x_csr_read(adapter,
3415 					     STAT_RX_DROPPED_FRAMES);
3416 	stats->tx_dropped = lan743x_csr_read(adapter,
3417 					     STAT_TX_EXCESSIVE_COLLISION);
3418 	stats->multicast = lan743x_csr_read(adapter,
3419 					    STAT_RX_MULTICAST_FRAMES) +
3420 			   lan743x_csr_read(adapter,
3421 					    STAT_TX_MULTICAST_FRAMES);
3422 	stats->collisions = lan743x_csr_read(adapter,
3423 					     STAT_TX_SINGLE_COLLISIONS) +
3424 			    lan743x_csr_read(adapter,
3425 					     STAT_TX_MULTIPLE_COLLISIONS) +
3426 			    lan743x_csr_read(adapter,
3427 					     STAT_TX_LATE_COLLISIONS);
3428 }
3429 
3430 static int lan743x_netdev_set_mac_address(struct net_device *netdev,
3431 					  void *addr)
3432 {
3433 	struct lan743x_adapter *adapter = netdev_priv(netdev);
3434 	struct sockaddr *sock_addr = addr;
3435 	int ret;
3436 
3437 	ret = eth_prepare_mac_addr_change(netdev, sock_addr);
3438 	if (ret)
3439 		return ret;
3440 	eth_hw_addr_set(netdev, sock_addr->sa_data);
3441 	lan743x_mac_set_address(adapter, sock_addr->sa_data);
3442 	lan743x_rfe_update_mac_address(adapter);
3443 	return 0;
3444 }
3445 
/* net_device_ops for the lan743x; hardware timestamp get/set are
 * implemented by the PTP support code.
 */
static const struct net_device_ops lan743x_netdev_ops = {
	.ndo_open		= lan743x_netdev_open,
	.ndo_stop		= lan743x_netdev_close,
	.ndo_start_xmit		= lan743x_netdev_xmit_frame,
	.ndo_eth_ioctl		= lan743x_netdev_ioctl,
	.ndo_set_rx_mode	= lan743x_netdev_set_multicast,
	.ndo_change_mtu		= lan743x_netdev_change_mtu,
	.ndo_get_stats64	= lan743x_netdev_get_stats64,
	.ndo_set_mac_address	= lan743x_netdev_set_mac_address,
	.ndo_hwtstamp_get	= lan743x_ptp_hwtstamp_get,
	.ndo_hwtstamp_set	= lan743x_ptp_hwtstamp_set,
};
3458 
/* Mask every interrupt source; used on the remove/shutdown paths */
static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter)
{
	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
}
3463 
/* Unregister the MDIO bus (its allocation is devm-managed) */
static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter)
{
	mdiobus_unregister(adapter->mdiobus);
}
3468 
/* Destroy the phylink instance and clear the now-stale pointer */
static void lan743x_destroy_phylink(struct lan743x_adapter *adapter)
{
	phylink_destroy(adapter->phylink);
	adapter->phylink = NULL;
}
3474 
/* Full teardown for device removal, in reverse order of probe */
static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
{
	unregister_netdev(adapter->netdev);

	lan743x_destroy_phylink(adapter);
	lan743x_mdiobus_cleanup(adapter);
	lan743x_hardware_cleanup(adapter);
	lan743x_pci_cleanup(adapter);
}
3484 
3485 static void pci11x1x_set_rfe_rd_fifo_threshold(struct lan743x_adapter *adapter)
3486 {
3487 	u16 rev = adapter->csr.id_rev & ID_REV_CHIP_REV_MASK_;
3488 
3489 	if (rev == ID_REV_CHIP_REV_PCI11X1X_B0_) {
3490 		u32 misc_ctl;
3491 
3492 		misc_ctl = lan743x_csr_read(adapter, MISC_CTL_0);
3493 		misc_ctl &= ~MISC_CTL_0_RFE_READ_FIFO_MASK_;
3494 		misc_ctl |= FIELD_PREP(MISC_CTL_0_RFE_READ_FIFO_MASK_,
3495 				       RFE_RD_FIFO_TH_3_DWORDS);
3496 		lan743x_csr_write(adapter, MISC_CTL_0, misc_ctl);
3497 	}
3498 }
3499 
/* lan743x_hardware_init - one-time hardware/software state initialization
 * @adapter: board private structure
 * @pdev: PCI device (currently unused here beyond adapter->pdev)
 *
 * Detects the chip variant and sizes the channel/vector tables
 * accordingly, applies PCI11x1x-specific setup (strap status, locks,
 * RFE FIFO threshold, SGMII enable/power), masks all interrupts, then
 * initializes GPIO, MAC, PTP, the RX address filter and the DMA
 * controller, and prepares per-channel bookkeeping. Finally clears the
 * EEE enable bit so the LPI delay register may be programmed later.
 *
 * Return: 0 on success, or the first sub-init's negative error code.
 */
static int lan743x_hardware_init(struct lan743x_adapter *adapter,
				 struct pci_dev *pdev)
{
	struct lan743x_tx *tx;
	u32 sgmii_ctl;
	int index;
	int ret;

	adapter->is_pci11x1x = is_pci11x1x_chip(adapter);
	if (adapter->is_pci11x1x) {
		adapter->max_tx_channels = PCI11X1X_MAX_TX_CHANNELS;
		adapter->used_tx_channels = PCI11X1X_USED_TX_CHANNELS;
		adapter->max_vector_count = PCI11X1X_MAX_VECTOR_COUNT;
		/* Reads strap configuration; sets adapter->is_sgmii_en */
		pci11x1x_strap_get_status(adapter);
		spin_lock_init(&adapter->eth_syslock_spinlock);
		mutex_init(&adapter->sgmii_rw_lock);
		pci11x1x_set_rfe_rd_fifo_threshold(adapter);
		/* Enable or power down the SGMII block per strap setting */
		sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
		if (adapter->is_sgmii_en) {
			sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
			sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
		} else {
			sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
			sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
		}
		lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
	} else {
		adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
		adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
		adapter->max_vector_count = LAN743X_MAX_VECTOR_COUNT;
	}

	adapter->intr.irq = adapter->pdev->irq;
	/* Mask all interrupts until lan743x_intr_open() */
	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);

	ret = lan743x_gpio_init(adapter);
	if (ret)
		return ret;

	ret = lan743x_mac_init(adapter);
	if (ret)
		return ret;

	ret = lan743x_ptp_init(adapter);
	if (ret)
		return ret;

	lan743x_rfe_update_mac_address(adapter);

	ret = lan743x_dmac_init(adapter);
	if (ret)
		return ret;

	/* Per-channel back-pointers and channel numbers */
	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
		adapter->rx[index].adapter = adapter;
		adapter->rx[index].channel_number = index;
	}

	for (index = 0; index < adapter->used_tx_channels; index++) {
		tx = &adapter->tx[index];
		tx->adapter = adapter;
		tx->channel_number = index;
		spin_lock_init(&tx->ring_lock);
	}

	/* Ensure EEEEN is clear */
	lan743x_mac_eee_enable(adapter, false);

	return 0;
}
3570 
3571 static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
3572 {
3573 	int ret;
3574 
3575 	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
3576 	if (!(adapter->mdiobus)) {
3577 		ret = -ENOMEM;
3578 		goto return_error;
3579 	}
3580 
3581 	adapter->mdiobus->priv = (void *)adapter;
3582 	if (adapter->is_pci11x1x) {
3583 		if (adapter->is_sgmii_en) {
3584 			netif_dbg(adapter, drv, adapter->netdev,
3585 				  "SGMII operation\n");
3586 			adapter->mdiobus->read = lan743x_mdiobus_read_c22;
3587 			adapter->mdiobus->write = lan743x_mdiobus_write_c22;
3588 			adapter->mdiobus->read_c45 = lan743x_mdiobus_read_c45;
3589 			adapter->mdiobus->write_c45 = lan743x_mdiobus_write_c45;
3590 			adapter->mdiobus->name = "lan743x-mdiobus-c45";
3591 			netif_dbg(adapter, drv, adapter->netdev,
3592 				  "lan743x-mdiobus-c45\n");
3593 		} else {
3594 			netif_dbg(adapter, drv, adapter->netdev,
3595 				  "RGMII operation\n");
3596 			// Only C22 support when RGMII I/F
3597 			adapter->mdiobus->read = lan743x_mdiobus_read_c22;
3598 			adapter->mdiobus->write = lan743x_mdiobus_write_c22;
3599 			adapter->mdiobus->name = "lan743x-mdiobus";
3600 			netif_dbg(adapter, drv, adapter->netdev,
3601 				  "lan743x-mdiobus\n");
3602 		}
3603 	} else {
3604 		adapter->mdiobus->read = lan743x_mdiobus_read_c22;
3605 		adapter->mdiobus->write = lan743x_mdiobus_write_c22;
3606 		adapter->mdiobus->name = "lan743x-mdiobus";
3607 		netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus\n");
3608 	}
3609 
3610 	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
3611 		 "pci-%s", pci_name(adapter->pdev));
3612 
3613 	if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_)
3614 		/* LAN7430 uses internal phy at address 1 */
3615 		adapter->mdiobus->phy_mask = ~(u32)BIT(1);
3616 
3617 	/* register mdiobus */
3618 	ret = mdiobus_register(adapter->mdiobus);
3619 	if (ret < 0)
3620 		goto return_error;
3621 	return 0;
3622 
3623 return_error:
3624 	return ret;
3625 }
3626 
/**
 * lan743x_pcidev_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @id: entry in lan743x_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * Initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int lan743x_pcidev_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	struct lan743x_adapter *adapter = NULL;
	struct net_device *netdev = NULL;
	int ret = -ENODEV;

	/* PCI11x1x parts expose more TX queues than the LAN743x */
	if (id->device == PCI_DEVICE_ID_SMSC_A011 ||
	    id->device == PCI_DEVICE_ID_SMSC_A041) {
		netdev = devm_alloc_etherdev_mqs(&pdev->dev,
						 sizeof(struct lan743x_adapter),
						 PCI11X1X_USED_TX_CHANNELS,
						 LAN743X_USED_RX_CHANNELS);
	} else {
		netdev = devm_alloc_etherdev_mqs(&pdev->dev,
						 sizeof(struct lan743x_adapter),
						 LAN743X_USED_TX_CHANNELS,
						 LAN743X_USED_RX_CHANNELS);
	}

	if (!netdev)
		goto return_error;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
			      NETIF_MSG_LINK | NETIF_MSG_IFUP |
			      NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
	netdev->max_mtu = LAN743X_MAX_FRAME_SIZE;

	/* Best-effort: an OF-provided MAC address overrides nothing here
	 * if absent; return value intentionally ignored.
	 */
	of_get_mac_address(pdev->dev.of_node, adapter->mac_address);

	ret = lan743x_pci_init(adapter, pdev);
	if (ret)
		goto return_error;

	ret = lan743x_csr_init(adapter);
	if (ret)
		goto cleanup_pci;

	ret = lan743x_hw_reset_phy(adapter);
	if (ret)
		goto cleanup_pci;

	ret = lan743x_hardware_init(adapter, pdev);
	if (ret)
		goto cleanup_pci;

	ret = lan743x_mdiobus_init(adapter);
	if (ret)
		goto cleanup_hardware;

	adapter->netdev->netdev_ops = &lan743x_netdev_ops;
	adapter->netdev->ethtool_ops = &lan743x_ethtool_ops;
	adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO |
				    NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	adapter->netdev->hw_features = adapter->netdev->features;

	/* phylink must exist before the netdev becomes visible */
	ret = lan743x_phylink_create(adapter);
	if (ret < 0) {
		netif_err(adapter, probe, netdev,
			  "failed to setup phylink (%d)\n", ret);
		goto cleanup_mdiobus;
	}

	ret = register_netdev(adapter->netdev);
	if (ret < 0)
		goto cleanup_phylink;
	return 0;

cleanup_phylink:
	lan743x_destroy_phylink(adapter);

cleanup_mdiobus:
	lan743x_mdiobus_cleanup(adapter);

cleanup_hardware:
	lan743x_hardware_cleanup(adapter);

cleanup_pci:
	lan743x_pci_cleanup(adapter);

return_error:
	pr_warn("Initialization failed\n");
	return ret;
}
3725 
3726 /**
3727  * lan743x_pcidev_remove - Device Removal Routine
3728  * @pdev: PCI device information struct
3729  *
3730  * this is called by the PCI subsystem to alert the driver
3731  * that it should release a PCI device.  This could be caused by a
3732  * Hot-Plug event, or because the driver is going to be removed from
3733  * memory.
3734  **/
static void lan743x_pcidev_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);

	/* Unregisters the netdev and releases all driver resources */
	lan743x_full_cleanup(adapter);
}
3742 
/* lan743x_pcidev_shutdown - quiesce the device for shutdown/suspend
 * @pdev: PCI device information struct
 *
 * Detaches the netdev under RTNL, closes it if it was running, saves
 * PCI config state for resume (CONFIG_PM), and masks all interrupts.
 */
static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);

	rtnl_lock();
	netif_device_detach(netdev);

	/* close netdev when netdev is at running state.
	 * For instance, it is true when system goes to sleep by pm-suspend
	 * However, it is false when system goes to sleep by suspend GUI menu
	 */
	if (netif_running(netdev))
		lan743x_netdev_close(netdev);
	rtnl_unlock();

#ifdef CONFIG_PM
	/* Preserve PCI config space for pci_restore_state() at resume */
	pci_save_state(pdev);
#endif

	/* clean up lan743x portion */
	lan743x_hardware_cleanup(adapter);
}
3766 
3767 #ifdef CONFIG_PM_SLEEP
3768 static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
3769 {
3770 	return bitrev16(crc16(0xFFFF, buf, len));
3771 }
3772 
/* lan743x_pm_set_wol - program Wake-on-LAN filters before suspend
 * @adapter: board private structure
 *
 * Clears all wakeup-frame filters and stale wake status, then builds up
 * the wake-control (WUCSR), power-management (PMT_CTL) and MAC RX enable
 * register values according to adapter->wolopts: magic packet, unicast,
 * broadcast, multicast (IPv4/IPv6 prefix filters), ARP frame filter and
 * SecureOn password. The three registers are committed together at the
 * end.
 */
static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
{
	/* First bytes of IPv4/IPv6 multicast destination MACs and of the
	 * ARP EtherType, matched via CRC16 wakeup-frame filters below.
	 */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };
	int mask_index;
	u32 sopass;
	u32 pmtctl;
	u32 wucsr;
	u32 macrx;
	u16 crc;

	/* Disable every wakeup-frame filter before reprogramming */
	for (mask_index = 0; mask_index < MAC_NUM_OF_WUF_CFG; mask_index++)
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 0);

	/* clear wake settings */
	pmtctl = lan743x_csr_read(adapter, PMT_CTL);
	pmtctl |= PMT_CTL_WUPS_MASK_ | PMT_CTL_RES_CLR_WKP_MASK_;
	pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
		PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
		PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);

	macrx = lan743x_csr_read(adapter, MAC_RX);

	wucsr = 0;
	mask_index = 0;

	pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;

	/* Let the PHY generate wake events if it has its own WoL config */
	if (adapter->phy_wolopts)
		pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;

	if (adapter->wolopts & WAKE_MAGIC) {
		wucsr |= MAC_WUCSR_MPEN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_UCAST) {
		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_PFDA_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_BCAST) {
		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_BCST_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_MCAST) {
		/* IPv4 multicast */
		crc = lan743x_pm_wakeframe_crc16(ipv4_multicast, 3);
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
				  (crc & MAC_WUF_CFG_CRC16_MASK_));
		/* Mask 0x7: match the first three bytes of the frame */
		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 7);
		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
		mask_index++;

		/* IPv6 multicast */
		crc = lan743x_pm_wakeframe_crc16(ipv6_multicast, 2);
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
				  (crc & MAC_WUF_CFG_CRC16_MASK_));
		/* Mask 0x3: match the first two bytes of the frame */
		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 3);
		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
		mask_index++;

		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_ARP) {
		/* set MAC_WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan743x_pm_wakeframe_crc16(arp_type, 2);
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_ALL_ |
				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
				  (crc & MAC_WUF_CFG_CRC16_MASK_));
		/* Mask 0x3000: match bytes 12-13 (the EtherType field) */
		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 0x3000);
		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
		mask_index++;

		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}

	if (adapter->wolopts & WAKE_MAGICSECURE) {
		/* NOTE(review): the u32/u16 loads assume adapter->sopass is
		 * suitably aligned — confirm, or consider get_unaligned().
		 */
		sopass = *(u32 *)adapter->sopass;
		lan743x_csr_write(adapter, MAC_MP_SO_LO, sopass);
		sopass = *(u16 *)&adapter->sopass[4];
		lan743x_csr_write(adapter, MAC_MP_SO_HI, sopass);
		/* NOTE(review): MAC_MP_SO_EN_ is OR'd into the WUCSR value —
		 * confirm this bit is defined within MAC_WUCSR.
		 */
		wucsr |= MAC_MP_SO_EN_;
	}

	/* Commit wake control, power management and RX enable together */
	lan743x_csr_write(adapter, MAC_WUCSR, wucsr);
	lan743x_csr_write(adapter, PMT_CTL, pmtctl);
	lan743x_csr_write(adapter, MAC_RX, macrx);
}
3885 
/* lan743x_pm_suspend - system-sleep suspend callback
 * @dev: generic device embedded in the PCI device
 *
 * Quiesces the interface, clears stale wake configuration/status, arms
 * Wake-on-LAN when requested, and hands the device off to D3hot with
 * PME enabled via pci_prepare_to_sleep().
 *
 * Return: result of pci_prepare_to_sleep() (0 on success, negative errno).
 */
static int lan743x_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	u32 data;

	/* Stop the data path and detach the netdev before touching
	 * wake registers.
	 */
	lan743x_pcidev_shutdown(pdev);

	/* clear all wakes */
	lan743x_csr_write(adapter, MAC_WUCSR, 0);
	lan743x_csr_write(adapter, MAC_WUCSR2, 0);
	/* MAC_WK_SRC status bits are write-one-to-clear; clear them all */
	lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);

	/* Program WoL filters only if either MAC- or PHY-based wake
	 * options were requested via ethtool.
	 */
	if (adapter->wolopts || adapter->phy_wolopts)
		lan743x_pm_set_wol(adapter);

	if (adapter->is_pci11x1x) {
		/* Save HW_CFG to config again in PM resume */
		data = lan743x_csr_read(adapter, HW_CFG);
		adapter->hw_cfg = data;
		/* Protect the chip from resets across D3 so the wake
		 * configuration programmed above survives the power
		 * transition.
		 */
		data |= (HW_CFG_RST_PROTECT_PCIE_ |
			 HW_CFG_D3_RESET_DIS_ |
			 HW_CFG_D3_VAUX_OVR_ |
			 HW_CFG_HOT_RESET_DIS_ |
			 HW_CFG_RST_PROTECT_);
		lan743x_csr_write(adapter, HW_CFG, data);
	}

	/* Host sets PME_En, put D3hot */
	return pci_prepare_to_sleep(pdev);
}
3918 
/* lan743x_pm_resume - system-sleep resume callback
 * @dev: generic device embedded in the PCI device
 *
 * Brings the device back to D0, restores saved PCI and (on PCI11x1x)
 * HW_CFG state, re-initializes the hardware, clears wake status, and
 * reopens the netdev if it was running before suspend.
 *
 * Return: 0 on success or a negative errno from hardware init.
 */
static int lan743x_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	u32 data;
	int ret;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Restore HW_CFG that was saved during pm suspend */
	if (adapter->is_pci11x1x)
		lan743x_csr_write(adapter, HW_CFG, adapter->hw_cfg);

	/* Full hardware re-init; D3 may have reset most register state */
	ret = lan743x_hardware_init(adapter, pdev);
	if (ret) {
		netif_err(adapter, probe, adapter->netdev,
			  "lan743x_hardware_init returned %d\n", ret);
		lan743x_pci_cleanup(adapter);
		return ret;
	}

	/* Log which event woke us (debug aid only; value is not acted on) */
	ret = lan743x_csr_read(adapter, MAC_WK_SRC);
	netif_dbg(adapter, drv, adapter->netdev,
		  "Wakeup source : 0x%08X\n", ret);

	/* Clear the wol configuration and status bits. Note that
	 * the status bits are "Write One to Clear (W1C)"
	 */
	data = MAC_WUCSR_EEE_TX_WAKE_ | MAC_WUCSR_EEE_RX_WAKE_ |
	       MAC_WUCSR_RFE_WAKE_FR_ | MAC_WUCSR_PFDA_FR_ | MAC_WUCSR_WUFR_ |
	       MAC_WUCSR_MPR_ | MAC_WUCSR_BCAST_FR_;
	lan743x_csr_write(adapter, MAC_WUCSR, data);

	data = MAC_WUCSR2_NS_RCD_ | MAC_WUCSR2_ARP_RCD_ |
	       MAC_WUCSR2_IPV6_TCPSYN_RCD_ | MAC_WUCSR2_IPV4_TCPSYN_RCD_;
	lan743x_csr_write(adapter, MAC_WUCSR2, data);

	data = MAC_WK_SRC_ETH_PHY_WK_ | MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ |
	       MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ | MAC_WK_SRC_EEE_TX_WK_ |
	       MAC_WK_SRC_EEE_RX_WK_ | MAC_WK_SRC_RFE_FR_WK_ |
	       MAC_WK_SRC_PFDA_FR_WK_ | MAC_WK_SRC_MP_FR_WK_ |
	       MAC_WK_SRC_BCAST_FR_WK_ | MAC_WK_SRC_WU_FR_WK_ |
	       MAC_WK_SRC_WK_FR_SAVED_;
	lan743x_csr_write(adapter, MAC_WK_SRC, data);

	rtnl_lock();
	/* open netdev when netdev is at running state while resume.
	 * For instance, it is true when system wakesup after pm-suspend
	 * However, it is false when system wakes up after suspend GUI menu
	 */
	if (netif_running(netdev))
		lan743x_netdev_open(netdev);

	netif_device_attach(netdev);
	rtnl_unlock();

	return 0;
}
3979 
/* System sleep (suspend/resume) hooks; compiled only under CONFIG_PM_SLEEP */
static const struct dev_pm_ops lan743x_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
};
3983 #endif /* CONFIG_PM_SLEEP */
3984 
/* PCI IDs bound by this driver: LAN7430, LAN7431, and the A011/A041
 * SMSC device IDs (PCI11x1x family variants).
 */
static const struct pci_device_id lan743x_pcidev_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A011) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A041) },
	{ 0, }	/* terminator */
};

/* Export the ID table for module autoloading via udev/modalias */
MODULE_DEVICE_TABLE(pci, lan743x_pcidev_tbl);
3994 
/* PCI driver registration: probe/remove/shutdown plus, when
 * CONFIG_PM_SLEEP is set, the system-sleep PM callbacks above.
 */
static struct pci_driver lan743x_pcidev_driver = {
	.name     = DRIVER_NAME,
	.id_table = lan743x_pcidev_tbl,
	.probe    = lan743x_pcidev_probe,
	.remove   = lan743x_pcidev_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &lan743x_pm_ops,
#endif
	.shutdown = lan743x_pcidev_shutdown,
};
4005 
/* Generate module init/exit that register/unregister the PCI driver */
module_pci_driver(lan743x_pcidev_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
4011