xref: /linux/drivers/net/ethernet/realtek/r8169_main.c (revision a6cdeeb16bff89c8486324f53577db058cbe81ba)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * r8169.c: RealTek 8169/8168/8101 ethernet driver.
4  *
5  * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
6  * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
7  * Copyright (c) a lot of people too. Please respect their work.
8  *
9  * See MAINTAINERS file for support contact information.
10  */
11 
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/pci.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/clk.h>
18 #include <linux/delay.h>
19 #include <linux/ethtool.h>
20 #include <linux/phy.h>
21 #include <linux/if_vlan.h>
22 #include <linux/crc32.h>
23 #include <linux/in.h>
24 #include <linux/io.h>
25 #include <linux/ip.h>
26 #include <linux/tcp.h>
27 #include <linux/interrupt.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/prefetch.h>
31 #include <linux/pci-aspm.h>
32 #include <linux/ipv6.h>
33 #include <net/ip6_checksum.h>
34 
35 #include "r8169_firmware.h"
36 
37 #define MODULENAME "r8169"
38 
39 #define FIRMWARE_8168D_1	"rtl_nic/rtl8168d-1.fw"
40 #define FIRMWARE_8168D_2	"rtl_nic/rtl8168d-2.fw"
41 #define FIRMWARE_8168E_1	"rtl_nic/rtl8168e-1.fw"
42 #define FIRMWARE_8168E_2	"rtl_nic/rtl8168e-2.fw"
43 #define FIRMWARE_8168E_3	"rtl_nic/rtl8168e-3.fw"
44 #define FIRMWARE_8168F_1	"rtl_nic/rtl8168f-1.fw"
45 #define FIRMWARE_8168F_2	"rtl_nic/rtl8168f-2.fw"
46 #define FIRMWARE_8105E_1	"rtl_nic/rtl8105e-1.fw"
47 #define FIRMWARE_8402_1		"rtl_nic/rtl8402-1.fw"
48 #define FIRMWARE_8411_1		"rtl_nic/rtl8411-1.fw"
49 #define FIRMWARE_8411_2		"rtl_nic/rtl8411-2.fw"
50 #define FIRMWARE_8106E_1	"rtl_nic/rtl8106e-1.fw"
51 #define FIRMWARE_8106E_2	"rtl_nic/rtl8106e-2.fw"
52 #define FIRMWARE_8168G_2	"rtl_nic/rtl8168g-2.fw"
53 #define FIRMWARE_8168G_3	"rtl_nic/rtl8168g-3.fw"
54 #define FIRMWARE_8168H_1	"rtl_nic/rtl8168h-1.fw"
55 #define FIRMWARE_8168H_2	"rtl_nic/rtl8168h-2.fw"
56 #define FIRMWARE_8107E_1	"rtl_nic/rtl8107e-1.fw"
57 #define FIRMWARE_8107E_2	"rtl_nic/rtl8107e-2.fw"
58 
/* Default netif message-level bitmap; overridable through the "debug"
 * module parameter. */
#define R8169_MSG_DEFAULT \
	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;

#define TX_DMA_BURST	7	/* Maximum PCI burst, '7' is unlimited */
#define InterFrameGap	0x03	/* 3 means InterFrameGap = the shortest one */

/* Descriptor-ring and buffer geometry. */
#define R8169_REGS_SIZE		256
#define R8169_RX_BUF_SIZE	(SZ_16K - 1)
#define NUM_TX_DESC	64	/* Number of Tx descriptor registers */
#define NUM_RX_DESC	256U	/* Number of Rx descriptor registers */
#define R8169_TX_RING_BYTES	(NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES	(NUM_RX_DESC * sizeof(struct RxDesc))

/* write/read MMIO register */
#define RTL_W8(tp, reg, val8)	writeb((val8), tp->mmio_addr + (reg))
#define RTL_W16(tp, reg, val16)	writew((val16), tp->mmio_addr + (reg))
#define RTL_W32(tp, reg, val32)	writel((val32), tp->mmio_addr + (reg))
#define RTL_R8(tp, reg)		readb(tp->mmio_addr + (reg))
#define RTL_R16(tp, reg)		readw(tp->mmio_addr + (reg))
#define RTL_R32(tp, reg)		readl(tp->mmio_addr + (reg))
83 
/* Internal identifier for each supported chip revision, detected at
 * probe time.  Used as the index into rtl_chip_infos[] and in version
 * range checks (e.g. VER_40 ... VER_51 share the 8168g+ access paths).
 */
enum mac_version {
	/* support for ancient RTL_GIGA_MAC_VER_01 has been removed */
	RTL_GIGA_MAC_VER_02,
	RTL_GIGA_MAC_VER_03,
	RTL_GIGA_MAC_VER_04,
	RTL_GIGA_MAC_VER_05,
	RTL_GIGA_MAC_VER_06,
	RTL_GIGA_MAC_VER_07,
	RTL_GIGA_MAC_VER_08,
	RTL_GIGA_MAC_VER_09,
	RTL_GIGA_MAC_VER_10,
	RTL_GIGA_MAC_VER_11,
	RTL_GIGA_MAC_VER_12,
	RTL_GIGA_MAC_VER_13,
	RTL_GIGA_MAC_VER_14,
	RTL_GIGA_MAC_VER_15,
	RTL_GIGA_MAC_VER_16,
	RTL_GIGA_MAC_VER_17,
	RTL_GIGA_MAC_VER_18,
	RTL_GIGA_MAC_VER_19,
	RTL_GIGA_MAC_VER_20,
	RTL_GIGA_MAC_VER_21,
	RTL_GIGA_MAC_VER_22,
	RTL_GIGA_MAC_VER_23,
	RTL_GIGA_MAC_VER_24,
	RTL_GIGA_MAC_VER_25,
	RTL_GIGA_MAC_VER_26,
	RTL_GIGA_MAC_VER_27,
	RTL_GIGA_MAC_VER_28,
	RTL_GIGA_MAC_VER_29,
	RTL_GIGA_MAC_VER_30,
	RTL_GIGA_MAC_VER_31,
	RTL_GIGA_MAC_VER_32,
	RTL_GIGA_MAC_VER_33,
	RTL_GIGA_MAC_VER_34,
	RTL_GIGA_MAC_VER_35,
	RTL_GIGA_MAC_VER_36,
	RTL_GIGA_MAC_VER_37,
	RTL_GIGA_MAC_VER_38,
	RTL_GIGA_MAC_VER_39,
	RTL_GIGA_MAC_VER_40,
	RTL_GIGA_MAC_VER_41,
	RTL_GIGA_MAC_VER_42,
	RTL_GIGA_MAC_VER_43,
	RTL_GIGA_MAC_VER_44,
	RTL_GIGA_MAC_VER_45,
	RTL_GIGA_MAC_VER_46,
	RTL_GIGA_MAC_VER_47,
	RTL_GIGA_MAC_VER_48,
	RTL_GIGA_MAC_VER_49,
	RTL_GIGA_MAC_VER_50,
	RTL_GIGA_MAC_VER_51,
	RTL_GIGA_MAC_NONE	/* sentinel: chip not recognized */
};
138 
/* Maximum frame payload (MTU) supported per jumbo buffer size; each is
 * the buffer size minus the Ethernet header and 2 extra bytes. */
#define JUMBO_1K	ETH_DATA_LEN
#define JUMBO_4K	(4*1024 - ETH_HLEN - 2)
#define JUMBO_6K	(6*1024 - ETH_HLEN - 2)
#define JUMBO_7K	(7*1024 - ETH_HLEN - 2)
#define JUMBO_9K	(9*1024 - ETH_HLEN - 2)
144 
/* Per-mac_version marketing name and optional firmware image.  Indexed
 * by enum mac_version; entries without fw_name need no firmware patch.
 */
static const struct {
	const char *name;	/* human-readable chip name */
	const char *fw_name;	/* firmware blob, or NULL */
} rtl_chip_infos[] = {
	/* PCI devices. */
	[RTL_GIGA_MAC_VER_02] = {"RTL8169s"				},
	[RTL_GIGA_MAC_VER_03] = {"RTL8110s"				},
	[RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb"			},
	[RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc"			},
	[RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc"			},
	/* PCI-E devices. */
	[RTL_GIGA_MAC_VER_07] = {"RTL8102e"				},
	[RTL_GIGA_MAC_VER_08] = {"RTL8102e"				},
	[RTL_GIGA_MAC_VER_09] = {"RTL8102e"				},
	[RTL_GIGA_MAC_VER_10] = {"RTL8101e"				},
	[RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b"			},
	[RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b"			},
	[RTL_GIGA_MAC_VER_13] = {"RTL8101e"				},
	[RTL_GIGA_MAC_VER_14] = {"RTL8100e"				},
	[RTL_GIGA_MAC_VER_15] = {"RTL8100e"				},
	[RTL_GIGA_MAC_VER_16] = {"RTL8101e"				},
	[RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b"			},
	[RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp"			},
	[RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c"			},
	[RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c"			},
	[RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c"			},
	[RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c"			},
	[RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp"			},
	[RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp"			},
	[RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d",	FIRMWARE_8168D_1},
	[RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d",	FIRMWARE_8168D_2},
	[RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp"			},
	[RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp"			},
	[RTL_GIGA_MAC_VER_29] = {"RTL8105e",		FIRMWARE_8105E_1},
	[RTL_GIGA_MAC_VER_30] = {"RTL8105e",		FIRMWARE_8105E_1},
	[RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp"			},
	[RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e",	FIRMWARE_8168E_1},
	[RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e",	FIRMWARE_8168E_2},
	[RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl",	FIRMWARE_8168E_3},
	[RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f",	FIRMWARE_8168F_1},
	[RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f",	FIRMWARE_8168F_2},
	[RTL_GIGA_MAC_VER_37] = {"RTL8402",		FIRMWARE_8402_1 },
	[RTL_GIGA_MAC_VER_38] = {"RTL8411",		FIRMWARE_8411_1 },
	[RTL_GIGA_MAC_VER_39] = {"RTL8106e",		FIRMWARE_8106E_1},
	[RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g",	FIRMWARE_8168G_2},
	[RTL_GIGA_MAC_VER_41] = {"RTL8168g/8111g"			},
	[RTL_GIGA_MAC_VER_42] = {"RTL8168g/8111g",	FIRMWARE_8168G_3},
	[RTL_GIGA_MAC_VER_43] = {"RTL8106e",		FIRMWARE_8106E_2},
	[RTL_GIGA_MAC_VER_44] = {"RTL8411",		FIRMWARE_8411_2 },
	[RTL_GIGA_MAC_VER_45] = {"RTL8168h/8111h",	FIRMWARE_8168H_1},
	[RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h",	FIRMWARE_8168H_2},
	[RTL_GIGA_MAC_VER_47] = {"RTL8107e",		FIRMWARE_8107E_1},
	[RTL_GIGA_MAC_VER_48] = {"RTL8107e",		FIRMWARE_8107E_2},
	[RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep"			},
	[RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep"			},
	[RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep"			},
};
202 
/* Coarse board configuration selector carried in the driver_data field
 * of rtl8169_pci_tbl entries. */
enum cfg_version {
	RTL_CFG_0 = 0x00,
	RTL_CFG_1,
	RTL_CFG_2
};
208 
/* PCI IDs bound by this driver; driver_data selects an enum cfg_version.
 * Includes rebranded parts (D-Link, Linksys, USR, NCUBE) and one entry
 * with the bogus vendor ID 0x0001 seen on some boards.
 */
static const struct pci_device_id rtl8169_pci_tbl[] = {
	{ PCI_VDEVICE(REALTEK,	0x2502), RTL_CFG_1 },
	{ PCI_VDEVICE(REALTEK,	0x2600), RTL_CFG_1 },
	{ PCI_VDEVICE(REALTEK,	0x8129), RTL_CFG_0 },
	{ PCI_VDEVICE(REALTEK,	0x8136), RTL_CFG_2 },
	{ PCI_VDEVICE(REALTEK,	0x8161), RTL_CFG_1 },
	{ PCI_VDEVICE(REALTEK,	0x8167), RTL_CFG_0 },
	{ PCI_VDEVICE(REALTEK,	0x8168), RTL_CFG_1 },
	{ PCI_VDEVICE(NCUBE,	0x8168), RTL_CFG_1 },
	{ PCI_VDEVICE(REALTEK,	0x8169), RTL_CFG_0 },
	{ PCI_VENDOR_ID_DLINK,	0x4300,
		PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
	{ PCI_VDEVICE(DLINK,	0x4300), RTL_CFG_0 },
	{ PCI_VDEVICE(DLINK,	0x4302), RTL_CFG_0 },
	{ PCI_VDEVICE(AT,	0xc107), RTL_CFG_0 },
	{ PCI_VDEVICE(USR,	0x0116), RTL_CFG_0 },
	{ PCI_VENDOR_ID_LINKSYS,		0x1032,
		PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
	{ 0x0001,				0x8168,
		PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
	{}
};
231 
232 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
233 
/* Backing store for the "debug" module parameter; -1 (all bits set) is
 * the default and presumably means "use R8169_MSG_DEFAULT" — the
 * interpretation happens at probe time, outside this chunk. */
static struct {
	u32 msg_enable;
} debug = { -1 };
237 
/* MMIO register offsets shared by all supported chips, plus bit masks
 * for some of them (the inline #defines). */
enum rtl_registers {
	MAC0		= 0,	/* Ethernet hardware address. */
	MAC4		= 4,
	MAR0		= 8,	/* Multicast filter. */
	CounterAddrLow		= 0x10,
	CounterAddrHigh		= 0x14,
	TxDescStartAddrLow	= 0x20,
	TxDescStartAddrHigh	= 0x24,
	TxHDescStartAddrLow	= 0x28,
	TxHDescStartAddrHigh	= 0x2c,
	FLASH		= 0x30,
	ERSR		= 0x36,
	ChipCmd		= 0x37,
	TxPoll		= 0x38,
	IntrMask	= 0x3c,
	IntrStatus	= 0x3e,

	TxConfig	= 0x40,
#define	TXCFG_AUTO_FIFO			(1 << 7)	/* 8111e-vl */
#define	TXCFG_EMPTY			(1 << 11)	/* 8111e-vl */

	RxConfig	= 0x44,
#define	RX128_INT_EN			(1 << 15)	/* 8111c and later */
#define	RX_MULTI_EN			(1 << 14)	/* 8111c only */
#define	RXCFG_FIFO_SHIFT		13
					/* No threshold before first PCI xfer */
#define	RX_FIFO_THRESH			(7 << RXCFG_FIFO_SHIFT)
#define	RX_EARLY_OFF			(1 << 11)
#define	RXCFG_DMA_SHIFT			8
					/* Unlimited maximum PCI burst. */
#define	RX_DMA_BURST			(7 << RXCFG_DMA_SHIFT)

	RxMissed	= 0x4c,
	Cfg9346		= 0x50,	/* lock/unlock latch for Config0..5 */
	Config0		= 0x51,
	Config1		= 0x52,
	Config2		= 0x53,
#define PME_SIGNAL			(1 << 5)	/* 8168c and later */

	Config3		= 0x54,
	Config4		= 0x55,
	Config5		= 0x56,
	MultiIntr	= 0x5c,
	PHYAR		= 0x60,	/* indirect PHY (MDIO) access register */
	PHYstatus	= 0x6c,
	RxMaxSize	= 0xda,
	CPlusCmd	= 0xe0,
	IntrMitigate	= 0xe2,

#define RTL_COALESCE_MASK	0x0f
#define RTL_COALESCE_SHIFT	4
#define RTL_COALESCE_T_MAX	(RTL_COALESCE_MASK)
#define RTL_COALESCE_FRAME_MAX	(RTL_COALESCE_MASK << 2)

	RxDescAddrLow	= 0xe4,
	RxDescAddrHigh	= 0xe8,
	EarlyTxThres	= 0xec,	/* 8169. Unit of 32 bytes. */

#define NoEarlyTx	0x3f	/* Max value : no early transmit. */

	MaxTxPacketSize	= 0xec,	/* 8101/8168. Unit of 128 bytes. */

#define TxPacketMax	(8064 >> 7)
#define EarlySize	0x27

	FuncEvent	= 0xf0,
	FuncEventMask	= 0xf4,
	FuncPresetState	= 0xf8,
	IBCR0           = 0xf8,
	IBCR2           = 0xf9,
	IBIMR0          = 0xfa,
	IBISR0          = 0xfb,
	FuncForceEvent	= 0xfc,
};
312 
/* Registers specific to the RTL8168/8101 families: CSI, EPHY and EFUSE
 * indirect access ports plus assorted power-management controls. */
enum rtl8168_8101_registers {
	CSIDR			= 0x64,
	CSIAR			= 0x68,
#define	CSIAR_FLAG			0x80000000
#define	CSIAR_WRITE_CMD			0x80000000
#define	CSIAR_BYTE_ENABLE		0x0000f000
#define	CSIAR_ADDR_MASK			0x00000fff
	PMCH			= 0x6f,
	EPHYAR			= 0x80,	/* indirect PCIe PHY (EPHY) access */
#define	EPHYAR_FLAG			0x80000000
#define	EPHYAR_WRITE_CMD		0x80000000
#define	EPHYAR_REG_MASK			0x1f
#define	EPHYAR_REG_SHIFT		16
#define	EPHYAR_DATA_MASK		0xffff
	DLLPR			= 0xd0,
#define	PFM_EN				(1 << 6)
#define	TX_10M_PS_EN			(1 << 7)
	DBG_REG			= 0xd1,
#define	FIX_NAK_1			(1 << 4)
#define	FIX_NAK_2			(1 << 3)
	TWSI			= 0xd2,
	MCU			= 0xd3,
#define	NOW_IS_OOB			(1 << 7)
#define	TX_EMPTY			(1 << 5)
#define	RX_EMPTY			(1 << 4)
#define	RXTX_EMPTY			(TX_EMPTY | RX_EMPTY)
#define	EN_NDP				(1 << 3)
#define	EN_OOB_RESET			(1 << 2)
#define	LINK_LIST_RDY			(1 << 1)
	EFUSEAR			= 0xdc,	/* indirect eFuse access */
#define	EFUSEAR_FLAG			0x80000000
#define	EFUSEAR_WRITE_CMD		0x80000000
#define	EFUSEAR_READ_CMD		0x00000000
#define	EFUSEAR_REG_MASK		0x03ff
#define	EFUSEAR_REG_SHIFT		8
#define	EFUSEAR_DATA_MASK		0xff
	MISC_1			= 0xf2,
#define	PFM_D3COLD_EN			(1 << 6)
};
352 
/* RTL8168-only registers: the ERI window (ERIDR/ERIAR) and the OCP
 * ports (OCPDR/OCPAR/GPHY_OCP) used for indirect MAC/PHY access. */
enum rtl8168_registers {
	LED_FREQ		= 0x1a,
	EEE_LED			= 0x1b,
	ERIDR			= 0x70,	/* ERI data */
	ERIAR			= 0x74,	/* ERI address/command */
#define ERIAR_FLAG			0x80000000
#define ERIAR_WRITE_CMD			0x80000000
#define ERIAR_READ_CMD			0x00000000
#define ERIAR_ADDR_BYTE_ALIGN		4
#define ERIAR_TYPE_SHIFT		16
#define ERIAR_EXGMAC			(0x00 << ERIAR_TYPE_SHIFT)
#define ERIAR_MSIX			(0x01 << ERIAR_TYPE_SHIFT)
#define ERIAR_ASF			(0x02 << ERIAR_TYPE_SHIFT)
#define ERIAR_OOB			(0x02 << ERIAR_TYPE_SHIFT)
#define ERIAR_MASK_SHIFT		12
#define ERIAR_MASK_0001			(0x1 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_0011			(0x3 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_0100			(0x4 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_0101			(0x5 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_1111			(0xf << ERIAR_MASK_SHIFT)
	EPHY_RXER_NUM		= 0x7c,
	OCPDR			= 0xb0,	/* OCP GPHY access */
#define OCPDR_WRITE_CMD			0x80000000
#define OCPDR_READ_CMD			0x00000000
#define OCPDR_REG_MASK			0x7f
#define OCPDR_GPHY_REG_SHIFT		16
#define OCPDR_DATA_MASK			0xffff
	OCPAR			= 0xb4,
#define OCPAR_FLAG			0x80000000
#define OCPAR_GPHY_WRITE_CMD		0x8000f060
#define OCPAR_GPHY_READ_CMD		0x0000f060
	GPHY_OCP		= 0xb8,
	RDSAR1			= 0xd0,	/* 8168c only. Undocumented on 8168dp */
	MISC			= 0xf0,	/* 8168e only. */
#define TXPLA_RST			(1 << 29)
#define DISABLE_LAN_EN			(1 << 23) /* Enable GPIO pin */
#define PWM_EN				(1 << 22)
#define RXDV_GATED_EN			(1 << 19)
#define EARLY_TALLY_EN			(1 << 16)
};
393 
/* Bit definitions for the registers declared above, grouped by the
 * register they belong to (see the per-group comments). */
enum rtl_register_content {
	/* InterruptStatusBits */
	SYSErr		= 0x8000,
	PCSTimeout	= 0x4000,
	SWInt		= 0x0100,
	TxDescUnavail	= 0x0080,
	RxFIFOOver	= 0x0040,
	LinkChg		= 0x0020,
	RxOverflow	= 0x0010,
	TxErr		= 0x0008,
	TxOK		= 0x0004,
	RxErr		= 0x0002,
	RxOK		= 0x0001,

	/* RxStatusDesc */
	RxRWT	= (1 << 22),
	RxRES	= (1 << 21),
	RxRUNT	= (1 << 20),
	RxCRC	= (1 << 19),

	/* ChipCmdBits */
	StopReq		= 0x80,
	CmdReset	= 0x10,
	CmdRxEnb	= 0x08,
	CmdTxEnb	= 0x04,
	RxBufEmpty	= 0x01,

	/* TXPoll register p.5 */
	HPQ		= 0x80,		/* Poll cmd on the high prio queue */
	NPQ		= 0x40,		/* Poll cmd on the low prio queue */
	FSWInt		= 0x01,		/* Forced software interrupt */

	/* Cfg9346Bits */
	Cfg9346_Lock	= 0x00,
	Cfg9346_Unlock	= 0xc0,

	/* rx_mode_bits */
	AcceptErr	= 0x20,
	AcceptRunt	= 0x10,
	AcceptBroadcast	= 0x08,
	AcceptMulticast	= 0x04,
	AcceptMyPhys	= 0x02,
	AcceptAllPhys	= 0x01,
#define RX_CONFIG_ACCEPT_MASK		0x3f

	/* TxConfigBits */
	TxInterFrameGapShift = 24,
	TxDMAShift = 8,	/* DMA burst value (0-7) is shift this many bits */

	/* Config1 register p.24 */
	LEDS1		= (1 << 7),
	LEDS0		= (1 << 6),
	Speed_down	= (1 << 4),
	MEMMAP		= (1 << 3),
	IOMAP		= (1 << 2),
	VPD		= (1 << 1),
	PMEnable	= (1 << 0),	/* Power Management Enable */

	/* Config2 register p. 25 */
	ClkReqEn	= (1 << 7),	/* Clock Request Enable */
	MSIEnable	= (1 << 5),	/* 8169 only. Reserved in the 8168. */
	PCI_Clock_66MHz = 0x01,
	PCI_Clock_33MHz = 0x00,

	/* Config3 register p.25 */
	MagicPacket	= (1 << 5),	/* Wake up when receives a Magic Packet */
	LinkUp		= (1 << 4),	/* Wake up when the cable connection is re-established */
	Jumbo_En0	= (1 << 2),	/* 8168 only. Reserved in the 8168b */
	Rdy_to_L23	= (1 << 1),	/* L23 Enable */
	Beacon_en	= (1 << 0),	/* 8168 only. Reserved in the 8168b */

	/* Config4 register */
	Jumbo_En1	= (1 << 1),	/* 8168 only. Reserved in the 8168b */

	/* Config5 register p.27 */
	BWF		= (1 << 6),	/* Accept Broadcast wakeup frame */
	MWF		= (1 << 5),	/* Accept Multicast wakeup frame */
	UWF		= (1 << 4),	/* Accept Unicast wakeup frame */
	Spi_en		= (1 << 3),
	LanWake		= (1 << 1),	/* LanWake enable/disable */
	PMEStatus	= (1 << 0),	/* PME status can be reset by PCI RST# */
	ASPM_en		= (1 << 0),	/* ASPM enable */

	/* CPlusCmd p.31 */
	EnableBist	= (1 << 15),	// 8168 8101
	Mac_dbgo_oe	= (1 << 14),	// 8168 8101
	Normal_mode	= (1 << 13),	// unused
	Force_half_dup	= (1 << 12),	// 8168 8101
	Force_rxflow_en	= (1 << 11),	// 8168 8101
	Force_txflow_en	= (1 << 10),	// 8168 8101
	Cxpl_dbg_sel	= (1 << 9),	// 8168 8101
	ASF		= (1 << 8),	// 8168 8101
	PktCntrDisable	= (1 << 7),	// 8168 8101
	Mac_dbgo_sel	= 0x001c,	// 8168
	RxVlan		= (1 << 6),
	RxChkSum	= (1 << 5),
	PCIDAC		= (1 << 4),
	PCIMulRW	= (1 << 3),
#define INTT_MASK	GENMASK(1, 0)

	/* rtl8169_PHYstatus */
	TBI_Enable	= 0x80,
	TxFlowCtrl	= 0x40,
	RxFlowCtrl	= 0x20,
	_1000bpsF	= 0x10,
	_100bps		= 0x08,
	_10bps		= 0x04,
	LinkStatus	= 0x02,
	FullDup		= 0x01,

	/* ResetCounterCommand */
	CounterReset	= 0x1,

	/* DumpCounterCommand */
	CounterDump	= 0x8,

	/* magic enable v2 */
	MagicPacket_v2	= (1 << 16),	/* Wake up when receives a Magic Packet */
};
513 
/* Descriptor opts1 bits common to Tx and Rx rings. */
enum rtl_desc_bit {
	/* First doubleword. */
	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
	RingEnd		= (1 << 30), /* End of descriptor ring */
	FirstFrag	= (1 << 29), /* First segment of a packet */
	LastFrag	= (1 << 28), /* Final segment of a packet */
};
521 
522 /* Generic case. */
/* Tx descriptor bits common to all chip generations. */
/* Generic case. */
enum rtl_tx_desc_bit {
	/* First doubleword. */
	TD_LSO		= (1 << 27),		/* Large Send Offload */
#define TD_MSS_MAX			0x07ffu	/* MSS value */

	/* Second doubleword. */
	TxVlanTag	= (1 << 17),		/* Add VLAN tag */
};
531 
532 /* 8169, 8168b and 810x except 8102e. */
/* Tx checksum-offload bits, older layout. */
/* 8169, 8168b and 810x except 8102e. */
enum rtl_tx_desc_bit_0 {
	/* First doubleword. */
#define TD0_MSS_SHIFT			16	/* MSS position (11 bits) */
	TD0_TCP_CS	= (1 << 16),		/* Calculate TCP/IP checksum */
	TD0_UDP_CS	= (1 << 17),		/* Calculate UDP/IP checksum */
	TD0_IP_CS	= (1 << 18),		/* Calculate IP checksum */
};
540 
541 /* 8102e, 8168c and beyond. */
/* Tx checksum/TSO bits, newer layout (checksum bits move to opts2). */
/* 8102e, 8168c and beyond. */
enum rtl_tx_desc_bit_1 {
	/* First doubleword. */
	TD1_GTSENV4	= (1 << 26),		/* Giant Send for IPv4 */
	TD1_GTSENV6	= (1 << 25),		/* Giant Send for IPv6 */
#define GTTCPHO_SHIFT			18
#define GTTCPHO_MAX			0x7fU

	/* Second doubleword. */
#define TCPHO_SHIFT			18
#define TCPHO_MAX			0x3ffU
#define TD1_MSS_SHIFT			18	/* MSS position (11 bits) */
	TD1_IPv6_CS	= (1 << 28),		/* Calculate IPv6 checksum */
	TD1_IPv4_CS	= (1 << 29),		/* Calculate IPv4 checksum */
	TD1_TCP_CS	= (1 << 30),		/* Calculate TCP/IP checksum */
	TD1_UDP_CS	= (1 << 31),		/* Calculate UDP/IP checksum */
};
558 
/* Rx descriptor status bits (protocol ID, checksum results, VLAN). */
enum rtl_rx_desc_bit {
	/* Rx private */
	PID1		= (1 << 18), /* Protocol ID bit 1/2 */
	PID0		= (1 << 17), /* Protocol ID bit 0/2 */

#define RxProtoUDP	(PID1)
#define RxProtoTCP	(PID0)
#define RxProtoIP	(PID1 | PID0)
#define RxProtoMask	RxProtoIP

	IPFail		= (1 << 16), /* IP checksum failed */
	UDPFail		= (1 << 15), /* UDP/IP checksum failed */
	TCPFail		= (1 << 14), /* TCP/IP checksum failed */
	RxVlanTag	= (1 << 16), /* VLAN tag available */
};
574 
/* Reserved Rx descriptor bits, and the CPlusCmd bits preserved across
 * the chip quirk setup. */
#define RsvdMask	0x3fffc000
#define CPCMD_QUIRK_MASK	(Normal_mode | RxVlan | RxChkSum | INTT_MASK)
577 
/* Hardware Tx descriptor, 16 bytes, little endian. */
struct TxDesc {
	__le32 opts1;	/* DescOwn/frag bits + offload flags (see enums above) */
	__le32 opts2;	/* TxVlanTag and newer-layout checksum bits */
	__le64 addr;	/* DMA address of the Tx buffer */
};
583 
/* Hardware Rx descriptor, 16 bytes, little endian. */
struct RxDesc {
	__le32 opts1;	/* DescOwn + Rx status bits (see enums above) */
	__le32 opts2;	/* VLAN tag, per rtl_rx_desc_bit */
	__le64 addr;	/* DMA address of the Rx buffer */
};
589 
/* Per-Tx-slot software bookkeeping: the skb and its mapped length. */
struct ring_info {
	struct sk_buff	*skb;
	u32		len;
};
594 
/* Hardware statistics block; presumably DMA-dumped by the chip via
 * CounterAddrLow/High, so field order, widths and the historical
 * "tx_underun" spelling must not change — TODO confirm against the
 * dump code elsewhere in this file. */
struct rtl8169_counters {
	__le64	tx_packets;
	__le64	rx_packets;
	__le64	tx_errors;
	__le32	rx_errors;
	__le16	rx_missed;
	__le16	align_errors;
	__le32	tx_one_collision;
	__le32	tx_multi_collision;
	__le64	rx_unicast;
	__le64	rx_broadcast;
	__le32	rx_multicast;
	__le16	tx_aborted;
	__le16	tx_underun;
};
610 
/* Saved baseline of selected hardware counters; presumably subtracted
 * when reporting stats so values survive a counter reset — confirm
 * against the stats path. */
struct rtl8169_tc_offsets {
	bool	inited;
	__le64	tx_errors;
	__le32	tx_multi_collision;
	__le16	tx_aborted;
};
617 
/* Bit numbers within tp->wk.flags controlling the deferred work. */
enum rtl_flag {
	RTL_FLAG_TASK_ENABLED = 0,
	RTL_FLAG_TASK_RESET_PENDING,
	RTL_FLAG_MAX
};
623 
/* Software packet/byte counters; syncp lets 32-bit readers fetch the
 * 64-bit pair consistently (u64_stats_sync). */
struct rtl8169_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};
629 
/* Driver-private state, one instance per adapter (netdev_priv). */
struct rtl8169_private {
	void __iomem *mmio_addr;	/* memory map physical address */
	struct pci_dev *pci_dev;
	struct net_device *dev;
	struct phy_device *phydev;
	struct napi_struct napi;
	u32 msg_enable;
	enum mac_version mac_version;
	u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
	u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
	u32 dirty_tx;
	struct rtl8169_stats rx_stats;
	struct rtl8169_stats tx_stats;
	struct TxDesc *TxDescArray;	/* 256-aligned Tx descriptor ring */
	struct RxDesc *RxDescArray;	/* 256-aligned Rx descriptor ring */
	dma_addr_t TxPhyAddr;		/* DMA address of TxDescArray */
	dma_addr_t RxPhyAddr;		/* DMA address of RxDescArray */
	void *Rx_databuff[NUM_RX_DESC];	/* Rx data buffers */
	struct ring_info tx_skb[NUM_TX_DESC];	/* Tx data buffers */
	u16 cp_cmd;			/* cached CPlusCmd value */

	u16 irq_mask;
	const struct rtl_coalesce_info *coalesce_info;
	struct clk *clk;

	void (*hw_start)(struct rtl8169_private *tp);

	/* deferred work (e.g. reset) plus its enable flags and lock */
	struct {
		DECLARE_BITMAP(flags, RTL_FLAG_MAX);
		struct mutex mutex;
		struct work_struct work;
	} wk;

	unsigned irq_enabled:1;
	unsigned supports_gmii:1;
	dma_addr_t counters_phys_addr;
	struct rtl8169_counters *counters;
	struct rtl8169_tc_offsets tc_offset;
	u32 saved_wolopts;

	const char *fw_name;
	struct rtl_fw *rtl_fw;

	u32 ocp_base;	/* currently selected OCP page base */
};
675 
/* Generic per-chip callback operating only on the private data. */
typedef void (*rtl_generic_fct)(struct rtl8169_private *tp);
677 
/* Module metadata, parameters, and the firmware images this driver may
 * request (so modprobe/initramfs tooling can bundle them). */
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_SOFTDEP("pre: realtek");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_8168D_1);
MODULE_FIRMWARE(FIRMWARE_8168D_2);
MODULE_FIRMWARE(FIRMWARE_8168E_1);
MODULE_FIRMWARE(FIRMWARE_8168E_2);
MODULE_FIRMWARE(FIRMWARE_8168E_3);
MODULE_FIRMWARE(FIRMWARE_8105E_1);
MODULE_FIRMWARE(FIRMWARE_8168F_1);
MODULE_FIRMWARE(FIRMWARE_8168F_2);
MODULE_FIRMWARE(FIRMWARE_8402_1);
MODULE_FIRMWARE(FIRMWARE_8411_1);
MODULE_FIRMWARE(FIRMWARE_8411_2);
MODULE_FIRMWARE(FIRMWARE_8106E_1);
MODULE_FIRMWARE(FIRMWARE_8106E_2);
MODULE_FIRMWARE(FIRMWARE_8168G_2);
MODULE_FIRMWARE(FIRMWARE_8168G_3);
MODULE_FIRMWARE(FIRMWARE_8168H_1);
MODULE_FIRMWARE(FIRMWARE_8168H_2);
MODULE_FIRMWARE(FIRMWARE_8107E_1);
MODULE_FIRMWARE(FIRMWARE_8107E_2);
703 
704 static inline struct device *tp_to_dev(struct rtl8169_private *tp)
705 {
706 	return &tp->pci_dev->dev;
707 }
708 
/* Serialize against the deferred-work machinery in tp->wk. */
static void rtl_lock_work(struct rtl8169_private *tp)
{
	mutex_lock(&tp->wk.mutex);
}

static void rtl_unlock_work(struct rtl8169_private *tp)
{
	mutex_unlock(&tp->wk.mutex);
}
718 
/* The Config0..Config5 registers sit behind the Cfg9346 latch and are
 * only writable while it is unlocked; relock when done. */
static void rtl_lock_config_regs(struct rtl8169_private *tp)
{
	RTL_W8(tp, Cfg9346, Cfg9346_Lock);
}

static void rtl_unlock_config_regs(struct rtl8169_private *tp)
{
	RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
}
728 
/* Set the PCIe Max_Read_Request_Size field of the Device Control
 * register to @force (a PCI_EXP_DEVCTL_READRQ_* value). */
static void rtl_tx_performance_tweak(struct rtl8169_private *tp, u16 force)
{
	pcie_capability_clear_and_set_word(tp->pci_dev, PCI_EXP_DEVCTL,
					   PCI_EXP_DEVCTL_READRQ, force);
}
734 
/* A named hardware condition polled by rtl_loop_wait(); msg names the
 * condition in the timeout error message. */
struct rtl_cond {
	bool (*check)(struct rtl8169_private *);
	const char *msg;
};
739 
/* udelay() wrapper matching the delay-callback signature that
 * rtl_loop_wait() expects (udelay may be a macro). */
static void rtl_udelay(unsigned int d)
{
	udelay(d);
}
744 
745 static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
746 			  void (*delay)(unsigned int), unsigned int d, int n,
747 			  bool high)
748 {
749 	int i;
750 
751 	for (i = 0; i < n; i++) {
752 		if (c->check(tp) == high)
753 			return true;
754 		delay(d);
755 	}
756 	netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
757 		  c->msg, !high, n, d);
758 	return false;
759 }
760 
/* Convenience wrappers around rtl_loop_wait() selecting the delay
 * primitive (udelay vs msleep) and the level to wait for. */
static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
}

static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
}

static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, true);
}

static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, false);
}
788 
/* Declare a struct rtl_cond called @name together with its
 * name##_check() predicate; the macro ends with the function header so
 * the invocation site supplies the body directly below it. */
#define DECLARE_RTL_COND(name)				\
static bool name ## _check(struct rtl8169_private *);	\
							\
static const struct rtl_cond name = {			\
	.check	= name ## _check,			\
	.msg	= #name					\
};							\
							\
static bool name ## _check(struct rtl8169_private *tp)
798 
799 static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
800 {
801 	if (reg & 0xffff0001) {
802 		netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
803 		return true;
804 	}
805 	return false;
806 }
807 
/* Flag bit of GPHY_OCP: writers poll for it to clear, readers poll for
 * it to be set (data valid) — see the r8168_phy_ocp_* helpers. */
DECLARE_RTL_COND(rtl_ocp_gphy_cond)
{
	return RTL_R32(tp, GPHY_OCP) & OCPAR_FLAG;
}
812 
/* Write @data to PHY OCP register @reg (16-bit, even address). */
static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	if (rtl_ocp_reg_failure(tp, reg))
		return;

	/* address in bits 30:15, data in the low 16 bits */
	RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);

	/* wait (up to 10 x 25us) for the busy flag to clear */
	rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
}
822 
823 static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
824 {
825 	if (rtl_ocp_reg_failure(tp, reg))
826 		return 0;
827 
828 	RTL_W32(tp, GPHY_OCP, reg << 15);
829 
830 	return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
831 		(RTL_R32(tp, GPHY_OCP) & 0xffff) : ~0;
832 }
833 
/* Write @data to MAC OCP register @reg; no completion polling. */
static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	if (rtl_ocp_reg_failure(tp, reg))
		return;

	RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data);
}
841 
/* Read MAC OCP register @reg; the result is available immediately in
 * the low 16 bits of OCPDR.  Returns 0 for an invalid address. */
static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(tp, OCPDR, reg << 15);

	return RTL_R32(tp, OCPDR);
}
851 
852 #define OCP_STD_PHY_BASE	0xa400
853 
854 static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
855 {
856 	if (reg == 0x1f) {
857 		tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
858 		return;
859 	}
860 
861 	if (tp->ocp_base != OCP_STD_PHY_BASE)
862 		reg -= 0x10;
863 
864 	r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
865 }
866 
867 static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
868 {
869 	if (tp->ocp_base != OCP_STD_PHY_BASE)
870 		reg -= 0x10;
871 
872 	return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
873 }
874 
875 static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value)
876 {
877 	if (reg == 0x1f) {
878 		tp->ocp_base = value << 4;
879 		return;
880 	}
881 
882 	r8168_mac_ocp_write(tp, tp->ocp_base + reg, value);
883 }
884 
/* Read a MAC MCU register relative to the selected OCP page. */
static int mac_mcu_read(struct rtl8169_private *tp, int reg)
{
	return r8168_mac_ocp_read(tp, tp->ocp_base + reg);
}
889 
/* Bit 31 of PHYAR: writers wait for it to clear, readers wait for it
 * to be set (read data valid). */
DECLARE_RTL_COND(rtl_phyar_cond)
{
	return RTL_R32(tp, PHYAR) & 0x80000000;
}
894 
/* Classic MDIO write through PHYAR: 5-bit register in bits 20:16,
 * 16-bit data in the low word, bit 31 starts the write. */
static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));

	rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
	/*
	 * According to hardware specs a 20us delay is required after write
	 * complete indication, but before sending next command.
	 */
	udelay(20);
}
906 
907 static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
908 {
909 	int value;
910 
911 	RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16);
912 
913 	value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
914 		RTL_R32(tp, PHYAR) & 0xffff : ~0;
915 
916 	/*
917 	 * According to hardware specs a 20us delay is required after read
918 	 * complete indication, but before sending next command.
919 	 */
920 	udelay(20);
921 
922 	return value;
923 }
924 
/* Busy/valid flag of OCPAR, used by the 8168dp indirect MDIO path. */
DECLARE_RTL_COND(rtl_ocpar_cond)
{
	return RTL_R32(tp, OCPAR) & OCPAR_FLAG;
}
929 
/* Stage an indirect GPHY command for RTL8168dp (VER_27): the command
 * goes into OCPDR and is kicked off by the OCPAR write; completion is
 * signalled by the OCPAR flag clearing. */
static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
{
	RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
	RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD);
	RTL_W32(tp, EPHY_RXER_NUM, 0);

	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
}
938 
/* MDIO write for RTL8168dp (VER_27) via the indirect access helper. */
static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	r8168dp_1_mdio_access(tp, reg,
			      OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
}
944 
/* MDIO read for RTL8168dp (VER_27): issue a read command, then fetch
 * the result with a second OCPAR transaction.  Returns ~0 on timeout. */
static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
{
	r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);

	/* settle time between the command and the result fetch */
	mdelay(1);
	RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD);
	RTL_W32(tp, EPHY_RXER_NUM, 0);

	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
		RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : ~0;
}
956 
#define R8168DP_1_MDIO_ACCESS_BIT	0x00020000

/* Clear the access bit in register 0xd0 before MDIO operations on
 * RTL8168dp VER_28/VER_31; presumably hands PHY access to the host —
 * TODO confirm against Realtek documentation. */
static void r8168dp_2_mdio_start(struct rtl8169_private *tp)
{
	RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
}
963 
/* Set the access bit again once the MDIO operation is finished. */
static void r8168dp_2_mdio_stop(struct rtl8169_private *tp)
{
	RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
}
968 
/* MDIO write for RTL8168dp VER_28/VER_31: bracket the classic PHYAR
 * write with the start/stop access-bit toggling. */
static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	r8168dp_2_mdio_start(tp);

	r8169_mdio_write(tp, reg, value);

	r8168dp_2_mdio_stop(tp);
}
977 
/* MDIO read counterpart of r8168dp_2_mdio_write(). */
static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
{
	int value;

	r8168dp_2_mdio_start(tp);

	value = r8169_mdio_read(tp, reg);

	r8168dp_2_mdio_stop(tp);

	return value;
}
990 
991 static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
992 {
993 	switch (tp->mac_version) {
994 	case RTL_GIGA_MAC_VER_27:
995 		r8168dp_1_mdio_write(tp, location, val);
996 		break;
997 	case RTL_GIGA_MAC_VER_28:
998 	case RTL_GIGA_MAC_VER_31:
999 		r8168dp_2_mdio_write(tp, location, val);
1000 		break;
1001 	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
1002 		r8168g_mdio_write(tp, location, val);
1003 		break;
1004 	default:
1005 		r8169_mdio_write(tp, location, val);
1006 		break;
1007 	}
1008 }
1009 
/* Dispatch a PHY register read to the access method matching the
 * detected chip version. */
static int rtl_readphy(struct rtl8169_private *tp, int location)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
		return r8168dp_1_mdio_read(tp, location);
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		return r8168dp_2_mdio_read(tp, location);
	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
		return r8168g_mdio_read(tp, location);
	default:
		return r8169_mdio_read(tp, location);
	}
}
1024 
/* Read-modify-write: OR @value into PHY register @reg_addr. */
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
	rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
}
1029 
/* Read-modify-write on a PHY register: clear the bits in @m, then set
 * the bits in @p ("write 0, write 1"). */
static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
	int val;

	val = rtl_readphy(tp, reg_addr);
	rtl_writephy(tp, reg_addr, (val & ~m) | p);
}
1037 
/* Poll condition: EPHYAR completion flag is set. */
DECLARE_RTL_COND(rtl_ephyar_cond)
{
	return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG;
}
1042 
/* Write @value to PCIe PHY (EPHY) register @reg_addr and wait for the
 * flag to clear, then settle briefly before the next access. */
static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
{
	RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
		(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);

	udelay(10);
}
1052 
/* Read PCIe PHY (EPHY) register @reg_addr; returns ~0 (0xffff after the
 * u16 truncation) on timeout. */
static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
{
	RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
		RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
1060 
/* Poll condition: ERIAR completion flag is set. */
DECLARE_RTL_COND(rtl_eriar_cond)
{
	return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
}
1065 
/*
 * ERI register write of @type (EXGMAC or OOB). @addr must be 32-bit
 * aligned and @mask selects the byte lanes to update; both are driver
 * bugs if violated, hence the BUG_ON.
 */
static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
			   u32 val, int type)
{
	BUG_ON((addr & 3) || (mask == 0));
	RTL_W32(tp, ERIDR, val);
	RTL_W32(tp, ERIAR, ERIAR_WRITE_CMD | type | mask | addr);

	rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}
1075 
/* ERI write to the EXGMAC block (the common case). */
static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
			  u32 val)
{
	_rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC);
}
1081 
/* ERI register read of @type; always reads all four byte lanes.
 * Returns ~0 on timeout. */
static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
{
	RTL_W32(tp, ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);

	return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
		RTL_R32(tp, ERIDR) : ~0;
}
1089 
/* ERI read from the EXGMAC block (the common case). */
static u32 rtl_eri_read(struct rtl8169_private *tp, int addr)
{
	return _rtl_eri_read(tp, addr, ERIAR_EXGMAC);
}
1094 
/* Read-modify-write on an ERI register: clear bits @m, set bits @p.
 * The write-back only touches the byte lanes selected by @mask. */
static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
			 u32 m)
{
	u32 val;

	val = rtl_eri_read(tp, addr);
	rtl_eri_write(tp, addr, mask, (val & ~m) | p);
}
1103 
/* Set bits @p in an ERI register (clear nothing). */
static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 mask,
			     u32 p)
{
	rtl_w0w1_eri(tp, addr, mask, p, 0);
}
1109 
/* Clear bits @m in an ERI register (set nothing). */
static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 mask,
			       u32 m)
{
	rtl_w0w1_eri(tp, addr, mask, 0, m);
}
1115 
/* OCP read on 8168dp: byte-lane @mask in OCPAR bits 15:12, register
 * address in bits 11:0. Returns ~0 on timeout. */
static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
	RTL_W32(tp, OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
		RTL_R32(tp, OCPDR) : ~0;
}
1122 
/* OCP read on 8168ep goes through the ERI/OOB interface. Note that
 * @mask is accepted for signature parity with the dp variant but is
 * unused here: _rtl_eri_read always reads all four byte lanes. */
static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
	return _rtl_eri_read(tp, reg, ERIAR_OOB);
}
1127 
/* OCP write on 8168dp: data first, then address/lane-mask with the
 * OCPAR_FLAG write-trigger; wait for the flag to clear. */
static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
			      u32 data)
{
	RTL_W32(tp, OCPDR, data);
	RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
}
1135 
/* OCP write on 8168ep goes through the ERI/OOB interface, translating
 * the 4-bit lane @mask into the ERIAR mask field. */
static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
			      u32 data)
{
	_rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT,
		       data, ERIAR_OOB);
}
1142 
/* Post an OOB command byte to the 8168dp firmware: write @cmd to ERI
 * 0xe8, then ring the doorbell via OCP register 0x30. */
static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd);

	r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001);
}
1149 
/* Out-of-band commands understood by the DASH management firmware. */
#define OOB_CMD_RESET		0x00
#define OOB_CMD_DRIVER_START	0x05
#define OOB_CMD_DRIVER_STOP	0x06
1153 
1154 static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
1155 {
1156 	return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
1157 }
1158 
/* Poll condition: bit 11 of the dp DASH status OCP register. */
DECLARE_RTL_COND(rtl_dp_ocp_read_cond)
{
	u16 reg;

	reg = rtl8168_get_ocp_reg(tp);

	return r8168dp_ocp_read(tp, 0x0f, reg) & 0x00000800;
}
1167 
/* Poll condition: bit 0 of ep OCP register 0x124. */
DECLARE_RTL_COND(rtl_ep_ocp_read_cond)
{
	return r8168ep_ocp_read(tp, 0x0f, 0x124) & 0x00000001;
}
1172 
/* Poll condition: bit 0x20 in IBISR0 (used while stopping the CMAC). */
DECLARE_RTL_COND(rtl_ocp_tx_cond)
{
	return RTL_R8(tp, IBISR0) & 0x20;
}
1177 
/* Quiesce the 8168ep CMAC: clear IBCR2 bit 0, wait for the IBISR0
 * status bit, acknowledge it, then clear IBCR0 bit 0. Register
 * sequence must be preserved as-is. */
static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
{
	RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01);
	rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000);
	RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20);
	RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01);
}
1185 
/* Tell the dp DASH firmware the driver is up, then wait for the
 * firmware-ready bit. */
static void rtl8168dp_driver_start(struct rtl8169_private *tp)
{
	r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
	rtl_msleep_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10, 10);
}
1191 
/* Tell the ep DASH firmware the driver is up: post the command, ring
 * the doorbell (bit 0 of OCP 0x30), wait for the ready bit. */
static void rtl8168ep_driver_start(struct rtl8169_private *tp)
{
	r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
	r8168ep_ocp_write(tp, 0x01, 0x30,
			  r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
	rtl_msleep_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10, 10);
}
1199 
/* Driver-start notification dispatch. Only callable for the DASH
 * capable versions listed below; anything else is a driver bug. */
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl8168dp_driver_start(tp);
		break;
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		rtl8168ep_driver_start(tp);
		break;
	default:
		BUG();
		break;
	}
}
1218 
/* Tell the dp DASH firmware the driver is going down; wait for the
 * ready bit to drop. */
static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
{
	r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
	rtl_msleep_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10, 10);
}
1224 
/* ep counterpart of the driver-stop handshake: quiesce the CMAC first,
 * then post the stop command, ring the doorbell, and wait for the
 * ready bit to drop. */
static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
{
	rtl8168ep_stop_cmac(tp);
	r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
	r8168ep_ocp_write(tp, 0x01, 0x30,
			  r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
	rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10);
}
1233 
/* Driver-stop notification dispatch; mirrors rtl8168_driver_start(). */
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl8168dp_driver_stop(tp);
		break;
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		rtl8168ep_driver_stop(tp);
		break;
	default:
		BUG();
		break;
	}
}
1252 
/* DASH firmware presence on dp chips: bit 15 of the chip-specific
 * status OCP register. */
static bool r8168dp_check_dash(struct rtl8169_private *tp)
{
	u16 reg = rtl8168_get_ocp_reg(tp);

	return !!(r8168dp_ocp_read(tp, 0x0f, reg) & 0x00008000);
}
1259 
/* DASH firmware presence on ep chips: bit 0 of OCP register 0x128. */
static bool r8168ep_check_dash(struct rtl8169_private *tp)
{
	return !!(r8168ep_ocp_read(tp, 0x0f, 0x128) & 0x00000001);
}
1264 
/* Is DASH remote management active? false for all non-DASH-capable
 * chip versions. */
static bool r8168_check_dash(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		return r8168dp_check_dash(tp);
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		return r8168ep_check_dash(tp);
	default:
		return false;
	}
}
1280 
/* Pulse bit 0 of ERI 0xdc low then high to reset the packet filter. */
static void rtl_reset_packet_filter(struct rtl8169_private *tp)
{
	rtl_eri_clear_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0));
	rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0));
}
1286 
/* Poll condition: EFUSE read completion flag is set. */
DECLARE_RTL_COND(rtl_efusear_cond)
{
	return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG;
}
1291 
/* Read one byte from the 8168d efuse; returns ~0 (0xff after the u8
 * truncation) on timeout. */
static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
{
	RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
		RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
1299 
/* Acknowledge (clear) the interrupt status bits in @bits. */
static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
{
	RTL_W16(tp, IntrStatus, bits);
}
1304 
/* Mask all chip interrupts and record the state in tp->irq_enabled. */
static void rtl_irq_disable(struct rtl8169_private *tp)
{
	RTL_W16(tp, IntrMask, 0);
	tp->irq_enabled = 0;
}
1310 
/* Interrupt events handled from NAPI context. */
#define RTL_EVENT_NAPI_RX	(RxOK | RxErr)
#define RTL_EVENT_NAPI_TX	(TxOK | TxErr)
#define RTL_EVENT_NAPI		(RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)

/* Unmask the interrupts in tp->irq_mask; counterpart of
 * rtl_irq_disable(). The flag is set before the register write. */
static void rtl_irq_enable(struct rtl8169_private *tp)
{
	tp->irq_enabled = 1;
	RTL_W16(tp, IntrMask, tp->irq_mask);
}
1320 
/* Mask all interrupts and acknowledge any pending ones. The trailing
 * read forces the preceding posted PCI writes to complete. */
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
	rtl_irq_disable(tp);
	rtl_ack_events(tp, 0xffff);
	/* PCI commit */
	RTL_R8(tp, ChipCmd);
}
1328 
/*
 * Apply chip-version-specific ERI fixups after a link change. The
 * values written to ERI 0x1bc/0x1dc (and 0x1d0 for VER_37) depend on
 * the negotiated speed; they are vendor magic numbers taken as-is.
 * No-op when the interface is down.
 */
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	struct phy_device *phydev = tp->phydev;

	if (!netif_running(dev))
		return;

	if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_38) {
		if (phydev->speed == SPEED_1000) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
		} else if (phydev->speed == SPEED_100) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
		} else {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f);
		}
		rtl_reset_packet_filter(tp);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_36) {
		if (phydev->speed == SPEED_1000) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
		} else {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f);
		}
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
		if (phydev->speed == SPEED_10) {
			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060a);
		} else {
			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
		}
	}
}
1368 
/* All Wake-on-LAN trigger types this hardware can support. */
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)

/* ethtool .get_wol: report the cached WoL options under the work lock. */
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);
	wol->supported = WAKE_ANY;
	wol->wolopts = tp->saved_wolopts;
	rtl_unlock_work(tp);
}
1380 
/*
 * Program the Wake-on-LAN configuration bits in the Config registers.
 * Caller must ensure the device is runtime-active. Config registers
 * are write-protected; this temporarily unlocks them.
 */
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{
	unsigned int i, tmp;
	/* map each WAKE_* option to its Config register bit */
	static const struct {
		u32 opt;
		u16 reg;
		u8  mask;
	} cfg[] = {
		{ WAKE_PHY,   Config3, LinkUp },
		{ WAKE_UCAST, Config5, UWF },
		{ WAKE_BCAST, Config5, BWF },
		{ WAKE_MCAST, Config5, MWF },
		{ WAKE_ANY,   Config5, LanWake },
		{ WAKE_MAGIC, Config3, MagicPacket }
	};
	u8 options;

	rtl_unlock_config_regs(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
		/* newer chips control magic packet via ERI 0x0dc instead of
		 * the last cfg[] entry, so skip it in the loop below */
		tmp = ARRAY_SIZE(cfg) - 1;
		if (wolopts & WAKE_MAGIC)
			rtl_eri_set_bits(tp, 0x0dc, ERIAR_MASK_0100,
					 MagicPacket_v2);
		else
			rtl_eri_clear_bits(tp, 0x0dc, ERIAR_MASK_0100,
					   MagicPacket_v2);
		break;
	default:
		tmp = ARRAY_SIZE(cfg);
		break;
	}

	/* set/clear each per-trigger bit according to @wolopts */
	for (i = 0; i < tmp; i++) {
		options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask;
		if (wolopts & cfg[i].opt)
			options |= cfg[i].mask;
		RTL_W8(tp, cfg[i].reg, options);
	}

	/* enable/disable PME signalling; the register differs by version */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_17:
		options = RTL_R8(tp, Config1) & ~PMEnable;
		if (wolopts)
			options |= PMEnable;
		RTL_W8(tp, Config1, options);
		break;
	default:
		options = RTL_R8(tp, Config2) & ~PME_SIGNAL;
		if (wolopts)
			options |= PME_SIGNAL;
		RTL_W8(tp, Config2, options);
		break;
	}

	rtl_lock_config_regs(tp);

	device_set_wakeup_enable(tp_to_dev(tp), wolopts);
}
1442 
/*
 * ethtool .set_wol: cache the requested options and, if the device is
 * runtime-active, program them into the hardware immediately.
 * Returns -EINVAL for options outside WAKE_ANY.
 */
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct device *d = tp_to_dev(tp);

	if (wol->wolopts & ~WAKE_ANY)
		return -EINVAL;

	/* keep the device from runtime-suspending while we check its state */
	pm_runtime_get_noresume(d);

	rtl_lock_work(tp);

	tp->saved_wolopts = wol->wolopts;

	if (pm_runtime_active(d))
		__rtl8169_set_wol(tp, tp->saved_wolopts);

	rtl_unlock_work(tp);

	pm_runtime_put_noidle(d);

	return 0;
}
1466 
/* ethtool .get_drvinfo: driver name, PCI bus id, and (if firmware was
 * loaded) the firmware version string. */
static void rtl8169_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct rtl_fw *rtl_fw = tp->rtl_fw;

	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
	/* sizeof is unevaluated, so this is safe even when rtl_fw is NULL */
	BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
	if (rtl_fw)
		strlcpy(info->fw_version, rtl_fw->version,
			sizeof(info->fw_version));
}
1480 
/* ethtool .get_regs_len: size of the register dump in bytes. */
static int rtl8169_get_regs_len(struct net_device *dev)
{
	return R8169_REGS_SIZE;
}
1485 
/*
 * .ndo_fix_features: drop features the hardware cannot support at the
 * current MTU — TSO above the max descriptor MSS, and (on post-VER_06
 * chips) IP checksum offload for jumbo frames.
 */
static netdev_features_t rtl8169_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (dev->mtu > TD_MSS_MAX)
		features &= ~NETIF_F_ALL_TSO;

	if (dev->mtu > JUMBO_1K &&
	    tp->mac_version > RTL_GIGA_MAC_VER_06)
		features &= ~NETIF_F_IP_CSUM;

	return features;
}
1500 
/*
 * .ndo_set_features: apply RXALL (accept errored/runt frames via
 * RxConfig) and RX checksum / VLAN stripping (via CPlusCmd) settings.
 * The trailing CPlusCmd read flushes the posted write.
 */
static int rtl8169_set_features(struct net_device *dev,
				netdev_features_t features)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	u32 rx_config;

	rtl_lock_work(tp);

	rx_config = RTL_R32(tp, RxConfig);
	if (features & NETIF_F_RXALL)
		rx_config |= (AcceptErr | AcceptRunt);
	else
		rx_config &= ~(AcceptErr | AcceptRunt);

	RTL_W32(tp, RxConfig, rx_config);

	if (features & NETIF_F_RXCSUM)
		tp->cp_cmd |= RxChkSum;
	else
		tp->cp_cmd &= ~RxChkSum;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		tp->cp_cmd |= RxVlan;
	else
		tp->cp_cmd &= ~RxVlan;

	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
	RTL_R16(tp, CPlusCmd);

	rtl_unlock_work(tp);

	return 0;
}
1534 
/* Build the TX descriptor VLAN field: TxVlanTag flag plus the
 * byte-swapped tag, or 0 when the skb carries no VLAN tag. */
static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
{
	return (skb_vlan_tag_present(skb)) ?
		TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
}
1540 
/* Extract a stripped VLAN tag from the RX descriptor (opts2) and attach
 * it to the skb. The hardware stores the tag byte-swapped. */
static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
{
	u32 opts2 = le32_to_cpu(desc->opts2);

	if (opts2 & RxVlanTag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
}
1548 
/* ethtool .get_regs: dump the MMIO register window (R8169_REGS_SIZE
 * bytes) into @p, 4 bytes at a time, under the work lock. */
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *p)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	u32 __iomem *data = tp->mmio_addr;
	u32 *dw = p;
	int i;

	rtl_lock_work(tp);
	for (i = 0; i < R8169_REGS_SIZE; i += 4)
		memcpy_fromio(dw++, data++, 4);
	rtl_unlock_work(tp);
}
1562 
/* ethtool .get_msglevel: return the driver's message-enable bitmap. */
static u32 rtl8169_get_msglevel(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return tp->msg_enable;
}
1569 
/* ethtool .set_msglevel: update the driver's message-enable bitmap. */
static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->msg_enable = value;
}
1576 
/* ethtool stat names; order must match the data[] indices filled in by
 * rtl8169_get_ethtool_stats(). */
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"unicast",
	"broadcast",
	"multicast",
	"tx_aborted",
	"tx_underrun",
};
1592 
1593 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1594 {
1595 	switch (sset) {
1596 	case ETH_SS_STATS:
1597 		return ARRAY_SIZE(rtl8169_gstrings);
1598 	default:
1599 		return -EOPNOTSUPP;
1600 	}
1601 }
1602 
/* Poll condition: a tally-counter reset or dump is still in flight. */
DECLARE_RTL_COND(rtl_counters_cond)
{
	return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump);
}
1607 
/*
 * Kick a tally-counter operation (@counter_cmd: CounterReset or
 * CounterDump) at the DMA address of tp->counters, then wait for the
 * command bit to clear. Returns false on timeout.
 */
static bool rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
{
	dma_addr_t paddr = tp->counters_phys_addr;
	u32 cmd;

	RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32);
	RTL_R32(tp, CounterAddrHigh);	/* flush posted write */
	cmd = (u64)paddr & DMA_BIT_MASK(32);
	RTL_W32(tp, CounterAddrLow, cmd);
	RTL_W32(tp, CounterAddrLow, cmd | counter_cmd);

	return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
}
1621 
/* Reset the hardware tally counters; trivially succeeds on chips that
 * cannot do it. */
static bool rtl8169_reset_counters(struct rtl8169_private *tp)
{
	/*
	 * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the
	 * tally counters.
	 */
	if (tp->mac_version < RTL_GIGA_MAC_VER_19)
		return true;

	return rtl8169_do_counters(tp, CounterReset);
}
1633 
/* Dump the hardware tally counters into tp->counters; skipped (and
 * reported as success) when the chip cannot service the dump. */
static bool rtl8169_update_counters(struct rtl8169_private *tp)
{
	u8 val = RTL_R8(tp, ChipCmd);

	/*
	 * Some chips are unable to dump tally counters when the receiver
	 * is disabled. If 0xff chip may be in a PCI power-save state.
	 */
	if (!(val & CmdRxEnb) || val == 0xff)
		return true;

	return rtl8169_do_counters(tp, CounterDump);
}
1647 
/*
 * Capture baseline tally-counter values at first open so get_stats64
 * can report deltas instead of lifetime hardware totals. Returns true
 * if either the reset or the dump succeeded (see comment below).
 */
static bool rtl8169_init_counter_offsets(struct rtl8169_private *tp)
{
	struct rtl8169_counters *counters = tp->counters;
	bool ret = false;

	/*
	 * rtl8169_init_counter_offsets is called from rtl_open.  On chip
	 * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only
	 * reset by a power cycle, while the counter values collected by the
	 * driver are reset at every driver unload/load cycle.
	 *
	 * To make sure the HW values returned by @get_stats64 match the SW
	 * values, we collect the initial values at first open(*) and use them
	 * as offsets to normalize the values returned by @get_stats64.
	 *
	 * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one
	 * for the reason stated in rtl8169_update_counters; CmdRxEnb is only
	 * set at open time by rtl_hw_start.
	 */

	if (tp->tc_offset.inited)
		return true;

	/* If both, reset and update fail, propagate to caller. */
	if (rtl8169_reset_counters(tp))
		ret = true;

	if (rtl8169_update_counters(tp))
		ret = true;

	tp->tc_offset.tx_errors = counters->tx_errors;
	tp->tc_offset.tx_multi_collision = counters->tx_multi_collision;
	tp->tc_offset.tx_aborted = counters->tx_aborted;
	tp->tc_offset.inited = true;

	return ret;
}
1685 
/*
 * ethtool .get_ethtool_stats: refresh the DMA-backed tally counters
 * (only if the device is runtime-active) and copy them into data[] in
 * the order declared by rtl8169_gstrings.
 */
static void rtl8169_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct device *d = tp_to_dev(tp);
	struct rtl8169_counters *counters = tp->counters;

	ASSERT_RTNL();

	pm_runtime_get_noresume(d);

	if (pm_runtime_active(d))
		rtl8169_update_counters(tp);

	pm_runtime_put_noidle(d);

	data[0] = le64_to_cpu(counters->tx_packets);
	data[1] = le64_to_cpu(counters->rx_packets);
	data[2] = le64_to_cpu(counters->tx_errors);
	data[3] = le32_to_cpu(counters->rx_errors);
	data[4] = le16_to_cpu(counters->rx_missed);
	data[5] = le16_to_cpu(counters->align_errors);
	data[6] = le32_to_cpu(counters->tx_one_collision);
	data[7] = le32_to_cpu(counters->tx_multi_collision);
	data[8] = le64_to_cpu(counters->rx_unicast);
	data[9] = le64_to_cpu(counters->rx_broadcast);
	data[10] = le32_to_cpu(counters->rx_multicast);
	data[11] = le16_to_cpu(counters->tx_aborted);
	data[12] = le16_to_cpu(counters->tx_underun);
}
1716 
1717 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1718 {
1719 	switch(stringset) {
1720 	case ETH_SS_STATS:
1721 		memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1722 		break;
1723 	}
1724 }
1725 
1726 /*
1727  * Interrupt coalescing
1728  *
1729  * > 1 - the availability of the IntrMitigate (0xe2) register through the
1730  * >     8169, 8168 and 810x line of chipsets
1731  *
1732  * 8169, 8168, and 8136(810x) serial chipsets support it.
1733  *
1734  * > 2 - the Tx timer unit at gigabit speed
1735  *
1736  * The unit of the timer depends on both the speed and the setting of CPlusCmd
1737  * (0xe0) bit 1 and bit 0.
1738  *
1739  * For 8169
1740  * bit[1:0] \ speed        1000M           100M            10M
1741  * 0 0                     320ns           2.56us          40.96us
1742  * 0 1                     2.56us          20.48us         327.7us
1743  * 1 0                     5.12us          40.96us         655.4us
1744  * 1 1                     10.24us         81.92us         1.31ms
1745  *
1746  * For the other
1747  * bit[1:0] \ speed        1000M           100M            10M
1748  * 0 0                     5us             2.56us          40.96us
1749  * 0 1                     40us            20.48us         327.7us
1750  * 1 0                     80us            40.96us         655.4us
1751  * 1 1                     160us           81.92us         1.31ms
1752  */
1753 
/* rx/tx scale factors for one particular CPlusCmd[0:1] value */
struct rtl_coalesce_scale {
	/* Rx / Tx: nanoseconds per IntrMitigate timer tick */
	u32 nsecs[2];
};

/* rx/tx scale factors for all CPlusCmd[0:1] cases */
struct rtl_coalesce_info {
	u32 speed;			/* SPEED_10/100/1000; 0 terminates the table */
	struct rtl_coalesce_scale scalev[4];	/* each CPlusCmd[0:1] case */
};
1765 
/* produce (r,t) pairs with each being in series of *1, *8, *8*2, *8*2*2 */
#define rxtx_x1822(r, t) {		\
	{{(r),		(t)}},		\
	{{(r)*8,	(t)*8}},	\
	{{(r)*8*2,	(t)*8*2}},	\
	{{(r)*8*2*2,	(t)*8*2*2}},	\
}
/* timer-tick tables for the original 8169 (see unit table above) */
static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = {
	/* speed	delays:     rx00   tx00	*/
	{ SPEED_10,	rxtx_x1822(40960, 40960)	},
	{ SPEED_100,	rxtx_x1822( 2560,  2560)	},
	{ SPEED_1000,	rxtx_x1822(  320,   320)	},
	{ 0 },
};

/* timer-tick tables for the 8168/8136 families (5us base at 1000M) */
static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = {
	/* speed	delays:     rx00   tx00	*/
	{ SPEED_10,	rxtx_x1822(40960, 40960)	},
	{ SPEED_100,	rxtx_x1822( 2560,  2560)	},
	{ SPEED_1000,	rxtx_x1822( 5000,  5000)	},
	{ 0 },
};
#undef rxtx_x1822
1789 
1790 /* get rx/tx scale vector corresponding to current speed */
1791 static const struct rtl_coalesce_info *rtl_coalesce_info(struct net_device *dev)
1792 {
1793 	struct rtl8169_private *tp = netdev_priv(dev);
1794 	struct ethtool_link_ksettings ecmd;
1795 	const struct rtl_coalesce_info *ci;
1796 	int rc;
1797 
1798 	rc = phy_ethtool_get_link_ksettings(dev, &ecmd);
1799 	if (rc < 0)
1800 		return ERR_PTR(rc);
1801 
1802 	for (ci = tp->coalesce_info; ci->speed != 0; ci++) {
1803 		if (ecmd.base.speed == ci->speed) {
1804 			return ci;
1805 		}
1806 	}
1807 
1808 	return ERR_PTR(-ELNRNG);
1809 }
1810 
/*
 * ethtool .get_coalesce: decode the IntrMitigate register. Each half
 * of the 16-bit register holds a frames field and a usecs (timer tick)
 * field, RTL_COALESCE_SHIFT bits wide each; ticks are converted to
 * microseconds using the scale for the current speed and CPlusCmd[0:1].
 */
static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	const struct rtl_coalesce_info *ci;
	const struct rtl_coalesce_scale *scale;
	struct {
		u32 *max_frames;
		u32 *usecs;
	} coal_settings [] = {
		{ &ec->rx_max_coalesced_frames, &ec->rx_coalesce_usecs },
		{ &ec->tx_max_coalesced_frames, &ec->tx_coalesce_usecs }
	}, *p = coal_settings;
	int i;
	u16 w;

	memset(ec, 0, sizeof(*ec));

	/* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */
	ci = rtl_coalesce_info(dev);
	if (IS_ERR(ci))
		return PTR_ERR(ci);

	scale = &ci->scalev[tp->cp_cmd & INTT_MASK];

	/* read IntrMitigate and adjust according to scale */
	for (w = RTL_R16(tp, IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) {
		/* hardware stores frames / 4; see the matching >> 2 in
		 * rtl_set_coalesce */
		*p->max_frames = (w & RTL_COALESCE_MASK) << 2;
		w >>= RTL_COALESCE_SHIFT;
		*p->usecs = w & RTL_COALESCE_MASK;
	}

	for (i = 0; i < 2; i++) {
		p = coal_settings + i;
		/* scale is in nanoseconds per tick */
		*p->usecs = (*p->usecs * scale->nsecs[i]) / 1000;

		/*
		 * ethtool_coalesce says it is illegal to set both usecs and
		 * max_frames to 0.
		 */
		if (!*p->usecs && !*p->max_frames)
			*p->max_frames = 1;
	}

	return 0;
}
1856 
/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, nsec) */
/* Picks the smallest CPlusCmd[0:1] setting whose timer range can still
 * represent @nsec; stores the chosen index in *@cp01. */
static const struct rtl_coalesce_scale *rtl_coalesce_choose_scale(
			struct net_device *dev, u32 nsec, u16 *cp01)
{
	const struct rtl_coalesce_info *ci;
	u16 i;

	ci = rtl_coalesce_info(dev);
	if (IS_ERR(ci))
		return ERR_CAST(ci);

	for (i = 0; i < 4; i++) {
		u32 rxtx_maxscale = max(ci->scalev[i].nsecs[0],
					ci->scalev[i].nsecs[1]);
		if (nsec <= rxtx_maxscale * RTL_COALESCE_T_MAX) {
			*cp01 = i;
			return &ci->scalev[i];
		}
	}

	return ERR_PTR(-EINVAL);
}
1879 
/*
 * ethtool .set_coalesce: inverse of rtl_get_coalesce. Chooses a timer
 * scale covering the larger of the rx/tx delays, encodes the four
 * fields into IntrMitigate, and programs the chosen CPlusCmd[0:1].
 * Returns -EINVAL for unrepresentable delays or frame counts that are
 * not multiples of 4 (within the hardware maximum).
 */
static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	const struct rtl_coalesce_scale *scale;
	struct {
		u32 frames;
		u32 usecs;
	} coal_settings [] = {
		{ ec->rx_max_coalesced_frames, ec->rx_coalesce_usecs },
		{ ec->tx_max_coalesced_frames, ec->tx_coalesce_usecs }
	}, *p = coal_settings;
	u16 w = 0, cp01;
	int i;

	scale = rtl_coalesce_choose_scale(dev,
			max(p[0].usecs, p[1].usecs) * 1000, &cp01);
	if (IS_ERR(scale))
		return PTR_ERR(scale);

	for (i = 0; i < 2; i++, p++) {
		u32 units;

		/*
		 * accept max_frames=1 we returned in rtl_get_coalesce.
		 * accept it not only when usecs=0 because of e.g. the following scenario:
		 *
		 * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX)
		 * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1
		 * - then user does `ethtool -C eth0 rx-usecs 100`
		 *
		 * since ethtool sends to kernel whole ethtool_coalesce
		 * settings, if we do not handle rx_usecs=!0, rx_frames=1
		 * we'll reject it below in `frames % 4 != 0`.
		 */
		if (p->frames == 1) {
			p->frames = 0;
		}

		units = p->usecs * 1000 / scale->nsecs[i];
		if (p->frames > RTL_COALESCE_FRAME_MAX || p->frames % 4)
			return -EINVAL;

		/* pack usecs then frames/4 into this half of the register */
		w <<= RTL_COALESCE_SHIFT;
		w |= units;
		w <<= RTL_COALESCE_SHIFT;
		w |= p->frames >> 2;
	}

	rtl_lock_work(tp);

	RTL_W16(tp, IntrMitigate, swab16(w));

	tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01;
	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
	RTL_R16(tp, CPlusCmd);	/* flush posted write */

	rtl_unlock_work(tp);

	return 0;
}
1940 
/*
 * Read the PHY's supported-EEE register. Older EEE-capable chips use
 * the standard MMD PCS register; VER_40+ use a Realtek paged register.
 * Returns the raw register value or a negative errno.
 */
static int rtl_get_eee_supp(struct rtl8169_private *tp)
{
	struct phy_device *phydev = tp->phydev;
	int ret;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
	case RTL_GIGA_MAC_VER_38:
		ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
		break;
	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
		ret = phy_read_paged(phydev, 0x0a5c, 0x12);
		break;
	default:
		ret = -EPROTONOSUPPORT;
		break;
	}

	return ret;
}
1963 
/*
 * Read the link partner's EEE advertisement. Same chip-version split
 * as rtl_get_eee_supp(). Returns the raw value or a negative errno.
 */
static int rtl_get_eee_lpadv(struct rtl8169_private *tp)
{
	struct phy_device *phydev = tp->phydev;
	int ret;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
	case RTL_GIGA_MAC_VER_38:
		ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
		break;
	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
		ret = phy_read_paged(phydev, 0x0a5d, 0x11);
		break;
	default:
		ret = -EPROTONOSUPPORT;
		break;
	}

	return ret;
}
1986 
/*
 * Read our own EEE advertisement. Same chip-version split as
 * rtl_get_eee_supp(). Returns the raw value or a negative errno.
 */
static int rtl_get_eee_adv(struct rtl8169_private *tp)
{
	struct phy_device *phydev = tp->phydev;
	int ret;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
	case RTL_GIGA_MAC_VER_38:
		ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
		break;
	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
		ret = phy_read_paged(phydev, 0x0a5d, 0x10);
		break;
	default:
		ret = -EPROTONOSUPPORT;
		break;
	}

	return ret;
}
2009 
2010 static int rtl_set_eee_adv(struct rtl8169_private *tp, int val)
2011 {
2012 	struct phy_device *phydev = tp->phydev;
2013 	int ret = 0;
2014 
2015 	switch (tp->mac_version) {
2016 	case RTL_GIGA_MAC_VER_34:
2017 	case RTL_GIGA_MAC_VER_35:
2018 	case RTL_GIGA_MAC_VER_36:
2019 	case RTL_GIGA_MAC_VER_38:
2020 		ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
2021 		break;
2022 	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
2023 		phy_write_paged(phydev, 0x0a5d, 0x10, val);
2024 		break;
2025 	default:
2026 		ret = -EPROTONOSUPPORT;
2027 		break;
2028 	}
2029 
2030 	return ret;
2031 }
2032 
/*
 * ethtool .get_eee: gather supported/advertised/LP-advertised EEE link
 * modes from the PHY. Requires the device to be runtime-active (the
 * PHY is unreachable otherwise). Returns 0 or a negative errno.
 */
static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct device *d = tp_to_dev(tp);
	int ret;

	pm_runtime_get_noresume(d);

	if (!pm_runtime_active(d)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* Get Supported EEE */
	ret = rtl_get_eee_supp(tp);
	if (ret < 0)
		goto out;
	data->supported = mmd_eee_cap_to_ethtool_sup_t(ret);

	/* Get advertisement EEE */
	ret = rtl_get_eee_adv(tp);
	if (ret < 0)
		goto out;
	data->advertised = mmd_eee_adv_to_ethtool_adv_t(ret);
	data->eee_enabled = !!data->advertised;

	/* Get LP advertisement EEE */
	ret = rtl_get_eee_lpadv(tp);
	if (ret < 0)
		goto out;
	data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(ret);
	data->eee_active = !!(data->advertised & data->lp_advertised);
out:
	pm_runtime_put_noidle(d);
	return ret < 0 ? ret : 0;
}
2069 
/*
 * ethtool .set_eee: update the EEE advertisement and restart
 * autonegotiation if it changed. EEE requires autoneg and full duplex;
 * the device must be runtime-active. Returns 0 or a negative errno.
 */
static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct device *d = tp_to_dev(tp);
	int old_adv, adv = 0, cap, ret;

	pm_runtime_get_noresume(d);

	if (!dev->phydev || !pm_runtime_active(d)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (dev->phydev->autoneg == AUTONEG_DISABLE ||
	    dev->phydev->duplex != DUPLEX_FULL) {
		ret = -EPROTONOSUPPORT;
		goto out;
	}

	/* Get Supported EEE */
	ret = rtl_get_eee_supp(tp);
	if (ret < 0)
		goto out;
	cap = ret;

	ret = rtl_get_eee_adv(tp);
	if (ret < 0)
		goto out;
	old_adv = ret;

	if (data->eee_enabled) {
		/* empty advertisement means "advertise everything we can" */
		adv = !data->advertised ? cap :
		      ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap;
		/* Mask prohibited EEE modes */
		adv &= ~dev->phydev->eee_broken_modes;
	}

	if (old_adv != adv) {
		ret = rtl_set_eee_adv(tp, adv);
		if (ret < 0)
			goto out;

		/* Restart autonegotiation so the new modes get sent to the
		 * link partner.
		 */
		ret = phy_restart_aneg(dev->phydev);
	}

out:
	pm_runtime_put_noidle(d);
	return ret < 0 ? ret : 0;
}
2122 
/* ethtool operations table; link settings are delegated to phylib. */
static const struct ethtool_ops rtl8169_ethtool_ops = {
	.get_drvinfo		= rtl8169_get_drvinfo,
	.get_regs_len		= rtl8169_get_regs_len,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= rtl_get_coalesce,
	.set_coalesce		= rtl_set_coalesce,
	.get_msglevel		= rtl8169_get_msglevel,
	.set_msglevel		= rtl8169_set_msglevel,
	.get_regs		= rtl8169_get_regs,
	.get_wol		= rtl8169_get_wol,
	.set_wol		= rtl8169_set_wol,
	.get_strings		= rtl8169_get_strings,
	.get_sset_count		= rtl8169_get_sset_count,
	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
	.get_ts_info		= ethtool_op_get_ts_info,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_eee		= rtl8169_get_eee,
	.set_eee		= rtl8169_set_eee,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
2144 
/* Advertise every EEE mode the PHY reports as supported. */
static void rtl_enable_eee(struct rtl8169_private *tp)
{
	int modes;

	modes = rtl_get_eee_supp(tp);
	if (modes > 0)
		rtl_set_eee_adv(tp, modes);
}
2152 
/* Identify the chip variant (XID) from the upper bits of TxConfig and
 * store the result in tp->mac_version.  The table is ordered: the first
 * entry whose mask/value pair matches wins, so more specific entries
 * must precede broader ones within a family.
 */
static void rtl8169_get_mac_version(struct rtl8169_private *tp)
{
	/*
	 * The driver currently handles the 8168Bf and the 8168Be identically
	 * but they can be identified more specifically through the test below
	 * if needed:
	 *
	 * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
	 *
	 * Same thing for the 8101Eb and the 8101Ec:
	 *
	 * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
	 */
	static const struct rtl_mac_info {
		u16 mask;
		u16 val;
		u16 mac_version;
	} mac_info[] = {
		/* 8168EP family. */
		{ 0x7cf, 0x502,	RTL_GIGA_MAC_VER_51 },
		{ 0x7cf, 0x501,	RTL_GIGA_MAC_VER_50 },
		{ 0x7cf, 0x500,	RTL_GIGA_MAC_VER_49 },

		/* 8168H family. */
		{ 0x7cf, 0x541,	RTL_GIGA_MAC_VER_46 },
		{ 0x7cf, 0x540,	RTL_GIGA_MAC_VER_45 },

		/* 8168G family. */
		{ 0x7cf, 0x5c8,	RTL_GIGA_MAC_VER_44 },
		{ 0x7cf, 0x509,	RTL_GIGA_MAC_VER_42 },
		{ 0x7cf, 0x4c1,	RTL_GIGA_MAC_VER_41 },
		{ 0x7cf, 0x4c0,	RTL_GIGA_MAC_VER_40 },

		/* 8168F family. */
		{ 0x7c8, 0x488,	RTL_GIGA_MAC_VER_38 },
		{ 0x7cf, 0x481,	RTL_GIGA_MAC_VER_36 },
		{ 0x7cf, 0x480,	RTL_GIGA_MAC_VER_35 },

		/* 8168E family. */
		{ 0x7c8, 0x2c8,	RTL_GIGA_MAC_VER_34 },
		{ 0x7cf, 0x2c1,	RTL_GIGA_MAC_VER_32 },
		{ 0x7c8, 0x2c0,	RTL_GIGA_MAC_VER_33 },

		/* 8168D family. */
		{ 0x7cf, 0x281,	RTL_GIGA_MAC_VER_25 },
		{ 0x7c8, 0x280,	RTL_GIGA_MAC_VER_26 },

		/* 8168DP family. */
		{ 0x7cf, 0x288,	RTL_GIGA_MAC_VER_27 },
		{ 0x7cf, 0x28a,	RTL_GIGA_MAC_VER_28 },
		{ 0x7cf, 0x28b,	RTL_GIGA_MAC_VER_31 },

		/* 8168C family. */
		{ 0x7cf, 0x3c9,	RTL_GIGA_MAC_VER_23 },
		{ 0x7cf, 0x3c8,	RTL_GIGA_MAC_VER_18 },
		{ 0x7c8, 0x3c8,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf, 0x3c0,	RTL_GIGA_MAC_VER_19 },
		{ 0x7cf, 0x3c2,	RTL_GIGA_MAC_VER_20 },
		{ 0x7cf, 0x3c3,	RTL_GIGA_MAC_VER_21 },
		{ 0x7c8, 0x3c0,	RTL_GIGA_MAC_VER_22 },

		/* 8168B family. */
		{ 0x7cf, 0x380,	RTL_GIGA_MAC_VER_12 },
		{ 0x7c8, 0x380,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c8, 0x300,	RTL_GIGA_MAC_VER_11 },

		/* 8101 family. */
		{ 0x7c8, 0x448,	RTL_GIGA_MAC_VER_39 },
		{ 0x7c8, 0x440,	RTL_GIGA_MAC_VER_37 },
		{ 0x7cf, 0x409,	RTL_GIGA_MAC_VER_29 },
		{ 0x7c8, 0x408,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf, 0x349,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf, 0x249,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf, 0x348,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf, 0x248,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf, 0x340,	RTL_GIGA_MAC_VER_13 },
		{ 0x7cf, 0x343,	RTL_GIGA_MAC_VER_10 },
		{ 0x7cf, 0x342,	RTL_GIGA_MAC_VER_16 },
		{ 0x7c8, 0x348,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c8, 0x248,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c8, 0x340,	RTL_GIGA_MAC_VER_16 },
		/* FIXME: where did these entries come from ? -- FR */
		{ 0xfc8, 0x388,	RTL_GIGA_MAC_VER_15 },
		{ 0xfc8, 0x308,	RTL_GIGA_MAC_VER_14 },

		/* 8110 family. */
		{ 0xfc8, 0x980,	RTL_GIGA_MAC_VER_06 },
		{ 0xfc8, 0x180,	RTL_GIGA_MAC_VER_05 },
		{ 0xfc8, 0x100,	RTL_GIGA_MAC_VER_04 },
		{ 0xfc8, 0x040,	RTL_GIGA_MAC_VER_03 },
		{ 0xfc8, 0x008,	RTL_GIGA_MAC_VER_02 },

		/* Catch-all */
		{ 0x000, 0x000,	RTL_GIGA_MAC_NONE   }
	};
	const struct rtl_mac_info *p = mac_info;
	/* Chip id lives in the top bits of TxConfig; only the low 12 bits
	 * of the shifted value are ever compared (masks are <= 0xfc8).
	 */
	u16 reg = RTL_R32(tp, TxConfig) >> 20;

	/* Terminates on the catch-all entry at the latest (mask 0). */
	while ((reg & p->mask) != p->val)
		p++;
	tp->mac_version = p->mac_version;

	if (tp->mac_version == RTL_GIGA_MAC_NONE) {
		dev_err(tp_to_dev(tp), "unknown chip XID %03x\n", reg & 0xfcf);
	} else if (!tp->supports_gmii) {
		/* Fast-Ethernet-only variants of some GbE chips are tracked
		 * under their own version codes.
		 */
		if (tp->mac_version == RTL_GIGA_MAC_VER_42)
			tp->mac_version = RTL_GIGA_MAC_VER_43;
		else if (tp->mac_version == RTL_GIGA_MAC_VER_45)
			tp->mac_version = RTL_GIGA_MAC_VER_47;
		else if (tp->mac_version == RTL_GIGA_MAC_VER_46)
			tp->mac_version = RTL_GIGA_MAC_VER_48;
	}
}
2266 
/* One PHY register write: register index and the value to store. */
struct phy_reg {
	u16 reg;
	u16 val;
};
2271 
2272 static void __rtl_writephy_batch(struct rtl8169_private *tp,
2273 				 const struct phy_reg *regs, int len)
2274 {
2275 	while (len-- > 0) {
2276 		rtl_writephy(tp, regs->reg, regs->val);
2277 		regs++;
2278 	}
2279 }
2280 
2281 #define rtl_writephy_batch(tp, a) __rtl_writephy_batch(tp, a, ARRAY_SIZE(a))
2282 
2283 static void rtl_release_firmware(struct rtl8169_private *tp)
2284 {
2285 	if (tp->rtl_fw) {
2286 		rtl_fw_release_firmware(tp->rtl_fw);
2287 		kfree(tp->rtl_fw);
2288 		tp->rtl_fw = NULL;
2289 	}
2290 }
2291 
/* Write the previously loaded firmware to the chip, if one is present. */
static void rtl_apply_firmware(struct rtl8169_private *tp)
{
	/* TODO: release firmware if rtl_fw_write_firmware signals failure. */
	if (tp->rtl_fw)
		rtl_fw_write_firmware(tp, tp->rtl_fw);
}
2298 
2299 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2300 {
2301 	if (rtl_readphy(tp, reg) != val)
2302 		netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2303 	else
2304 		rtl_apply_firmware(tp);
2305 }
2306 
/* MAC-side EEE setup shared by the 8168 variants that support it. */
static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
{
	/* Adjust EEE LED frequency */
	if (tp->mac_version != RTL_GIGA_MAC_VER_38)
		RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);

	/* Set the two low ERI bits at 0x1b0 — presumably the MAC EEE
	 * enable bits; exact semantics not visible here.
	 */
	rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_1111, 0x0003);
}
2315 
/* PHY-side EEE setup for the 8168F family.  Register 0x1f selects the
 * PHY page; the specific bits set here are vendor-defined and not
 * documented publicly.
 */
static void rtl8168f_config_eee_phy(struct rtl8169_private *tp)
{
	struct phy_device *phydev = tp->phydev;

	phy_write(phydev, 0x1f, 0x0007);
	phy_write(phydev, 0x1e, 0x0020);
	phy_set_bits(phydev, 0x15, BIT(8));

	phy_write(phydev, 0x1f, 0x0005);
	phy_write(phydev, 0x05, 0x8b85);
	phy_set_bits(phydev, 0x06, BIT(13));

	/* Return to the default page. */
	phy_write(phydev, 0x1f, 0x0000);
}
2330 
/* PHY-side EEE setup for the 8168G family: set bit 4 of reg 0x11 on
 * page 0x0a43 (vendor-defined EEE enable, per the function's role).
 */
static void rtl8168g_config_eee_phy(struct rtl8169_private *tp)
{
	phy_modify_paged(tp->phydev, 0x0a43, 0x11, 0, BIT(4));
}
2335 
/* Vendor-supplied PHY initialization sequence for the RTL8169s.
 * Register 0x1f selects the PHY page; the values are opaque magic from
 * Realtek and must be written in exactly this order.
 */
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x06, 0x006e },
		{ 0x08, 0x0708 },
		{ 0x15, 0x4000 },
		{ 0x18, 0x65c7 },

		{ 0x1f, 0x0001 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x0000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf60 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x0077 },
		{ 0x04, 0x7800 },
		{ 0x04, 0x7000 },

		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf0f9 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xa000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x00bb },
		{ 0x04, 0xb800 },
		{ 0x04, 0xb000 },

		{ 0x03, 0xdf41 },
		{ 0x02, 0xdc60 },
		{ 0x01, 0x6340 },
		{ 0x00, 0x007d },
		{ 0x04, 0xd800 },
		{ 0x04, 0xd000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x100a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },

		{ 0x1f, 0x0000 },
		{ 0x0b, 0x0000 },
		{ 0x00, 0x9200 }
	};

	rtl_writephy_batch(tp, phy_reg_init);
}
2402 
/* Vendor-supplied PHY initialization for the RTL8169sb: a single write
 * on page 2, then back to the default page.
 */
static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x01, 0x90d0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init);
}
2413 
2414 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2415 {
2416 	struct pci_dev *pdev = tp->pci_dev;
2417 
2418 	if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2419 	    (pdev->subsystem_device != 0xe000))
2420 		return;
2421 
2422 	rtl_writephy(tp, 0x1f, 0x0001);
2423 	rtl_writephy(tp, 0x10, 0xf01b);
2424 	rtl_writephy(tp, 0x1f, 0x0000);
2425 }
2426 
/* Vendor-supplied PHY initialization for the RTL8169scd, followed by a
 * board-specific quirk.  Values are opaque Realtek magic; order matters.
 */
static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x14, 0xfb54 },
		{ 0x18, 0xf5c7 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init);

	rtl8169scd_hw_phy_config_quirk(tp);
}
2473 
/* Vendor-supplied PHY initialization for the RTL8169sce.
 * Values are opaque Realtek magic; order matters.
 */
static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0x8480 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x18, 0x67c7 },
		{ 0x04, 0x2000 },
		{ 0x03, 0x002f },
		{ 0x02, 0x4360 },
		{ 0x01, 0x0109 },
		{ 0x00, 0x3022 },
		{ 0x04, 0x2800 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init);
}
2526 
/* PHY initialization for the RTL8168bb: set bit 0 of reg 0x16 on page 1,
 * then apply a short write batch.
 */
static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0001);
	rtl_patchphy(tp, 0x16, 1 << 0);

	rtl_writephy_batch(tp, phy_reg_init);
}
2539 
/* PHY initialization for the RTL8168be/bf: one write on page 1. */
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init);
}
2550 
/* PHY initialization for the first RTL8168cp variant. */
static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0000 },
		{ 0x1d, 0x0f00 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x1ec8 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init);
}
2563 
/* PHY initialization for the second RTL8168cp variant: set bit 5 in
 * regs 0x14 and 0x0d on page 0, then apply a short write batch.
 */
static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);

	rtl_writephy_batch(tp, phy_reg_init);
}
2578 
/* PHY initialization for RTL8168C rev.1.  Opaque vendor values. */
static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1f, 0x0002 },
		{ 0x00, 0x88d4 },
		{ 0x01, 0x82b1 },
		{ 0x03, 0x7002 },
		{ 0x08, 0x9e30 },
		{ 0x09, 0x01f0 },
		{ 0x0a, 0x5500 },
		{ 0x0c, 0x00c8 },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xc096 },
		{ 0x16, 0x000a },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x09, 0x2000 },
		{ 0x09, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init);

	/* Additionally set bit 5 in regs 0x14 and 0x0d. */
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2607 
/* PHY initialization for RTL8168C rev.2.  Opaque vendor values. */
static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x0761 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init);

	/* Additionally set bit 0 of reg 0x16 and bit 5 of regs 0x14/0x0d. */
	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2635 
/* PHY initialization for RTL8168C rev.3.  Opaque vendor values. */
static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x5461 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init);

	/* Additionally set bit 0 of reg 0x16 and bit 5 of regs 0x14/0x0d. */
	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2657 
/* RTL8168C rev.4 uses exactly the same PHY setup as rev.3. */
static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168c_3_hw_phy_config(tp);
}
2662 
/* First PHY tuning batch shared by the 8168D rev.1/rev.2 setup paths. */
static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = {
	/* Channel Estimation */
	{ 0x1f, 0x0001 },
	{ 0x06, 0x4064 },
	{ 0x07, 0x2863 },
	{ 0x08, 0x059c },
	{ 0x09, 0x26b4 },
	{ 0x0a, 0x6a19 },
	{ 0x0b, 0xdcc8 },
	{ 0x10, 0xf06d },
	{ 0x14, 0x7f68 },
	{ 0x18, 0x7fd9 },
	{ 0x1c, 0xf0ff },
	{ 0x1d, 0x3d9c },
	{ 0x1f, 0x0003 },
	{ 0x12, 0xf49f },
	{ 0x13, 0x070b },
	{ 0x1a, 0x05ad },
	{ 0x14, 0x94c0 },

	/*
	 * Tx Error Issue
	 * Enhance line driver power
	 */
	{ 0x1f, 0x0002 },
	{ 0x06, 0x5561 },
	{ 0x1f, 0x0005 },
	{ 0x05, 0x8332 },
	{ 0x06, 0x5561 },

	/*
	 * Can not link to 1Gbps with bad cable
	 * Decrease SNR threshold form 21.07dB to 19.04dB
	 */
	{ 0x1f, 0x0001 },
	{ 0x17, 0x0cc0 },

	{ 0x1f, 0x0000 },
	{ 0x0d, 0xf880 }
};
2703 
/* Second shared 8168D batch, applied only when the efuse check in the
 * callers matches (efuse byte 0x01 == 0xb1).
 */
static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = {
	{ 0x1f, 0x0002 },
	{ 0x05, 0x669a },
	{ 0x1f, 0x0005 },
	{ 0x05, 0x8330 },
	{ 0x06, 0x669a },
	{ 0x1f, 0x0002 }
};
2712 
/* PHY initialization for RTL8168D rev.1.  Applies the shared tuning
 * batches, branches on an efuse byte, performs PLL fine tuning, and
 * finally uploads firmware if the PHY reports the expected state.
 */
static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_0);

	/*
	 * Rx Error Issue
	 * Fine Tune Switching regulator parameter
	 */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w0w1_phy(tp, 0x0b, 0x0010, 0x00ef);
	rtl_w0w1_phy(tp, 0x0c, 0xa200, 0x5d00);

	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
		int val;

		rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_1);

		val = rtl_readphy(tp, 0x0d);

		/* Walk reg 0x0d's low byte up to 0x6c in single steps. */
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x6662 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x6662 }
		};

		rtl_writephy_batch(tp, phy_reg_init);
	}

	/* RSET couple improve */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0d, 0x0300);
	rtl_patchphy(tp, 0x0f, 0x0010);

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	/* Upload firmware only if MII_EXPANSION reads back 0xbf00. */
	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);

	rtl_writephy(tp, 0x1f, 0x0000);
}
2774 
/* PHY initialization for RTL8168D rev.2.  Same structure as rev.1 but
 * without the Rx-error regulator tweak and with different magic values.
 */
static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_0);

	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
		int val;

		rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_1);

		val = rtl_readphy(tp, 0x0d);
		/* Walk reg 0x0d's low byte up to 0x6c in single steps. */
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x2642 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x2642 }
		};

		rtl_writephy_batch(tp, phy_reg_init);
	}

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000);

	/* Switching regulator Slew rate */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0f, 0x0017);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	/* Upload firmware only if MII_EXPANSION reads back 0xb300. */
	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);

	rtl_writephy(tp, 0x1f, 0x0000);
}
2826 
/* PHY initialization for RTL8168D rev.3: a single large write batch of
 * opaque vendor values; order matters.
 */
static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x10, 0x0008 },
		{ 0x0d, 0x006c },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0xa4d8 },
		{ 0x09, 0x281c },
		{ 0x07, 0x2883 },
		{ 0x0a, 0x6b35 },
		{ 0x1d, 0x3da4 },
		{ 0x1c, 0xeffd },
		{ 0x14, 0x7f52 },
		{ 0x18, 0x7fc6 },
		{ 0x08, 0x0601 },
		{ 0x06, 0x4063 },
		{ 0x10, 0xf074 },
		{ 0x1f, 0x0003 },
		{ 0x13, 0x0789 },
		{ 0x12, 0xf4bd },
		{ 0x1a, 0x04fd },
		{ 0x14, 0x84b0 },
		{ 0x1f, 0x0000 },
		{ 0x00, 0x9200 },

		{ 0x1f, 0x0005 },
		{ 0x01, 0x0340 },
		{ 0x1f, 0x0001 },
		{ 0x04, 0x4000 },
		{ 0x03, 0x1d21 },
		{ 0x02, 0x0c32 },
		{ 0x01, 0x0200 },
		{ 0x00, 0x5554 },
		{ 0x04, 0x4800 },
		{ 0x04, 0x4000 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0023 },
		{ 0x16, 0x0000 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init);
}
2887 
/* PHY initialization for RTL8168D rev.4: short batch plus setting
 * bit 5 of reg 0x0d.
 */
static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002d },
		{ 0x18, 0x0040 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init);
	rtl_patchphy(tp, 0x0d, 1 << 5);
}
2903 
/* PHY initialization for RTL8168E rev.1: firmware upload followed by a
 * series of vendor tuning sequences.  Inline comments name each block;
 * the register values themselves are opaque vendor magic.
 */
static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b80 },
		{ 0x06, 0xc896 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0001 },
		{ 0x0b, 0x6c20 },
		{ 0x07, 0x2872 },
		{ 0x1c, 0xefff },
		{ 0x1f, 0x0003 },
		{ 0x14, 0x6420 },
		{ 0x1f, 0x0000 },

		/* Update PFM & 10M TX idle timer */
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002f },
		{ 0x15, 0x1919 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init);

	/* DCO enable for 10M IDLE Power */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0023);
	rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* For impedance matching */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w0w1_phy(tp, 0x08, 0x8000, 0x7f00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w0w1_phy(tp, 0x18, 0x0050, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w0w1_phy(tp, 0x15, 0x0000, 0x1100);
	rtl_writephy(tp, 0x1f, 0x0006);
	rtl_writephy(tp, 0x00, 0x5a00);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);
}
2976 
2977 static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
2978 {
2979 	const u16 w[] = {
2980 		addr[0] | (addr[1] << 8),
2981 		addr[2] | (addr[3] << 8),
2982 		addr[4] | (addr[5] << 8)
2983 	};
2984 
2985 	rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, w[0] | (w[1] << 16));
2986 	rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, w[2]);
2987 	rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, w[0] << 16);
2988 	rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, w[1] | (w[2] << 16));
2989 }
2990 
/* PHY initialization for RTL8168E rev.2: firmware upload, vendor tuning
 * batches, EEE setup, green-mode settings, and a BIOS workaround that
 * re-feeds the MAC address into the GigaMAC registers.
 */
static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0004 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0002 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Green Setting */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b5b },
		{ 0x06, 0x9222 },
		{ 0x05, 0x8b6d },
		{ 0x06, 0x8000 },
		{ 0x05, 0x8b76 },
		{ 0x06, 0x8000 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init);

	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);

	/* improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl8168f_config_eee_phy(tp);
	rtl_enable_eee(tp);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w0w1_phy(tp, 0x19, 0x0001, 0x0000);
	rtl_w0w1_phy(tp, 0x10, 0x0400, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_w0w1_phy(tp, 0x01, 0x0100, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
	rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
}
3066 
/* PHY tuning common to the 8168F family; called from the per-revision
 * config functions after their firmware upload.
 */
static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
{
	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w0w1_phy(tp, 0x06, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);

	/* Improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl8168f_config_eee_phy(tp);
	rtl_enable_eee(tp);
}
3091 
/* PHY initialization for RTL8168F rev.1: firmware upload, vendor tuning
 * batch, shared 8168F setup, and a 2-pair detection tweak.
 */
static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00fb },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hiimpedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init);

	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3141 
/* PHY initialization for RTL8168F rev.2: only firmware upload plus the
 * shared 8168F setup; no extra tuning batch needed.
 */
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);
}
3148 
/* PHY initialization for the RTL8411: firmware, shared 8168F setup, then
 * this chip's own tuning batch and green-table/same-seed adjustments.
 * Note the batch is applied AFTER the 2-pair tweak, unlike 8168f_1.
 */
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00aa },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hiimpedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};


	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl_writephy_batch(tp, phy_reg_init);

	/* Modify green table for giga */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b54);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8b5d);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8a7c);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a7f);
	rtl_w0w1_phy(tp, 0x06, 0x0100, 0x0000);
	rtl_writephy(tp, 0x05, 0x8a82);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a85);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a88);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* uc same-seed solution */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w0w1_phy(tp, 0x06, 0x8000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3229 
3230 static void rtl8168g_disable_aldps(struct rtl8169_private *tp)
3231 {
3232 	phy_modify_paged(tp->phydev, 0x0a43, 0x10, BIT(2), 0);
3233 }
3234 
/* Shared 10M/ALDPS PHY adjustment used by the RTL8168G/EP config paths.
 * The sequence below switches to extension page 0x8084 via page 0x0a43
 * reg 0x13 and leaves the PHY back on page 0 — keep the order intact.
 */
static void rtl8168g_phy_adjust_10m_aldps(struct rtl8169_private *tp)
{
	struct phy_device *phydev = tp->phydev;

	phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0);
	phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6));
	/* Select page 0x0a43, point reg 0x13 at extension address 0x8084 */
	phy_write(phydev, 0x1f, 0x0a43);
	phy_write(phydev, 0x13, 0x8084);
	phy_clear_bits(phydev, 0x14, BIT(14) | BIT(13));
	phy_set_bits(phydev, 0x10, BIT(12) | BIT(1) | BIT(0));

	/* Restore page 0 */
	phy_write(phydev, 0x1f, 0x0000);
}
3248 
/* PHY setup for RTL8168G (mac_version 40): load firmware patch, then apply
 * a set of vendor register tweaks, and finally configure EEE.
 */
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
{
	int ret;

	rtl_apply_firmware(tp);

	/* NOTE(review): phy_read_paged() can return a negative errno; the
	 * BIT(8) test below would then act on the error value — presumably
	 * reads on these pages can't fail in practice, but worth confirming.
	 */
	ret = phy_read_paged(tp->phydev, 0x0a46, 0x10);
	if (ret & BIT(8))
		phy_modify_paged(tp->phydev, 0x0bcc, 0x12, BIT(15), 0);
	else
		phy_modify_paged(tp->phydev, 0x0bcc, 0x12, 0, BIT(15));

	ret = phy_read_paged(tp->phydev, 0x0a46, 0x13);
	if (ret & BIT(8))
		phy_modify_paged(tp->phydev, 0x0c41, 0x12, 0, BIT(1));
	else
		phy_modify_paged(tp->phydev, 0x0c41, 0x12, BIT(1), 0);

	/* Enable PHY auto speed down */
	phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));

	rtl8168g_phy_adjust_10m_aldps(tp);

	/* EEE auto-fallback function */
	phy_modify_paged(tp->phydev, 0x0a4b, 0x11, 0, BIT(2));

	/* Enable UC LPF tune function */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8012);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);

	phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));

	/* Improve SWR Efficiency */
	rtl_writephy(tp, 0x1f, 0x0bcd);
	rtl_writephy(tp, 0x14, 0x5065);
	rtl_writephy(tp, 0x14, 0xd065);
	rtl_writephy(tp, 0x1f, 0x0bc8);
	rtl_writephy(tp, 0x11, 0x5655);
	rtl_writephy(tp, 0x1f, 0x0bcd);
	rtl_writephy(tp, 0x14, 0x1065);
	rtl_writephy(tp, 0x14, 0x9065);
	rtl_writephy(tp, 0x14, 0x1065);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl8168g_disable_aldps(tp);
	rtl8168g_config_eee_phy(tp);
	rtl_enable_eee(tp);
}
3298 
/* PHY setup for RTL8168G rev.2 and derivatives (mac_versions 42-44):
 * firmware patch plus EEE configuration only — no extra register tweaks.
 */
static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);
	rtl8168g_config_eee_phy(tp);
	rtl_enable_eee(tp);
}
3305 
/* PHY setup for RTL8168H (mac_versions 45/47): channel-estimation tuning,
 * R-tune/PGA-retune enable, ADC tweak, then EEE configuration.
 * The register values are vendor magic; preserve the exact sequence.
 */
static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
{
	u16 dout_tapbin;
	u32 data;

	rtl_apply_firmware(tp);

	/* CHN EST parameters adjust - giga master */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x809b);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0xf800);
	rtl_writephy(tp, 0x13, 0x80a2);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0xff00);
	rtl_writephy(tp, 0x13, 0x80a4);
	rtl_w0w1_phy(tp, 0x14, 0x8500, 0xff00);
	rtl_writephy(tp, 0x13, 0x809c);
	rtl_w0w1_phy(tp, 0x14, 0xbd00, 0xff00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* CHN EST parameters adjust - giga slave */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x80ad);
	rtl_w0w1_phy(tp, 0x14, 0x7000, 0xf800);
	rtl_writephy(tp, 0x13, 0x80b4);
	rtl_w0w1_phy(tp, 0x14, 0x5000, 0xff00);
	rtl_writephy(tp, 0x13, 0x80ac);
	rtl_w0w1_phy(tp, 0x14, 0x4000, 0xff00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* CHN EST parameters adjust - fnet */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x808e);
	rtl_w0w1_phy(tp, 0x14, 0x1200, 0xff00);
	rtl_writephy(tp, 0x13, 0x8090);
	rtl_w0w1_phy(tp, 0x14, 0xe500, 0xff00);
	rtl_writephy(tp, 0x13, 0x8092);
	rtl_w0w1_phy(tp, 0x14, 0x9f00, 0xff00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* enable R-tune & PGA-retune function */
	/* Build a 4-bit tap value from page 0x0a46 regs 0x13[1:0] and
	 * 0x12[15:14], XOR it with 0x08, invert, and place it in the top
	 * nibble for the 0x827a..0x827d writes below.
	 */
	dout_tapbin = 0;
	rtl_writephy(tp, 0x1f, 0x0a46);
	data = rtl_readphy(tp, 0x13);
	data &= 3;
	data <<= 2;
	dout_tapbin |= data;
	data = rtl_readphy(tp, 0x12);
	data &= 0xc000;
	data >>= 14;
	dout_tapbin |= data;
	dout_tapbin = ~(dout_tapbin^0x08);
	dout_tapbin <<= 12;
	dout_tapbin &= 0xf000;
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x827a);
	rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
	rtl_writephy(tp, 0x13, 0x827b);
	rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
	rtl_writephy(tp, 0x13, 0x827c);
	rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
	rtl_writephy(tp, 0x13, 0x827d);
	rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);

	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x0811);
	rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0a42);
	rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* enable GPHY 10M */
	phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));

	/* SAR ADC performance */
	phy_modify_paged(tp->phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14));

	/* Clear bits [13:12] of reg 0x14 at extension addresses
	 * 0x803f..0x806f (stride 8) on page 0x0a43. */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x803f);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
	rtl_writephy(tp, 0x13, 0x8047);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
	rtl_writephy(tp, 0x13, 0x804f);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
	rtl_writephy(tp, 0x13, 0x8057);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
	rtl_writephy(tp, 0x13, 0x805f);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
	rtl_writephy(tp, 0x13, 0x8067);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
	rtl_writephy(tp, 0x13, 0x806f);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* disable phy pfm mode */
	phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0);

	rtl8168g_disable_aldps(tp);
	rtl8168g_config_eee_phy(tp);
	rtl_enable_eee(tp);
}
3406 
/* PHY setup for RTL8168H rev.2 (mac_versions 46/48): firmware patch,
 * channel-estimation update, IOFFSET calibration read back through the
 * MAC OCP registers, TX LPF corner-frequency adjustment and EEE config.
 */
static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
{
	u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0;
	u16 rlen;
	u32 data;

	rtl_apply_firmware(tp);

	/* CHIN EST parameter update */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x808a);
	rtl_w0w1_phy(tp, 0x14, 0x000a, 0x003f);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* enable R-tune & PGA-retune function */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x0811);
	rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0a42);
	rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* enable GPHY 10M */
	phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));

	/* Gather the four 4-bit per-pair IOFFSET values from MAC OCP regs
	 * 0xdd00/0xdd02 after triggering a read with the 0x807d write. */
	r8168_mac_ocp_write(tp, 0xdd02, 0x807d);
	data = r8168_mac_ocp_read(tp, 0xdd02);
	ioffset_p3 = ((data & 0x80)>>7);
	ioffset_p3 <<= 3;

	data = r8168_mac_ocp_read(tp, 0xdd00);
	ioffset_p3 |= ((data & (0xe000))>>13);
	ioffset_p2 = ((data & (0x1e00))>>9);
	ioffset_p1 = ((data & (0x01e0))>>5);
	ioffset_p0 = ((data & 0x0010)>>4);
	ioffset_p0 <<= 3;
	ioffset_p0 |= (data & (0x07));
	data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0);

	/* Only program the PHY when at least one nibble differs from the
	 * all-ones (uncalibrated) pattern. */
	if ((ioffset_p3 != 0x0f) || (ioffset_p2 != 0x0f) ||
	    (ioffset_p1 != 0x0f) || (ioffset_p0 != 0x0f)) {
		rtl_writephy(tp, 0x1f, 0x0bcf);
		rtl_writephy(tp, 0x16, data);
		rtl_writephy(tp, 0x1f, 0x0000);
	}

	/* Modify rlen (TX LPF corner frequency) level */
	rtl_writephy(tp, 0x1f, 0x0bcd);
	data = rtl_readphy(tp, 0x16);
	data &= 0x000f;
	rlen = 0;
	if (data > 3)
		rlen = data - 3;
	/* Replicate the 4-bit rlen value into all four nibbles */
	data = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12);
	rtl_writephy(tp, 0x17, data);
	/* NOTE(review): this page re-select looks redundant (page 0x0bcd is
	 * already active) — kept as-is since register write order is part
	 * of the vendor recipe. */
	rtl_writephy(tp, 0x1f, 0x0bcd);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* disable phy pfm mode */
	phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0);

	rtl8168g_disable_aldps(tp);
	rtl8168g_config_eee_phy(tp);
	rtl_enable_eee(tp);
}
3472 
/* PHY setup for RTL8168EP rev.1 (mac_version 49). No firmware patch is
 * loaded for this chip; only register tweaks and EEE configuration.
 */
static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
{
	/* Enable PHY auto speed down */
	phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));

	rtl8168g_phy_adjust_10m_aldps(tp);

	/* Enable EEE auto-fallback function */
	phy_modify_paged(tp->phydev, 0x0a4b, 0x11, 0, BIT(2));

	/* Enable UC LPF tune function */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8012);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* set rg_sel_sdm_rate */
	phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));

	rtl8168g_disable_aldps(tp);
	rtl8168g_config_eee_phy(tp);
	rtl_enable_eee(tp);
}
3496 
/* PHY setup for RTL8168EP rev.2 (mac_versions 50/51): ALDPS/10M tweak,
 * LPF tune, a long channel-estimation parameter table and PWM forcing.
 * Each rtl_w0w1_phy() below sets the high byte and clears the remaining
 * bits via the ~mask — values are vendor magic; keep the exact order.
 */
static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168g_phy_adjust_10m_aldps(tp);

	/* Enable UC LPF tune function */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8012);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Set rg_sel_sdm_rate */
	phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));

	/* Channel estimation parameters */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x80f3);
	rtl_w0w1_phy(tp, 0x14, 0x8b00, ~0x8bff);
	rtl_writephy(tp, 0x13, 0x80f0);
	rtl_w0w1_phy(tp, 0x14, 0x3a00, ~0x3aff);
	rtl_writephy(tp, 0x13, 0x80ef);
	rtl_w0w1_phy(tp, 0x14, 0x0500, ~0x05ff);
	rtl_writephy(tp, 0x13, 0x80f6);
	rtl_w0w1_phy(tp, 0x14, 0x6e00, ~0x6eff);
	rtl_writephy(tp, 0x13, 0x80ec);
	rtl_w0w1_phy(tp, 0x14, 0x6800, ~0x68ff);
	rtl_writephy(tp, 0x13, 0x80ed);
	rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff);
	rtl_writephy(tp, 0x13, 0x80f2);
	rtl_w0w1_phy(tp, 0x14, 0xf400, ~0xf4ff);
	rtl_writephy(tp, 0x13, 0x80f4);
	rtl_w0w1_phy(tp, 0x14, 0x8500, ~0x85ff);
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8110);
	rtl_w0w1_phy(tp, 0x14, 0xa800, ~0xa8ff);
	rtl_writephy(tp, 0x13, 0x810f);
	rtl_w0w1_phy(tp, 0x14, 0x1d00, ~0x1dff);
	rtl_writephy(tp, 0x13, 0x8111);
	rtl_w0w1_phy(tp, 0x14, 0xf500, ~0xf5ff);
	rtl_writephy(tp, 0x13, 0x8113);
	rtl_w0w1_phy(tp, 0x14, 0x6100, ~0x61ff);
	rtl_writephy(tp, 0x13, 0x8115);
	rtl_w0w1_phy(tp, 0x14, 0x9200, ~0x92ff);
	rtl_writephy(tp, 0x13, 0x810e);
	rtl_w0w1_phy(tp, 0x14, 0x0400, ~0x04ff);
	rtl_writephy(tp, 0x13, 0x810c);
	rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff);
	rtl_writephy(tp, 0x13, 0x810b);
	rtl_w0w1_phy(tp, 0x14, 0x5a00, ~0x5aff);
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x80d1);
	rtl_w0w1_phy(tp, 0x14, 0xff00, ~0xffff);
	rtl_writephy(tp, 0x13, 0x80cd);
	rtl_w0w1_phy(tp, 0x14, 0x9e00, ~0x9eff);
	rtl_writephy(tp, 0x13, 0x80d3);
	rtl_w0w1_phy(tp, 0x14, 0x0e00, ~0x0eff);
	rtl_writephy(tp, 0x13, 0x80d5);
	rtl_w0w1_phy(tp, 0x14, 0xca00, ~0xcaff);
	rtl_writephy(tp, 0x13, 0x80d7);
	rtl_w0w1_phy(tp, 0x14, 0x8400, ~0x84ff);

	/* Force PWM-mode */
	rtl_writephy(tp, 0x1f, 0x0bcd);
	rtl_writephy(tp, 0x14, 0x5065);
	rtl_writephy(tp, 0x14, 0xd065);
	rtl_writephy(tp, 0x1f, 0x0bc8);
	rtl_writephy(tp, 0x12, 0x00ed);
	rtl_writephy(tp, 0x1f, 0x0bcd);
	rtl_writephy(tp, 0x14, 0x1065);
	rtl_writephy(tp, 0x14, 0x9065);
	rtl_writephy(tp, 0x14, 0x1065);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl8168g_disable_aldps(tp);
	rtl8168g_config_eee_phy(tp);
	rtl_enable_eee(tp);
}
3573 
/* PHY setup for RTL8102E (mac_versions 07-09): set three individual bits
 * on page 0, then apply a small register table on page 3.
 */
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0003 },
		{ 0x08, 0x441d },
		{ 0x01, 0x9100 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x11, 1 << 12);
	rtl_patchphy(tp, 0x19, 1 << 13);
	rtl_patchphy(tp, 0x10, 1 << 15);

	rtl_writephy_batch(tp, phy_reg_init);
}
3590 
/* PHY setup for RTL8105E (mac_versions 29/30): ALDPS must be disabled
 * before the firmware (ram code) is loaded, then a short register table
 * is applied.
 */
static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0005 },
		{ 0x1a, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0004 },
		{ 0x1c, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x15, 0x7701 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	/* Give the PHY time to leave ALDPS before loading firmware */
	msleep(100);

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init);
}
3616 
/* PHY setup for RTL8402 (mac_version 37): disable ALDPS, load firmware,
 * then program the EEE-related ERI and PHY registers.
 */
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
	/* Disable ALDPS before setting firmware */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(20);

	rtl_apply_firmware(tp);

	/* EEE setting */
	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x10, 0x401f);
	rtl_writephy(tp, 0x19, 0x7030);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3633 
/* PHY setup for RTL8106E (mac_version 39): disable ALDPS, load firmware,
 * then clear two ERI registers around a small PHY register table.
 */
static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0004 },
		{ 0x10, 0xc07f },
		{ 0x19, 0x7030 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);

	rtl_apply_firmware(tp);

	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
	rtl_writephy_batch(tp, phy_reg_init);

	rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
}
3655 
3656 static void rtl_hw_phy_config(struct net_device *dev)
3657 {
3658 	static const rtl_generic_fct phy_configs[] = {
3659 		/* PCI devices. */
3660 		[RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config,
3661 		[RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config,
3662 		[RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config,
3663 		[RTL_GIGA_MAC_VER_05] = rtl8169scd_hw_phy_config,
3664 		[RTL_GIGA_MAC_VER_06] = rtl8169sce_hw_phy_config,
3665 		/* PCI-E devices. */
3666 		[RTL_GIGA_MAC_VER_07] = rtl8102e_hw_phy_config,
3667 		[RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config,
3668 		[RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config,
3669 		[RTL_GIGA_MAC_VER_10] = NULL,
3670 		[RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config,
3671 		[RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config,
3672 		[RTL_GIGA_MAC_VER_13] = NULL,
3673 		[RTL_GIGA_MAC_VER_14] = NULL,
3674 		[RTL_GIGA_MAC_VER_15] = NULL,
3675 		[RTL_GIGA_MAC_VER_16] = NULL,
3676 		[RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config,
3677 		[RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config,
3678 		[RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config,
3679 		[RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config,
3680 		[RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config,
3681 		[RTL_GIGA_MAC_VER_22] = rtl8168c_4_hw_phy_config,
3682 		[RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config,
3683 		[RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config,
3684 		[RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config,
3685 		[RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config,
3686 		[RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config,
3687 		[RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config,
3688 		[RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config,
3689 		[RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config,
3690 		[RTL_GIGA_MAC_VER_31] = NULL,
3691 		[RTL_GIGA_MAC_VER_32] = rtl8168e_1_hw_phy_config,
3692 		[RTL_GIGA_MAC_VER_33] = rtl8168e_1_hw_phy_config,
3693 		[RTL_GIGA_MAC_VER_34] = rtl8168e_2_hw_phy_config,
3694 		[RTL_GIGA_MAC_VER_35] = rtl8168f_1_hw_phy_config,
3695 		[RTL_GIGA_MAC_VER_36] = rtl8168f_2_hw_phy_config,
3696 		[RTL_GIGA_MAC_VER_37] = rtl8402_hw_phy_config,
3697 		[RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config,
3698 		[RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config,
3699 		[RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config,
3700 		[RTL_GIGA_MAC_VER_41] = NULL,
3701 		[RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config,
3702 		[RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config,
3703 		[RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config,
3704 		[RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config,
3705 		[RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config,
3706 		[RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config,
3707 		[RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config,
3708 		[RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config,
3709 		[RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
3710 		[RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
3711 	};
3712 	struct rtl8169_private *tp = netdev_priv(dev);
3713 
3714 	if (phy_configs[tp->mac_version])
3715 		phy_configs[tp->mac_version](tp);
3716 }
3717 
/* Queue the driver's deferred work if @flag was not already pending;
 * test_and_set_bit() makes the check-and-schedule race-free.
 */
static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
{
	if (!test_and_set_bit(flag, tp->wk.flags))
		schedule_work(&tp->wk.work);
}
3723 
/* Run the chip-specific PHY configuration, apply PCI config quirks for
 * the oldest (8169) chips, and soft-reset the PHY.
 */
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
	rtl_hw_phy_config(dev);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
		pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
		netif_dbg(tp, drv, dev,
			  "Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(tp, 0x82, 0x01);
	}

	/* We may have called phy_speed_down before */
	phy_speed_up(tp->phydev);

	genphy_soft_reset(tp->phydev);
}
3741 
/* Program the unicast MAC address into the MAC0/MAC4 registers.
 * Config registers must be unlocked for the write to take effect; the
 * reads after each write flush the posted MMIO writes. MAC4 (upper two
 * bytes) is written before MAC0 — keep that order.
 */
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
{
	rtl_lock_work(tp);

	rtl_unlock_config_regs(tp);

	RTL_W32(tp, MAC4, addr[4] | addr[5] << 8);
	RTL_R32(tp, MAC4);

	RTL_W32(tp, MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
	RTL_R32(tp, MAC0);

	/* 8168E-VL additionally needs the address mirrored via EXGMAC */
	if (tp->mac_version == RTL_GIGA_MAC_VER_34)
		rtl_rar_exgmac_set(tp, addr);

	rtl_lock_config_regs(tp);

	rtl_unlock_work(tp);
}
3761 
3762 static int rtl_set_mac_address(struct net_device *dev, void *p)
3763 {
3764 	struct rtl8169_private *tp = netdev_priv(dev);
3765 	struct device *d = tp_to_dev(tp);
3766 	int ret;
3767 
3768 	ret = eth_mac_addr(dev, p);
3769 	if (ret)
3770 		return ret;
3771 
3772 	pm_runtime_get_noresume(d);
3773 
3774 	if (pm_runtime_active(d))
3775 		rtl_rar_set(tp, dev->dev_addr);
3776 
3777 	pm_runtime_put_noidle(d);
3778 
3779 	return 0;
3780 }
3781 
3782 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3783 {
3784 	struct rtl8169_private *tp = netdev_priv(dev);
3785 
3786 	if (!netif_running(dev))
3787 		return -ENODEV;
3788 
3789 	return phy_mii_ioctl(tp->phydev, ifr, cmd);
3790 }
3791 
/* On the listed chip versions, keep broadcast/multicast/unicast receive
 * enabled across suspend so Wake-on-LAN frames are still accepted.
 */
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_51:
		RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
		break;
	default:
		break;
	}
}
3810 
/* Power down the PLL for suspend/runtime-suspend. Skipped entirely when
 * a DASH management agent is active (it needs the NIC alive). If the
 * device may wake the system, only slow the PHY down and apply the WoL
 * receive quirk; otherwise clear PMCH bit 7 on chips that support it.
 */
static void rtl_pll_power_down(struct rtl8169_private *tp)
{
	if (r8168_check_dash(tp))
		return;

	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_33)
		rtl_ephy_write(tp, 0x19, 0xff64);

	if (device_may_wakeup(tp_to_dev(tp))) {
		phy_speed_down(tp->phydev, false);
		rtl_wol_suspend_quirk(tp);
		return;
	}

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_39:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
		break;
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_49:
		/* These chips additionally need ERI reg 0x1a8 bits cleared */
		rtl_eri_clear_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000);
		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
		break;
	default:
		break;
	}
}
3850 
/* Re-enable the PLL on resume (inverse of rtl_pll_power_down), resume
 * the PHY and give the hardware a short settling delay.
 */
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_39:
	case RTL_GIGA_MAC_VER_43:
		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80);
		break;
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		/* Newer chips set bits 7 and 6 of PMCH */
		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
		break;
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_49:
		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
		/* Undo the ERI 0x1a8 bits cleared on power-down */
		rtl_eri_set_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000);
		break;
	default:
		break;
	}

	phy_resume(tp->phydev);
	/* give MAC/PHY some time to resume */
	msleep(20);
}
3883 
/* Program the chip-family-specific baseline RxConfig value (FIFO
 * threshold / DMA burst / interrupt and multi-fragment options).
 * The accept-* filter bits are set later by rtl_set_rx_mode().
 */
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
	case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
		RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
		break;
	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
	case RTL_GIGA_MAC_VER_38:
		RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
		break;
	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
		RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
		break;
	default:
		RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
		break;
	}
}
3904 
3905 static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
3906 {
3907 	tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
3908 }
3909 
/* 8168C-family jumbo enable: set both jumbo bits and reduce the PCIe
 * max read request size to 512 bytes.
 */
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
{
	RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
	RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1);
	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B);
}
3916 
/* 8168C-family jumbo disable: inverse of r8168c_hw_jumbo_enable(),
 * restoring the 4096-byte PCIe read request size.
 */
static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
{
	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
	RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1);
	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
}
3923 
/* 8168DP jumbo enable: only the Config3 jumbo bit is needed. */
static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
{
	RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
}
3928 
/* 8168DP jumbo disable: clear the Config3 jumbo bit. */
static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
{
	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
}
3933 
/* 8168E-family jumbo enable: raise the max TX packet size, set the
 * jumbo bits and drop the PCIe read request size to 512 bytes.
 */
static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
{
	RTL_W8(tp, MaxTxPacketSize, 0x3f);
	RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
	RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01);
	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B);
}
3941 
/* 8168E-family jumbo disable: inverse of r8168e_hw_jumbo_enable(). */
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
{
	RTL_W8(tp, MaxTxPacketSize, 0x0c);
	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
	RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
}
3949 
/* 8168B rev.0 jumbo enable: PCIe tweak only (512B read request,
 * no-snoop enabled); no config register bits on this revision.
 */
static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp,
		PCI_EXP_DEVCTL_READRQ_512B | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
3955 
/* 8168B rev.0 jumbo disable: restore the 4096-byte PCIe read request. */
static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp,
		PCI_EXP_DEVCTL_READRQ_4096B | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
3961 
/* 8168B rev.1 jumbo enable: rev.0 PCIe tweak plus Config4 bit 0. */
static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
{
	r8168b_0_hw_jumbo_enable(tp);

	RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0));
}
3968 
/* 8168B rev.1 jumbo disable: inverse of r8168b_1_hw_jumbo_enable(). */
static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
{
	r8168b_0_hw_jumbo_disable(tp);

	RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
}
3975 
/* Enable jumbo frame support using the chip-appropriate helper.
 * Config registers are unlocked for the duration of the change;
 * unsupported chips fall through with no action.
 */
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
	rtl_unlock_config_regs(tp);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		r8168b_0_hw_jumbo_enable(tp);
		break;
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		r8168b_1_hw_jumbo_enable(tp);
		break;
	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
		r8168c_hw_jumbo_enable(tp);
		break;
	case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
		r8168dp_hw_jumbo_enable(tp);
		break;
	case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_34:
		r8168e_hw_jumbo_enable(tp);
		break;
	default:
		break;
	}
	rtl_lock_config_regs(tp);
}
4001 
/* Disable jumbo frame support; mirror image of rtl_hw_jumbo_enable(). */
static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
	rtl_unlock_config_regs(tp);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		r8168b_0_hw_jumbo_disable(tp);
		break;
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		r8168b_1_hw_jumbo_disable(tp);
		break;
	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
		r8168c_hw_jumbo_disable(tp);
		break;
	case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
		r8168dp_hw_jumbo_disable(tp);
		break;
	case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_34:
		r8168e_hw_jumbo_disable(tp);
		break;
	default:
		break;
	}
	rtl_lock_config_regs(tp);
}
4027 
/* Poll condition: true while the self-clearing CmdReset bit is set. */
DECLARE_RTL_COND(rtl_chipcmd_cond)
{
	return RTL_R8(tp, ChipCmd) & CmdReset;
}
4032 
/* Issue a chip software reset and wait (100us x 100 polls) for the
 * hardware to clear the CmdReset bit.
 */
static void rtl_hw_reset(struct rtl8169_private *tp)
{
	RTL_W8(tp, ChipCmd, CmdReset);

	rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
4039 
4040 static void rtl_request_firmware(struct rtl8169_private *tp)
4041 {
4042 	struct rtl_fw *rtl_fw;
4043 
4044 	/* firmware loaded already or no firmware available */
4045 	if (tp->rtl_fw || !tp->fw_name)
4046 		return;
4047 
4048 	rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
4049 	if (!rtl_fw) {
4050 		netif_warn(tp, ifup, tp->dev, "Unable to load firmware, out of memory\n");
4051 		return;
4052 	}
4053 
4054 	rtl_fw->phy_write = rtl_writephy;
4055 	rtl_fw->phy_read = rtl_readphy;
4056 	rtl_fw->mac_mcu_write = mac_mcu_write;
4057 	rtl_fw->mac_mcu_read = mac_mcu_read;
4058 	rtl_fw->fw_name = tp->fw_name;
4059 	rtl_fw->dev = tp_to_dev(tp);
4060 
4061 	if (rtl_fw_request_firmware(rtl_fw))
4062 		kfree(rtl_fw);
4063 	else
4064 		tp->rtl_fw = rtl_fw;
4065 }
4066 
/* Stop accepting any packets by clearing all accept-* bits in RxConfig. */
static void rtl_rx_close(struct rtl8169_private *tp)
{
	RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
}
4071 
/* Poll condition: true while the normal-priority TX queue poll bit is set. */
DECLARE_RTL_COND(rtl_npq_cond)
{
	return RTL_R8(tp, TxPoll) & NPQ;
}
4076 
/* Poll condition: true once the TX FIFO reports empty via TxConfig. */
DECLARE_RTL_COND(rtl_txcfg_empty_cond)
{
	return RTL_R32(tp, TxConfig) & TXCFG_EMPTY;
}
4081 
/* Quiesce and reset the chip: mask interrupts, stop the receiver, wait
 * for in-flight TX to drain in a chip-specific way, then soft-reset.
 */
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
	/* Disable interrupts */
	rtl8169_irq_mask_and_ack(tp);

	rtl_rx_close(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		/* DP chips: wait for the NPQ poll bit to clear */
		rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
		break;
	case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
		/* Newer chips: request stop and wait for TX FIFO empty */
		RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
		rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
		break;
	default:
		/* No drain indication available; fixed delay */
		RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
		udelay(100);
		break;
	}

	rtl_hw_reset(tp);
}
4108 
/* Program TxConfig: DMA burst size and inter-frame gap, plus automatic
 * FIFO mode on the chips that support it (VER_34+ except VER_39).
 */
static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
{
	u32 val = TX_DMA_BURST << TxDMAShift |
		  InterFrameGap << TxInterFrameGapShift;

	if (tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_39)
		val |= TXCFG_AUTO_FIFO;

	RTL_W32(tp, TxConfig, val);
}
4120 
/* Set RxMaxSize just above the RX buffer size so hardware size
 * filtering is effectively disabled.
 */
static void rtl_set_rx_max_size(struct rtl8169_private *tp)
{
	/* Low hurts. Let's disable the filtering. */
	RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1);
}
4126 
/* Write the 64-bit DMA addresses of the TX and RX descriptor rings,
 * split across high/low 32-bit register pairs.
 */
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp)
{
	/*
	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
	RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
	RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
	RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
	RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}
4139 
4140 static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version)
4141 {
4142 	u32 val;
4143 
4144 	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
4145 		val = 0x000fff00;
4146 	else if (tp->mac_version == RTL_GIGA_MAC_VER_06)
4147 		val = 0x00ffff00;
4148 	else
4149 		return;
4150 
4151 	if (RTL_R8(tp, Config2) & PCI_Clock_66MHz)
4152 		val |= 0xff;
4153 
4154 	RTL_W32(tp, 0x7c, val);
4155 }
4156 
/* ndo_set_rx_mode hook: compute the accept-* RxConfig bits and the
 * 64-bit multicast hash filter from the netdev flags and MC list,
 * then program MAR0/MAR4 and RxConfig.
 */
static void rtl_set_rx_mode(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	u32 mc_filter[2];	/* Multicast hash filter */
	int rx_mode;
	u32 tmp = 0;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct netdev_hw_addr *ha;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		netdev_for_each_mc_addr(ha, dev) {
			/* Top 6 CRC bits select one of 64 hash buckets */
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	if (dev->features & NETIF_F_RXALL)
		rx_mode |= (AcceptErr | AcceptRunt);

	tmp = (RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;

	/* Chips after VER_06 expect the hash words byte-swapped/swapped */
	if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
		u32 data = mc_filter[0];

		mc_filter[0] = swab32(mc_filter[1]);
		mc_filter[1] = swab32(data);
	}

	/* VER_35: hash filtering bypassed, accept all multicasts —
	 * presumably a hardware quirk workaround; TODO confirm. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_35)
		mc_filter[1] = mc_filter[0] = 0xffffffff;

	RTL_W32(tp, MAR0 + 4, mc_filter[1]);
	RTL_W32(tp, MAR0 + 0, mc_filter[0]);

	RTL_W32(tp, RxConfig, tmp);
}
4208 
/* Common chip bring-up: run the per-chip hw_start hook with the config
 * registers unlocked, then program descriptor/config registers, enable
 * TX/RX, set the RX filter and finally enable interrupts.
 */
static void rtl_hw_start(struct  rtl8169_private *tp)
{
	rtl_unlock_config_regs(tp);

	/* chip-specific configuration (one of the rtl_hw_start_* below) */
	tp->hw_start(tp);

	rtl_set_rx_max_size(tp);
	rtl_set_rx_tx_desc_registers(tp);
	rtl_lock_config_regs(tp);

	/* disable interrupt coalescing */
	RTL_W16(tp, IntrMitigate, 0x0000);
	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R8(tp, IntrMask);
	RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
	rtl_init_rxcfg(tp);
	rtl_set_tx_config_registers(tp);

	rtl_set_rx_mode(tp->dev);
	/* no early-rx interrupts */
	RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
	rtl_irq_enable(tp);
}
4232 
/* hw_start hook for the original RTL8169 family (VER_01..VER_06). */
static void rtl_hw_start_8169(struct rtl8169_private *tp)
{
	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);

	RTL_W8(tp, EarlyTxThres, NoEarlyTx);

	tp->cp_cmd |= PCIMulRW;

	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		netif_dbg(tp, drv, tp->dev,
			  "Set MAC Reg C+CR Offset 0xe0. Bit 3 and Bit 14 MUST be 1\n");
		tp->cp_cmd |= (1 << 14);
	}

	RTL_W16(tp, CPlusCmd, tp->cp_cmd);

	rtl8169_set_magic_reg(tp, tp->mac_version);

	/* clear the missed-packet counter */
	RTL_W32(tp, RxMissed, 0);
}
4255 
/* Poll condition for CSI access: CSIAR_FLAG signals completion. */
DECLARE_RTL_COND(rtl_csiar_cond)
{
	return RTL_R32(tp, CSIAR) & CSIAR_FLAG;
}
4260 
/* Write one 32-bit value at CSI address @addr via the CSIDR/CSIAR
 * register pair, then busy-wait (10 us x 100) for completion.
 */
static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	u32 func = PCI_FUNC(tp->pci_dev->devfn);

	RTL_W32(tp, CSIDR, value);
	RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE | func << 16);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
4271 
/* Read one 32-bit value from CSI address @addr.
 * Returns the value, or ~0 if the access does not complete in time.
 */
static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
{
	u32 func = PCI_FUNC(tp->pci_dev->devfn);

	RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 |
		CSIAR_BYTE_ENABLE);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(tp, CSIDR) : ~0;
}
4282 
/* Set the byte at PCI extended config offset 0x070f to @val, using
 * native config access when possible and CSI otherwise.
 */
static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val)
{
	struct pci_dev *pdev = tp->pci_dev;
	u32 csi;

	/* According to Realtek the value at config space address 0x070f
	 * controls the L0s/L1 entrance latency. We try standard ECAM access
	 * first and if it fails fall back to CSI.
	 */
	if (pdev->cfg_size > 0x070f &&
	    pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL)
		return;

	netdev_notice_once(tp->dev,
		"No native access to PCI extended config space, falling back to CSI\n");
	/* CSI works on 32-bit words: 0x070f is the top byte of 0x070c */
	csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
	rtl_csi_write(tp, 0x070c, csi | val << 24);
}
4301 
/* Set the default ASPM entry latency value (0x27) at config 0x070f. */
static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x27);
}
4306 
/* One EPHY fixup entry: at register @offset, clear the bits in @mask
 * and set the bits in @bits.
 */
struct ephy_info {
	unsigned int offset;	/* EPHY register index */
	u16 mask;		/* bits to clear */
	u16 bits;		/* bits to set */
};
4312 
4313 static void __rtl_ephy_init(struct rtl8169_private *tp,
4314 			    const struct ephy_info *e, int len)
4315 {
4316 	u16 w;
4317 
4318 	while (len-- > 0) {
4319 		w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
4320 		rtl_ephy_write(tp, e->offset, w);
4321 		e++;
4322 	}
4323 }
4324 
4325 #define rtl_ephy_init(tp, a) __rtl_ephy_init(tp, a, ARRAY_SIZE(a))
4326 
/* Clear the CLKREQ enable bit in the PCIe link control register. */
static void rtl_disable_clock_request(struct rtl8169_private *tp)
{
	pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL,
				   PCI_EXP_LNKCTL_CLKREQ_EN);
}

/* Set the CLKREQ enable bit in the PCIe link control register. */
static void rtl_enable_clock_request(struct rtl8169_private *tp)
{
	pcie_capability_set_word(tp->pci_dev, PCI_EXP_LNKCTL,
				 PCI_EXP_LNKCTL_CLKREQ_EN);
}

static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp)
{
	/* work around an issue when PCI reset occurs during L2/L3 state */
	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Rdy_to_L23);
}
4344 
/* Enable or disable ASPM (Config5) and clock request (Config2).
 * The two writes are deliberately in opposite order for enable vs.
 * disable; the udelay lets the new state settle.
 */
static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
{
	if (enable) {
		RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
		RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
	} else {
		RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
		RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
	}

	udelay(10);
}
4357 
/* Program the RX (ERI 0xc8) and TX (ERI 0xe8) FIFO sizes: static value
 * in the high half-word, dynamic value in the low half-word.
 */
static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat,
			      u16 tx_stat, u16 rx_dyn, u16 tx_dyn)
{
	/* Usage of dynamic vs. static FIFO is controlled by bit
	 * TXCFG_AUTO_FIFO. Exact meaning of FIFO values isn't known.
	 */
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, (rx_stat << 16) | rx_dyn);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, (tx_stat << 16) | tx_dyn);
}
4367 
/* Set the low/high FIFO watermarks (ERI 0xcc/0xd0) that trigger pause
 * flow control on 8168g-class chips.
 */
static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp,
					  u8 low, u8 high)
{
	/* FIFO thresholds for pause flow control */
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, low);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high);
}
4375 
/* Chip config for RTL8168B/8111B (VER_11, see hw_configs table). */
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);

	tp->cp_cmd &= CPCMD_QUIRK_MASK;
	RTL_W16(tp, CPlusCmd, tp->cp_cmd);

	/* tweak only applies at standard MTU; skipped for jumbo frames */
	if (tp->dev->mtu <= ETH_DATA_LEN) {
		rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B |
					 PCI_EXP_DEVCTL_NOSNOOP_EN);
	}
}
4388 
/* Chip config for VER_12/VER_17: 8168bb setup plus TX packet size and
 * a Config4 bit clear.
 */
static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
{
	rtl_hw_start_8168bb(tp);

	RTL_W8(tp, MaxTxPacketSize, TxPacketMax);

	RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
}
4397 
/* Setup shared by the 8168c/8168cp variants. */
static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
{
	RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down);

	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);

	rtl_disable_clock_request(tp);

	tp->cp_cmd &= CPCMD_QUIRK_MASK;
	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
}
4412 
/* Chip config for VER_18: EPHY fixups plus common 8168cp setup. */
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168cp[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0042 },
		{ 0x06, 0x0080,	0x0000 },
		{ 0x07, 0,	0x2000 }
	};

	rtl_set_def_aspm_entry_latency(tp);

	rtl_ephy_init(tp, e_info_8168cp);

	__rtl_hw_start_8168cp(tp);
}
4429 
/* Chip config for VER_23: no EPHY fixups, no clock-request disable. */
static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
{
	rtl_set_def_aspm_entry_latency(tp);

	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);

	tp->cp_cmd &= CPCMD_QUIRK_MASK;
	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
}
4442 
/* Chip config for VER_24. */
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
{
	rtl_set_def_aspm_entry_latency(tp);

	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);

	/* Magic. */
	RTL_W8(tp, DBG_REG, 0x20);

	RTL_W8(tp, MaxTxPacketSize, TxPacketMax);

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);

	tp->cp_cmd &= CPCMD_QUIRK_MASK;
	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
}
4460 
/* Chip config for VER_19: NAK fixups in DBG_REG plus EPHY tweaks. */
static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168c_1[] = {
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0002 },
		{ 0x06, 0x0080,	0x0000 }
	};

	rtl_set_def_aspm_entry_latency(tp);

	RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);

	rtl_ephy_init(tp, e_info_8168c_1);

	__rtl_hw_start_8168cp(tp);
}
4477 
/* Chip config for VER_20 (also reused by VER_21 via _8168c_3). */
static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168c_2[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x03, 0x0400,	0x0220 }
	};

	rtl_set_def_aspm_entry_latency(tp);

	rtl_ephy_init(tp, e_info_8168c_2);

	__rtl_hw_start_8168cp(tp);
}
4491 
/* Chip config for VER_21: identical to the 8168c_2 variant. */
static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8168c_2(tp);
}

/* Chip config for VER_22: common 8168cp setup, no EPHY fixups. */
static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
{
	rtl_set_def_aspm_entry_latency(tp);

	__rtl_hw_start_8168cp(tp);
}
4503 
/* Chip config for the 8168d family (VER_25/26/27). */
static void rtl_hw_start_8168d(struct rtl8169_private *tp)
{
	rtl_set_def_aspm_entry_latency(tp);

	rtl_disable_clock_request(tp);

	RTL_W8(tp, MaxTxPacketSize, TxPacketMax);

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);

	tp->cp_cmd &= CPCMD_QUIRK_MASK;
	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
}
4518 
/* Chip config for VER_31 (8168dp). */
static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
{
	rtl_set_def_aspm_entry_latency(tp);

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);

	RTL_W8(tp, MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(tp);
}
4530 
/* Chip config for VER_28: EPHY fixups and clock request enabled
 * (unlike the other 8168d variants, which disable it).
 */
static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168d_4[] = {
		{ 0x0b, 0x0000,	0x0048 },
		{ 0x19, 0x0020,	0x0050 },
		{ 0x0c, 0x0100,	0x0020 }
	};

	rtl_set_def_aspm_entry_latency(tp);

	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);

	RTL_W8(tp, MaxTxPacketSize, TxPacketMax);

	rtl_ephy_init(tp, e_info_8168d_4);

	rtl_enable_clock_request(tp);
}
4549 
/* Chip config for VER_32/33 (8168e). */
static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168e_1[] = {
		{ 0x00, 0x0200,	0x0100 },
		{ 0x00, 0x0000,	0x0004 },
		{ 0x06, 0x0002,	0x0001 },
		{ 0x06, 0x0000,	0x0030 },
		{ 0x07, 0x0000,	0x2000 },
		{ 0x00, 0x0000,	0x0020 },
		{ 0x03, 0x5800,	0x2000 },
		{ 0x03, 0x0000,	0x0001 },
		{ 0x01, 0x0800,	0x1000 },
		{ 0x07, 0x0000,	0x4000 },
		{ 0x1e, 0x0000,	0x2000 },
		{ 0x19, 0xffff,	0xfe6c },
		{ 0x0a, 0x0000,	0x0040 }
	};

	rtl_set_def_aspm_entry_latency(tp);

	rtl_ephy_init(tp, e_info_8168e_1);

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);

	RTL_W8(tp, MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(tp);

	/* Reset tx FIFO pointer */
	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST);
	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST);

	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
}
4585 
/* Chip config for VER_34 (8168e-vl): ERI/FIFO setup, EEE and ASPM. */
static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168e_2[] = {
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_set_def_aspm_entry_latency(tp);

	rtl_ephy_init(tp, e_info_8168e_2);

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
	rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060);
	rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4));
	rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00);

	RTL_W8(tp, MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(tp);

	/* take the MAC out of out-of-band mode */
	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);

	rtl8168_config_eee_mac(tp);

	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);

	rtl_hw_aspm_clkreq_enable(tp, true);
}
4622 
/* Setup shared by the 8168f variants and the 8411. */
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
	rtl_set_def_aspm_entry_latency(tp);

	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
	rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
	rtl_reset_packet_filter(tp);
	rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4));
	rtl_eri_set_bits(tp, 0x1d0, ERIAR_MASK_0001, BIT(4));
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060);

	RTL_W8(tp, MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(tp);

	/* take the MAC out of out-of-band mode */
	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);

	rtl8168_config_eee_mac(tp);
}
4649 
/* Chip config for VER_35/36 (8168f): common setup plus EPHY fixups. */
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0,	0x0020 },
		{ 0x08, 0x0001,	0x0002 },
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_hw_start_8168f(tp);

	rtl_ephy_init(tp, e_info_8168f_1);

	rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00);
}
4665 
4666 static void rtl_hw_start_8411(struct rtl8169_private *tp)
4667 {
4668 	static const struct ephy_info e_info_8168f_1[] = {
4669 		{ 0x06, 0x00c0,	0x0020 },
4670 		{ 0x0f, 0xffff,	0x5200 },
4671 		{ 0x1e, 0x0000,	0x4000 },
4672 		{ 0x19, 0x0000,	0x0224 }
4673 	};
4674 
4675 	rtl_hw_start_8168f(tp);
4676 	rtl_pcie_state_l2l3_disable(tp);
4677 
4678 	rtl_ephy_init(tp, e_info_8168f_1);
4679 
4680 	rtl_eri_set_bits(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00);
4681 }
4682 
/* Setup shared by the 8168g family (also used by 8411_2). */
static void rtl_hw_start_8168g(struct rtl8169_private *tp)
{
	rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
	rtl8168g_set_pause_thresholds(tp, 0x38, 0x48);

	rtl_set_def_aspm_entry_latency(tp);

	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);

	rtl_reset_packet_filter(tp);
	rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f);

	/* allow RX to proceed (clear RXDV gating) */
	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
	RTL_W8(tp, MaxTxPacketSize, EarlySize);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);

	rtl8168_config_eee_mac(tp);

	rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06);
	rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));

	rtl_pcie_state_l2l3_disable(tp);
}
4708 
/* Chip config for VER_40/41: 8168g setup plus EPHY fixups applied with
 * ASPM and clock request temporarily disabled.
 */
static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168g_1[] = {
		{ 0x00, 0x0000,	0x0008 },
		{ 0x0c, 0x37d0,	0x0820 },
		{ 0x1e, 0x0000,	0x0001 },
		{ 0x19, 0x8000,	0x0000 }
	};

	rtl_hw_start_8168g(tp);

	/* disable aspm and clock request before access ephy */
	rtl_hw_aspm_clkreq_enable(tp, false);
	rtl_ephy_init(tp, e_info_8168g_1);
	rtl_hw_aspm_clkreq_enable(tp, true);
}
4725 
4726 static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
4727 {
4728 	static const struct ephy_info e_info_8168g_2[] = {
4729 		{ 0x00, 0x0000,	0x0008 },
4730 		{ 0x0c, 0x3df0,	0x0200 },
4731 		{ 0x19, 0xffff,	0xfc00 },
4732 		{ 0x1e, 0xffff,	0x20eb }
4733 	};
4734 
4735 	rtl_hw_start_8168g(tp);
4736 
4737 	/* disable aspm and clock request before access ephy */
4738 	RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
4739 	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
4740 	rtl_ephy_init(tp, e_info_8168g_2);
4741 }
4742 
/* Chip config for VER_44 (RTL8411B): 8168g setup plus EPHY fixups
 * applied with ASPM and clock request temporarily disabled.
 */
static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8411_2[] = {
		{ 0x00, 0x0000,	0x0008 },
		{ 0x0c, 0x3df0,	0x0200 },
		{ 0x0f, 0xffff,	0x5200 },
		{ 0x19, 0x0020,	0x0000 },
		{ 0x1e, 0x0000,	0x2000 }
	};

	rtl_hw_start_8168g(tp);

	/* disable aspm and clock request before access ephy */
	rtl_hw_aspm_clkreq_enable(tp, false);
	rtl_ephy_init(tp, e_info_8411_2);
	rtl_hw_aspm_clkreq_enable(tp, true);
}
4760 
/* Chip config for the 8168h family (VER_45..48): EPHY fixups, FIFO and
 * ERI setup, then a series of PHY/OCP tuning writes. The statement
 * order follows the vendor init sequence — do not reorder.
 */
static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
{
	int rg_saw_cnt;
	u32 data;
	static const struct ephy_info e_info_8168h_1[] = {
		{ 0x1e, 0x0800,	0x0001 },
		{ 0x1d, 0x0000,	0x0800 },
		{ 0x05, 0xffff,	0x2089 },
		{ 0x06, 0xffff,	0x5881 },
		{ 0x04, 0xffff,	0x154a },
		{ 0x01, 0xffff,	0x068b }
	};

	/* disable aspm and clock request before access ephy */
	rtl_hw_aspm_clkreq_enable(tp, false);
	rtl_ephy_init(tp, e_info_8168h_1);

	rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
	rtl8168g_set_pause_thresholds(tp, 0x38, 0x48);

	rtl_set_def_aspm_entry_latency(tp);

	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);

	rtl_reset_packet_filter(tp);

	rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_1111, BIT(4));

	rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f00);

	rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);

	/* allow RX to proceed (clear RXDV gating) */
	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
	RTL_W8(tp, MaxTxPacketSize, EarlySize);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);

	rtl8168_config_eee_mac(tp);

	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
	RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);

	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);

	rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));

	rtl_pcie_state_l2l3_disable(tp);

	/* read the saw counter from PHY page 0x0c42, reg 0x13 */
	rtl_writephy(tp, 0x1f, 0x0c42);
	rg_saw_cnt = (rtl_readphy(tp, 0x13) & 0x3fff);
	rtl_writephy(tp, 0x1f, 0x0000);
	if (rg_saw_cnt > 0) {
		u16 sw_cnt_1ms_ini;

		/* derive the 1 ms software counter seed for OCP 0xd412 */
		sw_cnt_1ms_ini = 16000000/rg_saw_cnt;
		sw_cnt_1ms_ini &= 0x0fff;
		data = r8168_mac_ocp_read(tp, 0xd412);
		data &= ~0x0fff;
		data |= sw_cnt_1ms_ini;
		r8168_mac_ocp_write(tp, 0xd412, data);
	}

	/* vendor-specified read-modify-write tuning of OCP registers */
	data = r8168_mac_ocp_read(tp, 0xe056);
	data &= ~0xf0;
	data |= 0x70;
	r8168_mac_ocp_write(tp, 0xe056, data);

	data = r8168_mac_ocp_read(tp, 0xe052);
	data &= ~0x6000;
	data |= 0x8008;
	r8168_mac_ocp_write(tp, 0xe052, data);

	data = r8168_mac_ocp_read(tp, 0xe0d6);
	data &= ~0x01ff;
	data |= 0x017f;
	r8168_mac_ocp_write(tp, 0xe0d6, data);

	data = r8168_mac_ocp_read(tp, 0xd420);
	data &= ~0x0fff;
	data |= 0x047f;
	r8168_mac_ocp_write(tp, 0xd420, data);

	r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
	r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
	r8168_mac_ocp_write(tp, 0xc094, 0x0000);
	r8168_mac_ocp_write(tp, 0xc09e, 0x0000);

	rtl_hw_aspm_clkreq_enable(tp, true);
}
4851 
/* Setup shared by the 8168ep variants (VER_49..51). */
static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
{
	rtl8168ep_stop_cmac(tp);

	rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
	rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f);

	rtl_set_def_aspm_entry_latency(tp);

	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);

	rtl_reset_packet_filter(tp);

	rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f80);

	rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);

	/* allow RX to proceed (clear RXDV gating) */
	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
	RTL_W8(tp, MaxTxPacketSize, EarlySize);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);

	rtl8168_config_eee_mac(tp);

	rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06);

	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);

	rtl_pcie_state_l2l3_disable(tp);
}
4883 
/* Chip config for VER_49. */
static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168ep_1[] = {
		{ 0x00, 0xffff,	0x10ab },
		{ 0x06, 0xffff,	0xf030 },
		{ 0x08, 0xffff,	0x2006 },
		{ 0x0d, 0xffff,	0x1666 },
		{ 0x0c, 0x3ff0,	0x0000 }
	};

	/* disable aspm and clock request before access ephy */
	rtl_hw_aspm_clkreq_enable(tp, false);
	rtl_ephy_init(tp, e_info_8168ep_1);

	rtl_hw_start_8168ep(tp);

	rtl_hw_aspm_clkreq_enable(tp, true);
}
4902 
/* Chip config for VER_50. */
static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168ep_2[] = {
		{ 0x00, 0xffff,	0x10a3 },
		{ 0x19, 0xffff,	0xfc00 },
		{ 0x1e, 0xffff,	0x20ea }
	};

	/* disable aspm and clock request before access ephy */
	rtl_hw_aspm_clkreq_enable(tp, false);
	rtl_ephy_init(tp, e_info_8168ep_2);

	rtl_hw_start_8168ep(tp);

	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
	RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);

	rtl_hw_aspm_clkreq_enable(tp, true);
}
4922 
/* Chip config for VER_51: 8168ep setup plus OCP register tuning. */
static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
{
	u32 data;
	static const struct ephy_info e_info_8168ep_3[] = {
		{ 0x00, 0xffff,	0x10a3 },
		{ 0x19, 0xffff,	0x7c00 },
		{ 0x1e, 0xffff,	0x20eb },
		{ 0x0d, 0xffff,	0x1666 }
	};

	/* disable aspm and clock request before access ephy */
	rtl_hw_aspm_clkreq_enable(tp, false);
	rtl_ephy_init(tp, e_info_8168ep_3);

	rtl_hw_start_8168ep(tp);

	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
	RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);

	/* vendor-specified read-modify-write tuning of OCP registers */
	data = r8168_mac_ocp_read(tp, 0xd3e2);
	data &= 0xf000;
	data |= 0x0271;
	r8168_mac_ocp_write(tp, 0xd3e2, data);

	data = r8168_mac_ocp_read(tp, 0xd3e4);
	data &= 0xff00;
	r8168_mac_ocp_write(tp, 0xd3e4, data);

	data = r8168_mac_ocp_read(tp, 0xe860);
	data |= 0x0080;
	r8168_mac_ocp_write(tp, 0xe860, data);

	rtl_hw_aspm_clkreq_enable(tp, true);
}
4957 
/* Chip config for VER_07 (8102e). */
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8102e_1[] = {
		{ 0x01,	0, 0x6e65 },
		{ 0x02,	0, 0x091f },
		{ 0x03,	0, 0xc2f9 },
		{ 0x06,	0, 0xafb5 },
		{ 0x07,	0, 0x0e00 },
		{ 0x19,	0, 0xec80 },
		{ 0x01,	0, 0x2e65 },
		{ 0x01,	0, 0x6e65 }
	};
	u8 cfg1;

	rtl_set_def_aspm_entry_latency(tp);

	RTL_W8(tp, DBG_REG, FIX_NAK_1);

	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);

	RTL_W8(tp, Config1,
	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);

	/* if both LED bits stuck set, drop LEDS0 */
	cfg1 = RTL_R8(tp, Config1);
	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
		RTL_W8(tp, Config1, cfg1 & ~LEDS0);

	rtl_ephy_init(tp, e_info_8102e_1);
}
4988 
/* Chip config for VER_09 (8102e variant 2). */
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
	rtl_set_def_aspm_entry_latency(tp);

	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);

	RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
}

/* Chip config for VER_08: variant 2 plus one EPHY write. */
static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8102e_2(tp);

	rtl_ephy_write(tp, 0x03, 0xc2f9);
}
5005 
/* Chip config for VER_29 (8105e). */
static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8105e_1[] = {
		{ 0x07,	0, 0x4000 },
		{ 0x19,	0, 0x0200 },
		{ 0x19,	0, 0x0020 },
		{ 0x1e,	0, 0x2000 },
		{ 0x03,	0, 0x0001 },
		{ 0x19,	0, 0x0100 },
		{ 0x19,	0, 0x0004 },
		{ 0x0a,	0, 0x0020 }
	};

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);

	/* Disable Early Tally Counter */
	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) & ~0x010000);

	RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);

	rtl_ephy_init(tp, e_info_8105e_1);

	rtl_pcie_state_l2l3_disable(tp);
}
5032 
/* Chip config for VER_30: variant 1 plus EPHY 0x1e bit 15. */
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
{
	rtl_hw_start_8105e_1(tp);
	rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
}
5038 
/* Chip config for VER_37 (RTL8402). */
static void rtl_hw_start_8402(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8402[] = {
		{ 0x19,	0xffff, 0xff64 },
		{ 0x1e,	0, 0x4000 }
	};

	rtl_set_def_aspm_entry_latency(tp);

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);

	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);

	rtl_ephy_init(tp, e_info_8402);

	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);

	rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06);
	rtl_reset_packet_filter(tp);
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
	rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00);

	rtl_pcie_state_l2l3_disable(tp);
}
5065 
/* Chip config for VER_39 (RTL8106). */
static void rtl_hw_start_8106(struct rtl8169_private *tp)
{
	rtl_hw_aspm_clkreq_enable(tp, false);

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);

	RTL_W32(tp, MISC, (RTL_R32(tp, MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
	RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);

	rtl_pcie_state_l2l3_disable(tp);
	rtl_hw_aspm_clkreq_enable(tp, true);
}
5080 
/* Dispatch to the chip-specific config routine for tp->mac_version.
 * NULL entries (and the VER_01..06 range below VER_07) mean the chip
 * needs no extra configuration here.
 */
static void rtl_hw_config(struct rtl8169_private *tp)
{
	static const rtl_generic_fct hw_configs[] = {
		[RTL_GIGA_MAC_VER_07] = rtl_hw_start_8102e_1,
		[RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3,
		[RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
		[RTL_GIGA_MAC_VER_10] = NULL,
		[RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168bb,
		[RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168bef,
		[RTL_GIGA_MAC_VER_13] = NULL,
		[RTL_GIGA_MAC_VER_14] = NULL,
		[RTL_GIGA_MAC_VER_15] = NULL,
		[RTL_GIGA_MAC_VER_16] = NULL,
		[RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168bef,
		[RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
		[RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1,
		[RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2,
		[RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_3,
		[RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4,
		[RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2,
		[RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3,
		[RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d,
		[RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d,
		[RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d,
		[RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4,
		[RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1,
		[RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2,
		[RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168dp,
		[RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1,
		[RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1,
		[RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2,
		[RTL_GIGA_MAC_VER_35] = rtl_hw_start_8168f_1,
		[RTL_GIGA_MAC_VER_36] = rtl_hw_start_8168f_1,
		[RTL_GIGA_MAC_VER_37] = rtl_hw_start_8402,
		[RTL_GIGA_MAC_VER_38] = rtl_hw_start_8411,
		[RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106,
		[RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1,
		[RTL_GIGA_MAC_VER_41] = rtl_hw_start_8168g_1,
		[RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2,
		[RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2,
		[RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2,
		[RTL_GIGA_MAC_VER_45] = rtl_hw_start_8168h_1,
		[RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1,
		[RTL_GIGA_MAC_VER_47] = rtl_hw_start_8168h_1,
		[RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1,
		[RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1,
		[RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2,
		[RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
	};

	if (hw_configs[tp->mac_version])
		hw_configs[tp->mac_version](tp);
}
5134 
/* hw_start hook for the 8168 family. */
static void rtl_hw_start_8168(struct rtl8169_private *tp)
{
	RTL_W8(tp, MaxTxPacketSize, TxPacketMax);

	/* Workaround for RxFIFO overflow. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
		tp->irq_mask |= RxFIFOOver;
		tp->irq_mask &= ~RxOverflow;
	}

	rtl_hw_config(tp);
}
5147 
/* hw_start hook for the 8101 family. */
static void rtl_hw_start_8101(struct rtl8169_private *tp)
{
	if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
		tp->irq_mask &= ~RxFIFOOver;

	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_16)
		pcie_capability_set_word(tp->pci_dev, PCI_EXP_DEVCTL,
					 PCI_EXP_DEVCTL_NOSNOOP_EN);

	RTL_W8(tp, MaxTxPacketSize, TxPacketMax);

	tp->cp_cmd &= CPCMD_QUIRK_MASK;
	RTL_W16(tp, CPlusCmd, tp->cp_cmd);

	rtl_hw_config(tp);
}
5165 
5166 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
5167 {
5168 	struct rtl8169_private *tp = netdev_priv(dev);
5169 
5170 	if (new_mtu > ETH_DATA_LEN)
5171 		rtl_hw_jumbo_enable(tp);
5172 	else
5173 		rtl_hw_jumbo_disable(tp);
5174 
5175 	dev->mtu = new_mtu;
5176 	netdev_update_features(dev);
5177 
5178 	return 0;
5179 }
5180 
/* Poison an RX descriptor so the NIC will never use it: bogus DMA
 * address and DescOwn cleared.
 */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
5186 
/* Unmap and free one RX buffer, clear its slot and poison the
 * descriptor so the NIC cannot reuse it.
 */
static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
				     void **data_buff, struct RxDesc *desc)
{
	dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr),
			 R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);

	kfree(*data_buff);
	*data_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}
5197 
/* Hand an RX descriptor to the NIC: set DescOwn and the buffer size,
 * preserving the RingEnd marker.
 */
static inline void rtl8169_mark_to_asic(struct RxDesc *desc)
{
	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;

	/* Force memory writes to complete before releasing descriptor */
	dma_wmb();

	desc->opts1 = cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE);
}
5207 
5208 static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
5209 					     struct RxDesc *desc)
5210 {
5211 	void *data;
5212 	dma_addr_t mapping;
5213 	struct device *d = tp_to_dev(tp);
5214 	int node = dev_to_node(d);
5215 
5216 	data = kmalloc_node(R8169_RX_BUF_SIZE, GFP_KERNEL, node);
5217 	if (!data)
5218 		return NULL;
5219 
5220 	/* Memory should be properly aligned, but better check. */
5221 	if (!IS_ALIGNED((unsigned long)data, 8)) {
5222 		netdev_err_once(tp->dev, "RX buffer not 8-byte-aligned\n");
5223 		goto err_out;
5224 	}
5225 
5226 	mapping = dma_map_single(d, data, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
5227 	if (unlikely(dma_mapping_error(d, mapping))) {
5228 		if (net_ratelimit())
5229 			netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
5230 		goto err_out;
5231 	}
5232 
5233 	desc->addr = cpu_to_le64(mapping);
5234 	rtl8169_mark_to_asic(desc);
5235 	return data;
5236 
5237 err_out:
5238 	kfree(data);
5239 	return NULL;
5240 }
5241 
5242 static void rtl8169_rx_clear(struct rtl8169_private *tp)
5243 {
5244 	unsigned int i;
5245 
5246 	for (i = 0; i < NUM_RX_DESC; i++) {
5247 		if (tp->Rx_databuff[i]) {
5248 			rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
5249 					    tp->RxDescArray + i);
5250 		}
5251 	}
5252 }
5253 
/* Set RingEnd on a descriptor so the NIC wraps back to the ring start. */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
5258 
/* Populate the whole RX ring with freshly allocated, NIC-owned buffers.
 * On any allocation failure everything allocated so far is released.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int rtl8169_rx_fill(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		void *data;

		data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
		if (!data) {
			/* Poison the failed slot so rtl8169_rx_clear can
			 * safely skip it during rollback.
			 */
			rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
			goto err_out;
		}
		tp->Rx_databuff[i] = data;
	}

	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
	return 0;

err_out:
	rtl8169_rx_clear(tp);
	return -ENOMEM;
}
5281 
/* Reset ring indexes, clear the TX/RX bookkeeping arrays and fill the RX
 * ring. Returns 0 on success or a negative errno from rtl8169_rx_fill.
 */
static int rtl8169_init_ring(struct rtl8169_private *tp)
{
	rtl8169_init_ring_indexes(tp);

	memset(tp->tx_skb, 0, sizeof(tp->tx_skb));
	memset(tp->Rx_databuff, 0, sizeof(tp->Rx_databuff));

	return rtl8169_rx_fill(tp);
}
5291 
/* Unmap one TX buffer and scrub both the hardware descriptor and the
 * software ring_info entry (len == 0 marks the slot as free).
 */
static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
				 struct TxDesc *desc)
{
	unsigned int len = tx_skb->len;

	dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);

	desc->opts1 = 0x00;
	desc->opts2 = 0x00;
	desc->addr = 0x00;
	tx_skb->len = 0;
}
5304 
/* Release n TX ring entries starting at index 'start' (modulo ring size):
 * unmap their DMA buffers and free the skb attached to the entry that
 * carries it (only one fragment of a packet holds the skb pointer).
 */
static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
				   unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		unsigned int entry = (start + i) % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		unsigned int len = tx_skb->len;

		/* len == 0 means the slot holds no mapped buffer. */
		if (len) {
			struct sk_buff *skb = tx_skb->skb;

			rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb,
					     tp->TxDescArray + entry);
			if (skb) {
				dev_consume_skb_any(skb);
				tx_skb->skb = NULL;
			}
		}
	}
}
5327 
/* Drop every pending TX entry, rewind the ring indexes and reset the
 * byte-queue-limit accounting. Used on reset/teardown paths.
 */
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
	rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
	tp->cur_tx = tp->dirty_tx = 0;
	netdev_reset_queue(tp->dev);
}
5334 
/* Quiesce the datapath, reset the chip, reinitialize the rings and restart.
 * Runs from the rtl_task workqueue (e.g. after a TX timeout or PCI error).
 */
static void rtl_reset_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	int i;

	napi_disable(&tp->napi);
	netif_stop_queue(dev);
	/* Let any in-flight hard_start_xmit finish before touching rings. */
	synchronize_rcu();

	rtl8169_hw_reset(tp);

	/* Give all RX descriptors back to the NIC; buffers stay allocated. */
	for (i = 0; i < NUM_RX_DESC; i++)
		rtl8169_mark_to_asic(tp->RxDescArray + i);

	rtl8169_tx_clear(tp);
	rtl8169_init_ring_indexes(tp);

	napi_enable(&tp->napi);
	rtl_hw_start(tp);
	netif_wake_queue(dev);
}
5356 
/* ndo_tx_timeout hook: defer a full chip reset to the workqueue since a
 * reset cannot be performed from this (atomic) context.
 */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
5363 
5364 static __le32 rtl8169_get_txd_opts1(u32 opts0, u32 len, unsigned int entry)
5365 {
5366 	u32 status = opts0 | len;
5367 
5368 	if (entry == NUM_TX_DESC - 1)
5369 		status |= RingEnd;
5370 
5371 	return cpu_to_le32(status);
5372 }
5373 
/* Map the paged fragments of skb into consecutive TX descriptors following
 * tp->cur_tx. The skb pointer and LastFrag are attached to the final
 * fragment's entry. Returns the number of descriptors used (0 for a linear
 * skb), or -EIO after unwinding all mappings on a DMA mapping failure.
 * Note: the head of the packet (descriptor tp->cur_tx) is handled by the
 * caller, which also sets FirstFrag/DescOwn there.
 */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 *opts)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc *uninitialized_var(txd);
	struct device *d = tp_to_dev(tp);

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		const skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = skb_frag_size(frag);
		addr = skb_frag_address(frag);
		mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(d, mapping))) {
			if (net_ratelimit())
				netif_err(tp, drv, tp->dev,
					  "Failed to map TX fragments DMA!\n");
			goto err_out;
		}

		/* Fragment descriptors get DescOwn immediately; the NIC will
		 * not fetch them until the head descriptor is released.
		 */
		txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry);
		txd->opts2 = cpu_to_le32(opts[1]);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;

err_out:
	/* Unwind only the fragments mapped so far (head is caller's). */
	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
	return -EIO;
}
5420 
5421 static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
5422 {
5423 	return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
5424 }
5425 
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
				      struct net_device *dev);
/* r8169_csum_workaround()
 * The hw limits the value of the transport offset. When the offset is out of
 * range, perform the checksum (and, for GSO, the segmentation) in software
 * and resubmit the packet(s) through rtl8169_start_xmit.
 */
static void r8169_csum_workaround(struct rtl8169_private *tp,
				  struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		netdev_features_t features = tp->dev->features;
		struct sk_buff *segs, *nskb;

		/* Segment without the offloads the hw cannot apply here,
		 * then transmit every resulting segment individually.
		 */
		features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs) || !segs)
			goto drop;

		do {
			nskb = segs;
			segs = segs->next;
			nskb->next = NULL;
			rtl8169_start_xmit(nskb, tp->dev);
		} while (segs);

		/* The original skb has been replaced by its segments. */
		dev_consume_skb_any(skb);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Fill in the checksum in software and retransmit. */
		if (skb_checksum_help(skb) < 0)
			goto drop;

		rtl8169_start_xmit(skb, tp->dev);
	} else {
drop:
		tp->dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}
}
5463 
/* msdn_giant_send_check()
 * According to the document of microsoft, the TCP Pseudo Header excludes the
 * packet length for IPv6 TCP large packets.
 *
 * Rewrite the TCP checksum field with a pseudo-header checksum that omits
 * the length, as the hardware expects for IPv6 TSO. Returns 0 on success
 * or the negative errno from skb_cow_head.
 */
static int msdn_giant_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	int ret;

	/* Make the header writable before modifying th->check. */
	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);

	return ret;
}
5486 
/* Fill the first-generation (opts[0]-based) TSO/checksum-offload bits:
 * request large-send with the MSS for GSO skbs, otherwise request IP plus
 * TCP/UDP checksum insertion for CHECKSUM_PARTIAL skbs.
 */
static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
{
	u32 mss = skb_shinfo(skb)->gso_size;

	if (mss) {
		opts[0] |= TD_LSO;
		opts[0] |= min(mss, TD_MSS_MAX) << TD0_MSS_SHIFT;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			opts[0] |= TD0_IP_CS | TD0_TCP_CS;
		else if (ip->protocol == IPPROTO_UDP)
			opts[0] |= TD0_IP_CS | TD0_UDP_CS;
		else
			WARN_ON_ONCE(1);
	}
}
5505 
/* Fill the second-generation TSO/checksum-offload descriptor bits for IPv4
 * and IPv6. Returns true when the skb can be handed to the hardware as-is
 * (possibly after software padding/checksum fallback for the VER_34 runt
 * bug), false when the caller must fall back to r8169_csum_workaround
 * (transport offset exceeds what the hardware fields can encode).
 */
static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
				struct sk_buff *skb, u32 *opts)
{
	u32 transport_offset = (u32)skb_transport_offset(skb);
	u32 mss = skb_shinfo(skb)->gso_size;

	if (mss) {
		/* TSO path: offset must fit in the GTTCPHO field. */
		if (transport_offset > GTTCPHO_MAX) {
			netif_warn(tp, tx_err, tp->dev,
				   "Invalid transport offset 0x%x for TSO\n",
				   transport_offset);
			return false;
		}

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			opts[0] |= TD1_GTSENV4;
			break;

		case htons(ETH_P_IPV6):
			/* IPv6 TSO needs the length-less pseudo checksum. */
			if (msdn_giant_send_check(skb))
				return false;

			opts[0] |= TD1_GTSENV6;
			break;

		default:
			WARN_ON_ONCE(1);
			break;
		}

		opts[0] |= transport_offset << GTTCPHO_SHIFT;
		opts[1] |= min(mss, TD_MSS_MAX) << TD1_MSS_SHIFT;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 ip_protocol;

		/* VER_34 hw padding bug: do checksum + padding in software. */
		if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
			return !(skb_checksum_help(skb) || eth_skb_pad(skb));

		/* Checksum path: offset must fit in the TCPHO field. */
		if (transport_offset > TCPHO_MAX) {
			netif_warn(tp, tx_err, tp->dev,
				   "Invalid transport offset 0x%x\n",
				   transport_offset);
			return false;
		}

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			opts[1] |= TD1_IPv4_CS;
			ip_protocol = ip_hdr(skb)->protocol;
			break;

		case htons(ETH_P_IPV6):
			opts[1] |= TD1_IPv6_CS;
			ip_protocol = ipv6_hdr(skb)->nexthdr;
			break;

		default:
			/* Unknown L3 protocol: triggers the WARN below. */
			ip_protocol = IPPROTO_RAW;
			break;
		}

		if (ip_protocol == IPPROTO_TCP)
			opts[1] |= TD1_TCP_CS;
		else if (ip_protocol == IPPROTO_UDP)
			opts[1] |= TD1_UDP_CS;
		else
			WARN_ON_ONCE(1);

		opts[1] |= transport_offset << TCPHO_SHIFT;
	} else {
		/* No offload requested; only pad runts on buggy VER_34. */
		if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
			return !eth_skb_pad(skb);
	}

	return true;
}
5583 
5584 static bool rtl_tx_slots_avail(struct rtl8169_private *tp,
5585 			       unsigned int nr_frags)
5586 {
5587 	unsigned int slots_avail = tp->dirty_tx + NUM_TX_DESC - tp->cur_tx;
5588 
5589 	/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
5590 	return slots_avail > nr_frags;
5591 }
5592 
5593 /* Versions RTL8102e and from RTL8168c onwards support csum_v2 */
5594 static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp)
5595 {
5596 	switch (tp->mac_version) {
5597 	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
5598 	case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
5599 		return false;
5600 	default:
5601 		return true;
5602 	}
5603 }
5604 
/* ndo_start_xmit hook: map the skb head and fragments into the TX ring,
 * request the configured offloads, release the head descriptor to the NIC
 * and kick the transmit poll. The barrier choreography against rtl_tx()
 * (queue stop/wake) is documented inline — be careful when touching it.
 */
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	struct device *d = tp_to_dev(tp);
	dma_addr_t mapping;
	u32 opts[2], len;
	int frags;

	/* The queue should have been stopped before the ring filled up. */
	if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) {
		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_stop_0;
	}

	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop_0;

	opts[1] = rtl8169_tx_vlan_tag(skb);
	opts[0] = DescOwn;

	if (rtl_chip_supports_csum_v2(tp)) {
		if (!rtl8169_tso_csum_v2(tp, skb, opts)) {
			/* Offload limits exceeded: software fallback. */
			r8169_csum_workaround(tp, skb);
			return NETDEV_TX_OK;
		}
	} else {
		rtl8169_tso_csum_v1(skb, opts);
	}

	len = skb_headlen(skb);
	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
		goto err_dma_0;
	}

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);

	/* Map the paged fragments; the skb pointer lands on the entry that
	 * owns it (last fragment, or the head for a linear skb).
	 */
	frags = rtl8169_xmit_frags(tp, skb, opts);
	if (frags < 0)
		goto err_dma_1;
	else if (frags)
		opts[0] |= FirstFrag;
	else {
		opts[0] |= FirstFrag | LastFrag;
		tp->tx_skb[entry].skb = skb;
	}

	txd->opts2 = cpu_to_le32(opts[1]);

	netdev_sent_queue(dev, skb->len);

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before releasing descriptor */
	dma_wmb();

	/* Setting DescOwn on the head descriptor hands the whole packet
	 * (head plus fragments) to the NIC.
	 */
	txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry);

	/* Force all memory writes to complete before notifying device */
	wmb();

	tp->cur_tx += frags + 1;

	RTL_W8(tp, TxPoll, NPQ);

	if (!rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
		 * not miss a ring update when it notices a stopped queue.
		 */
		smp_wmb();
		netif_stop_queue(dev);
		/* Sync with rtl_tx:
		 * - publish queue status and cur_tx ring index (write barrier)
		 * - refresh dirty_tx ring index (read barrier).
		 * May the current thread have a pessimistic view of the ring
		 * status and forget to wake up queue, a racing rtl_tx thread
		 * can't.
		 */
		smp_mb();
		if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS))
			netif_start_queue(dev);
	}

	return NETDEV_TX_OK;

err_dma_1:
	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
	dev_kfree_skb_any(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;

err_stop_0:
	netif_stop_queue(dev);
	dev->stats.tx_dropped++;
	return NETDEV_TX_BUSY;
}
5707 
/* Handle a SYSErr interrupt: log the PCI command/status registers, clear
 * the latched error bits and schedule a full chip reset.
 */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
		  pci_cmd, pci_status);

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	/* PCI status error bits are write-1-to-clear. */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
5742 
/* TX completion path (called from NAPI poll): reclaim descriptors the NIC
 * has released, free their skbs, update stats/BQL and wake the queue when
 * room becomes available again. Barrier pairing with rtl8169_start_xmit is
 * documented inline.
 */
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
		   int budget)
{
	unsigned int dirty_tx, tx_left, bytes_compl = 0, pkts_compl = 0;

	dirty_tx = tp->dirty_tx;
	/* Read dirty_tx before cur_tx (pairs with xmit's smp_wmb/smp_mb). */
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 status;

		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		if (status & DescOwn)
			break;

		/* This barrier is needed to keep us from reading
		 * any other fields out of the Tx descriptor until
		 * we know the status of DescOwn
		 */
		dma_rmb();

		rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb,
				     tp->TxDescArray + entry);
		/* Only the entry carrying LastFrag owns the skb. */
		if (status & LastFrag) {
			pkts_compl++;
			bytes_compl += tx_skb->skb->len;
			napi_consume_skb(tx_skb->skb, budget);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	if (tp->dirty_tx != dirty_tx) {
		netdev_completed_queue(dev, pkts_compl, bytes_compl);

		u64_stats_update_begin(&tp->tx_stats.syncp);
		tp->tx_stats.packets += pkts_compl;
		tp->tx_stats.bytes += bytes_compl;
		u64_stats_update_end(&tp->tx_stats.syncp);

		tp->dirty_tx = dirty_tx;
		/* Sync with rtl8169_start_xmit:
		 * - publish dirty_tx ring index (write barrier)
		 * - refresh cur_tx ring index and queue status (read barrier)
		 * May the current thread miss the stopped queue condition,
		 * a racing xmit thread can only have a right view of the
		 * ring status.
		 */
		smp_mb();
		if (netif_queue_stopped(dev) &&
		    rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
		/*
		 * 8168 hack: TxPoll requests are lost when the Tx packets are
		 * too close. Let's kick an extra TxPoll request when a burst
		 * of start_xmit activity is detected (if it is not detected,
		 * it is slow enough). -- FR
		 */
		if (tp->cur_tx != dirty_tx)
			RTL_W8(tp, TxPoll, NPQ);
	}
}
5810 
5811 static inline int rtl8169_fragmented_frame(u32 status)
5812 {
5813 	return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
5814 }
5815 
5816 static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
5817 {
5818 	u32 status = opts1 & RxProtoMask;
5819 
5820 	if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
5821 	    ((status == RxProtoUDP) && !(opts1 & UDPFail)))
5822 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5823 	else
5824 		skb_checksum_none_assert(skb);
5825 }
5826 
/* Copy a received packet out of the (still mapped) DMA buffer into a fresh
 * skb, syncing ownership to the CPU around the copy so the buffer can be
 * handed straight back to the NIC. Returns NULL if skb allocation fails.
 */
static struct sk_buff *rtl8169_try_rx_copy(void *data,
					   struct rtl8169_private *tp,
					   int pkt_size,
					   dma_addr_t addr)
{
	struct sk_buff *skb;
	struct device *d = tp_to_dev(tp);

	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
	prefetch(data);
	skb = napi_alloc_skb(&tp->napi, pkt_size);
	if (skb)
		skb_copy_to_linear_data(skb, data, pkt_size);
	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);

	return skb;
}
5844 
/* RX path (called from NAPI poll): process up to 'budget' CPU-owned RX
 * descriptors, copy each good frame into an skb and feed it to GRO, then
 * return the descriptor to the NIC. Returns the number of descriptors
 * processed.
 */
static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
	unsigned int cur_rx, rx_left;
	unsigned int count;

	cur_rx = tp->cur_rx;

	for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescArray + entry;
		u32 status;

		status = le32_to_cpu(desc->opts1);
		if (status & DescOwn)
			break;

		/* This barrier is needed to keep us from reading
		 * any other fields out of the Rx descriptor until
		 * we know the status of DescOwn
		 */
		dma_rmb();

		if (unlikely(status & RxRES)) {
			netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
				   status);
			dev->stats.rx_errors++;
			if (status & (RxRWT | RxRUNT))
				dev->stats.rx_length_errors++;
			if (status & RxCRC)
				dev->stats.rx_crc_errors++;
			/* With RXALL, still deliver runt/CRC-error frames. */
			if (status & (RxRUNT | RxCRC) && !(status & RxRWT) &&
			    dev->features & NETIF_F_RXALL) {
				goto process_pkt;
			}
		} else {
			struct sk_buff *skb;
			dma_addr_t addr;
			int pkt_size;

process_pkt:
			addr = le64_to_cpu(desc->addr);
			/* Strip the 4-byte FCS unless the user wants it. */
			if (likely(!(dev->features & NETIF_F_RXFCS)))
				pkt_size = (status & 0x00003fff) - 4;
			else
				pkt_size = status & 0x00003fff;

			/*
			 * The driver does not support incoming fragmented
			 * frames. They are seen as a symptom of over-mtu
			 * sized frames.
			 */
			if (unlikely(rtl8169_fragmented_frame(status))) {
				dev->stats.rx_dropped++;
				dev->stats.rx_length_errors++;
				goto release_descriptor;
			}

			skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
						  tp, pkt_size, addr);
			if (!skb) {
				dev->stats.rx_dropped++;
				goto release_descriptor;
			}

			rtl8169_rx_csum(skb, status);
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			rtl8169_rx_vlan_tag(desc, skb);

			if (skb->pkt_type == PACKET_MULTICAST)
				dev->stats.multicast++;

			napi_gro_receive(&tp->napi, skb);

			u64_stats_update_begin(&tp->rx_stats.syncp);
			tp->rx_stats.packets++;
			tp->rx_stats.bytes += pkt_size;
			u64_stats_update_end(&tp->rx_stats.syncp);
		}
release_descriptor:
		desc->opts2 = 0;
		/* Hand the buffer back to the NIC (issues dma_wmb()). */
		rtl8169_mark_to_asic(desc);
	}

	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	return count;
}
5935 
/* Hard interrupt handler: dispatch link-change and error events, then mask
 * chip interrupts and punt the RX/TX work to NAPI. 0xffff in IntrStatus
 * indicates a surprise-removed (or powered-down) device.
 */
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
	struct rtl8169_private *tp = dev_instance;
	u16 status = RTL_R16(tp, IntrStatus);

	if (!tp->irq_enabled || status == 0xffff || !(status & tp->irq_mask))
		return IRQ_NONE;

	if (unlikely(status & SYSErr)) {
		rtl8169_pcierr_interrupt(tp->dev);
		goto out;
	}

	if (status & LinkChg)
		phy_mac_interrupt(tp->phydev);

	if (unlikely(status & RxFIFOOver &&
	    tp->mac_version == RTL_GIGA_MAC_VER_11)) {
		netif_stop_queue(tp->dev);
		/* XXX - Hack alert. See rtl_task(). */
		set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
	}

	rtl_irq_disable(tp);
	napi_schedule_irqoff(&tp->napi);
out:
	/* Acknowledge everything we saw, even on the error path. */
	rtl_ack_events(tp, status);

	return IRQ_HANDLED;
}
5966 
/* Workqueue handler: run every deferred action whose flag bit is set,
 * clearing the bit first. Currently only the reset action exists. Nothing
 * runs unless the device is up and RTL_FLAG_TASK_ENABLED is set.
 */
static void rtl_task(struct work_struct *work)
{
	/* Table mapping a pending-work flag bit to its action. */
	static const struct {
		int bitnr;
		void (*action)(struct rtl8169_private *);
	} rtl_work[] = {
		{ RTL_FLAG_TASK_RESET_PENDING,	rtl_reset_work },
	};
	struct rtl8169_private *tp =
		container_of(work, struct rtl8169_private, wk.work);
	struct net_device *dev = tp->dev;
	int i;

	rtl_lock_work(tp);

	if (!netif_running(dev) ||
	    !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
		goto out_unlock;

	for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
		bool pending;

		pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
		if (pending)
			rtl_work[i].action(tp);
	}

out_unlock:
	rtl_unlock_work(tp);
}
5997 
/* NAPI poll: process RX up to the budget, reclaim completed TX work, and
 * re-enable chip interrupts once the RX work fits under the budget.
 */
static int rtl8169_poll(struct napi_struct *napi, int budget)
{
	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
	struct net_device *dev = tp->dev;
	int work_done;

	work_done = rtl_rx(dev, tp, (u32) budget);

	rtl_tx(dev, tp, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		rtl_irq_enable(tp);
	}

	return work_done;
}
6015 
/* Accumulate the chip's RxMissed counter into netdev stats and reset it.
 * Only the early 8169 chips (up to VER_06) expose this register.
 */
static void rtl8169_rx_missed(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (tp->mac_version > RTL_GIGA_MAC_VER_06)
		return;

	/* Hardware counter is 24 bits wide; reading does not clear it. */
	dev->stats.rx_missed_errors += RTL_R32(tp, RxMissed) & 0xffffff;
	RTL_W32(tp, RxMissed, 0);
}
6026 
/* phylib link-change callback: apply link-dependent chip tweaks and keep
 * runtime PM in step with carrier state (resume on link up, allow suspend
 * on link down).
 */
static void r8169_phylink_handler(struct net_device *ndev)
{
	struct rtl8169_private *tp = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		rtl_link_chg_patch(tp);
		pm_request_resume(&tp->pci_dev->dev);
	} else {
		pm_runtime_idle(&tp->pci_dev->dev);
	}

	if (net_ratelimit())
		phy_print_status(tp->phydev);
}
6041 
/* Attach the internal PHY via phylib, constrain its advertised modes to
 * the chip's abilities (no 1000HD on gigabit chips, 100M max otherwise)
 * and enable asymmetric pause support. Returns 0 or a negative errno.
 */
static int r8169_phy_connect(struct rtl8169_private *tp)
{
	struct phy_device *phydev = tp->phydev;
	phy_interface_t phy_mode;
	int ret;

	phy_mode = tp->supports_gmii ? PHY_INTERFACE_MODE_GMII :
		   PHY_INTERFACE_MODE_MII;

	ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler,
				 phy_mode);
	if (ret)
		return ret;

	if (tp->supports_gmii)
		/* 1000BASE-T half duplex is not supported by the hardware. */
		phy_remove_link_mode(phydev,
				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	else
		phy_set_max_speed(phydev, SPEED_100);

	phy_support_asym_pause(phydev);

	phy_attached_info(phydev);

	return 0;
}
6068 
/* Bring the interface down: stop the PHY, NAPI and queue, reset the chip,
 * drain both rings and power down the PLL. Caller holds the work lock.
 */
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	phy_stop(tp->phydev);

	napi_disable(&tp->napi);
	netif_stop_queue(dev);

	rtl8169_hw_reset(tp);
	/*
	 * At this point device interrupts can not be enabled in any function,
	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
	 * and napi is disabled (rtl8169_poll).
	 */
	rtl8169_rx_missed(dev);

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_rcu();

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);

	rtl_pll_power_down(tp);
}
6095 
/* ndo_stop hook: snapshot the tally counters, tear down the datapath,
 * cancel deferred work, detach the PHY and free the IRQ and descriptor
 * rings. Mirrors the allocations done in rtl_open.
 */
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	/* Keep the device resumed while we touch hardware. */
	pm_runtime_get_sync(&pdev->dev);

	/* Update counters before going down */
	rtl8169_update_counters(tp);

	rtl_lock_work(tp);
	/* Clear all task flags */
	bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);

	rtl8169_down(dev);
	rtl_unlock_work(tp);

	cancel_work_sync(&tp->wk.work);

	phy_disconnect(tp->phydev);

	pci_free_irq(pdev, 0, tp);

	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	/* NULL ring pointers double as "interface is down" markers for PM. */
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	pm_runtime_put_sync(&pdev->dev);

	return 0;
}
6130 
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook (netconsole etc.): run the interrupt handler directly with
 * device interrupts unavailable.
 */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp);
}
#endif
6139 
/* ndo_open hook: allocate descriptor rings and RX buffers, load firmware,
 * request the IRQ, attach the PHY and start the hardware. Unwinds in
 * reverse order on any failure.
 */
static int rtl_open(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int retval = -ENOMEM;

	pm_runtime_get_sync(&pdev->dev);

	/*
	 * Rx and Tx descriptors needs 256 bytes alignment.
	 * dma_alloc_coherent provides more.
	 */
	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
					     &tp->TxPhyAddr, GFP_KERNEL);
	if (!tp->TxDescArray)
		goto err_pm_runtime_put;

	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
					     &tp->RxPhyAddr, GFP_KERNEL);
	if (!tp->RxDescArray)
		goto err_free_tx_0;

	retval = rtl8169_init_ring(tp);
	if (retval < 0)
		goto err_free_rx_1;

	/* Best effort: the chip also works without firmware patches. */
	rtl_request_firmware(tp);

	retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp,
				 dev->name);
	if (retval < 0)
		goto err_release_fw_2;

	retval = r8169_phy_connect(tp);
	if (retval)
		goto err_free_irq;

	rtl_lock_work(tp);

	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	napi_enable(&tp->napi);

	rtl8169_init_phy(dev, tp);

	rtl_pll_power_up(tp);

	rtl_hw_start(tp);

	if (!rtl8169_init_counter_offsets(tp))
		netif_warn(tp, hw, dev, "counter reset/update failed\n");

	phy_start(tp->phydev);
	netif_start_queue(dev);

	rtl_unlock_work(tp);

	pm_runtime_put_sync(&pdev->dev);
out:
	return retval;

err_free_irq:
	pci_free_irq(pdev, 0, tp);
err_release_fw_2:
	rtl_release_firmware(tp);
	rtl8169_rx_clear(tp);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	tp->RxDescArray = NULL;
err_free_tx_0:
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
err_pm_runtime_put:
	pm_runtime_put_noidle(&pdev->dev);
	goto out;
}
6218 
/* ndo_get_stats64 hook: combine the driver-maintained per-direction
 * counters (read under u64_stats seqcount), the plain netdev error stats,
 * and the hardware tally counters (offset-corrected, see
 * rtl8169_init_counter_offsets).
 */
static void
rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	struct rtl8169_counters *counters = tp->counters;
	unsigned int start;

	pm_runtime_get_noresume(&pdev->dev);

	/* Touch hardware registers only while the device is powered. */
	if (netif_running(dev) && pm_runtime_active(&pdev->dev))
		rtl8169_rx_missed(dev);

	do {
		start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
		stats->rx_packets = tp->rx_stats.packets;
		stats->rx_bytes	= tp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
		stats->tx_packets = tp->tx_stats.packets;
		stats->tx_bytes	= tp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));

	stats->rx_dropped	= dev->stats.rx_dropped;
	stats->tx_dropped	= dev->stats.tx_dropped;
	stats->rx_length_errors = dev->stats.rx_length_errors;
	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_crc_errors	= dev->stats.rx_crc_errors;
	stats->rx_fifo_errors	= dev->stats.rx_fifo_errors;
	stats->rx_missed_errors = dev->stats.rx_missed_errors;
	stats->multicast	= dev->stats.multicast;

	/*
	 * Fetch additional counter values missing in stats collected by driver
	 * from tally counters.
	 */
	if (pm_runtime_active(&pdev->dev))
		rtl8169_update_counters(tp);

	/*
	 * Subtract values fetched during initialization.
	 * See rtl8169_init_counter_offsets for a description why we do that.
	 */
	stats->tx_errors = le64_to_cpu(counters->tx_errors) -
		le64_to_cpu(tp->tc_offset.tx_errors);
	stats->collisions = le32_to_cpu(counters->tx_multi_collision) -
		le32_to_cpu(tp->tc_offset.tx_multi_collision);
	stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
		le16_to_cpu(tp->tc_offset.tx_aborted);

	pm_runtime_put_noidle(&pdev->dev);
}
6273 
/* Common suspend path (system and runtime PM): stop the PHY, detach the
 * netdev, quiesce NAPI and deferred work, and power down the PLL. No-op
 * when the interface is down.
 */
static void rtl8169_net_suspend(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	phy_stop(tp->phydev);
	netif_device_detach(dev);

	rtl_lock_work(tp);
	napi_disable(&tp->napi);
	/* Clear all task flags */
	bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);

	rtl_unlock_work(tp);

	rtl_pll_power_down(tp);
}
6293 
6294 #ifdef CONFIG_PM
6295 
/* System-sleep suspend callback: quiesce the device and gate its clock. */
static int rtl8169_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_net_suspend(dev);
	/* clk may be NULL; clk API tolerates that. TODO confirm for all
	 * board variants.
	 */
	clk_disable_unprepare(tp->clk);

	return 0;
}
6306 
/* Common resume path (system and runtime PM) for a running interface:
 * re-attach the netdev, power up the PLL, reprogram the PHY and schedule
 * a full datapath reset via rtl_reset_work.
 */
static void __rtl8169_resume(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	netif_device_attach(dev);

	rtl_pll_power_up(tp);
	rtl8169_init_phy(dev, tp);

	phy_start(tp->phydev);

	rtl_lock_work(tp);
	napi_enable(&tp->napi);
	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_reset_work(tp);
	rtl_unlock_work(tp);
}
6324 
/* System-sleep resume callback: restore the MAC address (lost over D3),
 * ungate the clock and restart the datapath if the interface was up.
 */
static int rtl8169_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_rar_set(tp, dev->dev_addr);

	clk_prepare_enable(tp->clk);

	if (netif_running(dev))
		__rtl8169_resume(dev);

	return 0;
}
6339 
/* Runtime-PM suspend: arm any-packet wake-up, quiesce the device and take
 * a final counter snapshot. A NULL TxDescArray means the interface is
 * down, so there is nothing to do.
 */
static int rtl8169_runtime_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	__rtl8169_set_wol(tp, WAKE_ANY);
	rtl_unlock_work(tp);

	rtl8169_net_suspend(dev);

	/* Update counters before going runtime suspend */
	rtl8169_rx_missed(dev);
	rtl8169_update_counters(tp);

	return 0;
}
6360 
6361 static int rtl8169_runtime_resume(struct device *device)
6362 {
6363 	struct net_device *dev = dev_get_drvdata(device);
6364 	struct rtl8169_private *tp = netdev_priv(dev);
6365 
6366 	rtl_rar_set(tp, dev->dev_addr);
6367 
6368 	if (!tp->TxDescArray)
6369 		return 0;
6370 
6371 	rtl_lock_work(tp);
6372 	__rtl8169_set_wol(tp, tp->saved_wolopts);
6373 	rtl_unlock_work(tp);
6374 
6375 	__rtl8169_resume(dev);
6376 
6377 	return 0;
6378 }
6379 
6380 static int rtl8169_runtime_idle(struct device *device)
6381 {
6382 	struct net_device *dev = dev_get_drvdata(device);
6383 
6384 	if (!netif_running(dev) || !netif_carrier_ok(dev))
6385 		pm_schedule_suspend(device, 10000);
6386 
6387 	return -EBUSY;
6388 }
6389 
/* System-sleep and runtime PM callbacks. The hibernate hooks
 * (freeze/thaw/poweroff/restore) reuse the plain suspend/resume
 * handlers.
 */
static const struct dev_pm_ops rtl8169_pm_ops = {
	.suspend		= rtl8169_suspend,
	.resume			= rtl8169_resume,
	.freeze			= rtl8169_suspend,
	.thaw			= rtl8169_resume,
	.poweroff		= rtl8169_suspend,
	.restore		= rtl8169_resume,
	.runtime_suspend	= rtl8169_runtime_suspend,
	.runtime_resume		= rtl8169_runtime_resume,
	.runtime_idle		= rtl8169_runtime_idle,
};
6401 
#define RTL8169_PM_OPS	(&rtl8169_pm_ops)

#else /* !CONFIG_PM */

/* No power management support configured: register no PM callbacks. */
#define RTL8169_PM_OPS	NULL

#endif /* !CONFIG_PM */
6409 
/* Shutdown-time WoL quirk for the 8168b family (mac versions
 * 11/12/17): stop PCI bus mastering but leave only the receiver
 * enabled, since WoL on these chips fails with the receiver off.
 */
static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
{
	/* WoL fails with 8168b when the receiver is disabled. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		pci_clear_master(tp->pci_dev);

		RTL_W8(tp, ChipCmd, CmdRxEnb);
		/* PCI commit */
		RTL_R8(tp, ChipCmd);
		break;
	default:
		break;
	}
}
6427 
/* PCI shutdown hook (reboot/poweroff): quiesce the NIC, restore the
 * permanent MAC address and reset the chip. When the system is
 * powering off, apply the WoL quirks (only if WoL options are
 * saved), then enable wake from D3 and enter D3hot.
 */
static void rtl_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_net_suspend(dev);

	/* Restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl8169_hw_reset(tp);

	if (system_state == SYSTEM_POWER_OFF) {
		if (tp->saved_wolopts) {
			rtl_wol_suspend_quirk(tp);
			rtl_wol_shutdown_quirk(tp);
		}

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
6450 
6451 static void rtl_remove_one(struct pci_dev *pdev)
6452 {
6453 	struct net_device *dev = pci_get_drvdata(pdev);
6454 	struct rtl8169_private *tp = netdev_priv(dev);
6455 
6456 	if (r8168_check_dash(tp))
6457 		rtl8168_driver_stop(tp);
6458 
6459 	netif_napi_del(&tp->napi);
6460 
6461 	unregister_netdev(dev);
6462 	mdiobus_unregister(tp->phydev->mdio.bus);
6463 
6464 	rtl_release_firmware(tp);
6465 
6466 	if (pci_dev_run_wake(pdev))
6467 		pm_runtime_get_noresume(&pdev->dev);
6468 
6469 	/* restore original MAC address */
6470 	rtl_rar_set(tp, dev->perm_addr);
6471 }
6472 
/* net_device operations implemented by this driver. */
static const struct net_device_ops rtl_netdev_ops = {
	.ndo_open		= rtl_open,
	.ndo_stop		= rtl8169_close,
	.ndo_get_stats64	= rtl8169_get_stats64,
	.ndo_start_xmit		= rtl8169_start_xmit,
	.ndo_tx_timeout		= rtl8169_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= rtl8169_change_mtu,
	.ndo_fix_features	= rtl8169_fix_features,
	.ndo_set_features	= rtl8169_set_features,
	.ndo_set_mac_address	= rtl_set_mac_address,
	.ndo_do_ioctl		= rtl8169_ioctl,
	.ndo_set_rx_mode	= rtl_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= rtl8169_netpoll,
#endif

};
6491 
6492 static const struct rtl_cfg_info {
6493 	void (*hw_start)(struct rtl8169_private *tp);
6494 	u16 irq_mask;
6495 	unsigned int has_gmii:1;
6496 	const struct rtl_coalesce_info *coalesce_info;
6497 } rtl_cfg_infos [] = {
6498 	[RTL_CFG_0] = {
6499 		.hw_start	= rtl_hw_start_8169,
6500 		.irq_mask	= SYSErr | LinkChg | RxOverflow | RxFIFOOver,
6501 		.has_gmii	= 1,
6502 		.coalesce_info	= rtl_coalesce_info_8169,
6503 	},
6504 	[RTL_CFG_1] = {
6505 		.hw_start	= rtl_hw_start_8168,
6506 		.irq_mask	= LinkChg | RxOverflow,
6507 		.has_gmii	= 1,
6508 		.coalesce_info	= rtl_coalesce_info_8168_8136,
6509 	},
6510 	[RTL_CFG_2] = {
6511 		.hw_start	= rtl_hw_start_8101,
6512 		.irq_mask	= LinkChg | RxOverflow | RxFIFOOver,
6513 		.coalesce_info	= rtl_coalesce_info_8168_8136,
6514 	}
6515 };
6516 
6517 static int rtl_alloc_irq(struct rtl8169_private *tp)
6518 {
6519 	unsigned int flags;
6520 
6521 	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
6522 		rtl_unlock_config_regs(tp);
6523 		RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
6524 		rtl_lock_config_regs(tp);
6525 		flags = PCI_IRQ_LEGACY;
6526 	} else {
6527 		flags = PCI_IRQ_ALL_TYPES;
6528 	}
6529 
6530 	return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags);
6531 }
6532 
6533 static void rtl_read_mac_address(struct rtl8169_private *tp,
6534 				 u8 mac_addr[ETH_ALEN])
6535 {
6536 	u32 value;
6537 
6538 	/* Get MAC address */
6539 	switch (tp->mac_version) {
6540 	case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38:
6541 	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
6542 		value = rtl_eri_read(tp, 0xe0);
6543 		mac_addr[0] = (value >>  0) & 0xff;
6544 		mac_addr[1] = (value >>  8) & 0xff;
6545 		mac_addr[2] = (value >> 16) & 0xff;
6546 		mac_addr[3] = (value >> 24) & 0xff;
6547 
6548 		value = rtl_eri_read(tp, 0xe4);
6549 		mac_addr[4] = (value >>  0) & 0xff;
6550 		mac_addr[5] = (value >>  8) & 0xff;
6551 		break;
6552 	default:
6553 		break;
6554 	}
6555 }
6556 
/* Polling condition: LINK_LIST_RDY is set in the MCU register. */
DECLARE_RTL_COND(rtl_link_list_ready_cond)
{
	return RTL_R8(tp, MCU) & LINK_LIST_RDY;
}
6561 
/* Polling condition: all RXTX_EMPTY bits are set in the MCU register. */
DECLARE_RTL_COND(rtl_rxtx_empty_cond)
{
	return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
}
6566 
6567 static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg)
6568 {
6569 	struct rtl8169_private *tp = mii_bus->priv;
6570 
6571 	if (phyaddr > 0)
6572 		return -ENODEV;
6573 
6574 	return rtl_readphy(tp, phyreg);
6575 }
6576 
6577 static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr,
6578 				int phyreg, u16 val)
6579 {
6580 	struct rtl8169_private *tp = mii_bus->priv;
6581 
6582 	if (phyaddr > 0)
6583 		return -ENODEV;
6584 
6585 	rtl_writephy(tp, phyreg, val);
6586 
6587 	return 0;
6588 }
6589 
/* Allocate (devm-managed) and register an MDIO bus exposing the
 * single internal PHY at address 0, then suspend that PHY — it is
 * woken up again in rtl_open(). Returns 0 or a negative errno; on
 * failure after registration the bus is unregistered again.
 */
static int r8169_mdio_register(struct rtl8169_private *tp)
{
	struct pci_dev *pdev = tp->pci_dev;
	struct mii_bus *new_bus;
	int ret;

	new_bus = devm_mdiobus_alloc(&pdev->dev);
	if (!new_bus)
		return -ENOMEM;

	new_bus->name = "r8169";
	new_bus->priv = tp;
	new_bus->parent = &pdev->dev;
	/* NOTE(review): PHY_IGNORE_INTERRUPT — link events presumably
	 * come via the MAC interrupt rather than a PHY irq; confirm.
	 */
	new_bus->irq[0] = PHY_IGNORE_INTERRUPT;
	snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev));

	new_bus->read = r8169_mdio_read_reg;
	new_bus->write = r8169_mdio_write_reg;

	ret = mdiobus_register(new_bus);
	if (ret)
		return ret;

	tp->phydev = mdiobus_get_phy(new_bus, 0);
	if (!tp->phydev) {
		mdiobus_unregister(new_bus);
		return -ENODEV;
	}

	/* PHY will be woken up in rtl_open() */
	phy_suspend(tp->phydev);

	return 0;
}
6624 
/* Early HW init for 8168g-style chips: gate RXDV, wait for the TX
 * config and RX/TX paths to drain, stop TX/RX, clear NOW_IS_OOB,
 * and toggle bits 14/15 of mac-ocp register 0xe8de around waiting
 * for the link list to become ready. Any timeout aborts the
 * sequence silently.
 */
static void rtl_hw_init_8168g(struct rtl8169_private *tp)
{
	u32 data;

	tp->ocp_base = OCP_STD_PHY_BASE;

	/* gate the RX DV signal while reconfiguring */
	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
		return;

	if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
		return;

	RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
	msleep(1);
	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);

	/* NOTE(review): the meaning of bits 14/15 of mac-ocp reg 0xe8de
	 * is undocumented here; sequence presumably follows vendor
	 * reference code.
	 */
	data = r8168_mac_ocp_read(tp, 0xe8de);
	data &= ~(1 << 14);
	r8168_mac_ocp_write(tp, 0xe8de, data);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
		return;

	data = r8168_mac_ocp_read(tp, 0xe8de);
	data |= (1 << 15);
	r8168_mac_ocp_write(tp, 0xe8de, data);

	rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
}
6656 
/* Chip-family specific early HW init. The 8168ep range (VER_49..51)
 * first stops the CMAC and then deliberately falls through to the
 * common 8168g-style init shared with VER_40..48. Other chips need
 * nothing here.
 */
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_51:
		rtl8168ep_stop_cmac(tp);
		/* fall through */
	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
		rtl_hw_init_8168g(tp);
		break;
	default:
		break;
	}
}
6670 
6671 static int rtl_jumbo_max(struct rtl8169_private *tp)
6672 {
6673 	/* Non-GBit versions don't support jumbo frames */
6674 	if (!tp->supports_gmii)
6675 		return JUMBO_1K;
6676 
6677 	switch (tp->mac_version) {
6678 	/* RTL8169 */
6679 	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
6680 		return JUMBO_7K;
6681 	/* RTL8168b */
6682 	case RTL_GIGA_MAC_VER_11:
6683 	case RTL_GIGA_MAC_VER_12:
6684 	case RTL_GIGA_MAC_VER_17:
6685 		return JUMBO_4K;
6686 	/* RTL8168c */
6687 	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
6688 		return JUMBO_6K;
6689 	default:
6690 		return JUMBO_9K;
6691 	}
6692 }
6693 
/* devm action callback: balances the clk_prepare_enable() done in
 * rtl_get_ether_clk() when the driver detaches.
 */
static void rtl_disable_clk(void *data)
{
	clk_disable_unprepare(data);
}
6698 
6699 static int rtl_get_ether_clk(struct rtl8169_private *tp)
6700 {
6701 	struct device *d = tp_to_dev(tp);
6702 	struct clk *clk;
6703 	int rc;
6704 
6705 	clk = devm_clk_get(d, "ether_clk");
6706 	if (IS_ERR(clk)) {
6707 		rc = PTR_ERR(clk);
6708 		if (rc == -ENOENT)
6709 			/* clk-core allows NULL (for suspend / resume) */
6710 			rc = 0;
6711 		else if (rc != -EPROBE_DEFER)
6712 			dev_err(d, "failed to get clk: %d\n", rc);
6713 	} else {
6714 		tp->clk = clk;
6715 		rc = clk_prepare_enable(clk);
6716 		if (rc)
6717 			dev_err(d, "failed to enable clk: %d\n", rc);
6718 		else
6719 			rc = devm_add_action_or_reset(d, rtl_disable_clk, clk);
6720 	}
6721 
6722 	return rc;
6723 }
6724 
6725 static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6726 {
6727 	const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
6728 	/* align to u16 for is_valid_ether_addr() */
6729 	u8 mac_addr[ETH_ALEN] __aligned(2) = {};
6730 	struct rtl8169_private *tp;
6731 	struct net_device *dev;
6732 	int chipset, region, i;
6733 	int jumbo_max, rc;
6734 
6735 	dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
6736 	if (!dev)
6737 		return -ENOMEM;
6738 
6739 	SET_NETDEV_DEV(dev, &pdev->dev);
6740 	dev->netdev_ops = &rtl_netdev_ops;
6741 	tp = netdev_priv(dev);
6742 	tp->dev = dev;
6743 	tp->pci_dev = pdev;
6744 	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
6745 	tp->supports_gmii = cfg->has_gmii;
6746 
6747 	/* Get the *optional* external "ether_clk" used on some boards */
6748 	rc = rtl_get_ether_clk(tp);
6749 	if (rc)
6750 		return rc;
6751 
6752 	/* Disable ASPM completely as that cause random device stop working
6753 	 * problems as well as full system hangs for some PCIe devices users.
6754 	 */
6755 	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
6756 
6757 	/* enable device (incl. PCI PM wakeup and hotplug setup) */
6758 	rc = pcim_enable_device(pdev);
6759 	if (rc < 0) {
6760 		dev_err(&pdev->dev, "enable failure\n");
6761 		return rc;
6762 	}
6763 
6764 	if (pcim_set_mwi(pdev) < 0)
6765 		dev_info(&pdev->dev, "Mem-Wr-Inval unavailable\n");
6766 
6767 	/* use first MMIO region */
6768 	region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1;
6769 	if (region < 0) {
6770 		dev_err(&pdev->dev, "no MMIO resource found\n");
6771 		return -ENODEV;
6772 	}
6773 
6774 	/* check for weird/broken PCI region reporting */
6775 	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
6776 		dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n");
6777 		return -ENODEV;
6778 	}
6779 
6780 	rc = pcim_iomap_regions(pdev, BIT(region), MODULENAME);
6781 	if (rc < 0) {
6782 		dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
6783 		return rc;
6784 	}
6785 
6786 	tp->mmio_addr = pcim_iomap_table(pdev)[region];
6787 
6788 	/* Identify chip attached to board */
6789 	rtl8169_get_mac_version(tp);
6790 	if (tp->mac_version == RTL_GIGA_MAC_NONE)
6791 		return -ENODEV;
6792 
6793 	tp->cp_cmd = RTL_R16(tp, CPlusCmd);
6794 
6795 	if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 &&
6796 	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
6797 		dev->features |= NETIF_F_HIGHDMA;
6798 	} else {
6799 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6800 		if (rc < 0) {
6801 			dev_err(&pdev->dev, "DMA configuration failed\n");
6802 			return rc;
6803 		}
6804 	}
6805 
6806 	rtl_init_rxcfg(tp);
6807 
6808 	rtl8169_irq_mask_and_ack(tp);
6809 
6810 	rtl_hw_initialize(tp);
6811 
6812 	rtl_hw_reset(tp);
6813 
6814 	pci_set_master(pdev);
6815 
6816 	chipset = tp->mac_version;
6817 
6818 	rc = rtl_alloc_irq(tp);
6819 	if (rc < 0) {
6820 		dev_err(&pdev->dev, "Can't allocate interrupt\n");
6821 		return rc;
6822 	}
6823 
6824 	mutex_init(&tp->wk.mutex);
6825 	INIT_WORK(&tp->wk.work, rtl_task);
6826 	u64_stats_init(&tp->rx_stats.syncp);
6827 	u64_stats_init(&tp->tx_stats.syncp);
6828 
6829 	/* get MAC address */
6830 	rc = eth_platform_get_mac_address(&pdev->dev, mac_addr);
6831 	if (rc)
6832 		rtl_read_mac_address(tp, mac_addr);
6833 
6834 	if (is_valid_ether_addr(mac_addr))
6835 		rtl_rar_set(tp, mac_addr);
6836 
6837 	for (i = 0; i < ETH_ALEN; i++)
6838 		dev->dev_addr[i] = RTL_R8(tp, MAC0 + i);
6839 
6840 	dev->ethtool_ops = &rtl8169_ethtool_ops;
6841 
6842 	netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);
6843 
6844 	/* don't enable SG, IP_CSUM and TSO by default - it might not work
6845 	 * properly for all devices */
6846 	dev->features |= NETIF_F_RXCSUM |
6847 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
6848 
6849 	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6850 		NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
6851 		NETIF_F_HW_VLAN_CTAG_RX;
6852 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6853 		NETIF_F_HIGHDMA;
6854 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
6855 
6856 	tp->cp_cmd |= RxChkSum | RxVlan;
6857 
6858 	/*
6859 	 * Pretend we are using VLANs; This bypasses a nasty bug where
6860 	 * Interrupts stop flowing on high load on 8110SCd controllers.
6861 	 */
6862 	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6863 		/* Disallow toggling */
6864 		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
6865 
6866 	if (rtl_chip_supports_csum_v2(tp))
6867 		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
6868 
6869 	dev->hw_features |= NETIF_F_RXALL;
6870 	dev->hw_features |= NETIF_F_RXFCS;
6871 
6872 	/* MTU range: 60 - hw-specific max */
6873 	dev->min_mtu = ETH_ZLEN;
6874 	jumbo_max = rtl_jumbo_max(tp);
6875 	dev->max_mtu = jumbo_max;
6876 
6877 	tp->hw_start = cfg->hw_start;
6878 	tp->irq_mask = RTL_EVENT_NAPI | cfg->irq_mask;
6879 	tp->coalesce_info = cfg->coalesce_info;
6880 
6881 	tp->fw_name = rtl_chip_infos[chipset].fw_name;
6882 
6883 	tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
6884 					    &tp->counters_phys_addr,
6885 					    GFP_KERNEL);
6886 	if (!tp->counters)
6887 		return -ENOMEM;
6888 
6889 	pci_set_drvdata(pdev, dev);
6890 
6891 	rc = r8169_mdio_register(tp);
6892 	if (rc)
6893 		return rc;
6894 
6895 	/* chip gets powered up in rtl_open() */
6896 	rtl_pll_power_down(tp);
6897 
6898 	rc = register_netdev(dev);
6899 	if (rc)
6900 		goto err_mdio_unregister;
6901 
6902 	netif_info(tp, probe, dev, "%s, %pM, XID %03x, IRQ %d\n",
6903 		   rtl_chip_infos[chipset].name, dev->dev_addr,
6904 		   (RTL_R32(tp, TxConfig) >> 20) & 0xfcf,
6905 		   pci_irq_vector(pdev, 0));
6906 
6907 	if (jumbo_max > JUMBO_1K)
6908 		netif_info(tp, probe, dev,
6909 			   "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
6910 			   jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
6911 			   "ok" : "ko");
6912 
6913 	if (r8168_check_dash(tp))
6914 		rtl8168_driver_start(tp);
6915 
6916 	if (pci_dev_run_wake(pdev))
6917 		pm_runtime_put_sync(&pdev->dev);
6918 
6919 	return 0;
6920 
6921 err_mdio_unregister:
6922 	mdiobus_unregister(tp->phydev->mdio.bus);
6923 	return rc;
6924 }
6925 
/* PCI driver glue; rtl8169_pci_tbl (defined elsewhere in this file)
 * lists the supported device IDs.
 */
static struct pci_driver rtl8169_pci_driver = {
	.name		= MODULENAME,
	.id_table	= rtl8169_pci_tbl,
	.probe		= rtl_init_one,
	.remove		= rtl_remove_one,
	.shutdown	= rtl_shutdown,
	.driver.pm	= RTL8169_PM_OPS,
};

module_pci_driver(rtl8169_pci_driver);
6936