1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * drivers/net/ethernet/micrel/ksx884x.c - Micrel KSZ8841/2 PCI Ethernet driver
4 *
5 * Copyright (c) 2009-2010 Micrel, Inc.
6 * Tristram Ha <Tristram.Ha@micrel.com>
7 */
8
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/hex.h>
16 #include <linux/ioport.h>
17 #include <linux/pci.h>
18 #include <linux/proc_fs.h>
19 #include <linux/mii.h>
20 #include <linux/platform_device.h>
21 #include <linux/ethtool.h>
22 #include <linux/etherdevice.h>
23 #include <linux/in.h>
24 #include <linux/ip.h>
25 #include <linux/if_vlan.h>
26 #include <linux/crc32.h>
27 #include <linux/sched.h>
28 #include <linux/slab.h>
29 #include <linux/micrel_phy.h>
30
31
32 /* DMA Registers */
33
34 #define KS_DMA_TX_CTRL 0x0000
35 #define DMA_TX_ENABLE 0x00000001
36 #define DMA_TX_CRC_ENABLE 0x00000002
37 #define DMA_TX_PAD_ENABLE 0x00000004
38 #define DMA_TX_LOOPBACK 0x00000100
39 #define DMA_TX_FLOW_ENABLE 0x00000200
40 #define DMA_TX_CSUM_IP 0x00010000
41 #define DMA_TX_CSUM_TCP 0x00020000
42 #define DMA_TX_CSUM_UDP 0x00040000
43 #define DMA_TX_BURST_SIZE 0x3F000000
44
45 #define KS_DMA_RX_CTRL 0x0004
46 #define DMA_RX_ENABLE 0x00000001
47 #define KS884X_DMA_RX_MULTICAST 0x00000002
48 #define DMA_RX_PROMISCUOUS 0x00000004
49 #define DMA_RX_ERROR 0x00000008
50 #define DMA_RX_UNICAST 0x00000010
51 #define DMA_RX_ALL_MULTICAST 0x00000020
52 #define DMA_RX_BROADCAST 0x00000040
53 #define DMA_RX_FLOW_ENABLE 0x00000200
54 #define DMA_RX_CSUM_IP 0x00010000
55 #define DMA_RX_CSUM_TCP 0x00020000
56 #define DMA_RX_CSUM_UDP 0x00040000
57 #define DMA_RX_BURST_SIZE 0x3F000000
58
59 #define DMA_BURST_SHIFT 24
60 #define DMA_BURST_DEFAULT 8
61
62 #define KS_DMA_TX_START 0x0008
63 #define KS_DMA_RX_START 0x000C
64 #define DMA_START 0x00000001
65
66 #define KS_DMA_TX_ADDR 0x0010
67 #define KS_DMA_RX_ADDR 0x0014
68
69 #define DMA_ADDR_LIST_MASK 0xFFFFFFFC
70 #define DMA_ADDR_LIST_SHIFT 2
71
/* MTR0 - multicast hash table registers, one byte per offset. */
#define KS884X_MULTICAST_0_OFFSET	0x0020
#define KS884X_MULTICAST_1_OFFSET	0x0021
#define KS884X_MULTICAST_2_OFFSET	0x0022
#define KS884X_MULTICAST_3_OFFSET	0x0023
/* Backward-compatible alias: the original name used a lower-case 'x'. */
#define KS884x_MULTICAST_3_OFFSET	KS884X_MULTICAST_3_OFFSET
/* MTR1 */
#define KS884X_MULTICAST_4_OFFSET	0x0024
#define KS884X_MULTICAST_5_OFFSET	0x0025
#define KS884X_MULTICAST_6_OFFSET	0x0026
#define KS884X_MULTICAST_7_OFFSET	0x0027
82
83 /* Interrupt Registers */
84
85 /* INTEN */
86 #define KS884X_INTERRUPTS_ENABLE 0x0028
87 /* INTST */
88 #define KS884X_INTERRUPTS_STATUS 0x002C
89
90 #define KS884X_INT_RX_STOPPED 0x02000000
91 #define KS884X_INT_TX_STOPPED 0x04000000
92 #define KS884X_INT_RX_OVERRUN 0x08000000
93 #define KS884X_INT_TX_EMPTY 0x10000000
94 #define KS884X_INT_RX 0x20000000
95 #define KS884X_INT_TX 0x40000000
96 #define KS884X_INT_PHY 0x80000000
97
98 #define KS884X_INT_RX_MASK \
99 (KS884X_INT_RX | KS884X_INT_RX_OVERRUN)
100 #define KS884X_INT_TX_MASK \
101 (KS884X_INT_TX | KS884X_INT_TX_EMPTY)
102 #define KS884X_INT_MASK (KS884X_INT_RX | KS884X_INT_TX | KS884X_INT_PHY)
103
104 /* MAC Additional Station Address */
105
106 /* MAAL0 */
107 #define KS_ADD_ADDR_0_LO 0x0080
108 /* MAAH0 */
109 #define KS_ADD_ADDR_0_HI 0x0084
110 /* MAAL1 */
111 #define KS_ADD_ADDR_1_LO 0x0088
112 /* MAAH1 */
113 #define KS_ADD_ADDR_1_HI 0x008C
114 /* MAAL2 */
115 #define KS_ADD_ADDR_2_LO 0x0090
116 /* MAAH2 */
117 #define KS_ADD_ADDR_2_HI 0x0094
118 /* MAAL3 */
119 #define KS_ADD_ADDR_3_LO 0x0098
120 /* MAAH3 */
121 #define KS_ADD_ADDR_3_HI 0x009C
122 /* MAAL4 */
123 #define KS_ADD_ADDR_4_LO 0x00A0
124 /* MAAH4 */
125 #define KS_ADD_ADDR_4_HI 0x00A4
126 /* MAAL5 */
127 #define KS_ADD_ADDR_5_LO 0x00A8
128 /* MAAH5 */
129 #define KS_ADD_ADDR_5_HI 0x00AC
130 /* MAAL6 */
131 #define KS_ADD_ADDR_6_LO 0x00B0
132 /* MAAH6 */
133 #define KS_ADD_ADDR_6_HI 0x00B4
134 /* MAAL7 */
135 #define KS_ADD_ADDR_7_LO 0x00B8
136 /* MAAH7 */
137 #define KS_ADD_ADDR_7_HI 0x00BC
138 /* MAAL8 */
139 #define KS_ADD_ADDR_8_LO 0x00C0
140 /* MAAH8 */
141 #define KS_ADD_ADDR_8_HI 0x00C4
142 /* MAAL9 */
143 #define KS_ADD_ADDR_9_LO 0x00C8
144 /* MAAH9 */
145 #define KS_ADD_ADDR_9_HI 0x00CC
146 /* MAAL10 */
147 #define KS_ADD_ADDR_A_LO 0x00D0
148 /* MAAH10 */
149 #define KS_ADD_ADDR_A_HI 0x00D4
150 /* MAAL11 */
151 #define KS_ADD_ADDR_B_LO 0x00D8
152 /* MAAH11 */
153 #define KS_ADD_ADDR_B_HI 0x00DC
154 /* MAAL12 */
155 #define KS_ADD_ADDR_C_LO 0x00E0
156 /* MAAH12 */
157 #define KS_ADD_ADDR_C_HI 0x00E4
158 /* MAAL13 */
159 #define KS_ADD_ADDR_D_LO 0x00E8
160 /* MAAH13 */
161 #define KS_ADD_ADDR_D_HI 0x00EC
162 /* MAAL14 */
163 #define KS_ADD_ADDR_E_LO 0x00F0
164 /* MAAH14 */
165 #define KS_ADD_ADDR_E_HI 0x00F4
166 /* MAAL15 */
167 #define KS_ADD_ADDR_F_LO 0x00F8
168 /* MAAH15 */
169 #define KS_ADD_ADDR_F_HI 0x00FC
170
171 #define ADD_ADDR_HI_MASK 0x0000FFFF
172 #define ADD_ADDR_ENABLE 0x80000000
173 #define ADD_ADDR_INCR 8
174
175 /* Miscellaneous Registers */
176
177 /* MARL */
178 #define KS884X_ADDR_0_OFFSET 0x0200
179 #define KS884X_ADDR_1_OFFSET 0x0201
180 /* MARM */
181 #define KS884X_ADDR_2_OFFSET 0x0202
182 #define KS884X_ADDR_3_OFFSET 0x0203
183 /* MARH */
184 #define KS884X_ADDR_4_OFFSET 0x0204
185 #define KS884X_ADDR_5_OFFSET 0x0205
186
187 /* OBCR */
188 #define KS884X_BUS_CTRL_OFFSET 0x0210
189
190 #define BUS_SPEED_125_MHZ 0x0000
191 #define BUS_SPEED_62_5_MHZ 0x0001
192 #define BUS_SPEED_41_66_MHZ 0x0002
193 #define BUS_SPEED_25_MHZ 0x0003
194
195 /* EEPCR */
196 #define KS884X_EEPROM_CTRL_OFFSET 0x0212
197
198 #define EEPROM_CHIP_SELECT 0x0001
199 #define EEPROM_SERIAL_CLOCK 0x0002
200 #define EEPROM_DATA_OUT 0x0004
201 #define EEPROM_DATA_IN 0x0008
202 #define EEPROM_ACCESS_ENABLE 0x0010
203
204 /* MBIR */
205 #define KS884X_MEM_INFO_OFFSET 0x0214
206
207 #define RX_MEM_TEST_FAILED 0x0008
208 #define RX_MEM_TEST_FINISHED 0x0010
209 #define TX_MEM_TEST_FAILED 0x0800
210 #define TX_MEM_TEST_FINISHED 0x1000
211
212 /* GCR */
213 #define KS884X_GLOBAL_CTRL_OFFSET 0x0216
214 #define GLOBAL_SOFTWARE_RESET 0x0001
215
216 #define KS8841_POWER_MANAGE_OFFSET 0x0218
217
218 /* WFCR */
219 #define KS8841_WOL_CTRL_OFFSET 0x021A
220 #define KS8841_WOL_MAGIC_ENABLE 0x0080
221 #define KS8841_WOL_FRAME3_ENABLE 0x0008
222 #define KS8841_WOL_FRAME2_ENABLE 0x0004
223 #define KS8841_WOL_FRAME1_ENABLE 0x0002
224 #define KS8841_WOL_FRAME0_ENABLE 0x0001
225
226 /* WF0 */
227 #define KS8841_WOL_FRAME_CRC_OFFSET 0x0220
228 #define KS8841_WOL_FRAME_BYTE0_OFFSET 0x0224
229 #define KS8841_WOL_FRAME_BYTE2_OFFSET 0x0228
230
231 /* IACR */
232 #define KS884X_IACR_P 0x04A0
233 #define KS884X_IACR_OFFSET KS884X_IACR_P
234
235 /* IADR1 */
236 #define KS884X_IADR1_P 0x04A2
237 #define KS884X_IADR2_P 0x04A4
238 #define KS884X_IADR3_P 0x04A6
239 #define KS884X_IADR4_P 0x04A8
240 #define KS884X_IADR5_P 0x04AA
241
242 #define KS884X_ACC_CTRL_SEL_OFFSET KS884X_IACR_P
243 #define KS884X_ACC_CTRL_INDEX_OFFSET (KS884X_ACC_CTRL_SEL_OFFSET + 1)
244
245 #define KS884X_ACC_DATA_0_OFFSET KS884X_IADR4_P
246 #define KS884X_ACC_DATA_1_OFFSET (KS884X_ACC_DATA_0_OFFSET + 1)
247 #define KS884X_ACC_DATA_2_OFFSET KS884X_IADR5_P
248 #define KS884X_ACC_DATA_3_OFFSET (KS884X_ACC_DATA_2_OFFSET + 1)
249 #define KS884X_ACC_DATA_4_OFFSET KS884X_IADR2_P
250 #define KS884X_ACC_DATA_5_OFFSET (KS884X_ACC_DATA_4_OFFSET + 1)
251 #define KS884X_ACC_DATA_6_OFFSET KS884X_IADR3_P
252 #define KS884X_ACC_DATA_7_OFFSET (KS884X_ACC_DATA_6_OFFSET + 1)
253 #define KS884X_ACC_DATA_8_OFFSET KS884X_IADR1_P
254
255 /* P1MBCR */
256 #define KS884X_P1MBCR_P 0x04D0
257 #define KS884X_P1MBSR_P 0x04D2
258 #define KS884X_PHY1ILR_P 0x04D4
259 #define KS884X_PHY1IHR_P 0x04D6
260 #define KS884X_P1ANAR_P 0x04D8
261 #define KS884X_P1ANLPR_P 0x04DA
262
263 /* P2MBCR */
264 #define KS884X_P2MBCR_P 0x04E0
265 #define KS884X_P2MBSR_P 0x04E2
266 #define KS884X_PHY2ILR_P 0x04E4
267 #define KS884X_PHY2IHR_P 0x04E6
268 #define KS884X_P2ANAR_P 0x04E8
269 #define KS884X_P2ANLPR_P 0x04EA
270
271 #define KS884X_PHY_1_CTRL_OFFSET KS884X_P1MBCR_P
272 #define PHY_CTRL_INTERVAL (KS884X_P2MBCR_P - KS884X_P1MBCR_P)
273
274 #define KS884X_PHY_CTRL_OFFSET 0x00
275
276 #define KS884X_PHY_STATUS_OFFSET 0x02
277
278 #define KS884X_PHY_ID_1_OFFSET 0x04
279 #define KS884X_PHY_ID_2_OFFSET 0x06
280
281 #define KS884X_PHY_AUTO_NEG_OFFSET 0x08
282
283 #define KS884X_PHY_REMOTE_CAP_OFFSET 0x0A
284
285 /* P1VCT */
286 #define KS884X_P1VCT_P 0x04F0
287 #define KS884X_P1PHYCTRL_P 0x04F2
288
289 /* P2VCT */
290 #define KS884X_P2VCT_P 0x04F4
291 #define KS884X_P2PHYCTRL_P 0x04F6
292
293 #define KS884X_PHY_SPECIAL_OFFSET KS884X_P1VCT_P
294 #define PHY_SPECIAL_INTERVAL (KS884X_P2VCT_P - KS884X_P1VCT_P)
295
296 #define KS884X_PHY_LINK_MD_OFFSET 0x00
297
298 #define PHY_START_CABLE_DIAG 0x8000
299 #define PHY_CABLE_DIAG_RESULT 0x6000
300 #define PHY_CABLE_STAT_NORMAL 0x0000
301 #define PHY_CABLE_STAT_OPEN 0x2000
302 #define PHY_CABLE_STAT_SHORT 0x4000
303 #define PHY_CABLE_STAT_FAILED 0x6000
304 #define PHY_CABLE_10M_SHORT 0x1000
305 #define PHY_CABLE_FAULT_COUNTER 0x01FF
306
307 #define KS884X_PHY_PHY_CTRL_OFFSET 0x02
308
309 #define PHY_STAT_REVERSED_POLARITY 0x0020
310 #define PHY_STAT_MDIX 0x0010
311 #define PHY_FORCE_LINK 0x0008
312 #define PHY_POWER_SAVING_DISABLE 0x0004
313 #define PHY_REMOTE_LOOPBACK 0x0002
314
315 /* SIDER */
316 #define KS884X_SIDER_P 0x0400
317 #define KS884X_CHIP_ID_OFFSET KS884X_SIDER_P
318 #define KS884X_FAMILY_ID_OFFSET (KS884X_CHIP_ID_OFFSET + 1)
319
320 #define REG_FAMILY_ID 0x88
321
322 #define REG_CHIP_ID_41 0x8810
323 #define REG_CHIP_ID_42 0x8800
324
325 #define KS884X_CHIP_ID_MASK_41 0xFF10
326 #define KS884X_CHIP_ID_MASK 0xFFF0
327 #define KS884X_CHIP_ID_SHIFT 4
328 #define KS884X_REVISION_MASK 0x000E
329 #define KS884X_REVISION_SHIFT 1
330 #define KS8842_START 0x0001
331
332 #define CHIP_IP_41_M 0x8810
333 #define CHIP_IP_42_M 0x8800
334 #define CHIP_IP_61_M 0x8890
335 #define CHIP_IP_62_M 0x8880
336
337 #define CHIP_IP_41_P 0x8850
338 #define CHIP_IP_42_P 0x8840
339 #define CHIP_IP_61_P 0x88D0
340 #define CHIP_IP_62_P 0x88C0
341
342 /* SGCR1 */
343 #define KS8842_SGCR1_P 0x0402
344 #define KS8842_SWITCH_CTRL_1_OFFSET KS8842_SGCR1_P
345
346 #define SWITCH_PASS_ALL 0x8000
347 #define SWITCH_TX_FLOW_CTRL 0x2000
348 #define SWITCH_RX_FLOW_CTRL 0x1000
349 #define SWITCH_CHECK_LENGTH 0x0800
350 #define SWITCH_AGING_ENABLE 0x0400
351 #define SWITCH_FAST_AGING 0x0200
352 #define SWITCH_AGGR_BACKOFF 0x0100
353 #define SWITCH_PASS_PAUSE 0x0008
354 #define SWITCH_LINK_AUTO_AGING 0x0001
355
356 /* SGCR2 */
357 #define KS8842_SGCR2_P 0x0404
358 #define KS8842_SWITCH_CTRL_2_OFFSET KS8842_SGCR2_P
359
360 #define SWITCH_VLAN_ENABLE 0x8000
361 #define SWITCH_IGMP_SNOOP 0x4000
362 #define IPV6_MLD_SNOOP_ENABLE 0x2000
363 #define IPV6_MLD_SNOOP_OPTION 0x1000
364 #define PRIORITY_SCHEME_SELECT 0x0800
365 #define SWITCH_MIRROR_RX_TX 0x0100
366 #define UNICAST_VLAN_BOUNDARY 0x0080
367 #define MULTICAST_STORM_DISABLE 0x0040
368 #define SWITCH_BACK_PRESSURE 0x0020
369 #define FAIR_FLOW_CTRL 0x0010
370 #define NO_EXC_COLLISION_DROP 0x0008
371 #define SWITCH_HUGE_PACKET 0x0004
372 #define SWITCH_LEGAL_PACKET 0x0002
373 #define SWITCH_BUF_RESERVE 0x0001
374
375 /* SGCR3 */
376 #define KS8842_SGCR3_P 0x0406
377 #define KS8842_SWITCH_CTRL_3_OFFSET KS8842_SGCR3_P
378
379 #define BROADCAST_STORM_RATE_LO 0xFF00
380 #define SWITCH_REPEATER 0x0080
381 #define SWITCH_HALF_DUPLEX 0x0040
382 #define SWITCH_FLOW_CTRL 0x0020
383 #define SWITCH_10_MBIT 0x0010
384 #define SWITCH_REPLACE_NULL_VID 0x0008
385 #define BROADCAST_STORM_RATE_HI 0x0007
386
387 #define BROADCAST_STORM_RATE 0x07FF
388
389 /* SGCR4 */
390 #define KS8842_SGCR4_P 0x0408
391
392 /* SGCR5 */
393 #define KS8842_SGCR5_P 0x040A
394 #define KS8842_SWITCH_CTRL_5_OFFSET KS8842_SGCR5_P
395
396 #define LED_MODE 0x8200
397 #define LED_SPEED_DUPLEX_ACT 0x0000
398 #define LED_SPEED_DUPLEX_LINK_ACT 0x8000
399 #define LED_DUPLEX_10_100 0x0200
400
401 /* SGCR6 */
402 #define KS8842_SGCR6_P 0x0410
403 #define KS8842_SWITCH_CTRL_6_OFFSET KS8842_SGCR6_P
404
405 #define KS8842_PRIORITY_MASK 3
406 #define KS8842_PRIORITY_SHIFT 2
407
408 /* SGCR7 */
409 #define KS8842_SGCR7_P 0x0412
410 #define KS8842_SWITCH_CTRL_7_OFFSET KS8842_SGCR7_P
411
412 #define SWITCH_UNK_DEF_PORT_ENABLE 0x0008
413 #define SWITCH_UNK_DEF_PORT_3 0x0004
414 #define SWITCH_UNK_DEF_PORT_2 0x0002
415 #define SWITCH_UNK_DEF_PORT_1 0x0001
416
417 /* MACAR1 */
418 #define KS8842_MACAR1_P 0x0470
419 #define KS8842_MACAR2_P 0x0472
420 #define KS8842_MACAR3_P 0x0474
421 #define KS8842_MAC_ADDR_1_OFFSET KS8842_MACAR1_P
422 #define KS8842_MAC_ADDR_0_OFFSET (KS8842_MAC_ADDR_1_OFFSET + 1)
423 #define KS8842_MAC_ADDR_3_OFFSET KS8842_MACAR2_P
424 #define KS8842_MAC_ADDR_2_OFFSET (KS8842_MAC_ADDR_3_OFFSET + 1)
425 #define KS8842_MAC_ADDR_5_OFFSET KS8842_MACAR3_P
426 #define KS8842_MAC_ADDR_4_OFFSET (KS8842_MAC_ADDR_5_OFFSET + 1)
427
428 /* TOSR1 */
429 #define KS8842_TOSR1_P 0x0480
430 #define KS8842_TOSR2_P 0x0482
431 #define KS8842_TOSR3_P 0x0484
432 #define KS8842_TOSR4_P 0x0486
433 #define KS8842_TOSR5_P 0x0488
434 #define KS8842_TOSR6_P 0x048A
435 #define KS8842_TOSR7_P 0x0490
436 #define KS8842_TOSR8_P 0x0492
437 #define KS8842_TOS_1_OFFSET KS8842_TOSR1_P
438 #define KS8842_TOS_2_OFFSET KS8842_TOSR2_P
439 #define KS8842_TOS_3_OFFSET KS8842_TOSR3_P
440 #define KS8842_TOS_4_OFFSET KS8842_TOSR4_P
441 #define KS8842_TOS_5_OFFSET KS8842_TOSR5_P
442 #define KS8842_TOS_6_OFFSET KS8842_TOSR6_P
443
444 #define KS8842_TOS_7_OFFSET KS8842_TOSR7_P
445 #define KS8842_TOS_8_OFFSET KS8842_TOSR8_P
446
447 /* P1CR1 */
448 #define KS8842_P1CR1_P 0x0500
449 #define KS8842_P1CR2_P 0x0502
450 #define KS8842_P1VIDR_P 0x0504
451 #define KS8842_P1CR3_P 0x0506
452 #define KS8842_P1IRCR_P 0x0508
453 #define KS8842_P1ERCR_P 0x050A
454 #define KS884X_P1SCSLMD_P 0x0510
455 #define KS884X_P1CR4_P 0x0512
456 #define KS884X_P1SR_P 0x0514
457
458 /* P2CR1 */
459 #define KS8842_P2CR1_P 0x0520
460 #define KS8842_P2CR2_P 0x0522
461 #define KS8842_P2VIDR_P 0x0524
462 #define KS8842_P2CR3_P 0x0526
463 #define KS8842_P2IRCR_P 0x0528
464 #define KS8842_P2ERCR_P 0x052A
465 #define KS884X_P2SCSLMD_P 0x0530
466 #define KS884X_P2CR4_P 0x0532
467 #define KS884X_P2SR_P 0x0534
468
469 /* P3CR1 */
470 #define KS8842_P3CR1_P 0x0540
471 #define KS8842_P3CR2_P 0x0542
472 #define KS8842_P3VIDR_P 0x0544
473 #define KS8842_P3CR3_P 0x0546
474 #define KS8842_P3IRCR_P 0x0548
475 #define KS8842_P3ERCR_P 0x054A
476
477 #define KS8842_PORT_1_CTRL_1 KS8842_P1CR1_P
478 #define KS8842_PORT_2_CTRL_1 KS8842_P2CR1_P
479 #define KS8842_PORT_3_CTRL_1 KS8842_P3CR1_P
480
/*
 * Compute the control register base address of switch port @port (0-based)
 * and store it in @addr.  Port register banks are evenly spaced, so the
 * base is derived from the port 1 block plus a per-port stride.
 *
 * NOTE: this macro assigns to its second argument (a side effect); it is
 * meant to be used as a statement, not inside a larger expression.
 */
#define PORT_CTRL_ADDR(port, addr) \
	(addr = KS8842_PORT_1_CTRL_1 + (port) * \
		(KS8842_PORT_2_CTRL_1 - KS8842_PORT_1_CTRL_1))
484
485 #define KS8842_PORT_CTRL_1_OFFSET 0x00
486
487 #define PORT_BROADCAST_STORM 0x0080
488 #define PORT_DIFFSERV_ENABLE 0x0040
489 #define PORT_802_1P_ENABLE 0x0020
490 #define PORT_BASED_PRIORITY_MASK 0x0018
491 #define PORT_BASED_PRIORITY_BASE 0x0003
492 #define PORT_BASED_PRIORITY_SHIFT 3
493 #define PORT_BASED_PRIORITY_0 0x0000
494 #define PORT_BASED_PRIORITY_1 0x0008
495 #define PORT_BASED_PRIORITY_2 0x0010
496 #define PORT_BASED_PRIORITY_3 0x0018
497 #define PORT_INSERT_TAG 0x0004
498 #define PORT_REMOVE_TAG 0x0002
499 #define PORT_PRIO_QUEUE_ENABLE 0x0001
500
501 #define KS8842_PORT_CTRL_2_OFFSET 0x02
502
503 #define PORT_INGRESS_VLAN_FILTER 0x4000
504 #define PORT_DISCARD_NON_VID 0x2000
505 #define PORT_FORCE_FLOW_CTRL 0x1000
506 #define PORT_BACK_PRESSURE 0x0800
507 #define PORT_TX_ENABLE 0x0400
508 #define PORT_RX_ENABLE 0x0200
509 #define PORT_LEARN_DISABLE 0x0100
510 #define PORT_MIRROR_SNIFFER 0x0080
511 #define PORT_MIRROR_RX 0x0040
512 #define PORT_MIRROR_TX 0x0020
513 #define PORT_USER_PRIORITY_CEILING 0x0008
514 #define PORT_VLAN_MEMBERSHIP 0x0007
515
516 #define KS8842_PORT_CTRL_VID_OFFSET 0x04
517
518 #define PORT_DEFAULT_VID 0x0001
519
520 #define KS8842_PORT_CTRL_3_OFFSET 0x06
521
522 #define PORT_INGRESS_LIMIT_MODE 0x000C
523 #define PORT_INGRESS_ALL 0x0000
524 #define PORT_INGRESS_UNICAST 0x0004
525 #define PORT_INGRESS_MULTICAST 0x0008
526 #define PORT_INGRESS_BROADCAST 0x000C
527 #define PORT_COUNT_IFG 0x0002
528 #define PORT_COUNT_PREAMBLE 0x0001
529
530 #define KS8842_PORT_IN_RATE_OFFSET 0x08
531 #define KS8842_PORT_OUT_RATE_OFFSET 0x0A
532
533 #define PORT_PRIORITY_RATE 0x0F
534 #define PORT_PRIORITY_RATE_SHIFT 4
535
536 #define KS884X_PORT_LINK_MD 0x10
537
538 #define PORT_CABLE_10M_SHORT 0x8000
539 #define PORT_CABLE_DIAG_RESULT 0x6000
540 #define PORT_CABLE_STAT_NORMAL 0x0000
541 #define PORT_CABLE_STAT_OPEN 0x2000
542 #define PORT_CABLE_STAT_SHORT 0x4000
543 #define PORT_CABLE_STAT_FAILED 0x6000
544 #define PORT_START_CABLE_DIAG 0x1000
545 #define PORT_FORCE_LINK 0x0800
546 #define PORT_POWER_SAVING_DISABLE 0x0400
547 #define PORT_PHY_REMOTE_LOOPBACK 0x0200
548 #define PORT_CABLE_FAULT_COUNTER 0x01FF
549
550 #define KS884X_PORT_CTRL_4_OFFSET 0x12
551
552 #define PORT_LED_OFF 0x8000
553 #define PORT_TX_DISABLE 0x4000
554 #define PORT_AUTO_NEG_RESTART 0x2000
555 #define PORT_REMOTE_FAULT_DISABLE 0x1000
556 #define PORT_POWER_DOWN 0x0800
557 #define PORT_AUTO_MDIX_DISABLE 0x0400
558 #define PORT_FORCE_MDIX 0x0200
559 #define PORT_LOOPBACK 0x0100
560 #define PORT_AUTO_NEG_ENABLE 0x0080
561 #define PORT_FORCE_100_MBIT 0x0040
562 #define PORT_FORCE_FULL_DUPLEX 0x0020
563 #define PORT_AUTO_NEG_SYM_PAUSE 0x0010
564 #define PORT_AUTO_NEG_100BTX_FD 0x0008
565 #define PORT_AUTO_NEG_100BTX 0x0004
566 #define PORT_AUTO_NEG_10BT_FD 0x0002
567 #define PORT_AUTO_NEG_10BT 0x0001
568
569 #define KS884X_PORT_STATUS_OFFSET 0x14
570
571 #define PORT_HP_MDIX 0x8000
572 #define PORT_REVERSED_POLARITY 0x2000
573 #define PORT_RX_FLOW_CTRL 0x0800
574 #define PORT_TX_FLOW_CTRL 0x1000
575 #define PORT_STATUS_SPEED_100MBIT 0x0400
576 #define PORT_STATUS_FULL_DUPLEX 0x0200
577 #define PORT_REMOTE_FAULT 0x0100
578 #define PORT_MDIX_STATUS 0x0080
579 #define PORT_AUTO_NEG_COMPLETE 0x0040
580 #define PORT_STATUS_LINK_GOOD 0x0020
581 #define PORT_REMOTE_SYM_PAUSE 0x0010
582 #define PORT_REMOTE_100BTX_FD 0x0008
583 #define PORT_REMOTE_100BTX 0x0004
584 #define PORT_REMOTE_10BT_FD 0x0002
585 #define PORT_REMOTE_10BT 0x0001
586
587 /*
588 #define STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
589 #define STATIC_MAC_TABLE_FWD_PORTS 00-00070000-00000000
590 #define STATIC_MAC_TABLE_VALID 00-00080000-00000000
591 #define STATIC_MAC_TABLE_OVERRIDE 00-00100000-00000000
592 #define STATIC_MAC_TABLE_USE_FID 00-00200000-00000000
593 #define STATIC_MAC_TABLE_FID 00-03C00000-00000000
594 */
595
596 #define STATIC_MAC_TABLE_ADDR 0x0000FFFF
597 #define STATIC_MAC_TABLE_FWD_PORTS 0x00070000
598 #define STATIC_MAC_TABLE_VALID 0x00080000
599 #define STATIC_MAC_TABLE_OVERRIDE 0x00100000
600 #define STATIC_MAC_TABLE_USE_FID 0x00200000
601 #define STATIC_MAC_TABLE_FID 0x03C00000
602
603 #define STATIC_MAC_FWD_PORTS_SHIFT 16
604 #define STATIC_MAC_FID_SHIFT 22
605
606 /*
607 #define VLAN_TABLE_VID 00-00000000-00000FFF
608 #define VLAN_TABLE_FID 00-00000000-0000F000
609 #define VLAN_TABLE_MEMBERSHIP 00-00000000-00070000
610 #define VLAN_TABLE_VALID 00-00000000-00080000
611 */
612
613 #define VLAN_TABLE_VID 0x00000FFF
614 #define VLAN_TABLE_FID 0x0000F000
615 #define VLAN_TABLE_MEMBERSHIP 0x00070000
616 #define VLAN_TABLE_VALID 0x00080000
617
618 #define VLAN_TABLE_FID_SHIFT 12
619 #define VLAN_TABLE_MEMBERSHIP_SHIFT 16
620
621 /*
622 #define DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
623 #define DYNAMIC_MAC_TABLE_FID 00-000F0000-00000000
624 #define DYNAMIC_MAC_TABLE_SRC_PORT 00-00300000-00000000
625 #define DYNAMIC_MAC_TABLE_TIMESTAMP 00-00C00000-00000000
626 #define DYNAMIC_MAC_TABLE_ENTRIES 03-FF000000-00000000
627 #define DYNAMIC_MAC_TABLE_MAC_EMPTY 04-00000000-00000000
628 #define DYNAMIC_MAC_TABLE_RESERVED 78-00000000-00000000
629 #define DYNAMIC_MAC_TABLE_NOT_READY 80-00000000-00000000
630 */
631
632 #define DYNAMIC_MAC_TABLE_ADDR 0x0000FFFF
633 #define DYNAMIC_MAC_TABLE_FID 0x000F0000
634 #define DYNAMIC_MAC_TABLE_SRC_PORT 0x00300000
635 #define DYNAMIC_MAC_TABLE_TIMESTAMP 0x00C00000
636 #define DYNAMIC_MAC_TABLE_ENTRIES 0xFF000000
637
638 #define DYNAMIC_MAC_TABLE_ENTRIES_H 0x03
639 #define DYNAMIC_MAC_TABLE_MAC_EMPTY 0x04
640 #define DYNAMIC_MAC_TABLE_RESERVED 0x78
641 #define DYNAMIC_MAC_TABLE_NOT_READY 0x80
642
643 #define DYNAMIC_MAC_FID_SHIFT 16
644 #define DYNAMIC_MAC_SRC_PORT_SHIFT 20
645 #define DYNAMIC_MAC_TIMESTAMP_SHIFT 22
646 #define DYNAMIC_MAC_ENTRIES_SHIFT 24
647 #define DYNAMIC_MAC_ENTRIES_H_SHIFT 8
648
649 /*
650 #define MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
651 #define MIB_COUNTER_VALID 00-00000000-40000000
652 #define MIB_COUNTER_OVERFLOW 00-00000000-80000000
653 */
654
655 #define MIB_COUNTER_VALUE 0x3FFFFFFF
656 #define MIB_COUNTER_VALID 0x40000000
657 #define MIB_COUNTER_OVERFLOW 0x80000000
658
659 #define MIB_PACKET_DROPPED 0x0000FFFF
660
661 #define KS_MIB_PACKET_DROPPED_TX_0 0x100
662 #define KS_MIB_PACKET_DROPPED_TX_1 0x101
663 #define KS_MIB_PACKET_DROPPED_TX 0x102
664 #define KS_MIB_PACKET_DROPPED_RX_0 0x103
665 #define KS_MIB_PACKET_DROPPED_RX_1 0x104
666 #define KS_MIB_PACKET_DROPPED_RX 0x105
667
668 /* Change default LED mode. */
669 #define SET_DEFAULT_LED LED_SPEED_DUPLEX_ACT
670
671 #define MAC_ADDR_ORDER(i) (ETH_ALEN - 1 - (i))
672
673 #define MAX_ETHERNET_BODY_SIZE 1500
674 #define ETHERNET_HEADER_SIZE (14 + VLAN_HLEN)
675
676 #define MAX_ETHERNET_PACKET_SIZE \
677 (MAX_ETHERNET_BODY_SIZE + ETHERNET_HEADER_SIZE)
678
679 #define REGULAR_RX_BUF_SIZE (MAX_ETHERNET_PACKET_SIZE + 4)
680 #define MAX_RX_BUF_SIZE (1912 + 4)
681
682 #define ADDITIONAL_ENTRIES 16
683 #define MAX_MULTICAST_LIST 32
684
685 #define HW_MULTICAST_SIZE 8
686
687 #define HW_TO_DEV_PORT(port) (port - 1)
688
/* Link (media) state of a port as tracked by the driver. */
enum {
	media_connected,
	media_disconnected
};
693
/*
 * Driver statistics counter indices.  OID_COUNTER_FIRST and
 * OID_COUNTER_LAST bracket the valid range for iteration.
 *
 * Note: "UNKOWN" is a historical misspelling of "UNKNOWN"; the identifier
 * is kept as-is so existing references elsewhere continue to compile.
 */
enum {
	OID_COUNTER_UNKOWN,

	OID_COUNTER_FIRST,

	/* total transmit errors */
	OID_COUNTER_XMIT_ERROR,

	/* total receive errors */
	OID_COUNTER_RCV_ERROR,

	OID_COUNTER_LAST
};
707
708 /*
709 * Hardware descriptor definitions
710 */
711
712 #define DESC_ALIGNMENT 16
713 #define BUFFER_ALIGNMENT 8
714
715 #define NUM_OF_RX_DESC 64
716 #define NUM_OF_TX_DESC 64
717
718 #define KS_DESC_RX_FRAME_LEN 0x000007FF
719 #define KS_DESC_RX_FRAME_TYPE 0x00008000
720 #define KS_DESC_RX_ERROR_CRC 0x00010000
721 #define KS_DESC_RX_ERROR_RUNT 0x00020000
722 #define KS_DESC_RX_ERROR_TOO_LONG 0x00040000
723 #define KS_DESC_RX_ERROR_PHY 0x00080000
724 #define KS884X_DESC_RX_PORT_MASK 0x00300000
725 #define KS_DESC_RX_MULTICAST 0x01000000
726 #define KS_DESC_RX_ERROR 0x02000000
727 #define KS_DESC_RX_ERROR_CSUM_UDP 0x04000000
728 #define KS_DESC_RX_ERROR_CSUM_TCP 0x08000000
729 #define KS_DESC_RX_ERROR_CSUM_IP 0x10000000
730 #define KS_DESC_RX_LAST 0x20000000
731 #define KS_DESC_RX_FIRST 0x40000000
732 #define KS_DESC_RX_ERROR_COND \
733 (KS_DESC_RX_ERROR_CRC | \
734 KS_DESC_RX_ERROR_RUNT | \
735 KS_DESC_RX_ERROR_PHY | \
736 KS_DESC_RX_ERROR_TOO_LONG)
737
738 #define KS_DESC_HW_OWNED 0x80000000
739
740 #define KS_DESC_BUF_SIZE 0x000007FF
741 #define KS884X_DESC_TX_PORT_MASK 0x00300000
742 #define KS_DESC_END_OF_RING 0x02000000
743 #define KS_DESC_TX_CSUM_GEN_UDP 0x04000000
744 #define KS_DESC_TX_CSUM_GEN_TCP 0x08000000
745 #define KS_DESC_TX_CSUM_GEN_IP 0x10000000
746 #define KS_DESC_TX_LAST 0x20000000
747 #define KS_DESC_TX_FIRST 0x40000000
748 #define KS_DESC_TX_INTERRUPT 0x80000000
749
750 #define KS_DESC_PORT_SHIFT 20
751
752 #define KS_DESC_RX_MASK (KS_DESC_BUF_SIZE)
753
754 #define KS_DESC_TX_MASK \
755 (KS_DESC_TX_INTERRUPT | \
756 KS_DESC_TX_FIRST | \
757 KS_DESC_TX_LAST | \
758 KS_DESC_TX_CSUM_GEN_IP | \
759 KS_DESC_TX_CSUM_GEN_TCP | \
760 KS_DESC_TX_CSUM_GEN_UDP | \
761 KS_DESC_BUF_SIZE)
762
/**
 * struct ksz_desc_rx_stat - Receive descriptor status word bit layout
 *
 * Bit-level view of the 32-bit control/status word of a receive hardware
 * descriptor.  It mirrors the KS_DESC_RX_* mask definitions above; the
 * field order is dictated by the hardware, hence the reversed layout for
 * big-endian hosts.  Do not reorder or resize these fields.
 */
struct ksz_desc_rx_stat {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 hw_owned:1;
	u32 first_desc:1;
	u32 last_desc:1;
	u32 csum_err_ip:1;
	u32 csum_err_tcp:1;
	u32 csum_err_udp:1;
	u32 error:1;
	u32 multicast:1;
	u32 src_port:4;
	u32 err_phy:1;
	u32 err_too_long:1;
	u32 err_runt:1;
	u32 err_crc:1;
	u32 frame_type:1;
	u32 reserved1:4;
	u32 frame_len:11;
#else
	u32 frame_len:11;	/* KS_DESC_RX_FRAME_LEN */
	u32 reserved1:4;
	u32 frame_type:1;	/* KS_DESC_RX_FRAME_TYPE */
	u32 err_crc:1;		/* KS_DESC_RX_ERROR_CRC */
	u32 err_runt:1;		/* KS_DESC_RX_ERROR_RUNT */
	u32 err_too_long:1;	/* KS_DESC_RX_ERROR_TOO_LONG */
	u32 err_phy:1;		/* KS_DESC_RX_ERROR_PHY */
	u32 src_port:4;		/* port bits at KS_DESC_PORT_SHIFT */
	u32 multicast:1;	/* KS_DESC_RX_MULTICAST */
	u32 error:1;		/* KS_DESC_RX_ERROR */
	u32 csum_err_udp:1;	/* KS_DESC_RX_ERROR_CSUM_UDP */
	u32 csum_err_tcp:1;	/* KS_DESC_RX_ERROR_CSUM_TCP */
	u32 csum_err_ip:1;	/* KS_DESC_RX_ERROR_CSUM_IP */
	u32 last_desc:1;	/* KS_DESC_RX_LAST */
	u32 first_desc:1;	/* KS_DESC_RX_FIRST */
	u32 hw_owned:1;		/* KS_DESC_HW_OWNED */
#endif
};
800
/**
 * struct ksz_desc_tx_stat - Transmit descriptor status word bit layout
 *
 * Only the descriptor ownership bit (KS_DESC_HW_OWNED) is defined in the
 * transmit status word; the remaining 31 bits are reserved.  Layout is
 * fixed by hardware and reversed on big-endian hosts.
 */
struct ksz_desc_tx_stat {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 hw_owned:1;
	u32 reserved1:31;
#else
	u32 reserved1:31;
	u32 hw_owned:1;		/* KS_DESC_HW_OWNED */
#endif
};
810
/**
 * struct ksz_desc_rx_buf - Receive descriptor buffer word bit layout
 *
 * Bit-level view of the buffer-size word of a receive hardware
 * descriptor: buffer size plus the end-of-ring marker
 * (KS_DESC_END_OF_RING).  Layout is fixed by hardware.
 */
struct ksz_desc_rx_buf {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 reserved4:6;
	u32 end_of_ring:1;
	u32 reserved3:14;
	u32 buf_size:11;
#else
	u32 buf_size:11;	/* KS_DESC_BUF_SIZE */
	u32 reserved3:14;
	u32 end_of_ring:1;	/* KS_DESC_END_OF_RING */
	u32 reserved4:6;
#endif
};
824
/**
 * struct ksz_desc_tx_buf - Transmit descriptor buffer word bit layout
 *
 * Bit-level view of the buffer/control word of a transmit hardware
 * descriptor, mirroring the KS_DESC_TX_* mask definitions above.  Layout
 * is fixed by hardware; do not reorder or resize fields.
 */
struct ksz_desc_tx_buf {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 intr:1;
	u32 first_seg:1;
	u32 last_seg:1;
	u32 csum_gen_ip:1;
	u32 csum_gen_tcp:1;
	u32 csum_gen_udp:1;
	u32 end_of_ring:1;
	u32 reserved4:1;
	u32 dest_port:4;
	u32 reserved3:9;
	u32 buf_size:11;
#else
	u32 buf_size:11;	/* KS_DESC_BUF_SIZE */
	u32 reserved3:9;
	u32 dest_port:4;	/* port bits at KS_DESC_PORT_SHIFT */
	u32 reserved4:1;
	u32 end_of_ring:1;	/* KS_DESC_END_OF_RING */
	u32 csum_gen_udp:1;	/* KS_DESC_TX_CSUM_GEN_UDP */
	u32 csum_gen_tcp:1;	/* KS_DESC_TX_CSUM_GEN_TCP */
	u32 csum_gen_ip:1;	/* KS_DESC_TX_CSUM_GEN_IP */
	u32 last_seg:1;		/* KS_DESC_TX_LAST */
	u32 first_seg:1;	/* KS_DESC_TX_FIRST */
	u32 intr:1;		/* KS_DESC_TX_INTERRUPT */
#endif
};
852
/*
 * Overlay of the RX and TX status bit layouts over the raw 32-bit
 * descriptor control word; @data gives whole-word access.
 */
union desc_stat {
	struct ksz_desc_rx_stat rx;
	struct ksz_desc_tx_stat tx;
	u32 data;
};
858
/*
 * Overlay of the RX and TX buffer bit layouts over the raw 32-bit
 * descriptor buffer word; @data gives whole-word access.
 */
union desc_buf {
	struct ksz_desc_rx_buf rx;
	struct ksz_desc_tx_buf tx;
	u32 data;
};
864
865 /**
866 * struct ksz_hw_desc - Hardware descriptor data structure
867 * @ctrl: Descriptor control value.
868 * @buf: Descriptor buffer value.
869 * @addr: Physical address of memory buffer.
870 * @next: Pointer to next hardware descriptor.
871 */
872 struct ksz_hw_desc {
873 union desc_stat ctrl;
874 union desc_buf buf;
875 u32 addr;
876 u32 next;
877 };
878
879 /**
880 * struct ksz_sw_desc - Software descriptor data structure
881 * @ctrl: Descriptor control value.
882 * @buf: Descriptor buffer value.
883 * @buf_size: Current buffers size value in hardware descriptor.
884 */
885 struct ksz_sw_desc {
886 union desc_stat ctrl;
887 union desc_buf buf;
888 u32 buf_size;
889 };
890
891 /**
892 * struct ksz_dma_buf - OS dependent DMA buffer data structure
893 * @skb: Associated socket buffer.
894 * @dma: Associated physical DMA address.
895 * @len: Actual len used.
896 */
897 struct ksz_dma_buf {
898 struct sk_buff *skb;
899 dma_addr_t dma;
900 int len;
901 };
902
903 /**
904 * struct ksz_desc - Descriptor structure
905 * @phw: Hardware descriptor pointer to uncached physical memory.
906 * @sw: Cached memory to hold hardware descriptor values for
907 * manipulation.
908 * @dma_buf: Operating system dependent data structure to hold physical
909 * memory buffer allocation information.
910 */
911 struct ksz_desc {
912 struct ksz_hw_desc *phw;
913 struct ksz_sw_desc sw;
914 struct ksz_dma_buf dma_buf;
915 };
916
917 #define DMA_BUFFER(desc) ((struct ksz_dma_buf *)(&(desc)->dma_buf))
918
919 /**
920 * struct ksz_desc_info - Descriptor information data structure
921 * @ring: First descriptor in the ring.
922 * @cur: Current descriptor being manipulated.
923 * @ring_virt: First hardware descriptor in the ring.
924 * @ring_phys: The physical address of the first descriptor of the ring.
925 * @size: Size of hardware descriptor.
926 * @alloc: Number of descriptors allocated.
927 * @avail: Number of descriptors available for use.
928 * @last: Index for last descriptor released to hardware.
929 * @next: Index for next descriptor available for use.
930 * @mask: Mask for index wrapping.
931 */
932 struct ksz_desc_info {
933 struct ksz_desc *ring;
934 struct ksz_desc *cur;
935 struct ksz_hw_desc *ring_virt;
936 u32 ring_phys;
937 int size;
938 int alloc;
939 int avail;
940 int last;
941 int next;
942 int mask;
943 };
944
945 /*
946 * KSZ8842 switch definitions
947 */
948
/* Indirect-access table selectors for the KSZ8842 switch. */
enum {
	TABLE_STATIC_MAC = 0,
	TABLE_VLAN,
	TABLE_DYNAMIC_MAC,
	TABLE_MIB
};
955
956 #define LEARNED_MAC_TABLE_ENTRIES 1024
957 #define STATIC_MAC_TABLE_ENTRIES 8
958
959 /**
960 * struct ksz_mac_table - Static MAC table data structure
961 * @mac_addr: MAC address to filter.
962 * @vid: VID value.
963 * @fid: FID value.
964 * @ports: Port membership.
965 * @override: Override setting.
966 * @use_fid: FID use setting.
967 * @valid: Valid setting indicating the entry is being used.
968 */
969 struct ksz_mac_table {
970 u8 mac_addr[ETH_ALEN];
971 u16 vid;
972 u8 fid;
973 u8 ports;
974 u8 override:1;
975 u8 use_fid:1;
976 u8 valid:1;
977 };
978
979 #define VLAN_TABLE_ENTRIES 16
980
981 /**
982 * struct ksz_vlan_table - VLAN table data structure
983 * @vid: VID value.
984 * @fid: FID value.
985 * @member: Port membership.
986 */
987 struct ksz_vlan_table {
988 u16 vid;
989 u8 fid;
990 u8 member;
991 };
992
993 #define DIFFSERV_ENTRIES 64
994 #define PRIO_802_1P_ENTRIES 8
995 #define PRIO_QUEUES 4
996
997 #define SWITCH_PORT_NUM 2
998 #define TOTAL_PORT_NUM (SWITCH_PORT_NUM + 1)
999 #define HOST_MASK (1 << SWITCH_PORT_NUM)
1000 #define PORT_MASK 7
1001
1002 #define MAIN_PORT 0
1003 #define OTHER_PORT 1
1004 #define HOST_PORT SWITCH_PORT_NUM
1005
1006 #define PORT_COUNTER_NUM 0x20
1007 #define TOTAL_PORT_COUNTER_NUM (PORT_COUNTER_NUM + 2)
1008
1009 #define MIB_COUNTER_RX_LO_PRIORITY 0x00
1010 #define MIB_COUNTER_RX_HI_PRIORITY 0x01
1011 #define MIB_COUNTER_RX_UNDERSIZE 0x02
1012 #define MIB_COUNTER_RX_FRAGMENT 0x03
1013 #define MIB_COUNTER_RX_OVERSIZE 0x04
1014 #define MIB_COUNTER_RX_JABBER 0x05
1015 #define MIB_COUNTER_RX_SYMBOL_ERR 0x06
1016 #define MIB_COUNTER_RX_CRC_ERR 0x07
1017 #define MIB_COUNTER_RX_ALIGNMENT_ERR 0x08
1018 #define MIB_COUNTER_RX_CTRL_8808 0x09
1019 #define MIB_COUNTER_RX_PAUSE 0x0A
1020 #define MIB_COUNTER_RX_BROADCAST 0x0B
1021 #define MIB_COUNTER_RX_MULTICAST 0x0C
1022 #define MIB_COUNTER_RX_UNICAST 0x0D
1023 #define MIB_COUNTER_RX_OCTET_64 0x0E
1024 #define MIB_COUNTER_RX_OCTET_65_127 0x0F
1025 #define MIB_COUNTER_RX_OCTET_128_255 0x10
1026 #define MIB_COUNTER_RX_OCTET_256_511 0x11
1027 #define MIB_COUNTER_RX_OCTET_512_1023 0x12
1028 #define MIB_COUNTER_RX_OCTET_1024_1522 0x13
1029 #define MIB_COUNTER_TX_LO_PRIORITY 0x14
1030 #define MIB_COUNTER_TX_HI_PRIORITY 0x15
1031 #define MIB_COUNTER_TX_LATE_COLLISION 0x16
1032 #define MIB_COUNTER_TX_PAUSE 0x17
1033 #define MIB_COUNTER_TX_BROADCAST 0x18
1034 #define MIB_COUNTER_TX_MULTICAST 0x19
1035 #define MIB_COUNTER_TX_UNICAST 0x1A
1036 #define MIB_COUNTER_TX_DEFERRED 0x1B
1037 #define MIB_COUNTER_TX_TOTAL_COLLISION 0x1C
1038 #define MIB_COUNTER_TX_EXCESS_COLLISION 0x1D
1039 #define MIB_COUNTER_TX_SINGLE_COLLISION 0x1E
1040 #define MIB_COUNTER_TX_MULTI_COLLISION 0x1F
1041
1042 #define MIB_COUNTER_RX_DROPPED_PACKET 0x20
1043 #define MIB_COUNTER_TX_DROPPED_PACKET 0x21
1044
1045 /**
1046 * struct ksz_port_mib - Port MIB data structure
1047 * @cnt_ptr: Current pointer to MIB counter index.
1048 * @link_down: Indication the link has just gone down.
1049 * @state: Connection status of the port.
1050 * @mib_start: The starting counter index. Some ports do not start at 0.
1051 * @counter: 64-bit MIB counter value.
1052 * @dropped: Temporary buffer to remember last read packet dropped values.
1053 *
1054 * MIB counters needs to be read periodically so that counters do not get
1055 * overflowed and give incorrect values. A right balance is needed to
1056 * satisfy this condition and not waste too much CPU time.
1057 *
1058 * It is pointless to read MIB counters when the port is disconnected. The
1059 * @state provides the connection status so that MIB counters are read only
1060 * when the port is connected. The @link_down indicates the port is just
1061 * disconnected so that all MIB counters are read one last time to update the
1062 * information.
1063 */
1064 struct ksz_port_mib {
1065 u8 cnt_ptr;
1066 u8 link_down;
1067 u8 state;
1068 u8 mib_start;
1069
1070 u64 counter[TOTAL_PORT_COUNTER_NUM];
1071 u32 dropped[2];
1072 };
1073
/**
 * struct ksz_port_cfg - Port configuration data structure
 * @vid: VID value.
 * @member: Port membership.
 * @port_prio: Port priority.
 * @rx_rate: Receive priority rate, one entry per priority queue.
 * @tx_rate: Transmit priority rate, one entry per priority queue.
 * @stp_state: Current Spanning Tree Protocol state.
 */
struct ksz_port_cfg {
	u16 vid;
	u8 member;
	u8 port_prio;
	u32 rx_rate[PRIO_QUEUES];
	u32 tx_rate[PRIO_QUEUES];
	int stp_state;
};
1091
/**
 * struct ksz_switch - KSZ8842 switch data structure
 * @mac_table: MAC table entries information.
 * @vlan_table: VLAN table entries information.
 * @port_cfg: Port configuration information.
 * @diffserv: DiffServ priority settings. Possible values from 6-bit of ToS
 *	(bit7 ~ bit2) field.
 * @p_802_1p: 802.1P priority settings. Possible values from 3-bit of 802.1p
 *	Tag priority field.
 * @br_addr: Bridge address. Used for STP.
 * @other_addr: Other MAC address. Used for multiple network device mode.
 * @broad_per: Broadcast storm percentage.
 * @member: Current port membership. Used for STP.
 */
struct ksz_switch {
	struct ksz_mac_table mac_table[STATIC_MAC_TABLE_ENTRIES];
	struct ksz_vlan_table vlan_table[VLAN_TABLE_ENTRIES];
	struct ksz_port_cfg port_cfg[TOTAL_PORT_NUM];

	u8 diffserv[DIFFSERV_ENTRIES];
	u8 p_802_1p[PRIO_802_1P_ENTRIES];

	u8 br_addr[ETH_ALEN];
	u8 other_addr[ETH_ALEN];

	u8 broad_per;
	u8 member;
};
1120
1121 #define TX_RATE_UNIT 10000
1122
/**
 * struct ksz_port_info - Port information data structure
 * @state: Connection status of the port.
 * @tx_rate: Transmit rate divided by TX_RATE_UNIT (10000) to get Mbit.
 * @duplex: Duplex mode.
 * @advertised: Advertised auto-negotiation setting. Used to determine link.
 * @partner: Auto-negotiation partner setting. Used to determine link.
 * @port_id: Port index to access actual hardware register.
 * @pdev: Pointer to OS dependent network device.
 */
struct ksz_port_info {
	uint state;
	uint tx_rate;
	u8 duplex;
	u8 advertised;
	u8 partner;
	u8 port_id;
	void *pdev;
};
1142
1143 #define MAX_TX_HELD_SIZE 52000
1144
1145 /* Hardware features and bug fixes. */
1146 #define LINK_INT_WORKING (1 << 0)
1147 #define SMALL_PACKET_TX_BUG (1 << 1)
1148 #define HALF_DUPLEX_SIGNAL_BUG (1 << 2)
1149 #define RX_HUGE_FRAME (1 << 4)
1150 #define STP_SUPPORT (1 << 8)
1151
1152 /* Software overrides. */
1153 #define PAUSE_FLOW_CTRL (1 << 0)
1154 #define FAST_AGING (1 << 1)
1155
/**
 * struct ksz_hw - KSZ884X hardware data structure
 * @io: Virtual address assigned.
 * @ksz_switch: Pointer to KSZ8842 switch.
 * @port_info: Port information.
 * @port_mib: Port MIB information.
 * @dev_count: Number of network devices this hardware supports.
 * @dst_ports: Destination ports in switch for transmission.
 * @id: Hardware ID. Used for display only.
 * @mib_cnt: Number of MIB counters this hardware has.
 * @mib_port_cnt: Number of ports with MIB counters.
 * @tx_cfg: Cached transmit control settings.
 * @rx_cfg: Cached receive control settings.
 * @intr_mask: Current interrupt mask.
 * @intr_set: Current interrupt set.
 * @intr_blocked: Interrupt blocked.
 * @rx_desc_info: Receive descriptor information.
 * @tx_desc_info: Transmit descriptor information.
 * @tx_int_cnt: Transmit interrupt count. Used for TX optimization.
 * @tx_int_mask: Transmit interrupt mask. Used for TX optimization.
 * @tx_size: Transmit data size. Used for TX optimization.
 *	The maximum is defined by MAX_TX_HELD_SIZE.
 * @perm_addr: Permanent MAC address.
 * @override_addr: Overridden MAC address.
 * @address: Additional MAC address entries.
 * @addr_list_size: Additional MAC address list size.
 * @mac_override: Indication of MAC address overridden.
 * @promiscuous: Counter to keep track of promiscuous mode set.
 * @all_multi: Counter to keep track of all multicast mode set.
 * @multi_list: Multicast address entries.
 * @multi_bits: Cached multicast hash table settings.
 * @multi_list_size: Multicast address list size.
 * @enabled: Indication of hardware enabled.
 * @rx_stop: Indication of receive process stop.
 * @reserved2: Padding; not used.
 * @features: Hardware features to enable.
 * @overrides: Hardware features to override.
 * @parent: Pointer to parent, network device private structure.
 */
struct ksz_hw {
	void __iomem *io;

	struct ksz_switch *ksz_switch;
	struct ksz_port_info port_info[SWITCH_PORT_NUM];
	struct ksz_port_mib port_mib[TOTAL_PORT_NUM];
	int dev_count;
	int dst_ports;
	int id;
	int mib_cnt;
	int mib_port_cnt;

	u32 tx_cfg;
	u32 rx_cfg;
	u32 intr_mask;
	u32 intr_set;
	uint intr_blocked;

	struct ksz_desc_info rx_desc_info;
	struct ksz_desc_info tx_desc_info;

	int tx_int_cnt;
	int tx_int_mask;
	int tx_size;

	u8 perm_addr[ETH_ALEN];
	u8 override_addr[ETH_ALEN];
	u8 address[ADDITIONAL_ENTRIES][ETH_ALEN];
	u8 addr_list_size;
	u8 mac_override;
	u8 promiscuous;
	u8 all_multi;
	u8 multi_list[MAX_MULTICAST_LIST][ETH_ALEN];
	u8 multi_bits[HW_MULTICAST_SIZE];
	u8 multi_list_size;

	u8 enabled;
	u8 rx_stop;
	u8 reserved2[1];

	uint features;
	uint overrides;

	void *parent;
};
1240
/* Flow control modes used in struct ksz_port @flow_ctrl. */
enum {
	PHY_NO_FLOW_CTRL,
	PHY_FLOW_CTRL,
	PHY_TX_ONLY,
	PHY_RX_ONLY
};
1247
/**
 * struct ksz_port - Virtual port data structure
 * @duplex: Duplex mode setting. 1 for half duplex, 2 for full
 *	duplex, and 0 for auto, which normally results in full
 *	duplex.
 * @speed: Speed setting. 10 for 10 Mbit, 100 for 100 Mbit, and
 *	0 for auto, which normally results in 100 Mbit.
 * @force_link: Force link setting. 0 for auto-negotiation, and 1 for
 *	force.
 * @flow_ctrl: Flow control setting. PHY_NO_FLOW_CTRL for no flow
 *	control, and PHY_FLOW_CTRL for flow control.
 *	PHY_TX_ONLY and PHY_RX_ONLY are not supported for 100
 *	Mbit PHY.
 * @first_port: Index of first port this port supports.
 * @mib_port_cnt: Number of ports with MIB counters.
 * @port_cnt: Number of ports this port supports.
 * @counter: Port statistics counter.
 * @hw: Pointer to hardware structure.
 * @linked: Pointer to port information linked to this port.
 */
struct ksz_port {
	u8 duplex;
	u8 speed;
	u8 force_link;
	u8 flow_ctrl;

	int first_port;
	int mib_port_cnt;
	int port_cnt;
	u64 counter[OID_COUNTER_LAST];

	struct ksz_hw *hw;
	struct ksz_port_info *linked;
};
1282
/**
 * struct ksz_timer_info - Timer information data structure
 * @timer: Kernel timer.
 * @cnt: Running timer counter.
 * @max: Number of times to run timer; -1 for infinity.
 * @period: Timer period in jiffies.
 */
struct ksz_timer_info {
	struct timer_list timer;
	int cnt;
	int max;
	int period;
};
1296
/**
 * struct ksz_shared_mem - OS dependent shared memory data structure
 * @dma_addr: Physical DMA address allocated.
 * @alloc_size: Allocation size.
 * @phys: Actual physical address used (may differ from @dma_addr
 *	after alignment).
 * @alloc_virt: Virtual address allocated.
 * @virt: Actual virtual address used.
 */
struct ksz_shared_mem {
	dma_addr_t dma_addr;
	uint alloc_size;
	uint phys;
	u8 *alloc_virt;
	u8 *virt;
};
1312
/**
 * struct ksz_counter_info - OS dependent counter information data structure
 * @counter: Wait queue to wakeup after counters are read.
 * @time: Next time in jiffies to read counter.
 * @read: Indication of counters read in full or not.
 */
struct ksz_counter_info {
	wait_queue_head_t counter;
	unsigned long time;
	int read;
};
1324
/**
 * struct dev_info - Network device information data structure
 * @dev: Pointer to network device.
 * @pdev: Pointer to PCI device.
 * @hw: Hardware structure.
 * @desc_pool: Physical memory used for descriptor pool.
 * @hwlock: Spinlock to prevent hardware from accessing.
 * @lock: Mutex lock to prevent device from accessing.
 * @dev_rcv: Receive process function used.
 * @last_skb: Socket buffer allocated for descriptor rx fragments.
 * @skb_index: Buffer index for receiving fragments.
 * @skb_len: Buffer length for receiving fragments.
 * @mib_read: Workqueue to read MIB counters.
 * @mib_timer_info: Timer to read MIB counters.
 * @counter: Used for MIB reading, one entry per port.
 * @mtu: Current MTU used. The default is REGULAR_RX_BUF_SIZE;
 *	the maximum is MAX_RX_BUF_SIZE.
 * @opened: Counter to keep track of device open.
 * @rx_tasklet: Receive processing tasklet.
 * @tx_tasklet: Transmit processing tasklet.
 * @wol_enable: Wake-on-LAN enable set by ethtool.
 * @wol_support: Wake-on-LAN support used by ethtool.
 * @pme_wait: Used for KSZ8841 power management.
 */
struct dev_info {
	struct net_device *dev;
	struct pci_dev *pdev;

	struct ksz_hw hw;
	struct ksz_shared_mem desc_pool;

	spinlock_t hwlock;
	struct mutex lock;

	int (*dev_rcv)(struct dev_info *);

	struct sk_buff *last_skb;
	int skb_index;
	int skb_len;

	struct work_struct mib_read;
	struct ksz_timer_info mib_timer_info;
	struct ksz_counter_info counter[TOTAL_PORT_NUM];

	int mtu;
	int opened;

	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;

	int wol_enable;
	int wol_support;
	unsigned long pme_wait;
};
1379
/**
 * struct dev_priv - Network device private data structure
 * @adapter: Adapter device information.
 * @port: Port information.
 * @monitor_timer_info: Timer to monitor ports.
 * @proc_sem: Semaphore for proc accessing.
 * @id: Device ID.
 * @mii_if: MII interface information.
 * @advertising: Temporary variable to store advertised settings.
 * @msg_enable: The message flags controlling driver output.
 * @media_state: The connection status of the device.
 * @multicast: The all multicast state of the device.
 * @promiscuous: The promiscuous state of the device.
 */
struct dev_priv {
	struct dev_info *adapter;
	struct ksz_port port;
	struct ksz_timer_info monitor_timer_info;

	struct semaphore proc_sem;
	int id;

	struct mii_if_info mii_if;
	u32 advertising;

	u32 msg_enable;
	int media_state;
	int multicast;
	int promiscuous;
};
1410
/* Driver identification strings reported to the user. */
#define DRV_NAME "KSZ884X PCI"
#define DEVICE_NAME "KSZ884x PCI"
#define DRV_VERSION "1.0.0"
#define DRV_RELDATE "Feb 8, 2010"

static char version[] =
	"Micrel " DEVICE_NAME " " DRV_VERSION " (" DRV_RELDATE ")";

/* NOTE(review): presumably the fallback MAC when none is programmed in
 * hardware - confirm against the probe/MAC-setup code.
 */
static u8 DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x88, 0x42, 0x01 };
1420
1421 /*
1422 * Interrupt processing primary routines
1423 */
1424
/* Acknowledge (clear) the given interrupt bits in the status register. */
static inline void hw_ack_intr(struct ksz_hw *hw, uint interrupt)
{
	writel(interrupt, hw->io + KS884X_INTERRUPTS_STATUS);
}
1429
/*
 * Disable all hardware interrupts.  The current mask is remembered in
 * @intr_blocked so hw_ena_intr() can restore it later.  The read back of
 * the enable register caches the hardware state in @intr_set (and
 * NOTE(review): presumably also flushes the posted write - confirm).
 */
static inline void hw_dis_intr(struct ksz_hw *hw)
{
	hw->intr_blocked = hw->intr_mask;
	writel(0, hw->io + KS884X_INTERRUPTS_ENABLE);
	hw->intr_set = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
}
1436
/* Program the interrupt enable register and cache the value in @intr_set. */
static inline void hw_set_intr(struct ksz_hw *hw, uint interrupt)
{
	hw->intr_set = interrupt;
	writel(interrupt, hw->io + KS884X_INTERRUPTS_ENABLE);
}
1442
/* Re-enable interrupts using the currently configured mask. */
static inline void hw_ena_intr(struct ksz_hw *hw)
{
	hw->intr_blocked = 0;
	hw_set_intr(hw, hw->intr_mask);
}
1448
/* Remove the given bit(s) from the software interrupt mask only; the
 * hardware enable register is not touched here.
 */
static inline void hw_dis_intr_bit(struct ksz_hw *hw, uint bit)
{
	hw->intr_mask &= ~(bit);
}
1453
/* Turn off the given interrupt(s) both in the hardware enable register
 * and in the software interrupt mask.
 */
static inline void hw_turn_off_intr(struct ksz_hw *hw, uint interrupt)
{
	u32 read_intr;

	read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
	hw->intr_set = read_intr & ~interrupt;
	writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
	hw_dis_intr_bit(hw, interrupt);
}
1463
/**
 * hw_turn_on_intr - turn on specified interrupts
 * @hw: The hardware instance.
 * @bit: The interrupt bits to be on.
 *
 * This routine turns on the specified interrupts in the interrupt mask so that
 * those interrupts will be enabled.  If interrupts are currently blocked
 * (see hw_block_intr()) the hardware register is left untouched; the new
 * mask takes effect when hw_ena_intr() restores interrupts.
 */
static void hw_turn_on_intr(struct ksz_hw *hw, u32 bit)
{
	hw->intr_mask |= bit;

	if (!hw->intr_blocked)
		hw_set_intr(hw, hw->intr_mask);
}
1479
/* Read the interrupt status register, reporting only those bits that are
 * currently enabled in @intr_set.
 */
static inline void hw_read_intr(struct ksz_hw *hw, uint *status)
{
	uint raw = readl(hw->io + KS884X_INTERRUPTS_STATUS);

	*status = raw & hw->intr_set;
}
1485
/* Restore interrupts previously blocked by hw_block_intr().  A zero
 * @interrupt means interrupts were already blocked by an outer caller,
 * so nothing is restored here.
 */
static inline void hw_restore_intr(struct ksz_hw *hw, uint interrupt)
{
	if (interrupt)
		hw_ena_intr(hw);
}
1491
/**
 * hw_block_intr - block hardware interrupts
 * @hw: The hardware instance.
 *
 * This function blocks all interrupts of the hardware and returns the current
 * interrupt enable mask so that interrupts can be restored later.
 *
 * Return the current interrupt enable mask, or 0 if interrupts were
 * already blocked (so nested callers do not re-enable them early).
 */
static uint hw_block_intr(struct ksz_hw *hw)
{
	uint interrupt = 0;

	if (!hw->intr_blocked) {
		hw_dis_intr(hw);
		interrupt = hw->intr_blocked;
	}
	return interrupt;
}
1511
1512 /*
1513 * Hardware descriptor routines
1514 */
1515
/* Write the given status to the hardware descriptor with the hw_owned bit
 * cleared, returning ownership of the descriptor to software.
 */
static inline void reset_desc(struct ksz_desc *desc, union desc_stat status)
{
	status.rx.hw_owned = 0;
	desc->phw->ctrl.data = cpu_to_le32(status.data);
}
1521
/* Hand the descriptor to the hardware.  The buffer word is updated first
 * (only when changed) and the control word with hw_owned set is written
 * last, as the hardware may start processing once it sees ownership.
 */
static inline void release_desc(struct ksz_desc *desc)
{
	desc->sw.ctrl.tx.hw_owned = 1;
	if (desc->sw.buf_size != desc->sw.buf.data) {
		desc->sw.buf_size = desc->sw.buf.data;
		desc->phw->buf.data = cpu_to_le32(desc->sw.buf.data);
	}
	desc->phw->ctrl.data = cpu_to_le32(desc->sw.ctrl.data);
}
1531
/* Take the next receive descriptor from the ring (wrapping via the ring
 * mask), decrement the available count, and clear its RX status bits.
 */
static void get_rx_pkt(struct ksz_desc_info *info, struct ksz_desc **desc)
{
	*desc = &info->ring[info->last];
	info->last++;
	info->last &= info->mask;
	info->avail--;
	(*desc)->sw.buf.data &= ~KS_DESC_RX_MASK;
}
1540
/* Set the DMA buffer address in the hardware receive descriptor. */
static inline void set_rx_buf(struct ksz_desc *desc, u32 addr)
{
	desc->phw->addr = cpu_to_le32(addr);
}
1545
/* Set the receive buffer size in the software descriptor copy. */
static inline void set_rx_len(struct ksz_desc *desc, u32 len)
{
	desc->sw.buf.rx.buf_size = len;
}
1550
/* Take the next transmit descriptor from the ring (wrapping via the ring
 * mask), decrement the available count, and clear its TX status bits.
 */
static inline void get_tx_pkt(struct ksz_desc_info *info,
	struct ksz_desc **desc)
{
	*desc = &info->ring[info->next];
	info->next++;
	info->next &= info->mask;
	info->avail--;
	(*desc)->sw.buf.data &= ~KS_DESC_TX_MASK;
}
1560
/* Set the DMA buffer address in the hardware transmit descriptor. */
static inline void set_tx_buf(struct ksz_desc *desc, u32 addr)
{
	desc->phw->addr = cpu_to_le32(addr);
}
1565
/* Set the transmit buffer size in the software descriptor copy. */
static inline void set_tx_len(struct ksz_desc *desc, u32 len)
{
	desc->sw.buf.tx.buf_size = len;
}
1570
/* Switch functions */

#define TABLE_READ 0x10
#define TABLE_SEL_SHIFT 2

/* Dummy register read used as a short delay after writing the indirect
 * access control register, before the access data registers are read.
 */
#define HW_DELAY(hw, reg) \
	do { \
		readw(hw->io + reg); \
	} while (0)
1580
/**
 * sw_r_table - read 4 bytes of data from switch table
 * @hw: The hardware instance.
 * @table: The table selector.
 * @addr: The address of the table entry.
 * @data: Buffer to store the read data.
 *
 * This routine reads 4 bytes of data from the table of the switch.
 * Hardware interrupts are disabled to minimize corruption of read data.
 */
static void sw_r_table(struct ksz_hw *hw, int table, u16 addr, u32 *data)
{
	u16 ctrl_addr;
	uint interrupt;

	/* Upper byte selects the table and the read operation; lower byte
	 * is the entry address.
	 */
	ctrl_addr = (((table << TABLE_SEL_SHIFT) | TABLE_READ) << 8) | addr;

	interrupt = hw_block_intr(hw);

	writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
	HW_DELAY(hw, KS884X_IACR_OFFSET);
	*data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);

	hw_restore_intr(hw, interrupt);
}
1606
/**
 * sw_w_table_64 - write 8 bytes of data to the switch table
 * @hw: The hardware instance.
 * @table: The table selector.
 * @addr: The address of the table entry.
 * @data_hi: The high part of data to be written (bit63 ~ bit32).
 * @data_lo: The low part of data to be written (bit31 ~ bit0).
 *
 * This routine writes 8 bytes of data to the table of the switch.
 * Hardware interrupts are disabled to minimize corruption of written data.
 */
static void sw_w_table_64(struct ksz_hw *hw, int table, u16 addr, u32 data_hi,
	u32 data_lo)
{
	u16 ctrl_addr;
	uint interrupt;

	ctrl_addr = ((table << TABLE_SEL_SHIFT) << 8) | addr;

	interrupt = hw_block_intr(hw);

	/* Load the data registers first; the write to the control register
	 * triggers the actual table write.
	 */
	writel(data_hi, hw->io + KS884X_ACC_DATA_4_OFFSET);
	writel(data_lo, hw->io + KS884X_ACC_DATA_0_OFFSET);

	writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
	HW_DELAY(hw, KS884X_IACR_OFFSET);

	hw_restore_intr(hw, interrupt);
}
1636
1637 /**
1638 * sw_w_sta_mac_table - write to the static MAC table
1639 * @hw: The hardware instance.
1640 * @addr: The address of the table entry.
1641 * @mac_addr: The MAC address.
1642 * @ports: The port members.
1643 * @override: The flag to override the port receive/transmit settings.
1644 * @valid: The flag to indicate entry is valid.
1645 * @use_fid: The flag to indicate the FID is valid.
1646 * @fid: The FID value.
1647 *
1648 * This routine writes an entry of the static MAC table of the switch. It
1649 * calls sw_w_table_64() to write the data.
1650 */
sw_w_sta_mac_table(struct ksz_hw * hw,u16 addr,u8 * mac_addr,u8 ports,int override,int valid,int use_fid,u8 fid)1651 static void sw_w_sta_mac_table(struct ksz_hw *hw, u16 addr, u8 *mac_addr,
1652 u8 ports, int override, int valid, int use_fid, u8 fid)
1653 {
1654 u32 data_hi;
1655 u32 data_lo;
1656
1657 data_lo = ((u32) mac_addr[2] << 24) |
1658 ((u32) mac_addr[3] << 16) |
1659 ((u32) mac_addr[4] << 8) | mac_addr[5];
1660 data_hi = ((u32) mac_addr[0] << 8) | mac_addr[1];
1661 data_hi |= (u32) ports << STATIC_MAC_FWD_PORTS_SHIFT;
1662
1663 if (override)
1664 data_hi |= STATIC_MAC_TABLE_OVERRIDE;
1665 if (use_fid) {
1666 data_hi |= STATIC_MAC_TABLE_USE_FID;
1667 data_hi |= (u32) fid << STATIC_MAC_FID_SHIFT;
1668 }
1669 if (valid)
1670 data_hi |= STATIC_MAC_TABLE_VALID;
1671
1672 sw_w_table_64(hw, TABLE_STATIC_MAC, addr, data_hi, data_lo);
1673 }
1674
/**
 * sw_r_vlan_table - read from the VLAN table
 * @hw: The hardware instance.
 * @addr: The address of the table entry.
 * @vid: Buffer to store the VID.
 * @fid: Buffer to store the FID.
 * @member: Buffer to store the port membership.
 *
 * This function reads an entry of the VLAN table of the switch. It calls
 * sw_r_table() to get the data.  The output buffers are only written when
 * the entry is valid.
 *
 * Return 0 if the entry is valid; otherwise -1.
 */
static int sw_r_vlan_table(struct ksz_hw *hw, u16 addr, u16 *vid, u8 *fid,
	u8 *member)
{
	u32 data;

	sw_r_table(hw, TABLE_VLAN, addr, &data);
	if (data & VLAN_TABLE_VALID) {
		*vid = (u16)(data & VLAN_TABLE_VID);
		*fid = (u8)((data & VLAN_TABLE_FID) >> VLAN_TABLE_FID_SHIFT);
		*member = (u8)((data & VLAN_TABLE_MEMBERSHIP) >>
			VLAN_TABLE_MEMBERSHIP_SHIFT);
		return 0;
	}
	return -1;
}
1703
/**
 * port_r_mib_cnt - read MIB counter
 * @hw: The hardware instance.
 * @port: The port index.
 * @addr: The address of the counter.
 * @cnt: Buffer to accumulate the counter value into.
 *
 * This routine reads a MIB counter of the port and adds it to @cnt,
 * compensating once for a hardware counter overflow.
 * Hardware interrupts are disabled to minimize corruption of read data.
 * If the valid bit never appears within the poll limit the counter is
 * silently left unchanged.
 */
static void port_r_mib_cnt(struct ksz_hw *hw, int port, u16 addr, u64 *cnt)
{
	u32 data;
	u16 ctrl_addr;
	uint interrupt;
	int timeout;

	ctrl_addr = addr + PORT_COUNTER_NUM * port;

	interrupt = hw_block_intr(hw);

	ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ) << 8);
	writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
	HW_DELAY(hw, KS884X_IACR_OFFSET);

	/* Poll until the hardware flags the read data as valid. */
	for (timeout = 100; timeout > 0; timeout--) {
		data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);

		if (data & MIB_COUNTER_VALID) {
			if (data & MIB_COUNTER_OVERFLOW)
				*cnt += MIB_COUNTER_VALUE + 1;
			*cnt += data & MIB_COUNTER_VALUE;
			break;
		}
	}

	hw_restore_intr(hw, interrupt);
}
1742
/**
 * port_r_mib_pkt - read dropped packet counts
 * @hw: The hardware instance.
 * @port: The port index.
 * @last: Buffer holding the previously read raw values; index 0 for RX,
 *	index 1 for TX.
 * @cnt: Buffer to store the receive and transmit dropped packet counts.
 *
 * This routine reads the dropped packet counts of the port.
 * Hardware interrupts are disabled to minimize corruption of read data.
 */
static void port_r_mib_pkt(struct ksz_hw *hw, int port, u32 *last, u64 *cnt)
{
	u32 cur;
	u32 data;
	u16 ctrl_addr;
	uint interrupt;
	int index;

	/* First iteration reads the RX dropped counter, second the TX one. */
	index = KS_MIB_PACKET_DROPPED_RX_0 + port;
	do {
		interrupt = hw_block_intr(hw);

		ctrl_addr = (u16) index;
		ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ)
			<< 8);
		writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
		HW_DELAY(hw, KS884X_IACR_OFFSET);
		data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);

		hw_restore_intr(hw, interrupt);

		/* Accumulate only the delta since the last read, allowing
		 * for one wraparound of the hardware counter.
		 */
		data &= MIB_PACKET_DROPPED;
		cur = *last;
		if (data != cur) {
			*last = data;
			if (data < cur)
				data += MIB_PACKET_DROPPED + 1;
			data -= cur;
			*cnt += data;
		}
		/* Advance to the TX entries of @last and @cnt. */
		++last;
		++cnt;
		index -= KS_MIB_PACKET_DROPPED_TX -
			KS_MIB_PACKET_DROPPED_TX_0 + 1;
	} while (index >= KS_MIB_PACKET_DROPPED_TX_0 + port);
}
1789
/**
 * port_r_cnt - read MIB counters periodically
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine is used to read the counters of the port periodically to avoid
 * counter overflow. The hardware should be acquired first before calling this
 * routine.
 *
 * Return 0 after all counters are read.
 */
static int port_r_cnt(struct ksz_hw *hw, int port)
{
	struct ksz_port_mib *mib = &hw->port_mib[port];

	if (mib->mib_start < PORT_COUNTER_NUM)
		while (mib->cnt_ptr < PORT_COUNTER_NUM) {
			port_r_mib_cnt(hw, port, mib->cnt_ptr,
				&mib->counter[mib->cnt_ptr]);
			++mib->cnt_ptr;
		}
	/* Dropped packet counts live past the regular counters. */
	if (hw->mib_cnt > PORT_COUNTER_NUM)
		port_r_mib_pkt(hw, port, mib->dropped,
			&mib->counter[PORT_COUNTER_NUM]);
	mib->cnt_ptr = 0;
	return 0;
}
1817
1818 /**
1819 * port_init_cnt - initialize MIB counter values
1820 * @hw: The hardware instance.
1821 * @port: The port index.
1822 *
1823 * This routine is used to initialize all counters to zero if the hardware
1824 * cannot do it after reset.
1825 */
port_init_cnt(struct ksz_hw * hw,int port)1826 static void port_init_cnt(struct ksz_hw *hw, int port)
1827 {
1828 struct ksz_port_mib *mib = &hw->port_mib[port];
1829
1830 mib->cnt_ptr = 0;
1831 if (mib->mib_start < PORT_COUNTER_NUM)
1832 do {
1833 port_r_mib_cnt(hw, port, mib->cnt_ptr,
1834 &mib->counter[mib->cnt_ptr]);
1835 ++mib->cnt_ptr;
1836 } while (mib->cnt_ptr < PORT_COUNTER_NUM);
1837 if (hw->mib_cnt > PORT_COUNTER_NUM)
1838 port_r_mib_pkt(hw, port, mib->dropped,
1839 &mib->counter[PORT_COUNTER_NUM]);
1840 memset((void *) mib->counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
1841 mib->cnt_ptr = 0;
1842 }
1843
1844 /*
1845 * Port functions
1846 */
1847
1848 /**
1849 * port_cfg - set port register bits
1850 * @hw: The hardware instance.
1851 * @port: The port index.
1852 * @offset: The offset of the port register.
1853 * @bits: The data bits to set.
1854 * @set: The flag indicating whether the bits are to be set or not.
1855 *
1856 * This routine sets or resets the specified bits of the port register.
1857 */
port_cfg(struct ksz_hw * hw,int port,int offset,u16 bits,int set)1858 static void port_cfg(struct ksz_hw *hw, int port, int offset, u16 bits,
1859 int set)
1860 {
1861 u32 addr;
1862 u16 data;
1863
1864 PORT_CTRL_ADDR(port, addr);
1865 addr += offset;
1866 data = readw(hw->io + addr);
1867 if (set)
1868 data |= bits;
1869 else
1870 data &= ~bits;
1871 writew(data, hw->io + addr);
1872 }
1873
/**
 * port_r8 - read byte from port register
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @data: Buffer to store the data.
 *
 * This routine reads a byte from the port register.
 */
static void port_r8(struct ksz_hw *hw, int port, int offset, u8 *data)
{
	u32 addr;

	PORT_CTRL_ADDR(port, addr);
	addr += offset;
	*data = readb(hw->io + addr);
}
1891
/**
 * port_r16 - read word from port register.
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @data: Buffer to store the data.
 *
 * This routine reads a word from the port register.
 */
static void port_r16(struct ksz_hw *hw, int port, int offset, u16 *data)
{
	u32 addr;

	PORT_CTRL_ADDR(port, addr);
	addr += offset;
	*data = readw(hw->io + addr);
}
1909
/**
 * port_w16 - write word to port register.
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @data: Data to write.
 *
 * This routine writes a word to the port register.
 */
static void port_w16(struct ksz_hw *hw, int port, int offset, u16 data)
{
	u32 addr;

	PORT_CTRL_ADDR(port, addr);
	addr += offset;
	writew(data, hw->io + addr);
}
1927
1928 /**
1929 * sw_chk - check switch register bits
1930 * @hw: The hardware instance.
1931 * @addr: The address of the switch register.
1932 * @bits: The data bits to check.
1933 *
1934 * This function checks whether the specified bits of the switch register are
1935 * set or not.
1936 *
1937 * Return 0 if the bits are not set.
1938 */
sw_chk(struct ksz_hw * hw,u32 addr,u16 bits)1939 static int sw_chk(struct ksz_hw *hw, u32 addr, u16 bits)
1940 {
1941 u16 data;
1942
1943 data = readw(hw->io + addr);
1944 return (data & bits) == bits;
1945 }
1946
1947 /**
1948 * sw_cfg - set switch register bits
1949 * @hw: The hardware instance.
1950 * @addr: The address of the switch register.
1951 * @bits: The data bits to set.
1952 * @set: The flag indicating whether the bits are to be set or not.
1953 *
1954 * This function sets or resets the specified bits of the switch register.
1955 */
sw_cfg(struct ksz_hw * hw,u32 addr,u16 bits,int set)1956 static void sw_cfg(struct ksz_hw *hw, u32 addr, u16 bits, int set)
1957 {
1958 u16 data;
1959
1960 data = readw(hw->io + addr);
1961 if (set)
1962 data |= bits;
1963 else
1964 data &= ~bits;
1965 writew(data, hw->io + addr);
1966 }
1967
1968 /* Bandwidth */
1969
/* Enable or disable broadcast storm protection for one port. */
static inline void port_cfg_broad_storm(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM, set);
}
1975
1976 /* Driver set switch broadcast storm protection at 10% rate. */
1977 #define BROADCAST_STORM_PROTECTION_RATE 10
1978
1979 /* 148,800 frames * 67 ms / 100 */
1980 #define BROADCAST_STORM_VALUE 9969
1981
/**
 * sw_cfg_broad_storm - configure broadcast storm threshold
 * @hw: The hardware instance.
 * @percent: Broadcast storm threshold in percent of transmit rate.
 *
 * This routine configures the broadcast storm threshold of the switch.
 */
static void sw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
{
	u16 data;
	u32 value = ((u32) BROADCAST_STORM_VALUE * (u32) percent / 100);

	if (value > BROADCAST_STORM_RATE)
		value = BROADCAST_STORM_RATE;

	data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
	data &= ~(BROADCAST_STORM_RATE_LO | BROADCAST_STORM_RATE_HI);
	/* The rate field is stored byte-swapped in the register. */
	data |= ((value & 0x00FF) << 8) | ((value & 0xFF00) >> 8);
	writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
}
2002
/**
 * sw_get_broad_storm - get broadcast storm threshold
 * @hw: The hardware instance.
 * @percent: Buffer to store the broadcast storm threshold percentage.
 *
 * This routine retrieves the broadcast storm threshold of the switch,
 * undoing the byte swap applied by sw_cfg_broad_storm().
 */
static void sw_get_broad_storm(struct ksz_hw *hw, u8 *percent)
{
	int num;
	u16 data;

	data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
	num = (data & BROADCAST_STORM_RATE_HI);
	num <<= 8;
	num |= (data & BROADCAST_STORM_RATE_LO) >> 8;
	num = DIV_ROUND_CLOSEST(num * 100, BROADCAST_STORM_VALUE);
	*percent = (u8) num;
}
2022
/**
 * sw_dis_broad_storm - disable broadstorm
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the broadcast storm limit function of the switch.
 */
static void sw_dis_broad_storm(struct ksz_hw *hw, int port)
{
	port_cfg_broad_storm(hw, port, 0);
}
2034
/**
 * sw_ena_broad_storm - enable broadcast storm
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine enables the broadcast storm limit function of the switch
 * using the threshold percentage cached in the switch structure.
 */
static void sw_ena_broad_storm(struct ksz_hw *hw, int port)
{
	sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
	port_cfg_broad_storm(hw, port, 1);
}
2047
/**
 * sw_init_broad_storm - initialize broadcast storm
 * @hw: The hardware instance.
 *
 * This routine initializes the broadcast storm limit function of the switch:
 * threshold set to 1 percent, protection disabled on every port, and
 * multicast packets excluded from storm protection.
 */
static void sw_init_broad_storm(struct ksz_hw *hw)
{
	int port;

	hw->ksz_switch->broad_per = 1;
	sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
	for (port = 0; port < TOTAL_PORT_NUM; port++)
		sw_dis_broad_storm(hw, port);
	sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, MULTICAST_STORM_DISABLE, 1);
}
2064
2065 /**
2066 * hw_cfg_broad_storm - configure broadcast storm
2067 * @hw: The hardware instance.
2068 * @percent: Broadcast storm threshold in percent of transmit rate.
2069 *
2070 * This routine configures the broadcast storm threshold of the switch.
2071 * It is called by user functions. The hardware should be acquired first.
2072 */
hw_cfg_broad_storm(struct ksz_hw * hw,u8 percent)2073 static void hw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
2074 {
2075 if (percent > 100)
2076 percent = 100;
2077
2078 sw_cfg_broad_storm(hw, percent);
2079 sw_get_broad_storm(hw, &percent);
2080 hw->ksz_switch->broad_per = percent;
2081 }
2082
2083 /**
2084 * sw_dis_prio_rate - disable switch priority rate
2085 * @hw: The hardware instance.
2086 * @port: The port index.
2087 *
2088 * This routine disables the priority rate function of the switch.
2089 */
sw_dis_prio_rate(struct ksz_hw * hw,int port)2090 static void sw_dis_prio_rate(struct ksz_hw *hw, int port)
2091 {
2092 u32 addr;
2093
2094 PORT_CTRL_ADDR(port, addr);
2095 addr += KS8842_PORT_IN_RATE_OFFSET;
2096 writel(0, hw->io + addr);
2097 }
2098
2099 /**
2100 * sw_init_prio_rate - initialize switch priority rate
2101 * @hw: The hardware instance.
2102 *
2103 * This routine initializes the priority rate function of the switch.
2104 */
sw_init_prio_rate(struct ksz_hw * hw)2105 static void sw_init_prio_rate(struct ksz_hw *hw)
2106 {
2107 int port;
2108 int prio;
2109 struct ksz_switch *sw = hw->ksz_switch;
2110
2111 for (port = 0; port < TOTAL_PORT_NUM; port++) {
2112 for (prio = 0; prio < PRIO_QUEUES; prio++) {
2113 sw->port_cfg[port].rx_rate[prio] =
2114 sw->port_cfg[port].tx_rate[prio] = 0;
2115 }
2116 sw_dis_prio_rate(hw, port);
2117 }
2118 }
2119
2120 /* Communication */
2121
/* Enable/disable half-duplex back pressure on port @p. */
static inline void port_cfg_back_pressure(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE, set);
}
2127
2128 /* Mirroring */
2129
/* Mark/unmark port @p as the sniffer (monitoring) port for mirroring. */
static inline void port_cfg_mirror_sniffer(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_SNIFFER, set);
}
2135
/* Enable/disable mirroring of frames received on port @p. */
static inline void port_cfg_mirror_rx(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_RX, set);
}
2141
/* Enable/disable mirroring of frames transmitted on port @p. */
static inline void port_cfg_mirror_tx(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_TX, set);
}
2147
/* Global switch setting: mirror only frames matching both RX and TX rules. */
static inline void sw_cfg_mirror_rx_tx(struct ksz_hw *hw, int set)
{
	sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, SWITCH_MIRROR_RX_TX, set);
}
2152
/* Initialize port mirroring: everything off on every port. */
static void sw_init_mirror(struct ksz_hw *hw)
{
	int port;

	for (port = 0; port < TOTAL_PORT_NUM; port++) {
		port_cfg_mirror_sniffer(hw, port, 0);
		port_cfg_mirror_rx(hw, port, 0);
		port_cfg_mirror_tx(hw, port, 0);
	}
	sw_cfg_mirror_rx_tx(hw, 0);
}
2164
2165 /* Priority */
2166
/* Enable/disable DiffServ (TOS-based) priority classification on port @p. */
static inline void port_cfg_diffserv(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE, set);
}
2172
/* Enable/disable 802.1p tag-based priority classification on port @p. */
static inline void port_cfg_802_1p(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE, set);
}
2178
/* Enable/disable the user-priority ceiling (802.1p re-mapping) on port @p. */
static inline void port_cfg_replace_vid(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING, set);
}
2184
/* Enable/disable multiple transmit priority queues on port @p. */
static inline void port_cfg_prio(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE, set);
}
2190
2191 /**
2192 * sw_dis_diffserv - disable switch DiffServ priority
2193 * @hw: The hardware instance.
2194 * @port: The port index.
2195 *
2196 * This routine disables the DiffServ priority function of the switch.
2197 */
sw_dis_diffserv(struct ksz_hw * hw,int port)2198 static void sw_dis_diffserv(struct ksz_hw *hw, int port)
2199 {
2200 port_cfg_diffserv(hw, port, 0);
2201 }
2202
2203 /**
2204 * sw_dis_802_1p - disable switch 802.1p priority
2205 * @hw: The hardware instance.
2206 * @port: The port index.
2207 *
2208 * This routine disables the 802.1p priority function of the switch.
2209 */
sw_dis_802_1p(struct ksz_hw * hw,int port)2210 static void sw_dis_802_1p(struct ksz_hw *hw, int port)
2211 {
2212 port_cfg_802_1p(hw, port, 0);
2213 }
2214
2215 /**
2216 * sw_cfg_replace_null_vid -
2217 * @hw: The hardware instance.
2218 * @set: The flag to disable or enable.
2219 *
2220 */
sw_cfg_replace_null_vid(struct ksz_hw * hw,int set)2221 static void sw_cfg_replace_null_vid(struct ksz_hw *hw, int set)
2222 {
2223 sw_cfg(hw, KS8842_SWITCH_CTRL_3_OFFSET, SWITCH_REPLACE_NULL_VID, set);
2224 }
2225
2226 /**
2227 * sw_cfg_replace_vid - enable switch 802.10 priority re-mapping
2228 * @hw: The hardware instance.
2229 * @port: The port index.
2230 * @set: The flag to disable or enable.
2231 *
2232 * This routine enables the 802.1p priority re-mapping function of the switch.
2233 * That allows 802.1p priority field to be replaced with the port's default
2234 * tag's priority value if the ingress packet's 802.1p priority has a higher
2235 * priority than port's default tag's priority.
2236 */
sw_cfg_replace_vid(struct ksz_hw * hw,int port,int set)2237 static void sw_cfg_replace_vid(struct ksz_hw *hw, int port, int set)
2238 {
2239 port_cfg_replace_vid(hw, port, set);
2240 }
2241
2242 /**
2243 * sw_cfg_port_based - configure switch port based priority
2244 * @hw: The hardware instance.
2245 * @port: The port index.
2246 * @prio: The priority to set.
2247 *
2248 * This routine configures the port based priority of the switch.
2249 */
sw_cfg_port_based(struct ksz_hw * hw,int port,u8 prio)2250 static void sw_cfg_port_based(struct ksz_hw *hw, int port, u8 prio)
2251 {
2252 u16 data;
2253
2254 if (prio > PORT_BASED_PRIORITY_BASE)
2255 prio = PORT_BASED_PRIORITY_BASE;
2256
2257 hw->ksz_switch->port_cfg[port].port_prio = prio;
2258
2259 port_r16(hw, port, KS8842_PORT_CTRL_1_OFFSET, &data);
2260 data &= ~PORT_BASED_PRIORITY_MASK;
2261 data |= prio << PORT_BASED_PRIORITY_SHIFT;
2262 port_w16(hw, port, KS8842_PORT_CTRL_1_OFFSET, data);
2263 }
2264
2265 /**
2266 * sw_dis_multi_queue - disable transmit multiple queues
2267 * @hw: The hardware instance.
2268 * @port: The port index.
2269 *
2270 * This routine disables the transmit multiple queues selection of the switch
2271 * port. Only single transmit queue on the port.
2272 */
sw_dis_multi_queue(struct ksz_hw * hw,int port)2273 static void sw_dis_multi_queue(struct ksz_hw *hw, int port)
2274 {
2275 port_cfg_prio(hw, port, 0);
2276 }
2277
2278 /**
2279 * sw_init_prio - initialize switch priority
2280 * @hw: The hardware instance.
2281 *
2282 * This routine initializes the switch QoS priority functions.
2283 */
sw_init_prio(struct ksz_hw * hw)2284 static void sw_init_prio(struct ksz_hw *hw)
2285 {
2286 int port;
2287 int tos;
2288 struct ksz_switch *sw = hw->ksz_switch;
2289
2290 /*
2291 * Init all the 802.1p tag priority value to be assigned to different
2292 * priority queue.
2293 */
2294 sw->p_802_1p[0] = 0;
2295 sw->p_802_1p[1] = 0;
2296 sw->p_802_1p[2] = 1;
2297 sw->p_802_1p[3] = 1;
2298 sw->p_802_1p[4] = 2;
2299 sw->p_802_1p[5] = 2;
2300 sw->p_802_1p[6] = 3;
2301 sw->p_802_1p[7] = 3;
2302
2303 /*
2304 * Init all the DiffServ priority value to be assigned to priority
2305 * queue 0.
2306 */
2307 for (tos = 0; tos < DIFFSERV_ENTRIES; tos++)
2308 sw->diffserv[tos] = 0;
2309
2310 /* All QoS functions disabled. */
2311 for (port = 0; port < TOTAL_PORT_NUM; port++) {
2312 sw_dis_multi_queue(hw, port);
2313 sw_dis_diffserv(hw, port);
2314 sw_dis_802_1p(hw, port);
2315 sw_cfg_replace_vid(hw, port, 0);
2316
2317 sw->port_cfg[port].port_prio = 0;
2318 sw_cfg_port_based(hw, port, sw->port_cfg[port].port_prio);
2319 }
2320 sw_cfg_replace_null_vid(hw, 0);
2321 }
2322
2323 /**
2324 * port_get_def_vid - get port default VID.
2325 * @hw: The hardware instance.
2326 * @port: The port index.
2327 * @vid: Buffer to store the VID.
2328 *
2329 * This routine retrieves the default VID of the port.
2330 */
port_get_def_vid(struct ksz_hw * hw,int port,u16 * vid)2331 static void port_get_def_vid(struct ksz_hw *hw, int port, u16 *vid)
2332 {
2333 u32 addr;
2334
2335 PORT_CTRL_ADDR(port, addr);
2336 addr += KS8842_PORT_CTRL_VID_OFFSET;
2337 *vid = readw(hw->io + addr);
2338 }
2339
2340 /**
2341 * sw_init_vlan - initialize switch VLAN
2342 * @hw: The hardware instance.
2343 *
2344 * This routine initializes the VLAN function of the switch.
2345 */
sw_init_vlan(struct ksz_hw * hw)2346 static void sw_init_vlan(struct ksz_hw *hw)
2347 {
2348 int port;
2349 int entry;
2350 struct ksz_switch *sw = hw->ksz_switch;
2351
2352 /* Read 16 VLAN entries from device's VLAN table. */
2353 for (entry = 0; entry < VLAN_TABLE_ENTRIES; entry++) {
2354 sw_r_vlan_table(hw, entry,
2355 &sw->vlan_table[entry].vid,
2356 &sw->vlan_table[entry].fid,
2357 &sw->vlan_table[entry].member);
2358 }
2359
2360 for (port = 0; port < TOTAL_PORT_NUM; port++) {
2361 port_get_def_vid(hw, port, &sw->port_cfg[port].vid);
2362 sw->port_cfg[port].member = PORT_MASK;
2363 }
2364 }
2365
2366 /**
2367 * sw_cfg_port_base_vlan - configure port-based VLAN membership
2368 * @hw: The hardware instance.
2369 * @port: The port index.
2370 * @member: The port-based VLAN membership.
2371 *
2372 * This routine configures the port-based VLAN membership of the port.
2373 */
sw_cfg_port_base_vlan(struct ksz_hw * hw,int port,u8 member)2374 static void sw_cfg_port_base_vlan(struct ksz_hw *hw, int port, u8 member)
2375 {
2376 u32 addr;
2377 u8 data;
2378
2379 PORT_CTRL_ADDR(port, addr);
2380 addr += KS8842_PORT_CTRL_2_OFFSET;
2381
2382 data = readb(hw->io + addr);
2383 data &= ~PORT_VLAN_MEMBERSHIP;
2384 data |= (member & PORT_MASK);
2385 writeb(data, hw->io + addr);
2386
2387 hw->ksz_switch->port_cfg[port].member = member;
2388 }
2389
2390 /**
2391 * sw_set_addr - configure switch MAC address
2392 * @hw: The hardware instance.
2393 * @mac_addr: The MAC address.
2394 *
2395 * This function configures the MAC address of the switch.
2396 */
sw_set_addr(struct ksz_hw * hw,u8 * mac_addr)2397 static void sw_set_addr(struct ksz_hw *hw, u8 *mac_addr)
2398 {
2399 int i;
2400
2401 for (i = 0; i < 6; i += 2) {
2402 writeb(mac_addr[i], hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
2403 writeb(mac_addr[1 + i], hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
2404 }
2405 }
2406
2407 /**
2408 * sw_set_global_ctrl - set switch global control
2409 * @hw: The hardware instance.
2410 *
2411 * This routine sets the global control of the switch function.
2412 */
sw_set_global_ctrl(struct ksz_hw * hw)2413 static void sw_set_global_ctrl(struct ksz_hw *hw)
2414 {
2415 u16 data;
2416
2417 /* Enable switch MII flow control. */
2418 data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
2419 data |= SWITCH_FLOW_CTRL;
2420 writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
2421
2422 data = readw(hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
2423
2424 /* Enable aggressive back off algorithm in half duplex mode. */
2425 data |= SWITCH_AGGR_BACKOFF;
2426
2427 /* Enable automatic fast aging when link changed detected. */
2428 data |= SWITCH_AGING_ENABLE;
2429 data |= SWITCH_LINK_AUTO_AGING;
2430
2431 if (hw->overrides & FAST_AGING)
2432 data |= SWITCH_FAST_AGING;
2433 else
2434 data &= ~SWITCH_FAST_AGING;
2435 writew(data, hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
2436
2437 data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
2438
2439 /* Enable no excessive collision drop. */
2440 data |= NO_EXC_COLLISION_DROP;
2441 writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
2442 }
2443
/* Spanning tree port states consumed by port_set_stp_state(). */
enum {
	STP_STATE_DISABLED = 0,	/* no TX/RX, no learning */
	STP_STATE_LISTENING,	/* RX only, no learning */
	STP_STATE_LEARNING,	/* RX only, learning on */
	STP_STATE_FORWARDING,	/* full TX/RX, learning on */
	STP_STATE_BLOCKED,	/* no TX/RX, no learning */
	STP_STATE_SIMPLE	/* full TX/RX, no learning */
};
2452
2453 /**
2454 * port_set_stp_state - configure port spanning tree state
2455 * @hw: The hardware instance.
2456 * @port: The port index.
2457 * @state: The spanning tree state.
2458 *
2459 * This routine configures the spanning tree state of the port.
2460 */
port_set_stp_state(struct ksz_hw * hw,int port,int state)2461 static void port_set_stp_state(struct ksz_hw *hw, int port, int state)
2462 {
2463 u16 data;
2464
2465 port_r16(hw, port, KS8842_PORT_CTRL_2_OFFSET, &data);
2466 switch (state) {
2467 case STP_STATE_DISABLED:
2468 data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
2469 data |= PORT_LEARN_DISABLE;
2470 break;
2471 case STP_STATE_LISTENING:
2472 /*
2473 * No need to turn on transmit because of port direct mode.
2474 * Turning on receive is required if static MAC table is not setup.
2475 */
2476 data &= ~PORT_TX_ENABLE;
2477 data |= PORT_RX_ENABLE;
2478 data |= PORT_LEARN_DISABLE;
2479 break;
2480 case STP_STATE_LEARNING:
2481 data &= ~PORT_TX_ENABLE;
2482 data |= PORT_RX_ENABLE;
2483 data &= ~PORT_LEARN_DISABLE;
2484 break;
2485 case STP_STATE_FORWARDING:
2486 data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
2487 data &= ~PORT_LEARN_DISABLE;
2488 break;
2489 case STP_STATE_BLOCKED:
2490 /*
2491 * Need to setup static MAC table with override to keep receiving BPDU
2492 * messages. See sw_init_stp routine.
2493 */
2494 data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
2495 data |= PORT_LEARN_DISABLE;
2496 break;
2497 case STP_STATE_SIMPLE:
2498 data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
2499 data |= PORT_LEARN_DISABLE;
2500 break;
2501 }
2502 port_w16(hw, port, KS8842_PORT_CTRL_2_OFFSET, data);
2503 hw->ksz_switch->port_cfg[port].stp_state = state;
2504 }
2505
2506 #define STP_ENTRY 0
2507 #define BROADCAST_ENTRY 1
2508 #define BRIDGE_ADDR_ENTRY 2
2509 #define IPV6_ADDR_ENTRY 3
2510
2511 /**
2512 * sw_clr_sta_mac_table - clear static MAC table
2513 * @hw: The hardware instance.
2514 *
2515 * This routine clears the static MAC table.
2516 */
sw_clr_sta_mac_table(struct ksz_hw * hw)2517 static void sw_clr_sta_mac_table(struct ksz_hw *hw)
2518 {
2519 struct ksz_mac_table *entry;
2520 int i;
2521
2522 for (i = 0; i < STATIC_MAC_TABLE_ENTRIES; i++) {
2523 entry = &hw->ksz_switch->mac_table[i];
2524 sw_w_sta_mac_table(hw, i,
2525 entry->mac_addr, entry->ports,
2526 entry->override, 0,
2527 entry->use_fid, entry->fid);
2528 }
2529 }
2530
2531 /**
2532 * sw_init_stp - initialize switch spanning tree support
2533 * @hw: The hardware instance.
2534 *
2535 * This routine initializes the spanning tree support of the switch.
2536 */
sw_init_stp(struct ksz_hw * hw)2537 static void sw_init_stp(struct ksz_hw *hw)
2538 {
2539 struct ksz_mac_table *entry;
2540
2541 entry = &hw->ksz_switch->mac_table[STP_ENTRY];
2542 entry->mac_addr[0] = 0x01;
2543 entry->mac_addr[1] = 0x80;
2544 entry->mac_addr[2] = 0xC2;
2545 entry->mac_addr[3] = 0x00;
2546 entry->mac_addr[4] = 0x00;
2547 entry->mac_addr[5] = 0x00;
2548 entry->ports = HOST_MASK;
2549 entry->override = 1;
2550 entry->valid = 1;
2551 sw_w_sta_mac_table(hw, STP_ENTRY,
2552 entry->mac_addr, entry->ports,
2553 entry->override, entry->valid,
2554 entry->use_fid, entry->fid);
2555 }
2556
2557 /**
2558 * sw_block_addr - block certain packets from the host port
2559 * @hw: The hardware instance.
2560 *
2561 * This routine blocks certain packets from reaching to the host port.
2562 */
sw_block_addr(struct ksz_hw * hw)2563 static void sw_block_addr(struct ksz_hw *hw)
2564 {
2565 struct ksz_mac_table *entry;
2566 int i;
2567
2568 for (i = BROADCAST_ENTRY; i <= IPV6_ADDR_ENTRY; i++) {
2569 entry = &hw->ksz_switch->mac_table[i];
2570 entry->valid = 0;
2571 sw_w_sta_mac_table(hw, i,
2572 entry->mac_addr, entry->ports,
2573 entry->override, entry->valid,
2574 entry->use_fid, entry->fid);
2575 }
2576 }
2577
/* Read the PHY control register at base offset @phy. */
static inline void hw_r_phy_ctrl(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}
2582
/* Write the PHY control register at base offset @phy. */
static inline void hw_w_phy_ctrl(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}
2587
2588 /**
2589 * hw_r_phy - read data from PHY register
2590 * @hw: The hardware instance.
2591 * @port: Port to read.
2592 * @reg: PHY register to read.
2593 * @val: Buffer to store the read data.
2594 *
2595 * This routine reads data from the PHY register.
2596 */
hw_r_phy(struct ksz_hw * hw,int port,u16 reg,u16 * val)2597 static void hw_r_phy(struct ksz_hw *hw, int port, u16 reg, u16 *val)
2598 {
2599 int phy;
2600
2601 phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
2602 *val = readw(hw->io + phy);
2603 }
2604
2605 /**
2606 * hw_w_phy - write data to PHY register
2607 * @hw: The hardware instance.
2608 * @port: Port to write.
2609 * @reg: PHY register to write.
2610 * @val: Word data to write.
2611 *
2612 * This routine writes data to the PHY register.
2613 */
hw_w_phy(struct ksz_hw * hw,int port,u16 reg,u16 val)2614 static void hw_w_phy(struct ksz_hw *hw, int port, u16 reg, u16 val)
2615 {
2616 int phy;
2617
2618 phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
2619 writew(val, hw->io + phy);
2620 }
2621
2622 /*
2623 * EEPROM access functions
2624 */
2625
2626 #define AT93C_CODE 0
2627 #define AT93C_WR_OFF 0x00
2628 #define AT93C_WR_ALL 0x10
2629 #define AT93C_ER_ALL 0x20
2630 #define AT93C_WR_ON 0x30
2631
2632 #define AT93C_WRITE 1
2633 #define AT93C_READ 2
2634 #define AT93C_ERASE 3
2635
2636 #define EEPROM_DELAY 4
2637
/* Drive the given EEPROM control line(s) low (bit-banged interface). */
static inline void drop_gpio(struct ksz_hw *hw, u8 gpio)
{
	u16 data;

	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	data &= ~gpio;
	writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
}
2646
/* Drive the given EEPROM control line(s) high (bit-banged interface). */
static inline void raise_gpio(struct ksz_hw *hw, u8 gpio)
{
	u16 data;

	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	data |= gpio;
	writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
}
2655
/* Sample the given EEPROM control line(s); nonzero if any line is high. */
static inline u8 state_gpio(struct ksz_hw *hw, u8 gpio)
{
	u16 data;

	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	return (u8)(data & gpio);
}
2663
/* Generate one serial clock pulse on the EEPROM clock line. */
static void eeprom_clk(struct ksz_hw *hw)
{
	raise_gpio(hw, EEPROM_SERIAL_CLOCK);
	udelay(EEPROM_DELAY);
	drop_gpio(hw, EEPROM_SERIAL_CLOCK);
	udelay(EEPROM_DELAY);
}
2671
spi_r(struct ksz_hw * hw)2672 static u16 spi_r(struct ksz_hw *hw)
2673 {
2674 int i;
2675 u16 temp = 0;
2676
2677 for (i = 15; i >= 0; i--) {
2678 raise_gpio(hw, EEPROM_SERIAL_CLOCK);
2679 udelay(EEPROM_DELAY);
2680
2681 temp |= (state_gpio(hw, EEPROM_DATA_IN)) ? 1 << i : 0;
2682
2683 drop_gpio(hw, EEPROM_SERIAL_CLOCK);
2684 udelay(EEPROM_DELAY);
2685 }
2686 return temp;
2687 }
2688
/* Shift out a 16-bit word on the EEPROM data-out line, MSB first. */
static void spi_w(struct ksz_hw *hw, u16 data)
{
	int i;

	for (i = 15; i >= 0; i--) {
		/* if/else instead of a ternary used as a statement. */
		if (data & (0x01 << i))
			raise_gpio(hw, EEPROM_DATA_OUT);
		else
			drop_gpio(hw, EEPROM_DATA_OUT);
		eeprom_clk(hw);
	}
}
2699
spi_reg(struct ksz_hw * hw,u8 data,u8 reg)2700 static void spi_reg(struct ksz_hw *hw, u8 data, u8 reg)
2701 {
2702 int i;
2703
2704 /* Initial start bit */
2705 raise_gpio(hw, EEPROM_DATA_OUT);
2706 eeprom_clk(hw);
2707
2708 /* AT93C operation */
2709 for (i = 1; i >= 0; i--) {
2710 (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
2711 drop_gpio(hw, EEPROM_DATA_OUT);
2712 eeprom_clk(hw);
2713 }
2714
2715 /* Address location */
2716 for (i = 5; i >= 0; i--) {
2717 (reg & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
2718 drop_gpio(hw, EEPROM_DATA_OUT);
2719 eeprom_clk(hw);
2720 }
2721 }
2722
2723 #define EEPROM_DATA_RESERVED 0
2724 #define EEPROM_DATA_MAC_ADDR_0 1
2725 #define EEPROM_DATA_MAC_ADDR_1 2
2726 #define EEPROM_DATA_MAC_ADDR_2 3
2727 #define EEPROM_DATA_SUBSYS_ID 4
2728 #define EEPROM_DATA_SUBSYS_VEN_ID 5
2729 #define EEPROM_DATA_PM_CAP 6
2730
2731 /* User defined EEPROM data */
2732 #define EEPROM_DATA_OTHER_MAC_ADDR 9
2733
2734 /**
2735 * eeprom_read - read from AT93C46 EEPROM
2736 * @hw: The hardware instance.
2737 * @reg: The register offset.
2738 *
2739 * This function reads a word from the AT93C46 EEPROM.
2740 *
2741 * Return the data value.
2742 */
eeprom_read(struct ksz_hw * hw,u8 reg)2743 static u16 eeprom_read(struct ksz_hw *hw, u8 reg)
2744 {
2745 u16 data;
2746
2747 raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
2748
2749 spi_reg(hw, AT93C_READ, reg);
2750 data = spi_r(hw);
2751
2752 drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
2753
2754 return data;
2755 }
2756
2757 /**
2758 * eeprom_write - write to AT93C46 EEPROM
2759 * @hw: The hardware instance.
2760 * @reg: The register offset.
2761 * @data: The data value.
2762 *
2763 * This procedure writes a word to the AT93C46 EEPROM.
2764 */
eeprom_write(struct ksz_hw * hw,u8 reg,u16 data)2765 static void eeprom_write(struct ksz_hw *hw, u8 reg, u16 data)
2766 {
2767 int timeout;
2768
2769 raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
2770
2771 /* Enable write. */
2772 spi_reg(hw, AT93C_CODE, AT93C_WR_ON);
2773 drop_gpio(hw, EEPROM_CHIP_SELECT);
2774 udelay(1);
2775
2776 /* Erase the register. */
2777 raise_gpio(hw, EEPROM_CHIP_SELECT);
2778 spi_reg(hw, AT93C_ERASE, reg);
2779 drop_gpio(hw, EEPROM_CHIP_SELECT);
2780 udelay(1);
2781
2782 /* Check operation complete. */
2783 raise_gpio(hw, EEPROM_CHIP_SELECT);
2784 timeout = 8;
2785 mdelay(2);
2786 do {
2787 mdelay(1);
2788 } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
2789 drop_gpio(hw, EEPROM_CHIP_SELECT);
2790 udelay(1);
2791
2792 /* Write the register. */
2793 raise_gpio(hw, EEPROM_CHIP_SELECT);
2794 spi_reg(hw, AT93C_WRITE, reg);
2795 spi_w(hw, data);
2796 drop_gpio(hw, EEPROM_CHIP_SELECT);
2797 udelay(1);
2798
2799 /* Check operation complete. */
2800 raise_gpio(hw, EEPROM_CHIP_SELECT);
2801 timeout = 8;
2802 mdelay(2);
2803 do {
2804 mdelay(1);
2805 } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
2806 drop_gpio(hw, EEPROM_CHIP_SELECT);
2807 udelay(1);
2808
2809 /* Disable write. */
2810 raise_gpio(hw, EEPROM_CHIP_SELECT);
2811 spi_reg(hw, AT93C_CODE, AT93C_WR_OFF);
2812
2813 drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
2814 }
2815
2816 /*
2817 * Link detection routines
2818 */
2819
/*
 * Fold the port's configured flow control mode into the auto-negotiation
 * advertisement word.  Only symmetric PAUSE can be advertised; the TX-only
 * and RX-only modes are not supported.
 */
static u16 advertised_flow_ctrl(struct ksz_port *port, u16 ctrl)
{
	ctrl &= ~PORT_AUTO_NEG_SYM_PAUSE;
	if (port->flow_ctrl == PHY_FLOW_CTRL)
		ctrl |= PORT_AUTO_NEG_SYM_PAUSE;
	return ctrl;
}
2835
set_flow_ctrl(struct ksz_hw * hw,int rx,int tx)2836 static void set_flow_ctrl(struct ksz_hw *hw, int rx, int tx)
2837 {
2838 u32 rx_cfg;
2839 u32 tx_cfg;
2840
2841 rx_cfg = hw->rx_cfg;
2842 tx_cfg = hw->tx_cfg;
2843 if (rx)
2844 hw->rx_cfg |= DMA_RX_FLOW_ENABLE;
2845 else
2846 hw->rx_cfg &= ~DMA_RX_FLOW_ENABLE;
2847 if (tx)
2848 hw->tx_cfg |= DMA_TX_FLOW_ENABLE;
2849 else
2850 hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
2851 if (hw->enabled) {
2852 if (rx_cfg != hw->rx_cfg)
2853 writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
2854 if (tx_cfg != hw->tx_cfg)
2855 writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
2856 }
2857 }
2858
/*
 * Resolve the pause (flow control) result from the local advertisement and
 * the link partner abilities after auto-negotiation, then apply it to the
 * DMA configuration.  The symmetric/asymmetric resolution follows the
 * ADVERTISE_PAUSE_* / LPA_PAUSE_* bit combinations; presumably this matches
 * the IEEE 802.3 Annex 28B pause resolution table -- confirm if changing.
 * Nothing is done when the user forced flow control via PAUSE_FLOW_CTRL.
 */
static void determine_flow_ctrl(struct ksz_hw *hw, struct ksz_port *port,
	u16 local, u16 remote)
{
	int rx;
	int tx;

	if (hw->overrides & PAUSE_FLOW_CTRL)
		return;

	rx = tx = 0;
	if (port->force_link)
		rx = tx = 1;
	if (remote & LPA_PAUSE_CAP) {
		if (local & ADVERTISE_PAUSE_CAP) {
			rx = tx = 1;
		} else if ((remote & LPA_PAUSE_ASYM) &&
			   (local &
			    (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) ==
			   ADVERTISE_PAUSE_ASYM) {
			tx = 1;
		}
	} else if (remote & LPA_PAUSE_ASYM) {
		if ((local & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM))
		    == (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM))
			rx = 1;
	}
	/* With a switch the MAC-level flow control is managed elsewhere. */
	if (!hw->ksz_switch)
		set_flow_ctrl(hw, rx, tx);
}
2888
/*
 * React to a duplex change on a link: on hardware with the half-duplex
 * signal bug, transmit flow control must be turned off in half-duplex mode
 * (unless the user forced flow control).  Only writes the register when the
 * DMA is enabled and the configuration actually changed.
 */
static inline void port_cfg_change(struct ksz_hw *hw, struct ksz_port *port,
	struct ksz_port_info *info, u16 link_status)
{
	if ((hw->features & HALF_DUPLEX_SIGNAL_BUG) &&
	    !(hw->overrides & PAUSE_FLOW_CTRL)) {
		u32 cfg = hw->tx_cfg;

		/* Disable flow control in the half duplex mode. */
		if (1 == info->duplex)
			hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
		if (hw->enabled && cfg != hw->tx_cfg)
			writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
	}
}
2903
2904 /**
2905 * port_get_link_speed - get current link status
2906 * @port: The port instance.
2907 *
2908 * This routine reads PHY registers to determine the current link status of the
2909 * switch ports.
2910 */
port_get_link_speed(struct ksz_port * port)2911 static void port_get_link_speed(struct ksz_port *port)
2912 {
2913 uint interrupt;
2914 struct ksz_port_info *info;
2915 struct ksz_port_info *linked = NULL;
2916 struct ksz_hw *hw = port->hw;
2917 u16 data;
2918 u16 status;
2919 u8 local;
2920 u8 remote;
2921 int i;
2922 int p;
2923
2924 interrupt = hw_block_intr(hw);
2925
2926 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
2927 info = &hw->port_info[p];
2928 port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
2929 port_r16(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
2930
2931 /*
2932 * Link status is changing all the time even when there is no
2933 * cable connection!
2934 */
2935 remote = status & (PORT_AUTO_NEG_COMPLETE |
2936 PORT_STATUS_LINK_GOOD);
2937 local = (u8) data;
2938
2939 /* No change to status. */
2940 if (local == info->advertised && remote == info->partner)
2941 continue;
2942
2943 info->advertised = local;
2944 info->partner = remote;
2945 if (status & PORT_STATUS_LINK_GOOD) {
2946
2947 /* Remember the first linked port. */
2948 if (!linked)
2949 linked = info;
2950
2951 info->tx_rate = 10 * TX_RATE_UNIT;
2952 if (status & PORT_STATUS_SPEED_100MBIT)
2953 info->tx_rate = 100 * TX_RATE_UNIT;
2954
2955 info->duplex = 1;
2956 if (status & PORT_STATUS_FULL_DUPLEX)
2957 info->duplex = 2;
2958
2959 if (media_connected != info->state) {
2960 hw_r_phy(hw, p, KS884X_PHY_AUTO_NEG_OFFSET,
2961 &data);
2962 hw_r_phy(hw, p, KS884X_PHY_REMOTE_CAP_OFFSET,
2963 &status);
2964 determine_flow_ctrl(hw, port, data, status);
2965 if (hw->ksz_switch) {
2966 port_cfg_back_pressure(hw, p,
2967 (1 == info->duplex));
2968 }
2969 port_cfg_change(hw, port, info, status);
2970 }
2971 info->state = media_connected;
2972 } else {
2973 /* Indicate the link just goes down. */
2974 if (media_disconnected != info->state)
2975 hw->port_mib[p].link_down = 1;
2976
2977 info->state = media_disconnected;
2978 }
2979 hw->port_mib[p].state = (u8) info->state;
2980 }
2981
2982 if (linked && media_disconnected == port->linked->state)
2983 port->linked = linked;
2984
2985 hw_restore_intr(hw, interrupt);
2986 }
2987
2988 #define PHY_RESET_TIMEOUT 10
2989
2990 /**
2991 * port_set_link_speed - set port speed
2992 * @port: The port instance.
2993 *
2994 * This routine sets the link speed of the switch ports.
2995 */
port_set_link_speed(struct ksz_port * port)2996 static void port_set_link_speed(struct ksz_port *port)
2997 {
2998 struct ksz_hw *hw = port->hw;
2999 u16 data;
3000 u16 cfg;
3001 u8 status;
3002 int i;
3003 int p;
3004
3005 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
3006 port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
3007 port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
3008
3009 cfg = 0;
3010 if (status & PORT_STATUS_LINK_GOOD)
3011 cfg = data;
3012
3013 data |= PORT_AUTO_NEG_ENABLE;
3014 data = advertised_flow_ctrl(port, data);
3015
3016 data |= PORT_AUTO_NEG_100BTX_FD | PORT_AUTO_NEG_100BTX |
3017 PORT_AUTO_NEG_10BT_FD | PORT_AUTO_NEG_10BT;
3018
3019 /* Check if manual configuration is specified by the user. */
3020 if (port->speed || port->duplex) {
3021 if (10 == port->speed)
3022 data &= ~(PORT_AUTO_NEG_100BTX_FD |
3023 PORT_AUTO_NEG_100BTX);
3024 else if (100 == port->speed)
3025 data &= ~(PORT_AUTO_NEG_10BT_FD |
3026 PORT_AUTO_NEG_10BT);
3027 if (1 == port->duplex)
3028 data &= ~(PORT_AUTO_NEG_100BTX_FD |
3029 PORT_AUTO_NEG_10BT_FD);
3030 else if (2 == port->duplex)
3031 data &= ~(PORT_AUTO_NEG_100BTX |
3032 PORT_AUTO_NEG_10BT);
3033 }
3034 if (data != cfg) {
3035 data |= PORT_AUTO_NEG_RESTART;
3036 port_w16(hw, p, KS884X_PORT_CTRL_4_OFFSET, data);
3037 }
3038 }
3039 }
3040
3041 /**
3042 * port_force_link_speed - force port speed
3043 * @port: The port instance.
3044 *
3045 * This routine forces the link speed of the switch ports.
3046 */
port_force_link_speed(struct ksz_port * port)3047 static void port_force_link_speed(struct ksz_port *port)
3048 {
3049 struct ksz_hw *hw = port->hw;
3050 u16 data;
3051 int i;
3052 int phy;
3053 int p;
3054
3055 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
3056 phy = KS884X_PHY_1_CTRL_OFFSET + p * PHY_CTRL_INTERVAL;
3057 hw_r_phy_ctrl(hw, phy, &data);
3058
3059 data &= ~BMCR_ANENABLE;
3060
3061 if (10 == port->speed)
3062 data &= ~BMCR_SPEED100;
3063 else if (100 == port->speed)
3064 data |= BMCR_SPEED100;
3065 if (1 == port->duplex)
3066 data &= ~BMCR_FULLDPLX;
3067 else if (2 == port->duplex)
3068 data |= BMCR_FULLDPLX;
3069 hw_w_phy_ctrl(hw, phy, data);
3070 }
3071 }
3072
/* Enable/disable PHY power down on all ports of @port. */
static void port_set_power_saving(struct ksz_port *port, int enable)
{
	struct ksz_hw *hw = port->hw;
	int i;
	int p;

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++)
		port_cfg(hw, p,
			KS884X_PORT_CTRL_4_OFFSET, PORT_POWER_DOWN, enable);
}
3083
3084 /*
3085 * KSZ8841 power management functions
3086 */
3087
3088 /**
3089 * hw_chk_wol_pme_status - check PMEN pin
3090 * @hw: The hardware instance.
3091 *
3092 * This function is used to check PMEN pin is asserted.
3093 *
3094 * Return 1 if PMEN pin is asserted; otherwise, 0.
3095 */
hw_chk_wol_pme_status(struct ksz_hw * hw)3096 static int hw_chk_wol_pme_status(struct ksz_hw *hw)
3097 {
3098 struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
3099 struct pci_dev *pdev = hw_priv->pdev;
3100 u16 data;
3101
3102 if (!pdev->pm_cap)
3103 return 0;
3104 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
3105 return (data & PCI_PM_CTRL_PME_STATUS) == PCI_PM_CTRL_PME_STATUS;
3106 }
3107
3108 /**
3109 * hw_clr_wol_pme_status - clear PMEN pin
3110 * @hw: The hardware instance.
3111 *
3112 * This routine is used to clear PME_Status to deassert PMEN pin.
3113 */
hw_clr_wol_pme_status(struct ksz_hw * hw)3114 static void hw_clr_wol_pme_status(struct ksz_hw *hw)
3115 {
3116 struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
3117 struct pci_dev *pdev = hw_priv->pdev;
3118 u16 data;
3119
3120 if (!pdev->pm_cap)
3121 return;
3122
3123 /* Clear PME_Status to deassert PMEN pin. */
3124 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
3125 data |= PCI_PM_CTRL_PME_STATUS;
3126 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
3127 }
3128
3129 /**
3130 * hw_cfg_wol_pme - enable or disable Wake-on-LAN
3131 * @hw: The hardware instance.
3132 * @set: The flag indicating whether to enable or disable.
3133 *
3134 * This routine is used to enable or disable Wake-on-LAN.
3135 */
hw_cfg_wol_pme(struct ksz_hw * hw,int set)3136 static void hw_cfg_wol_pme(struct ksz_hw *hw, int set)
3137 {
3138 struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
3139 struct pci_dev *pdev = hw_priv->pdev;
3140 u16 data;
3141
3142 if (!pdev->pm_cap)
3143 return;
3144 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
3145 data &= ~PCI_PM_CTRL_STATE_MASK;
3146 if (set)
3147 data |= PCI_PM_CTRL_PME_ENABLE | PCI_D3hot;
3148 else
3149 data &= ~PCI_PM_CTRL_PME_ENABLE;
3150 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
3151 }
3152
3153 /**
3154 * hw_cfg_wol - configure Wake-on-LAN features
3155 * @hw: The hardware instance.
3156 * @frame: The pattern frame bit.
3157 * @set: The flag indicating whether to enable or disable.
3158 *
3159 * This routine is used to enable or disable certain Wake-on-LAN features.
3160 */
hw_cfg_wol(struct ksz_hw * hw,u16 frame,int set)3161 static void hw_cfg_wol(struct ksz_hw *hw, u16 frame, int set)
3162 {
3163 u16 data;
3164
3165 data = readw(hw->io + KS8841_WOL_CTRL_OFFSET);
3166 if (set)
3167 data |= frame;
3168 else
3169 data &= ~frame;
3170 writew(data, hw->io + KS8841_WOL_CTRL_OFFSET);
3171 }
3172
3173 /**
3174 * hw_set_wol_frame - program Wake-on-LAN pattern
3175 * @hw: The hardware instance.
3176 * @i: The frame index.
3177 * @mask_size: The size of the mask.
3178 * @mask: Mask to ignore certain bytes in the pattern.
3179 * @frame_size: The size of the frame.
3180 * @pattern: The frame data.
3181 *
3182 * This routine is used to program Wake-on-LAN pattern.
3183 */
hw_set_wol_frame(struct ksz_hw * hw,int i,uint mask_size,const u8 * mask,uint frame_size,const u8 * pattern)3184 static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size,
3185 const u8 *mask, uint frame_size, const u8 *pattern)
3186 {
3187 int bits;
3188 int from;
3189 int len;
3190 int to;
3191 u32 crc;
3192 u8 data[64];
3193 u8 val = 0;
3194
3195 if (frame_size > mask_size * 8)
3196 frame_size = mask_size * 8;
3197 if (frame_size > 64)
3198 frame_size = 64;
3199
3200 i *= 0x10;
3201 writel(0, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i);
3202 writel(0, hw->io + KS8841_WOL_FRAME_BYTE2_OFFSET + i);
3203
3204 bits = len = from = to = 0;
3205 do {
3206 if (bits) {
3207 if ((val & 1))
3208 data[to++] = pattern[from];
3209 val >>= 1;
3210 ++from;
3211 --bits;
3212 } else {
3213 val = mask[len];
3214 writeb(val, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i
3215 + len);
3216 ++len;
3217 if (val)
3218 bits = 8;
3219 else
3220 from += 8;
3221 }
3222 } while (from < (int) frame_size);
3223 if (val) {
3224 bits = mask[len - 1];
3225 val <<= (from % 8);
3226 bits &= ~val;
3227 writeb(bits, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i + len -
3228 1);
3229 }
3230 crc = ether_crc(to, data);
3231 writel(crc, hw->io + KS8841_WOL_FRAME_CRC_OFFSET + i);
3232 }
3233
3234 /**
3235 * hw_add_wol_arp - add ARP pattern
3236 * @hw: The hardware instance.
3237 * @ip_addr: The IPv4 address assigned to the device.
3238 *
3239 * This routine is used to add ARP pattern for waking up the host.
3240 */
hw_add_wol_arp(struct ksz_hw * hw,const u8 * ip_addr)3241 static void hw_add_wol_arp(struct ksz_hw *hw, const u8 *ip_addr)
3242 {
3243 static const u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 };
3244 u8 pattern[42] = {
3245 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
3246 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3247 0x08, 0x06,
3248 0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x01,
3249 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3250 0x00, 0x00, 0x00, 0x00,
3251 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3252 0x00, 0x00, 0x00, 0x00 };
3253
3254 memcpy(&pattern[38], ip_addr, 4);
3255 hw_set_wol_frame(hw, 3, 6, mask, 42, pattern);
3256 }
3257
3258 /**
3259 * hw_add_wol_bcast - add broadcast pattern
3260 * @hw: The hardware instance.
3261 *
3262 * This routine is used to add broadcast pattern for waking up the host.
3263 */
hw_add_wol_bcast(struct ksz_hw * hw)3264 static void hw_add_wol_bcast(struct ksz_hw *hw)
3265 {
3266 static const u8 mask[] = { 0x3F };
3267 static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3268
3269 hw_set_wol_frame(hw, 2, 1, mask, ETH_ALEN, pattern);
3270 }
3271
3272 /**
3273 * hw_add_wol_mcast - add multicast pattern
3274 * @hw: The hardware instance.
3275 *
3276 * This routine is used to add multicast pattern for waking up the host.
3277 *
3278 * It is assumed the multicast packet is the ICMPv6 neighbor solicitation used
3279 * by IPv6 ping command. Note that multicast packets are filtred through the
3280 * multicast hash table, so not all multicast packets can wake up the host.
3281 */
hw_add_wol_mcast(struct ksz_hw * hw)3282 static void hw_add_wol_mcast(struct ksz_hw *hw)
3283 {
3284 static const u8 mask[] = { 0x3F };
3285 u8 pattern[] = { 0x33, 0x33, 0xFF, 0x00, 0x00, 0x00 };
3286
3287 memcpy(&pattern[3], &hw->override_addr[3], 3);
3288 hw_set_wol_frame(hw, 1, 1, mask, 6, pattern);
3289 }
3290
3291 /**
3292 * hw_add_wol_ucast - add unicast pattern
3293 * @hw: The hardware instance.
3294 *
3295 * This routine is used to add unicast pattern to wakeup the host.
3296 *
3297 * It is assumed the unicast packet is directed to the device, as the hardware
3298 * can only receive them in normal case.
3299 */
hw_add_wol_ucast(struct ksz_hw * hw)3300 static void hw_add_wol_ucast(struct ksz_hw *hw)
3301 {
3302 static const u8 mask[] = { 0x3F };
3303
3304 hw_set_wol_frame(hw, 0, 1, mask, ETH_ALEN, hw->override_addr);
3305 }
3306
3307 /**
3308 * hw_enable_wol - enable Wake-on-LAN
3309 * @hw: The hardware instance.
3310 * @wol_enable: The Wake-on-LAN settings.
3311 * @net_addr: The IPv4 address assigned to the device.
3312 *
3313 * This routine is used to enable Wake-on-LAN depending on driver settings.
3314 */
hw_enable_wol(struct ksz_hw * hw,u32 wol_enable,const u8 * net_addr)3315 static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, const u8 *net_addr)
3316 {
3317 hw_cfg_wol(hw, KS8841_WOL_MAGIC_ENABLE, (wol_enable & WAKE_MAGIC));
3318 hw_cfg_wol(hw, KS8841_WOL_FRAME0_ENABLE, (wol_enable & WAKE_UCAST));
3319 hw_add_wol_ucast(hw);
3320 hw_cfg_wol(hw, KS8841_WOL_FRAME1_ENABLE, (wol_enable & WAKE_MCAST));
3321 hw_add_wol_mcast(hw);
3322 hw_cfg_wol(hw, KS8841_WOL_FRAME2_ENABLE, (wol_enable & WAKE_BCAST));
3323 hw_cfg_wol(hw, KS8841_WOL_FRAME3_ENABLE, (wol_enable & WAKE_ARP));
3324 hw_add_wol_arp(hw, net_addr);
3325 }
3326
3327 /**
3328 * hw_init - check driver is correct for the hardware
3329 * @hw: The hardware instance.
3330 *
3331 * This function checks the hardware is correct for this driver and sets the
3332 * hardware up for proper initialization.
3333 *
3334 * Return number of ports or 0 if not right.
3335 */
hw_init(struct ksz_hw * hw)3336 static int hw_init(struct ksz_hw *hw)
3337 {
3338 int rc = 0;
3339 u16 data;
3340 u16 revision;
3341
3342 /* Set bus speed to 125MHz. */
3343 writew(BUS_SPEED_125_MHZ, hw->io + KS884X_BUS_CTRL_OFFSET);
3344
3345 /* Check KSZ884x chip ID. */
3346 data = readw(hw->io + KS884X_CHIP_ID_OFFSET);
3347
3348 revision = (data & KS884X_REVISION_MASK) >> KS884X_REVISION_SHIFT;
3349 data &= KS884X_CHIP_ID_MASK_41;
3350 if (REG_CHIP_ID_41 == data)
3351 rc = 1;
3352 else if (REG_CHIP_ID_42 == data)
3353 rc = 2;
3354 else
3355 return 0;
3356
3357 /* Setup hardware features or bug workarounds. */
3358 if (revision <= 1) {
3359 hw->features |= SMALL_PACKET_TX_BUG;
3360 if (1 == rc)
3361 hw->features |= HALF_DUPLEX_SIGNAL_BUG;
3362 }
3363 return rc;
3364 }
3365
3366 /**
3367 * hw_reset - reset the hardware
3368 * @hw: The hardware instance.
3369 *
3370 * This routine resets the hardware.
3371 */
hw_reset(struct ksz_hw * hw)3372 static void hw_reset(struct ksz_hw *hw)
3373 {
3374 writew(GLOBAL_SOFTWARE_RESET, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
3375
3376 /* Wait for device to reset. */
3377 mdelay(10);
3378
3379 /* Write 0 to clear device reset. */
3380 writew(0, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
3381 }
3382
3383 /**
3384 * hw_setup - setup the hardware
3385 * @hw: The hardware instance.
3386 *
3387 * This routine setup the hardware for proper operation.
3388 */
hw_setup(struct ksz_hw * hw)3389 static void hw_setup(struct ksz_hw *hw)
3390 {
3391 #if SET_DEFAULT_LED
3392 u16 data;
3393
3394 /* Change default LED mode. */
3395 data = readw(hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
3396 data &= ~LED_MODE;
3397 data |= SET_DEFAULT_LED;
3398 writew(data, hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
3399 #endif
3400
3401 /* Setup transmit control. */
3402 hw->tx_cfg = (DMA_TX_PAD_ENABLE | DMA_TX_CRC_ENABLE |
3403 (DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_TX_ENABLE);
3404
3405 /* Setup receive control. */
3406 hw->rx_cfg = (DMA_RX_BROADCAST | DMA_RX_UNICAST |
3407 (DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_RX_ENABLE);
3408 hw->rx_cfg |= KS884X_DMA_RX_MULTICAST;
3409
3410 /* Hardware cannot handle UDP packet in IP fragments. */
3411 hw->rx_cfg |= (DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);
3412
3413 if (hw->all_multi)
3414 hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
3415 if (hw->promiscuous)
3416 hw->rx_cfg |= DMA_RX_PROMISCUOUS;
3417 }
3418
3419 /**
3420 * hw_setup_intr - setup interrupt mask
3421 * @hw: The hardware instance.
3422 *
3423 * This routine setup the interrupt mask for proper operation.
3424 */
hw_setup_intr(struct ksz_hw * hw)3425 static void hw_setup_intr(struct ksz_hw *hw)
3426 {
3427 hw->intr_mask = KS884X_INT_MASK | KS884X_INT_RX_OVERRUN;
3428 }
3429
/* Validate that the configured descriptor count is a power of two of at
 * least 1 << MIN_DESC_SHIFT; if not, round it up and derive the index
 * wrap-around mask.  A power-of-two count is required so ring indices can
 * wrap with a simple AND against info->mask.
 */
static void ksz_check_desc_num(struct ksz_desc_info *info)
{
#define MIN_DESC_SHIFT 2

	int alloc = info->alloc;
	int shift;

	/* Locate the lowest set bit; for a power of two this leaves
	 * alloc == 1 with shift == log2 of the original value.
	 */
	shift = 0;
	while (!(alloc & 1)) {
		shift++;
		alloc >>= 1;
	}
	/* alloc != 1 here means more than one bit was set (not a power of
	 * two); shift < MIN_DESC_SHIFT means the count was too small.
	 */
	if (alloc != 1 || shift < MIN_DESC_SHIFT) {
		pr_alert("Hardware descriptor numbers not right!\n");
		/* Round up to the next power of two (at minimum size). */
		while (alloc) {
			shift++;
			alloc >>= 1;
		}
		if (shift < MIN_DESC_SHIFT)
			shift = MIN_DESC_SHIFT;
		alloc = 1 << shift;
		info->alloc = alloc;
	}
	/* Mask for cheap modulo when advancing ring indices. */
	info->mask = info->alloc - 1;
}
3455
/* Initialize a descriptor ring: pair each software descriptor with its
 * hardware descriptor, chain the hardware descriptors into a circular
 * list by physical address, and reset the ring bookkeeping.
 * @transmit is currently unused; both rings are initialized the same way.
 */
static void hw_init_desc(struct ksz_desc_info *desc_info, int transmit)
{
	int i;
	u32 phys = desc_info->ring_phys;
	struct ksz_hw_desc *desc = desc_info->ring_virt;
	struct ksz_desc *cur = desc_info->ring;
	struct ksz_desc *previous = NULL;

	/* Each hardware descriptor's next pointer is the physical address
	 * of the descriptor that follows it.
	 */
	for (i = 0; i < desc_info->alloc; i++) {
		cur->phw = desc++;
		phys += desc_info->size;
		previous = cur++;
		previous->phw->next = cpu_to_le32(phys);
	}
	/* Close the ring: the last descriptor points back to the first. */
	previous->phw->next = cpu_to_le32(desc_info->ring_phys);
	/* Mark end-of-ring in the software copy of the control word, then
	 * push that control word out to the hardware descriptor.
	 */
	previous->sw.buf.rx.end_of_ring = 1;
	previous->phw->buf.data = cpu_to_le32(previous->sw.buf.data);

	/* All descriptors start out available; indices start at zero. */
	desc_info->avail = desc_info->alloc;
	desc_info->last = desc_info->next = 0;

	desc_info->cur = desc_info->ring;
}
3479
3480 /**
3481 * hw_set_desc_base - set descriptor base addresses
3482 * @hw: The hardware instance.
3483 * @tx_addr: The transmit descriptor base.
3484 * @rx_addr: The receive descriptor base.
3485 *
3486 * This routine programs the descriptor base addresses after reset.
3487 */
hw_set_desc_base(struct ksz_hw * hw,u32 tx_addr,u32 rx_addr)3488 static void hw_set_desc_base(struct ksz_hw *hw, u32 tx_addr, u32 rx_addr)
3489 {
3490 /* Set base address of Tx/Rx descriptors. */
3491 writel(tx_addr, hw->io + KS_DMA_TX_ADDR);
3492 writel(rx_addr, hw->io + KS_DMA_RX_ADDR);
3493 }
3494
hw_reset_pkts(struct ksz_desc_info * info)3495 static void hw_reset_pkts(struct ksz_desc_info *info)
3496 {
3497 info->cur = info->ring;
3498 info->avail = info->alloc;
3499 info->last = info->next = 0;
3500 }
3501
hw_resume_rx(struct ksz_hw * hw)3502 static inline void hw_resume_rx(struct ksz_hw *hw)
3503 {
3504 writel(DMA_START, hw->io + KS_DMA_RX_START);
3505 }
3506
3507 /**
3508 * hw_start_rx - start receiving
3509 * @hw: The hardware instance.
3510 *
3511 * This routine starts the receive function of the hardware.
3512 */
hw_start_rx(struct ksz_hw * hw)3513 static void hw_start_rx(struct ksz_hw *hw)
3514 {
3515 writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
3516
3517 /* Notify when the receive stops. */
3518 hw->intr_mask |= KS884X_INT_RX_STOPPED;
3519
3520 writel(DMA_START, hw->io + KS_DMA_RX_START);
3521 hw_ack_intr(hw, KS884X_INT_RX_STOPPED);
3522 hw->rx_stop++;
3523
3524 /* Variable overflows. */
3525 if (0 == hw->rx_stop)
3526 hw->rx_stop = 2;
3527 }
3528
3529 /**
3530 * hw_stop_rx - stop receiving
3531 * @hw: The hardware instance.
3532 *
3533 * This routine stops the receive function of the hardware.
3534 */
hw_stop_rx(struct ksz_hw * hw)3535 static void hw_stop_rx(struct ksz_hw *hw)
3536 {
3537 hw->rx_stop = 0;
3538 hw_turn_off_intr(hw, KS884X_INT_RX_STOPPED);
3539 writel((hw->rx_cfg & ~DMA_RX_ENABLE), hw->io + KS_DMA_RX_CTRL);
3540 }
3541
3542 /**
3543 * hw_start_tx - start transmitting
3544 * @hw: The hardware instance.
3545 *
3546 * This routine starts the transmit function of the hardware.
3547 */
hw_start_tx(struct ksz_hw * hw)3548 static void hw_start_tx(struct ksz_hw *hw)
3549 {
3550 writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
3551 }
3552
3553 /**
3554 * hw_stop_tx - stop transmitting
3555 * @hw: The hardware instance.
3556 *
3557 * This routine stops the transmit function of the hardware.
3558 */
hw_stop_tx(struct ksz_hw * hw)3559 static void hw_stop_tx(struct ksz_hw *hw)
3560 {
3561 writel((hw->tx_cfg & ~DMA_TX_ENABLE), hw->io + KS_DMA_TX_CTRL);
3562 }
3563
3564 /**
3565 * hw_disable - disable hardware
3566 * @hw: The hardware instance.
3567 *
3568 * This routine disables the hardware.
3569 */
hw_disable(struct ksz_hw * hw)3570 static void hw_disable(struct ksz_hw *hw)
3571 {
3572 hw_stop_rx(hw);
3573 hw_stop_tx(hw);
3574 hw->enabled = 0;
3575 }
3576
3577 /**
3578 * hw_enable - enable hardware
3579 * @hw: The hardware instance.
3580 *
3581 * This routine enables the hardware.
3582 */
hw_enable(struct ksz_hw * hw)3583 static void hw_enable(struct ksz_hw *hw)
3584 {
3585 hw_start_tx(hw);
3586 hw_start_rx(hw);
3587 hw->enabled = 1;
3588 }
3589
3590 /**
3591 * hw_alloc_pkt - allocate enough descriptors for transmission
3592 * @hw: The hardware instance.
3593 * @length: The length of the packet.
3594 * @physical: Number of descriptors required.
3595 *
3596 * This function allocates descriptors for transmission.
3597 *
3598 * Return 0 if not successful; 1 for buffer copy; or number of descriptors.
3599 */
hw_alloc_pkt(struct ksz_hw * hw,int length,int physical)3600 static int hw_alloc_pkt(struct ksz_hw *hw, int length, int physical)
3601 {
3602 /* Always leave one descriptor free. */
3603 if (hw->tx_desc_info.avail <= 1)
3604 return 0;
3605
3606 /* Allocate a descriptor for transmission and mark it current. */
3607 get_tx_pkt(&hw->tx_desc_info, &hw->tx_desc_info.cur);
3608 hw->tx_desc_info.cur->sw.buf.tx.first_seg = 1;
3609
3610 /* Keep track of number of transmit descriptors used so far. */
3611 ++hw->tx_int_cnt;
3612 hw->tx_size += length;
3613
3614 /* Cannot hold on too much data. */
3615 if (hw->tx_size >= MAX_TX_HELD_SIZE)
3616 hw->tx_int_cnt = hw->tx_int_mask + 1;
3617
3618 if (physical > hw->tx_desc_info.avail)
3619 return 1;
3620
3621 return hw->tx_desc_info.avail;
3622 }
3623
3624 /**
3625 * hw_send_pkt - mark packet for transmission
3626 * @hw: The hardware instance.
3627 *
3628 * This routine marks the packet for transmission in PCI version.
3629 */
hw_send_pkt(struct ksz_hw * hw)3630 static void hw_send_pkt(struct ksz_hw *hw)
3631 {
3632 struct ksz_desc *cur = hw->tx_desc_info.cur;
3633
3634 cur->sw.buf.tx.last_seg = 1;
3635
3636 /* Interrupt only after specified number of descriptors used. */
3637 if (hw->tx_int_cnt > hw->tx_int_mask) {
3638 cur->sw.buf.tx.intr = 1;
3639 hw->tx_int_cnt = 0;
3640 hw->tx_size = 0;
3641 }
3642
3643 /* KSZ8842 supports port directed transmission. */
3644 cur->sw.buf.tx.dest_port = hw->dst_ports;
3645
3646 release_desc(cur);
3647
3648 writel(0, hw->io + KS_DMA_TX_START);
3649 }
3650
/**
 * empty_addr - check whether a MAC address is all zero
 * @addr: The 6-byte address to check.
 *
 * Return non-zero if the address is 00:00:00:00:00:00.
 *
 * The previous implementation cast @addr to u32/u16 pointers.  Entries in
 * hw->address[] are ETH_ALEN (6) bytes apart, so every other entry is not
 * 4-byte aligned; the casts were undefined behavior (strict aliasing) and
 * could fault on strict-alignment architectures.  is_zero_ether_addr()
 * from <linux/etherdevice.h> performs the same test portably.
 */
static int empty_addr(u8 *addr)
{
	return is_zero_ether_addr(addr);
}
3658
3659 /**
3660 * hw_set_addr - set MAC address
3661 * @hw: The hardware instance.
3662 *
3663 * This routine programs the MAC address of the hardware when the address is
3664 * overridden.
3665 */
hw_set_addr(struct ksz_hw * hw)3666 static void hw_set_addr(struct ksz_hw *hw)
3667 {
3668 int i;
3669
3670 for (i = 0; i < ETH_ALEN; i++)
3671 writeb(hw->override_addr[MAC_ADDR_ORDER(i)],
3672 hw->io + KS884X_ADDR_0_OFFSET + i);
3673
3674 sw_set_addr(hw, hw->override_addr);
3675 }
3676
3677 /**
3678 * hw_read_addr - read MAC address
3679 * @hw: The hardware instance.
3680 *
3681 * This routine retrieves the MAC address of the hardware.
3682 */
hw_read_addr(struct ksz_hw * hw)3683 static void hw_read_addr(struct ksz_hw *hw)
3684 {
3685 int i;
3686
3687 for (i = 0; i < ETH_ALEN; i++)
3688 hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io +
3689 KS884X_ADDR_0_OFFSET + i);
3690
3691 if (!hw->mac_override) {
3692 memcpy(hw->override_addr, hw->perm_addr, ETH_ALEN);
3693 if (empty_addr(hw->override_addr)) {
3694 memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, ETH_ALEN);
3695 memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS,
3696 ETH_ALEN);
3697 hw->override_addr[5] += hw->id;
3698 hw_set_addr(hw);
3699 }
3700 }
3701 }
3702
/* Program one additional-MAC-address filter entry and enable it.
 * The high register holds the first 2 address bytes plus the enable bit;
 * the low register holds the remaining 4 bytes, all big-endian packed.
 */
static void hw_ena_add_addr(struct ksz_hw *hw, int index, u8 *mac_addr)
{
	u32 lo;
	u32 hi;

	hi = ((u32) mac_addr[0] << 8) | mac_addr[1];
	hi |= ADD_ADDR_ENABLE;

	lo = ((u32) mac_addr[2] << 24) | ((u32) mac_addr[3] << 16) |
	     ((u32) mac_addr[4] << 8) | mac_addr[5];

	/* Each entry occupies a window of ADD_ADDR_INCR bytes. */
	index *= ADD_ADDR_INCR;

	writel(lo, hw->io + index + KS_ADD_ADDR_0_LO);
	writel(hi, hw->io + index + KS_ADD_ADDR_0_HI);
}
3725
hw_set_add_addr(struct ksz_hw * hw)3726 static void hw_set_add_addr(struct ksz_hw *hw)
3727 {
3728 int i;
3729
3730 for (i = 0; i < ADDITIONAL_ENTRIES; i++) {
3731 if (empty_addr(hw->address[i]))
3732 writel(0, hw->io + ADD_ADDR_INCR * i +
3733 KS_ADD_ADDR_0_HI);
3734 else
3735 hw_ena_add_addr(hw, i, hw->address[i]);
3736 }
3737 }
3738
hw_add_addr(struct ksz_hw * hw,const u8 * mac_addr)3739 static int hw_add_addr(struct ksz_hw *hw, const u8 *mac_addr)
3740 {
3741 int i;
3742 int j = ADDITIONAL_ENTRIES;
3743
3744 if (ether_addr_equal(hw->override_addr, mac_addr))
3745 return 0;
3746 for (i = 0; i < hw->addr_list_size; i++) {
3747 if (ether_addr_equal(hw->address[i], mac_addr))
3748 return 0;
3749 if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
3750 j = i;
3751 }
3752 if (j < ADDITIONAL_ENTRIES) {
3753 memcpy(hw->address[j], mac_addr, ETH_ALEN);
3754 hw_ena_add_addr(hw, j, hw->address[j]);
3755 return 0;
3756 }
3757 return -1;
3758 }
3759
hw_del_addr(struct ksz_hw * hw,const u8 * mac_addr)3760 static int hw_del_addr(struct ksz_hw *hw, const u8 *mac_addr)
3761 {
3762 int i;
3763
3764 for (i = 0; i < hw->addr_list_size; i++) {
3765 if (ether_addr_equal(hw->address[i], mac_addr)) {
3766 eth_zero_addr(hw->address[i]);
3767 writel(0, hw->io + ADD_ADDR_INCR * i +
3768 KS_ADD_ADDR_0_HI);
3769 return 0;
3770 }
3771 }
3772 return -1;
3773 }
3774
3775 /**
3776 * hw_clr_multicast - clear multicast addresses
3777 * @hw: The hardware instance.
3778 *
3779 * This routine removes all multicast addresses set in the hardware.
3780 */
hw_clr_multicast(struct ksz_hw * hw)3781 static void hw_clr_multicast(struct ksz_hw *hw)
3782 {
3783 int i;
3784
3785 for (i = 0; i < HW_MULTICAST_SIZE; i++) {
3786 hw->multi_bits[i] = 0;
3787
3788 writeb(0, hw->io + KS884X_MULTICAST_0_OFFSET + i);
3789 }
3790 }
3791
3792 /**
3793 * hw_set_grp_addr - set multicast addresses
3794 * @hw: The hardware instance.
3795 *
3796 * This routine programs multicast addresses for the hardware to accept those
3797 * addresses.
3798 */
hw_set_grp_addr(struct ksz_hw * hw)3799 static void hw_set_grp_addr(struct ksz_hw *hw)
3800 {
3801 int i;
3802 int index;
3803 int position;
3804 int value;
3805
3806 memset(hw->multi_bits, 0, sizeof(u8) * HW_MULTICAST_SIZE);
3807
3808 for (i = 0; i < hw->multi_list_size; i++) {
3809 position = (ether_crc(6, hw->multi_list[i]) >> 26) & 0x3f;
3810 index = position >> 3;
3811 value = 1 << (position & 7);
3812 hw->multi_bits[index] |= (u8) value;
3813 }
3814
3815 for (i = 0; i < HW_MULTICAST_SIZE; i++)
3816 writeb(hw->multi_bits[i], hw->io + KS884X_MULTICAST_0_OFFSET +
3817 i);
3818 }
3819
3820 /**
3821 * hw_set_multicast - enable or disable all multicast receiving
3822 * @hw: The hardware instance.
3823 * @multicast: To turn on or off the all multicast feature.
3824 *
3825 * This routine enables/disables the hardware to accept all multicast packets.
3826 */
hw_set_multicast(struct ksz_hw * hw,u8 multicast)3827 static void hw_set_multicast(struct ksz_hw *hw, u8 multicast)
3828 {
3829 /* Stop receiving for reconfiguration. */
3830 hw_stop_rx(hw);
3831
3832 if (multicast)
3833 hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
3834 else
3835 hw->rx_cfg &= ~DMA_RX_ALL_MULTICAST;
3836
3837 if (hw->enabled)
3838 hw_start_rx(hw);
3839 }
3840
3841 /**
3842 * hw_set_promiscuous - enable or disable promiscuous receiving
3843 * @hw: The hardware instance.
3844 * @prom: To turn on or off the promiscuous feature.
3845 *
3846 * This routine enables/disables the hardware to accept all packets.
3847 */
hw_set_promiscuous(struct ksz_hw * hw,u8 prom)3848 static void hw_set_promiscuous(struct ksz_hw *hw, u8 prom)
3849 {
3850 /* Stop receiving for reconfiguration. */
3851 hw_stop_rx(hw);
3852
3853 if (prom)
3854 hw->rx_cfg |= DMA_RX_PROMISCUOUS;
3855 else
3856 hw->rx_cfg &= ~DMA_RX_PROMISCUOUS;
3857
3858 if (hw->enabled)
3859 hw_start_rx(hw);
3860 }
3861
3862 /**
3863 * sw_enable - enable the switch
3864 * @hw: The hardware instance.
3865 * @enable: The flag to enable or disable the switch
3866 *
3867 * This routine is used to enable/disable the switch in KSZ8842.
3868 */
sw_enable(struct ksz_hw * hw,int enable)3869 static void sw_enable(struct ksz_hw *hw, int enable)
3870 {
3871 int port;
3872
3873 for (port = 0; port < SWITCH_PORT_NUM; port++) {
3874 if (hw->dev_count > 1) {
3875 /* Set port-base vlan membership with host port. */
3876 sw_cfg_port_base_vlan(hw, port,
3877 HOST_MASK | (1 << port));
3878 port_set_stp_state(hw, port, STP_STATE_DISABLED);
3879 } else {
3880 sw_cfg_port_base_vlan(hw, port, PORT_MASK);
3881 port_set_stp_state(hw, port, STP_STATE_FORWARDING);
3882 }
3883 }
3884 if (hw->dev_count > 1)
3885 port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
3886 else
3887 port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_FORWARDING);
3888
3889 if (enable)
3890 enable = KS8842_START;
3891 writew(enable, hw->io + KS884X_CHIP_ID_OFFSET);
3892 }
3893
3894 /**
3895 * sw_setup - setup the switch
3896 * @hw: The hardware instance.
3897 *
3898 * This routine setup the hardware switch engine for default operation.
3899 */
sw_setup(struct ksz_hw * hw)3900 static void sw_setup(struct ksz_hw *hw)
3901 {
3902 int port;
3903
3904 sw_set_global_ctrl(hw);
3905
3906 /* Enable switch broadcast storm protection at 10% percent rate. */
3907 sw_init_broad_storm(hw);
3908 hw_cfg_broad_storm(hw, BROADCAST_STORM_PROTECTION_RATE);
3909 for (port = 0; port < SWITCH_PORT_NUM; port++)
3910 sw_ena_broad_storm(hw, port);
3911
3912 sw_init_prio(hw);
3913
3914 sw_init_mirror(hw);
3915
3916 sw_init_prio_rate(hw);
3917
3918 sw_init_vlan(hw);
3919
3920 if (hw->features & STP_SUPPORT)
3921 sw_init_stp(hw);
3922 if (!sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
3923 SWITCH_TX_FLOW_CTRL | SWITCH_RX_FLOW_CTRL))
3924 hw->overrides |= PAUSE_FLOW_CTRL;
3925 sw_enable(hw, 1);
3926 }
3927
3928 /**
3929 * ksz_start_timer - start kernel timer
3930 * @info: Kernel timer information.
3931 * @time: The time tick.
3932 *
3933 * This routine starts the kernel timer after the specified time tick.
3934 */
ksz_start_timer(struct ksz_timer_info * info,int time)3935 static void ksz_start_timer(struct ksz_timer_info *info, int time)
3936 {
3937 info->cnt = 0;
3938 info->timer.expires = jiffies + time;
3939 add_timer(&info->timer);
3940
3941 /* infinity */
3942 info->max = -1;
3943 }
3944
3945 /**
3946 * ksz_stop_timer - stop kernel timer
3947 * @info: Kernel timer information.
3948 *
3949 * This routine stops the kernel timer.
3950 */
ksz_stop_timer(struct ksz_timer_info * info)3951 static void ksz_stop_timer(struct ksz_timer_info *info)
3952 {
3953 if (info->max) {
3954 info->max = 0;
3955 timer_delete_sync(&info->timer);
3956 }
3957 }
3958
ksz_init_timer(struct ksz_timer_info * info,int period,void (* function)(struct timer_list *))3959 static void ksz_init_timer(struct ksz_timer_info *info, int period,
3960 void (*function)(struct timer_list *))
3961 {
3962 info->max = 0;
3963 info->period = period;
3964 timer_setup(&info->timer, function, 0);
3965 }
3966
ksz_update_timer(struct ksz_timer_info * info)3967 static void ksz_update_timer(struct ksz_timer_info *info)
3968 {
3969 ++info->cnt;
3970 if (info->max > 0) {
3971 if (info->cnt < info->max) {
3972 info->timer.expires = jiffies + info->period;
3973 add_timer(&info->timer);
3974 } else
3975 info->max = 0;
3976 } else if (info->max < 0) {
3977 info->timer.expires = jiffies + info->period;
3978 add_timer(&info->timer);
3979 }
3980 }
3981
3982 /**
3983 * ksz_alloc_soft_desc - allocate software descriptors
3984 * @desc_info: Descriptor information structure.
3985 * @transmit: Indication that descriptors are for transmit.
3986 *
3987 * This local function allocates software descriptors for manipulation in
3988 * memory.
3989 *
3990 * Return 0 if successful.
3991 */
ksz_alloc_soft_desc(struct ksz_desc_info * desc_info,int transmit)3992 static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
3993 {
3994 desc_info->ring = kzalloc_objs(struct ksz_desc, desc_info->alloc);
3995 if (!desc_info->ring)
3996 return 1;
3997 hw_init_desc(desc_info, transmit);
3998 return 0;
3999 }
4000
4001 /**
4002 * ksz_alloc_desc - allocate hardware descriptors
4003 * @adapter: Adapter information structure.
4004 *
4005 * This local function allocates hardware descriptors for receiving and
4006 * transmitting.
4007 *
4008 * Return 0 if successful.
4009 */
ksz_alloc_desc(struct dev_info * adapter)4010 static int ksz_alloc_desc(struct dev_info *adapter)
4011 {
4012 struct ksz_hw *hw = &adapter->hw;
4013 int offset;
4014
4015 /* Allocate memory for RX & TX descriptors. */
4016 adapter->desc_pool.alloc_size =
4017 hw->rx_desc_info.size * hw->rx_desc_info.alloc +
4018 hw->tx_desc_info.size * hw->tx_desc_info.alloc +
4019 DESC_ALIGNMENT;
4020
4021 adapter->desc_pool.alloc_virt =
4022 dma_alloc_coherent(&adapter->pdev->dev,
4023 adapter->desc_pool.alloc_size,
4024 &adapter->desc_pool.dma_addr, GFP_KERNEL);
4025 if (adapter->desc_pool.alloc_virt == NULL) {
4026 adapter->desc_pool.alloc_size = 0;
4027 return 1;
4028 }
4029
4030 /* Align to the next cache line boundary. */
4031 offset = (((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT) ?
4032 (DESC_ALIGNMENT -
4033 ((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT)) : 0);
4034 adapter->desc_pool.virt = adapter->desc_pool.alloc_virt + offset;
4035 adapter->desc_pool.phys = adapter->desc_pool.dma_addr + offset;
4036
4037 /* Allocate receive/transmit descriptors. */
4038 hw->rx_desc_info.ring_virt = (struct ksz_hw_desc *)
4039 adapter->desc_pool.virt;
4040 hw->rx_desc_info.ring_phys = adapter->desc_pool.phys;
4041 offset = hw->rx_desc_info.alloc * hw->rx_desc_info.size;
4042 hw->tx_desc_info.ring_virt = (struct ksz_hw_desc *)
4043 (adapter->desc_pool.virt + offset);
4044 hw->tx_desc_info.ring_phys = adapter->desc_pool.phys + offset;
4045
4046 if (ksz_alloc_soft_desc(&hw->rx_desc_info, 0))
4047 return 1;
4048 if (ksz_alloc_soft_desc(&hw->tx_desc_info, 1))
4049 return 1;
4050
4051 return 0;
4052 }
4053
4054 /**
4055 * free_dma_buf - release DMA buffer resources
4056 * @adapter: Adapter information structure.
4057 * @dma_buf: pointer to buf
4058 * @direction: to or from device
4059 *
4060 * This routine is just a helper function to release the DMA buffer resources.
4061 */
free_dma_buf(struct dev_info * adapter,struct ksz_dma_buf * dma_buf,int direction)4062 static void free_dma_buf(struct dev_info *adapter, struct ksz_dma_buf *dma_buf,
4063 int direction)
4064 {
4065 dma_unmap_single(&adapter->pdev->dev, dma_buf->dma, dma_buf->len,
4066 direction);
4067 dev_kfree_skb(dma_buf->skb);
4068 dma_buf->skb = NULL;
4069 dma_buf->dma = 0;
4070 }
4071
4072 /**
4073 * ksz_init_rx_buffers - initialize receive descriptors
4074 * @adapter: Adapter information structure.
4075 *
4076 * This routine initializes DMA buffers for receiving.
4077 */
ksz_init_rx_buffers(struct dev_info * adapter)4078 static void ksz_init_rx_buffers(struct dev_info *adapter)
4079 {
4080 int i;
4081 struct ksz_desc *desc;
4082 struct ksz_dma_buf *dma_buf;
4083 struct ksz_hw *hw = &adapter->hw;
4084 struct ksz_desc_info *info = &hw->rx_desc_info;
4085
4086 for (i = 0; i < hw->rx_desc_info.alloc; i++) {
4087 get_rx_pkt(info, &desc);
4088
4089 dma_buf = DMA_BUFFER(desc);
4090 if (dma_buf->skb && dma_buf->len != adapter->mtu)
4091 free_dma_buf(adapter, dma_buf, DMA_FROM_DEVICE);
4092 dma_buf->len = adapter->mtu;
4093 if (!dma_buf->skb)
4094 dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
4095 if (dma_buf->skb && !dma_buf->dma)
4096 dma_buf->dma = dma_map_single(&adapter->pdev->dev,
4097 skb_tail_pointer(dma_buf->skb),
4098 dma_buf->len,
4099 DMA_FROM_DEVICE);
4100
4101 /* Set descriptor. */
4102 set_rx_buf(desc, dma_buf->dma);
4103 set_rx_len(desc, dma_buf->len);
4104 release_desc(desc);
4105 }
4106 }
4107
4108 /**
4109 * ksz_alloc_mem - allocate memory for hardware descriptors
4110 * @adapter: Adapter information structure.
4111 *
4112 * This function allocates memory for use by hardware descriptors for receiving
4113 * and transmitting.
4114 *
4115 * Return 0 if successful.
4116 */
ksz_alloc_mem(struct dev_info * adapter)4117 static int ksz_alloc_mem(struct dev_info *adapter)
4118 {
4119 struct ksz_hw *hw = &adapter->hw;
4120
4121 /* Determine the number of receive and transmit descriptors. */
4122 hw->rx_desc_info.alloc = NUM_OF_RX_DESC;
4123 hw->tx_desc_info.alloc = NUM_OF_TX_DESC;
4124
4125 /* Determine how many descriptors to skip transmit interrupt. */
4126 hw->tx_int_cnt = 0;
4127 hw->tx_int_mask = NUM_OF_TX_DESC / 4;
4128 if (hw->tx_int_mask > 8)
4129 hw->tx_int_mask = 8;
4130 while (hw->tx_int_mask) {
4131 hw->tx_int_cnt++;
4132 hw->tx_int_mask >>= 1;
4133 }
4134 if (hw->tx_int_cnt) {
4135 hw->tx_int_mask = (1 << (hw->tx_int_cnt - 1)) - 1;
4136 hw->tx_int_cnt = 0;
4137 }
4138
4139 /* Determine the descriptor size. */
4140 hw->rx_desc_info.size =
4141 (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
4142 DESC_ALIGNMENT) * DESC_ALIGNMENT);
4143 hw->tx_desc_info.size =
4144 (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
4145 DESC_ALIGNMENT) * DESC_ALIGNMENT);
4146 if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc))
4147 pr_alert("Hardware descriptor size not right!\n");
4148 ksz_check_desc_num(&hw->rx_desc_info);
4149 ksz_check_desc_num(&hw->tx_desc_info);
4150
4151 /* Allocate descriptors. */
4152 if (ksz_alloc_desc(adapter))
4153 return 1;
4154
4155 return 0;
4156 }
4157
4158 /**
4159 * ksz_free_desc - free software and hardware descriptors
4160 * @adapter: Adapter information structure.
4161 *
4162 * This local routine frees the software and hardware descriptors allocated by
4163 * ksz_alloc_desc().
4164 */
ksz_free_desc(struct dev_info * adapter)4165 static void ksz_free_desc(struct dev_info *adapter)
4166 {
4167 struct ksz_hw *hw = &adapter->hw;
4168
4169 /* Reset descriptor. */
4170 hw->rx_desc_info.ring_virt = NULL;
4171 hw->tx_desc_info.ring_virt = NULL;
4172 hw->rx_desc_info.ring_phys = 0;
4173 hw->tx_desc_info.ring_phys = 0;
4174
4175 /* Free memory. */
4176 if (adapter->desc_pool.alloc_virt)
4177 dma_free_coherent(&adapter->pdev->dev,
4178 adapter->desc_pool.alloc_size,
4179 adapter->desc_pool.alloc_virt,
4180 adapter->desc_pool.dma_addr);
4181
4182 /* Reset resource pool. */
4183 adapter->desc_pool.alloc_size = 0;
4184 adapter->desc_pool.alloc_virt = NULL;
4185
4186 kfree(hw->rx_desc_info.ring);
4187 hw->rx_desc_info.ring = NULL;
4188 kfree(hw->tx_desc_info.ring);
4189 hw->tx_desc_info.ring = NULL;
4190 }
4191
4192 /**
4193 * ksz_free_buffers - free buffers used in the descriptors
4194 * @adapter: Adapter information structure.
4195 * @desc_info: Descriptor information structure.
4196 * @direction: to or from device
4197 *
4198 * This local routine frees buffers used in the DMA buffers.
4199 */
ksz_free_buffers(struct dev_info * adapter,struct ksz_desc_info * desc_info,int direction)4200 static void ksz_free_buffers(struct dev_info *adapter,
4201 struct ksz_desc_info *desc_info, int direction)
4202 {
4203 int i;
4204 struct ksz_dma_buf *dma_buf;
4205 struct ksz_desc *desc = desc_info->ring;
4206
4207 for (i = 0; i < desc_info->alloc; i++) {
4208 dma_buf = DMA_BUFFER(desc);
4209 if (dma_buf->skb)
4210 free_dma_buf(adapter, dma_buf, direction);
4211 desc++;
4212 }
4213 }
4214
4215 /**
4216 * ksz_free_mem - free all resources used by descriptors
4217 * @adapter: Adapter information structure.
4218 *
4219 * This local routine frees all the resources allocated by ksz_alloc_mem().
4220 */
ksz_free_mem(struct dev_info * adapter)4221 static void ksz_free_mem(struct dev_info *adapter)
4222 {
4223 /* Free transmit buffers. */
4224 ksz_free_buffers(adapter, &adapter->hw.tx_desc_info, DMA_TO_DEVICE);
4225
4226 /* Free receive buffers. */
4227 ksz_free_buffers(adapter, &adapter->hw.rx_desc_info, DMA_FROM_DEVICE);
4228
4229 /* Free descriptors. */
4230 ksz_free_desc(adapter);
4231 }
4232
/* Sum the MIB counters of ports [first, first + cnt) into @counter. */
static void get_mib_counters(struct ksz_hw *hw, int first, int cnt,
	u64 *counter)
{
	int port;
	int mib;

	memset(counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
	for (port = first; port < first + cnt; port++) {
		struct ksz_port_mib *port_mib = &hw->port_mib[port];

		for (mib = port_mib->mib_start; mib < hw->mib_cnt; mib++)
			counter[mib] += port_mib->counter[mib];
	}
}
4248
/**
 * send_packet - send packet
 * @skb: Socket buffer.
 * @dev: Network device.
 *
 * This routine is used to send a packet out to the network.  It maps the
 * linear part of @skb (and each paged fragment, if any) for DMA, fills one
 * transmit descriptor per buffer, and hands the chain to the hardware via
 * hw_send_pkt().  The caller is expected to have reserved enough
 * descriptors beforehand (see hw_alloc_pkt() in netdev_tx()).
 */
static void send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct ksz_desc *desc;
	struct ksz_desc *first;
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_desc_info *info = &hw->tx_desc_info;
	struct ksz_dma_buf *dma_buf;
	int len;
	int last_frag = skb_shinfo(skb)->nr_frags;

	/*
	 * KSZ8842 with multiple device interfaces needs to be told which port
	 * to send.
	 */
	if (hw->dev_count > 1)
		hw->dst_ports = 1 << priv->port.first_port;

	/* Hardware will pad the length to 60. */
	len = skb->len;

	/* Remember the very first descriptor. */
	first = info->cur;
	desc = first;

	dma_buf = DMA_BUFFER(desc);
	if (last_frag) {
		/* Scatter-gather: one descriptor for the linear part, then
		 * one per fragment.
		 */
		int frag;
		skb_frag_t *this_frag;

		dma_buf->len = skb_headlen(skb);

		/* NOTE(review): dma_map_single() result is not checked with
		 * dma_mapping_error() here or below — confirm acceptable.
		 */
		dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data,
					      dma_buf->len, DMA_TO_DEVICE);
		set_tx_buf(desc, dma_buf->dma);
		set_tx_len(desc, dma_buf->len);

		frag = 0;
		do {
			this_frag = &skb_shinfo(skb)->frags[frag];

			/* Get a new descriptor. */
			get_tx_pkt(info, &desc);

			/* Keep track of descriptors used so far. */
			++hw->tx_int_cnt;

			dma_buf = DMA_BUFFER(desc);
			dma_buf->len = skb_frag_size(this_frag);

			dma_buf->dma = dma_map_single(&hw_priv->pdev->dev,
					skb_frag_address(this_frag),
					dma_buf->len,
					DMA_TO_DEVICE);
			set_tx_buf(desc, dma_buf->dma);
			set_tx_len(desc, dma_buf->len);

			frag++;
			if (frag == last_frag)
				break;

			/* Do not release the last descriptor here. */
			release_desc(desc);
		} while (1);

		/* current points to the last descriptor. */
		info->cur = desc;

		/* Release the first descriptor.  Doing this last hands the
		 * whole chain to the hardware atomically.
		 */
		release_desc(first);
	} else {
		/* Linear skb: a single descriptor covers the whole frame. */
		dma_buf->len = len;

		dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data,
					      dma_buf->len, DMA_TO_DEVICE);
		set_tx_buf(desc, dma_buf->dma);
		set_tx_len(desc, dma_buf->len);
	}

	/* Ask the hardware to generate the L4 checksum. */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		(desc)->sw.buf.tx.csum_gen_tcp = 1;
		(desc)->sw.buf.tx.csum_gen_udp = 1;
	}

	/*
	 * The last descriptor holds the packet so that it can be returned to
	 * network subsystem after all descriptors are transmitted.
	 */
	dma_buf->skb = skb;

	hw_send_pkt(hw);

	/* Update transmit statistics. */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;
}
4353
/**
 * transmit_cleanup - clean up transmit descriptors
 * @hw_priv: Network device.
 * @normal: break if owned
 *
 * This routine is called to clean up the transmitted buffers.  It walks the
 * transmit ring from info->last, unmapping each buffer and freeing the skb
 * attached to the last descriptor of every packet.  With @normal set the
 * walk stops at the first descriptor still owned by hardware; with @normal
 * clear (reset path) hardware-owned descriptors are forcibly reclaimed via
 * reset_desc().
 */
static void transmit_cleanup(struct dev_info *hw_priv, int normal)
{
	int last;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_desc_info *info = &hw->tx_desc_info;
	struct ksz_desc *desc;
	struct ksz_dma_buf *dma_buf;
	struct net_device *dev = NULL;

	/* The ring indices are shared with the transmit path. */
	spin_lock_irq(&hw_priv->hwlock);
	last = info->last;

	/* avail < alloc means there are outstanding descriptors. */
	while (info->avail < info->alloc) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[last];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.tx.hw_owned) {
			if (normal)
				break;
			else
				reset_desc(desc, status);
		}

		dma_buf = DMA_BUFFER(desc);
		dma_unmap_single(&hw_priv->pdev->dev, dma_buf->dma,
				 dma_buf->len, DMA_TO_DEVICE);

		/* This descriptor contains the last buffer in the packet. */
		if (dma_buf->skb) {
			dev = dma_buf->skb->dev;

			/* Release the packet back to network subsystem. */
			dev_kfree_skb_irq(dma_buf->skb);
			dma_buf->skb = NULL;
		}

		/* Free the transmitted descriptor. */
		last++;
		last &= info->mask;
		info->avail++;
	}
	info->last = last;
	spin_unlock_irq(&hw_priv->hwlock);

	/* Notify the network subsystem that the packet has been sent. */
	if (dev)
		netif_trans_update(dev);
}
4410
4411 /**
4412 * tx_done - transmit done processing
4413 * @hw_priv: Network device.
4414 *
4415 * This routine is called when the transmit interrupt is triggered, indicating
4416 * either a packet is sent successfully or there are transmit errors.
4417 */
tx_done(struct dev_info * hw_priv)4418 static void tx_done(struct dev_info *hw_priv)
4419 {
4420 struct ksz_hw *hw = &hw_priv->hw;
4421 int port;
4422
4423 transmit_cleanup(hw_priv, 1);
4424
4425 for (port = 0; port < hw->dev_count; port++) {
4426 struct net_device *dev = hw->port_info[port].pdev;
4427
4428 if (netif_running(dev) && netif_queue_stopped(dev))
4429 netif_wake_queue(dev);
4430 }
4431 }
4432
copy_old_skb(struct sk_buff * old,struct sk_buff * skb)4433 static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
4434 {
4435 skb->dev = old->dev;
4436 skb->protocol = old->protocol;
4437 skb->ip_summed = old->ip_summed;
4438 skb->csum = old->csum;
4439 skb_set_network_header(skb, ETH_HLEN);
4440
4441 dev_consume_skb_any(old);
4442 }
4443
/**
 * netdev_tx - send out packet
 * @skb: Socket buffer.
 * @dev: Network device.
 *
 * This function is used by the upper network layer to send out a packet.
 *
 * Return 0 if successful; otherwise an error code indicating failure.
 */
static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int left;
	int num = 1;
	int rc = 0;

	/* Hardware erratum workaround: pad very small frames to 50 bytes. */
	if (hw->features & SMALL_PACKET_TX_BUG) {
		struct sk_buff *org_skb = skb;

		if (skb->len <= 48) {
			if (skb_end_pointer(skb) - skb->data >= 50) {
				/* Room in place: zero-pad the tail. */
				memset(&skb->data[skb->len], 0, 50 - skb->len);
				skb->len = 50;
			} else {
				/* No tailroom: copy into a padded skb. */
				skb = netdev_alloc_skb(dev, 50);
				if (!skb)
					return NETDEV_TX_BUSY;
				memcpy(skb->data, org_skb->data, org_skb->len);
				memset(&skb->data[org_skb->len], 0,
				       50 - org_skb->len);
				skb->len = 50;
				/* NOTE(review): this consumes org_skb; if the
				 * ring is full below and NETDEV_TX_BUSY is
				 * returned, the stack retries with the freed
				 * original skb — confirm this path.
				 */
				copy_old_skb(org_skb, skb);
			}
		}
	}

	spin_lock_irq(&hw_priv->hwlock);

	/* One descriptor for the linear part plus one per fragment. */
	num = skb_shinfo(skb)->nr_frags + 1;
	left = hw_alloc_pkt(hw, skb->len, num);
	if (left) {
		/* Linearize when descriptors are scarce, or to let the
		 * hardware checksum logic see a contiguous IPv6 frame.
		 */
		if (left < num ||
		    (CHECKSUM_PARTIAL == skb->ip_summed &&
		     skb->protocol == htons(ETH_P_IPV6))) {
			struct sk_buff *org_skb = skb;

			skb = netdev_alloc_skb(dev, org_skb->len);
			if (!skb) {
				rc = NETDEV_TX_BUSY;
				goto unlock;
			}
			skb_copy_and_csum_dev(org_skb, skb->data);
			org_skb->ip_summed = CHECKSUM_NONE;
			skb->len = org_skb->len;
			copy_old_skb(org_skb, skb);
		}
		send_packet(skb, dev);
		/* Stop early so the next frame never finds the ring full. */
		if (left <= num)
			netif_stop_queue(dev);
	} else {
		/* Stop the transmit queue until packet is allocated. */
		netif_stop_queue(dev);
		rc = NETDEV_TX_BUSY;
	}
unlock:
	spin_unlock_irq(&hw_priv->hwlock);

	return rc;
}
4515
/**
 * netdev_tx_timeout - transmit timeout processing
 * @dev: Network device.
 * @txqueue: index of hanging queue
 *
 * This routine is called when the transmit timer expires. That indicates the
 * hardware is not running correctly because transmit interrupts are not
 * triggered to free up resources so that the transmit routine can continue
 * sending out packets. The hardware is reset to correct the problem.
 */
static void netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	/* Shared across all interfaces; rate-limits full resets. */
	static unsigned long last_reset;

	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int port;

	if (hw->dev_count > 1) {
		/*
		 * Only reset the hardware if time between calls is long
		 * enough.
		 */
		if (time_before_eq(jiffies, last_reset + dev->watchdog_timeo))
			hw_priv = NULL;	/* skip the reset below */
	}

	last_reset = jiffies;
	if (hw_priv) {
		/* Quiesce the hardware before touching the rings. */
		hw_dis_intr(hw);
		hw_disable(hw);

		/* Reclaim everything in flight and rebuild the rings. */
		transmit_cleanup(hw_priv, 0);
		hw_reset_pkts(&hw->rx_desc_info);
		hw_reset_pkts(&hw->tx_desc_info);
		ksz_init_rx_buffers(hw_priv);

		hw_reset(hw);

		/* Reprogram state lost by the reset. */
		hw_set_desc_base(hw,
			hw->tx_desc_info.ring_phys,
			hw->rx_desc_info.ring_phys);
		hw_set_addr(hw);
		if (hw->all_multi)
			hw_set_multicast(hw, hw->all_multi);
		else if (hw->multi_list_size)
			hw_set_grp_addr(hw);

		if (hw->dev_count > 1) {
			/* Restore per-port STP state on the switch. */
			hw_set_add_addr(hw);
			for (port = 0; port < SWITCH_PORT_NUM; port++) {
				struct net_device *port_dev;

				port_set_stp_state(hw, port,
					STP_STATE_DISABLED);

				port_dev = hw->port_info[port].pdev;
				if (netif_running(port_dev))
					port_set_stp_state(hw, port,
						STP_STATE_SIMPLE);
			}
		}

		hw_enable(hw);
		hw_ena_intr(hw);
	}

	netif_trans_update(dev);
	netif_wake_queue(dev);
}
4587
/*
 * Mark the checksum of a received TCP/IPv4 frame as already verified by
 * hardware.  Called from rx_proc() when receive checksum offload is on.
 */
static inline void csum_verified(struct sk_buff *skb)
{
	unsigned short protocol;
	struct iphdr *iph;

	protocol = skb->protocol;
	skb_reset_network_header(skb);
	iph = (struct iphdr *) skb_network_header(skb);
	if (protocol == htons(ETH_P_8021Q)) {
		/* NOTE(review): at this point iph actually points at the
		 * VLAN header, and iphdr.tot_len (offset 2) overlays the
		 * encapsulated protocol field of the VLAN tag — this reads
		 * the inner EtherType, not an IP length.  Relies on struct
		 * layout; confirm before changing.
		 */
		protocol = iph->tot_len;
		skb_set_network_header(skb, VLAN_HLEN);
		iph = (struct iphdr *) skb_network_header(skb);
	}
	if (protocol == htons(ETH_P_IP)) {
		/* Hardware checks TCP checksums; trust it for TCP only. */
		if (iph->protocol == IPPROTO_TCP)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
4606
rx_proc(struct net_device * dev,struct ksz_hw * hw,struct ksz_desc * desc,union desc_stat status)4607 static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
4608 struct ksz_desc *desc, union desc_stat status)
4609 {
4610 int packet_len;
4611 struct dev_priv *priv = netdev_priv(dev);
4612 struct dev_info *hw_priv = priv->adapter;
4613 struct ksz_dma_buf *dma_buf;
4614 struct sk_buff *skb;
4615
4616 /* Received length includes 4-byte CRC. */
4617 packet_len = status.rx.frame_len - 4;
4618
4619 dma_buf = DMA_BUFFER(desc);
4620 dma_sync_single_for_cpu(&hw_priv->pdev->dev, dma_buf->dma,
4621 packet_len + 4, DMA_FROM_DEVICE);
4622
4623 do {
4624 /* skb->data != skb->head */
4625 skb = netdev_alloc_skb(dev, packet_len + 2);
4626 if (!skb) {
4627 dev->stats.rx_dropped++;
4628 return -ENOMEM;
4629 }
4630
4631 /*
4632 * Align socket buffer in 4-byte boundary for better
4633 * performance.
4634 */
4635 skb_reserve(skb, 2);
4636
4637 skb_put_data(skb, dma_buf->skb->data, packet_len);
4638 } while (0);
4639
4640 skb->protocol = eth_type_trans(skb, dev);
4641
4642 if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
4643 csum_verified(skb);
4644
4645 /* Update receive statistics. */
4646 dev->stats.rx_packets++;
4647 dev->stats.rx_bytes += packet_len;
4648
4649 /* Notify upper layer for received packet. */
4650 netif_rx(skb);
4651
4652 return 0;
4653 }
4654
dev_rcv_packets(struct dev_info * hw_priv)4655 static int dev_rcv_packets(struct dev_info *hw_priv)
4656 {
4657 int next;
4658 union desc_stat status;
4659 struct ksz_hw *hw = &hw_priv->hw;
4660 struct net_device *dev = hw->port_info[0].pdev;
4661 struct ksz_desc_info *info = &hw->rx_desc_info;
4662 int left = info->alloc;
4663 struct ksz_desc *desc;
4664 int received = 0;
4665
4666 next = info->next;
4667 while (left--) {
4668 /* Get next descriptor which is not hardware owned. */
4669 desc = &info->ring[next];
4670 status.data = le32_to_cpu(desc->phw->ctrl.data);
4671 if (status.rx.hw_owned)
4672 break;
4673
4674 /* Status valid only when last descriptor bit is set. */
4675 if (status.rx.last_desc && status.rx.first_desc) {
4676 if (rx_proc(dev, hw, desc, status))
4677 goto release_packet;
4678 received++;
4679 }
4680
4681 release_packet:
4682 release_desc(desc);
4683 next++;
4684 next &= info->mask;
4685 }
4686 info->next = next;
4687
4688 return received;
4689 }
4690
port_rcv_packets(struct dev_info * hw_priv)4691 static int port_rcv_packets(struct dev_info *hw_priv)
4692 {
4693 int next;
4694 union desc_stat status;
4695 struct ksz_hw *hw = &hw_priv->hw;
4696 struct net_device *dev = hw->port_info[0].pdev;
4697 struct ksz_desc_info *info = &hw->rx_desc_info;
4698 int left = info->alloc;
4699 struct ksz_desc *desc;
4700 int received = 0;
4701
4702 next = info->next;
4703 while (left--) {
4704 /* Get next descriptor which is not hardware owned. */
4705 desc = &info->ring[next];
4706 status.data = le32_to_cpu(desc->phw->ctrl.data);
4707 if (status.rx.hw_owned)
4708 break;
4709
4710 if (hw->dev_count > 1) {
4711 /* Get received port number. */
4712 int p = HW_TO_DEV_PORT(status.rx.src_port);
4713
4714 dev = hw->port_info[p].pdev;
4715 if (!netif_running(dev))
4716 goto release_packet;
4717 }
4718
4719 /* Status valid only when last descriptor bit is set. */
4720 if (status.rx.last_desc && status.rx.first_desc) {
4721 if (rx_proc(dev, hw, desc, status))
4722 goto release_packet;
4723 received++;
4724 }
4725
4726 release_packet:
4727 release_desc(desc);
4728 next++;
4729 next &= info->mask;
4730 }
4731 info->next = next;
4732
4733 return received;
4734 }
4735
/*
 * Receive-ring drain used when huge-frame reception is enabled: oversized
 * frames are flagged as errors by hardware, so the TOO_LONG error is
 * tolerated and the frame is still delivered; every other error only bumps
 * the port's receive-error counter.  Returns frames delivered.
 */
static int dev_rcv_special(struct dev_info *hw_priv)
{
	int next;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct net_device *dev = hw->port_info[0].pdev;
	struct ksz_desc_info *info = &hw->rx_desc_info;
	int left = info->alloc;
	struct ksz_desc *desc;
	int received = 0;

	next = info->next;
	while (left--) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[next];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.rx.hw_owned)
			break;

		if (hw->dev_count > 1) {
			/* Get received port number. */
			int p = HW_TO_DEV_PORT(status.rx.src_port);

			dev = hw->port_info[p].pdev;
			/* Frames for a closed interface are just recycled. */
			if (!netif_running(dev))
				goto release_packet;
		}

		/* Status valid only when last descriptor bit is set. */
		if (status.rx.last_desc && status.rx.first_desc) {
			/*
			 * Receive without error. With receive errors
			 * disabled, packets with receive errors will be
			 * dropped, so no need to check the error bit.
			 */
			if (!status.rx.error || (status.data &
					KS_DESC_RX_ERROR_COND) ==
					KS_DESC_RX_ERROR_TOO_LONG) {
				if (rx_proc(dev, hw, desc, status))
					goto release_packet;
				received++;
			} else {
				struct dev_priv *priv = netdev_priv(dev);

				/* Update receive error statistics. */
				priv->port.counter[OID_COUNTER_RCV_ERROR]++;
			}
		}

release_packet:
		release_desc(desc);
		next++;
		next &= info->mask;
	}
	info->next = next;

	return received;
}
4794
rx_proc_task(struct tasklet_struct * t)4795 static void rx_proc_task(struct tasklet_struct *t)
4796 {
4797 struct dev_info *hw_priv = from_tasklet(hw_priv, t, rx_tasklet);
4798 struct ksz_hw *hw = &hw_priv->hw;
4799
4800 if (!hw->enabled)
4801 return;
4802 if (unlikely(!hw_priv->dev_rcv(hw_priv))) {
4803
4804 /* In case receive process is suspended because of overrun. */
4805 hw_resume_rx(hw);
4806
4807 /* tasklets are interruptible. */
4808 spin_lock_irq(&hw_priv->hwlock);
4809 hw_turn_on_intr(hw, KS884X_INT_RX_MASK);
4810 spin_unlock_irq(&hw_priv->hwlock);
4811 } else {
4812 hw_ack_intr(hw, KS884X_INT_RX);
4813 tasklet_schedule(&hw_priv->rx_tasklet);
4814 }
4815 }
4816
/* Transmit tasklet: acknowledge the transmit interrupts, reclaim finished
 * descriptors, and re-arm the transmit interrupt.
 */
static void tx_proc_task(struct tasklet_struct *t)
{
	struct dev_info *hw_priv = from_tasklet(hw_priv, t, tx_tasklet);
	struct ksz_hw *hw = &hw_priv->hw;

	/* Acknowledge first; tx_done() below reclaims the descriptors. */
	hw_ack_intr(hw, KS884X_INT_TX_MASK);

	tx_done(hw_priv);

	/* tasklets are interruptible. */
	spin_lock_irq(&hw_priv->hwlock);
	hw_turn_on_intr(hw, KS884X_INT_TX);
	spin_unlock_irq(&hw_priv->hwlock);
}
4831
handle_rx_stop(struct ksz_hw * hw)4832 static inline void handle_rx_stop(struct ksz_hw *hw)
4833 {
4834 /* Receive just has been stopped. */
4835 if (0 == hw->rx_stop)
4836 hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
4837 else if (hw->rx_stop > 1) {
4838 if (hw->enabled && (hw->rx_cfg & DMA_RX_ENABLE)) {
4839 hw_start_rx(hw);
4840 } else {
4841 hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
4842 hw->rx_stop = 0;
4843 }
4844 } else
4845 /* Receive just has been started. */
4846 hw->rx_stop++;
4847 }
4848
/**
 * netdev_intr - interrupt handling
 * @irq: Interrupt number.
 * @dev_id: Network device.
 *
 * This function is called by upper network layer to signal interrupt.
 *
 * Return IRQ_HANDLED if interrupt is handled.
 */
static irqreturn_t netdev_intr(int irq, void *dev_id)
{
	uint int_enable = 0;
	struct net_device *dev = (struct net_device *) dev_id;
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	spin_lock(&hw_priv->hwlock);

	hw_read_intr(hw, &int_enable);

	/* Not our interrupt! */
	if (!int_enable) {
		spin_unlock(&hw_priv->hwlock);
		return IRQ_NONE;
	}

	do {
		/* Ack everything, then only handle the unmasked sources. */
		hw_ack_intr(hw, int_enable);
		int_enable &= hw->intr_mask;

		/* Tx/Rx work is deferred to tasklets; mask the source so it
		 * does not refire until the tasklet re-arms it.
		 */
		if (unlikely(int_enable & KS884X_INT_TX_MASK)) {
			hw_dis_intr_bit(hw, KS884X_INT_TX_MASK);
			tasklet_schedule(&hw_priv->tx_tasklet);
		}

		if (likely(int_enable & KS884X_INT_RX)) {
			hw_dis_intr_bit(hw, KS884X_INT_RX);
			tasklet_schedule(&hw_priv->rx_tasklet);
		}

		if (unlikely(int_enable & KS884X_INT_RX_OVERRUN)) {
			dev->stats.rx_fifo_errors++;
			hw_resume_rx(hw);
		}

		if (unlikely(int_enable & KS884X_INT_PHY)) {
			struct ksz_port *port = &priv->port;

			/* Link interrupts work; the monitor need not poll. */
			hw->features |= LINK_INT_WORKING;
			port_get_link_speed(port);
		}

		if (unlikely(int_enable & KS884X_INT_RX_STOPPED)) {
			handle_rx_stop(hw);
			break;
		}

		if (unlikely(int_enable & KS884X_INT_TX_STOPPED)) {
			u32 data;

			hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
			pr_info("Tx stopped\n");
			data = readl(hw->io + KS_DMA_TX_CTRL);
			if (!(data & DMA_TX_ENABLE))
				pr_info("Tx disabled\n");
			break;
		}
	} while (0);

	hw_ena_intr(hw);

	spin_unlock(&hw_priv->hwlock);

	return IRQ_HANDLED;
}
4925
4926 /*
4927 * Linux network device functions
4928 */
4929
4930
4931 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: mask the chip interrupt and run the handler by hand. */
static void netdev_netpoll(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	hw_dis_intr(&hw_priv->hw);
	netdev_intr(dev->irq, dev);
}
4940 #endif
4941
bridge_change(struct ksz_hw * hw)4942 static void bridge_change(struct ksz_hw *hw)
4943 {
4944 int port;
4945 u8 member;
4946 struct ksz_switch *sw = hw->ksz_switch;
4947
4948 /* No ports in forwarding state. */
4949 if (!sw->member) {
4950 port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
4951 sw_block_addr(hw);
4952 }
4953 for (port = 0; port < SWITCH_PORT_NUM; port++) {
4954 if (STP_STATE_FORWARDING == sw->port_cfg[port].stp_state)
4955 member = HOST_MASK | sw->member;
4956 else
4957 member = HOST_MASK | (1 << port);
4958 if (member != sw->port_cfg[port].member)
4959 sw_cfg_port_base_vlan(hw, port, member);
4960 }
4961 }
4962
/**
 * netdev_close - close network device
 * @dev: Network device.
 *
 * This function process the close operation of network device. This is caused
 * by the user command "ifconfig ethX down."
 *
 * Return 0 if successful; otherwise an error code indicating failure.
 */
static int netdev_close(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_port *port = &priv->port;
	struct ksz_hw *hw = &hw_priv->hw;
	int pi;

	netif_stop_queue(dev);

	ksz_stop_timer(&priv->monitor_timer_info);

	/* Need to shut the port manually in multiple device interfaces mode. */
	if (hw->dev_count > 1) {
		port_set_stp_state(hw, port->first_port, STP_STATE_DISABLED);

		/* Port is closed. Need to change bridge setting. */
		if (hw->features & STP_SUPPORT) {
			pi = 1 << port->first_port;
			if (hw->ksz_switch->member & pi) {
				hw->ksz_switch->member &= ~pi;
				bridge_change(hw);
			}
		}
	}
	/* Secondary ports registered an extra MAC address on open. */
	if (port->first_port > 0)
		hw_del_addr(hw, dev->dev_addr);
	if (!hw_priv->wol_enable)
		port_set_power_saving(port, true);

	/* Undo the filter-mode reference counts taken while open. */
	if (priv->multicast)
		--hw->all_multi;
	if (priv->promiscuous)
		--hw->promiscuous;

	/* Tear down shared hardware state only when the last interface
	 * closes.
	 */
	hw_priv->opened--;
	if (!(hw_priv->opened)) {
		ksz_stop_timer(&hw_priv->mib_timer_info);
		flush_work(&hw_priv->mib_read);

		hw_dis_intr(hw);
		hw_disable(hw);
		hw_clr_multicast(hw);

		/* Delay for receive task to stop scheduling itself. */
		msleep(2000 / HZ);

		tasklet_kill(&hw_priv->rx_tasklet);
		tasklet_kill(&hw_priv->tx_tasklet);
		free_irq(dev->irq, hw_priv->dev);

		transmit_cleanup(hw_priv, 0);
		hw_reset_pkts(&hw->rx_desc_info);
		hw_reset_pkts(&hw->tx_desc_info);

		/* Clean out static MAC table when the switch is shutdown. */
		if (hw->features & STP_SUPPORT)
			sw_clr_sta_mac_table(hw);
	}

	return 0;
}
5034
hw_cfg_huge_frame(struct dev_info * hw_priv,struct ksz_hw * hw)5035 static void hw_cfg_huge_frame(struct dev_info *hw_priv, struct ksz_hw *hw)
5036 {
5037 if (hw->ksz_switch) {
5038 u32 data;
5039
5040 data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
5041 if (hw->features & RX_HUGE_FRAME)
5042 data |= SWITCH_HUGE_PACKET;
5043 else
5044 data &= ~SWITCH_HUGE_PACKET;
5045 writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
5046 }
5047 if (hw->features & RX_HUGE_FRAME) {
5048 hw->rx_cfg |= DMA_RX_ERROR;
5049 hw_priv->dev_rcv = dev_rcv_special;
5050 } else {
5051 hw->rx_cfg &= ~DMA_RX_ERROR;
5052 if (hw->dev_count > 1)
5053 hw_priv->dev_rcv = port_rcv_packets;
5054 else
5055 hw_priv->dev_rcv = dev_rcv_packets;
5056 }
5057 }
5058
prepare_hardware(struct net_device * dev)5059 static int prepare_hardware(struct net_device *dev)
5060 {
5061 struct dev_priv *priv = netdev_priv(dev);
5062 struct dev_info *hw_priv = priv->adapter;
5063 struct ksz_hw *hw = &hw_priv->hw;
5064 int rc = 0;
5065
5066 /* Remember the network device that requests interrupts. */
5067 hw_priv->dev = dev;
5068 rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
5069 if (rc)
5070 return rc;
5071 tasklet_setup(&hw_priv->rx_tasklet, rx_proc_task);
5072 tasklet_setup(&hw_priv->tx_tasklet, tx_proc_task);
5073
5074 hw->promiscuous = 0;
5075 hw->all_multi = 0;
5076 hw->multi_list_size = 0;
5077
5078 hw_reset(hw);
5079
5080 hw_set_desc_base(hw,
5081 hw->tx_desc_info.ring_phys, hw->rx_desc_info.ring_phys);
5082 hw_set_addr(hw);
5083 hw_cfg_huge_frame(hw_priv, hw);
5084 ksz_init_rx_buffers(hw_priv);
5085 return 0;
5086 }
5087
set_media_state(struct net_device * dev,int media_state)5088 static void set_media_state(struct net_device *dev, int media_state)
5089 {
5090 struct dev_priv *priv = netdev_priv(dev);
5091
5092 if (media_state == priv->media_state)
5093 netif_carrier_on(dev);
5094 else
5095 netif_carrier_off(dev);
5096 netif_info(priv, link, dev, "link %s\n",
5097 media_state == priv->media_state ? "on" : "off");
5098 }
5099
/**
 * netdev_open - open network device
 * @dev: Network device.
 *
 * This function process the open operation of network device. This is caused
 * by the user command "ifconfig ethX up."
 *
 * Return 0 if successful; otherwise an error code indicating failure.
 */
static int netdev_open(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	unsigned long next_jiffies;
	int i;
	int p;
	int rc = 0;

	next_jiffies = jiffies + HZ * 2;
	priv->multicast = 0;
	priv->promiscuous = 0;

	/* Reset device statistics. */
	memset(&dev->stats, 0, sizeof(struct net_device_stats));
	memset((void *) port->counter, 0,
		(sizeof(u64) * OID_COUNTER_LAST));

	/* Shared hardware is brought up only by the first open. */
	if (!(hw_priv->opened)) {
		rc = prepare_hardware(dev);
		if (rc)
			return rc;
		/* Stagger the MIB counter reads one second apart. */
		for (i = 0; i < hw->mib_port_cnt; i++) {
			next_jiffies += HZ * 1;
			hw_priv->counter[i].time = next_jiffies;
			hw->port_mib[i].state = media_disconnected;
			port_init_cnt(hw, i);
		}
		if (hw->ksz_switch)
			hw->port_mib[HOST_PORT].state = media_connected;
		else {
			/* Single-port chip: arm Wake-on-LAN defaults. */
			hw_add_wol_bcast(hw);
			hw_cfg_wol_pme(hw, 0);
			hw_clr_wol_pme_status(&hw_priv->hw);
		}
	}
	port_set_power_saving(port, false);

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		/*
		 * Initialize to invalid value so that link detection
		 * is done.
		 */
		hw->port_info[p].partner = 0xFF;
		hw->port_info[p].state = media_disconnected;
	}

	/* Need to open the port in multiple device interfaces mode. */
	if (hw->dev_count > 1) {
		port_set_stp_state(hw, port->first_port, STP_STATE_SIMPLE);
		/* Secondary ports need their own MAC address entry. */
		if (port->first_port > 0)
			hw_add_addr(hw, dev->dev_addr);
	}

	port_get_link_speed(port);
	if (port->force_link)
		port_force_link_speed(port);
	else
		port_set_link_speed(port);

	/* First open also enables interrupts and the MIB timer. */
	if (!(hw_priv->opened)) {
		hw_setup_intr(hw);
		hw_enable(hw);
		hw_ena_intr(hw);

		if (hw->mib_port_cnt)
			ksz_start_timer(&hw_priv->mib_timer_info,
				hw_priv->mib_timer_info.period);
	}

	hw_priv->opened++;

	ksz_start_timer(&priv->monitor_timer_info,
		priv->monitor_timer_info.period);

	priv->media_state = port->linked->state;

	set_media_state(dev, media_connected);
	netif_start_queue(dev);

	return 0;
}
5193
5194 /* RX errors = rx_errors */
5195 /* RX dropped = rx_dropped */
5196 /* RX overruns = rx_fifo_errors */
5197 /* RX frame = rx_crc_errors + rx_frame_errors + rx_length_errors */
5198 /* TX errors = tx_errors */
5199 /* TX dropped = tx_dropped */
5200 /* TX overruns = tx_fifo_errors */
5201 /* TX carrier = tx_aborted_errors + tx_carrier_errors + tx_window_errors */
5202 /* collisions = collisions */
5203
5204 /**
5205 * netdev_query_statistics - query network device statistics
5206 * @dev: Network device.
5207 *
5208 * This function returns the statistics of the network device. The device
5209 * needs not be opened.
5210 *
5211 * Return network device statistics.
5212 */
netdev_query_statistics(struct net_device * dev)5213 static struct net_device_stats *netdev_query_statistics(struct net_device *dev)
5214 {
5215 struct dev_priv *priv = netdev_priv(dev);
5216 struct ksz_port *port = &priv->port;
5217 struct ksz_hw *hw = &priv->adapter->hw;
5218 struct ksz_port_mib *mib;
5219 int i;
5220 int p;
5221
5222 dev->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR];
5223 dev->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR];
5224
5225 /* Reset to zero to add count later. */
5226 dev->stats.multicast = 0;
5227 dev->stats.collisions = 0;
5228 dev->stats.rx_length_errors = 0;
5229 dev->stats.rx_crc_errors = 0;
5230 dev->stats.rx_frame_errors = 0;
5231 dev->stats.tx_window_errors = 0;
5232
5233 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
5234 mib = &hw->port_mib[p];
5235
5236 dev->stats.multicast += (unsigned long)
5237 mib->counter[MIB_COUNTER_RX_MULTICAST];
5238
5239 dev->stats.collisions += (unsigned long)
5240 mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION];
5241
5242 dev->stats.rx_length_errors += (unsigned long)(
5243 mib->counter[MIB_COUNTER_RX_UNDERSIZE] +
5244 mib->counter[MIB_COUNTER_RX_FRAGMENT] +
5245 mib->counter[MIB_COUNTER_RX_OVERSIZE] +
5246 mib->counter[MIB_COUNTER_RX_JABBER]);
5247 dev->stats.rx_crc_errors += (unsigned long)
5248 mib->counter[MIB_COUNTER_RX_CRC_ERR];
5249 dev->stats.rx_frame_errors += (unsigned long)(
5250 mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] +
5251 mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]);
5252
5253 dev->stats.tx_window_errors += (unsigned long)
5254 mib->counter[MIB_COUNTER_TX_LATE_COLLISION];
5255 }
5256
5257 return &dev->stats;
5258 }
5259
5260 /**
5261 * netdev_set_mac_address - set network device MAC address
5262 * @dev: Network device.
5263 * @addr: Buffer of MAC address.
5264 *
5265 * This function is used to set the MAC address of the network device.
5266 *
5267 * Return 0 to indicate success.
5268 */
static int netdev_set_mac_address(struct net_device *dev, void *addr)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct sockaddr *mac = addr;
	uint interrupt;

	/*
	 * Extra network devices (first_port > 0) use the additional MAC
	 * address table; the main device overrides the hardware default
	 * address instead.
	 */
	if (priv->port.first_port > 0)
		hw_del_addr(hw, dev->dev_addr);
	else {
		hw->mac_override = 1;
		memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
	}

	eth_hw_addr_set(dev, mac->sa_data);

	/* Block interrupts while the address registers are reprogrammed. */
	interrupt = hw_block_intr(hw);

	if (priv->port.first_port > 0)
		hw_add_addr(hw, dev->dev_addr);
	else
		hw_set_addr(hw);
	hw_restore_intr(hw, interrupt);

	return 0;
}
5296
/*
 * Track how many devices request promiscuous mode and program the
 * hardware only when the shared count crosses the 0 <-> 1 boundary.
 */
static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
	struct ksz_hw *hw, int promiscuous)
{
	if (promiscuous != priv->promiscuous) {
		u8 prev_state = hw->promiscuous;

		/* hw->promiscuous is a reference count shared by all devices. */
		if (promiscuous)
			++hw->promiscuous;
		else
			--hw->promiscuous;
		priv->promiscuous = promiscuous;

		/* Turn on/off promiscuous mode. */
		if (hw->promiscuous <= 1 && prev_state <= 1)
			hw_set_promiscuous(hw, hw->promiscuous);

		/*
		 * Port is not in promiscuous mode, meaning it is released
		 * from the bridge.
		 */
		if ((hw->features & STP_SUPPORT) && !promiscuous &&
		    netif_is_bridge_port(dev)) {
			struct ksz_switch *sw = hw->ksz_switch;
			int port = priv->port.first_port;

			/* Stop forwarding and drop the port from the STP member set. */
			port_set_stp_state(hw, port, STP_STATE_DISABLED);
			port = 1 << port;
			if (sw->member & port) {
				sw->member &= ~port;
				bridge_change(hw);
			}
		}
	}
}
5331
dev_set_multicast(struct dev_priv * priv,struct ksz_hw * hw,int multicast)5332 static void dev_set_multicast(struct dev_priv *priv, struct ksz_hw *hw,
5333 int multicast)
5334 {
5335 if (multicast != priv->multicast) {
5336 u8 all_multi = hw->all_multi;
5337
5338 if (multicast)
5339 ++hw->all_multi;
5340 else
5341 --hw->all_multi;
5342 priv->multicast = multicast;
5343
5344 /* Turn on/off all multicast mode. */
5345 if (hw->all_multi <= 1 && all_multi <= 1)
5346 hw_set_multicast(hw, hw->all_multi);
5347 }
5348 }
5349
5350 /**
5351 * netdev_set_rx_mode
5352 * @dev: Network device.
5353 *
5354 * This routine is used to set multicast addresses or put the network device
5355 * into promiscuous mode.
5356 */
static void netdev_set_rx_mode(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct netdev_hw_addr *ha;
	int multicast = (dev->flags & IFF_ALLMULTI);

	dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));

	/* In multiple device interfaces mode accept all multicast traffic. */
	if (hw_priv->hw.dev_count > 1)
		multicast |= (dev->flags & IFF_MULTICAST);
	dev_set_multicast(priv, hw, multicast);

	/* Cannot use different hashes in multiple device interfaces mode. */
	if (hw_priv->hw.dev_count > 1)
		return;

	if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
		int i = 0;

		/* List too big to support so turn on all multicast mode. */
		if (netdev_mc_count(dev) > MAX_MULTICAST_LIST) {
			if (MAX_MULTICAST_LIST != hw->multi_list_size) {
				hw->multi_list_size = MAX_MULTICAST_LIST;
				++hw->all_multi;
				hw_set_multicast(hw, hw->all_multi);
			}
			return;
		}

		/* Copy the addresses into the hardware group address list. */
		netdev_for_each_mc_addr(ha, dev) {
			if (i >= MAX_MULTICAST_LIST)
				break;
			memcpy(hw->multi_list[i++], ha->addr, ETH_ALEN);
		}
		hw->multi_list_size = (u8) i;
		hw_set_grp_addr(hw);
	} else {
		/* Leave the all multicast mode entered above for a full list. */
		if (MAX_MULTICAST_LIST == hw->multi_list_size) {
			--hw->all_multi;
			hw_set_multicast(hw, hw->all_multi);
		}
		hw->multi_list_size = 0;
		hw_clr_multicast(hw);
	}
}
5404
netdev_change_mtu(struct net_device * dev,int new_mtu)5405 static int netdev_change_mtu(struct net_device *dev, int new_mtu)
5406 {
5407 struct dev_priv *priv = netdev_priv(dev);
5408 struct dev_info *hw_priv = priv->adapter;
5409 struct ksz_hw *hw = &hw_priv->hw;
5410 int hw_mtu;
5411
5412 if (netif_running(dev))
5413 return -EBUSY;
5414
5415 /* Cannot use different MTU in multiple device interfaces mode. */
5416 if (hw->dev_count > 1)
5417 if (dev != hw_priv->dev)
5418 return 0;
5419
5420 hw_mtu = new_mtu + ETHERNET_HEADER_SIZE + 4;
5421 if (hw_mtu > REGULAR_RX_BUF_SIZE) {
5422 hw->features |= RX_HUGE_FRAME;
5423 hw_mtu = MAX_RX_BUF_SIZE;
5424 } else {
5425 hw->features &= ~RX_HUGE_FRAME;
5426 hw_mtu = REGULAR_RX_BUF_SIZE;
5427 }
5428 hw_mtu = (hw_mtu + 3) & ~3;
5429 hw_priv->mtu = hw_mtu;
5430 WRITE_ONCE(dev->mtu, new_mtu);
5431
5432 return 0;
5433 }
5434
5435 /**
5436 * netdev_ioctl - I/O control processing
5437 * @dev: Network device.
5438 * @ifr: Interface request structure.
5439 * @cmd: I/O control code.
5440 *
5441 * This function is used to process I/O control calls.
5442 *
5443 * Return 0 to indicate success.
5444 */
static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	int result = 0;
	struct mii_ioctl_data *data = if_mii(ifr);

	/* Serialize PHY register access among ioctl callers. */
	if (down_interruptible(&priv->proc_sem))
		return -ERESTARTSYS;

	switch (cmd) {
	/* Get address of MII PHY in use. */
	case SIOCGMIIPHY:
		data->phy_id = priv->id;
		fallthrough;

	/* Read MII PHY register. */
	case SIOCGMIIREG:
		/* Only the first 6 PHY registers are accessible here. */
		if (data->phy_id != priv->id || data->reg_num >= 6)
			result = -EIO;
		else
			hw_r_phy(hw, port->linked->port_id, data->reg_num,
				&data->val_out);
		break;

	/* Write MII PHY register. */
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			result = -EPERM;
		else if (data->phy_id != priv->id || data->reg_num >= 6)
			result = -EIO;
		else
			hw_w_phy(hw, port->linked->port_id, data->reg_num,
				data->val_in);
		break;

	default:
		result = -EOPNOTSUPP;
	}

	up(&priv->proc_sem);

	return result;
}
5491
5492 /*
5493 * MII support
5494 */
5495
5496 /**
5497 * mdio_read - read PHY register
5498 * @dev: Network device.
5499 * @phy_id: The PHY id.
5500 * @reg_num: The register number.
5501 *
5502 * This function returns the PHY register value.
5503 *
5504 * Return the register value.
5505 */
mdio_read(struct net_device * dev,int phy_id,int reg_num)5506 static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
5507 {
5508 struct dev_priv *priv = netdev_priv(dev);
5509 struct ksz_port *port = &priv->port;
5510 struct ksz_hw *hw = port->hw;
5511 u16 val_out;
5512
5513 hw_r_phy(hw, port->linked->port_id, reg_num << 1, &val_out);
5514 return val_out;
5515 }
5516
5517 /**
5518 * mdio_write - set PHY register
5519 * @dev: Network device.
5520 * @phy_id: The PHY id.
5521 * @reg_num: The register number.
5522 * @val: The register value.
5523 *
5524 * This procedure sets the PHY register value.
5525 */
mdio_write(struct net_device * dev,int phy_id,int reg_num,int val)5526 static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
5527 {
5528 struct dev_priv *priv = netdev_priv(dev);
5529 struct ksz_port *port = &priv->port;
5530 struct ksz_hw *hw = port->hw;
5531 int i;
5532 int pi;
5533
5534 for (i = 0, pi = port->first_port; i < port->port_cnt; i++, pi++)
5535 hw_w_phy(hw, pi, reg_num << 1, val);
5536 }
5537
5538 /*
5539 * ethtool support
5540 */
5541
#define EEPROM_SIZE 0x40

/* Cached copy of the EEPROM contents, in 16-bit words. */
static u16 eeprom_data[EEPROM_SIZE] = { 0 };

/* All 10/100 Mbit half/full duplex link modes. */
#define ADVERTISED_ALL \
	(ADVERTISED_10baseT_Half | \
	ADVERTISED_10baseT_Full | \
	ADVERTISED_100baseT_Half | \
	ADVERTISED_100baseT_Full)
5551
5552 /* These functions use the MII functions in mii.c. */
5553
5554 /**
5555 * netdev_get_link_ksettings - get network device settings
5556 * @dev: Network device.
5557 * @cmd: Ethtool command.
5558 *
5559 * This function queries the PHY and returns its state in the ethtool command.
5560 *
5561 * Return 0 if successful; otherwise an error code.
5562 */
static int netdev_get_link_ksettings(struct net_device *dev,
	struct ethtool_link_ksettings *cmd)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	mutex_lock(&hw_priv->lock);
	mii_ethtool_get_link_ksettings(&priv->mii_if, cmd);
	/* Always advertise twisted-pair as the port type. */
	ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
	mutex_unlock(&hw_priv->lock);

	/* Save advertised settings for workaround in next function. */
	ethtool_convert_link_mode_to_legacy_u32(&priv->advertising,
		cmd->link_modes.advertising);

	return 0;
}
5580
5581 /**
5582 * netdev_set_link_ksettings - set network device settings
5583 * @dev: Network device.
5584 * @cmd: Ethtool command.
5585 *
5586 * This function sets the PHY according to the ethtool command.
5587 *
5588 * Return 0 if successful; otherwise an error code.
5589 */
netdev_set_link_ksettings(struct net_device * dev,const struct ethtool_link_ksettings * cmd)5590 static int netdev_set_link_ksettings(struct net_device *dev,
5591 const struct ethtool_link_ksettings *cmd)
5592 {
5593 struct dev_priv *priv = netdev_priv(dev);
5594 struct dev_info *hw_priv = priv->adapter;
5595 struct ksz_port *port = &priv->port;
5596 struct ethtool_link_ksettings copy_cmd;
5597 u32 speed = cmd->base.speed;
5598 u32 advertising;
5599 int rc;
5600
5601 ethtool_convert_link_mode_to_legacy_u32(&advertising,
5602 cmd->link_modes.advertising);
5603
5604 /*
5605 * ethtool utility does not change advertised setting if auto
5606 * negotiation is not specified explicitly.
5607 */
5608 if (cmd->base.autoneg && priv->advertising == advertising) {
5609 advertising |= ADVERTISED_ALL;
5610 if (10 == speed)
5611 advertising &=
5612 ~(ADVERTISED_100baseT_Full |
5613 ADVERTISED_100baseT_Half);
5614 else if (100 == speed)
5615 advertising &=
5616 ~(ADVERTISED_10baseT_Full |
5617 ADVERTISED_10baseT_Half);
5618 if (0 == cmd->base.duplex)
5619 advertising &=
5620 ~(ADVERTISED_100baseT_Full |
5621 ADVERTISED_10baseT_Full);
5622 else if (1 == cmd->base.duplex)
5623 advertising &=
5624 ~(ADVERTISED_100baseT_Half |
5625 ADVERTISED_10baseT_Half);
5626 }
5627 mutex_lock(&hw_priv->lock);
5628 if (cmd->base.autoneg &&
5629 (advertising & ADVERTISED_ALL) == ADVERTISED_ALL) {
5630 port->duplex = 0;
5631 port->speed = 0;
5632 port->force_link = 0;
5633 } else {
5634 port->duplex = cmd->base.duplex + 1;
5635 if (1000 != speed)
5636 port->speed = speed;
5637 if (cmd->base.autoneg)
5638 port->force_link = 0;
5639 else
5640 port->force_link = 1;
5641 }
5642
5643 memcpy(©_cmd, cmd, sizeof(copy_cmd));
5644 ethtool_convert_legacy_u32_to_link_mode(copy_cmd.link_modes.advertising,
5645 advertising);
5646 rc = mii_ethtool_set_link_ksettings(
5647 &priv->mii_if,
5648 (const struct ethtool_link_ksettings *)©_cmd);
5649 mutex_unlock(&hw_priv->lock);
5650 return rc;
5651 }
5652
5653 /**
5654 * netdev_nway_reset - restart auto-negotiation
5655 * @dev: Network device.
5656 *
5657 * This function restarts the PHY for auto-negotiation.
5658 *
5659 * Return 0 if successful; otherwise an error code.
5660 */
netdev_nway_reset(struct net_device * dev)5661 static int netdev_nway_reset(struct net_device *dev)
5662 {
5663 struct dev_priv *priv = netdev_priv(dev);
5664 struct dev_info *hw_priv = priv->adapter;
5665 int rc;
5666
5667 mutex_lock(&hw_priv->lock);
5668 rc = mii_nway_restart(&priv->mii_if);
5669 mutex_unlock(&hw_priv->lock);
5670 return rc;
5671 }
5672
5673 /**
5674 * netdev_get_link - get network device link status
5675 * @dev: Network device.
5676 *
5677 * This function gets the link status from the PHY.
5678 *
5679 * Return true if PHY is linked and false otherwise.
5680 */
netdev_get_link(struct net_device * dev)5681 static u32 netdev_get_link(struct net_device *dev)
5682 {
5683 struct dev_priv *priv = netdev_priv(dev);
5684 int rc;
5685
5686 rc = mii_link_ok(&priv->mii_if);
5687 return rc;
5688 }
5689
5690 /**
5691 * netdev_get_drvinfo - get network driver information
5692 * @dev: Network device.
5693 * @info: Ethtool driver info data structure.
5694 *
5695 * This procedure returns the driver information.
5696 */
static void netdev_get_drvinfo(struct net_device *dev,
	struct ethtool_drvinfo *info)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	/* Report driver identity and the PCI location of the device. */
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->version, DRV_VERSION, sizeof(info->version));
	strscpy(info->bus_info, pci_name(hw_priv->pdev),
		sizeof(info->bus_info));
}
5708
/*
 * Hardware register ranges dumped by ethtool (end offsets exclusive);
 * the list is terminated by a { 0, 0 } entry.
 */
static struct hw_regs {
	int start;
	int end;
} hw_regs_range[] = {
	{ KS_DMA_TX_CTRL, KS884X_INTERRUPTS_STATUS },
	{ KS_ADD_ADDR_0_LO, KS_ADD_ADDR_F_HI },
	{ KS884X_ADDR_0_OFFSET, KS8841_WOL_FRAME_BYTE2_OFFSET },
	{ KS884X_SIDER_P, KS8842_SGCR7_P },
	{ KS8842_MACAR1_P, KS8842_TOSR8_P },
	{ KS884X_P1MBCR_P, KS8842_P3ERCR_P },
	{ 0, 0 }
};
5721
5722 /**
5723 * netdev_get_regs_len - get length of register dump
5724 * @dev: Network device.
5725 *
5726 * This function returns the length of the register dump.
5727 *
5728 * Return length of the register dump.
5729 */
netdev_get_regs_len(struct net_device * dev)5730 static int netdev_get_regs_len(struct net_device *dev)
5731 {
5732 struct hw_regs *range = hw_regs_range;
5733 int regs_len = 0x10 * sizeof(u32);
5734
5735 while (range->end > range->start) {
5736 regs_len += (range->end - range->start + 3) / 4 * 4;
5737 range++;
5738 }
5739 return regs_len;
5740 }
5741
5742 /**
5743 * netdev_get_regs - get register dump
5744 * @dev: Network device.
5745 * @regs: Ethtool registers data structure.
5746 * @ptr: Buffer to store the register values.
5747 *
5748 * This procedure dumps the register values in the provided buffer.
5749 */
static void netdev_get_regs(struct net_device *dev, struct ethtool_regs *regs,
	void *ptr)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int *buf = (int *) ptr;
	struct hw_regs *range = hw_regs_range;
	int len;

	mutex_lock(&hw_priv->lock);
	regs->version = 0;
	/* First dump 0x40 bytes of PCI configuration space. */
	for (len = 0; len < 0x40; len += 4) {
		pci_read_config_dword(hw_priv->pdev, len, buf);
		buf++;
	}
	/* Then each hardware register range, one dword at a time. */
	while (range->end > range->start) {
		for (len = range->start; len < range->end; len += 4) {
			*buf = readl(hw->io + len);
			buf++;
		}
		range++;
	}
	mutex_unlock(&hw_priv->lock);
}
5775
5776 #define WOL_SUPPORT \
5777 (WAKE_PHY | WAKE_MAGIC | \
5778 WAKE_UCAST | WAKE_MCAST | \
5779 WAKE_BCAST | WAKE_ARP)
5780
5781 /**
5782 * netdev_get_wol - get Wake-on-LAN support
5783 * @dev: Network device.
5784 * @wol: Ethtool Wake-on-LAN data structure.
5785 *
5786 * This procedure returns Wake-on-LAN support.
5787 */
netdev_get_wol(struct net_device * dev,struct ethtool_wolinfo * wol)5788 static void netdev_get_wol(struct net_device *dev,
5789 struct ethtool_wolinfo *wol)
5790 {
5791 struct dev_priv *priv = netdev_priv(dev);
5792 struct dev_info *hw_priv = priv->adapter;
5793
5794 wol->supported = hw_priv->wol_support;
5795 wol->wolopts = hw_priv->wol_enable;
5796 memset(&wol->sopass, 0, sizeof(wol->sopass));
5797 }
5798
5799 /**
5800 * netdev_set_wol - set Wake-on-LAN support
5801 * @dev: Network device.
5802 * @wol: Ethtool Wake-on-LAN data structure.
5803 *
5804 * This function sets Wake-on-LAN support.
5805 *
5806 * Return 0 if successful; otherwise an error code.
5807 */
static int netdev_set_wol(struct net_device *dev,
	struct ethtool_wolinfo *wol)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	/*
	 * Need to find a way to retrieve the device IP address.
	 * NOTE(review): the wake-up frame setup receives this hard-coded
	 * address - confirm it matches the intended deployment.
	 */
	static const u8 net_addr[] = { 192, 168, 1, 1 };

	/* Reject options the hardware does not support. */
	if (wol->wolopts & ~hw_priv->wol_support)
		return -EINVAL;

	hw_priv->wol_enable = wol->wolopts;

	/* Link wakeup cannot really be disabled. */
	if (wol->wolopts)
		hw_priv->wol_enable |= WAKE_PHY;
	hw_enable_wol(&hw_priv->hw, hw_priv->wol_enable, net_addr);
	return 0;
}
5828
5829 /**
5830 * netdev_get_msglevel - get debug message level
5831 * @dev: Network device.
5832 *
5833 * This function returns current debug message level.
5834 *
5835 * Return current debug message flags.
5836 */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);

	/* Current debug message flags. */
	return priv->msg_enable;
}
5843
5844 /**
5845 * netdev_set_msglevel - set debug message level
5846 * @dev: Network device.
5847 * @value: Debug message flags.
5848 *
5849 * This procedure sets debug message level.
5850 */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	struct dev_priv *priv = netdev_priv(dev);

	/* Store the new debug message flags. */
	priv->msg_enable = value;
}
5857
5858 /**
5859 * netdev_get_eeprom_len - get EEPROM length
5860 * @dev: Network device.
5861 *
5862 * This function returns the length of the EEPROM.
5863 *
5864 * Return length of the EEPROM.
5865 */
static int netdev_get_eeprom_len(struct net_device *dev)
{
	/* EEPROM_SIZE words of 2 bytes each. */
	return EEPROM_SIZE * 2;
}
5870
5871 #define EEPROM_MAGIC 0x10A18842
5872
5873 /**
5874 * netdev_get_eeprom - get EEPROM data
5875 * @dev: Network device.
5876 * @eeprom: Ethtool EEPROM data structure.
5877 * @data: Buffer to store the EEPROM data.
5878 *
5879 * This function dumps the EEPROM data in the provided buffer.
5880 *
5881 * Return 0 if successful; otherwise an error code.
5882 */
static int netdev_get_eeprom(struct net_device *dev,
	struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	u8 *eeprom_byte = (u8 *) eeprom_data;
	int i;
	int len;

	/* Refresh the cached words covering the requested byte range. */
	len = (eeprom->offset + eeprom->len + 1) / 2;
	for (i = eeprom->offset / 2; i < len; i++)
		eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
	eeprom->magic = EEPROM_MAGIC;
	memcpy(data, &eeprom_byte[eeprom->offset], eeprom->len);

	return 0;
}
5900
5901 /**
5902 * netdev_set_eeprom - write EEPROM data
5903 * @dev: Network device.
5904 * @eeprom: Ethtool EEPROM data structure.
5905 * @data: Data buffer.
5906 *
5907 * This function modifies the EEPROM data one byte at a time.
5908 *
5909 * Return 0 if successful; otherwise an error code.
5910 */
static int netdev_set_eeprom(struct net_device *dev,
	struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	u16 eeprom_word[EEPROM_SIZE];
	u8 *eeprom_byte = (u8 *) eeprom_word;
	int i;
	int len;

	/* The caller must present the magic value reported by get_eeprom. */
	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	/* Refresh the cached words covering the requested byte range. */
	len = (eeprom->offset + eeprom->len + 1) / 2;
	for (i = eeprom->offset / 2; i < len; i++)
		eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
	/* Merge the new bytes into a scratch copy of the cache ... */
	memcpy(eeprom_word, eeprom_data, EEPROM_SIZE * 2);
	memcpy(&eeprom_byte[eeprom->offset], data, eeprom->len);
	/* ... and write back only the words that actually changed. */
	for (i = 0; i < EEPROM_SIZE; i++)
		if (eeprom_word[i] != eeprom_data[i]) {
			eeprom_data[i] = eeprom_word[i];
			eeprom_write(&hw_priv->hw, i, eeprom_data[i]);
		}

	return 0;
}
5937
5938 /**
5939 * netdev_get_pauseparam - get flow control parameters
5940 * @dev: Network device.
5941 * @pause: Ethtool PAUSE settings data structure.
5942 *
5943 * This procedure returns the PAUSE control flow settings.
5944 */
netdev_get_pauseparam(struct net_device * dev,struct ethtool_pauseparam * pause)5945 static void netdev_get_pauseparam(struct net_device *dev,
5946 struct ethtool_pauseparam *pause)
5947 {
5948 struct dev_priv *priv = netdev_priv(dev);
5949 struct dev_info *hw_priv = priv->adapter;
5950 struct ksz_hw *hw = &hw_priv->hw;
5951
5952 pause->autoneg = (hw->overrides & PAUSE_FLOW_CTRL) ? 0 : 1;
5953 if (!hw->ksz_switch) {
5954 pause->rx_pause =
5955 (hw->rx_cfg & DMA_RX_FLOW_ENABLE) ? 1 : 0;
5956 pause->tx_pause =
5957 (hw->tx_cfg & DMA_TX_FLOW_ENABLE) ? 1 : 0;
5958 } else {
5959 pause->rx_pause =
5960 (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
5961 SWITCH_RX_FLOW_CTRL)) ? 1 : 0;
5962 pause->tx_pause =
5963 (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
5964 SWITCH_TX_FLOW_CTRL)) ? 1 : 0;
5965 }
5966 }
5967
5968 /**
5969 * netdev_set_pauseparam - set flow control parameters
5970 * @dev: Network device.
5971 * @pause: Ethtool PAUSE settings data structure.
5972 *
 * This function sets the PAUSE flow control settings.
5975 *
5976 * Return 0 if successful; otherwise an error code.
5977 */
static int netdev_set_pauseparam(struct net_device *dev,
	struct ethtool_pauseparam *pause)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;

	mutex_lock(&hw_priv->lock);
	if (pause->autoneg) {
		/* Let autonegotiation determine the flow control setting. */
		if (!pause->rx_pause && !pause->tx_pause)
			port->flow_ctrl = PHY_NO_FLOW_CTRL;
		else
			port->flow_ctrl = PHY_FLOW_CTRL;
		hw->overrides &= ~PAUSE_FLOW_CTRL;
		port->force_link = 0;
		if (hw->ksz_switch) {
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_RX_FLOW_CTRL, 1);
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_TX_FLOW_CTRL, 1);
		}
		/* Re-apply the link settings so the change takes effect. */
		port_set_link_speed(port);
	} else {
		/* Force the requested flow control configuration. */
		hw->overrides |= PAUSE_FLOW_CTRL;
		if (hw->ksz_switch) {
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_RX_FLOW_CTRL, pause->rx_pause);
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_TX_FLOW_CTRL, pause->tx_pause);
		} else
			set_flow_ctrl(hw, pause->rx_pause, pause->tx_pause);
	}
	mutex_unlock(&hw_priv->lock);

	return 0;
}
6015
6016 /**
6017 * netdev_get_ringparam - get tx/rx ring parameters
6018 * @dev: Network device.
6019 * @ring: Ethtool RING settings data structure.
6020 * @kernel_ring: Ethtool external RING settings data structure.
6021 * @extack: Netlink handle.
6022 *
6023 * This procedure returns the TX/RX ring settings.
6024 */
netdev_get_ringparam(struct net_device * dev,struct ethtool_ringparam * ring,struct kernel_ethtool_ringparam * kernel_ring,struct netlink_ext_ack * extack)6025 static void netdev_get_ringparam(struct net_device *dev,
6026 struct ethtool_ringparam *ring,
6027 struct kernel_ethtool_ringparam *kernel_ring,
6028 struct netlink_ext_ack *extack)
6029 {
6030 struct dev_priv *priv = netdev_priv(dev);
6031 struct dev_info *hw_priv = priv->adapter;
6032 struct ksz_hw *hw = &hw_priv->hw;
6033
6034 ring->tx_max_pending = (1 << 9);
6035 ring->tx_pending = hw->tx_desc_info.alloc;
6036 ring->rx_max_pending = (1 << 9);
6037 ring->rx_pending = hw->rx_desc_info.alloc;
6038 }
6039
#define STATS_LEN (TOTAL_PORT_COUNTER_NUM)

/*
 * Names of the MIB counters reported through ethtool -S; the order
 * matches the counter values copied out in netdev_get_ethtool_stats().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[STATS_LEN] = {
	{ "rx_lo_priority_octets" },
	{ "rx_hi_priority_octets" },
	{ "rx_undersize_packets" },
	{ "rx_fragments" },
	{ "rx_oversize_packets" },
	{ "rx_jabbers" },
	{ "rx_symbol_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "rx_mac_ctrl_packets" },
	{ "rx_pause_packets" },
	{ "rx_bcast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_ucast_packets" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },

	{ "tx_lo_priority_octets" },
	{ "tx_hi_priority_octets" },
	{ "tx_late_collisions" },
	{ "tx_pause_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_deferred" },
	{ "tx_total_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },

	{ "rx_discards" },
	{ "tx_discards" },
};
6082
6083 /**
6084 * netdev_get_strings - get statistics identity strings
6085 * @dev: Network device.
6086 * @stringset: String set identifier.
6087 * @buf: Buffer to store the strings.
6088 *
6089 * This procedure returns the strings used to identify the statistics.
6090 */
static void netdev_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	/* Copy only as many key names as the hardware reports counters. */
	if (ETH_SS_STATS == stringset)
		memcpy(buf, &ethtool_stats_keys,
			ETH_GSTRING_LEN * hw->mib_cnt);
}
6101
6102 /**
6103 * netdev_get_sset_count - get statistics size
6104 * @dev: Network device.
6105 * @sset: The statistics set number.
6106 *
6107 * This function returns the size of the statistics to be reported.
6108 *
6109 * Return size of the statistics to be reported.
6110 */
netdev_get_sset_count(struct net_device * dev,int sset)6111 static int netdev_get_sset_count(struct net_device *dev, int sset)
6112 {
6113 struct dev_priv *priv = netdev_priv(dev);
6114 struct dev_info *hw_priv = priv->adapter;
6115 struct ksz_hw *hw = &hw_priv->hw;
6116
6117 switch (sset) {
6118 case ETH_SS_STATS:
6119 return hw->mib_cnt;
6120 default:
6121 return -EOPNOTSUPP;
6122 }
6123 }
6124
6125 /**
6126 * netdev_get_ethtool_stats - get network device statistics
6127 * @dev: Network device.
6128 * @stats: Ethtool statistics data structure.
6129 * @data: Buffer to store the statistics.
6130 *
6131 * This procedure returns the statistics.
6132 */
static void netdev_get_ethtool_stats(struct net_device *dev,
	struct ethtool_stats *stats, u64 *data)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	int n_stats = stats->n_stats;
	int i;
	int n;
	int p;
	u64 counter[TOTAL_PORT_COUNTER_NUM];

	/* Request a MIB read for every connected port of this device. */
	mutex_lock(&hw_priv->lock);
	n = SWITCH_PORT_NUM;
	for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
		if (media_connected == hw->port_mib[p].state) {
			hw_priv->counter[p].read = 1;

			/* Remember first port that requests read. */
			if (n == SWITCH_PORT_NUM)
				n = p;
		}
	}
	mutex_unlock(&hw_priv->lock);

	/* mib_read_work() performs the actual counter reads. */
	if (n < SWITCH_PORT_NUM)
		schedule_work(&hw_priv->mib_read);

	/* Wait (with timeout) until reads complete (read becomes 2). */
	if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) {
		p = n;
		wait_event_interruptible_timeout(
			hw_priv->counter[p].counter,
			2 == hw_priv->counter[p].read,
			HZ * 1);
	} else
		for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) {
			if (0 == i) {
				wait_event_interruptible_timeout(
					hw_priv->counter[p].counter,
					2 == hw_priv->counter[p].read,
					HZ * 2);
			} else if (hw->port_mib[p].cnt_ptr) {
				wait_event_interruptible_timeout(
					hw_priv->counter[p].counter,
					2 == hw_priv->counter[p].read,
					HZ * 1);
			}
		}

	/* Collect the counters and copy out at most n_stats values. */
	get_mib_counters(hw, port->first_port, port->mib_port_cnt, counter);
	n = hw->mib_cnt;
	if (n > n_stats)
		n = n_stats;
	n_stats -= n;
	for (i = 0; i < n; i++)
		*data++ = counter[i];
}
6191
6192 /**
6193 * netdev_set_features - set receive checksum support
6194 * @dev: Network device.
6195 * @features: New device features (offloads).
6196 *
6197 * This function sets receive checksum support setting.
6198 *
6199 * Return 0 if successful; otherwise an error code.
6200 */
static int netdev_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	mutex_lock(&hw_priv->lock);

	/* see note in hw_setup() */
	if (features & NETIF_F_RXCSUM)
		hw->rx_cfg |= DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP;
	else
		hw->rx_cfg &= ~(DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);

	/* Push the new config to the chip only while it is enabled. */
	if (hw->enabled)
		writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);

	mutex_unlock(&hw_priv->lock);

	return 0;
}
6223
/* ethtool operations supported by this driver. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.nway_reset = netdev_nway_reset,
	.get_link = netdev_get_link,
	.get_drvinfo = netdev_get_drvinfo,
	.get_regs_len = netdev_get_regs_len,
	.get_regs = netdev_get_regs,
	.get_wol = netdev_get_wol,
	.set_wol = netdev_set_wol,
	.get_msglevel = netdev_get_msglevel,
	.set_msglevel = netdev_set_msglevel,
	.get_eeprom_len = netdev_get_eeprom_len,
	.get_eeprom = netdev_get_eeprom,
	.set_eeprom = netdev_set_eeprom,
	.get_pauseparam = netdev_get_pauseparam,
	.set_pauseparam = netdev_set_pauseparam,
	.get_ringparam = netdev_get_ringparam,
	.get_strings = netdev_get_strings,
	.get_sset_count = netdev_get_sset_count,
	.get_ethtool_stats = netdev_get_ethtool_stats,
	.get_link_ksettings = netdev_get_link_ksettings,
	.set_link_ksettings = netdev_set_link_ksettings,
};
6246
6247 /*
6248 * Hardware monitoring
6249 */
6250
update_link(struct net_device * dev,struct dev_priv * priv,struct ksz_port * port)6251 static void update_link(struct net_device *dev, struct dev_priv *priv,
6252 struct ksz_port *port)
6253 {
6254 if (priv->media_state != port->linked->state) {
6255 priv->media_state = port->linked->state;
6256 if (netif_running(dev))
6257 set_media_state(dev, media_connected);
6258 }
6259 }
6260
/* Work function that reads the MIB counters of every port. */
static void mib_read_work(struct work_struct *work)
{
	struct dev_info *hw_priv =
		container_of(work, struct dev_info, mib_read);
	struct ksz_hw *hw = &hw_priv->hw;
	unsigned long next_jiffies;
	struct ksz_port_mib *mib;
	int i;

	next_jiffies = jiffies;
	for (i = 0; i < hw->mib_port_cnt; i++) {
		mib = &hw->port_mib[i];

		/* Reading MIB counters or requested to read. */
		if (mib->cnt_ptr || 1 == hw_priv->counter[i].read) {

			/* Need to process receive interrupt. */
			if (port_r_cnt(hw, i))
				break;
			hw_priv->counter[i].read = 0;

			/* Finish reading counters. */
			if (0 == mib->cnt_ptr) {
				/* Wake any waiters in netdev_get_ethtool_stats(). */
				hw_priv->counter[i].read = 2;
				wake_up_interruptible(
					&hw_priv->counter[i].counter);
			}
		} else if (time_after_eq(jiffies, hw_priv->counter[i].time)) {
			/* Only read MIB counters when the port is connected. */
			if (media_connected == mib->state)
				hw_priv->counter[i].read = 1;
			/* Stagger the next scheduled read across all ports. */
			next_jiffies += HZ * 1 * hw->mib_port_cnt;
			hw_priv->counter[i].time = next_jiffies;

		/* Port is just disconnected. */
		} else if (mib->link_down) {
			mib->link_down = 0;

			/* Read counters one last time after link is lost. */
			hw_priv->counter[i].read = 1;
		}
	}
}
6304
mib_monitor(struct timer_list * t)6305 static void mib_monitor(struct timer_list *t)
6306 {
6307 struct dev_info *hw_priv = timer_container_of(hw_priv, t,
6308 mib_timer_info.timer);
6309
6310 mib_read_work(&hw_priv->mib_read);
6311
6312 /* This is used to verify Wake-on-LAN is working. */
6313 if (hw_priv->pme_wait) {
6314 if (time_is_before_eq_jiffies(hw_priv->pme_wait)) {
6315 hw_clr_wol_pme_status(&hw_priv->hw);
6316 hw_priv->pme_wait = 0;
6317 }
6318 } else if (hw_chk_wol_pme_status(&hw_priv->hw)) {
6319
6320 /* PME is asserted. Wait 2 seconds to clear it. */
6321 hw_priv->pme_wait = jiffies + HZ * 2;
6322 }
6323
6324 ksz_update_timer(&hw_priv->mib_timer_info);
6325 }
6326
6327 /**
6328 * dev_monitor - periodic monitoring
6329 * @t: timer list containing a network device pointer.
6330 *
6331 * This routine is run in a kernel timer to monitor the network device.
6332 */
dev_monitor(struct timer_list * t)6333 static void dev_monitor(struct timer_list *t)
6334 {
6335 struct dev_priv *priv = timer_container_of(priv, t,
6336 monitor_timer_info.timer);
6337 struct net_device *dev = priv->mii_if.dev;
6338 struct dev_info *hw_priv = priv->adapter;
6339 struct ksz_hw *hw = &hw_priv->hw;
6340 struct ksz_port *port = &priv->port;
6341
6342 if (!(hw->features & LINK_INT_WORKING))
6343 port_get_link_speed(port);
6344 update_link(dev, priv, port);
6345
6346 ksz_update_timer(&priv->monitor_timer_info);
6347 }
6348
/*
 * Linux network device interface functions
 */

/* Driver exported variables */

/* Message verbosity bitmask fed to netif_msg_init() (module parameter). */
static int msg_enable;

/*
 * MAC address overrides supplied as module parameters; the default ":"
 * sentinel means "not set, use the address read from the hardware".
 */
static char *macaddr = ":";
static char *mac1addr = ":";

/*
 * This enables multiple network device mode for KSZ8842, which contains a
 * switch with two physical ports. Some users like to take control of the
 * ports for running Spanning Tree Protocol. The driver will create an
 * additional eth? device for the other port.
 *
 * Some limitations are the network devices cannot have different MTU and
 * multicast hash tables.
 */
static int multi_dev;

/*
 * As most users select multiple network device mode to use Spanning Tree
 * Protocol, this enables a feature in which most unicast and multicast packets
 * are forwarded inside the switch and not passed to the host. Only packets
 * that need the host's attention are passed to it. This prevents the host
 * wasting CPU time to examine each and every incoming packets and do the
 * forwarding itself.
 *
 * As the hack requires the private bridge header, the driver cannot compile
 * with just the kernel headers.
 *
 * Enabling STP support also turns on multiple network device mode.
 */
static int stp;

/*
 * This enables fast aging in the KSZ8842 switch. Not sure what situation
 * needs that. However, fast aging is used to flush the dynamic MAC table when
 * STP support is enabled.
 */
static int fast_aging;
6392
6393 /**
6394 * netdev_init - initialize network device.
6395 * @dev: Network device.
6396 *
6397 * This function initializes the network device.
6398 *
6399 * Return 0 if successful; otherwise an error code indicating failure.
6400 */
netdev_init(struct net_device * dev)6401 static int __init netdev_init(struct net_device *dev)
6402 {
6403 struct dev_priv *priv = netdev_priv(dev);
6404
6405 /* 500 ms timeout */
6406 ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000,
6407 dev_monitor);
6408
6409 /* 500 ms timeout */
6410 dev->watchdog_timeo = HZ / 2;
6411
6412 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
6413
6414 /*
6415 * Hardware does not really support IPv6 checksum generation, but
6416 * driver actually runs faster with this on.
6417 */
6418 dev->hw_features |= NETIF_F_IPV6_CSUM;
6419
6420 dev->features |= dev->hw_features;
6421
6422 sema_init(&priv->proc_sem, 1);
6423
6424 priv->mii_if.phy_id_mask = 0x1;
6425 priv->mii_if.reg_num_mask = 0x7;
6426 priv->mii_if.dev = dev;
6427 priv->mii_if.mdio_read = mdio_read;
6428 priv->mii_if.mdio_write = mdio_write;
6429 priv->mii_if.phy_id = priv->port.first_port + 1;
6430
6431 priv->msg_enable = netif_msg_init(msg_enable,
6432 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK));
6433
6434 return 0;
6435 }
6436
/* Net device operations; netdev_init() runs from register_netdev(). */
static const struct net_device_ops netdev_ops = {
	.ndo_init		= netdev_init,
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_get_stats		= netdev_query_statistics,
	.ndo_start_xmit		= netdev_tx,
	.ndo_tx_timeout		= netdev_tx_timeout,
	.ndo_change_mtu		= netdev_change_mtu,
	.ndo_set_features	= netdev_set_features,
	.ndo_set_mac_address	= netdev_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= netdev_ioctl,
	.ndo_set_rx_mode	= netdev_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= netdev_netpoll,
#endif
};
6454
/*
 * netdev_free - unregister (if needed) and free a network device
 * @dev: Network device.
 *
 * watchdog_timeo is only set by netdev_init(), which runs during
 * register_netdev(), so a non-zero value doubles as an "is registered"
 * flag here.
 */
static void netdev_free(struct net_device *dev)
{
	if (dev->watchdog_timeo)
		unregister_netdev(dev);

	free_netdev(dev);
}
6462
/* Per-PCI-device bundle: shared adapter state plus its net devices. */
struct platform_info {
	struct dev_info dev_info;
	struct net_device *netdev[SWITCH_PORT_NUM];
};

/* Number of net devices created so far; also used as the hardware id. */
static int net_device_present;
6469
/*
 * get_mac_addr - parse a MAC address module parameter string
 * @hw_priv: Network device information.
 * @macaddr: String of hex digit pairs separated by ':' (macaddr or
 *	mac1addr module parameter).
 * @port: MAIN_PORT to fill the main override address, otherwise the
 *	switch's second (other) address.
 *
 * Hand-rolled parser: got_num is 0 while idle, 1 while accumulating hex
 * digits into num, and 2 once a byte is complete (':' seen or string
 * ended).  Parsing stops at the first invalid character.  Only when all
 * ETH_ALEN bytes were parsed is the main address marked as overridden.
 */
static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
{
	int i;
	int j;
	int got_num;
	int num;

	i = j = num = got_num = 0;
	while (j < ETH_ALEN) {
		if (macaddr[i]) {
			int digit;

			got_num = 1;
			digit = hex_to_bin(macaddr[i]);
			if (digit >= 0)
				num = num * 16 + digit;
			else if (':' == macaddr[i])
				got_num = 2;
			else
				break;
		} else if (got_num)
			got_num = 2;
		else
			break;
		if (2 == got_num) {
			if (MAIN_PORT == port) {
				hw_priv->hw.override_addr[j++] = (u8) num;
				/*
				 * NOTE(review): hw.id is added to the last
				 * octet once per completed byte (up to six
				 * times per address), not once in total —
				 * confirm this is intended (hw.id is 0 for
				 * the first adapter).
				 */
				hw_priv->hw.override_addr[5] +=
					hw_priv->hw.id;
			} else {
				hw_priv->hw.ksz_switch->other_addr[j++] =
					(u8) num;
				hw_priv->hw.ksz_switch->other_addr[5] +=
					hw_priv->hw.id;
			}
			num = got_num = 0;
		}
		i++;
	}
	if (ETH_ALEN == j) {
		if (MAIN_PORT == port)
			hw_priv->hw.mac_override = 1;
	}
}
6514
6515 #define KS884X_DMA_MASK (~0x0UL)
6516
read_other_addr(struct ksz_hw * hw)6517 static void read_other_addr(struct ksz_hw *hw)
6518 {
6519 int i;
6520 u16 data[3];
6521 struct ksz_switch *sw = hw->ksz_switch;
6522
6523 for (i = 0; i < 3; i++)
6524 data[i] = eeprom_read(hw, i + EEPROM_DATA_OTHER_MAC_ADDR);
6525 if ((data[0] || data[1] || data[2]) && data[0] != 0xffff) {
6526 sw->other_addr[5] = (u8) data[0];
6527 sw->other_addr[4] = (u8)(data[0] >> 8);
6528 sw->other_addr[3] = (u8) data[1];
6529 sw->other_addr[2] = (u8)(data[1] >> 8);
6530 sw->other_addr[1] = (u8) data[2];
6531 sw->other_addr[0] = (u8)(data[2] >> 8);
6532 }
6533 }
6534
6535 #ifndef PCI_VENDOR_ID_MICREL_KS
6536 #define PCI_VENDOR_ID_MICREL_KS 0x16c6
6537 #endif
6538
/*
 * pcidev_init - PCI probe routine
 * @pdev: PCI device.
 * @id: Matching entry of pcidev_table.
 *
 * Detects the chip (hw_init() returns 1 for KSZ8841, 2 for KSZ8842),
 * allocates the adapter state, determines the MAC address(es), and
 * registers one net device per switch port (multi_dev mode) or a single
 * net device.  Error paths unwind through the goto ladder at the end.
 *
 * Return 0 if successful; otherwise a negative error code.
 */
static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct dev_priv *priv;
	struct dev_info *hw_priv;
	struct ksz_hw *hw;
	struct platform_info *info;
	struct ksz_port *port;
	unsigned long reg_base;
	unsigned long reg_len;
	int cnt;
	int i;
	int mib_port_count;
	int pi;
	int port_count;
	int result;
	char banner[sizeof(version)];
	struct ksz_switch *sw = NULL;

	result = pcim_enable_device(pdev);
	if (result)
		return result;

	result = -ENODEV;

	/* The DMA engine only handles 32-bit addresses. */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return result;

	/* BAR 0 must be a memory resource, not I/O ports. */
	reg_base = pci_resource_start(pdev, 0);
	reg_len = pci_resource_len(pdev, 0);
	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0)
		return result;

	if (!request_mem_region(reg_base, reg_len, DRV_NAME))
		return result;
	pci_set_master(pdev);

	result = -ENOMEM;

	info = kzalloc_obj(struct platform_info);
	if (!info)
		goto pcidev_init_dev_err;

	hw_priv = &info->dev_info;
	hw_priv->pdev = pdev;

	hw = &hw_priv->hw;

	hw->io = ioremap(reg_base, reg_len);
	if (!hw->io)
		goto pcidev_init_io_err;

	cnt = hw_init(hw);
	if (!cnt) {
		if (msg_enable & NETIF_MSG_PROBE)
			pr_alert("chip not detected\n");
		result = -ENODEV;
		goto pcidev_init_alloc_err;
	}

	snprintf(banner, sizeof(banner), "%s", version);
	banner[13] = cnt + '0';		/* Replace x in "Micrel KSZ884x" */
	dev_info(&hw_priv->pdev->dev, "%s\n", banner);
	dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);

	/* Assume device is KSZ8841. */
	hw->dev_count = 1;
	port_count = 1;
	mib_port_count = 1;
	hw->addr_list_size = 0;
	hw->mib_cnt = PORT_COUNTER_NUM;
	hw->mib_port_cnt = 1;

	/* KSZ8842 has a switch with multiple ports. */
	if (2 == cnt) {
		if (fast_aging)
			hw->overrides |= FAST_AGING;

		hw->mib_cnt = TOTAL_PORT_COUNTER_NUM;

		/* Multiple network device interfaces are required. */
		if (multi_dev) {
			hw->dev_count = SWITCH_PORT_NUM;
			hw->addr_list_size = SWITCH_PORT_NUM - 1;
		}

		/* Single network device has multiple ports. */
		if (1 == hw->dev_count) {
			port_count = SWITCH_PORT_NUM;
			mib_port_count = SWITCH_PORT_NUM;
		}
		hw->mib_port_cnt = TOTAL_PORT_NUM;
		hw->ksz_switch = kzalloc_obj(struct ksz_switch);
		if (!hw->ksz_switch)
			goto pcidev_init_alloc_err;

		sw = hw->ksz_switch;
	}
	for (i = 0; i < hw->mib_port_cnt; i++)
		hw->port_mib[i].mib_start = 0;

	hw->parent = hw_priv;

	/* Default MTU is 1500. */
	hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3;

	if (ksz_alloc_mem(hw_priv))
		goto pcidev_init_mem_err;

	hw_priv->hw.id = net_device_present;

	spin_lock_init(&hw_priv->hwlock);
	mutex_init(&hw_priv->lock);

	for (i = 0; i < TOTAL_PORT_NUM; i++)
		init_waitqueue_head(&hw_priv->counter[i].counter);

	/* ":" is the module-parameter sentinel for "not provided". */
	if (macaddr[0] != ':')
		get_mac_addr(hw_priv, macaddr, MAIN_PORT);

	/* Read MAC address and initialize override address if not overridden. */
	hw_read_addr(hw);

	/* Multiple device interfaces mode requires a second MAC address. */
	if (hw->dev_count > 1) {
		memcpy(sw->other_addr, hw->override_addr, ETH_ALEN);
		read_other_addr(hw);
		if (mac1addr[0] != ':')
			get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
	}

	hw_setup(hw);
	if (hw->ksz_switch)
		sw_setup(hw);
	else {
		/* Wake-on-LAN only on the single-port KSZ8841. */
		hw_priv->wol_support = WOL_SUPPORT;
		hw_priv->wol_enable = 0;
	}

	INIT_WORK(&hw_priv->mib_read, mib_read_work);

	/* 500 ms timeout */
	ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000,
		mib_monitor);

	/* Create and register one net device per logical device. */
	for (i = 0; i < hw->dev_count; i++) {
		dev = alloc_etherdev(sizeof(struct dev_priv));
		if (!dev)
			goto pcidev_init_reg_err;
		SET_NETDEV_DEV(dev, &pdev->dev);
		info->netdev[i] = dev;

		priv = netdev_priv(dev);
		priv->adapter = hw_priv;
		priv->id = net_device_present++;

		port = &priv->port;
		port->port_cnt = port_count;
		port->mib_port_cnt = mib_port_count;
		port->first_port = i;
		port->flow_ctrl = PHY_FLOW_CTRL;

		port->hw = hw;
		port->linked = &hw->port_info[port->first_port];

		for (cnt = 0, pi = i; cnt < port_count; cnt++, pi++) {
			hw->port_info[pi].port_id = pi;
			hw->port_info[pi].pdev = dev;
			hw->port_info[pi].state = media_disconnected;
		}

		dev->mem_start = (unsigned long) hw->io;
		dev->mem_end = dev->mem_start + reg_len - 1;
		dev->irq = pdev->irq;
		if (MAIN_PORT == i)
			eth_hw_addr_set(dev, hw_priv->hw.override_addr);
		else {
			u8 addr[ETH_ALEN];

			ether_addr_copy(addr, sw->other_addr);
			/* Derive a distinct address when none was set. */
			if (ether_addr_equal(sw->other_addr, hw->override_addr))
				addr[5] += port->first_port;
			eth_hw_addr_set(dev, addr);
		}

		dev->netdev_ops = &netdev_ops;
		dev->ethtool_ops = &netdev_ethtool_ops;

		/* MTU range: 60 - 1894 */
		dev->min_mtu = ETH_ZLEN;
		dev->max_mtu = MAX_RX_BUF_SIZE -
			       (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

		if (register_netdev(dev))
			goto pcidev_init_reg_err;
		port_set_power_saving(port, true);
	}

	pci_dev_get(hw_priv->pdev);
	pci_set_drvdata(pdev, info);
	return 0;

pcidev_init_reg_err:
	for (i = 0; i < hw->dev_count; i++) {
		if (info->netdev[i]) {
			netdev_free(info->netdev[i]);
			info->netdev[i] = NULL;
		}
	}

pcidev_init_mem_err:
	ksz_free_mem(hw_priv);
	kfree(hw->ksz_switch);

pcidev_init_alloc_err:
	iounmap(hw->io);

pcidev_init_io_err:
	kfree(info);

pcidev_init_dev_err:
	release_mem_region(reg_base, reg_len);

	return result;
}
6765
/*
 * pcidev_exit - PCI remove routine
 * @pdev: PCI device being removed.
 *
 * Tears down everything pcidev_init() set up: net devices, MMIO
 * mapping, DMA memory, the optional switch structure, and the extra
 * device reference.
 */
static void pcidev_exit(struct pci_dev *pdev)
{
	int i;
	struct platform_info *info = pci_get_drvdata(pdev);
	struct dev_info *hw_priv = &info->dev_info;

	release_mem_region(pci_resource_start(pdev, 0),
		pci_resource_len(pdev, 0));
	for (i = 0; i < hw_priv->hw.dev_count; i++) {
		if (info->netdev[i])
			netdev_free(info->netdev[i]);
	}
	if (hw_priv->hw.io)
		iounmap(hw_priv->hw.io);
	ksz_free_mem(hw_priv);
	kfree(hw_priv->hw.ksz_switch);
	/* Drop the reference taken by pci_dev_get() in pcidev_init(). */
	pci_dev_put(hw_priv->pdev);
	kfree(info);
}
6785
pcidev_resume(struct device * dev_d)6786 static int __maybe_unused pcidev_resume(struct device *dev_d)
6787 {
6788 int i;
6789 struct platform_info *info = dev_get_drvdata(dev_d);
6790 struct dev_info *hw_priv = &info->dev_info;
6791 struct ksz_hw *hw = &hw_priv->hw;
6792
6793 device_wakeup_disable(dev_d);
6794
6795 if (hw_priv->wol_enable)
6796 hw_cfg_wol_pme(hw, 0);
6797 for (i = 0; i < hw->dev_count; i++) {
6798 if (info->netdev[i]) {
6799 struct net_device *dev = info->netdev[i];
6800
6801 if (netif_running(dev)) {
6802 netdev_open(dev);
6803 netif_device_attach(dev);
6804 }
6805 }
6806 }
6807 return 0;
6808 }
6809
pcidev_suspend(struct device * dev_d)6810 static int __maybe_unused pcidev_suspend(struct device *dev_d)
6811 {
6812 int i;
6813 struct platform_info *info = dev_get_drvdata(dev_d);
6814 struct dev_info *hw_priv = &info->dev_info;
6815 struct ksz_hw *hw = &hw_priv->hw;
6816
6817 /* Need to find a way to retrieve the device IP address. */
6818 static const u8 net_addr[] = { 192, 168, 1, 1 };
6819
6820 for (i = 0; i < hw->dev_count; i++) {
6821 if (info->netdev[i]) {
6822 struct net_device *dev = info->netdev[i];
6823
6824 if (netif_running(dev)) {
6825 netif_device_detach(dev);
6826 netdev_close(dev);
6827 }
6828 }
6829 }
6830 if (hw_priv->wol_enable) {
6831 hw_enable_wol(hw, hw_priv->wol_enable, net_addr);
6832 hw_cfg_wol_pme(hw, 1);
6833 }
6834
6835 device_wakeup_enable(dev_d);
6836 return 0;
6837 }
6838
static char pcidev_name[] = "ksz884xp";

/* Supported devices: KSZ8841 (single port) and KSZ8842 (2-port switch). */
static const struct pci_device_id pcidev_table[] = {
	{ PCI_VENDOR_ID_MICREL_KS, 0x8841,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_MICREL_KS, 0x8842,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pcidev_table);

static SIMPLE_DEV_PM_OPS(pcidev_pm_ops, pcidev_suspend, pcidev_resume);

static struct pci_driver pci_device_driver = {
	.driver.pm	= &pcidev_pm_ops,
	.name		= pcidev_name,
	.id_table	= pcidev_table,
	.probe		= pcidev_init,
	.remove		= pcidev_exit
};

module_pci_driver(pci_device_driver);

MODULE_DESCRIPTION("KSZ8841/2 PCI network driver");
MODULE_AUTHOR("Tristram Ha <Tristram.Ha@micrel.com>");
MODULE_LICENSE("GPL");

module_param_named(message, msg_enable, int, 0);
MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");

/* See get_mac_addr() for the accepted MAC address string format. */
module_param(macaddr, charp, 0);
module_param(mac1addr, charp, 0);
module_param(fast_aging, int, 0);
module_param(multi_dev, int, 0);
module_param(stp, int, 0);
MODULE_PARM_DESC(macaddr, "MAC address");
MODULE_PARM_DESC(mac1addr, "Second MAC address");
MODULE_PARM_DESC(fast_aging, "Fast aging");
MODULE_PARM_DESC(multi_dev, "Multiple device interfaces");
MODULE_PARM_DESC(stp, "STP support");