1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * drivers/net/ethernet/micrel/ksx884x.c - Micrel KSZ8841/2 PCI Ethernet driver 4 * 5 * Copyright (c) 2009-2010 Micrel, Inc. 6 * Tristram Ha <Tristram.Ha@micrel.com> 7 */ 8 9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 11 #include <linux/init.h> 12 #include <linux/interrupt.h> 13 #include <linux/kernel.h> 14 #include <linux/module.h> 15 #include <linux/hex.h> 16 #include <linux/ioport.h> 17 #include <linux/pci.h> 18 #include <linux/proc_fs.h> 19 #include <linux/mii.h> 20 #include <linux/platform_device.h> 21 #include <linux/ethtool.h> 22 #include <linux/etherdevice.h> 23 #include <linux/in.h> 24 #include <linux/ip.h> 25 #include <linux/if_vlan.h> 26 #include <linux/crc32.h> 27 #include <linux/sched.h> 28 #include <linux/slab.h> 29 #include <linux/micrel_phy.h> 30 31 32 /* DMA Registers */ 33 34 #define KS_DMA_TX_CTRL 0x0000 35 #define DMA_TX_ENABLE 0x00000001 36 #define DMA_TX_CRC_ENABLE 0x00000002 37 #define DMA_TX_PAD_ENABLE 0x00000004 38 #define DMA_TX_LOOPBACK 0x00000100 39 #define DMA_TX_FLOW_ENABLE 0x00000200 40 #define DMA_TX_CSUM_IP 0x00010000 41 #define DMA_TX_CSUM_TCP 0x00020000 42 #define DMA_TX_CSUM_UDP 0x00040000 43 #define DMA_TX_BURST_SIZE 0x3F000000 44 45 #define KS_DMA_RX_CTRL 0x0004 46 #define DMA_RX_ENABLE 0x00000001 47 #define KS884X_DMA_RX_MULTICAST 0x00000002 48 #define DMA_RX_PROMISCUOUS 0x00000004 49 #define DMA_RX_ERROR 0x00000008 50 #define DMA_RX_UNICAST 0x00000010 51 #define DMA_RX_ALL_MULTICAST 0x00000020 52 #define DMA_RX_BROADCAST 0x00000040 53 #define DMA_RX_FLOW_ENABLE 0x00000200 54 #define DMA_RX_CSUM_IP 0x00010000 55 #define DMA_RX_CSUM_TCP 0x00020000 56 #define DMA_RX_CSUM_UDP 0x00040000 57 #define DMA_RX_BURST_SIZE 0x3F000000 58 59 #define DMA_BURST_SHIFT 24 60 #define DMA_BURST_DEFAULT 8 61 62 #define KS_DMA_TX_START 0x0008 63 #define KS_DMA_RX_START 0x000C 64 #define DMA_START 0x00000001 65 66 #define KS_DMA_TX_ADDR 0x0010 67 #define KS_DMA_RX_ADDR 0x0014 68 69 #define 
DMA_ADDR_LIST_MASK 0xFFFFFFFC 70 #define DMA_ADDR_LIST_SHIFT 2 71 72 /* MTR0 */ 73 #define KS884X_MULTICAST_0_OFFSET 0x0020 74 #define KS884X_MULTICAST_1_OFFSET 0x0021 75 #define KS884X_MULTICAST_2_OFFSET 0x0022 76 #define KS884x_MULTICAST_3_OFFSET 0x0023 77 /* MTR1 */ 78 #define KS884X_MULTICAST_4_OFFSET 0x0024 79 #define KS884X_MULTICAST_5_OFFSET 0x0025 80 #define KS884X_MULTICAST_6_OFFSET 0x0026 81 #define KS884X_MULTICAST_7_OFFSET 0x0027 82 83 /* Interrupt Registers */ 84 85 /* INTEN */ 86 #define KS884X_INTERRUPTS_ENABLE 0x0028 87 /* INTST */ 88 #define KS884X_INTERRUPTS_STATUS 0x002C 89 90 #define KS884X_INT_RX_STOPPED 0x02000000 91 #define KS884X_INT_TX_STOPPED 0x04000000 92 #define KS884X_INT_RX_OVERRUN 0x08000000 93 #define KS884X_INT_TX_EMPTY 0x10000000 94 #define KS884X_INT_RX 0x20000000 95 #define KS884X_INT_TX 0x40000000 96 #define KS884X_INT_PHY 0x80000000 97 98 #define KS884X_INT_RX_MASK \ 99 (KS884X_INT_RX | KS884X_INT_RX_OVERRUN) 100 #define KS884X_INT_TX_MASK \ 101 (KS884X_INT_TX | KS884X_INT_TX_EMPTY) 102 #define KS884X_INT_MASK (KS884X_INT_RX | KS884X_INT_TX | KS884X_INT_PHY) 103 104 /* MAC Additional Station Address */ 105 106 /* MAAL0 */ 107 #define KS_ADD_ADDR_0_LO 0x0080 108 /* MAAH0 */ 109 #define KS_ADD_ADDR_0_HI 0x0084 110 /* MAAL1 */ 111 #define KS_ADD_ADDR_1_LO 0x0088 112 /* MAAH1 */ 113 #define KS_ADD_ADDR_1_HI 0x008C 114 /* MAAL2 */ 115 #define KS_ADD_ADDR_2_LO 0x0090 116 /* MAAH2 */ 117 #define KS_ADD_ADDR_2_HI 0x0094 118 /* MAAL3 */ 119 #define KS_ADD_ADDR_3_LO 0x0098 120 /* MAAH3 */ 121 #define KS_ADD_ADDR_3_HI 0x009C 122 /* MAAL4 */ 123 #define KS_ADD_ADDR_4_LO 0x00A0 124 /* MAAH4 */ 125 #define KS_ADD_ADDR_4_HI 0x00A4 126 /* MAAL5 */ 127 #define KS_ADD_ADDR_5_LO 0x00A8 128 /* MAAH5 */ 129 #define KS_ADD_ADDR_5_HI 0x00AC 130 /* MAAL6 */ 131 #define KS_ADD_ADDR_6_LO 0x00B0 132 /* MAAH6 */ 133 #define KS_ADD_ADDR_6_HI 0x00B4 134 /* MAAL7 */ 135 #define KS_ADD_ADDR_7_LO 0x00B8 136 /* MAAH7 */ 137 #define KS_ADD_ADDR_7_HI 0x00BC 138 
/* MAAL8 */
#define KS_ADD_ADDR_8_LO		0x00C0
/* MAAH8 */
#define KS_ADD_ADDR_8_HI		0x00C4
/* MAAL9 */
#define KS_ADD_ADDR_9_LO		0x00C8
/* MAAH9 */
#define KS_ADD_ADDR_9_HI		0x00CC
/* MAAL10 */
#define KS_ADD_ADDR_A_LO		0x00D0
/* MAAH10 */
#define KS_ADD_ADDR_A_HI		0x00D4
/* MAAL11 */
#define KS_ADD_ADDR_B_LO		0x00D8
/* MAAH11 */
#define KS_ADD_ADDR_B_HI		0x00DC
/* MAAL12 */
#define KS_ADD_ADDR_C_LO		0x00E0
/* MAAH12 */
#define KS_ADD_ADDR_C_HI		0x00E4
/* MAAL13 */
#define KS_ADD_ADDR_D_LO		0x00E8
/* MAAH13 */
#define KS_ADD_ADDR_D_HI		0x00EC
/* MAAL14 */
#define KS_ADD_ADDR_E_LO		0x00F0
/* MAAH14 */
#define KS_ADD_ADDR_E_HI		0x00F4
/* MAAL15 */
#define KS_ADD_ADDR_F_LO		0x00F8
/* MAAH15 */
#define KS_ADD_ADDR_F_HI		0x00FC

/* High word of an additional address entry; the enable bit lives in it. */
#define ADD_ADDR_HI_MASK		0x0000FFFF
#define ADD_ADDR_ENABLE			0x80000000
/* Each additional address entry occupies 8 bytes (LO + HI register pair). */
#define ADD_ADDR_INCR			8

/* Miscellaneous Registers */

/* MARL */
#define KS884X_ADDR_0_OFFSET		0x0200
#define KS884X_ADDR_1_OFFSET		0x0201
/* MARM */
#define KS884X_ADDR_2_OFFSET		0x0202
#define KS884X_ADDR_3_OFFSET		0x0203
/* MARH */
#define KS884X_ADDR_4_OFFSET		0x0204
#define KS884X_ADDR_5_OFFSET		0x0205

/* OBCR */
#define KS884X_BUS_CTRL_OFFSET		0x0210

#define BUS_SPEED_125_MHZ		0x0000
#define BUS_SPEED_62_5_MHZ		0x0001
#define BUS_SPEED_41_66_MHZ		0x0002
#define BUS_SPEED_25_MHZ		0x0003

/* EEPCR */
#define KS884X_EEPROM_CTRL_OFFSET	0x0212

/* Bit-banged EEPROM interface control bits. */
#define EEPROM_CHIP_SELECT		0x0001
#define EEPROM_SERIAL_CLOCK		0x0002
#define EEPROM_DATA_OUT			0x0004
#define EEPROM_DATA_IN			0x0008
#define EEPROM_ACCESS_ENABLE		0x0010

/* MBIR */
#define KS884X_MEM_INFO_OFFSET		0x0214

#define RX_MEM_TEST_FAILED		0x0008
#define RX_MEM_TEST_FINISHED		0x0010
#define TX_MEM_TEST_FAILED		0x0800
#define TX_MEM_TEST_FINISHED		0x1000

/* GCR */
#define KS884X_GLOBAL_CTRL_OFFSET	0x0216
#define GLOBAL_SOFTWARE_RESET		0x0001

#define KS8841_POWER_MANAGE_OFFSET	0x0218

/* WFCR */
#define KS8841_WOL_CTRL_OFFSET		0x021A
#define KS8841_WOL_MAGIC_ENABLE		0x0080
#define KS8841_WOL_FRAME3_ENABLE	0x0008
#define KS8841_WOL_FRAME2_ENABLE	0x0004
#define KS8841_WOL_FRAME1_ENABLE	0x0002
#define KS8841_WOL_FRAME0_ENABLE	0x0001

/* WF0 */
#define KS8841_WOL_FRAME_CRC_OFFSET	0x0220
#define KS8841_WOL_FRAME_BYTE0_OFFSET	0x0224
#define KS8841_WOL_FRAME_BYTE2_OFFSET	0x0228

/* IACR - indirect access control for the switch table engines. */
#define KS884X_IACR_P			0x04A0
#define KS884X_IACR_OFFSET		KS884X_IACR_P

/* IADR1 */
#define KS884X_IADR1_P			0x04A2
#define KS884X_IADR2_P			0x04A4
#define KS884X_IADR3_P			0x04A6
#define KS884X_IADR4_P			0x04A8
#define KS884X_IADR5_P			0x04AA

#define KS884X_ACC_CTRL_SEL_OFFSET	KS884X_IACR_P
#define KS884X_ACC_CTRL_INDEX_OFFSET	(KS884X_ACC_CTRL_SEL_OFFSET + 1)

/* Indirect-access data bytes are scattered over the IADR registers;
 * the mapping below puts them in little-endian byte order.
 */
#define KS884X_ACC_DATA_0_OFFSET	KS884X_IADR4_P
#define KS884X_ACC_DATA_1_OFFSET	(KS884X_ACC_DATA_0_OFFSET + 1)
#define KS884X_ACC_DATA_2_OFFSET	KS884X_IADR5_P
#define KS884X_ACC_DATA_3_OFFSET	(KS884X_ACC_DATA_2_OFFSET + 1)
#define KS884X_ACC_DATA_4_OFFSET	KS884X_IADR2_P
#define KS884X_ACC_DATA_5_OFFSET	(KS884X_ACC_DATA_4_OFFSET + 1)
#define KS884X_ACC_DATA_6_OFFSET	KS884X_IADR3_P
#define KS884X_ACC_DATA_7_OFFSET	(KS884X_ACC_DATA_6_OFFSET + 1)
#define KS884X_ACC_DATA_8_OFFSET	KS884X_IADR1_P

/* P1MBCR - port 1 MII-style PHY registers. */
#define KS884X_P1MBCR_P			0x04D0
#define KS884X_P1MBSR_P			0x04D2
#define KS884X_PHY1ILR_P		0x04D4
#define KS884X_PHY1IHR_P		0x04D6
#define KS884X_P1ANAR_P			0x04D8
#define KS884X_P1ANLPR_P		0x04DA

/* P2MBCR - port 2 MII-style PHY registers. */
#define KS884X_P2MBCR_P			0x04E0
#define KS884X_P2MBSR_P			0x04E2
#define KS884X_PHY2ILR_P		0x04E4
#define KS884X_PHY2IHR_P		0x04E6
#define KS884X_P2ANAR_P			0x04E8
#define KS884X_P2ANLPR_P		0x04EA
/* Per-port PHY register blocks: base of port 1 plus a fixed stride. */
#define KS884X_PHY_1_CTRL_OFFSET	KS884X_P1MBCR_P
#define PHY_CTRL_INTERVAL		(KS884X_P2MBCR_P - KS884X_P1MBCR_P)

#define KS884X_PHY_CTRL_OFFSET		0x00

#define KS884X_PHY_STATUS_OFFSET	0x02

#define KS884X_PHY_ID_1_OFFSET		0x04
#define KS884X_PHY_ID_2_OFFSET		0x06

#define KS884X_PHY_AUTO_NEG_OFFSET	0x08

#define KS884X_PHY_REMOTE_CAP_OFFSET	0x0A

/* P1VCT */
#define KS884X_P1VCT_P			0x04F0
#define KS884X_P1PHYCTRL_P		0x04F2

/* P2VCT */
#define KS884X_P2VCT_P			0x04F4
#define KS884X_P2PHYCTRL_P		0x04F6

#define KS884X_PHY_SPECIAL_OFFSET	KS884X_P1VCT_P
#define PHY_SPECIAL_INTERVAL		(KS884X_P2VCT_P - KS884X_P1VCT_P)

#define KS884X_PHY_LINK_MD_OFFSET	0x00

/* Cable diagnostic (VCT) control/result bits. */
#define PHY_START_CABLE_DIAG		0x8000
#define PHY_CABLE_DIAG_RESULT		0x6000
#define PHY_CABLE_STAT_NORMAL		0x0000
#define PHY_CABLE_STAT_OPEN		0x2000
#define PHY_CABLE_STAT_SHORT		0x4000
#define PHY_CABLE_STAT_FAILED		0x6000
#define PHY_CABLE_10M_SHORT		0x1000
#define PHY_CABLE_FAULT_COUNTER		0x01FF

#define KS884X_PHY_PHY_CTRL_OFFSET	0x02

#define PHY_STAT_REVERSED_POLARITY	0x0020
#define PHY_STAT_MDIX			0x0010
#define PHY_FORCE_LINK			0x0008
#define PHY_POWER_SAVING_DISABLE	0x0004
#define PHY_REMOTE_LOOPBACK		0x0002

/* SIDER - chip/family identification. */
#define KS884X_SIDER_P			0x0400
#define KS884X_CHIP_ID_OFFSET		KS884X_SIDER_P
#define KS884X_FAMILY_ID_OFFSET		(KS884X_CHIP_ID_OFFSET + 1)

#define REG_FAMILY_ID			0x88

#define REG_CHIP_ID_41			0x8810
#define REG_CHIP_ID_42			0x8800

#define KS884X_CHIP_ID_MASK_41		0xFF10
#define KS884X_CHIP_ID_MASK		0xFFF0
#define KS884X_CHIP_ID_SHIFT		4
#define KS884X_REVISION_MASK		0x000E
#define KS884X_REVISION_SHIFT		1
#define KS8842_START			0x0001

/* Chip ID values: _M = MII variants, _P = PCI variants. */
#define CHIP_IP_41_M			0x8810
#define CHIP_IP_42_M			0x8800
#define CHIP_IP_61_M			0x8890
#define CHIP_IP_62_M			0x8880

#define CHIP_IP_41_P			0x8850
#define CHIP_IP_42_P			0x8840
#define CHIP_IP_61_P			0x88D0
#define CHIP_IP_62_P			0x88C0

/* SGCR1 - switch global control 1. */
#define KS8842_SGCR1_P			0x0402
#define KS8842_SWITCH_CTRL_1_OFFSET	KS8842_SGCR1_P

#define SWITCH_PASS_ALL			0x8000
#define SWITCH_TX_FLOW_CTRL		0x2000
#define SWITCH_RX_FLOW_CTRL		0x1000
#define SWITCH_CHECK_LENGTH		0x0800
#define SWITCH_AGING_ENABLE		0x0400
#define SWITCH_FAST_AGING		0x0200
#define SWITCH_AGGR_BACKOFF		0x0100
#define SWITCH_PASS_PAUSE		0x0008
#define SWITCH_LINK_AUTO_AGING		0x0001

/* SGCR2 - switch global control 2. */
#define KS8842_SGCR2_P			0x0404
#define KS8842_SWITCH_CTRL_2_OFFSET	KS8842_SGCR2_P

#define SWITCH_VLAN_ENABLE		0x8000
#define SWITCH_IGMP_SNOOP		0x4000
#define IPV6_MLD_SNOOP_ENABLE		0x2000
#define IPV6_MLD_SNOOP_OPTION		0x1000
#define PRIORITY_SCHEME_SELECT		0x0800
#define SWITCH_MIRROR_RX_TX		0x0100
#define UNICAST_VLAN_BOUNDARY		0x0080
#define MULTICAST_STORM_DISABLE		0x0040
#define SWITCH_BACK_PRESSURE		0x0020
#define FAIR_FLOW_CTRL			0x0010
#define NO_EXC_COLLISION_DROP		0x0008
#define SWITCH_HUGE_PACKET		0x0004
#define SWITCH_LEGAL_PACKET		0x0002
#define SWITCH_BUF_RESERVE		0x0001

/* SGCR3 - switch global control 3. */
#define KS8842_SGCR3_P			0x0406
#define KS8842_SWITCH_CTRL_3_OFFSET	KS8842_SGCR3_P

#define BROADCAST_STORM_RATE_LO		0xFF00
#define SWITCH_REPEATER			0x0080
#define SWITCH_HALF_DUPLEX		0x0040
#define SWITCH_FLOW_CTRL		0x0020
#define SWITCH_10_MBIT			0x0010
#define SWITCH_REPLACE_NULL_VID		0x0008
#define BROADCAST_STORM_RATE_HI		0x0007

/* Full 11-bit broadcast storm rate (HI bits plus LO byte). */
#define BROADCAST_STORM_RATE		0x07FF

/* SGCR4 */
#define KS8842_SGCR4_P			0x0408

/* SGCR5 - LED mode control. */
#define KS8842_SGCR5_P			0x040A
#define KS8842_SWITCH_CTRL_5_OFFSET	KS8842_SGCR5_P

#define LED_MODE			0x8200
#define LED_SPEED_DUPLEX_ACT		0x0000
#define LED_SPEED_DUPLEX_LINK_ACT	0x8000
#define LED_DUPLEX_10_100		0x0200

/* SGCR6 - 802.1p priority mapping. */
#define KS8842_SGCR6_P			0x0410
#define KS8842_SWITCH_CTRL_6_OFFSET	KS8842_SGCR6_P

#define KS8842_PRIORITY_MASK		3
#define KS8842_PRIORITY_SHIFT		2

/* SGCR7 - unknown-DA default port forwarding. */
#define KS8842_SGCR7_P			0x0412
#define KS8842_SWITCH_CTRL_7_OFFSET	KS8842_SGCR7_P

#define SWITCH_UNK_DEF_PORT_ENABLE	0x0008
#define SWITCH_UNK_DEF_PORT_3		0x0004
#define SWITCH_UNK_DEF_PORT_2		0x0002
#define SWITCH_UNK_DEF_PORT_1		0x0001

/* MACAR1 - switch MAC address, byte-swapped within each 16-bit register. */
#define KS8842_MACAR1_P			0x0470
#define KS8842_MACAR2_P			0x0472
#define KS8842_MACAR3_P			0x0474
#define KS8842_MAC_ADDR_1_OFFSET	KS8842_MACAR1_P
#define KS8842_MAC_ADDR_0_OFFSET	(KS8842_MAC_ADDR_1_OFFSET + 1)
#define KS8842_MAC_ADDR_3_OFFSET	KS8842_MACAR2_P
#define KS8842_MAC_ADDR_2_OFFSET	(KS8842_MAC_ADDR_3_OFFSET + 1)
#define KS8842_MAC_ADDR_5_OFFSET	KS8842_MACAR3_P
#define KS8842_MAC_ADDR_4_OFFSET	(KS8842_MAC_ADDR_5_OFFSET + 1)

/* TOSR1 - ToS (DiffServ) priority mapping registers. */
#define KS8842_TOSR1_P			0x0480
#define KS8842_TOSR2_P			0x0482
#define KS8842_TOSR3_P			0x0484
#define KS8842_TOSR4_P			0x0486
#define KS8842_TOSR5_P			0x0488
#define KS8842_TOSR6_P			0x048A
#define KS8842_TOSR7_P			0x0490
#define KS8842_TOSR8_P			0x0492
#define KS8842_TOS_1_OFFSET		KS8842_TOSR1_P
#define KS8842_TOS_2_OFFSET		KS8842_TOSR2_P
#define KS8842_TOS_3_OFFSET		KS8842_TOSR3_P
#define KS8842_TOS_4_OFFSET		KS8842_TOSR4_P
#define KS8842_TOS_5_OFFSET		KS8842_TOSR5_P
#define KS8842_TOS_6_OFFSET		KS8842_TOSR6_P

#define KS8842_TOS_7_OFFSET		KS8842_TOSR7_P
#define KS8842_TOS_8_OFFSET		KS8842_TOSR8_P

/* P1CR1 - port 1 control/status block. */
#define KS8842_P1CR1_P			0x0500
#define KS8842_P1CR2_P			0x0502
#define KS8842_P1VIDR_P			0x0504
#define KS8842_P1CR3_P			0x0506
#define KS8842_P1IRCR_P			0x0508
#define KS8842_P1ERCR_P			0x050A
#define KS884X_P1SCSLMD_P		0x0510
#define KS884X_P1CR4_P			0x0512
#define KS884X_P1SR_P			0x0514

/* P2CR1 - port 2 control/status block. */
#define KS8842_P2CR1_P			0x0520
#define KS8842_P2CR2_P			0x0522
#define KS8842_P2VIDR_P			0x0524
#define KS8842_P2CR3_P			0x0526
#define KS8842_P2IRCR_P			0x0528
#define KS8842_P2ERCR_P			0x052A
#define KS884X_P2SCSLMD_P		0x0530
#define KS884X_P2CR4_P			0x0532
#define KS884X_P2SR_P			0x0534

/* P3CR1 - port 3 (host port) control block. */
#define KS8842_P3CR1_P			0x0540
#define KS8842_P3CR2_P			0x0542
#define KS8842_P3VIDR_P			0x0544
#define KS8842_P3CR3_P			0x0546
#define KS8842_P3IRCR_P			0x0548
#define KS8842_P3ERCR_P			0x054A

#define KS8842_PORT_1_CTRL_1		KS8842_P1CR1_P
#define KS8842_PORT_2_CTRL_1		KS8842_P2CR1_P
#define KS8842_PORT_3_CTRL_1		KS8842_P3CR1_P

/* Compute the base register address of a port's control block into 'addr'. */
#define PORT_CTRL_ADDR(port, addr)		\
	(addr = KS8842_PORT_1_CTRL_1 + (port) *	\
		(KS8842_PORT_2_CTRL_1 - KS8842_PORT_1_CTRL_1))

#define KS8842_PORT_CTRL_1_OFFSET	0x00

#define PORT_BROADCAST_STORM		0x0080
#define PORT_DIFFSERV_ENABLE		0x0040
#define PORT_802_1P_ENABLE		0x0020
#define PORT_BASED_PRIORITY_MASK	0x0018
#define PORT_BASED_PRIORITY_BASE	0x0003
#define PORT_BASED_PRIORITY_SHIFT	3
#define PORT_BASED_PRIORITY_0		0x0000
#define PORT_BASED_PRIORITY_1		0x0008
#define PORT_BASED_PRIORITY_2		0x0010
#define PORT_BASED_PRIORITY_3		0x0018
#define PORT_INSERT_TAG			0x0004
#define PORT_REMOVE_TAG			0x0002
#define PORT_PRIO_QUEUE_ENABLE		0x0001

#define KS8842_PORT_CTRL_2_OFFSET	0x02

#define PORT_INGRESS_VLAN_FILTER	0x4000
#define PORT_DISCARD_NON_VID		0x2000
#define PORT_FORCE_FLOW_CTRL		0x1000
#define PORT_BACK_PRESSURE		0x0800
#define PORT_TX_ENABLE			0x0400
#define PORT_RX_ENABLE			0x0200
#define PORT_LEARN_DISABLE		0x0100
#define PORT_MIRROR_SNIFFER		0x0080
#define PORT_MIRROR_RX			0x0040
#define PORT_MIRROR_TX			0x0020
#define PORT_USER_PRIORITY_CEILING	0x0008
#define PORT_VLAN_MEMBERSHIP		0x0007

#define KS8842_PORT_CTRL_VID_OFFSET	0x04

#define PORT_DEFAULT_VID		0x0001

#define KS8842_PORT_CTRL_3_OFFSET	0x06

#define PORT_INGRESS_LIMIT_MODE		0x000C
#define PORT_INGRESS_ALL		0x0000
#define PORT_INGRESS_UNICAST		0x0004
#define PORT_INGRESS_MULTICAST		0x0008
#define PORT_INGRESS_BROADCAST		0x000C
#define PORT_COUNT_IFG			0x0002
#define PORT_COUNT_PREAMBLE		0x0001

#define KS8842_PORT_IN_RATE_OFFSET	0x08
#define KS8842_PORT_OUT_RATE_OFFSET	0x0A

#define PORT_PRIORITY_RATE		0x0F
#define PORT_PRIORITY_RATE_SHIFT	4

#define KS884X_PORT_LINK_MD		0x10

/* Per-port cable diagnostic bits (switch-register flavour). */
#define PORT_CABLE_10M_SHORT		0x8000
#define PORT_CABLE_DIAG_RESULT		0x6000
#define PORT_CABLE_STAT_NORMAL		0x0000
#define PORT_CABLE_STAT_OPEN		0x2000
#define PORT_CABLE_STAT_SHORT		0x4000
#define PORT_CABLE_STAT_FAILED		0x6000
#define PORT_START_CABLE_DIAG		0x1000
#define PORT_FORCE_LINK			0x0800
#define PORT_POWER_SAVING_DISABLE	0x0400
#define PORT_PHY_REMOTE_LOOPBACK	0x0200
#define PORT_CABLE_FAULT_COUNTER	0x01FF

#define KS884X_PORT_CTRL_4_OFFSET	0x12

#define PORT_LED_OFF			0x8000
#define PORT_TX_DISABLE			0x4000
#define PORT_AUTO_NEG_RESTART		0x2000
#define PORT_REMOTE_FAULT_DISABLE	0x1000
#define PORT_POWER_DOWN			0x0800
#define PORT_AUTO_MDIX_DISABLE		0x0400
#define PORT_FORCE_MDIX			0x0200
#define PORT_LOOPBACK			0x0100
#define PORT_AUTO_NEG_ENABLE		0x0080
#define PORT_FORCE_100_MBIT		0x0040
#define PORT_FORCE_FULL_DUPLEX		0x0020
#define PORT_AUTO_NEG_SYM_PAUSE		0x0010
#define PORT_AUTO_NEG_100BTX_FD		0x0008
#define PORT_AUTO_NEG_100BTX		0x0004
#define PORT_AUTO_NEG_10BT_FD		0x0002
#define PORT_AUTO_NEG_10BT		0x0001

#define KS884X_PORT_STATUS_OFFSET	0x14

#define PORT_HP_MDIX			0x8000
#define PORT_REVERSED_POLARITY		0x2000
#define PORT_RX_FLOW_CTRL		0x0800
#define PORT_TX_FLOW_CTRL		0x1000
#define PORT_STATUS_SPEED_100MBIT	0x0400
#define PORT_STATUS_FULL_DUPLEX		0x0200
#define PORT_REMOTE_FAULT		0x0100
#define PORT_MDIX_STATUS		0x0080
#define PORT_AUTO_NEG_COMPLETE		0x0040
#define PORT_STATUS_LINK_GOOD		0x0020
#define PORT_REMOTE_SYM_PAUSE		0x0010
#define PORT_REMOTE_100BTX_FD		0x0008
#define PORT_REMOTE_100BTX		0x0004
#define PORT_REMOTE_10BT_FD		0x0002
#define PORT_REMOTE_10BT		0x0001

/* Static MAC table entry layout as seen through the 72-bit indirect access
 * window (notation: high byte - high dword - low dword):
#define STATIC_MAC_TABLE_ADDR		00-0000FFFF-FFFFFFFF
#define STATIC_MAC_TABLE_FWD_PORTS	00-00070000-00000000
#define STATIC_MAC_TABLE_VALID		00-00080000-00000000
#define STATIC_MAC_TABLE_OVERRIDE	00-00100000-00000000
#define STATIC_MAC_TABLE_USE_FID	00-00200000-00000000
#define STATIC_MAC_TABLE_FID		00-03C00000-00000000
*/

#define STATIC_MAC_TABLE_ADDR		0x0000FFFF
#define STATIC_MAC_TABLE_FWD_PORTS	0x00070000
#define STATIC_MAC_TABLE_VALID		0x00080000
#define STATIC_MAC_TABLE_OVERRIDE	0x00100000
#define STATIC_MAC_TABLE_USE_FID	0x00200000
#define STATIC_MAC_TABLE_FID		0x03C00000

#define STATIC_MAC_FWD_PORTS_SHIFT	16
#define STATIC_MAC_FID_SHIFT		22

/* VLAN table entry layout:
#define VLAN_TABLE_VID			00-00000000-00000FFF
#define VLAN_TABLE_FID			00-00000000-0000F000
#define VLAN_TABLE_MEMBERSHIP		00-00000000-00070000
#define VLAN_TABLE_VALID		00-00000000-00080000
*/

#define VLAN_TABLE_VID			0x00000FFF
#define VLAN_TABLE_FID			0x0000F000
#define VLAN_TABLE_MEMBERSHIP		0x00070000
#define VLAN_TABLE_VALID		0x00080000

#define VLAN_TABLE_FID_SHIFT		12
#define VLAN_TABLE_MEMBERSHIP_SHIFT	16

/* Dynamic (learned) MAC table entry layout:
#define DYNAMIC_MAC_TABLE_ADDR		00-0000FFFF-FFFFFFFF
#define DYNAMIC_MAC_TABLE_FID		00-000F0000-00000000
#define DYNAMIC_MAC_TABLE_SRC_PORT	00-00300000-00000000
#define DYNAMIC_MAC_TABLE_TIMESTAMP	00-00C00000-00000000
#define DYNAMIC_MAC_TABLE_ENTRIES	03-FF000000-00000000
#define DYNAMIC_MAC_TABLE_MAC_EMPTY	04-00000000-00000000
#define DYNAMIC_MAC_TABLE_RESERVED	78-00000000-00000000
#define DYNAMIC_MAC_TABLE_NOT_READY	80-00000000-00000000
*/

#define DYNAMIC_MAC_TABLE_ADDR		0x0000FFFF
#define DYNAMIC_MAC_TABLE_FID		0x000F0000
#define DYNAMIC_MAC_TABLE_SRC_PORT	0x00300000
#define DYNAMIC_MAC_TABLE_TIMESTAMP	0x00C00000
#define DYNAMIC_MAC_TABLE_ENTRIES	0xFF000000

/* Fields below live in the high byte of the 72-bit entry. */
#define DYNAMIC_MAC_TABLE_ENTRIES_H	0x03
#define DYNAMIC_MAC_TABLE_MAC_EMPTY	0x04
#define DYNAMIC_MAC_TABLE_RESERVED	0x78
#define DYNAMIC_MAC_TABLE_NOT_READY	0x80

#define DYNAMIC_MAC_FID_SHIFT		16
#define DYNAMIC_MAC_SRC_PORT_SHIFT	20
#define DYNAMIC_MAC_TIMESTAMP_SHIFT	22
#define DYNAMIC_MAC_ENTRIES_SHIFT	24
#define DYNAMIC_MAC_ENTRIES_H_SHIFT	8

/* MIB counter read-back layout:
#define MIB_COUNTER_VALUE		00-00000000-3FFFFFFF
#define MIB_COUNTER_VALID		00-00000000-40000000
#define MIB_COUNTER_OVERFLOW		00-00000000-80000000
*/

#define MIB_COUNTER_VALUE		0x3FFFFFFF
#define MIB_COUNTER_VALID		0x40000000
#define MIB_COUNTER_OVERFLOW		0x80000000

#define MIB_PACKET_DROPPED		0x0000FFFF

#define KS_MIB_PACKET_DROPPED_TX_0	0x100
#define KS_MIB_PACKET_DROPPED_TX_1	0x101
#define KS_MIB_PACKET_DROPPED_TX	0x102
#define KS_MIB_PACKET_DROPPED_RX_0	0x103
#define KS_MIB_PACKET_DROPPED_RX_1	0x104
#define KS_MIB_PACKET_DROPPED_RX	0x105

/* Change default LED mode. */
#define SET_DEFAULT_LED			LED_SPEED_DUPLEX_ACT

/* Map a byte index to network (big-endian) MAC address byte order. */
#define MAC_ADDR_ORDER(i)		(ETH_ALEN - 1 - (i))

#define MAX_ETHERNET_BODY_SIZE		1500
#define ETHERNET_HEADER_SIZE		(14 + VLAN_HLEN)

#define MAX_ETHERNET_PACKET_SIZE	\
	(MAX_ETHERNET_BODY_SIZE + ETHERNET_HEADER_SIZE)

#define REGULAR_RX_BUF_SIZE		(MAX_ETHERNET_PACKET_SIZE + 4)
#define MAX_RX_BUF_SIZE			(1912 + 4)

#define ADDITIONAL_ENTRIES		16
#define MAX_MULTICAST_LIST		32

#define HW_MULTICAST_SIZE		8

/* NOTE(review): 'port' is not parenthesized; callers must pass a plain
 * identifier or literal, never an expression.
 */
#define HW_TO_DEV_PORT(port)		(port - 1)

/* Link state of a port. */
enum {
	media_connected,
	media_disconnected
};

/* OID-style error counter indices (see counter[] usage elsewhere). */
enum {
	OID_COUNTER_UNKOWN,

	OID_COUNTER_FIRST,

	/* total transmit errors */
	OID_COUNTER_XMIT_ERROR,

	/* total receive errors */
	OID_COUNTER_RCV_ERROR,

	OID_COUNTER_LAST
};

/*
 * Hardware descriptor definitions
 */

#define DESC_ALIGNMENT			16
#define BUFFER_ALIGNMENT		8

#define NUM_OF_RX_DESC			64
#define NUM_OF_TX_DESC			64

/* RX descriptor control/status word bits. */
#define KS_DESC_RX_FRAME_LEN		0x000007FF
#define KS_DESC_RX_FRAME_TYPE		0x00008000
#define KS_DESC_RX_ERROR_CRC		0x00010000
#define KS_DESC_RX_ERROR_RUNT		0x00020000
#define KS_DESC_RX_ERROR_TOO_LONG	0x00040000
#define KS_DESC_RX_ERROR_PHY		0x00080000
#define KS884X_DESC_RX_PORT_MASK	0x00300000
#define KS_DESC_RX_MULTICAST		0x01000000
#define KS_DESC_RX_ERROR		0x02000000
#define KS_DESC_RX_ERROR_CSUM_UDP	0x04000000
#define KS_DESC_RX_ERROR_CSUM_TCP	0x08000000
#define KS_DESC_RX_ERROR_CSUM_IP	0x10000000
#define KS_DESC_RX_LAST			0x20000000
#define KS_DESC_RX_FIRST		0x40000000
#define KS_DESC_RX_ERROR_COND		\
	(KS_DESC_RX_ERROR_CRC |		\
	KS_DESC_RX_ERROR_RUNT |		\
	KS_DESC_RX_ERROR_PHY |		\
	KS_DESC_RX_ERROR_TOO_LONG)

#define KS_DESC_HW_OWNED		0x80000000

/* Descriptor buffer word bits (shared by RX and TX). */
#define KS_DESC_BUF_SIZE		0x000007FF
#define KS884X_DESC_TX_PORT_MASK	0x00300000
#define KS_DESC_END_OF_RING		0x02000000
/* TX descriptor buffer word bits. */
#define KS_DESC_TX_CSUM_GEN_UDP		0x04000000
#define KS_DESC_TX_CSUM_GEN_TCP		0x08000000
#define KS_DESC_TX_CSUM_GEN_IP		0x10000000
#define KS_DESC_TX_LAST			0x20000000
#define KS_DESC_TX_FIRST		0x40000000
#define KS_DESC_TX_INTERRUPT		0x80000000

#define KS_DESC_PORT_SHIFT		20

#define KS_DESC_RX_MASK			(KS_DESC_BUF_SIZE)

#define KS_DESC_TX_MASK			\
	(KS_DESC_TX_INTERRUPT |		\
	KS_DESC_TX_FIRST |		\
	KS_DESC_TX_LAST |		\
	KS_DESC_TX_CSUM_GEN_IP |	\
	KS_DESC_TX_CSUM_GEN_TCP |	\
	KS_DESC_TX_CSUM_GEN_UDP |	\
	KS_DESC_BUF_SIZE)

/* RX descriptor control/status word viewed as bitfields.  Member order
 * mirrors the hardware bit layout and therefore depends on endianness;
 * do not reorder.
 */
struct ksz_desc_rx_stat {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 hw_owned:1;
	u32 first_desc:1;
	u32 last_desc:1;
	u32 csum_err_ip:1;
	u32 csum_err_tcp:1;
	u32 csum_err_udp:1;
	u32 error:1;
	u32 multicast:1;
	u32 src_port:4;
	u32 err_phy:1;
	u32 err_too_long:1;
	u32 err_runt:1;
	u32 err_crc:1;
	u32 frame_type:1;
	u32 reserved1:4;
	u32 frame_len:11;
#else
	u32 frame_len:11;
	u32 reserved1:4;
	u32 frame_type:1;
	u32 err_crc:1;
	u32 err_runt:1;
	u32 err_too_long:1;
	u32 err_phy:1;
	u32 src_port:4;
	u32 multicast:1;
	u32 error:1;
	u32 csum_err_udp:1;
	u32 csum_err_tcp:1;
	u32 csum_err_ip:1;
	u32 last_desc:1;
	u32 first_desc:1;
	u32 hw_owned:1;
#endif
};

/* TX descriptor control/status word; only the ownership bit is defined. */
struct ksz_desc_tx_stat {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 hw_owned:1;
	u32 reserved1:31;
#else
	u32 reserved1:31;
	u32 hw_owned:1;
#endif
};

/* RX descriptor buffer word viewed as bitfields. */
struct ksz_desc_rx_buf {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 reserved4:6;
	u32 end_of_ring:1;
	u32 reserved3:14;
	u32 buf_size:11;
#else
	u32 buf_size:11;
	u32 reserved3:14;
	u32 end_of_ring:1;
	u32 reserved4:6;
#endif
};

/* TX descriptor buffer word viewed as bitfields. */
struct ksz_desc_tx_buf {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 intr:1;
	u32 first_seg:1;
	u32 last_seg:1;
	u32 csum_gen_ip:1;
	u32 csum_gen_tcp:1;
	u32 csum_gen_udp:1;
	u32 end_of_ring:1;
	u32 reserved4:1;
	u32 dest_port:4;
	u32 reserved3:9;
	u32 buf_size:11;
#else
	u32 buf_size:11;
	u32 reserved3:9;
	u32 dest_port:4;
	u32 reserved4:1;
	u32 end_of_ring:1;
	u32 csum_gen_udp:1;
	u32 csum_gen_tcp:1;
	u32 csum_gen_ip:1;
	u32 last_seg:1;
	u32 first_seg:1;
	u32 intr:1;
#endif
};

/* Control word, accessible as RX view, TX view, or raw 32-bit value. */
union desc_stat {
	struct ksz_desc_rx_stat rx;
	struct ksz_desc_tx_stat tx;
	u32 data;
};

/* Buffer word, accessible as RX view, TX view, or raw 32-bit value. */
union desc_buf {
	struct ksz_desc_rx_buf rx;
	struct ksz_desc_tx_buf tx;
	u32 data;
};

/**
 * struct ksz_hw_desc - Hardware descriptor data structure
 * @ctrl:	Descriptor control value.
 * @buf:	Descriptor buffer value.
 * @addr:	Physical address of memory buffer.
 * @next:	Pointer to next hardware descriptor.
 */
struct ksz_hw_desc {
	union desc_stat ctrl;
	union desc_buf buf;
	u32 addr;
	u32 next;
};

/**
 * struct ksz_sw_desc - Software descriptor data structure
 * @ctrl:	Descriptor control value.
 * @buf:	Descriptor buffer value.
 * @buf_size:	Current buffers size value in hardware descriptor.
 */
struct ksz_sw_desc {
	union desc_stat ctrl;
	union desc_buf buf;
	u32 buf_size;
};

/**
 * struct ksz_dma_buf - OS dependent DMA buffer data structure
 * @skb:	Associated socket buffer.
 * @dma:	Associated physical DMA address.
 * @len:	Actual len used.
 */
struct ksz_dma_buf {
	struct sk_buff *skb;
	dma_addr_t dma;
	int len;
};

/**
 * struct ksz_desc - Descriptor structure
 * @phw:	Hardware descriptor pointer to uncached physical memory.
 * @sw:		Cached memory to hold hardware descriptor values for
 *		manipulation.
 * @dma_buf:	Operating system dependent data structure to hold physical
 *		memory buffer allocation information.
 */
struct ksz_desc {
	struct ksz_hw_desc *phw;
	struct ksz_sw_desc sw;
	struct ksz_dma_buf dma_buf;
};

#define DMA_BUFFER(desc)  ((struct ksz_dma_buf *)(&(desc)->dma_buf))

/**
 * struct ksz_desc_info - Descriptor information data structure
 * @ring:	First descriptor in the ring.
 * @cur:	Current descriptor being manipulated.
 * @ring_virt:	First hardware descriptor in the ring.
 * @ring_phys:	The physical address of the first descriptor of the ring.
 * @size:	Size of hardware descriptor.
 * @alloc:	Number of descriptors allocated.
 * @avail:	Number of descriptors available for use.
 * @last:	Index for last descriptor released to hardware.
 * @next:	Index for next descriptor available for use.
 * @mask:	Mask for index wrapping.
 */
struct ksz_desc_info {
	struct ksz_desc *ring;
	struct ksz_desc *cur;
	struct ksz_hw_desc *ring_virt;
	u32 ring_phys;
	int size;
	int alloc;
	int avail;
	int last;
	int next;
	int mask;
};

/*
 * KSZ8842 switch definitions
 */

/* Indirect-access table selector values. */
enum {
	TABLE_STATIC_MAC = 0,
	TABLE_VLAN,
	TABLE_DYNAMIC_MAC,
	TABLE_MIB
};

#define LEARNED_MAC_TABLE_ENTRIES	1024
#define STATIC_MAC_TABLE_ENTRIES	8

/**
 * struct ksz_mac_table - Static MAC table data structure
 * @mac_addr:	MAC address to filter.
 * @vid:	VID value.
 * @fid:	FID value.
 * @ports:	Port membership.
 * @override:	Override setting.
 * @use_fid:	FID use setting.
 * @valid:	Valid setting indicating the entry is being used.
 */
struct ksz_mac_table {
	u8 mac_addr[ETH_ALEN];
	u16 vid;
	u8 fid;
	u8 ports;
	u8 override:1;
	u8 use_fid:1;
	u8 valid:1;
};

#define VLAN_TABLE_ENTRIES		16

/**
 * struct ksz_vlan_table - VLAN table data structure
 * @vid:	VID value.
 * @fid:	FID value.
 * @member:	Port membership.
 */
struct ksz_vlan_table {
	u16 vid;
	u8 fid;
	u8 member;
};

#define DIFFSERV_ENTRIES		64
#define PRIO_802_1P_ENTRIES		8
#define PRIO_QUEUES			4

#define SWITCH_PORT_NUM			2
#define TOTAL_PORT_NUM			(SWITCH_PORT_NUM + 1)
#define HOST_MASK			(1 << SWITCH_PORT_NUM)
#define PORT_MASK			7

#define MAIN_PORT			0
#define OTHER_PORT			1
#define HOST_PORT			SWITCH_PORT_NUM

#define PORT_COUNTER_NUM		0x20
#define TOTAL_PORT_COUNTER_NUM		(PORT_COUNTER_NUM + 2)

/* Hardware MIB counter indices. */
#define MIB_COUNTER_RX_LO_PRIORITY	0x00
#define MIB_COUNTER_RX_HI_PRIORITY	0x01
#define MIB_COUNTER_RX_UNDERSIZE	0x02
#define MIB_COUNTER_RX_FRAGMENT		0x03
#define MIB_COUNTER_RX_OVERSIZE		0x04
#define MIB_COUNTER_RX_JABBER		0x05
#define MIB_COUNTER_RX_SYMBOL_ERR	0x06
#define MIB_COUNTER_RX_CRC_ERR		0x07
#define MIB_COUNTER_RX_ALIGNMENT_ERR	0x08
#define MIB_COUNTER_RX_CTRL_8808	0x09
#define MIB_COUNTER_RX_PAUSE		0x0A
#define MIB_COUNTER_RX_BROADCAST	0x0B
#define MIB_COUNTER_RX_MULTICAST	0x0C
#define MIB_COUNTER_RX_UNICAST		0x0D
#define MIB_COUNTER_RX_OCTET_64		0x0E
#define MIB_COUNTER_RX_OCTET_65_127	0x0F
#define MIB_COUNTER_RX_OCTET_128_255	0x10
#define MIB_COUNTER_RX_OCTET_256_511	0x11
#define MIB_COUNTER_RX_OCTET_512_1023	0x12
#define MIB_COUNTER_RX_OCTET_1024_1522	0x13
#define MIB_COUNTER_TX_LO_PRIORITY	0x14
#define MIB_COUNTER_TX_HI_PRIORITY	0x15
#define MIB_COUNTER_TX_LATE_COLLISION	0x16
#define MIB_COUNTER_TX_PAUSE		0x17
#define MIB_COUNTER_TX_BROADCAST	0x18
#define MIB_COUNTER_TX_MULTICAST	0x19
#define MIB_COUNTER_TX_UNICAST		0x1A
#define MIB_COUNTER_TX_DEFERRED		0x1B
#define MIB_COUNTER_TX_TOTAL_COLLISION	0x1C
#define MIB_COUNTER_TX_EXCESS_COLLISION	0x1D
#define MIB_COUNTER_TX_SINGLE_COLLISION	0x1E
#define MIB_COUNTER_TX_MULTI_COLLISION	0x1F

#define MIB_COUNTER_RX_DROPPED_PACKET	0x20
#define MIB_COUNTER_TX_DROPPED_PACKET	0x21

/**
 * struct ksz_port_mib - Port MIB data structure
 * @cnt_ptr:	Current pointer to MIB counter index.
 * @link_down:	Indication the link has just gone down.
 * @state:	Connection status of the port.
 * @mib_start:	The starting counter index.  Some ports do not start at 0.
 * @counter:	64-bit MIB counter value.
 * @dropped:	Temporary buffer to remember last read packet dropped values.
 *
 * MIB counters needs to be read periodically so that counters do not get
 * overflowed and give incorrect values.  A right balance is needed to
 * satisfy this condition and not waste too much CPU time.
 *
 * It is pointless to read MIB counters when the port is disconnected.  The
 * @state provides the connection status so that MIB counters are read only
 * when the port is connected.  The @link_down indicates the port is just
 * disconnected so that all MIB counters are read one last time to update the
 * information.
 */
struct ksz_port_mib {
	u8 cnt_ptr;
	u8 link_down;
	u8 state;
	u8 mib_start;

	u64 counter[TOTAL_PORT_COUNTER_NUM];
	u32 dropped[2];
};

/**
 * struct ksz_port_cfg - Port configuration data structure
 * @vid:	VID value.
 * @member:	Port membership.
 * @port_prio:	Port priority.
 * @rx_rate:	Receive priority rate.
 * @tx_rate:	Transmit priority rate.
 * @stp_state:	Current Spanning Tree Protocol state.
 */
struct ksz_port_cfg {
	u16 vid;
	u8 member;
	u8 port_prio;
	u32 rx_rate[PRIO_QUEUES];
	u32 tx_rate[PRIO_QUEUES];
	int stp_state;
};

/**
 * struct ksz_switch - KSZ8842 switch data structure
 * @mac_table:	MAC table entries information.
 * @vlan_table:	VLAN table entries information.
 * @port_cfg:	Port configuration information.
 * @diffserv:	DiffServ priority settings.  Possible values from 6-bit of ToS
 *		(bit7 ~ bit2) field.
 * @p_802_1p:	802.1P priority settings.  Possible values from 3-bit of 802.1p
 *		Tag priority field.
 * @br_addr:	Bridge address.  Used for STP.
 * @other_addr:	Other MAC address.  Used for multiple network device mode.
 * @broad_per:	Broadcast storm percentage.
 * @member:	Current port membership.  Used for STP.
 */
struct ksz_switch {
	struct ksz_mac_table mac_table[STATIC_MAC_TABLE_ENTRIES];
	struct ksz_vlan_table vlan_table[VLAN_TABLE_ENTRIES];
	struct ksz_port_cfg port_cfg[TOTAL_PORT_NUM];

	u8 diffserv[DIFFSERV_ENTRIES];
	u8 p_802_1p[PRIO_802_1P_ENTRIES];

	u8 br_addr[ETH_ALEN];
	u8 other_addr[ETH_ALEN];

	u8 broad_per;
	u8 member;
};

#define TX_RATE_UNIT			10000

/**
 * struct ksz_port_info - Port information data structure
 * @state:	Connection status of the port.
 * @tx_rate:	Transmit rate divided by 10000 to get Mbit.
 * @duplex:	Duplex mode.
 * @advertised:	Advertised auto-negotiation setting.  Used to determine link.
 * @partner:	Auto-negotiation partner setting.  Used to determine link.
 * @port_id:	Port index to access actual hardware register.
 * @pdev:	Pointer to OS dependent network device.
 */
struct ksz_port_info {
	uint state;
	uint tx_rate;
	u8 duplex;
	u8 advertised;
	u8 partner;
	u8 port_id;
	void *pdev;
};

#define MAX_TX_HELD_SIZE		52000

/* Hardware features and bug fixes. */
#define LINK_INT_WORKING		(1 << 0)
#define SMALL_PACKET_TX_BUG		(1 << 1)
#define HALF_DUPLEX_SIGNAL_BUG		(1 << 2)
#define RX_HUGE_FRAME			(1 << 4)
#define STP_SUPPORT			(1 << 8)

/* Software overrides. */
#define PAUSE_FLOW_CTRL			(1 << 0)
#define FAST_AGING			(1 << 1)

/**
 * struct ksz_hw - KSZ884X hardware data structure
 * @io:			Virtual address assigned.
 * @ksz_switch:		Pointer to KSZ8842 switch.
 * @port_info:		Port information.
 * @port_mib:		Port MIB information.
 * @dev_count:		Number of network devices this hardware supports.
 * @dst_ports:		Destination ports in switch for transmission.
 * @id:			Hardware ID.  Used for display only.
 * @mib_cnt:		Number of MIB counters this hardware has.
 * @mib_port_cnt:	Number of ports with MIB counters.
 * @tx_cfg:		Cached transmit control settings.
 * @rx_cfg:		Cached receive control settings.
 * @intr_mask:		Current interrupt mask.
 * @intr_set:		Current interrupt set.
 * @intr_blocked:	Interrupt blocked.
 * @rx_desc_info:	Receive descriptor information.
 * @tx_desc_info:	Transmit descriptor information.
 * @tx_int_cnt:		Transmit interrupt count.  Used for TX optimization.
 * @tx_int_mask:	Transmit interrupt mask.  Used for TX optimization.
 * @tx_size:		Transmit data size.  Used for TX optimization.
 *			The maximum is defined by MAX_TX_HELD_SIZE.
 * @perm_addr:		Permanent MAC address.
 * @override_addr:	Overridden MAC address.
 * @address:		Additional MAC address entries.
 * @addr_list_size:	Additional MAC address list size.
 * @mac_override:	Indication of MAC address overridden.
 * @promiscuous:	Counter to keep track of promiscuous mode set.
 * @all_multi:		Counter to keep track of all multicast mode set.
 * @multi_list:		Multicast address entries.
 * @multi_bits:		Cached multicast hash table settings.
 * @multi_list_size:	Multicast address list size.
 * @enabled:		Indication of hardware enabled.
 * @rx_stop:		Indication of receive process stop.
 * @reserved2:		none
 * @features:		Hardware features to enable.
 * @overrides:		Hardware features to override.
 * @parent:		Pointer to parent, network device private structure.
1194 */ 1195 struct ksz_hw { 1196 void __iomem *io; 1197 1198 struct ksz_switch *ksz_switch; 1199 struct ksz_port_info port_info[SWITCH_PORT_NUM]; 1200 struct ksz_port_mib port_mib[TOTAL_PORT_NUM]; 1201 int dev_count; 1202 int dst_ports; 1203 int id; 1204 int mib_cnt; 1205 int mib_port_cnt; 1206 1207 u32 tx_cfg; 1208 u32 rx_cfg; 1209 u32 intr_mask; 1210 u32 intr_set; 1211 uint intr_blocked; 1212 1213 struct ksz_desc_info rx_desc_info; 1214 struct ksz_desc_info tx_desc_info; 1215 1216 int tx_int_cnt; 1217 int tx_int_mask; 1218 int tx_size; 1219 1220 u8 perm_addr[ETH_ALEN]; 1221 u8 override_addr[ETH_ALEN]; 1222 u8 address[ADDITIONAL_ENTRIES][ETH_ALEN]; 1223 u8 addr_list_size; 1224 u8 mac_override; 1225 u8 promiscuous; 1226 u8 all_multi; 1227 u8 multi_list[MAX_MULTICAST_LIST][ETH_ALEN]; 1228 u8 multi_bits[HW_MULTICAST_SIZE]; 1229 u8 multi_list_size; 1230 1231 u8 enabled; 1232 u8 rx_stop; 1233 u8 reserved2[1]; 1234 1235 uint features; 1236 uint overrides; 1237 1238 void *parent; 1239 }; 1240 1241 enum { 1242 PHY_NO_FLOW_CTRL, 1243 PHY_FLOW_CTRL, 1244 PHY_TX_ONLY, 1245 PHY_RX_ONLY 1246 }; 1247 1248 /** 1249 * struct ksz_port - Virtual port data structure 1250 * @duplex: Duplex mode setting. 1 for half duplex, 2 for full 1251 * duplex, and 0 for auto, which normally results in full 1252 * duplex. 1253 * @speed: Speed setting. 10 for 10 Mbit, 100 for 100 Mbit, and 1254 * 0 for auto, which normally results in 100 Mbit. 1255 * @force_link: Force link setting. 0 for auto-negotiation, and 1 for 1256 * force. 1257 * @flow_ctrl: Flow control setting. PHY_NO_FLOW_CTRL for no flow 1258 * control, and PHY_FLOW_CTRL for flow control. 1259 * PHY_TX_ONLY and PHY_RX_ONLY are not supported for 100 1260 * Mbit PHY. 1261 * @first_port: Index of first port this port supports. 1262 * @mib_port_cnt: Number of ports with MIB counters. 1263 * @port_cnt: Number of ports this port supports. 1264 * @counter: Port statistics counter. 1265 * @hw: Pointer to hardware structure. 
1266 * @linked: Pointer to port information linked to this port. 1267 */ 1268 struct ksz_port { 1269 u8 duplex; 1270 u8 speed; 1271 u8 force_link; 1272 u8 flow_ctrl; 1273 1274 int first_port; 1275 int mib_port_cnt; 1276 int port_cnt; 1277 u64 counter[OID_COUNTER_LAST]; 1278 1279 struct ksz_hw *hw; 1280 struct ksz_port_info *linked; 1281 }; 1282 1283 /** 1284 * struct ksz_timer_info - Timer information data structure 1285 * @timer: Kernel timer. 1286 * @cnt: Running timer counter. 1287 * @max: Number of times to run timer; -1 for infinity. 1288 * @period: Timer period in jiffies. 1289 */ 1290 struct ksz_timer_info { 1291 struct timer_list timer; 1292 int cnt; 1293 int max; 1294 int period; 1295 }; 1296 1297 /** 1298 * struct ksz_shared_mem - OS dependent shared memory data structure 1299 * @dma_addr: Physical DMA address allocated. 1300 * @alloc_size: Allocation size. 1301 * @phys: Actual physical address used. 1302 * @alloc_virt: Virtual address allocated. 1303 * @virt: Actual virtual address used. 1304 */ 1305 struct ksz_shared_mem { 1306 dma_addr_t dma_addr; 1307 uint alloc_size; 1308 uint phys; 1309 u8 *alloc_virt; 1310 u8 *virt; 1311 }; 1312 1313 /** 1314 * struct ksz_counter_info - OS dependent counter information data structure 1315 * @counter: Wait queue to wakeup after counters are read. 1316 * @time: Next time in jiffies to read counter. 1317 * @read: Indication of counters read in full or not. 1318 */ 1319 struct ksz_counter_info { 1320 wait_queue_head_t counter; 1321 unsigned long time; 1322 int read; 1323 }; 1324 1325 /** 1326 * struct dev_info - Network device information data structure 1327 * @dev: Pointer to network device. 1328 * @pdev: Pointer to PCI device. 1329 * @hw: Hardware structure. 1330 * @desc_pool: Physical memory used for descriptor pool. 1331 * @hwlock: Spinlock to prevent hardware from accessing. 1332 * @lock: Mutex lock to prevent device from accessing. 1333 * @dev_rcv: Receive process function used. 
1334 * @last_skb: Socket buffer allocated for descriptor rx fragments. 1335 * @skb_index: Buffer index for receiving fragments. 1336 * @skb_len: Buffer length for receiving fragments. 1337 * @mib_read: Workqueue to read MIB counters. 1338 * @mib_timer_info: Timer to read MIB counters. 1339 * @counter: Used for MIB reading. 1340 * @mtu: Current MTU used. The default is REGULAR_RX_BUF_SIZE; 1341 * the maximum is MAX_RX_BUF_SIZE. 1342 * @opened: Counter to keep track of device open. 1343 * @rx_tasklet: Receive processing tasklet. 1344 * @tx_tasklet: Transmit processing tasklet. 1345 * @wol_enable: Wake-on-LAN enable set by ethtool. 1346 * @wol_support: Wake-on-LAN support used by ethtool. 1347 * @pme_wait: Used for KSZ8841 power management. 1348 */ 1349 struct dev_info { 1350 struct net_device *dev; 1351 struct pci_dev *pdev; 1352 1353 struct ksz_hw hw; 1354 struct ksz_shared_mem desc_pool; 1355 1356 spinlock_t hwlock; 1357 struct mutex lock; 1358 1359 int (*dev_rcv)(struct dev_info *); 1360 1361 struct sk_buff *last_skb; 1362 int skb_index; 1363 int skb_len; 1364 1365 struct work_struct mib_read; 1366 struct ksz_timer_info mib_timer_info; 1367 struct ksz_counter_info counter[TOTAL_PORT_NUM]; 1368 1369 int mtu; 1370 int opened; 1371 1372 struct tasklet_struct rx_tasklet; 1373 struct tasklet_struct tx_tasklet; 1374 1375 int wol_enable; 1376 int wol_support; 1377 unsigned long pme_wait; 1378 }; 1379 1380 /** 1381 * struct dev_priv - Network device private data structure 1382 * @adapter: Adapter device information. 1383 * @port: Port information. 1384 * @monitor_timer_info: Timer to monitor ports. 1385 * @proc_sem: Semaphore for proc accessing. 1386 * @id: Device ID. 1387 * @mii_if: MII interface information. 1388 * @advertising: Temporary variable to store advertised settings. 1389 * @msg_enable: The message flags controlling driver output. 1390 * @media_state: The connection status of the device. 1391 * @multicast: The all multicast state of the device. 
1392 * @promiscuous: The promiscuous state of the device. 1393 */ 1394 struct dev_priv { 1395 struct dev_info *adapter; 1396 struct ksz_port port; 1397 struct ksz_timer_info monitor_timer_info; 1398 1399 struct semaphore proc_sem; 1400 int id; 1401 1402 struct mii_if_info mii_if; 1403 u32 advertising; 1404 1405 u32 msg_enable; 1406 int media_state; 1407 int multicast; 1408 int promiscuous; 1409 }; 1410 1411 #define DRV_NAME "KSZ884X PCI" 1412 #define DEVICE_NAME "KSZ884x PCI" 1413 #define DRV_VERSION "1.0.0" 1414 #define DRV_RELDATE "Feb 8, 2010" 1415 1416 static char version[] = 1417 "Micrel " DEVICE_NAME " " DRV_VERSION " (" DRV_RELDATE ")"; 1418 1419 static u8 DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x88, 0x42, 0x01 }; 1420 1421 /* 1422 * Interrupt processing primary routines 1423 */ 1424 1425 static inline void hw_ack_intr(struct ksz_hw *hw, uint interrupt) 1426 { 1427 writel(interrupt, hw->io + KS884X_INTERRUPTS_STATUS); 1428 } 1429 1430 static inline void hw_dis_intr(struct ksz_hw *hw) 1431 { 1432 hw->intr_blocked = hw->intr_mask; 1433 writel(0, hw->io + KS884X_INTERRUPTS_ENABLE); 1434 hw->intr_set = readl(hw->io + KS884X_INTERRUPTS_ENABLE); 1435 } 1436 1437 static inline void hw_set_intr(struct ksz_hw *hw, uint interrupt) 1438 { 1439 hw->intr_set = interrupt; 1440 writel(interrupt, hw->io + KS884X_INTERRUPTS_ENABLE); 1441 } 1442 1443 static inline void hw_ena_intr(struct ksz_hw *hw) 1444 { 1445 hw->intr_blocked = 0; 1446 hw_set_intr(hw, hw->intr_mask); 1447 } 1448 1449 static inline void hw_dis_intr_bit(struct ksz_hw *hw, uint bit) 1450 { 1451 hw->intr_mask &= ~(bit); 1452 } 1453 1454 static inline void hw_turn_off_intr(struct ksz_hw *hw, uint interrupt) 1455 { 1456 u32 read_intr; 1457 1458 read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE); 1459 hw->intr_set = read_intr & ~interrupt; 1460 writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE); 1461 hw_dis_intr_bit(hw, interrupt); 1462 } 1463 1464 /** 1465 * hw_turn_on_intr - turn on specified 
interrupts 1466 * @hw: The hardware instance. 1467 * @bit: The interrupt bits to be on. 1468 * 1469 * This routine turns on the specified interrupts in the interrupt mask so that 1470 * those interrupts will be enabled. 1471 */ 1472 static void hw_turn_on_intr(struct ksz_hw *hw, u32 bit) 1473 { 1474 hw->intr_mask |= bit; 1475 1476 if (!hw->intr_blocked) 1477 hw_set_intr(hw, hw->intr_mask); 1478 } 1479 1480 static inline void hw_read_intr(struct ksz_hw *hw, uint *status) 1481 { 1482 *status = readl(hw->io + KS884X_INTERRUPTS_STATUS); 1483 *status = *status & hw->intr_set; 1484 } 1485 1486 static inline void hw_restore_intr(struct ksz_hw *hw, uint interrupt) 1487 { 1488 if (interrupt) 1489 hw_ena_intr(hw); 1490 } 1491 1492 /** 1493 * hw_block_intr - block hardware interrupts 1494 * @hw: The hardware instance. 1495 * 1496 * This function blocks all interrupts of the hardware and returns the current 1497 * interrupt enable mask so that interrupts can be restored later. 1498 * 1499 * Return the current interrupt enable mask. 
1500 */ 1501 static uint hw_block_intr(struct ksz_hw *hw) 1502 { 1503 uint interrupt = 0; 1504 1505 if (!hw->intr_blocked) { 1506 hw_dis_intr(hw); 1507 interrupt = hw->intr_blocked; 1508 } 1509 return interrupt; 1510 } 1511 1512 /* 1513 * Hardware descriptor routines 1514 */ 1515 1516 static inline void reset_desc(struct ksz_desc *desc, union desc_stat status) 1517 { 1518 status.rx.hw_owned = 0; 1519 desc->phw->ctrl.data = cpu_to_le32(status.data); 1520 } 1521 1522 static inline void release_desc(struct ksz_desc *desc) 1523 { 1524 desc->sw.ctrl.tx.hw_owned = 1; 1525 if (desc->sw.buf_size != desc->sw.buf.data) { 1526 desc->sw.buf_size = desc->sw.buf.data; 1527 desc->phw->buf.data = cpu_to_le32(desc->sw.buf.data); 1528 } 1529 desc->phw->ctrl.data = cpu_to_le32(desc->sw.ctrl.data); 1530 } 1531 1532 static void get_rx_pkt(struct ksz_desc_info *info, struct ksz_desc **desc) 1533 { 1534 *desc = &info->ring[info->last]; 1535 info->last++; 1536 info->last &= info->mask; 1537 info->avail--; 1538 (*desc)->sw.buf.data &= ~KS_DESC_RX_MASK; 1539 } 1540 1541 static inline void set_rx_buf(struct ksz_desc *desc, u32 addr) 1542 { 1543 desc->phw->addr = cpu_to_le32(addr); 1544 } 1545 1546 static inline void set_rx_len(struct ksz_desc *desc, u32 len) 1547 { 1548 desc->sw.buf.rx.buf_size = len; 1549 } 1550 1551 static inline void get_tx_pkt(struct ksz_desc_info *info, 1552 struct ksz_desc **desc) 1553 { 1554 *desc = &info->ring[info->next]; 1555 info->next++; 1556 info->next &= info->mask; 1557 info->avail--; 1558 (*desc)->sw.buf.data &= ~KS_DESC_TX_MASK; 1559 } 1560 1561 static inline void set_tx_buf(struct ksz_desc *desc, u32 addr) 1562 { 1563 desc->phw->addr = cpu_to_le32(addr); 1564 } 1565 1566 static inline void set_tx_len(struct ksz_desc *desc, u32 len) 1567 { 1568 desc->sw.buf.tx.buf_size = len; 1569 } 1570 1571 /* Switch functions */ 1572 1573 #define TABLE_READ 0x10 1574 #define TABLE_SEL_SHIFT 2 1575 1576 #define HW_DELAY(hw, reg) \ 1577 do { \ 1578 readw(hw->io + reg); \ 
1579 } while (0) 1580 1581 /** 1582 * sw_r_table - read 4 bytes of data from switch table 1583 * @hw: The hardware instance. 1584 * @table: The table selector. 1585 * @addr: The address of the table entry. 1586 * @data: Buffer to store the read data. 1587 * 1588 * This routine reads 4 bytes of data from the table of the switch. 1589 * Hardware interrupts are disabled to minimize corruption of read data. 1590 */ 1591 static void sw_r_table(struct ksz_hw *hw, int table, u16 addr, u32 *data) 1592 { 1593 u16 ctrl_addr; 1594 uint interrupt; 1595 1596 ctrl_addr = (((table << TABLE_SEL_SHIFT) | TABLE_READ) << 8) | addr; 1597 1598 interrupt = hw_block_intr(hw); 1599 1600 writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET); 1601 HW_DELAY(hw, KS884X_IACR_OFFSET); 1602 *data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET); 1603 1604 hw_restore_intr(hw, interrupt); 1605 } 1606 1607 /** 1608 * sw_w_table_64 - write 8 bytes of data to the switch table 1609 * @hw: The hardware instance. 1610 * @table: The table selector. 1611 * @addr: The address of the table entry. 1612 * @data_hi: The high part of data to be written (bit63 ~ bit32). 1613 * @data_lo: The low part of data to be written (bit31 ~ bit0). 1614 * 1615 * This routine writes 8 bytes of data to the table of the switch. 1616 * Hardware interrupts are disabled to minimize corruption of written data. 1617 */ 1618 static void sw_w_table_64(struct ksz_hw *hw, int table, u16 addr, u32 data_hi, 1619 u32 data_lo) 1620 { 1621 u16 ctrl_addr; 1622 uint interrupt; 1623 1624 ctrl_addr = ((table << TABLE_SEL_SHIFT) << 8) | addr; 1625 1626 interrupt = hw_block_intr(hw); 1627 1628 writel(data_hi, hw->io + KS884X_ACC_DATA_4_OFFSET); 1629 writel(data_lo, hw->io + KS884X_ACC_DATA_0_OFFSET); 1630 1631 writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET); 1632 HW_DELAY(hw, KS884X_IACR_OFFSET); 1633 1634 hw_restore_intr(hw, interrupt); 1635 } 1636 1637 /** 1638 * sw_w_sta_mac_table - write to the static MAC table 1639 * @hw: The hardware instance. 
1640 * @addr: The address of the table entry. 1641 * @mac_addr: The MAC address. 1642 * @ports: The port members. 1643 * @override: The flag to override the port receive/transmit settings. 1644 * @valid: The flag to indicate entry is valid. 1645 * @use_fid: The flag to indicate the FID is valid. 1646 * @fid: The FID value. 1647 * 1648 * This routine writes an entry of the static MAC table of the switch. It 1649 * calls sw_w_table_64() to write the data. 1650 */ 1651 static void sw_w_sta_mac_table(struct ksz_hw *hw, u16 addr, u8 *mac_addr, 1652 u8 ports, int override, int valid, int use_fid, u8 fid) 1653 { 1654 u32 data_hi; 1655 u32 data_lo; 1656 1657 data_lo = ((u32) mac_addr[2] << 24) | 1658 ((u32) mac_addr[3] << 16) | 1659 ((u32) mac_addr[4] << 8) | mac_addr[5]; 1660 data_hi = ((u32) mac_addr[0] << 8) | mac_addr[1]; 1661 data_hi |= (u32) ports << STATIC_MAC_FWD_PORTS_SHIFT; 1662 1663 if (override) 1664 data_hi |= STATIC_MAC_TABLE_OVERRIDE; 1665 if (use_fid) { 1666 data_hi |= STATIC_MAC_TABLE_USE_FID; 1667 data_hi |= (u32) fid << STATIC_MAC_FID_SHIFT; 1668 } 1669 if (valid) 1670 data_hi |= STATIC_MAC_TABLE_VALID; 1671 1672 sw_w_table_64(hw, TABLE_STATIC_MAC, addr, data_hi, data_lo); 1673 } 1674 1675 /** 1676 * sw_r_vlan_table - read from the VLAN table 1677 * @hw: The hardware instance. 1678 * @addr: The address of the table entry. 1679 * @vid: Buffer to store the VID. 1680 * @fid: Buffer to store the VID. 1681 * @member: Buffer to store the port membership. 1682 * 1683 * This function reads an entry of the VLAN table of the switch. It calls 1684 * sw_r_table() to get the data. 1685 * 1686 * Return 0 if the entry is valid; otherwise -1. 
1687 */ 1688 static int sw_r_vlan_table(struct ksz_hw *hw, u16 addr, u16 *vid, u8 *fid, 1689 u8 *member) 1690 { 1691 u32 data; 1692 1693 sw_r_table(hw, TABLE_VLAN, addr, &data); 1694 if (data & VLAN_TABLE_VALID) { 1695 *vid = (u16)(data & VLAN_TABLE_VID); 1696 *fid = (u8)((data & VLAN_TABLE_FID) >> VLAN_TABLE_FID_SHIFT); 1697 *member = (u8)((data & VLAN_TABLE_MEMBERSHIP) >> 1698 VLAN_TABLE_MEMBERSHIP_SHIFT); 1699 return 0; 1700 } 1701 return -1; 1702 } 1703 1704 /** 1705 * port_r_mib_cnt - read MIB counter 1706 * @hw: The hardware instance. 1707 * @port: The port index. 1708 * @addr: The address of the counter. 1709 * @cnt: Buffer to store the counter. 1710 * 1711 * This routine reads a MIB counter of the port. 1712 * Hardware interrupts are disabled to minimize corruption of read data. 1713 */ 1714 static void port_r_mib_cnt(struct ksz_hw *hw, int port, u16 addr, u64 *cnt) 1715 { 1716 u32 data; 1717 u16 ctrl_addr; 1718 uint interrupt; 1719 int timeout; 1720 1721 ctrl_addr = addr + PORT_COUNTER_NUM * port; 1722 1723 interrupt = hw_block_intr(hw); 1724 1725 ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ) << 8); 1726 writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET); 1727 HW_DELAY(hw, KS884X_IACR_OFFSET); 1728 1729 for (timeout = 100; timeout > 0; timeout--) { 1730 data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET); 1731 1732 if (data & MIB_COUNTER_VALID) { 1733 if (data & MIB_COUNTER_OVERFLOW) 1734 *cnt += MIB_COUNTER_VALUE + 1; 1735 *cnt += data & MIB_COUNTER_VALUE; 1736 break; 1737 } 1738 } 1739 1740 hw_restore_intr(hw, interrupt); 1741 } 1742 1743 /** 1744 * port_r_mib_pkt - read dropped packet counts 1745 * @hw: The hardware instance. 1746 * @port: The port index. 1747 * @last: last one 1748 * @cnt: Buffer to store the receive and transmit dropped packet counts. 1749 * 1750 * This routine reads the dropped packet counts of the port. 1751 * Hardware interrupts are disabled to minimize corruption of read data. 
1752 */ 1753 static void port_r_mib_pkt(struct ksz_hw *hw, int port, u32 *last, u64 *cnt) 1754 { 1755 u32 cur; 1756 u32 data; 1757 u16 ctrl_addr; 1758 uint interrupt; 1759 int index; 1760 1761 index = KS_MIB_PACKET_DROPPED_RX_0 + port; 1762 do { 1763 interrupt = hw_block_intr(hw); 1764 1765 ctrl_addr = (u16) index; 1766 ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ) 1767 << 8); 1768 writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET); 1769 HW_DELAY(hw, KS884X_IACR_OFFSET); 1770 data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET); 1771 1772 hw_restore_intr(hw, interrupt); 1773 1774 data &= MIB_PACKET_DROPPED; 1775 cur = *last; 1776 if (data != cur) { 1777 *last = data; 1778 if (data < cur) 1779 data += MIB_PACKET_DROPPED + 1; 1780 data -= cur; 1781 *cnt += data; 1782 } 1783 ++last; 1784 ++cnt; 1785 index -= KS_MIB_PACKET_DROPPED_TX - 1786 KS_MIB_PACKET_DROPPED_TX_0 + 1; 1787 } while (index >= KS_MIB_PACKET_DROPPED_TX_0 + port); 1788 } 1789 1790 /** 1791 * port_r_cnt - read MIB counters periodically 1792 * @hw: The hardware instance. 1793 * @port: The port index. 1794 * 1795 * This routine is used to read the counters of the port periodically to avoid 1796 * counter overflow. The hardware should be acquired first before calling this 1797 * routine. 1798 * 1799 * Return non-zero when not all counters not read. 1800 */ 1801 static int port_r_cnt(struct ksz_hw *hw, int port) 1802 { 1803 struct ksz_port_mib *mib = &hw->port_mib[port]; 1804 1805 if (mib->mib_start < PORT_COUNTER_NUM) 1806 while (mib->cnt_ptr < PORT_COUNTER_NUM) { 1807 port_r_mib_cnt(hw, port, mib->cnt_ptr, 1808 &mib->counter[mib->cnt_ptr]); 1809 ++mib->cnt_ptr; 1810 } 1811 if (hw->mib_cnt > PORT_COUNTER_NUM) 1812 port_r_mib_pkt(hw, port, mib->dropped, 1813 &mib->counter[PORT_COUNTER_NUM]); 1814 mib->cnt_ptr = 0; 1815 return 0; 1816 } 1817 1818 /** 1819 * port_init_cnt - initialize MIB counter values 1820 * @hw: The hardware instance. 1821 * @port: The port index. 
1822 * 1823 * This routine is used to initialize all counters to zero if the hardware 1824 * cannot do it after reset. 1825 */ 1826 static void port_init_cnt(struct ksz_hw *hw, int port) 1827 { 1828 struct ksz_port_mib *mib = &hw->port_mib[port]; 1829 1830 mib->cnt_ptr = 0; 1831 if (mib->mib_start < PORT_COUNTER_NUM) 1832 do { 1833 port_r_mib_cnt(hw, port, mib->cnt_ptr, 1834 &mib->counter[mib->cnt_ptr]); 1835 ++mib->cnt_ptr; 1836 } while (mib->cnt_ptr < PORT_COUNTER_NUM); 1837 if (hw->mib_cnt > PORT_COUNTER_NUM) 1838 port_r_mib_pkt(hw, port, mib->dropped, 1839 &mib->counter[PORT_COUNTER_NUM]); 1840 memset((void *) mib->counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM); 1841 mib->cnt_ptr = 0; 1842 } 1843 1844 /* 1845 * Port functions 1846 */ 1847 1848 /** 1849 * port_cfg - set port register bits 1850 * @hw: The hardware instance. 1851 * @port: The port index. 1852 * @offset: The offset of the port register. 1853 * @bits: The data bits to set. 1854 * @set: The flag indicating whether the bits are to be set or not. 1855 * 1856 * This routine sets or resets the specified bits of the port register. 1857 */ 1858 static void port_cfg(struct ksz_hw *hw, int port, int offset, u16 bits, 1859 int set) 1860 { 1861 u32 addr; 1862 u16 data; 1863 1864 PORT_CTRL_ADDR(port, addr); 1865 addr += offset; 1866 data = readw(hw->io + addr); 1867 if (set) 1868 data |= bits; 1869 else 1870 data &= ~bits; 1871 writew(data, hw->io + addr); 1872 } 1873 1874 /** 1875 * port_r8 - read byte from port register 1876 * @hw: The hardware instance. 1877 * @port: The port index. 1878 * @offset: The offset of the port register. 1879 * @data: Buffer to store the data. 1880 * 1881 * This routine reads a byte from the port register. 1882 */ 1883 static void port_r8(struct ksz_hw *hw, int port, int offset, u8 *data) 1884 { 1885 u32 addr; 1886 1887 PORT_CTRL_ADDR(port, addr); 1888 addr += offset; 1889 *data = readb(hw->io + addr); 1890 } 1891 1892 /** 1893 * port_r16 - read word from port register. 
1894 * @hw: The hardware instance. 1895 * @port: The port index. 1896 * @offset: The offset of the port register. 1897 * @data: Buffer to store the data. 1898 * 1899 * This routine reads a word from the port register. 1900 */ 1901 static void port_r16(struct ksz_hw *hw, int port, int offset, u16 *data) 1902 { 1903 u32 addr; 1904 1905 PORT_CTRL_ADDR(port, addr); 1906 addr += offset; 1907 *data = readw(hw->io + addr); 1908 } 1909 1910 /** 1911 * port_w16 - write word to port register. 1912 * @hw: The hardware instance. 1913 * @port: The port index. 1914 * @offset: The offset of the port register. 1915 * @data: Data to write. 1916 * 1917 * This routine writes a word to the port register. 1918 */ 1919 static void port_w16(struct ksz_hw *hw, int port, int offset, u16 data) 1920 { 1921 u32 addr; 1922 1923 PORT_CTRL_ADDR(port, addr); 1924 addr += offset; 1925 writew(data, hw->io + addr); 1926 } 1927 1928 /** 1929 * sw_chk - check switch register bits 1930 * @hw: The hardware instance. 1931 * @addr: The address of the switch register. 1932 * @bits: The data bits to check. 1933 * 1934 * This function checks whether the specified bits of the switch register are 1935 * set or not. 1936 * 1937 * Return 0 if the bits are not set. 1938 */ 1939 static int sw_chk(struct ksz_hw *hw, u32 addr, u16 bits) 1940 { 1941 u16 data; 1942 1943 data = readw(hw->io + addr); 1944 return (data & bits) == bits; 1945 } 1946 1947 /** 1948 * sw_cfg - set switch register bits 1949 * @hw: The hardware instance. 1950 * @addr: The address of the switch register. 1951 * @bits: The data bits to set. 1952 * @set: The flag indicating whether the bits are to be set or not. 1953 * 1954 * This function sets or resets the specified bits of the switch register. 
1955 */ 1956 static void sw_cfg(struct ksz_hw *hw, u32 addr, u16 bits, int set) 1957 { 1958 u16 data; 1959 1960 data = readw(hw->io + addr); 1961 if (set) 1962 data |= bits; 1963 else 1964 data &= ~bits; 1965 writew(data, hw->io + addr); 1966 } 1967 1968 /* Bandwidth */ 1969 1970 static inline void port_cfg_broad_storm(struct ksz_hw *hw, int p, int set) 1971 { 1972 port_cfg(hw, p, 1973 KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM, set); 1974 } 1975 1976 /* Driver set switch broadcast storm protection at 10% rate. */ 1977 #define BROADCAST_STORM_PROTECTION_RATE 10 1978 1979 /* 148,800 frames * 67 ms / 100 */ 1980 #define BROADCAST_STORM_VALUE 9969 1981 1982 /** 1983 * sw_cfg_broad_storm - configure broadcast storm threshold 1984 * @hw: The hardware instance. 1985 * @percent: Broadcast storm threshold in percent of transmit rate. 1986 * 1987 * This routine configures the broadcast storm threshold of the switch. 1988 */ 1989 static void sw_cfg_broad_storm(struct ksz_hw *hw, u8 percent) 1990 { 1991 u16 data; 1992 u32 value = ((u32) BROADCAST_STORM_VALUE * (u32) percent / 100); 1993 1994 if (value > BROADCAST_STORM_RATE) 1995 value = BROADCAST_STORM_RATE; 1996 1997 data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET); 1998 data &= ~(BROADCAST_STORM_RATE_LO | BROADCAST_STORM_RATE_HI); 1999 data |= ((value & 0x00FF) << 8) | ((value & 0xFF00) >> 8); 2000 writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET); 2001 } 2002 2003 /** 2004 * sw_get_broad_storm - get broadcast storm threshold 2005 * @hw: The hardware instance. 2006 * @percent: Buffer to store the broadcast storm threshold percentage. 2007 * 2008 * This routine retrieves the broadcast storm threshold of the switch. 
2009 */ 2010 static void sw_get_broad_storm(struct ksz_hw *hw, u8 *percent) 2011 { 2012 int num; 2013 u16 data; 2014 2015 data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET); 2016 num = (data & BROADCAST_STORM_RATE_HI); 2017 num <<= 8; 2018 num |= (data & BROADCAST_STORM_RATE_LO) >> 8; 2019 num = DIV_ROUND_CLOSEST(num * 100, BROADCAST_STORM_VALUE); 2020 *percent = (u8) num; 2021 } 2022 2023 /** 2024 * sw_dis_broad_storm - disable broadstorm 2025 * @hw: The hardware instance. 2026 * @port: The port index. 2027 * 2028 * This routine disables the broadcast storm limit function of the switch. 2029 */ 2030 static void sw_dis_broad_storm(struct ksz_hw *hw, int port) 2031 { 2032 port_cfg_broad_storm(hw, port, 0); 2033 } 2034 2035 /** 2036 * sw_ena_broad_storm - enable broadcast storm 2037 * @hw: The hardware instance. 2038 * @port: The port index. 2039 * 2040 * This routine enables the broadcast storm limit function of the switch. 2041 */ 2042 static void sw_ena_broad_storm(struct ksz_hw *hw, int port) 2043 { 2044 sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per); 2045 port_cfg_broad_storm(hw, port, 1); 2046 } 2047 2048 /** 2049 * sw_init_broad_storm - initialize broadcast storm 2050 * @hw: The hardware instance. 2051 * 2052 * This routine initializes the broadcast storm limit function of the switch. 2053 */ 2054 static void sw_init_broad_storm(struct ksz_hw *hw) 2055 { 2056 int port; 2057 2058 hw->ksz_switch->broad_per = 1; 2059 sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per); 2060 for (port = 0; port < TOTAL_PORT_NUM; port++) 2061 sw_dis_broad_storm(hw, port); 2062 sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, MULTICAST_STORM_DISABLE, 1); 2063 } 2064 2065 /** 2066 * hw_cfg_broad_storm - configure broadcast storm 2067 * @hw: The hardware instance. 2068 * @percent: Broadcast storm threshold in percent of transmit rate. 2069 * 2070 * This routine configures the broadcast storm threshold of the switch. 2071 * It is called by user functions. The hardware should be acquired first. 
2072 */ 2073 static void hw_cfg_broad_storm(struct ksz_hw *hw, u8 percent) 2074 { 2075 if (percent > 100) 2076 percent = 100; 2077 2078 sw_cfg_broad_storm(hw, percent); 2079 sw_get_broad_storm(hw, &percent); 2080 hw->ksz_switch->broad_per = percent; 2081 } 2082 2083 /** 2084 * sw_dis_prio_rate - disable switch priority rate 2085 * @hw: The hardware instance. 2086 * @port: The port index. 2087 * 2088 * This routine disables the priority rate function of the switch. 2089 */ 2090 static void sw_dis_prio_rate(struct ksz_hw *hw, int port) 2091 { 2092 u32 addr; 2093 2094 PORT_CTRL_ADDR(port, addr); 2095 addr += KS8842_PORT_IN_RATE_OFFSET; 2096 writel(0, hw->io + addr); 2097 } 2098 2099 /** 2100 * sw_init_prio_rate - initialize switch priority rate 2101 * @hw: The hardware instance. 2102 * 2103 * This routine initializes the priority rate function of the switch. 2104 */ 2105 static void sw_init_prio_rate(struct ksz_hw *hw) 2106 { 2107 int port; 2108 int prio; 2109 struct ksz_switch *sw = hw->ksz_switch; 2110 2111 for (port = 0; port < TOTAL_PORT_NUM; port++) { 2112 for (prio = 0; prio < PRIO_QUEUES; prio++) { 2113 sw->port_cfg[port].rx_rate[prio] = 2114 sw->port_cfg[port].tx_rate[prio] = 0; 2115 } 2116 sw_dis_prio_rate(hw, port); 2117 } 2118 } 2119 2120 /* Communication */ 2121 2122 static inline void port_cfg_back_pressure(struct ksz_hw *hw, int p, int set) 2123 { 2124 port_cfg(hw, p, 2125 KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE, set); 2126 } 2127 2128 /* Mirroring */ 2129 2130 static inline void port_cfg_mirror_sniffer(struct ksz_hw *hw, int p, int set) 2131 { 2132 port_cfg(hw, p, 2133 KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_SNIFFER, set); 2134 } 2135 2136 static inline void port_cfg_mirror_rx(struct ksz_hw *hw, int p, int set) 2137 { 2138 port_cfg(hw, p, 2139 KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_RX, set); 2140 } 2141 2142 static inline void port_cfg_mirror_tx(struct ksz_hw *hw, int p, int set) 2143 { 2144 port_cfg(hw, p, 2145 KS8842_PORT_CTRL_2_OFFSET, 
PORT_MIRROR_TX, set); 2146 } 2147 2148 static inline void sw_cfg_mirror_rx_tx(struct ksz_hw *hw, int set) 2149 { 2150 sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, SWITCH_MIRROR_RX_TX, set); 2151 } 2152 2153 static void sw_init_mirror(struct ksz_hw *hw) 2154 { 2155 int port; 2156 2157 for (port = 0; port < TOTAL_PORT_NUM; port++) { 2158 port_cfg_mirror_sniffer(hw, port, 0); 2159 port_cfg_mirror_rx(hw, port, 0); 2160 port_cfg_mirror_tx(hw, port, 0); 2161 } 2162 sw_cfg_mirror_rx_tx(hw, 0); 2163 } 2164 2165 /* Priority */ 2166 2167 static inline void port_cfg_diffserv(struct ksz_hw *hw, int p, int set) 2168 { 2169 port_cfg(hw, p, 2170 KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE, set); 2171 } 2172 2173 static inline void port_cfg_802_1p(struct ksz_hw *hw, int p, int set) 2174 { 2175 port_cfg(hw, p, 2176 KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE, set); 2177 } 2178 2179 static inline void port_cfg_replace_vid(struct ksz_hw *hw, int p, int set) 2180 { 2181 port_cfg(hw, p, 2182 KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING, set); 2183 } 2184 2185 static inline void port_cfg_prio(struct ksz_hw *hw, int p, int set) 2186 { 2187 port_cfg(hw, p, 2188 KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE, set); 2189 } 2190 2191 /** 2192 * sw_dis_diffserv - disable switch DiffServ priority 2193 * @hw: The hardware instance. 2194 * @port: The port index. 2195 * 2196 * This routine disables the DiffServ priority function of the switch. 2197 */ 2198 static void sw_dis_diffserv(struct ksz_hw *hw, int port) 2199 { 2200 port_cfg_diffserv(hw, port, 0); 2201 } 2202 2203 /** 2204 * sw_dis_802_1p - disable switch 802.1p priority 2205 * @hw: The hardware instance. 2206 * @port: The port index. 2207 * 2208 * This routine disables the 802.1p priority function of the switch. 2209 */ 2210 static void sw_dis_802_1p(struct ksz_hw *hw, int port) 2211 { 2212 port_cfg_802_1p(hw, port, 0); 2213 } 2214 2215 /** 2216 * sw_cfg_replace_null_vid - 2217 * @hw: The hardware instance. 
 * @set: The flag to disable or enable.
 *
 */
static void sw_cfg_replace_null_vid(struct ksz_hw *hw, int set)
{
	sw_cfg(hw, KS8842_SWITCH_CTRL_3_OFFSET, SWITCH_REPLACE_NULL_VID, set);
}

/**
 * sw_cfg_replace_vid - enable switch 802.1p priority re-mapping
 * @hw: The hardware instance.
 * @port: The port index.
 * @set: The flag to disable or enable.
 *
 * This routine enables the 802.1p priority re-mapping function of the switch.
 * That allows 802.1p priority field to be replaced with the port's default
 * tag's priority value if the ingress packet's 802.1p priority has a higher
 * priority than port's default tag's priority.
 */
static void sw_cfg_replace_vid(struct ksz_hw *hw, int port, int set)
{
	port_cfg_replace_vid(hw, port, set);
}

/**
 * sw_cfg_port_based - configure switch port based priority
 * @hw: The hardware instance.
 * @port: The port index.
 * @prio: The priority to set.
 *
 * This routine configures the port based priority of the switch.  The value
 * is clamped to the hardware maximum, cached in the software switch state,
 * and programmed with a read-modify-write of the port control register.
 */
static void sw_cfg_port_based(struct ksz_hw *hw, int port, u8 prio)
{
	u16 data;

	if (prio > PORT_BASED_PRIORITY_BASE)
		prio = PORT_BASED_PRIORITY_BASE;

	hw->ksz_switch->port_cfg[port].port_prio = prio;

	port_r16(hw, port, KS8842_PORT_CTRL_1_OFFSET, &data);
	data &= ~PORT_BASED_PRIORITY_MASK;
	data |= prio << PORT_BASED_PRIORITY_SHIFT;
	port_w16(hw, port, KS8842_PORT_CTRL_1_OFFSET, data);
}

/**
 * sw_dis_multi_queue - disable transmit multiple queues
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the transmit multiple queues selection of the switch
 * port.  Only single transmit queue on the port.
 */
static void sw_dis_multi_queue(struct ksz_hw *hw, int port)
{
	port_cfg_prio(hw, port, 0);
}

/**
 * sw_init_prio - initialize switch priority
 * @hw: The hardware instance.
 *
 * This routine initializes the switch QoS priority functions.
 */
static void sw_init_prio(struct ksz_hw *hw)
{
	int port;
	int tos;
	struct ksz_switch *sw = hw->ksz_switch;

	/*
	 * Init all the 802.1p tag priority value to be assigned to different
	 * priority queue.  Two tag values map onto each of the four queues.
	 */
	sw->p_802_1p[0] = 0;
	sw->p_802_1p[1] = 0;
	sw->p_802_1p[2] = 1;
	sw->p_802_1p[3] = 1;
	sw->p_802_1p[4] = 2;
	sw->p_802_1p[5] = 2;
	sw->p_802_1p[6] = 3;
	sw->p_802_1p[7] = 3;

	/*
	 * Init all the DiffServ priority value to be assigned to priority
	 * queue 0.
	 */
	for (tos = 0; tos < DIFFSERV_ENTRIES; tos++)
		sw->diffserv[tos] = 0;

	/* All QoS functions disabled. */
	for (port = 0; port < TOTAL_PORT_NUM; port++) {
		sw_dis_multi_queue(hw, port);
		sw_dis_diffserv(hw, port);
		sw_dis_802_1p(hw, port);
		sw_cfg_replace_vid(hw, port, 0);

		sw->port_cfg[port].port_prio = 0;
		sw_cfg_port_based(hw, port, sw->port_cfg[port].port_prio);
	}
	sw_cfg_replace_null_vid(hw, 0);
}

/**
 * port_get_def_vid - get port default VID.
 * @hw: The hardware instance.
 * @port: The port index.
 * @vid: Buffer to store the VID.
 *
 * This routine retrieves the default VID of the port.
 */
static void port_get_def_vid(struct ksz_hw *hw, int port, u16 *vid)
{
	u32 addr;

	PORT_CTRL_ADDR(port, addr);
	addr += KS8842_PORT_CTRL_VID_OFFSET;
	*vid = readw(hw->io + addr);
}

/**
 * sw_init_vlan - initialize switch VLAN
 * @hw: The hardware instance.
 *
 * This routine initializes the VLAN function of the switch.
 */
static void sw_init_vlan(struct ksz_hw *hw)
{
	int port;
	int entry;
	struct ksz_switch *sw = hw->ksz_switch;

	/* Read 16 VLAN entries from device's VLAN table. */
	for (entry = 0; entry < VLAN_TABLE_ENTRIES; entry++) {
		sw_r_vlan_table(hw, entry,
			&sw->vlan_table[entry].vid,
			&sw->vlan_table[entry].fid,
			&sw->vlan_table[entry].member);
	}

	/* Cache each port's default VID and start with full membership. */
	for (port = 0; port < TOTAL_PORT_NUM; port++) {
		port_get_def_vid(hw, port, &sw->port_cfg[port].vid);
		sw->port_cfg[port].member = PORT_MASK;
	}
}

/**
 * sw_cfg_port_base_vlan - configure port-based VLAN membership
 * @hw: The hardware instance.
 * @port: The port index.
 * @member: The port-based VLAN membership.
 *
 * This routine configures the port-based VLAN membership of the port.
 */
static void sw_cfg_port_base_vlan(struct ksz_hw *hw, int port, u8 member)
{
	u32 addr;
	u8 data;

	PORT_CTRL_ADDR(port, addr);
	addr += KS8842_PORT_CTRL_2_OFFSET;

	/* Read-modify-write only the membership bits of port control 2. */
	data = readb(hw->io + addr);
	data &= ~PORT_VLAN_MEMBERSHIP;
	data |= (member & PORT_MASK);
	writeb(data, hw->io + addr);

	hw->ksz_switch->port_cfg[port].member = member;
}

/**
 * sw_set_addr - configure switch MAC address
 * @hw: The hardware instance.
 * @mac_addr: The MAC address.
 *
 * This function configures the MAC address of the switch.  Byte pairs are
 * written to the two MAC address register banks in the layout the hardware
 * expects.
 */
static void sw_set_addr(struct ksz_hw *hw, u8 *mac_addr)
{
	int i;

	for (i = 0; i < 6; i += 2) {
		writeb(mac_addr[i], hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
		writeb(mac_addr[1 + i], hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
	}
}

/**
 * sw_set_global_ctrl - set switch global control
 * @hw: The hardware instance.
 *
 * This routine sets the global control of the switch function.
 */
static void sw_set_global_ctrl(struct ksz_hw *hw)
{
	u16 data;

	/* Enable switch MII flow control. */
	data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
	data |= SWITCH_FLOW_CTRL;
	writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);

	data = readw(hw->io + KS8842_SWITCH_CTRL_1_OFFSET);

	/* Enable aggressive back off algorithm in half duplex mode. */
	data |= SWITCH_AGGR_BACKOFF;

	/* Enable automatic fast aging when link changed detected. */
	data |= SWITCH_AGING_ENABLE;
	data |= SWITCH_LINK_AUTO_AGING;

	if (hw->overrides & FAST_AGING)
		data |= SWITCH_FAST_AGING;
	else
		data &= ~SWITCH_FAST_AGING;
	writew(data, hw->io + KS8842_SWITCH_CTRL_1_OFFSET);

	data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);

	/* Enable no excessive collision drop. */
	data |= NO_EXC_COLLISION_DROP;
	writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
}

/* Spanning tree states programmed into the port TX/RX/learn bits. */
enum {
	STP_STATE_DISABLED = 0,
	STP_STATE_LISTENING,
	STP_STATE_LEARNING,
	STP_STATE_FORWARDING,
	STP_STATE_BLOCKED,
	STP_STATE_SIMPLE
};

/**
 * port_set_stp_state - configure port spanning tree state
 * @hw: The hardware instance.
 * @port: The port index.
 * @state: The spanning tree state.
 *
 * This routine configures the spanning tree state of the port by mapping the
 * STP state onto the port's TX enable, RX enable and learn disable bits.
 */
static void port_set_stp_state(struct ksz_hw *hw, int port, int state)
{
	u16 data;

	port_r16(hw, port, KS8842_PORT_CTRL_2_OFFSET, &data);
	switch (state) {
	case STP_STATE_DISABLED:
		data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
		data |= PORT_LEARN_DISABLE;
		break;
	case STP_STATE_LISTENING:
		/*
		 * No need to turn on transmit because of port direct mode.
		 * Turning on receive is required if static MAC table is not
		 * setup.
		 */
		data &= ~PORT_TX_ENABLE;
		data |= PORT_RX_ENABLE;
		data |= PORT_LEARN_DISABLE;
		break;
	case STP_STATE_LEARNING:
		data &= ~PORT_TX_ENABLE;
		data |= PORT_RX_ENABLE;
		data &= ~PORT_LEARN_DISABLE;
		break;
	case STP_STATE_FORWARDING:
		data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
		data &= ~PORT_LEARN_DISABLE;
		break;
	case STP_STATE_BLOCKED:
		/*
		 * Need to setup static MAC table with override to keep
		 * receiving BPDU messages.  See sw_init_stp routine.
		 */
		data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
		data |= PORT_LEARN_DISABLE;
		break;
	case STP_STATE_SIMPLE:
		data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
		data |= PORT_LEARN_DISABLE;
		break;
	}
	port_w16(hw, port, KS8842_PORT_CTRL_2_OFFSET, data);
	hw->ksz_switch->port_cfg[port].stp_state = state;
}

/* Fixed indices into the static MAC table. */
#define STP_ENTRY			0
#define BROADCAST_ENTRY			1
#define BRIDGE_ADDR_ENTRY		2
#define IPV6_ADDR_ENTRY			3

/**
 * sw_clr_sta_mac_table - clear static MAC table
 * @hw: The hardware instance.
 *
 * This routine clears the static MAC table by rewriting every entry with
 * its valid bit forced to zero.
 */
static void sw_clr_sta_mac_table(struct ksz_hw *hw)
{
	struct ksz_mac_table *entry;
	int i;

	for (i = 0; i < STATIC_MAC_TABLE_ENTRIES; i++) {
		entry = &hw->ksz_switch->mac_table[i];
		sw_w_sta_mac_table(hw, i,
			entry->mac_addr, entry->ports,
			entry->override, 0,
			entry->use_fid, entry->fid);
	}
}

/**
 * sw_init_stp - initialize switch spanning tree support
 * @hw: The hardware instance.
 *
 * This routine initializes the spanning tree support of the switch.
 */
static void sw_init_stp(struct ksz_hw *hw)
{
	struct ksz_mac_table *entry;

	/*
	 * Install the IEEE 802.1D bridge group address (01:80:C2:00:00:00)
	 * with override so BPDUs still reach the host while ports are
	 * blocked.
	 */
	entry = &hw->ksz_switch->mac_table[STP_ENTRY];
	entry->mac_addr[0] = 0x01;
	entry->mac_addr[1] = 0x80;
	entry->mac_addr[2] = 0xC2;
	entry->mac_addr[3] = 0x00;
	entry->mac_addr[4] = 0x00;
	entry->mac_addr[5] = 0x00;
	entry->ports = HOST_MASK;
	entry->override = 1;
	entry->valid = 1;
	sw_w_sta_mac_table(hw, STP_ENTRY,
		entry->mac_addr, entry->ports,
		entry->override, entry->valid,
		entry->use_fid, entry->fid);
}

/**
 * sw_block_addr - block certain packets from the host port
 * @hw: The hardware instance.
 *
 * This routine blocks certain packets from reaching to the host port by
 * invalidating the broadcast, bridge address and IPv6 static MAC entries.
 */
static void sw_block_addr(struct ksz_hw *hw)
{
	struct ksz_mac_table *entry;
	int i;

	for (i = BROADCAST_ENTRY; i <= IPV6_ADDR_ENTRY; i++) {
		entry = &hw->ksz_switch->mac_table[i];
		entry->valid = 0;
		sw_w_sta_mac_table(hw, i,
			entry->mac_addr, entry->ports,
			entry->override, entry->valid,
			entry->use_fid, entry->fid);
	}
}

/* Read the PHY basic control register at base offset @phy. */
static inline void hw_r_phy_ctrl(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}

/* Write the PHY basic control register at base offset @phy. */
static inline void hw_w_phy_ctrl(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}

/**
 * hw_r_phy - read data from PHY register
 * @hw: The hardware instance.
 * @port: Port to read.
 * @reg: PHY register to read.
 * @val: Buffer to store the read data.
 *
 * This routine reads data from the PHY register.  The PHY registers are
 * memory mapped at PHY_CTRL_INTERVAL spacing per port.
 */
static void hw_r_phy(struct ksz_hw *hw, int port, u16 reg, u16 *val)
{
	int phy;

	phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
	*val = readw(hw->io + phy);
}

/**
 * hw_w_phy - write data to PHY register
 * @hw: The hardware instance.
 * @port: Port to write.
 * @reg: PHY register to write.
 * @val: Word data to write.
 *
 * This routine writes data to the PHY register.
 */
static void hw_w_phy(struct ksz_hw *hw, int port, u16 reg, u16 val)
{
	int phy;

	phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
	writew(val, hw->io + phy);
}

/*
 * EEPROM access functions
 *
 * The AT93C46 serial EEPROM is bit-banged through GPIO bits in the EEPROM
 * control register.
 */

/* AT93C46 opcodes (2-bit) and extended-address sub-commands. */
#define AT93C_CODE			0
#define AT93C_WR_OFF			0x00
#define AT93C_WR_ALL			0x10
#define AT93C_ER_ALL			0x20
#define AT93C_WR_ON			0x30

#define AT93C_WRITE			1
#define AT93C_READ			2
#define AT93C_ERASE			3

/* Half-period of the bit-banged serial clock, in microseconds. */
#define EEPROM_DELAY			4

/* Clear the given GPIO bit(s) in the EEPROM control register. */
static inline void drop_gpio(struct ksz_hw *hw, u8 gpio)
{
	u16 data;

	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	data &= ~gpio;
	writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
}

/* Set the given GPIO bit(s) in the EEPROM control register. */
static inline void raise_gpio(struct ksz_hw *hw, u8 gpio)
{
	u16 data;

	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	data |= gpio;
	writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
}

/* Return the non-zero mask if the given GPIO bit is currently high. */
static inline u8 state_gpio(struct ksz_hw *hw, u8 gpio)
{
	u16 data;

	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	return (u8)(data & gpio);
}

/* Generate one full serial clock cycle (rise then fall). */
static void eeprom_clk(struct ksz_hw *hw)
{
	raise_gpio(hw, EEPROM_SERIAL_CLOCK);
	udelay(EEPROM_DELAY);
	drop_gpio(hw, EEPROM_SERIAL_CLOCK);
	udelay(EEPROM_DELAY);
}

/* Clock in a 16-bit word from the EEPROM, MSB first. */
static u16 spi_r(struct ksz_hw *hw)
{
	int i;
	u16 temp = 0;

	for (i = 15; i >= 0; i--) {
		raise_gpio(hw, EEPROM_SERIAL_CLOCK);
		udelay(EEPROM_DELAY);

		/* Data is sampled while the clock is high. */
		temp |= (state_gpio(hw, EEPROM_DATA_IN)) ? 1 << i : 0;

		drop_gpio(hw, EEPROM_SERIAL_CLOCK);
		udelay(EEPROM_DELAY);
	}
	return temp;
}

/* Clock out a 16-bit word to the EEPROM, MSB first. */
static void spi_w(struct ksz_hw *hw, u16 data)
{
	int i;

	for (i = 15; i >= 0; i--) {
		(data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
			drop_gpio(hw, EEPROM_DATA_OUT);
		eeprom_clk(hw);
	}
}

/*
 * spi_reg - send start bit, 2-bit opcode and 6-bit address to the EEPROM.
 */
static void spi_reg(struct ksz_hw *hw, u8 data, u8 reg)
{
	int i;

	/* Initial start bit */
	raise_gpio(hw, EEPROM_DATA_OUT);
	eeprom_clk(hw);

	/* AT93C operation */
	for (i = 1; i >= 0; i--) {
		(data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
			drop_gpio(hw, EEPROM_DATA_OUT);
		eeprom_clk(hw);
	}

	/* Address location */
	for (i = 5; i >= 0; i--) {
		(reg & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
			drop_gpio(hw, EEPROM_DATA_OUT);
		eeprom_clk(hw);
	}
}

/* Fixed word offsets inside the configuration EEPROM. */
#define EEPROM_DATA_RESERVED		0
#define EEPROM_DATA_MAC_ADDR_0		1
#define EEPROM_DATA_MAC_ADDR_1		2
#define EEPROM_DATA_MAC_ADDR_2		3
#define EEPROM_DATA_SUBSYS_ID		4
#define EEPROM_DATA_SUBSYS_VEN_ID	5
#define EEPROM_DATA_PM_CAP		6

/* User defined EEPROM data */
#define EEPROM_DATA_OTHER_MAC_ADDR	9

/**
 * eeprom_read - read from AT93C46 EEPROM
 * @hw: The hardware instance.
 * @reg: The register offset.
 *
 * This function reads a word from the AT93C46 EEPROM.
 *
 * Return the data value.
 */
static u16 eeprom_read(struct ksz_hw *hw, u8 reg)
{
	u16 data;

	raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);

	spi_reg(hw, AT93C_READ, reg);
	data = spi_r(hw);

	drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);

	return data;
}

/**
 * eeprom_write - write to AT93C46 EEPROM
 * @hw: The hardware instance.
 * @reg: The register offset.
 * @data: The data value.
 *
 * This procedure writes a word to the AT93C46 EEPROM.  It follows the
 * part's required sequence: write-enable, erase the word, poll ready,
 * write the word, poll ready, then write-disable.
 */
static void eeprom_write(struct ksz_hw *hw, u8 reg, u16 data)
{
	int timeout;

	raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);

	/* Enable write. */
	spi_reg(hw, AT93C_CODE, AT93C_WR_ON);
	drop_gpio(hw, EEPROM_CHIP_SELECT);
	udelay(1);

	/* Erase the register. */
	raise_gpio(hw, EEPROM_CHIP_SELECT);
	spi_reg(hw, AT93C_ERASE, reg);
	drop_gpio(hw, EEPROM_CHIP_SELECT);
	udelay(1);

	/* Check operation complete: DATA_IN goes high when ready. */
	raise_gpio(hw, EEPROM_CHIP_SELECT);
	timeout = 8;
	mdelay(2);
	do {
		mdelay(1);
	} while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
	drop_gpio(hw, EEPROM_CHIP_SELECT);
	udelay(1);

	/* Write the register. */
	raise_gpio(hw, EEPROM_CHIP_SELECT);
	spi_reg(hw, AT93C_WRITE, reg);
	spi_w(hw, data);
	drop_gpio(hw, EEPROM_CHIP_SELECT);
	udelay(1);

	/* Check operation complete. */
	raise_gpio(hw, EEPROM_CHIP_SELECT);
	timeout = 8;
	mdelay(2);
	do {
		mdelay(1);
	} while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
	drop_gpio(hw, EEPROM_CHIP_SELECT);
	udelay(1);

	/* Disable write. */
	raise_gpio(hw, EEPROM_CHIP_SELECT);
	spi_reg(hw, AT93C_CODE, AT93C_WR_OFF);

	drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
}

/*
 * Link detection routines
 */

/*
 * advertised_flow_ctrl - fold the driver's flow control policy into the
 * auto-negotiation advertisement word.  Only symmetric pause is supported
 * by this hardware; TX-only / RX-only requests advertise nothing.
 */
static u16 advertised_flow_ctrl(struct ksz_port *port, u16 ctrl)
{
	ctrl &= ~PORT_AUTO_NEG_SYM_PAUSE;
	switch (port->flow_ctrl) {
	case PHY_FLOW_CTRL:
		ctrl |= PORT_AUTO_NEG_SYM_PAUSE;
		break;
	/* Not supported. */
	case PHY_TX_ONLY:
	case PHY_RX_ONLY:
	default:
		break;
	}
	return ctrl;
}

/*
 * set_flow_ctrl - update the cached RX/TX DMA flow control bits and, if the
 * DMA engines are already enabled, write only the registers that changed.
 */
static void set_flow_ctrl(struct ksz_hw *hw, int rx, int tx)
{
	u32 rx_cfg;
	u32 tx_cfg;

	rx_cfg = hw->rx_cfg;
	tx_cfg = hw->tx_cfg;
	if (rx)
		hw->rx_cfg |= DMA_RX_FLOW_ENABLE;
	else
		hw->rx_cfg &= ~DMA_RX_FLOW_ENABLE;
	if (tx)
		hw->tx_cfg |= DMA_TX_FLOW_ENABLE;
	else
		hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
	if (hw->enabled) {
		if (rx_cfg != hw->rx_cfg)
			writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
		if (tx_cfg != hw->tx_cfg)
			writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
	}
}

/*
 * determine_flow_ctrl - resolve pause configuration from the local and
 * remote auto-negotiation capability words (IEEE 802.3 pause resolution).
 * Does nothing when the user forced flow control via PAUSE_FLOW_CTRL.
 */
static void determine_flow_ctrl(struct ksz_hw *hw, struct ksz_port *port,
	u16 local, u16 remote)
{
	int rx;
	int tx;

	if (hw->overrides & PAUSE_FLOW_CTRL)
		return;

	rx = tx = 0;
	if (port->force_link)
		rx = tx = 1;
	if (remote & LPA_PAUSE_CAP) {
		if (local & ADVERTISE_PAUSE_CAP) {
			rx = tx = 1;
		} else if ((remote & LPA_PAUSE_ASYM) &&
			   (local &
			    (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) ==
			   ADVERTISE_PAUSE_ASYM) {
			tx = 1;
		}
	} else if (remote & LPA_PAUSE_ASYM) {
		if ((local & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM))
		    == (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM))
			rx = 1;
	}
	if (!hw->ksz_switch)
		set_flow_ctrl(hw, rx, tx);
}

/*
 * port_cfg_change - apply the HALF_DUPLEX_SIGNAL_BUG workaround: TX flow
 * control must be off in half duplex mode on affected revisions.
 */
static inline void port_cfg_change(struct ksz_hw *hw, struct ksz_port *port,
	struct ksz_port_info *info, u16 link_status)
{
	if ((hw->features & HALF_DUPLEX_SIGNAL_BUG) &&
	    !(hw->overrides & PAUSE_FLOW_CTRL)) {
		u32 cfg = hw->tx_cfg;

		/* Disable flow control in the half duplex mode. */
		if (1 == info->duplex)
			hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
		if (hw->enabled && cfg != hw->tx_cfg)
			writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
	}
}

/**
 * port_get_link_speed - get current link status
 * @port: The port instance.
 *
 * This routine reads PHY registers to determine the current link status of
 * the switch ports.  Interrupts are blocked for the duration of the scan.
 */
static void port_get_link_speed(struct ksz_port *port)
{
	uint interrupt;
	struct ksz_port_info *info;
	struct ksz_port_info *linked = NULL;
	struct ksz_hw *hw = port->hw;
	u16 data;
	u16 status;
	u8 local;
	u8 remote;
	int i;
	int p;

	interrupt = hw_block_intr(hw);

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		info = &hw->port_info[p];
		port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
		port_r16(hw, p, KS884X_PORT_STATUS_OFFSET, &status);

		/*
		 * Link status is changing all the time even when there is no
		 * cable connection!
		 */
		remote = status & (PORT_AUTO_NEG_COMPLETE |
			PORT_STATUS_LINK_GOOD);
		local = (u8) data;

		/* No change to status. */
		if (local == info->advertised && remote == info->partner)
			continue;

		info->advertised = local;
		info->partner = remote;
		if (status & PORT_STATUS_LINK_GOOD) {

			/* Remember the first linked port. */
			if (!linked)
				linked = info;

			info->tx_rate = 10 * TX_RATE_UNIT;
			if (status & PORT_STATUS_SPEED_100MBIT)
				info->tx_rate = 100 * TX_RATE_UNIT;

			info->duplex = 1;
			if (status & PORT_STATUS_FULL_DUPLEX)
				info->duplex = 2;

			/* Resolve flow control only on a fresh link up. */
			if (media_connected != info->state) {
				hw_r_phy(hw, p, KS884X_PHY_AUTO_NEG_OFFSET,
					&data);
				hw_r_phy(hw, p, KS884X_PHY_REMOTE_CAP_OFFSET,
					&status);
				determine_flow_ctrl(hw, port, data, status);
				if (hw->ksz_switch) {
					port_cfg_back_pressure(hw, p,
						(1 == info->duplex));
				}
				port_cfg_change(hw, port, info, status);
			}
			info->state = media_connected;
		} else {
			/* Indicate the link just goes down. */
			if (media_disconnected != info->state)
				hw->port_mib[p].link_down = 1;

			info->state = media_disconnected;
		}
		hw->port_mib[p].state = (u8) info->state;
	}

	/* Re-point the logical link at the first live port if needed. */
	if (linked && media_disconnected == port->linked->state)
		port->linked = linked;

	hw_restore_intr(hw, interrupt);
}

#define PHY_RESET_TIMEOUT		10

/**
 * port_set_link_speed - set port speed
 * @port: The port instance.
 *
 * This routine sets the link speed of the switch ports via auto-negotiation
 * advertisement, restarting auto-negotiation only when the advertisement
 * actually changes.
 */
static void port_set_link_speed(struct ksz_port *port)
{
	struct ksz_hw *hw = port->hw;
	u16 data;
	u16 cfg;
	u8 status;
	int i;
	int p;

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
		port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status);

		/* Remember current setting only if the link is up. */
		cfg = 0;
		if (status & PORT_STATUS_LINK_GOOD)
			cfg = data;

		data |= PORT_AUTO_NEG_ENABLE;
		data = advertised_flow_ctrl(port, data);

		/* Advertise everything, then strip what the user excluded. */
		data |= PORT_AUTO_NEG_100BTX_FD | PORT_AUTO_NEG_100BTX |
			PORT_AUTO_NEG_10BT_FD | PORT_AUTO_NEG_10BT;

		/* Check if manual configuration is specified by the user. */
		if (port->speed || port->duplex) {
			if (10 == port->speed)
				data &= ~(PORT_AUTO_NEG_100BTX_FD |
					PORT_AUTO_NEG_100BTX);
			else if (100 == port->speed)
				data &= ~(PORT_AUTO_NEG_10BT_FD |
					PORT_AUTO_NEG_10BT);
			if (1 == port->duplex)
				data &= ~(PORT_AUTO_NEG_100BTX_FD |
					PORT_AUTO_NEG_10BT_FD);
			else if (2 == port->duplex)
				data &= ~(PORT_AUTO_NEG_100BTX |
					PORT_AUTO_NEG_10BT);
		}
		if (data != cfg) {
			data |= PORT_AUTO_NEG_RESTART;
			port_w16(hw, p, KS884X_PORT_CTRL_4_OFFSET, data);
		}
	}
}

/**
 * port_force_link_speed - force port speed
 * @port: The port instance.
 *
 * This routine forces the link speed of the switch ports by disabling
 * auto-negotiation in the PHY control register and setting speed/duplex
 * bits directly.
 */
static void port_force_link_speed(struct ksz_port *port)
{
	struct ksz_hw *hw = port->hw;
	u16 data;
	int i;
	int phy;
	int p;

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		phy = KS884X_PHY_1_CTRL_OFFSET + p * PHY_CTRL_INTERVAL;
		hw_r_phy_ctrl(hw, phy, &data);

		data &= ~BMCR_ANENABLE;

		if (10 == port->speed)
			data &= ~BMCR_SPEED100;
		else if (100 == port->speed)
			data |= BMCR_SPEED100;
		if (1 == port->duplex)
			data &= ~BMCR_FULLDPLX;
		else if (2 == port->duplex)
			data |= BMCR_FULLDPLX;
		hw_w_phy_ctrl(hw, phy, data);
	}
}

/* Power down (enable != 0) or wake up the PHYs of all the port's ports. */
static void port_set_power_saving(struct ksz_port *port, int enable)
{
	struct ksz_hw *hw = port->hw;
	int i;
	int p;

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++)
		port_cfg(hw, p,
			KS884X_PORT_CTRL_4_OFFSET, PORT_POWER_DOWN, enable);
}

/*
 * KSZ8841 power management functions
 */

/**
 * hw_chk_wol_pme_status - check PMEN pin
 * @hw: The hardware instance.
 *
 * This function is used to check PMEN pin is asserted.
 *
 * Return 1 if PMEN pin is asserted; otherwise, 0.
 */
static int hw_chk_wol_pme_status(struct ksz_hw *hw)
{
	struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
	struct pci_dev *pdev = hw_priv->pdev;
	u16 data;

	if (!pdev->pm_cap)
		return 0;
	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
	return (data & PCI_PM_CTRL_PME_STATUS) == PCI_PM_CTRL_PME_STATUS;
}

/**
 * hw_clr_wol_pme_status - clear PMEN pin
 * @hw: The hardware instance.
 *
 * This routine is used to clear PME_Status to deassert PMEN pin.
 */
static void hw_clr_wol_pme_status(struct ksz_hw *hw)
{
	struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
	struct pci_dev *pdev = hw_priv->pdev;
	u16 data;

	if (!pdev->pm_cap)
		return;

	/* Clear PME_Status to deassert PMEN pin.  The bit is write-1-to-clear. */
	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
	data |= PCI_PM_CTRL_PME_STATUS;
	pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
}

/**
 * hw_cfg_wol_pme - enable or disable Wake-on-LAN
 * @hw: The hardware instance.
 * @set: The flag indicating whether to enable or disable.
 *
 * This routine is used to enable or disable Wake-on-LAN.  Enabling also
 * puts the device in the D3hot power state so the PME pin can assert.
 */
static void hw_cfg_wol_pme(struct ksz_hw *hw, int set)
{
	struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
	struct pci_dev *pdev = hw_priv->pdev;
	u16 data;

	if (!pdev->pm_cap)
		return;
	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
	data &= ~PCI_PM_CTRL_STATE_MASK;
	if (set)
		data |= PCI_PM_CTRL_PME_ENABLE | PCI_D3hot;
	else
		data &= ~PCI_PM_CTRL_PME_ENABLE;
	pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
}

/**
 * hw_cfg_wol - configure Wake-on-LAN features
 * @hw: The hardware instance.
 * @frame: The pattern frame bit.
 * @set: The flag indicating whether to enable or disable.
 *
 * This routine is used to enable or disable certain Wake-on-LAN features.
 */
static void hw_cfg_wol(struct ksz_hw *hw, u16 frame, int set)
{
	u16 data;

	data = readw(hw->io + KS8841_WOL_CTRL_OFFSET);
	if (set)
		data |= frame;
	else
		data &= ~frame;
	writew(data, hw->io + KS8841_WOL_CTRL_OFFSET);
}

/**
 * hw_set_wol_frame - program Wake-on-LAN pattern
 * @hw: The hardware instance.
 * @i: The frame index.
 * @mask_size: The size of the mask.
 * @mask: Mask to ignore certain bytes in the pattern.
 * @frame_size: The size of the frame.
 * @pattern: The frame data.
 *
 * This routine is used to program Wake-on-LAN pattern.  The hardware
 * stores the byte mask plus a CRC of the masked-in pattern bytes; each
 * pattern slot occupies 0x10 bytes of register space.
 */
static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size,
	const u8 *mask, uint frame_size, const u8 *pattern)
{
	int bits;
	int from;
	int len;
	int to;
	u32 crc;
	u8 data[64];
	u8 val = 0;

	/* The mask covers at most mask_size * 8 pattern bytes, capped at 64. */
	if (frame_size > mask_size * 8)
		frame_size = mask_size * 8;
	if (frame_size > 64)
		frame_size = 64;

	i *= 0x10;
	writel(0, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i);
	writel(0, hw->io + KS8841_WOL_FRAME_BYTE2_OFFSET + i);

	/* Walk the mask bit by bit, collecting masked-in pattern bytes. */
	bits = len = from = to = 0;
	do {
		if (bits) {
			if ((val & 1))
				data[to++] = pattern[from];
			val >>= 1;
			++from;
			--bits;
		} else {
			/* Fetch next mask byte and write it to the hardware. */
			val = mask[len];
			writeb(val, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i
				+ len);
			++len;
			if (val)
				bits = 8;
			else
				from += 8;
		}
	} while (from < (int) frame_size);
	if (val) {
		/* Trim mask bits that extend past the end of the pattern. */
		bits = mask[len - 1];
		val <<= (from % 8);
		bits &= ~val;
		writeb(bits, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i + len -
			1);
	}
	/* CRC over the collected bytes is what the hardware matches on. */
	crc = ether_crc(to, data);
	writel(crc, hw->io + KS8841_WOL_FRAME_CRC_OFFSET + i);
}

/**
 *
 * hw_add_wol_arp - add ARP pattern
 * @hw: The hardware instance.
 * @ip_addr: The IPv4 address assigned to the device.
 *
 * This routine is used to add ARP pattern for waking up the host.  The
 * pattern matches a broadcast ARP request whose target IP (bytes 38-41)
 * equals the device's address; the mask skips the don't-care fields.
 */
static void hw_add_wol_arp(struct ksz_hw *hw, const u8 *ip_addr)
{
	static const u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 };
	u8 pattern[42] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x08, 0x06,
		0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x01,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00 };

	/* Target IP address occupies the last four bytes of the ARP frame. */
	memcpy(&pattern[38], ip_addr, 4);
	hw_set_wol_frame(hw, 3, 6, mask, 42, pattern);
}

/**
 * hw_add_wol_bcast - add broadcast pattern
 * @hw: The hardware instance.
 *
 * This routine is used to add broadcast pattern for waking up the host.
 */
static void hw_add_wol_bcast(struct ksz_hw *hw)
{
	static const u8 mask[] = { 0x3F };
	static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

	hw_set_wol_frame(hw, 2, 1, mask, ETH_ALEN, pattern);
}

/**
 * hw_add_wol_mcast - add multicast pattern
 * @hw: The hardware instance.
 *
 * This routine is used to add multicast pattern for waking up the host.
 *
 * It is assumed the multicast packet is the ICMPv6 neighbor solicitation used
 * by IPv6 ping command.  Note that multicast packets are filtered through the
 * multicast hash table, so not all multicast packets can wake up the host.
 */
static void hw_add_wol_mcast(struct ksz_hw *hw)
{
	static const u8 mask[] = { 0x3F };
	u8 pattern[] = { 0x33, 0x33, 0xFF, 0x00, 0x00, 0x00 };

	/* Solicited-node multicast: 33:33:FF + low 3 bytes of our MAC. */
	memcpy(&pattern[3], &hw->override_addr[3], 3);
	hw_set_wol_frame(hw, 1, 1, mask, 6, pattern);
}

/**
 * hw_add_wol_ucast - add unicast pattern
 * @hw: The hardware instance.
 *
 * This routine is used to add unicast pattern to wakeup the host.
 *
 * It is assumed the unicast packet is directed to the device, as the hardware
 * can only receive them in normal case.
 */
static void hw_add_wol_ucast(struct ksz_hw *hw)
{
	static const u8 mask[] = { 0x3F };

	hw_set_wol_frame(hw, 0, 1, mask, ETH_ALEN, hw->override_addr);
}

/**
 * hw_enable_wol - enable Wake-on-LAN
 * @hw: The hardware instance.
 * @wol_enable: The Wake-on-LAN settings (WAKE_* flags from ethtool).
 * @net_addr: The IPv4 address assigned to the device.
 *
 * This routine is used to enable Wake-on-LAN depending on driver settings.
 * Patterns are always programmed; only their enable bits follow the flags.
 */
static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, const u8 *net_addr)
{
	hw_cfg_wol(hw, KS8841_WOL_MAGIC_ENABLE, (wol_enable & WAKE_MAGIC));
	hw_cfg_wol(hw, KS8841_WOL_FRAME0_ENABLE, (wol_enable & WAKE_UCAST));
	hw_add_wol_ucast(hw);
	hw_cfg_wol(hw, KS8841_WOL_FRAME1_ENABLE, (wol_enable & WAKE_MCAST));
	hw_add_wol_mcast(hw);
	hw_cfg_wol(hw, KS8841_WOL_FRAME2_ENABLE, (wol_enable & WAKE_BCAST));
	hw_cfg_wol(hw, KS8841_WOL_FRAME3_ENABLE, (wol_enable & WAKE_ARP));
	hw_add_wol_arp(hw, net_addr);
}

/**
 * hw_init - check driver is correct for the hardware
 * @hw: The hardware instance.
 *
 * This function checks the hardware is correct for this driver and sets the
 * hardware up for proper initialization.
 *
 * Return number of ports or 0 if not right.
 */
static int hw_init(struct ksz_hw *hw)
{
	int rc = 0;
	u16 data;
	u16 revision;

	/* Set bus speed to 125MHz. */
	writew(BUS_SPEED_125_MHZ, hw->io + KS884X_BUS_CTRL_OFFSET);

	/* Check KSZ884x chip ID. */
	data = readw(hw->io + KS884X_CHIP_ID_OFFSET);

	revision = (data & KS884X_REVISION_MASK) >> KS884X_REVISION_SHIFT;
	data &= KS884X_CHIP_ID_MASK_41;
	/* rc doubles as the port count: KSZ8841 = 1, KSZ8842 = 2. */
	if (REG_CHIP_ID_41 == data)
		rc = 1;
	else if (REG_CHIP_ID_42 == data)
		rc = 2;
	else
		return 0;

	/* Setup hardware features or bug workarounds for early revisions. */
	if (revision <= 1) {
		hw->features |= SMALL_PACKET_TX_BUG;
		if (1 == rc)
			hw->features |= HALF_DUPLEX_SIGNAL_BUG;
	}
	return rc;
}

/**
 * hw_reset - reset the hardware
 * @hw: The hardware instance.
 *
 * This routine resets the hardware.
 */
static void hw_reset(struct ksz_hw *hw)
{
	writew(GLOBAL_SOFTWARE_RESET, hw->io + KS884X_GLOBAL_CTRL_OFFSET);

	/* Wait for device to reset. */
	mdelay(10);

	/* Write 0 to clear device reset. */
	writew(0, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
}

/**
 * hw_setup - setup the hardware
 * @hw: The hardware instance.
 *
 * This routine setup the hardware for proper operation.
 */
static void hw_setup(struct ksz_hw *hw)
{
#if SET_DEFAULT_LED
	u16 data;

	/* Change default LED mode. */
	data = readw(hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
	data &= ~LED_MODE;
	data |= SET_DEFAULT_LED;
	writew(data, hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
#endif

	/* Setup transmit control. */
	hw->tx_cfg = (DMA_TX_PAD_ENABLE | DMA_TX_CRC_ENABLE |
		(DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_TX_ENABLE);

	/* Setup receive control. */
	hw->rx_cfg = (DMA_RX_BROADCAST | DMA_RX_UNICAST |
		(DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_RX_ENABLE);
	hw->rx_cfg |= KS884X_DMA_RX_MULTICAST;

	/* Hardware cannot handle UDP packet in IP fragments. */
	hw->rx_cfg |= (DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);

	if (hw->all_multi)
		hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
	if (hw->promiscuous)
		hw->rx_cfg |= DMA_RX_PROMISCUOUS;
}

/**
 * hw_setup_intr - setup interrupt mask
 * @hw: The hardware instance.
 *
 * This routine setup the interrupt mask for proper operation.
 */
static void hw_setup_intr(struct ksz_hw *hw)
{
	hw->intr_mask = KS884X_INT_MASK | KS884X_INT_RX_OVERRUN;
}

/*
 * ksz_check_desc_num - validate the configured descriptor count.  The DMA
 * ring size must be a power of two of at least 1 << MIN_DESC_SHIFT; an
 * invalid count is rounded up to the next acceptable value.
 */
static void ksz_check_desc_num(struct ksz_desc_info *info)
{
#define MIN_DESC_SHIFT  2

	int alloc = info->alloc;
	int shift;

	/* Find the position of the lowest set bit. */
	shift = 0;
	while (!(alloc & 1)) {
		shift++;
		alloc >>= 1;
	}
	if (alloc != 1 || shift < MIN_DESC_SHIFT) {
		pr_alert("Hardware descriptor numbers not right!\n");
		while (alloc) {
			shift++;
			alloc >>= 1;
		}
		if (shift < MIN_DESC_SHIFT)
			shift = MIN_DESC_SHIFT;
		alloc = 1 << shift;
		info->alloc = alloc;
	}
	/* Power-of-two size lets the index wrap with a simple mask. */
	info->mask = info->alloc - 1;
}

/*
 * hw_init_desc - link the software descriptors to the hardware descriptor
 * ring and chain the hardware descriptors into a circular list.
 * @transmit is currently unused; the same layout serves both directions.
 */
static void hw_init_desc(struct ksz_desc_info *desc_info, int transmit)
{
	int i;
	u32 phys = desc_info->ring_phys;
	struct ksz_hw_desc *desc = desc_info->ring_virt;
	struct ksz_desc *cur = desc_info->ring;
	struct ksz_desc *previous = NULL;

	for (i = 0; i < desc_info->alloc; i++) {
		cur->phw = desc++;
		phys += desc_info->size;
		previous = cur++;
		previous->phw->next = cpu_to_le32(phys);
	}
	/* Close the ring: last descriptor points back at the first. */
	previous->phw->next = cpu_to_le32(desc_info->ring_phys);
	previous->sw.buf.rx.end_of_ring = 1;
	previous->phw->buf.data = cpu_to_le32(previous->sw.buf.data);

	desc_info->avail = desc_info->alloc;
	desc_info->last = desc_info->next = 0;

	desc_info->cur = desc_info->ring;
}

/**
 * hw_set_desc_base - set descriptor base addresses
 * @hw: The hardware instance.
 * @tx_addr: The transmit descriptor base.
 * @rx_addr: The receive descriptor base.
 *
 * This routine programs the descriptor base addresses after reset.
 */
static void hw_set_desc_base(struct ksz_hw *hw, u32 tx_addr, u32 rx_addr)
{
	/* Set base address of Tx/Rx descriptors. */
	writel(tx_addr, hw->io + KS_DMA_TX_ADDR);
	writel(rx_addr, hw->io + KS_DMA_RX_ADDR);
}

/* Reset the ring bookkeeping to "empty": all descriptors available. */
static void hw_reset_pkts(struct ksz_desc_info *info)
{
	info->cur = info->ring;
	info->avail = info->alloc;
	info->last = info->next = 0;
}

/* Kick the receive DMA engine to resume fetching descriptors. */
static inline void hw_resume_rx(struct ksz_hw *hw)
{
	writel(DMA_START, hw->io + KS_DMA_RX_START);
}

/**
 * hw_start_rx - start receiving
 * @hw: The hardware instance.
 *
 * This routine starts the receive function of the hardware.
 */
static void hw_start_rx(struct ksz_hw *hw)
{
	writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);

	/* Notify when the receive stops. */
	hw->intr_mask |= KS884X_INT_RX_STOPPED;

	writel(DMA_START, hw->io + KS_DMA_RX_START);
	hw_ack_intr(hw, KS884X_INT_RX_STOPPED);
	hw->rx_stop++;

	/* Variable overflows. */
	if (0 == hw->rx_stop)
		hw->rx_stop = 2;
}

/**
 * hw_stop_rx - stop receiving
 * @hw: The hardware instance.
 *
 * This routine stops the receive function of the hardware.
 */
static void hw_stop_rx(struct ksz_hw *hw)
{
	hw->rx_stop = 0;
	hw_turn_off_intr(hw, KS884X_INT_RX_STOPPED);
	writel((hw->rx_cfg & ~DMA_RX_ENABLE), hw->io + KS_DMA_RX_CTRL);
}

/**
 * hw_start_tx - start transmitting
 * @hw: The hardware instance.
3545 * 3546 * This routine starts the transmit function of the hardware. 3547 */ 3548 static void hw_start_tx(struct ksz_hw *hw) 3549 { 3550 writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL); 3551 } 3552 3553 /** 3554 * hw_stop_tx - stop transmitting 3555 * @hw: The hardware instance. 3556 * 3557 * This routine stops the transmit function of the hardware. 3558 */ 3559 static void hw_stop_tx(struct ksz_hw *hw) 3560 { 3561 writel((hw->tx_cfg & ~DMA_TX_ENABLE), hw->io + KS_DMA_TX_CTRL); 3562 } 3563 3564 /** 3565 * hw_disable - disable hardware 3566 * @hw: The hardware instance. 3567 * 3568 * This routine disables the hardware. 3569 */ 3570 static void hw_disable(struct ksz_hw *hw) 3571 { 3572 hw_stop_rx(hw); 3573 hw_stop_tx(hw); 3574 hw->enabled = 0; 3575 } 3576 3577 /** 3578 * hw_enable - enable hardware 3579 * @hw: The hardware instance. 3580 * 3581 * This routine enables the hardware. 3582 */ 3583 static void hw_enable(struct ksz_hw *hw) 3584 { 3585 hw_start_tx(hw); 3586 hw_start_rx(hw); 3587 hw->enabled = 1; 3588 } 3589 3590 /** 3591 * hw_alloc_pkt - allocate enough descriptors for transmission 3592 * @hw: The hardware instance. 3593 * @length: The length of the packet. 3594 * @physical: Number of descriptors required. 3595 * 3596 * This function allocates descriptors for transmission. 3597 * 3598 * Return 0 if not successful; 1 for buffer copy; or number of descriptors. 3599 */ 3600 static int hw_alloc_pkt(struct ksz_hw *hw, int length, int physical) 3601 { 3602 /* Always leave one descriptor free. */ 3603 if (hw->tx_desc_info.avail <= 1) 3604 return 0; 3605 3606 /* Allocate a descriptor for transmission and mark it current. */ 3607 get_tx_pkt(&hw->tx_desc_info, &hw->tx_desc_info.cur); 3608 hw->tx_desc_info.cur->sw.buf.tx.first_seg = 1; 3609 3610 /* Keep track of number of transmit descriptors used so far. */ 3611 ++hw->tx_int_cnt; 3612 hw->tx_size += length; 3613 3614 /* Cannot hold on too much data. 
*/ 3615 if (hw->tx_size >= MAX_TX_HELD_SIZE) 3616 hw->tx_int_cnt = hw->tx_int_mask + 1; 3617 3618 if (physical > hw->tx_desc_info.avail) 3619 return 1; 3620 3621 return hw->tx_desc_info.avail; 3622 } 3623 3624 /** 3625 * hw_send_pkt - mark packet for transmission 3626 * @hw: The hardware instance. 3627 * 3628 * This routine marks the packet for transmission in PCI version. 3629 */ 3630 static void hw_send_pkt(struct ksz_hw *hw) 3631 { 3632 struct ksz_desc *cur = hw->tx_desc_info.cur; 3633 3634 cur->sw.buf.tx.last_seg = 1; 3635 3636 /* Interrupt only after specified number of descriptors used. */ 3637 if (hw->tx_int_cnt > hw->tx_int_mask) { 3638 cur->sw.buf.tx.intr = 1; 3639 hw->tx_int_cnt = 0; 3640 hw->tx_size = 0; 3641 } 3642 3643 /* KSZ8842 supports port directed transmission. */ 3644 cur->sw.buf.tx.dest_port = hw->dst_ports; 3645 3646 release_desc(cur); 3647 3648 writel(0, hw->io + KS_DMA_TX_START); 3649 } 3650 3651 static int empty_addr(u8 *addr) 3652 { 3653 u32 *addr1 = (u32 *) addr; 3654 u16 *addr2 = (u16 *) &addr[4]; 3655 3656 return 0 == *addr1 && 0 == *addr2; 3657 } 3658 3659 /** 3660 * hw_set_addr - set MAC address 3661 * @hw: The hardware instance. 3662 * 3663 * This routine programs the MAC address of the hardware when the address is 3664 * overridden. 3665 */ 3666 static void hw_set_addr(struct ksz_hw *hw) 3667 { 3668 int i; 3669 3670 for (i = 0; i < ETH_ALEN; i++) 3671 writeb(hw->override_addr[MAC_ADDR_ORDER(i)], 3672 hw->io + KS884X_ADDR_0_OFFSET + i); 3673 3674 sw_set_addr(hw, hw->override_addr); 3675 } 3676 3677 /** 3678 * hw_read_addr - read MAC address 3679 * @hw: The hardware instance. 3680 * 3681 * This routine retrieves the MAC address of the hardware. 
3682 */ 3683 static void hw_read_addr(struct ksz_hw *hw) 3684 { 3685 int i; 3686 3687 for (i = 0; i < ETH_ALEN; i++) 3688 hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io + 3689 KS884X_ADDR_0_OFFSET + i); 3690 3691 if (!hw->mac_override) { 3692 memcpy(hw->override_addr, hw->perm_addr, ETH_ALEN); 3693 if (empty_addr(hw->override_addr)) { 3694 memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, ETH_ALEN); 3695 memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS, 3696 ETH_ALEN); 3697 hw->override_addr[5] += hw->id; 3698 hw_set_addr(hw); 3699 } 3700 } 3701 } 3702 3703 static void hw_ena_add_addr(struct ksz_hw *hw, int index, u8 *mac_addr) 3704 { 3705 int i; 3706 u32 mac_addr_lo; 3707 u32 mac_addr_hi; 3708 3709 mac_addr_hi = 0; 3710 for (i = 0; i < 2; i++) { 3711 mac_addr_hi <<= 8; 3712 mac_addr_hi |= mac_addr[i]; 3713 } 3714 mac_addr_hi |= ADD_ADDR_ENABLE; 3715 mac_addr_lo = 0; 3716 for (i = 2; i < 6; i++) { 3717 mac_addr_lo <<= 8; 3718 mac_addr_lo |= mac_addr[i]; 3719 } 3720 index *= ADD_ADDR_INCR; 3721 3722 writel(mac_addr_lo, hw->io + index + KS_ADD_ADDR_0_LO); 3723 writel(mac_addr_hi, hw->io + index + KS_ADD_ADDR_0_HI); 3724 } 3725 3726 static void hw_set_add_addr(struct ksz_hw *hw) 3727 { 3728 int i; 3729 3730 for (i = 0; i < ADDITIONAL_ENTRIES; i++) { 3731 if (empty_addr(hw->address[i])) 3732 writel(0, hw->io + ADD_ADDR_INCR * i + 3733 KS_ADD_ADDR_0_HI); 3734 else 3735 hw_ena_add_addr(hw, i, hw->address[i]); 3736 } 3737 } 3738 3739 static int hw_add_addr(struct ksz_hw *hw, const u8 *mac_addr) 3740 { 3741 int i; 3742 int j = ADDITIONAL_ENTRIES; 3743 3744 if (ether_addr_equal(hw->override_addr, mac_addr)) 3745 return 0; 3746 for (i = 0; i < hw->addr_list_size; i++) { 3747 if (ether_addr_equal(hw->address[i], mac_addr)) 3748 return 0; 3749 if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i])) 3750 j = i; 3751 } 3752 if (j < ADDITIONAL_ENTRIES) { 3753 memcpy(hw->address[j], mac_addr, ETH_ALEN); 3754 hw_ena_add_addr(hw, j, hw->address[j]); 3755 return 0; 3756 } 3757 return -1; 
3758 } 3759 3760 static int hw_del_addr(struct ksz_hw *hw, const u8 *mac_addr) 3761 { 3762 int i; 3763 3764 for (i = 0; i < hw->addr_list_size; i++) { 3765 if (ether_addr_equal(hw->address[i], mac_addr)) { 3766 eth_zero_addr(hw->address[i]); 3767 writel(0, hw->io + ADD_ADDR_INCR * i + 3768 KS_ADD_ADDR_0_HI); 3769 return 0; 3770 } 3771 } 3772 return -1; 3773 } 3774 3775 /** 3776 * hw_clr_multicast - clear multicast addresses 3777 * @hw: The hardware instance. 3778 * 3779 * This routine removes all multicast addresses set in the hardware. 3780 */ 3781 static void hw_clr_multicast(struct ksz_hw *hw) 3782 { 3783 int i; 3784 3785 for (i = 0; i < HW_MULTICAST_SIZE; i++) { 3786 hw->multi_bits[i] = 0; 3787 3788 writeb(0, hw->io + KS884X_MULTICAST_0_OFFSET + i); 3789 } 3790 } 3791 3792 /** 3793 * hw_set_grp_addr - set multicast addresses 3794 * @hw: The hardware instance. 3795 * 3796 * This routine programs multicast addresses for the hardware to accept those 3797 * addresses. 3798 */ 3799 static void hw_set_grp_addr(struct ksz_hw *hw) 3800 { 3801 int i; 3802 int index; 3803 int position; 3804 int value; 3805 3806 memset(hw->multi_bits, 0, sizeof(u8) * HW_MULTICAST_SIZE); 3807 3808 for (i = 0; i < hw->multi_list_size; i++) { 3809 position = (ether_crc(6, hw->multi_list[i]) >> 26) & 0x3f; 3810 index = position >> 3; 3811 value = 1 << (position & 7); 3812 hw->multi_bits[index] |= (u8) value; 3813 } 3814 3815 for (i = 0; i < HW_MULTICAST_SIZE; i++) 3816 writeb(hw->multi_bits[i], hw->io + KS884X_MULTICAST_0_OFFSET + 3817 i); 3818 } 3819 3820 /** 3821 * hw_set_multicast - enable or disable all multicast receiving 3822 * @hw: The hardware instance. 3823 * @multicast: To turn on or off the all multicast feature. 3824 * 3825 * This routine enables/disables the hardware to accept all multicast packets. 3826 */ 3827 static void hw_set_multicast(struct ksz_hw *hw, u8 multicast) 3828 { 3829 /* Stop receiving for reconfiguration. 
*/ 3830 hw_stop_rx(hw); 3831 3832 if (multicast) 3833 hw->rx_cfg |= DMA_RX_ALL_MULTICAST; 3834 else 3835 hw->rx_cfg &= ~DMA_RX_ALL_MULTICAST; 3836 3837 if (hw->enabled) 3838 hw_start_rx(hw); 3839 } 3840 3841 /** 3842 * hw_set_promiscuous - enable or disable promiscuous receiving 3843 * @hw: The hardware instance. 3844 * @prom: To turn on or off the promiscuous feature. 3845 * 3846 * This routine enables/disables the hardware to accept all packets. 3847 */ 3848 static void hw_set_promiscuous(struct ksz_hw *hw, u8 prom) 3849 { 3850 /* Stop receiving for reconfiguration. */ 3851 hw_stop_rx(hw); 3852 3853 if (prom) 3854 hw->rx_cfg |= DMA_RX_PROMISCUOUS; 3855 else 3856 hw->rx_cfg &= ~DMA_RX_PROMISCUOUS; 3857 3858 if (hw->enabled) 3859 hw_start_rx(hw); 3860 } 3861 3862 /** 3863 * sw_enable - enable the switch 3864 * @hw: The hardware instance. 3865 * @enable: The flag to enable or disable the switch 3866 * 3867 * This routine is used to enable/disable the switch in KSZ8842. 3868 */ 3869 static void sw_enable(struct ksz_hw *hw, int enable) 3870 { 3871 int port; 3872 3873 for (port = 0; port < SWITCH_PORT_NUM; port++) { 3874 if (hw->dev_count > 1) { 3875 /* Set port-base vlan membership with host port. */ 3876 sw_cfg_port_base_vlan(hw, port, 3877 HOST_MASK | (1 << port)); 3878 port_set_stp_state(hw, port, STP_STATE_DISABLED); 3879 } else { 3880 sw_cfg_port_base_vlan(hw, port, PORT_MASK); 3881 port_set_stp_state(hw, port, STP_STATE_FORWARDING); 3882 } 3883 } 3884 if (hw->dev_count > 1) 3885 port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE); 3886 else 3887 port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_FORWARDING); 3888 3889 if (enable) 3890 enable = KS8842_START; 3891 writew(enable, hw->io + KS884X_CHIP_ID_OFFSET); 3892 } 3893 3894 /** 3895 * sw_setup - setup the switch 3896 * @hw: The hardware instance. 3897 * 3898 * This routine setup the hardware switch engine for default operation. 
3899 */ 3900 static void sw_setup(struct ksz_hw *hw) 3901 { 3902 int port; 3903 3904 sw_set_global_ctrl(hw); 3905 3906 /* Enable switch broadcast storm protection at 10% percent rate. */ 3907 sw_init_broad_storm(hw); 3908 hw_cfg_broad_storm(hw, BROADCAST_STORM_PROTECTION_RATE); 3909 for (port = 0; port < SWITCH_PORT_NUM; port++) 3910 sw_ena_broad_storm(hw, port); 3911 3912 sw_init_prio(hw); 3913 3914 sw_init_mirror(hw); 3915 3916 sw_init_prio_rate(hw); 3917 3918 sw_init_vlan(hw); 3919 3920 if (hw->features & STP_SUPPORT) 3921 sw_init_stp(hw); 3922 if (!sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET, 3923 SWITCH_TX_FLOW_CTRL | SWITCH_RX_FLOW_CTRL)) 3924 hw->overrides |= PAUSE_FLOW_CTRL; 3925 sw_enable(hw, 1); 3926 } 3927 3928 /** 3929 * ksz_start_timer - start kernel timer 3930 * @info: Kernel timer information. 3931 * @time: The time tick. 3932 * 3933 * This routine starts the kernel timer after the specified time tick. 3934 */ 3935 static void ksz_start_timer(struct ksz_timer_info *info, int time) 3936 { 3937 info->cnt = 0; 3938 info->timer.expires = jiffies + time; 3939 add_timer(&info->timer); 3940 3941 /* infinity */ 3942 info->max = -1; 3943 } 3944 3945 /** 3946 * ksz_stop_timer - stop kernel timer 3947 * @info: Kernel timer information. 3948 * 3949 * This routine stops the kernel timer. 
3950 */ 3951 static void ksz_stop_timer(struct ksz_timer_info *info) 3952 { 3953 if (info->max) { 3954 info->max = 0; 3955 timer_delete_sync(&info->timer); 3956 } 3957 } 3958 3959 static void ksz_init_timer(struct ksz_timer_info *info, int period, 3960 void (*function)(struct timer_list *)) 3961 { 3962 info->max = 0; 3963 info->period = period; 3964 timer_setup(&info->timer, function, 0); 3965 } 3966 3967 static void ksz_update_timer(struct ksz_timer_info *info) 3968 { 3969 ++info->cnt; 3970 if (info->max > 0) { 3971 if (info->cnt < info->max) { 3972 info->timer.expires = jiffies + info->period; 3973 add_timer(&info->timer); 3974 } else 3975 info->max = 0; 3976 } else if (info->max < 0) { 3977 info->timer.expires = jiffies + info->period; 3978 add_timer(&info->timer); 3979 } 3980 } 3981 3982 /** 3983 * ksz_alloc_soft_desc - allocate software descriptors 3984 * @desc_info: Descriptor information structure. 3985 * @transmit: Indication that descriptors are for transmit. 3986 * 3987 * This local function allocates software descriptors for manipulation in 3988 * memory. 3989 * 3990 * Return 0 if successful. 3991 */ 3992 static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit) 3993 { 3994 desc_info->ring = kcalloc(desc_info->alloc, sizeof(struct ksz_desc), 3995 GFP_KERNEL); 3996 if (!desc_info->ring) 3997 return 1; 3998 hw_init_desc(desc_info, transmit); 3999 return 0; 4000 } 4001 4002 /** 4003 * ksz_alloc_desc - allocate hardware descriptors 4004 * @adapter: Adapter information structure. 4005 * 4006 * This local function allocates hardware descriptors for receiving and 4007 * transmitting. 4008 * 4009 * Return 0 if successful. 4010 */ 4011 static int ksz_alloc_desc(struct dev_info *adapter) 4012 { 4013 struct ksz_hw *hw = &adapter->hw; 4014 int offset; 4015 4016 /* Allocate memory for RX & TX descriptors. 
*/ 4017 adapter->desc_pool.alloc_size = 4018 hw->rx_desc_info.size * hw->rx_desc_info.alloc + 4019 hw->tx_desc_info.size * hw->tx_desc_info.alloc + 4020 DESC_ALIGNMENT; 4021 4022 adapter->desc_pool.alloc_virt = 4023 dma_alloc_coherent(&adapter->pdev->dev, 4024 adapter->desc_pool.alloc_size, 4025 &adapter->desc_pool.dma_addr, GFP_KERNEL); 4026 if (adapter->desc_pool.alloc_virt == NULL) { 4027 adapter->desc_pool.alloc_size = 0; 4028 return 1; 4029 } 4030 4031 /* Align to the next cache line boundary. */ 4032 offset = (((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT) ? 4033 (DESC_ALIGNMENT - 4034 ((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT)) : 0); 4035 adapter->desc_pool.virt = adapter->desc_pool.alloc_virt + offset; 4036 adapter->desc_pool.phys = adapter->desc_pool.dma_addr + offset; 4037 4038 /* Allocate receive/transmit descriptors. */ 4039 hw->rx_desc_info.ring_virt = (struct ksz_hw_desc *) 4040 adapter->desc_pool.virt; 4041 hw->rx_desc_info.ring_phys = adapter->desc_pool.phys; 4042 offset = hw->rx_desc_info.alloc * hw->rx_desc_info.size; 4043 hw->tx_desc_info.ring_virt = (struct ksz_hw_desc *) 4044 (adapter->desc_pool.virt + offset); 4045 hw->tx_desc_info.ring_phys = adapter->desc_pool.phys + offset; 4046 4047 if (ksz_alloc_soft_desc(&hw->rx_desc_info, 0)) 4048 return 1; 4049 if (ksz_alloc_soft_desc(&hw->tx_desc_info, 1)) 4050 return 1; 4051 4052 return 0; 4053 } 4054 4055 /** 4056 * free_dma_buf - release DMA buffer resources 4057 * @adapter: Adapter information structure. 4058 * @dma_buf: pointer to buf 4059 * @direction: to or from device 4060 * 4061 * This routine is just a helper function to release the DMA buffer resources. 
4062 */ 4063 static void free_dma_buf(struct dev_info *adapter, struct ksz_dma_buf *dma_buf, 4064 int direction) 4065 { 4066 dma_unmap_single(&adapter->pdev->dev, dma_buf->dma, dma_buf->len, 4067 direction); 4068 dev_kfree_skb(dma_buf->skb); 4069 dma_buf->skb = NULL; 4070 dma_buf->dma = 0; 4071 } 4072 4073 /** 4074 * ksz_init_rx_buffers - initialize receive descriptors 4075 * @adapter: Adapter information structure. 4076 * 4077 * This routine initializes DMA buffers for receiving. 4078 */ 4079 static void ksz_init_rx_buffers(struct dev_info *adapter) 4080 { 4081 int i; 4082 struct ksz_desc *desc; 4083 struct ksz_dma_buf *dma_buf; 4084 struct ksz_hw *hw = &adapter->hw; 4085 struct ksz_desc_info *info = &hw->rx_desc_info; 4086 4087 for (i = 0; i < hw->rx_desc_info.alloc; i++) { 4088 get_rx_pkt(info, &desc); 4089 4090 dma_buf = DMA_BUFFER(desc); 4091 if (dma_buf->skb && dma_buf->len != adapter->mtu) 4092 free_dma_buf(adapter, dma_buf, DMA_FROM_DEVICE); 4093 dma_buf->len = adapter->mtu; 4094 if (!dma_buf->skb) 4095 dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC); 4096 if (dma_buf->skb && !dma_buf->dma) 4097 dma_buf->dma = dma_map_single(&adapter->pdev->dev, 4098 skb_tail_pointer(dma_buf->skb), 4099 dma_buf->len, 4100 DMA_FROM_DEVICE); 4101 4102 /* Set descriptor. */ 4103 set_rx_buf(desc, dma_buf->dma); 4104 set_rx_len(desc, dma_buf->len); 4105 release_desc(desc); 4106 } 4107 } 4108 4109 /** 4110 * ksz_alloc_mem - allocate memory for hardware descriptors 4111 * @adapter: Adapter information structure. 4112 * 4113 * This function allocates memory for use by hardware descriptors for receiving 4114 * and transmitting. 4115 * 4116 * Return 0 if successful. 4117 */ 4118 static int ksz_alloc_mem(struct dev_info *adapter) 4119 { 4120 struct ksz_hw *hw = &adapter->hw; 4121 4122 /* Determine the number of receive and transmit descriptors. 
*/ 4123 hw->rx_desc_info.alloc = NUM_OF_RX_DESC; 4124 hw->tx_desc_info.alloc = NUM_OF_TX_DESC; 4125 4126 /* Determine how many descriptors to skip transmit interrupt. */ 4127 hw->tx_int_cnt = 0; 4128 hw->tx_int_mask = NUM_OF_TX_DESC / 4; 4129 if (hw->tx_int_mask > 8) 4130 hw->tx_int_mask = 8; 4131 while (hw->tx_int_mask) { 4132 hw->tx_int_cnt++; 4133 hw->tx_int_mask >>= 1; 4134 } 4135 if (hw->tx_int_cnt) { 4136 hw->tx_int_mask = (1 << (hw->tx_int_cnt - 1)) - 1; 4137 hw->tx_int_cnt = 0; 4138 } 4139 4140 /* Determine the descriptor size. */ 4141 hw->rx_desc_info.size = 4142 (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) / 4143 DESC_ALIGNMENT) * DESC_ALIGNMENT); 4144 hw->tx_desc_info.size = 4145 (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) / 4146 DESC_ALIGNMENT) * DESC_ALIGNMENT); 4147 if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc)) 4148 pr_alert("Hardware descriptor size not right!\n"); 4149 ksz_check_desc_num(&hw->rx_desc_info); 4150 ksz_check_desc_num(&hw->tx_desc_info); 4151 4152 /* Allocate descriptors. */ 4153 if (ksz_alloc_desc(adapter)) 4154 return 1; 4155 4156 return 0; 4157 } 4158 4159 /** 4160 * ksz_free_desc - free software and hardware descriptors 4161 * @adapter: Adapter information structure. 4162 * 4163 * This local routine frees the software and hardware descriptors allocated by 4164 * ksz_alloc_desc(). 4165 */ 4166 static void ksz_free_desc(struct dev_info *adapter) 4167 { 4168 struct ksz_hw *hw = &adapter->hw; 4169 4170 /* Reset descriptor. */ 4171 hw->rx_desc_info.ring_virt = NULL; 4172 hw->tx_desc_info.ring_virt = NULL; 4173 hw->rx_desc_info.ring_phys = 0; 4174 hw->tx_desc_info.ring_phys = 0; 4175 4176 /* Free memory. */ 4177 if (adapter->desc_pool.alloc_virt) 4178 dma_free_coherent(&adapter->pdev->dev, 4179 adapter->desc_pool.alloc_size, 4180 adapter->desc_pool.alloc_virt, 4181 adapter->desc_pool.dma_addr); 4182 4183 /* Reset resource pool. 
*/ 4184 adapter->desc_pool.alloc_size = 0; 4185 adapter->desc_pool.alloc_virt = NULL; 4186 4187 kfree(hw->rx_desc_info.ring); 4188 hw->rx_desc_info.ring = NULL; 4189 kfree(hw->tx_desc_info.ring); 4190 hw->tx_desc_info.ring = NULL; 4191 } 4192 4193 /** 4194 * ksz_free_buffers - free buffers used in the descriptors 4195 * @adapter: Adapter information structure. 4196 * @desc_info: Descriptor information structure. 4197 * @direction: to or from device 4198 * 4199 * This local routine frees buffers used in the DMA buffers. 4200 */ 4201 static void ksz_free_buffers(struct dev_info *adapter, 4202 struct ksz_desc_info *desc_info, int direction) 4203 { 4204 int i; 4205 struct ksz_dma_buf *dma_buf; 4206 struct ksz_desc *desc = desc_info->ring; 4207 4208 for (i = 0; i < desc_info->alloc; i++) { 4209 dma_buf = DMA_BUFFER(desc); 4210 if (dma_buf->skb) 4211 free_dma_buf(adapter, dma_buf, direction); 4212 desc++; 4213 } 4214 } 4215 4216 /** 4217 * ksz_free_mem - free all resources used by descriptors 4218 * @adapter: Adapter information structure. 4219 * 4220 * This local routine frees all the resources allocated by ksz_alloc_mem(). 4221 */ 4222 static void ksz_free_mem(struct dev_info *adapter) 4223 { 4224 /* Free transmit buffers. */ 4225 ksz_free_buffers(adapter, &adapter->hw.tx_desc_info, DMA_TO_DEVICE); 4226 4227 /* Free receive buffers. */ 4228 ksz_free_buffers(adapter, &adapter->hw.rx_desc_info, DMA_FROM_DEVICE); 4229 4230 /* Free descriptors. 
*/ 4231 ksz_free_desc(adapter); 4232 } 4233 4234 static void get_mib_counters(struct ksz_hw *hw, int first, int cnt, 4235 u64 *counter) 4236 { 4237 int i; 4238 int mib; 4239 int port; 4240 struct ksz_port_mib *port_mib; 4241 4242 memset(counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM); 4243 for (i = 0, port = first; i < cnt; i++, port++) { 4244 port_mib = &hw->port_mib[port]; 4245 for (mib = port_mib->mib_start; mib < hw->mib_cnt; mib++) 4246 counter[mib] += port_mib->counter[mib]; 4247 } 4248 } 4249 4250 /** 4251 * send_packet - send packet 4252 * @skb: Socket buffer. 4253 * @dev: Network device. 4254 * 4255 * This routine is used to send a packet out to the network. 4256 */ 4257 static void send_packet(struct sk_buff *skb, struct net_device *dev) 4258 { 4259 struct ksz_desc *desc; 4260 struct ksz_desc *first; 4261 struct dev_priv *priv = netdev_priv(dev); 4262 struct dev_info *hw_priv = priv->adapter; 4263 struct ksz_hw *hw = &hw_priv->hw; 4264 struct ksz_desc_info *info = &hw->tx_desc_info; 4265 struct ksz_dma_buf *dma_buf; 4266 int len; 4267 int last_frag = skb_shinfo(skb)->nr_frags; 4268 4269 /* 4270 * KSZ8842 with multiple device interfaces needs to be told which port 4271 * to send. 4272 */ 4273 if (hw->dev_count > 1) 4274 hw->dst_ports = 1 << priv->port.first_port; 4275 4276 /* Hardware will pad the length to 60. */ 4277 len = skb->len; 4278 4279 /* Remember the very first descriptor. */ 4280 first = info->cur; 4281 desc = first; 4282 4283 dma_buf = DMA_BUFFER(desc); 4284 if (last_frag) { 4285 int frag; 4286 skb_frag_t *this_frag; 4287 4288 dma_buf->len = skb_headlen(skb); 4289 4290 dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data, 4291 dma_buf->len, DMA_TO_DEVICE); 4292 set_tx_buf(desc, dma_buf->dma); 4293 set_tx_len(desc, dma_buf->len); 4294 4295 frag = 0; 4296 do { 4297 this_frag = &skb_shinfo(skb)->frags[frag]; 4298 4299 /* Get a new descriptor. */ 4300 get_tx_pkt(info, &desc); 4301 4302 /* Keep track of descriptors used so far. 
*/ 4303 ++hw->tx_int_cnt; 4304 4305 dma_buf = DMA_BUFFER(desc); 4306 dma_buf->len = skb_frag_size(this_frag); 4307 4308 dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, 4309 skb_frag_address(this_frag), 4310 dma_buf->len, 4311 DMA_TO_DEVICE); 4312 set_tx_buf(desc, dma_buf->dma); 4313 set_tx_len(desc, dma_buf->len); 4314 4315 frag++; 4316 if (frag == last_frag) 4317 break; 4318 4319 /* Do not release the last descriptor here. */ 4320 release_desc(desc); 4321 } while (1); 4322 4323 /* current points to the last descriptor. */ 4324 info->cur = desc; 4325 4326 /* Release the first descriptor. */ 4327 release_desc(first); 4328 } else { 4329 dma_buf->len = len; 4330 4331 dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data, 4332 dma_buf->len, DMA_TO_DEVICE); 4333 set_tx_buf(desc, dma_buf->dma); 4334 set_tx_len(desc, dma_buf->len); 4335 } 4336 4337 if (skb->ip_summed == CHECKSUM_PARTIAL) { 4338 (desc)->sw.buf.tx.csum_gen_tcp = 1; 4339 (desc)->sw.buf.tx.csum_gen_udp = 1; 4340 } 4341 4342 /* 4343 * The last descriptor holds the packet so that it can be returned to 4344 * network subsystem after all descriptors are transmitted. 4345 */ 4346 dma_buf->skb = skb; 4347 4348 hw_send_pkt(hw); 4349 4350 /* Update transmit statistics. */ 4351 dev->stats.tx_packets++; 4352 dev->stats.tx_bytes += len; 4353 } 4354 4355 /** 4356 * transmit_cleanup - clean up transmit descriptors 4357 * @hw_priv: Network device. 4358 * @normal: break if owned 4359 * 4360 * This routine is called to clean up the transmitted buffers. 
4361 */ 4362 static void transmit_cleanup(struct dev_info *hw_priv, int normal) 4363 { 4364 int last; 4365 union desc_stat status; 4366 struct ksz_hw *hw = &hw_priv->hw; 4367 struct ksz_desc_info *info = &hw->tx_desc_info; 4368 struct ksz_desc *desc; 4369 struct ksz_dma_buf *dma_buf; 4370 struct net_device *dev = NULL; 4371 4372 spin_lock_irq(&hw_priv->hwlock); 4373 last = info->last; 4374 4375 while (info->avail < info->alloc) { 4376 /* Get next descriptor which is not hardware owned. */ 4377 desc = &info->ring[last]; 4378 status.data = le32_to_cpu(desc->phw->ctrl.data); 4379 if (status.tx.hw_owned) { 4380 if (normal) 4381 break; 4382 else 4383 reset_desc(desc, status); 4384 } 4385 4386 dma_buf = DMA_BUFFER(desc); 4387 dma_unmap_single(&hw_priv->pdev->dev, dma_buf->dma, 4388 dma_buf->len, DMA_TO_DEVICE); 4389 4390 /* This descriptor contains the last buffer in the packet. */ 4391 if (dma_buf->skb) { 4392 dev = dma_buf->skb->dev; 4393 4394 /* Release the packet back to network subsystem. */ 4395 dev_kfree_skb_irq(dma_buf->skb); 4396 dma_buf->skb = NULL; 4397 } 4398 4399 /* Free the transmitted descriptor. */ 4400 last++; 4401 last &= info->mask; 4402 info->avail++; 4403 } 4404 info->last = last; 4405 spin_unlock_irq(&hw_priv->hwlock); 4406 4407 /* Notify the network subsystem that the packet has been sent. */ 4408 if (dev) 4409 netif_trans_update(dev); 4410 } 4411 4412 /** 4413 * tx_done - transmit done processing 4414 * @hw_priv: Network device. 4415 * 4416 * This routine is called when the transmit interrupt is triggered, indicating 4417 * either a packet is sent successfully or there are transmit errors. 
 */
static void tx_done(struct dev_info *hw_priv)
{
	struct ksz_hw *hw = &hw_priv->hw;
	int port;

	/* Reclaim finished transmit descriptors, then restart any queue
	 * that was stopped while waiting for descriptor space.
	 */
	transmit_cleanup(hw_priv, 1);

	for (port = 0; port < hw->dev_count; port++) {
		struct net_device *dev = hw->port_info[port].pdev;

		if (netif_running(dev) && netif_queue_stopped(dev))
			netif_wake_queue(dev);
	}
}

/* Carry metadata from an old socket buffer over to its replacement and
 * release the old one.
 */
static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
{
	skb->dev = old->dev;
	skb->protocol = old->protocol;
	skb->ip_summed = old->ip_summed;
	skb->csum = old->csum;
	skb_set_network_header(skb, ETH_HLEN);

	dev_consume_skb_any(old);
}

/**
 * netdev_tx - send out packet
 * @skb: Socket buffer.
 * @dev: Network device.
 *
 * This function is used by the upper network layer to send out a packet.
 *
 * Return 0 if successful; otherwise an error code indicating failure.
 */
static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int left;
	int num = 1;
	int rc = 0;

	/* Work around the small-packet transmit bug: pad frames up to
	 * 50 bytes, reallocating when the buffer has no tailroom.
	 */
	if (hw->features & SMALL_PACKET_TX_BUG) {
		struct sk_buff *org_skb = skb;

		if (skb->len <= 48) {
			if (skb_end_pointer(skb) - skb->data >= 50) {
				memset(&skb->data[skb->len], 0, 50 - skb->len);
				skb->len = 50;
			} else {
				skb = netdev_alloc_skb(dev, 50);
				if (!skb)
					return NETDEV_TX_BUSY;
				memcpy(skb->data, org_skb->data, org_skb->len);
				memset(&skb->data[org_skb->len], 0,
					50 - org_skb->len);
				skb->len = 50;
				copy_old_skb(org_skb, skb);
			}
		}
	}

	spin_lock_irq(&hw_priv->hwlock);

	num = skb_shinfo(skb)->nr_frags + 1;
	left = hw_alloc_pkt(hw, skb->len, num);
	if (left) {
		/* Linearize the frame (and resolve the checksum in
		 * software) when there are not enough descriptors for all
		 * fragments, or for IPv6 with partial checksum.
		 */
		if (left < num ||
		    (CHECKSUM_PARTIAL == skb->ip_summed &&
		     skb->protocol == htons(ETH_P_IPV6))) {
			struct sk_buff *org_skb = skb;

			skb = netdev_alloc_skb(dev, org_skb->len);
			if (!skb) {
				rc = NETDEV_TX_BUSY;
				goto unlock;
			}
			skb_copy_and_csum_dev(org_skb, skb->data);
			org_skb->ip_summed = CHECKSUM_NONE;
			skb->len = org_skb->len;
			copy_old_skb(org_skb, skb);
		}
		send_packet(skb, dev);
		if (left <= num)
			netif_stop_queue(dev);
	} else {
		/* Stop the transmit queue until packet is allocated. */
		netif_stop_queue(dev);
		rc = NETDEV_TX_BUSY;
	}
unlock:
	spin_unlock_irq(&hw_priv->hwlock);

	return rc;
}

/**
 * netdev_tx_timeout - transmit timeout processing
 * @dev: Network device.
 * @txqueue: index of hanging queue
 *
 * This routine is called when the transmit timer expires. That indicates the
 * hardware is not running correctly because transmit interrupts are not
 * triggered to free up resources so that the transmit routine can continue
 * sending out packets. The hardware is reset to correct the problem.
 */
static void netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	static unsigned long last_reset;

	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int port;

	if (hw->dev_count > 1) {
		/*
		 * Only reset the hardware if time between calls is long
		 * enough.
		 */
		if (time_before_eq(jiffies, last_reset + dev->watchdog_timeo))
			hw_priv = NULL;
	}

	last_reset = jiffies;
	if (hw_priv) {
		hw_dis_intr(hw);
		hw_disable(hw);

		transmit_cleanup(hw_priv, 0);
		hw_reset_pkts(&hw->rx_desc_info);
		hw_reset_pkts(&hw->tx_desc_info);
		ksz_init_rx_buffers(hw_priv);

		hw_reset(hw);

		hw_set_desc_base(hw,
			hw->tx_desc_info.ring_phys,
			hw->rx_desc_info.ring_phys);
		hw_set_addr(hw);
		if (hw->all_multi)
			hw_set_multicast(hw, hw->all_multi);
		else if (hw->multi_list_size)
			hw_set_grp_addr(hw);

		if (hw->dev_count > 1) {
			hw_set_add_addr(hw);
			for (port = 0; port < SWITCH_PORT_NUM; port++) {
				struct net_device *port_dev;

				port_set_stp_state(hw, port,
					STP_STATE_DISABLED);

				port_dev = hw->port_info[port].pdev;
				if (netif_running(port_dev))
					port_set_stp_state(hw, port,
						STP_STATE_SIMPLE);
			}
		}

		hw_enable(hw);
		hw_ena_intr(hw);
	}

	netif_trans_update(dev);
	netif_wake_queue(dev);
}

/* Mark TCP/IP frames whose hardware checksum passed as CHECKSUM_UNNECESSARY,
 * skipping over a VLAN header if present.
 */
static inline void csum_verified(struct sk_buff *skb)
{
	unsigned short protocol;
	struct iphdr *iph;

	protocol = skb->protocol;
	skb_reset_network_header(skb);
	iph = (struct iphdr *) skb_network_header(skb);
	if (protocol == htons(ETH_P_8021Q)) {
		protocol = iph->tot_len;
		skb_set_network_header(skb, VLAN_HLEN);
		iph = (struct iphdr *) skb_network_header(skb);
	}
	if (protocol == htons(ETH_P_IP)) {
		if (iph->protocol == IPPROTO_TCP)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

/* Copy one received frame out of the DMA buffer into a freshly allocated
 * socket buffer and hand it to the network stack.  Returns 0 on success or
 * -ENOMEM when no skb could be allocated (frame is dropped and counted).
 */
static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
	struct ksz_desc *desc, union desc_stat status)
{
	int packet_len;
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_dma_buf *dma_buf;
	struct sk_buff *skb;

	/* Received length includes 4-byte CRC. */
	packet_len = status.rx.frame_len - 4;

	dma_buf = DMA_BUFFER(desc);
	dma_sync_single_for_cpu(&hw_priv->pdev->dev, dma_buf->dma,
		packet_len + 4, DMA_FROM_DEVICE);

	do {
		/* skb->data != skb->head */
		skb = netdev_alloc_skb(dev, packet_len + 2);
		if (!skb) {
			dev->stats.rx_dropped++;
			return -ENOMEM;
		}

		/*
		 * Align socket buffer in 4-byte boundary for better
		 * performance.
		 */
		skb_reserve(skb, 2);

		skb_put_data(skb, dma_buf->skb->data, packet_len);
	} while (0);

	skb->protocol = eth_type_trans(skb, dev);

	if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
		csum_verified(skb);

	/* Update receive statistics. */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += packet_len;

	/* Notify upper layer for received packet. */
	netif_rx(skb);

	return 0;
}

/* Receive loop for single-device operation: every frame is delivered to the
 * first (only) network device.
 */
static int dev_rcv_packets(struct dev_info *hw_priv)
{
	int next;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct net_device *dev = hw->port_info[0].pdev;
	struct ksz_desc_info *info = &hw->rx_desc_info;
	int left = info->alloc;
	struct ksz_desc *desc;
	int received = 0;

	next = info->next;
	while (left--) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[next];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.rx.hw_owned)
			break;

		/* Status valid only when last descriptor bit is set.
		 */
		if (status.rx.last_desc && status.rx.first_desc) {
			if (rx_proc(dev, hw, desc, status))
				goto release_packet;
			received++;
		}

release_packet:
		release_desc(desc);
		next++;
		next &= info->mask;
	}
	info->next = next;

	return received;
}

/* Receive loop for multiple-device operation: frames are demultiplexed to
 * the net_device of the switch port they arrived on; frames for ports whose
 * device is down are released without processing.
 */
static int port_rcv_packets(struct dev_info *hw_priv)
{
	int next;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct net_device *dev = hw->port_info[0].pdev;
	struct ksz_desc_info *info = &hw->rx_desc_info;
	int left = info->alloc;
	struct ksz_desc *desc;
	int received = 0;

	next = info->next;
	while (left--) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[next];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.rx.hw_owned)
			break;

		if (hw->dev_count > 1) {
			/* Get received port number. */
			int p = HW_TO_DEV_PORT(status.rx.src_port);

			dev = hw->port_info[p].pdev;
			if (!netif_running(dev))
				goto release_packet;
		}

		/* Status valid only when last descriptor bit is set. */
		if (status.rx.last_desc && status.rx.first_desc) {
			if (rx_proc(dev, hw, desc, status))
				goto release_packet;
			received++;
		}

release_packet:
		release_desc(desc);
		next++;
		next &= info->mask;
	}
	info->next = next;

	return received;
}

/* Receive loop used when error reporting is enabled (huge-frame mode):
 * accepts error-free frames and frames whose only error is "too long";
 * all other errored frames are dropped and counted.
 */
static int dev_rcv_special(struct dev_info *hw_priv)
{
	int next;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct net_device *dev = hw->port_info[0].pdev;
	struct ksz_desc_info *info = &hw->rx_desc_info;
	int left = info->alloc;
	struct ksz_desc *desc;
	int received = 0;

	next = info->next;
	while (left--) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[next];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.rx.hw_owned)
			break;

		if (hw->dev_count > 1) {
			/* Get received port number. */
			int p = HW_TO_DEV_PORT(status.rx.src_port);

			dev = hw->port_info[p].pdev;
			if (!netif_running(dev))
				goto release_packet;
		}

		/* Status valid only when last descriptor bit is set. */
		if (status.rx.last_desc && status.rx.first_desc) {
			/*
			 * Receive without error. With receive errors
			 * disabled, packets with receive errors will be
			 * dropped, so no need to check the error bit.
			 */
			if (!status.rx.error || (status.data &
					KS_DESC_RX_ERROR_COND) ==
					KS_DESC_RX_ERROR_TOO_LONG) {
				if (rx_proc(dev, hw, desc, status))
					goto release_packet;
				received++;
			} else {
				struct dev_priv *priv = netdev_priv(dev);

				/* Update receive error statistics. */
				priv->port.counter[OID_COUNTER_RCV_ERROR]++;
			}
		}

release_packet:
		release_desc(desc);
		next++;
		next &= info->mask;
	}
	info->next = next;

	return received;
}

/* Receive tasklet: drain the RX ring via the installed dev_rcv handler and
 * re-enable RX interrupts once the ring is empty; otherwise reschedule.
 */
static void rx_proc_task(struct tasklet_struct *t)
{
	struct dev_info *hw_priv = from_tasklet(hw_priv, t, rx_tasklet);
	struct ksz_hw *hw = &hw_priv->hw;

	if (!hw->enabled)
		return;
	if (unlikely(!hw_priv->dev_rcv(hw_priv))) {

		/* In case receive process is suspended because of overrun. */
		hw_resume_rx(hw);

		/* tasklets are interruptible. */
		spin_lock_irq(&hw_priv->hwlock);
		hw_turn_on_intr(hw, KS884X_INT_RX_MASK);
		spin_unlock_irq(&hw_priv->hwlock);
	} else {
		hw_ack_intr(hw, KS884X_INT_RX);
		tasklet_schedule(&hw_priv->rx_tasklet);
	}
}

/* Transmit tasklet: reclaim finished TX packets and re-enable TX interrupts. */
static void tx_proc_task(struct tasklet_struct *t)
{
	struct dev_info *hw_priv = from_tasklet(hw_priv, t, tx_tasklet);
	struct ksz_hw *hw = &hw_priv->hw;

	hw_ack_intr(hw, KS884X_INT_TX_MASK);

	tx_done(hw_priv);

	/* tasklets are interruptible. */
	spin_lock_irq(&hw_priv->hwlock);
	hw_turn_on_intr(hw, KS884X_INT_TX);
	spin_unlock_irq(&hw_priv->hwlock);
}

/* Track the RX-stopped interrupt: restart the receiver if it should still be
 * running, or mask the interrupt once the stop is expected.
 */
static inline void handle_rx_stop(struct ksz_hw *hw)
{
	/* Receive just has been stopped. */
	if (0 == hw->rx_stop)
		hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
	else if (hw->rx_stop > 1) {
		if (hw->enabled && (hw->rx_cfg & DMA_RX_ENABLE)) {
			hw_start_rx(hw);
		} else {
			hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
			hw->rx_stop = 0;
		}
	} else
		/* Receive just has been started. */
		hw->rx_stop++;
}

/**
 * netdev_intr - interrupt handling
 * @irq: Interrupt number.
 * @dev_id: Network device.
 *
 * This function is called by upper network layer to signal interrupt.
 *
 * Return IRQ_HANDLED if interrupt is handled.
 */
static irqreturn_t netdev_intr(int irq, void *dev_id)
{
	uint int_enable = 0;
	struct net_device *dev = (struct net_device *) dev_id;
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	spin_lock(&hw_priv->hwlock);

	hw_read_intr(hw, &int_enable);

	/* Not our interrupt!
	 */
	if (!int_enable) {
		spin_unlock(&hw_priv->hwlock);
		return IRQ_NONE;
	}

	do {
		hw_ack_intr(hw, int_enable);
		int_enable &= hw->intr_mask;

		/* TX and RX work is deferred to tasklets; the corresponding
		 * interrupt bits stay disabled until the tasklet finishes.
		 */
		if (unlikely(int_enable & KS884X_INT_TX_MASK)) {
			hw_dis_intr_bit(hw, KS884X_INT_TX_MASK);
			tasklet_schedule(&hw_priv->tx_tasklet);
		}

		if (likely(int_enable & KS884X_INT_RX)) {
			hw_dis_intr_bit(hw, KS884X_INT_RX);
			tasklet_schedule(&hw_priv->rx_tasklet);
		}

		if (unlikely(int_enable & KS884X_INT_RX_OVERRUN)) {
			dev->stats.rx_fifo_errors++;
			hw_resume_rx(hw);
		}

		if (unlikely(int_enable & KS884X_INT_PHY)) {
			struct ksz_port *port = &priv->port;

			hw->features |= LINK_INT_WORKING;
			port_get_link_speed(port);
		}

		if (unlikely(int_enable & KS884X_INT_RX_STOPPED)) {
			handle_rx_stop(hw);
			break;
		}

		if (unlikely(int_enable & KS884X_INT_TX_STOPPED)) {
			u32 data;

			hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
			pr_info("Tx stopped\n");
			data = readl(hw->io + KS_DMA_TX_CTRL);
			if (!(data & DMA_TX_ENABLE))
				pr_info("Tx disabled\n");
			break;
		}
	} while (0);

	hw_ena_intr(hw);

	spin_unlock(&hw_priv->hwlock);

	return IRQ_HANDLED;
}

/*
 * Linux network device functions
 */


#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point: run the interrupt handler with chip interrupts
 * disabled so it can be called outside interrupt context.
 */
static void netdev_netpoll(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	hw_dis_intr(&hw_priv->hw);
	netdev_intr(dev->irq, dev);
}
#endif

/* Recompute port-based VLAN membership after a change in STP port state. */
static void bridge_change(struct ksz_hw *hw)
{
	int port;
	u8 member;
	struct ksz_switch *sw = hw->ksz_switch;

	/* No ports in forwarding state. */
	if (!sw->member) {
		port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
		sw_block_addr(hw);
	}
	for (port = 0; port < SWITCH_PORT_NUM; port++) {
		if (STP_STATE_FORWARDING == sw->port_cfg[port].stp_state)
			member = HOST_MASK | sw->member;
		else
			member = HOST_MASK | (1 << port);
		if (member != sw->port_cfg[port].member)
			sw_cfg_port_base_vlan(hw, port, member);
	}
}

/**
 * netdev_close - close network device
 * @dev: Network device.
 *
 * This function process the close operation of network device. This is caused
 * by the user command "ifconfig ethX down."
 *
 * Return 0 if successful; otherwise an error code indicating failure.
 */
static int netdev_close(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_port *port = &priv->port;
	struct ksz_hw *hw = &hw_priv->hw;
	int pi;

	netif_stop_queue(dev);

	ksz_stop_timer(&priv->monitor_timer_info);

	/* Need to shut the port manually in multiple device interfaces mode. */
	if (hw->dev_count > 1) {
		port_set_stp_state(hw, port->first_port, STP_STATE_DISABLED);

		/* Port is closed. Need to change bridge setting. */
		if (hw->features & STP_SUPPORT) {
			pi = 1 << port->first_port;
			if (hw->ksz_switch->member & pi) {
				hw->ksz_switch->member &= ~pi;
				bridge_change(hw);
			}
		}
	}
	if (port->first_port > 0)
		hw_del_addr(hw, dev->dev_addr);
	if (!hw_priv->wol_enable)
		port_set_power_saving(port, true);

	if (priv->multicast)
		--hw->all_multi;
	if (priv->promiscuous)
		--hw->promiscuous;

	/* Full hardware shutdown only when the last interface closes. */
	hw_priv->opened--;
	if (!(hw_priv->opened)) {
		ksz_stop_timer(&hw_priv->mib_timer_info);
		flush_work(&hw_priv->mib_read);

		hw_dis_intr(hw);
		hw_disable(hw);
		hw_clr_multicast(hw);

		/* Delay for receive task to stop scheduling itself. */
		msleep(2000 / HZ);

		tasklet_kill(&hw_priv->rx_tasklet);
		tasklet_kill(&hw_priv->tx_tasklet);
		free_irq(dev->irq, hw_priv->dev);

		transmit_cleanup(hw_priv, 0);
		hw_reset_pkts(&hw->rx_desc_info);
		hw_reset_pkts(&hw->tx_desc_info);

		/* Clean out static MAC table when the switch is shutdown. */
		if (hw->features & STP_SUPPORT)
			sw_clr_sta_mac_table(hw);
	}

	return 0;
}

/* Program huge-frame support: toggle the switch's huge-packet bit and pick
 * the matching receive handler (error-tolerant when huge frames are on).
 */
static void hw_cfg_huge_frame(struct dev_info *hw_priv, struct ksz_hw *hw)
{
	if (hw->ksz_switch) {
		u32 data;

		data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
		if (hw->features & RX_HUGE_FRAME)
			data |= SWITCH_HUGE_PACKET;
		else
			data &= ~SWITCH_HUGE_PACKET;
		writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
	}
	if (hw->features & RX_HUGE_FRAME) {
		hw->rx_cfg |= DMA_RX_ERROR;
		hw_priv->dev_rcv = dev_rcv_special;
	} else {
		hw->rx_cfg &= ~DMA_RX_ERROR;
		if (hw->dev_count > 1)
			hw_priv->dev_rcv = port_rcv_packets;
		else
			hw_priv->dev_rcv = dev_rcv_packets;
	}
}

/* One-time hardware bring-up on first open: request the IRQ, set up the
 * tasklets, reset the chip and initialize the descriptor rings.
 */
static int prepare_hardware(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int rc = 0;

	/* Remember the network device that requests interrupts. */
	hw_priv->dev = dev;
	rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
	if (rc)
		return rc;
	tasklet_setup(&hw_priv->rx_tasklet, rx_proc_task);
	tasklet_setup(&hw_priv->tx_tasklet, tx_proc_task);

	hw->promiscuous = 0;
	hw->all_multi = 0;
	hw->multi_list_size = 0;

	hw_reset(hw);

	hw_set_desc_base(hw,
		hw->tx_desc_info.ring_phys, hw->rx_desc_info.ring_phys);
	hw_set_addr(hw);
	hw_cfg_huge_frame(hw_priv, hw);
	ksz_init_rx_buffers(hw_priv);
	return 0;
}

/* Update the carrier state: carrier is on when the reported media state
 * matches the state cached in the private structure.
 */
static void set_media_state(struct net_device *dev, int media_state)
{
	struct dev_priv *priv = netdev_priv(dev);

	if (media_state == priv->media_state)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	netif_info(priv, link, dev, "link %s\n",
		   media_state == priv->media_state ?
		   "on" : "off");
}

/**
 * netdev_open - open network device
 * @dev: Network device.
 *
 * This function process the open operation of network device. This is caused
 * by the user command "ifconfig ethX up."
 *
 * Return 0 if successful; otherwise an error code indicating failure.
 */
static int netdev_open(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	unsigned long next_jiffies;
	int i;
	int p;
	int rc = 0;

	next_jiffies = jiffies + HZ * 2;
	priv->multicast = 0;
	priv->promiscuous = 0;

	/* Reset device statistics. */
	memset(&dev->stats, 0, sizeof(struct net_device_stats));
	memset((void *) port->counter, 0,
		(sizeof(u64) * OID_COUNTER_LAST));

	/* Hardware is prepared only when the first interface opens. */
	if (!(hw_priv->opened)) {
		rc = prepare_hardware(dev);
		if (rc)
			return rc;
		for (i = 0; i < hw->mib_port_cnt; i++) {
			/* Stagger MIB reads one second apart per port. */
			next_jiffies += HZ * 1;
			hw_priv->counter[i].time = next_jiffies;
			hw->port_mib[i].state = media_disconnected;
			port_init_cnt(hw, i);
		}
		if (hw->ksz_switch)
			hw->port_mib[HOST_PORT].state = media_connected;
		else {
			hw_add_wol_bcast(hw);
			hw_cfg_wol_pme(hw, 0);
			hw_clr_wol_pme_status(&hw_priv->hw);
		}
	}
	port_set_power_saving(port, false);

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		/*
		 * Initialize to invalid value so that link detection
		 * is done.
		 */
		hw->port_info[p].partner = 0xFF;
		hw->port_info[p].state = media_disconnected;
	}

	/* Need to open the port in multiple device interfaces mode. */
	if (hw->dev_count > 1) {
		port_set_stp_state(hw, port->first_port, STP_STATE_SIMPLE);
		if (port->first_port > 0)
			hw_add_addr(hw, dev->dev_addr);
	}

	port_get_link_speed(port);
	if (port->force_link)
		port_force_link_speed(port);
	else
		port_set_link_speed(port);

	if (!(hw_priv->opened)) {
		hw_setup_intr(hw);
		hw_enable(hw);
		hw_ena_intr(hw);

		if (hw->mib_port_cnt)
			ksz_start_timer(&hw_priv->mib_timer_info,
				hw_priv->mib_timer_info.period);
	}

	hw_priv->opened++;

	ksz_start_timer(&priv->monitor_timer_info,
		priv->monitor_timer_info.period);

	priv->media_state = port->linked->state;

	set_media_state(dev, media_connected);
	netif_start_queue(dev);

	return 0;
}

/* RX errors = rx_errors */
/* RX dropped = rx_dropped */
/* RX overruns = rx_fifo_errors */
/* RX frame = rx_crc_errors + rx_frame_errors + rx_length_errors */
/* TX errors = tx_errors */
/* TX dropped = tx_dropped */
/* TX overruns = tx_fifo_errors */
/* TX carrier = tx_aborted_errors + tx_carrier_errors + tx_window_errors */
/* collisions = collisions */

/**
 * netdev_query_statistics - query network device statistics
 * @dev: Network device.
 *
 * This function returns the statistics of the network device. The device
 * needs not be opened.
 *
 * Return network device statistics.
 */
static struct net_device_stats *netdev_query_statistics(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct ksz_port *port = &priv->port;
	struct ksz_hw *hw = &priv->adapter->hw;
	struct ksz_port_mib *mib;
	int i;
	int p;

	dev->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR];
	dev->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR];

	/* Reset to zero to add count later. */
	dev->stats.multicast = 0;
	dev->stats.collisions = 0;
	dev->stats.rx_length_errors = 0;
	dev->stats.rx_crc_errors = 0;
	dev->stats.rx_frame_errors = 0;
	dev->stats.tx_window_errors = 0;

	/* Aggregate MIB counters across all ports belonging to this device. */
	for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
		mib = &hw->port_mib[p];

		dev->stats.multicast += (unsigned long)
			mib->counter[MIB_COUNTER_RX_MULTICAST];

		dev->stats.collisions += (unsigned long)
			mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION];

		dev->stats.rx_length_errors += (unsigned long)(
			mib->counter[MIB_COUNTER_RX_UNDERSIZE] +
			mib->counter[MIB_COUNTER_RX_FRAGMENT] +
			mib->counter[MIB_COUNTER_RX_OVERSIZE] +
			mib->counter[MIB_COUNTER_RX_JABBER]);
		dev->stats.rx_crc_errors += (unsigned long)
			mib->counter[MIB_COUNTER_RX_CRC_ERR];
		dev->stats.rx_frame_errors += (unsigned long)(
			mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] +
			mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]);

		dev->stats.tx_window_errors += (unsigned long)
			mib->counter[MIB_COUNTER_TX_LATE_COLLISION];
	}

	return &dev->stats;
}

/**
 * netdev_set_mac_address - set network device MAC address
 * @dev: Network device.
 * @addr: Buffer of MAC address.
 *
 * This function is used to set the MAC address of the network device.
 *
 * Return 0 to indicate success.
 */
static int netdev_set_mac_address(struct net_device *dev, void *addr)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct sockaddr *mac = addr;
	uint interrupt;

	/* Secondary ports use the additional-address registers; the first
	 * port overrides the main MAC address.
	 */
	if (priv->port.first_port > 0)
		hw_del_addr(hw, dev->dev_addr);
	else {
		hw->mac_override = 1;
		memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
	}

	eth_hw_addr_set(dev, mac->sa_data);

	interrupt = hw_block_intr(hw);

	if (priv->port.first_port > 0)
		hw_add_addr(hw, dev->dev_addr);
	else
		hw_set_addr(hw);
	hw_restore_intr(hw, interrupt);

	return 0;
}

/* Apply a change of promiscuous mode, reference-counting across devices so
 * the hardware bit only toggles on the 0 <-> 1 transitions.
 */
static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
	struct ksz_hw *hw, int promiscuous)
{
	if (promiscuous != priv->promiscuous) {
		u8 prev_state = hw->promiscuous;

		if (promiscuous)
			++hw->promiscuous;
		else
			--hw->promiscuous;
		priv->promiscuous = promiscuous;

		/* Turn on/off promiscuous mode. */
		if (hw->promiscuous <= 1 && prev_state <= 1)
			hw_set_promiscuous(hw, hw->promiscuous);

		/*
		 * Port is not in promiscuous mode, meaning it is released
		 * from the bridge.
		 */
		if ((hw->features & STP_SUPPORT) && !promiscuous &&
		    netif_is_bridge_port(dev)) {
			struct ksz_switch *sw = hw->ksz_switch;
			int port = priv->port.first_port;

			port_set_stp_state(hw, port, STP_STATE_DISABLED);
			port = 1 << port;
			if (sw->member & port) {
				sw->member &= ~port;
				bridge_change(hw);
			}
		}
	}
}

/* Apply a change of all-multicast mode, reference-counting across devices so
 * the hardware bit only toggles on the 0 <-> 1 transitions.
 */
static void dev_set_multicast(struct dev_priv *priv, struct ksz_hw *hw,
	int multicast)
{
	if (multicast != priv->multicast) {
		u8 all_multi = hw->all_multi;

		if (multicast)
			++hw->all_multi;
		else
			--hw->all_multi;
		priv->multicast = multicast;

		/* Turn on/off all multicast mode. */
		if (hw->all_multi <= 1 && all_multi <= 1)
			hw_set_multicast(hw, hw->all_multi);
	}
}

/**
 * netdev_set_rx_mode
 * @dev: Network device.
 *
 * This routine is used to set multicast addresses or put the network device
 * into promiscuous mode.
 */
static void netdev_set_rx_mode(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct netdev_hw_addr *ha;
	int multicast = (dev->flags & IFF_ALLMULTI);

	dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));

	if (hw_priv->hw.dev_count > 1)
		multicast |= (dev->flags & IFF_MULTICAST);
	dev_set_multicast(priv, hw, multicast);

	/* Cannot use different hashes in multiple device interfaces mode. */
	if (hw_priv->hw.dev_count > 1)
		return;

	if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
		int i = 0;

		/* List too big to support so turn on all multicast mode. */
		if (netdev_mc_count(dev) > MAX_MULTICAST_LIST) {
			if (MAX_MULTICAST_LIST != hw->multi_list_size) {
				hw->multi_list_size = MAX_MULTICAST_LIST;
				++hw->all_multi;
				hw_set_multicast(hw, hw->all_multi);
			}
			return;
		}

		netdev_for_each_mc_addr(ha, dev) {
			if (i >= MAX_MULTICAST_LIST)
				break;
			memcpy(hw->multi_list[i++], ha->addr, ETH_ALEN);
		}
		hw->multi_list_size = (u8) i;
		hw_set_grp_addr(hw);
	} else {
		/* Leave all-multicast mode if it was forced by an oversize
		 * list on a previous call.
		 */
		if (MAX_MULTICAST_LIST == hw->multi_list_size) {
			--hw->all_multi;
			hw_set_multicast(hw, hw->all_multi);
		}
		hw->multi_list_size = 0;
		hw_clr_multicast(hw);
	}
}

/* Change the MTU; only allowed while the interface is down.  The receive
 * buffer size is derived from the new MTU and rounded up to 4 bytes.
 */
static int netdev_change_mtu(struct net_device *dev, int new_mtu)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int hw_mtu;

	if (netif_running(dev))
		return -EBUSY;

	/* Cannot use different MTU in multiple device interfaces mode. */
	if (hw->dev_count > 1)
		if (dev != hw_priv->dev)
			return 0;

	hw_mtu = new_mtu + ETHERNET_HEADER_SIZE + 4;
	if (hw_mtu > REGULAR_RX_BUF_SIZE) {
		hw->features |= RX_HUGE_FRAME;
		hw_mtu = MAX_RX_BUF_SIZE;
	} else {
		hw->features &= ~RX_HUGE_FRAME;
		hw_mtu = REGULAR_RX_BUF_SIZE;
	}
	hw_mtu = (hw_mtu + 3) & ~3;
	hw_priv->mtu = hw_mtu;
	WRITE_ONCE(dev->mtu, new_mtu);

	return 0;
}

/**
 * netdev_ioctl - I/O control processing
 * @dev: Network device.
 * @ifr: Interface request structure.
 * @cmd: I/O control code.
 *
 * This function is used to process I/O control calls.
 *
 * Return 0 to indicate success.
 */
static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	int result = 0;
	struct mii_ioctl_data *data = if_mii(ifr);

	if (down_interruptible(&priv->proc_sem))
		return -ERESTARTSYS;

	switch (cmd) {
	/* Get address of MII PHY in use. */
	case SIOCGMIIPHY:
		data->phy_id = priv->id;
		fallthrough;

	/* Read MII PHY register. */
	case SIOCGMIIREG:
		if (data->phy_id != priv->id || data->reg_num >= 6)
			result = -EIO;
		else
			hw_r_phy(hw, port->linked->port_id, data->reg_num,
				&data->val_out);
		break;

	/* Write MII PHY register. */
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			result = -EPERM;
		else if (data->phy_id != priv->id || data->reg_num >= 6)
			result = -EIO;
		else
			hw_w_phy(hw, port->linked->port_id, data->reg_num,
				data->val_in);
		break;

	default:
		result = -EOPNOTSUPP;
	}

	up(&priv->proc_sem);

	return result;
}

/*
 * MII support
 */

/**
 * mdio_read - read PHY register
 * @dev: Network device.
 * @phy_id: The PHY id.
 * @reg_num: The register number.
 *
 * This function returns the PHY register value.
 *
 * Return the register value.
 */
static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct ksz_port *port = &priv->port;
	struct ksz_hw *hw = port->hw;
	u16 val_out;

	/* Register numbers are converted to byte offsets (reg_num << 1). */
	hw_r_phy(hw, port->linked->port_id, reg_num << 1, &val_out);
	return val_out;
}

/**
 * mdio_write - set PHY register
 * @dev: Network device.
 * @phy_id: The PHY id.
 * @reg_num: The register number.
 * @val: The register value.
 *
 * This procedure sets the PHY register value.
 */
static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct ksz_port *port = &priv->port;
	struct ksz_hw *hw = port->hw;
	int i;
	int pi;

	/* Write the register on every port belonging to this device. */
	for (i = 0, pi = port->first_port; i < port->port_cnt; i++, pi++)
		hw_w_phy(hw, pi, reg_num << 1, val);
}

/*
 * ethtool support
 */

/* EEPROM shadow buffer: 0x40 16-bit words. */
#define EEPROM_SIZE			0x40

static u16 eeprom_data[EEPROM_SIZE] = { 0 };

#define ADVERTISED_ALL			\
	(ADVERTISED_10baseT_Half |	\
	ADVERTISED_10baseT_Full |	\
	ADVERTISED_100baseT_Half |	\
	ADVERTISED_100baseT_Full)

/* These functions use the MII functions in mii.c. */

/**
 * netdev_get_link_ksettings - get network device settings
 * @dev: Network device.
 * @cmd: Ethtool command.
 *
 * This function queries the PHY and returns its state in the ethtool command.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	mutex_lock(&hw_priv->lock);
	mii_ethtool_get_link_ksettings(&priv->mii_if, cmd);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
	mutex_unlock(&hw_priv->lock);

	/* Save advertised settings for workaround in next function. */
	ethtool_convert_link_mode_to_legacy_u32(&priv->advertising,
						cmd->link_modes.advertising);

	return 0;
}

/**
 * netdev_set_link_ksettings - set network device settings
 * @dev: Network device.
 * @cmd: Ethtool command.
 *
 * This function sets the PHY according to the ethtool command.
5588 * 5589 * Return 0 if successful; otherwise an error code. 5590 */ 5591 static int netdev_set_link_ksettings(struct net_device *dev, 5592 const struct ethtool_link_ksettings *cmd) 5593 { 5594 struct dev_priv *priv = netdev_priv(dev); 5595 struct dev_info *hw_priv = priv->adapter; 5596 struct ksz_port *port = &priv->port; 5597 struct ethtool_link_ksettings copy_cmd; 5598 u32 speed = cmd->base.speed; 5599 u32 advertising; 5600 int rc; 5601 5602 ethtool_convert_link_mode_to_legacy_u32(&advertising, 5603 cmd->link_modes.advertising); 5604 5605 /* 5606 * ethtool utility does not change advertised setting if auto 5607 * negotiation is not specified explicitly. 5608 */ 5609 if (cmd->base.autoneg && priv->advertising == advertising) { 5610 advertising |= ADVERTISED_ALL; 5611 if (10 == speed) 5612 advertising &= 5613 ~(ADVERTISED_100baseT_Full | 5614 ADVERTISED_100baseT_Half); 5615 else if (100 == speed) 5616 advertising &= 5617 ~(ADVERTISED_10baseT_Full | 5618 ADVERTISED_10baseT_Half); 5619 if (0 == cmd->base.duplex) 5620 advertising &= 5621 ~(ADVERTISED_100baseT_Full | 5622 ADVERTISED_10baseT_Full); 5623 else if (1 == cmd->base.duplex) 5624 advertising &= 5625 ~(ADVERTISED_100baseT_Half | 5626 ADVERTISED_10baseT_Half); 5627 } 5628 mutex_lock(&hw_priv->lock); 5629 if (cmd->base.autoneg && 5630 (advertising & ADVERTISED_ALL) == ADVERTISED_ALL) { 5631 port->duplex = 0; 5632 port->speed = 0; 5633 port->force_link = 0; 5634 } else { 5635 port->duplex = cmd->base.duplex + 1; 5636 if (1000 != speed) 5637 port->speed = speed; 5638 if (cmd->base.autoneg) 5639 port->force_link = 0; 5640 else 5641 port->force_link = 1; 5642 } 5643 5644 memcpy(©_cmd, cmd, sizeof(copy_cmd)); 5645 ethtool_convert_legacy_u32_to_link_mode(copy_cmd.link_modes.advertising, 5646 advertising); 5647 rc = mii_ethtool_set_link_ksettings( 5648 &priv->mii_if, 5649 (const struct ethtool_link_ksettings *)©_cmd); 5650 mutex_unlock(&hw_priv->lock); 5651 return rc; 5652 } 5653 5654 /** 5655 * netdev_nway_reset - 
restart auto-negotiation 5656 * @dev: Network device. 5657 * 5658 * This function restarts the PHY for auto-negotiation. 5659 * 5660 * Return 0 if successful; otherwise an error code. 5661 */ 5662 static int netdev_nway_reset(struct net_device *dev) 5663 { 5664 struct dev_priv *priv = netdev_priv(dev); 5665 struct dev_info *hw_priv = priv->adapter; 5666 int rc; 5667 5668 mutex_lock(&hw_priv->lock); 5669 rc = mii_nway_restart(&priv->mii_if); 5670 mutex_unlock(&hw_priv->lock); 5671 return rc; 5672 } 5673 5674 /** 5675 * netdev_get_link - get network device link status 5676 * @dev: Network device. 5677 * 5678 * This function gets the link status from the PHY. 5679 * 5680 * Return true if PHY is linked and false otherwise. 5681 */ 5682 static u32 netdev_get_link(struct net_device *dev) 5683 { 5684 struct dev_priv *priv = netdev_priv(dev); 5685 int rc; 5686 5687 rc = mii_link_ok(&priv->mii_if); 5688 return rc; 5689 } 5690 5691 /** 5692 * netdev_get_drvinfo - get network driver information 5693 * @dev: Network device. 5694 * @info: Ethtool driver info data structure. 5695 * 5696 * This procedure returns the driver information. 
 */
static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->version, DRV_VERSION, sizeof(info->version));
	strscpy(info->bus_info, pci_name(hw_priv->pdev),
		sizeof(info->bus_info));
}

/* Register ranges dumped by the ethtool register-dump operation.
 * Each entry is a half-open [start, end) byte range; the list is
 * terminated by a { 0, 0 } sentinel.
 */
static struct hw_regs {
	int start;
	int end;
} hw_regs_range[] = {
	{ KS_DMA_TX_CTRL,	KS884X_INTERRUPTS_STATUS },
	{ KS_ADD_ADDR_0_LO,	KS_ADD_ADDR_F_HI },
	{ KS884X_ADDR_0_OFFSET,	KS8841_WOL_FRAME_BYTE2_OFFSET },
	{ KS884X_SIDER_P,	KS8842_SGCR7_P },
	{ KS8842_MACAR1_P,	KS8842_TOSR8_P },
	{ KS884X_P1MBCR_P,	KS8842_P3ERCR_P },
	{ 0, 0 }
};

/**
 * netdev_get_regs_len - get length of register dump
 * @dev: Network device.
 *
 * This function returns the length of the register dump.
 *
 * Return length of the register dump.
 */
static int netdev_get_regs_len(struct net_device *dev)
{
	struct hw_regs *range = hw_regs_range;
	/* 0x10 dwords of PCI config space are dumped first. */
	int regs_len = 0x10 * sizeof(u32);

	while (range->end > range->start) {
		/* Round each range up to whole 32-bit words. */
		regs_len += (range->end - range->start + 3) / 4 * 4;
		range++;
	}
	return regs_len;
}

/**
 * netdev_get_regs - get register dump
 * @dev: Network device.
 * @regs: Ethtool registers data structure.
 * @ptr: Buffer to store the register values.
 *
 * This procedure dumps the register values in the provided buffer.
 */
static void netdev_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			    void *ptr)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int *buf = (int *) ptr;
	struct hw_regs *range = hw_regs_range;
	int len;

	mutex_lock(&hw_priv->lock);
	regs->version = 0;
	/* First 0x40 bytes of PCI configuration space. */
	for (len = 0; len < 0x40; len += 4) {
		pci_read_config_dword(hw_priv->pdev, len, buf);
		buf++;
	}
	/* Then each memory-mapped range listed in hw_regs_range. */
	while (range->end > range->start) {
		for (len = range->start; len < range->end; len += 4) {
			*buf = readl(hw->io + len);
			buf++;
		}
		range++;
	}
	mutex_unlock(&hw_priv->lock);
}

#define WOL_SUPPORT			\
	(WAKE_PHY | WAKE_MAGIC |	\
	WAKE_UCAST | WAKE_MCAST |	\
	WAKE_BCAST | WAKE_ARP)

/**
 * netdev_get_wol - get Wake-on-LAN support
 * @dev: Network device.
 * @wol: Ethtool Wake-on-LAN data structure.
 *
 * This procedure returns Wake-on-LAN support.
 */
static void netdev_get_wol(struct net_device *dev,
			   struct ethtool_wolinfo *wol)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	wol->supported = hw_priv->wol_support;
	wol->wolopts = hw_priv->wol_enable;
	/* No SecureOn password support. */
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

/**
 * netdev_set_wol - set Wake-on-LAN support
 * @dev: Network device.
 * @wol: Ethtool Wake-on-LAN data structure.
 *
 * This function sets Wake-on-LAN support.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_set_wol(struct net_device *dev,
			  struct ethtool_wolinfo *wol)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	/* Need to find a way to retrieve the device IP address. */
	static const u8 net_addr[] = { 192, 168, 1, 1 };

	/* Reject any wake option the hardware does not support. */
	if (wol->wolopts & ~hw_priv->wol_support)
		return -EINVAL;

	hw_priv->wol_enable = wol->wolopts;

	/* Link wakeup cannot really be disabled. */
	if (wol->wolopts)
		hw_priv->wol_enable |= WAKE_PHY;
	hw_enable_wol(&hw_priv->hw, hw_priv->wol_enable, net_addr);
	return 0;
}

/**
 * netdev_get_msglevel - get debug message level
 * @dev: Network device.
 *
 * This function returns current debug message level.
 *
 * Return current debug message flags.
 */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

/**
 * netdev_set_msglevel - set debug message level
 * @dev: Network device.
 * @value: Debug message flags.
 *
 * This procedure sets debug message level.
 */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	struct dev_priv *priv = netdev_priv(dev);

	priv->msg_enable = value;
}

/**
 * netdev_get_eeprom_len - get EEPROM length
 * @dev: Network device.
 *
 * This function returns the length of the EEPROM.
 *
 * Return length of the EEPROM.
 */
static int netdev_get_eeprom_len(struct net_device *dev)
{
	/* EEPROM_SIZE is in 16-bit words; report bytes. */
	return EEPROM_SIZE * 2;
}

#define EEPROM_MAGIC			0x10A18842

/**
 * netdev_get_eeprom - get EEPROM data
 * @dev: Network device.
 * @eeprom: Ethtool EEPROM data structure.
 * @data: Buffer to store the EEPROM data.
 *
 * This function dumps the EEPROM data in the provided buffer.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_get_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	/* eeprom_data is the file-scope 16-bit word cache of the EEPROM. */
	u8 *eeprom_byte = (u8 *) eeprom_data;
	int i;
	int len;

	/* EEPROM is read a 16-bit word at a time; round the byte range up. */
	len = (eeprom->offset + eeprom->len + 1) / 2;
	for (i = eeprom->offset / 2; i < len; i++)
		eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
	eeprom->magic = EEPROM_MAGIC;
	memcpy(data, &eeprom_byte[eeprom->offset], eeprom->len);

	return 0;
}

/**
 * netdev_set_eeprom - write EEPROM data
 * @dev: Network device.
 * @eeprom: Ethtool EEPROM data structure.
 * @data: Data buffer.
 *
 * This function modifies the EEPROM data one byte at a time.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_set_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	u16 eeprom_word[EEPROM_SIZE];
	u8 *eeprom_byte = (u8 *) eeprom_word;
	int i;
	int len;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	/* Refresh the cached words covering the byte range to be changed. */
	len = (eeprom->offset + eeprom->len + 1) / 2;
	for (i = eeprom->offset / 2; i < len; i++)
		eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
	/* Merge the new bytes into a scratch copy, then write back only
	 * the words that actually changed.
	 */
	memcpy(eeprom_word, eeprom_data, EEPROM_SIZE * 2);
	memcpy(&eeprom_byte[eeprom->offset], data, eeprom->len);
	for (i = 0; i < EEPROM_SIZE; i++)
		if (eeprom_word[i] != eeprom_data[i]) {
			eeprom_data[i] = eeprom_word[i];
			eeprom_write(&hw_priv->hw, i, eeprom_data[i]);
		}

	return 0;
}

/**
 * netdev_get_pauseparam - get flow control parameters
 * @dev: Network device.
 * @pause: Ethtool PAUSE settings data structure.
 *
 * This procedure returns the PAUSE control flow settings.
5945 */ 5946 static void netdev_get_pauseparam(struct net_device *dev, 5947 struct ethtool_pauseparam *pause) 5948 { 5949 struct dev_priv *priv = netdev_priv(dev); 5950 struct dev_info *hw_priv = priv->adapter; 5951 struct ksz_hw *hw = &hw_priv->hw; 5952 5953 pause->autoneg = (hw->overrides & PAUSE_FLOW_CTRL) ? 0 : 1; 5954 if (!hw->ksz_switch) { 5955 pause->rx_pause = 5956 (hw->rx_cfg & DMA_RX_FLOW_ENABLE) ? 1 : 0; 5957 pause->tx_pause = 5958 (hw->tx_cfg & DMA_TX_FLOW_ENABLE) ? 1 : 0; 5959 } else { 5960 pause->rx_pause = 5961 (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET, 5962 SWITCH_RX_FLOW_CTRL)) ? 1 : 0; 5963 pause->tx_pause = 5964 (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET, 5965 SWITCH_TX_FLOW_CTRL)) ? 1 : 0; 5966 } 5967 } 5968 5969 /** 5970 * netdev_set_pauseparam - set flow control parameters 5971 * @dev: Network device. 5972 * @pause: Ethtool PAUSE settings data structure. 5973 * 5974 * This function sets the PAUSE control flow settings. 5975 * Not implemented yet. 5976 * 5977 * Return 0 if successful; otherwise an error code. 
 */
static int netdev_set_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *pause)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;

	mutex_lock(&hw_priv->lock);
	if (pause->autoneg) {
		/* Let auto-negotiation determine flow control. */
		if (!pause->rx_pause && !pause->tx_pause)
			port->flow_ctrl = PHY_NO_FLOW_CTRL;
		else
			port->flow_ctrl = PHY_FLOW_CTRL;
		hw->overrides &= ~PAUSE_FLOW_CTRL;
		port->force_link = 0;
		if (hw->ksz_switch) {
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_RX_FLOW_CTRL, 1);
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_TX_FLOW_CTRL, 1);
		}
		/* Re-negotiate so the new setting takes effect. */
		port_set_link_speed(port);
	} else {
		/* Force the requested flow-control settings. */
		hw->overrides |= PAUSE_FLOW_CTRL;
		if (hw->ksz_switch) {
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_RX_FLOW_CTRL, pause->rx_pause);
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_TX_FLOW_CTRL, pause->tx_pause);
		} else
			set_flow_ctrl(hw, pause->rx_pause, pause->tx_pause);
	}
	mutex_unlock(&hw_priv->lock);

	return 0;
}

/**
 * netdev_get_ringparam - get tx/rx ring parameters
 * @dev: Network device.
 * @ring: Ethtool RING settings data structure.
 * @kernel_ring: Ethtool external RING settings data structure.
 * @extack: Netlink handle.
 *
 * This procedure returns the TX/RX ring settings.
 */
static void netdev_get_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *ring,
				 struct kernel_ethtool_ringparam *kernel_ring,
				 struct netlink_ext_ack *extack)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	/* Descriptor counts are fixed at allocation; maximum is 512. */
	ring->tx_max_pending = (1 << 9);
	ring->tx_pending = hw->tx_desc_info.alloc;
	ring->rx_max_pending = (1 << 9);
	ring->rx_pending = hw->rx_desc_info.alloc;
}

#define STATS_LEN			(TOTAL_PORT_COUNTER_NUM)

/* Names for the MIB counters reported via ethtool -S, in counter order. */
static struct {
	char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[STATS_LEN] = {
	{ "rx_lo_priority_octets" },
	{ "rx_hi_priority_octets" },
	{ "rx_undersize_packets" },
	{ "rx_fragments" },
	{ "rx_oversize_packets" },
	{ "rx_jabbers" },
	{ "rx_symbol_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "rx_mac_ctrl_packets" },
	{ "rx_pause_packets" },
	{ "rx_bcast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_ucast_packets" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },

	{ "tx_lo_priority_octets" },
	{ "tx_hi_priority_octets" },
	{ "tx_late_collisions" },
	{ "tx_pause_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_deferred" },
	{ "tx_total_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },

	{ "rx_discards" },
	{ "tx_discards" },
};

/**
 * netdev_get_strings - get statistics identity strings
 * @dev: Network device.
 * @stringset: String set identifier.
 * @buf: Buffer to store the strings.
6089 * 6090 * This procedure returns the strings used to identify the statistics. 6091 */ 6092 static void netdev_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 6093 { 6094 struct dev_priv *priv = netdev_priv(dev); 6095 struct dev_info *hw_priv = priv->adapter; 6096 struct ksz_hw *hw = &hw_priv->hw; 6097 6098 if (ETH_SS_STATS == stringset) 6099 memcpy(buf, ðtool_stats_keys, 6100 ETH_GSTRING_LEN * hw->mib_cnt); 6101 } 6102 6103 /** 6104 * netdev_get_sset_count - get statistics size 6105 * @dev: Network device. 6106 * @sset: The statistics set number. 6107 * 6108 * This function returns the size of the statistics to be reported. 6109 * 6110 * Return size of the statistics to be reported. 6111 */ 6112 static int netdev_get_sset_count(struct net_device *dev, int sset) 6113 { 6114 struct dev_priv *priv = netdev_priv(dev); 6115 struct dev_info *hw_priv = priv->adapter; 6116 struct ksz_hw *hw = &hw_priv->hw; 6117 6118 switch (sset) { 6119 case ETH_SS_STATS: 6120 return hw->mib_cnt; 6121 default: 6122 return -EOPNOTSUPP; 6123 } 6124 } 6125 6126 /** 6127 * netdev_get_ethtool_stats - get network device statistics 6128 * @dev: Network device. 6129 * @stats: Ethtool statistics data structure. 6130 * @data: Buffer to store the statistics. 6131 * 6132 * This procedure returns the statistics. 
 */
static void netdev_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	int n_stats = stats->n_stats;
	int i;
	int n;
	int p;
	u64 counter[TOTAL_PORT_COUNTER_NUM];

	/* Flag connected ports so the MIB work item reads their counters. */
	mutex_lock(&hw_priv->lock);
	n = SWITCH_PORT_NUM;
	for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
		if (media_connected == hw->port_mib[p].state) {
			hw_priv->counter[p].read = 1;

			/* Remember first port that requests read. */
			if (n == SWITCH_PORT_NUM)
				n = p;
		}
	}
	mutex_unlock(&hw_priv->lock);

	if (n < SWITCH_PORT_NUM)
		schedule_work(&hw_priv->mib_read);

	/* Wait (with timeout) until the work item marks each requested
	 * port's counters as read (read == 2).
	 */
	if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) {
		p = n;
		wait_event_interruptible_timeout(
			hw_priv->counter[p].counter,
			2 == hw_priv->counter[p].read,
			HZ * 1);
	} else
		for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) {
			if (0 == i) {
				wait_event_interruptible_timeout(
					hw_priv->counter[p].counter,
					2 == hw_priv->counter[p].read,
					HZ * 2);
			} else if (hw->port_mib[p].cnt_ptr) {
				wait_event_interruptible_timeout(
					hw_priv->counter[p].counter,
					2 == hw_priv->counter[p].read,
					HZ * 1);
			}
		}

	get_mib_counters(hw, port->first_port, port->mib_port_cnt, counter);
	/* Clamp to the number of slots the caller provided. */
	n = hw->mib_cnt;
	if (n > n_stats)
		n = n_stats;
	n_stats -= n;
	for (i = 0; i < n; i++)
		*data++ = counter[i];
}

/**
 * netdev_set_features - set receive checksum support
 * @dev: Network device.
 * @features: New device features (offloads).
 *
 * This function sets receive checksum support setting.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_set_features(struct net_device *dev,
			       netdev_features_t features)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	mutex_lock(&hw_priv->lock);

	/* see note in hw_setup() */
	if (features & NETIF_F_RXCSUM)
		hw->rx_cfg |= DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP;
	else
		hw->rx_cfg &= ~(DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);

	/* Only touch the hardware when the RX DMA is running. */
	if (hw->enabled)
		writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);

	mutex_unlock(&hw_priv->lock);

	return 0;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_drvinfo		= netdev_get_drvinfo,
	.get_regs_len		= netdev_get_regs_len,
	.get_regs		= netdev_get_regs,
	.get_wol		= netdev_get_wol,
	.set_wol		= netdev_set_wol,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_eeprom_len		= netdev_get_eeprom_len,
	.get_eeprom		= netdev_get_eeprom,
	.set_eeprom		= netdev_set_eeprom,
	.get_pauseparam		= netdev_get_pauseparam,
	.set_pauseparam		= netdev_set_pauseparam,
	.get_ringparam		= netdev_get_ringparam,
	.get_strings		= netdev_get_strings,
	.get_sset_count		= netdev_get_sset_count,
	.get_ethtool_stats	= netdev_get_ethtool_stats,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};

/*
 * Hardware monitoring
 */

/* Propagate a PHY link state change to the network device. */
static void update_link(struct net_device *dev, struct dev_priv *priv,
			struct ksz_port *port)
{
	if (priv->media_state != port->linked->state) {
		priv->media_state = port->linked->state;
		if (netif_running(dev))
			set_media_state(dev, media_connected);
	}
}

/* Work item that reads the per-port MIB counters; also woken by
 * netdev_get_ethtool_stats() via the counter[i].read flags.
 */
static void mib_read_work(struct work_struct *work)
{
	struct dev_info *hw_priv =
		container_of(work, struct dev_info, mib_read);
	struct ksz_hw *hw = &hw_priv->hw;
	unsigned long next_jiffies;
	struct ksz_port_mib *mib;
	int i;

	next_jiffies = jiffies;
	for (i = 0; i < hw->mib_port_cnt; i++) {
		mib = &hw->port_mib[i];

		/* Reading MIB counters or requested to read. */
		if (mib->cnt_ptr || 1 == hw_priv->counter[i].read) {

			/* Need to process receive interrupt. */
			if (port_r_cnt(hw, i))
				break;
			hw_priv->counter[i].read = 0;

			/* Finish reading counters. */
			if (0 == mib->cnt_ptr) {
				hw_priv->counter[i].read = 2;
				wake_up_interruptible(
					&hw_priv->counter[i].counter);
			}
		} else if (time_after_eq(jiffies, hw_priv->counter[i].time)) {
			/* Only read MIB counters when the port is connected. */
			if (media_connected == mib->state)
				hw_priv->counter[i].read = 1;
			next_jiffies += HZ * 1 * hw->mib_port_cnt;
			hw_priv->counter[i].time = next_jiffies;

		/* Port is just disconnected. */
		} else if (mib->link_down) {
			mib->link_down = 0;

			/* Read counters one last time after link is lost. */
			hw_priv->counter[i].read = 1;
		}
	}
}

/* Periodic timer: drives MIB reading and Wake-on-LAN PME handling. */
static void mib_monitor(struct timer_list *t)
{
	struct dev_info *hw_priv = timer_container_of(hw_priv, t,
						      mib_timer_info.timer);

	mib_read_work(&hw_priv->mib_read);

	/* This is used to verify Wake-on-LAN is working. */
	if (hw_priv->pme_wait) {
		if (time_is_before_eq_jiffies(hw_priv->pme_wait)) {
			hw_clr_wol_pme_status(&hw_priv->hw);
			hw_priv->pme_wait = 0;
		}
	} else if (hw_chk_wol_pme_status(&hw_priv->hw)) {

		/* PME is asserted.  Wait 2 seconds to clear it. */
		hw_priv->pme_wait = jiffies + HZ * 2;
	}

	ksz_update_timer(&hw_priv->mib_timer_info);
}

/**
 * dev_monitor - periodic monitoring
 * @t: timer list containing a network device pointer.
 *
 * This routine is run in a kernel timer to monitor the network device.
 */
static void dev_monitor(struct timer_list *t)
{
	struct dev_priv *priv = timer_container_of(priv, t,
						   monitor_timer_info.timer);
	struct net_device *dev = priv->mii_if.dev;
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;

	/* Poll the link only when the link interrupt is not usable. */
	if (!(hw->features & LINK_INT_WORKING))
		port_get_link_speed(port);
	update_link(dev, priv, port);

	ksz_update_timer(&priv->monitor_timer_info);
}

/*
 * Linux network device interface functions
 */

/* Driver exported variables */

static int msg_enable;

/* Module parameters; ":" means "not set, use the hardware address". */
static char *macaddr = ":";
static char *mac1addr = ":";

/*
 * This enables multiple network device mode for KSZ8842, which contains a
 * switch with two physical ports.  Some users like to take control of the
 * ports for running Spanning Tree Protocol.  The driver will create an
 * additional eth? device for the other port.
 *
 * Some limitations are the network devices cannot have different MTU and
 * multicast hash tables.
 */
static int multi_dev;

/*
 * As most users select multiple network device mode to use Spanning Tree
 * Protocol, this enables a feature in which most unicast and multicast packets
 * are forwarded inside the switch and not passed to the host.  Only packets
 * that need the host's attention are passed to it.  This prevents the host
 * wasting CPU time to examine each and every incoming packets and do the
 * forwarding itself.
 *
 * As the hack requires the private bridge header, the driver cannot compile
 * with just the kernel headers.
 *
 * Enabling STP support also turns on multiple network device mode.
 */
static int stp;

/*
 * This enables fast aging in the KSZ8842 switch.
Not sure what situation 6389 * needs that. However, fast aging is used to flush the dynamic MAC table when 6390 * STP support is enabled. 6391 */ 6392 static int fast_aging; 6393 6394 /** 6395 * netdev_init - initialize network device. 6396 * @dev: Network device. 6397 * 6398 * This function initializes the network device. 6399 * 6400 * Return 0 if successful; otherwise an error code indicating failure. 6401 */ 6402 static int __init netdev_init(struct net_device *dev) 6403 { 6404 struct dev_priv *priv = netdev_priv(dev); 6405 6406 /* 500 ms timeout */ 6407 ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000, 6408 dev_monitor); 6409 6410 /* 500 ms timeout */ 6411 dev->watchdog_timeo = HZ / 2; 6412 6413 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_RXCSUM; 6414 6415 /* 6416 * Hardware does not really support IPv6 checksum generation, but 6417 * driver actually runs faster with this on. 6418 */ 6419 dev->hw_features |= NETIF_F_IPV6_CSUM; 6420 6421 dev->features |= dev->hw_features; 6422 6423 sema_init(&priv->proc_sem, 1); 6424 6425 priv->mii_if.phy_id_mask = 0x1; 6426 priv->mii_if.reg_num_mask = 0x7; 6427 priv->mii_if.dev = dev; 6428 priv->mii_if.mdio_read = mdio_read; 6429 priv->mii_if.mdio_write = mdio_write; 6430 priv->mii_if.phy_id = priv->port.first_port + 1; 6431 6432 priv->msg_enable = netif_msg_init(msg_enable, 6433 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)); 6434 6435 return 0; 6436 } 6437 6438 static const struct net_device_ops netdev_ops = { 6439 .ndo_init = netdev_init, 6440 .ndo_open = netdev_open, 6441 .ndo_stop = netdev_close, 6442 .ndo_get_stats = netdev_query_statistics, 6443 .ndo_start_xmit = netdev_tx, 6444 .ndo_tx_timeout = netdev_tx_timeout, 6445 .ndo_change_mtu = netdev_change_mtu, 6446 .ndo_set_features = netdev_set_features, 6447 .ndo_set_mac_address = netdev_set_mac_address, 6448 .ndo_validate_addr = eth_validate_addr, 6449 .ndo_eth_ioctl = netdev_ioctl, 6450 .ndo_set_rx_mode = netdev_set_rx_mode, 6451 #ifdef 
CONFIG_NET_POLL_CONTROLLER 6452 .ndo_poll_controller = netdev_netpoll, 6453 #endif 6454 }; 6455 6456 static void netdev_free(struct net_device *dev) 6457 { 6458 if (dev->watchdog_timeo) 6459 unregister_netdev(dev); 6460 6461 free_netdev(dev); 6462 } 6463 6464 struct platform_info { 6465 struct dev_info dev_info; 6466 struct net_device *netdev[SWITCH_PORT_NUM]; 6467 }; 6468 6469 static int net_device_present; 6470 6471 static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port) 6472 { 6473 int i; 6474 int j; 6475 int got_num; 6476 int num; 6477 6478 i = j = num = got_num = 0; 6479 while (j < ETH_ALEN) { 6480 if (macaddr[i]) { 6481 int digit; 6482 6483 got_num = 1; 6484 digit = hex_to_bin(macaddr[i]); 6485 if (digit >= 0) 6486 num = num * 16 + digit; 6487 else if (':' == macaddr[i]) 6488 got_num = 2; 6489 else 6490 break; 6491 } else if (got_num) 6492 got_num = 2; 6493 else 6494 break; 6495 if (2 == got_num) { 6496 if (MAIN_PORT == port) { 6497 hw_priv->hw.override_addr[j++] = (u8) num; 6498 hw_priv->hw.override_addr[5] += 6499 hw_priv->hw.id; 6500 } else { 6501 hw_priv->hw.ksz_switch->other_addr[j++] = 6502 (u8) num; 6503 hw_priv->hw.ksz_switch->other_addr[5] += 6504 hw_priv->hw.id; 6505 } 6506 num = got_num = 0; 6507 } 6508 i++; 6509 } 6510 if (ETH_ALEN == j) { 6511 if (MAIN_PORT == port) 6512 hw_priv->hw.mac_override = 1; 6513 } 6514 } 6515 6516 #define KS884X_DMA_MASK (~0x0UL) 6517 6518 static void read_other_addr(struct ksz_hw *hw) 6519 { 6520 int i; 6521 u16 data[3]; 6522 struct ksz_switch *sw = hw->ksz_switch; 6523 6524 for (i = 0; i < 3; i++) 6525 data[i] = eeprom_read(hw, i + EEPROM_DATA_OTHER_MAC_ADDR); 6526 if ((data[0] || data[1] || data[2]) && data[0] != 0xffff) { 6527 sw->other_addr[5] = (u8) data[0]; 6528 sw->other_addr[4] = (u8)(data[0] >> 8); 6529 sw->other_addr[3] = (u8) data[1]; 6530 sw->other_addr[2] = (u8)(data[1] >> 8); 6531 sw->other_addr[1] = (u8) data[2]; 6532 sw->other_addr[0] = (u8)(data[2] >> 8); 6533 } 6534 } 6535 6536 
#ifndef PCI_VENDOR_ID_MICREL_KS
#define PCI_VENDOR_ID_MICREL_KS		0x16c6
#endif

static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct dev_priv *priv;
	struct dev_info *hw_priv;
	struct ksz_hw *hw;
	struct platform_info *info;
	struct ksz_port *port;
	unsigned long reg_base;
	unsigned long reg_len;
	int cnt;
	int i;
	int mib_port_count;
	int pi;
	int port_count;
	int result;
	char banner[sizeof(version)];
	struct ksz_switch *sw = NULL;

	result = pcim_enable_device(pdev);
	if (result)
		return result;

	result = -ENODEV;

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return result;

	/* The device is expected to be memory mapped, not I/O mapped. */
	reg_base = pci_resource_start(pdev, 0);
	reg_len = pci_resource_len(pdev, 0);
	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0)
		return result;

	if (!request_mem_region(reg_base, reg_len, DRV_NAME))
		return result;
	pci_set_master(pdev);

	result = -ENOMEM;

	info = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
	if (!info)
		goto pcidev_init_dev_err;

	hw_priv = &info->dev_info;
	hw_priv->pdev = pdev;

	hw = &hw_priv->hw;

	hw->io = ioremap(reg_base, reg_len);
	if (!hw->io)
		goto pcidev_init_io_err;

	/* hw_init() returns 1 for KSZ8841, 2 for KSZ8842, 0 if unknown. */
	cnt = hw_init(hw);
	if (!cnt) {
		if (msg_enable & NETIF_MSG_PROBE)
			pr_alert("chip not detected\n");
		result = -ENODEV;
		goto pcidev_init_alloc_err;
	}

	snprintf(banner, sizeof(banner), "%s", version);
	banner[13] = cnt + '0';		/* Replace x in "Micrel KSZ884x" */
	dev_info(&hw_priv->pdev->dev, "%s\n", banner);
	dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);

	/* Assume device is KSZ8841. */
	hw->dev_count = 1;
	port_count = 1;
	mib_port_count = 1;
	hw->addr_list_size = 0;
	hw->mib_cnt = PORT_COUNTER_NUM;
	hw->mib_port_cnt = 1;

	/* KSZ8842 has a switch with multiple ports. */
	if (2 == cnt) {
		if (fast_aging)
			hw->overrides |= FAST_AGING;

		hw->mib_cnt = TOTAL_PORT_COUNTER_NUM;

		/* Multiple network device interfaces are required. */
		if (multi_dev) {
			hw->dev_count = SWITCH_PORT_NUM;
			hw->addr_list_size = SWITCH_PORT_NUM - 1;
		}

		/* Single network device has multiple ports. */
		if (1 == hw->dev_count) {
			port_count = SWITCH_PORT_NUM;
			mib_port_count = SWITCH_PORT_NUM;
		}
		hw->mib_port_cnt = TOTAL_PORT_NUM;
		hw->ksz_switch = kzalloc(sizeof(struct ksz_switch), GFP_KERNEL);
		if (!hw->ksz_switch)
			goto pcidev_init_alloc_err;

		sw = hw->ksz_switch;
	}
	for (i = 0; i < hw->mib_port_cnt; i++)
		hw->port_mib[i].mib_start = 0;

	hw->parent = hw_priv;

	/* Default MTU is 1500. */
	hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3;

	if (ksz_alloc_mem(hw_priv))
		goto pcidev_init_mem_err;

	hw_priv->hw.id = net_device_present;

	spin_lock_init(&hw_priv->hwlock);
	mutex_init(&hw_priv->lock);

	for (i = 0; i < TOTAL_PORT_NUM; i++)
		init_waitqueue_head(&hw_priv->counter[i].counter);

	/* ":" means the macaddr module parameter was not given. */
	if (macaddr[0] != ':')
		get_mac_addr(hw_priv, macaddr, MAIN_PORT);

	/* Read MAC address and initialize override address if not overridden. */
	hw_read_addr(hw);

	/* Multiple device interfaces mode requires a second MAC address. */
	if (hw->dev_count > 1) {
		memcpy(sw->other_addr, hw->override_addr, ETH_ALEN);
		read_other_addr(hw);
		if (mac1addr[0] != ':')
			get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
	}

	hw_setup(hw);
	if (hw->ksz_switch)
		sw_setup(hw);
	else {
		/* Wake-on-LAN is only supported on the KSZ8841. */
		hw_priv->wol_support = WOL_SUPPORT;
		hw_priv->wol_enable = 0;
	}

	INIT_WORK(&hw_priv->mib_read, mib_read_work);

	/* 500 ms timeout */
	ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000,
		mib_monitor);

	for (i = 0; i < hw->dev_count; i++) {
		dev = alloc_etherdev(sizeof(struct dev_priv));
		if (!dev)
			goto pcidev_init_reg_err;
		SET_NETDEV_DEV(dev, &pdev->dev);
		info->netdev[i] = dev;

		priv = netdev_priv(dev);
		priv->adapter = hw_priv;
		priv->id = net_device_present++;

		port = &priv->port;
		port->port_cnt = port_count;
		port->mib_port_cnt = mib_port_count;
		port->first_port = i;
		port->flow_ctrl = PHY_FLOW_CTRL;

		port->hw = hw;
		port->linked = &hw->port_info[port->first_port];

		for (cnt = 0, pi = i; cnt < port_count; cnt++, pi++) {
			hw->port_info[pi].port_id = pi;
			hw->port_info[pi].pdev = dev;
			hw->port_info[pi].state = media_disconnected;
		}

		dev->mem_start = (unsigned long) hw->io;
		dev->mem_end = dev->mem_start + reg_len - 1;
		dev->irq = pdev->irq;
		if (MAIN_PORT == i)
			eth_hw_addr_set(dev, hw_priv->hw.override_addr);
		else {
			u8 addr[ETH_ALEN];

			/* If no distinct second address exists, derive one
			 * from the main address by offsetting the last byte.
			 */
			ether_addr_copy(addr, sw->other_addr);
			if (ether_addr_equal(sw->other_addr, hw->override_addr))
				addr[5] += port->first_port;
			eth_hw_addr_set(dev, addr);
		}

		dev->netdev_ops = &netdev_ops;
		dev->ethtool_ops = &netdev_ethtool_ops;

		/* MTU range: 60 - 1894 */
		dev->min_mtu = ETH_ZLEN;
		dev->max_mtu = MAX_RX_BUF_SIZE -
			       (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

		if (register_netdev(dev))
			goto pcidev_init_reg_err;
		port_set_power_saving(port, true);
	}

	pci_dev_get(hw_priv->pdev);
	pci_set_drvdata(pdev, info);
	return 0;

pcidev_init_reg_err:
	for (i = 0; i < hw->dev_count; i++) {
		if (info->netdev[i]) {
			netdev_free(info->netdev[i]);
			info->netdev[i] = NULL;
		}
	}

pcidev_init_mem_err:
	ksz_free_mem(hw_priv);
	kfree(hw->ksz_switch);

pcidev_init_alloc_err:
	iounmap(hw->io);

pcidev_init_io_err:
	kfree(info);

pcidev_init_dev_err:
	release_mem_region(reg_base, reg_len);

	return result;
}

static void pcidev_exit(struct pci_dev *pdev)
{
	int i;
	struct platform_info *info = pci_get_drvdata(pdev);
	struct dev_info *hw_priv = &info->dev_info;

	release_mem_region(pci_resource_start(pdev, 0),
			   pci_resource_len(pdev, 0));
	for (i = 0; i < hw_priv->hw.dev_count; i++) {
		if (info->netdev[i])
			netdev_free(info->netdev[i]);
	}
	if (hw_priv->hw.io)
		iounmap(hw_priv->hw.io);
	ksz_free_mem(hw_priv);
	kfree(hw_priv->hw.ksz_switch);
	pci_dev_put(hw_priv->pdev);
	kfree(info);
}

static int __maybe_unused pcidev_resume(struct device *dev_d)
{
	int i;
	struct platform_info *info = dev_get_drvdata(dev_d);
	struct dev_info *hw_priv = &info->dev_info;
	struct ksz_hw *hw = &hw_priv->hw;

	device_wakeup_disable(dev_d);

	/* Clear the PME assertion left over from suspend. */
	if (hw_priv->wol_enable)
		hw_cfg_wol_pme(hw, 0);
	for (i = 0; i < hw->dev_count; i++) {
		if (info->netdev[i]) {
			struct net_device *dev = info->netdev[i];

			if (netif_running(dev)) {
				netdev_open(dev);
				netif_device_attach(dev);
			}
		}
	}
	return 0;
}

static int __maybe_unused pcidev_suspend(struct device *dev_d)
{
	int i;
	struct platform_info *info = dev_get_drvdata(dev_d);
	struct dev_info *hw_priv = &info->dev_info;
	struct ksz_hw *hw = &hw_priv->hw;

	/* Need to find a way to retrieve the device IP address. */
	static const u8 net_addr[] = { 192, 168, 1, 1 };

	for (i = 0; i < hw->dev_count; i++) {
		if (info->netdev[i]) {
			struct net_device *dev = info->netdev[i];

			if (netif_running(dev)) {
				netif_device_detach(dev);
				netdev_close(dev);
			}
		}
	}
	if (hw_priv->wol_enable) {
		hw_enable_wol(hw, hw_priv->wol_enable, net_addr);
		hw_cfg_wol_pme(hw, 1);
	}

	device_wakeup_enable(dev_d);
	return 0;
}

static char pcidev_name[] = "ksz884xp";

static const struct pci_device_id pcidev_table[] = {
	{ PCI_VENDOR_ID_MICREL_KS, 0x8841,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_MICREL_KS, 0x8842,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pcidev_table);

static SIMPLE_DEV_PM_OPS(pcidev_pm_ops, pcidev_suspend, pcidev_resume);

static struct pci_driver pci_device_driver = {
	.driver.pm	= &pcidev_pm_ops,
	.name		= pcidev_name,
	.id_table	= pcidev_table,
	.probe		= pcidev_init,
	.remove		= pcidev_exit
};

module_pci_driver(pci_device_driver);

MODULE_DESCRIPTION("KSZ8841/2 PCI network driver");
MODULE_AUTHOR("Tristram Ha <Tristram.Ha@micrel.com>");
MODULE_LICENSE("GPL");

module_param_named(message, msg_enable, int, 0);
MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");

module_param(macaddr, charp, 0);
module_param(mac1addr, charp, 0);
module_param(fast_aging, int, 0);
module_param(multi_dev, int, 0);
module_param(stp, int, 0);
MODULE_PARM_DESC(macaddr, "MAC address");
MODULE_PARM_DESC(mac1addr, "Second MAC address");
MODULE_PARM_DESC(fast_aging, "Fast aging");
MODULE_PARM_DESC(multi_dev, "Multiple device interfaces");
MODULE_PARM_DESC(stp, "STP support");