// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014-2020 Broadcom
 */

#define pr_fmt(fmt)	"bcmgenet: " fmt

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <net/arp.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"

/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_Q16_RX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)

#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				 TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				 TOTAL_DESC * DMA_DESC_SIZE)

/* Forward declarations */
static void bcmgenet_set_rx_mode(struct net_device *dev);

static inline void bcmgenet_writel(u32 value, void __iomem *offset)
{
	/* MIPS chips strapped for BE will automagically configure the
	 * peripheral registers for CPU-native byte order.
	 */
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		__raw_writel(value, offset);
	else
		writel_relaxed(value, offset);
}

static inline u32 bcmgenet_readl(void __iomem *offset)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		return __raw_readl(offset);
	else
		return readl_relaxed(offset);
}

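/* Note on the accessor choice above: __raw_writel()/__raw_readl() do no
 * byte swapping, which is what we want on big-endian MIPS where the bus
 * already presents the registers in CPU-native order; everywhere else the
 * _relaxed() variants give little-endian MMIO without the memory barriers
 * of plain writel()/readl(), which the register accesses here do not need.
 */
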
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take a couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_addr(priv, d, addr);
	dmadesc_set_length_status(priv, d, val);
}

#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK)

static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}

/* These macros are defined to deal with the register map change
 * between GENET1.1 and GENET2. Only those currently being used
 * by the driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return bcmgenet_readl(priv->base +
				      priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		bcmgenet_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return bcmgenet_readl(priv->base +
				      priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		bcmgenet_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
	DMA_INDEX2RING_0,
	DMA_INDEX2RING_1,
	DMA_INDEX2RING_2,
	DMA_INDEX2RING_3,
	DMA_INDEX2RING_4,
	DMA_INDEX2RING_5,
	DMA_INDEX2RING_6,
	DMA_INDEX2RING_7,
	DMA_RING0_TIMEOUT,
	DMA_RING1_TIMEOUT,
	DMA_RING2_TIMEOUT,
	DMA_RING3_TIMEOUT,
	DMA_RING4_TIMEOUT,
	DMA_RING5_TIMEOUT,
	DMA_RING6_TIMEOUT,
	DMA_RING7_TIMEOUT,
	DMA_RING8_TIMEOUT,
	DMA_RING9_TIMEOUT,
	DMA_RING10_TIMEOUT,
	DMA_RING11_TIMEOUT,
	DMA_RING12_TIMEOUT,
	DMA_RING13_TIMEOUT,
	DMA_RING14_TIMEOUT,
	DMA_RING15_TIMEOUT,
	DMA_RING16_TIMEOUT,
};

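/* The enum above provides symbolic indices into the per-version offset
 * tables below, so the tdma/rdma accessors stay version-agnostic: e.g.
 * bcmgenet_tdma_writel(priv, val, DMA_CTRL) resolves to offset 0x04 on
 * GENET v2/v3+ but 0x00 on v1, per the tables that follow.
 */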
static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
	[DMA_INDEX2RING_0]	= 0x70,
	[DMA_INDEX2RING_1]	= 0x74,
	[DMA_INDEX2RING_2]	= 0x78,
	[DMA_INDEX2RING_3]	= 0x7C,
	[DMA_INDEX2RING_4]	= 0x80,
	[DMA_INDEX2RING_5]	= 0x84,
	[DMA_INDEX2RING_6]	= 0x88,
	[DMA_INDEX2RING_7]	= 0x8C,
};

static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;

static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

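/* Putting the pieces together, a common (non-ring) TDMA register lives at:
 *
 *   base + tdma_offset + TOTAL_DESC * DMA_DESC_SIZE	(skip descriptors)
 *        + DMA_RINGS_SIZE				(skip per-ring windows)
 *        + bcmgenet_dma_regs[r]
 *
 * For example, assuming TOTAL_DESC = 256 (per the "skip 256 descriptors"
 * comment above) and 3 words per BD on a 40-bit capable part, the
 * descriptor array alone spans 256 * 12 = 3072 bytes.
 */
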
/* RDMA/TDMA ring registers and accessors
 * we merge the common fields and just prefix with T/D the registers
 * having different meaning depending on the direction
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
};

/* GENET v4 supports 40-bit pointer addressing;
 * for obvious reasons the LO and HI word parts
 * are contiguous, but this offsets the other
 * registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_READ_PTR_HI]	= 0x04,
	[TDMA_CONS_INDEX]	= 0x08,
	[TDMA_PROD_INDEX]	= 0x0C,
	[DMA_RING_BUF_SIZE]	= 0x10,
	[DMA_START_ADDR]	= 0x14,
	[DMA_START_ADDR_HI]	= 0x18,
	[DMA_END_ADDR]		= 0x1C,
	[DMA_END_ADDR_HI]	= 0x20,
	[DMA_MBUF_DONE_THRESH]	= 0x24,
	[TDMA_FLOW_PERIOD]	= 0x28,
	[TDMA_WRITE_PTR]	= 0x2C,
	[TDMA_WRITE_PTR_HI]	= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_CONS_INDEX]	= 0x04,
	[TDMA_PROD_INDEX]	= 0x08,
	[DMA_RING_BUF_SIZE]	= 0x0C,
	[DMA_START_ADDR]	= 0x10,
	[DMA_END_ADDR]		= 0x14,
	[DMA_MBUF_DONE_THRESH]	= 0x18,
	[TDMA_FLOW_PERIOD]	= 0x1C,
	[TDMA_WRITE_PTR]	= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;

static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
			      (DMA_RING_SIZE * ring) +
			      genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
			      (DMA_RING_SIZE * ring) +
			      genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

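/* HFB (Hardware Filter Block) enable bits span two 32-bit words at
 * HFB_FLT_ENABLE_V3PLUS, in inverted word order: filters 0-31 sit in the
 * second word (offset + 4) and filters 32 and up in the first, hence the
 * (f_index < 32) * sizeof(u32) term below; e.g. enabling filter 5 sets
 * bit 5 of the word at HFB_FLT_ENABLE_V3PLUS + 4.
 */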
static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
	u32 offset;
	u32 reg;

	offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
	reg = bcmgenet_hfb_reg_readl(priv, offset);
	reg |= (1 << (f_index % 32));
	bcmgenet_hfb_reg_writel(priv, reg, offset);
	reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
	reg |= RBUF_HFB_EN;
	bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
}

static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
	u32 offset, reg, reg1;

	offset = HFB_FLT_ENABLE_V3PLUS;
	reg = bcmgenet_hfb_reg_readl(priv, offset);
	reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
	if (f_index < 32) {
		reg1 &= ~(1 << (f_index % 32));
		bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
	} else {
		reg &= ~(1 << (f_index % 32));
		bcmgenet_hfb_reg_writel(priv, reg, offset);
	}
	if (!reg && !reg1) {
		reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
		reg &= ~RBUF_HFB_EN;
		bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
	}
}

static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
						     u32 f_index, u32 rx_queue)
{
	u32 offset;
	u32 reg;

	offset = f_index / 8;
	reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
	reg &= ~(0xF << (4 * (f_index % 8)));
	reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
	bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
}

static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
					   u32 f_index, u32 f_length)
{
	u32 offset;
	u32 reg;

	offset = HFB_FLT_LEN_V3PLUS +
		 ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
		 sizeof(u32);
	reg = bcmgenet_hfb_reg_readl(priv, offset);
	reg &= ~(0xFF << (8 * (f_index % 4)));
	reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
	bcmgenet_hfb_reg_writel(priv, reg, offset);
}

static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
{
	while (size) {
		switch (*(unsigned char *)mask++) {
		case 0x00:
		case 0x0f:
		case 0xf0:
		case 0xff:
			size--;
			continue;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

#define VALIDATE_MASK(x) \
	bcmgenet_hfb_validate_mask(&(x), sizeof(x))

static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index,
				    u32 offset, void *val, void *mask,
				    size_t size)
{
	u32 index, tmp;

	index = f_index * priv->hw_params->hfb_filter_size + offset / 2;
	tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32));

	while (size--) {
		if (offset++ & 1) {
			tmp &= ~0x300FF;
			tmp |= (*(unsigned char *)val++);
			switch ((*(unsigned char *)mask++)) {
			case 0xFF:
				tmp |= 0x30000;
				break;
			case 0xF0:
				tmp |= 0x20000;
				break;
			case 0x0F:
				tmp |= 0x10000;
				break;
			}
			bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32));
			if (size)
				tmp = bcmgenet_hfb_readl(priv,
							 index * sizeof(u32));
		} else {
			tmp &= ~0xCFF00;
			tmp |= (*(unsigned char *)val++) << 8;
			switch ((*(unsigned char *)mask++)) {
			case 0xFF:
				tmp |= 0xC0000;
				break;
			case 0xF0:
				tmp |= 0x80000;
				break;
			case 0x0F:
				tmp |= 0x40000;
				break;
			}
			if (!size)
				bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32));
		}
	}

	return 0;
}

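/* Each 32-bit word of filter RAM written by bcmgenet_hfb_insert_data()
 * above covers two packet bytes plus their match-enable bits, as the
 * constants encode: bits [15:8] hold the even-offset byte with per-nibble
 * enables in bits [19:18], and bits [7:0] hold the odd-offset byte with
 * enables in bits [17:16]. A mask byte of 0xF0 on an even offset thus
 * sets only bit 19 (0x80000), matching just the high nibble.
 */
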
static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
					     struct bcmgenet_rxnfc_rule *rule)
{
	struct ethtool_rx_flow_spec *fs = &rule->fs;
	u32 offset = 0, f_length = 0, f;
	u8 val_8, mask_8;
	__be16 val_16;
	u16 mask_16;
	size_t size;

	f = fs->location;
	if (fs->flow_type & FLOW_MAC_EXT) {
		bcmgenet_hfb_insert_data(priv, f, 0,
					 &fs->h_ext.h_dest, &fs->m_ext.h_dest,
					 sizeof(fs->h_ext.h_dest));
	}

	if (fs->flow_type & FLOW_EXT) {
		if (fs->m_ext.vlan_etype ||
		    fs->m_ext.vlan_tci) {
			bcmgenet_hfb_insert_data(priv, f, 12,
						 &fs->h_ext.vlan_etype,
						 &fs->m_ext.vlan_etype,
						 sizeof(fs->h_ext.vlan_etype));
			bcmgenet_hfb_insert_data(priv, f, 14,
						 &fs->h_ext.vlan_tci,
						 &fs->m_ext.vlan_tci,
						 sizeof(fs->h_ext.vlan_tci));
			offset += VLAN_HLEN;
			f_length += DIV_ROUND_UP(VLAN_HLEN, 2);
		}
	}

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case ETHER_FLOW:
		f_length += DIV_ROUND_UP(ETH_HLEN, 2);
		bcmgenet_hfb_insert_data(priv, f, 0,
					 &fs->h_u.ether_spec.h_dest,
					 &fs->m_u.ether_spec.h_dest,
					 sizeof(fs->h_u.ether_spec.h_dest));
		bcmgenet_hfb_insert_data(priv, f, ETH_ALEN,
					 &fs->h_u.ether_spec.h_source,
					 &fs->m_u.ether_spec.h_source,
					 sizeof(fs->h_u.ether_spec.h_source));
		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
					 &fs->h_u.ether_spec.h_proto,
					 &fs->m_u.ether_spec.h_proto,
					 sizeof(fs->h_u.ether_spec.h_proto));
		break;
	case IP_USER_FLOW:
		f_length += DIV_ROUND_UP(ETH_HLEN + 20, 2);
		/* Specify IP Ether Type */
		val_16 = htons(ETH_P_IP);
		mask_16 = 0xFFFF;
		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
					 &val_16, &mask_16, sizeof(val_16));
		bcmgenet_hfb_insert_data(priv, f, 15 + offset,
					 &fs->h_u.usr_ip4_spec.tos,
					 &fs->m_u.usr_ip4_spec.tos,
					 sizeof(fs->h_u.usr_ip4_spec.tos));
		bcmgenet_hfb_insert_data(priv, f, 23 + offset,
					 &fs->h_u.usr_ip4_spec.proto,
					 &fs->m_u.usr_ip4_spec.proto,
					 sizeof(fs->h_u.usr_ip4_spec.proto));
		bcmgenet_hfb_insert_data(priv, f, 26 + offset,
					 &fs->h_u.usr_ip4_spec.ip4src,
					 &fs->m_u.usr_ip4_spec.ip4src,
					 sizeof(fs->h_u.usr_ip4_spec.ip4src));
		bcmgenet_hfb_insert_data(priv, f, 30 + offset,
					 &fs->h_u.usr_ip4_spec.ip4dst,
					 &fs->m_u.usr_ip4_spec.ip4dst,
					 sizeof(fs->h_u.usr_ip4_spec.ip4dst));
		if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
			break;

		/* Only supports 20 byte IPv4 header */
		val_8 = 0x45;
		mask_8 = 0xFF;
		bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset,
					 &val_8, &mask_8,
					 sizeof(val_8));
		size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes);
		bcmgenet_hfb_insert_data(priv, f,
					 ETH_HLEN + 20 + offset,
					 &fs->h_u.usr_ip4_spec.l4_4_bytes,
					 &fs->m_u.usr_ip4_spec.l4_4_bytes,
					 size);
		f_length += DIV_ROUND_UP(size, 2);
		break;
	}

	bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
	if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
		/* Ring 0 flows can be handled by the default Descriptor Ring
		 * We'll map them to ring 0, but don't enable the filter
		 */
		bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);
		rule->state = BCMGENET_RXNFC_STATE_DISABLED;
	} else {
		/* Other Rx rings are direct mapped here */
		bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f,
							 fs->ring_cookie);
		bcmgenet_hfb_enable_filter(priv, f);
		rule->state = BCMGENET_RXNFC_STATE_ENABLED;
	}
}

/* bcmgenet_hfb_clear
 *
 * Clear Hardware Filter Block and disable all filtering.
 */
static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
{
	u32 base, i;

	base = f_index * priv->hw_params->hfb_filter_size;
	for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
		bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
}

static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
	u32 i;

	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
		return;

	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);

	for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
		bcmgenet_rdma_writel(priv, 0x0, i);

	for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
		bcmgenet_hfb_reg_writel(priv, 0x0,
					HFB_FLT_LEN_V3PLUS + i * sizeof(u32));

	for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
		bcmgenet_hfb_clear_filter(priv, i);
}

static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
{
	int i;

	INIT_LIST_HEAD(&priv->rxnfc_list);
	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
		return;

	for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
		INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
		priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
	}

	bcmgenet_hfb_clear(priv);
}

static int bcmgenet_begin(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn on the clock */
	return clk_prepare_enable(priv->clk);
}

static void bcmgenet_complete(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn off the clock */
	clk_disable_unprepare(priv->clk);
}

static int bcmgenet_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (!dev->phydev)
		return -ENODEV;

	phy_ethtool_ksettings_get(dev->phydev, cmd);

	return 0;
}

static int bcmgenet_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (!dev->phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_set(dev->phydev, cmd);
}

static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 reg;
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* Make sure we reflect the value of CRC_CMD_FWD */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

	clk_disable_unprepare(priv->clk);

	return ret;
}

static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}

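/* RX interrupt coalescing: DMA_MBUF_DONE_THRESH counts packets, while the
 * per-ring timeout registers count in ticks of roughly 8.192us (the 125MHz
 * reference clock divided by 1024, see bcmgenet_set_coalesce() below).
 * So e.g. rx_coalesce_usecs = 100 programs
 * DIV_ROUND_UP(100 * 1000, 8192) = 13 ticks, about 106us, and the
 * readback below converts ticks back with reg * 8192 / 1000.
 */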
static int bcmgenet_get_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec,
				 struct kernel_ethtool_coalesce *kernel_coal,
				 struct netlink_ext_ack *extack)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rx_ring *ring;
	unsigned int i;

	ec->tx_max_coalesced_frames =
		bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_max_coalesced_frames =
		bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_coalesce_usecs =
		bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;

	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		ring = &priv->rx_rings[i];
		ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
	}
	ring = &priv->rx_rings[DESC_INDEX];
	ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;

	return 0;
}

static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring,
				     u32 usecs, u32 pkts)
{
	struct bcmgenet_priv *priv = ring->priv;
	unsigned int i = ring->index;
	u32 reg;

	bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH);

	reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
	reg &= ~DMA_TIMEOUT_MASK;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192);
	bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
}

static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
					  struct ethtool_coalesce *ec)
{
	struct dim_cq_moder moder;
	u32 usecs, pkts;

	ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = ring->rx_coalesce_usecs;
	pkts = ring->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	ring->dim.use_dim = ec->use_adaptive_rx_coalesce;
	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
}

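/* When adaptive RX coalescing (DIM) is first switched on, the ring is
 * seeded with net_dim's default moderation point for its mode rather than
 * the user-supplied values; from then on the DIM worker (outside this
 * excerpt) is presumably what re-programs the ring through
 * bcmgenet_set_rx_coalesce() as its estimator converges.
 */
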
static int bcmgenet_set_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec,
				 struct kernel_ethtool_coalesce *kernel_coal,
				 struct netlink_ext_ack *extack)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i;

	/* Base system clock is 125MHz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192us, our maximum value
	 * has to fit in the DMA_TIMEOUT_MASK (16 bits)
	 */
	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->tx_max_coalesced_frames == 0 ||
	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
		return -EINVAL;

	/* GENET TDMA hardware does not support a configurable timeout, but
	 * will always generate an interrupt either after MBDONE packets have
	 * been transmitted, or when the ring is empty.
	 */

	/* Program all TX queues with the same values, as there is no
	 * ethtool knob to do coalescing on a per-queue basis
	 */
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_tdma_ring_writel(priv, i,
					  ec->tx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);
	bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
				  ec->tx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	for (i = 0; i < priv->hw_params->rx_queues; i++)
		bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
	bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);

	return 0;
}

static void bcmgenet_get_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *epause)
{
	struct bcmgenet_priv *priv;
	u32 umac_cmd;

	priv = netdev_priv(dev);

	epause->autoneg = priv->autoneg_pause;

	if (netif_carrier_ok(dev)) {
		/* report active state when link is up */
		umac_cmd = bcmgenet_umac_readl(priv, UMAC_CMD);
		epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE);
		epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE);
	} else {
		/* otherwise report stored settings */
		epause->tx_pause = priv->tx_pause;
		epause->rx_pause = priv->rx_pause;
	}
}

static int bcmgenet_set_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *epause)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!dev->phydev)
		return -ENODEV;

	if (!phy_validate_pause(dev->phydev, epause))
		return -EINVAL;

	priv->autoneg_pause = !!epause->autoneg;
	priv->tx_pause = !!epause->tx_pause;
	priv->rx_pause = !!epause->rx_pause;

	bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);

	return 0;
}

/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};

#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}

#define STAT_GENET_Q(num) \
	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
			tx_rings[num].packets), \
	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
			tx_rings[num].bytes), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
			rx_rings[num].bytes), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
			rx_rings[num].packets), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
			rx_rings[num].errors), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
			rx_rings[num].dropped)

/* There is a 0xC gap between the end of the RX stats and the beginning of
 * the TX stats, and another between the end of the TX stats and the
 * beginning of the RX RUNT stats.
 */
#define BCMGENET_STAT_OFFSET	0xc

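/* For reference, STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes) expands to an
 * entry whose stat_offset is offsetof(struct bcmgenet_priv, mib.rx.bytes);
 * bcmgenet_update_mib_counters() below uses that offset to store the
 * hardware counter straight into priv, so the table order and the mib
 * structure layout must both track the hardware register order.
 */
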
STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes), 1054 STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca), 1055 STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca), 1056 STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs), 1057 STAT_GENET_MIB_RX("rx_control", mib.rx.cf), 1058 STAT_GENET_MIB_RX("rx_pause", mib.rx.pf), 1059 STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo), 1060 STAT_GENET_MIB_RX("rx_align", mib.rx.aln), 1061 STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr), 1062 STAT_GENET_MIB_RX("rx_code", mib.rx.cde), 1063 STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr), 1064 STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr), 1065 STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr), 1066 STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue), 1067 STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok), 1068 STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc), 1069 STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp), 1070 STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc), 1071 /* UniMAC TSV counters */ 1072 STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64), 1073 STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), 1074 STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), 1075 STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), 1076 STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), 1077 STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), 1078 STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), 1079 STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), 1080 STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), 1081 STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), 1082 STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts), 1083 STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca), 1084 STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca), 1085 STAT_GENET_MIB_TX("tx_pause", mib.tx.pf), 1086 STAT_GENET_MIB_TX("tx_control", mib.tx.cf), 1087 STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs), 1088 STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr), 1089 STAT_GENET_MIB_TX("tx_defer", mib.tx.drf), 1090 STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf), 1091 STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl), 1092 STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl), 1093 STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl), 1094 STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl), 1095 STAT_GENET_MIB_TX("tx_frags", mib.tx.frg), 1096 STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl), 1097 STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr), 1098 STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes), 1099 STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok), 1100 STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc), 1101 /* UniMAC RUNT counters */ 1102 STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt), 1103 STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), 1104 STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), 1105 STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes), 1106 /* Misc UniMAC counters */ 1107 STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, 1108 UMAC_RBUF_OVFL_CNT_V1), 1109 STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, 1110 UMAC_RBUF_ERR_CNT_V1), 1111 STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), 1112 STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), 1113 STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed), 1114 STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed), 1115 STAT_GENET_SOFT_MIB("tx_realloc_tsb", mib.tx_realloc_tsb), 1116 STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed", 1117 mib.tx_realloc_tsb_failed), 1118 /* Per TX queues */ 1119 STAT_GENET_Q(0), 1120 STAT_GENET_Q(1), 1121 STAT_GENET_Q(2), 1122 
#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)

static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strscpy(info->driver, "bcmgenet", sizeof(info->driver));
}

static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
{
	u16 new_offset;
	u32 val;

	switch (offset) {
	case UMAC_RBUF_OVFL_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_OVFL_CNT_V2;
		else
			new_offset = RBUF_OVFL_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	case UMAC_RBUF_ERR_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_ERR_CNT_V2;
		else
			new_offset = RBUF_ERR_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	default:
		val = bcmgenet_umac_readl(priv, offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_umac_writel(priv, 0, offset);
		break;
	}

	return val;
}

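/* The fallthrough chain below accumulates one BCMGENET_STAT_OFFSET per
 * register-map gap: TX entries read at UMAC_MIB_START + j + 0xC and RUNT
 * entries at UMAC_MIB_START + j + 0x18, where j is the running byte count
 * of all hardware-backed entries processed so far (NETDEV/SOFT entries
 * are skipped before j is updated).
 */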
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
		case BCMGENET_STAT_RUNT:
			offset += BCMGENET_STAT_OFFSET;
			fallthrough;
		case BCMGENET_STAT_MIB_TX:
			offset += BCMGENET_STAT_OFFSET;
			fallthrough;
		case BCMGENET_STAT_MIB_RX:
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			offset = 0;	/* Reset Offset */
			break;
		case BCMGENET_STAT_MISC:
			if (GENET_IS_V1(priv)) {
				val = bcmgenet_umac_readl(priv, s->reg_offset);
				/* clear if overflowed */
				if (val == ~0)
					bcmgenet_umac_writel(priv, 0,
							     s->reg_offset);
			} else {
				val = bcmgenet_update_stat_misc(priv,
								s->reg_offset);
			}
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}

static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	dev->netdev_ops->ndo_get_stats(dev);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		if (sizeof(unsigned long) != sizeof(u32) &&
		    s->stat_sizeof == sizeof(unsigned long))
			data[i] = *(unsigned long *)p;
		else
			data[i] = *(u32 *)p;
	}
}

void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
			     bool tx_lpi_enabled)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27MHz clock automatically */
	reg = bcmgenet_readl(priv->base + off);
	if (tx_lpi_enabled)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	bcmgenet_writel(reg, priv->base + off);

	/* Do the same thing for RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.tx_lpi_enabled = tx_lpi_enabled;
}

static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_keee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	e->tx_lpi_enabled = p->tx_lpi_enabled;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(dev->phydev, e);
}

static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_keee *p = &priv->eee;
	bool active;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false, false);
	} else {
		active = phy_init_eee(dev->phydev, false) >= 0;
		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, active, e->tx_lpi_enabled);
	}

	return phy_ethtool_set_eee(dev->phydev, e);
}

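/* Only masks that are all-zeros or all-ones per nibble (0x00, 0x0f, 0xf0,
 * 0xff in every byte) can be programmed, since the filter RAM stores one
 * match-enable bit per nibble (see bcmgenet_hfb_insert_data()); e.g. an
 * ip4dst mask of 255.255.255.0 is accepted, 255.255.255.1 is not.
 */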
static int bcmgenet_validate_flow(struct net_device *dev,
				  struct ethtool_rxnfc *cmd)
{
	struct ethtool_usrip4_spec *l4_mask;
	struct ethhdr *eth_mask;

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES &&
	    cmd->fs.location != RX_CLS_LOC_ANY) {
		netdev_err(dev, "rxnfc: Invalid location (%d)\n",
			   cmd->fs.location);
		return -EINVAL;
	}

	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case IP_USER_FLOW:
		l4_mask = &cmd->fs.m_u.usr_ip4_spec;
		/* don't allow mask which isn't valid */
		if (VALIDATE_MASK(l4_mask->ip4src) ||
		    VALIDATE_MASK(l4_mask->ip4dst) ||
		    VALIDATE_MASK(l4_mask->l4_4_bytes) ||
		    VALIDATE_MASK(l4_mask->proto) ||
		    VALIDATE_MASK(l4_mask->ip_ver) ||
		    VALIDATE_MASK(l4_mask->tos)) {
			netdev_err(dev, "rxnfc: Unsupported mask\n");
			return -EINVAL;
		}
		break;
	case ETHER_FLOW:
		eth_mask = &cmd->fs.m_u.ether_spec;
		/* don't allow mask which isn't valid */
		if (VALIDATE_MASK(eth_mask->h_dest) ||
		    VALIDATE_MASK(eth_mask->h_source) ||
		    VALIDATE_MASK(eth_mask->h_proto)) {
			netdev_err(dev, "rxnfc: Unsupported mask\n");
			return -EINVAL;
		}
		break;
	default:
		netdev_err(dev, "rxnfc: Unsupported flow type (0x%x)\n",
			   cmd->fs.flow_type);
		return -EINVAL;
	}

	if ((cmd->fs.flow_type & FLOW_EXT)) {
		/* don't allow mask which isn't valid */
		if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) ||
		    VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) {
			netdev_err(dev, "rxnfc: Unsupported mask\n");
			return -EINVAL;
		}
		if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) {
			netdev_err(dev, "rxnfc: user-def not supported\n");
			return -EINVAL;
		}
	}

	if ((cmd->fs.flow_type & FLOW_MAC_EXT)) {
		/* don't allow mask which isn't valid */
		if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) {
			netdev_err(dev, "rxnfc: Unsupported mask\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int bcmgenet_insert_flow(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rxnfc_rule *loc_rule;
	int err, i;

	if (priv->hw_params->hfb_filter_size < 128) {
		netdev_err(dev, "rxnfc: Not supported by this device\n");
		return -EINVAL;
	}

	if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
	    cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
		netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
			   cmd->fs.ring_cookie);
		return -EINVAL;
	}

	err = bcmgenet_validate_flow(dev, cmd);
	if (err)
		return err;

	if (cmd->fs.location == RX_CLS_LOC_ANY) {
		list_for_each_entry(loc_rule, &priv->rxnfc_list, list) {
			cmd->fs.location = loc_rule->fs.location;
			err = memcmp(&loc_rule->fs, &cmd->fs,
				     sizeof(struct ethtool_rx_flow_spec));
			if (!err)
				/* rule exists so return current location */
				return 0;
		}
		for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
			loc_rule = &priv->rxnfc_rules[i];
			if (loc_rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
				cmd->fs.location = i;
				break;
			}
		}
		if (i == MAX_NUM_OF_FS_RULES) {
			cmd->fs.location = RX_CLS_LOC_ANY;
			return -ENOSPC;
		}
	} else {
		loc_rule = &priv->rxnfc_rules[cmd->fs.location];
	}
	if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
		bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
	if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
		list_del(&loc_rule->list);
		bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
	}
	loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
	memcpy(&loc_rule->fs, &cmd->fs,
	       sizeof(struct ethtool_rx_flow_spec));

	bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);

	list_add_tail(&loc_rule->list, &priv->rxnfc_list);

	return 0;
}

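/* Rule lifecycle: slots start UNUSED; bcmgenet_hfb_create_rxnfc_filter()
 * leaves rules that target ring 0 or RX_CLS_FLOW_WAKE in DISABLED (mapped
 * but with the filter bit off, presumably armed elsewhere, e.g. by the
 * WoL code outside this excerpt), and moves everything else to ENABLED.
 * Deletion below reverses the steps: disable the filter, unlink, clear
 * the filter RAM, and return the slot to UNUSED.
 */
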
static int bcmgenet_delete_flow(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rxnfc_rule *rule;
	int err = 0;

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	rule = &priv->rxnfc_rules[cmd->fs.location];
	if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
		err = -ENOENT;
		goto out;
	}

	if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
		bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
	if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
		list_del(&rule->list);
		bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
	}
	rule->state = BCMGENET_RXNFC_STATE_UNUSED;
	memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));

out:
	return err;
}

static int bcmgenet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		err = bcmgenet_insert_flow(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = bcmgenet_delete_flow(dev, cmd);
		break;
	default:
		netdev_warn(priv->dev, "Unsupported ethtool command. (%d)\n",
			    cmd->cmd);
		return -EINVAL;
	}

	return err;
}

static int bcmgenet_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
			     int loc)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rxnfc_rule *rule;
	int err = 0;

	if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	rule = &priv->rxnfc_rules[loc];
	if (rule->state == BCMGENET_RXNFC_STATE_UNUSED)
		err = -ENOENT;
	else
		memcpy(&cmd->fs, &rule->fs,
		       sizeof(struct ethtool_rx_flow_spec));

	return err;
}

static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv)
{
	struct list_head *pos;
	int res = 0;

	list_for_each(pos, &priv->rxnfc_list)
		res++;

	return res;
}

static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			      u32 *rule_locs)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rxnfc_rule *rule;
	int err = 0;
	int i = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->hw_params->rx_queues ?: 1;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bcmgenet_get_num_flows(priv);
		cmd->data = MAX_NUM_OF_FS_RULES | RX_CLS_LOC_SPECIAL;
		break;
	case ETHTOOL_GRXCLSRULE:
		err = bcmgenet_get_flow(dev, cmd, cmd->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		list_for_each_entry(rule, &priv->rxnfc_list, list)
			if (i < cmd->rule_cnt)
				rule_locs[i++] = rule->fs.location;
		cmd->rule_cnt = i;
		cmd->data = MAX_NUM_OF_FS_RULES;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

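/* In bcmgenet_get_rxnfc() above, ORing RX_CLS_LOC_SPECIAL into cmd->data
 * for ETHTOOL_GRXCLSRLCNT advertises that special location values such as
 * RX_CLS_LOC_ANY are accepted on insertion, which is what lets
 * bcmgenet_insert_flow() pick a free slot on the user's behalf.
 */
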
/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.begin			= bcmgenet_begin,
	.complete		= bcmgenet_complete,
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_coalesce		= bcmgenet_get_coalesce,
	.set_coalesce		= bcmgenet_set_coalesce,
	.get_link_ksettings	= bcmgenet_get_link_ksettings,
	.set_link_ksettings	= bcmgenet_set_link_ksettings,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_rxnfc		= bcmgenet_get_rxnfc,
	.set_rxnfc		= bcmgenet_set_rxnfc,
	.get_pauseparam		= bcmgenet_get_pauseparam,
	.set_pauseparam		= bcmgenet_set_pauseparam,
};

/* Power down the unimac, based on mode. */
static int bcmgenet_power_down(struct bcmgenet_priv *priv,
			       enum bcmgenet_power_mode mode)
{
	int ret = 0;
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->dev->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		ret = bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			if (GENET_IS_V5(priv) && !priv->ephy_16nm)
				reg |= EXT_PWR_DOWN_PHY_EN |
				       EXT_PWR_DOWN_PHY_RD |
				       EXT_PWR_DOWN_PHY_SD |
				       EXT_PWR_DOWN_PHY_RX |
				       EXT_PWR_DOWN_PHY_TX |
				       EXT_IDDQ_GLBL_PWR;
			else
				reg |= EXT_PWR_DOWN_PHY;

			reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

			bcmgenet_phy_power_set(priv->dev, false);
		}
		break;
	default:
		break;
	}

	return ret;
}

static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
			 EXT_ENERGY_DET_MASK);
		if (GENET_IS_V5(priv) && !priv->ephy_16nm) {
			reg &= ~(EXT_PWR_DOWN_PHY_EN |
				 EXT_PWR_DOWN_PHY_RD |
				 EXT_PWR_DOWN_PHY_SD |
				 EXT_PWR_DOWN_PHY_RX |
				 EXT_PWR_DOWN_PHY_TX |
				 EXT_IDDQ_GLBL_PWR);
			reg |= EXT_PHY_RESET;
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
			mdelay(1);

			reg &= ~EXT_PHY_RESET;
		} else {
			reg &= ~EXT_PWR_DOWN_PHY;
			reg |= EXT_PWR_DN_EN_LD;
		}
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		bcmgenet_phy_power_set(priv->dev, true);
		break;

	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		if (!GENET_IS_V5(priv)) {
			reg |= EXT_PWR_DN_EN_LD;
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		}
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}
}

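/* ring->cbs points at this ring's first control block inside priv->tx_cbs
 * while cb_ptr/end_ptr bound its hardware BD indices, so the CB backing
 * BD write_ptr is cbs[write_ptr - cb_ptr]; e.g. for a ring spanning BDs
 * 32-63 (an assumed layout, for illustration only), write_ptr cycles
 * 32..63 and indexes CBs 0..31.
 */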
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}

static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Rewinding local write pointer */
	if (ring->write_ptr == ring->cb_ptr)
		ring->write_ptr = ring->end_ptr;
	else
		ring->write_ptr--;

	return tx_cb_ptr;
}

static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_SET);
}

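/* As the helpers above show, the default ring 16 signals TX/RX completion
 * through the INTRL2_0 controller while the per-queue rings 0-15 each own
 * a bit in INTRL2_1; masking is therefore per-ring, letting each NAPI
 * instance disable only its own interrupt source while it polls.
 */
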
/* Simple helper to free a transmit control block's resources
 * Returns an skb when the last transmit control block associated with the
 * skb is freed. The skb should be freed by the caller if necessary.
 */
static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
					   struct enet_cb *cb)
{
	struct sk_buff *skb;

	skb = cb->skb;

	if (skb) {
		cb->skb = NULL;
		if (cb == GENET_CB(skb)->first_cb)
			dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
					 dma_unmap_len(cb, dma_len),
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
				       dma_unmap_len(cb, dma_len),
				       DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);

		if (cb == GENET_CB(skb)->last_cb)
			return skb;

	} else if (dma_unmap_addr(cb, dma_addr)) {
		dma_unmap_page(dev,
			       dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len),
			       DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}

	return NULL;
}

/* Simple helper to free a receive control block's resources */
static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
					   struct enet_cb *cb)
{
	struct sk_buff *skb;

	skb = cb->skb;
	cb->skb = NULL;

	if (dma_unmap_addr(cb, dma_addr)) {
		dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}

	return skb;
}

/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
					  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int txbds_processed = 0;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;
	unsigned int txbds_ready;
	unsigned int c_index;
	struct sk_buff *skb;

	/* Clear status before servicing to reduce spurious interrupts */
	if (ring->index == DESC_INDEX)
		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
					 INTRL2_CPU_CLEAR);
	else
		bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
					 INTRL2_CPU_CLEAR);

	/* Compute how many buffers have been transmitted since the last
	 * xmit call
	 */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
		& DMA_C_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;

	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);

	/* Reclaim transmitted buffers */
	while (txbds_processed < txbds_ready) {
		skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
					  &priv->tx_cbs[ring->clean_ptr]);
		if (skb) {
			pkts_compl++;
			bytes_compl += GENET_CB(skb)->bytes_sent;
			dev_consume_skb_any(skb);
		}

		txbds_processed++;
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
		else
			ring->clean_ptr = ring->cb_ptr;
	}

	ring->free_bds += txbds_processed;
	ring->c_index = c_index;

	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;

	netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
				  pkts_compl, bytes_compl);

	return txbds_processed;
}

static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
					struct bcmgenet_tx_ring *ring)
{
	unsigned int released;

	spin_lock_bh(&ring->lock);
	released = __bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_bh(&ring->lock);

	return released;
}

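/* The consumer-index arithmetic in __bcmgenet_tx_reclaim() above relies
 * on unsigned wraparound: assuming DMA_C_INDEX_MASK is 0xFFFF (a 16-bit
 * index), old c_index = 0xFFFE and new c_index = 0x0003 yields
 * (0x0003 - 0xFFFE) & 0xFFFF = 5 buffers ready to reclaim.
 */
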
bcmgenet_tx_ring *ring = 1940 container_of(napi, struct bcmgenet_tx_ring, napi); 1941 unsigned int work_done = 0; 1942 struct netdev_queue *txq; 1943 1944 spin_lock(&ring->lock); 1945 work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring); 1946 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { 1947 txq = netdev_get_tx_queue(ring->priv->dev, ring->queue); 1948 netif_tx_wake_queue(txq); 1949 } 1950 spin_unlock(&ring->lock); 1951 1952 if (work_done == 0) { 1953 napi_complete(napi); 1954 ring->int_enable(ring); 1955 1956 return 0; 1957 } 1958 1959 return budget; 1960 } 1961 1962 static void bcmgenet_tx_reclaim_all(struct net_device *dev) 1963 { 1964 struct bcmgenet_priv *priv = netdev_priv(dev); 1965 int i; 1966 1967 if (netif_is_multiqueue(dev)) { 1968 for (i = 0; i < priv->hw_params->tx_queues; i++) 1969 bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]); 1970 } 1971 1972 bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); 1973 } 1974 1975 /* Reallocate the SKB to put enough headroom in front of it and insert 1976 * the transmit checksum offsets in the descriptors 1977 */ 1978 static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev, 1979 struct sk_buff *skb) 1980 { 1981 struct bcmgenet_priv *priv = netdev_priv(dev); 1982 struct status_64 *status = NULL; 1983 struct sk_buff *new_skb; 1984 u16 offset; 1985 u8 ip_proto; 1986 __be16 ip_ver; 1987 u32 tx_csum_info; 1988 1989 if (unlikely(skb_headroom(skb) < sizeof(*status))) { 1990 /* If 64 byte status block enabled, must make sure skb has 1991 * enough headroom for us to insert 64B status block. 1992 */ 1993 new_skb = skb_realloc_headroom(skb, sizeof(*status)); 1994 if (!new_skb) { 1995 dev_kfree_skb_any(skb); 1996 priv->mib.tx_realloc_tsb_failed++; 1997 dev->stats.tx_dropped++; 1998 return NULL; 1999 } 2000 dev_consume_skb_any(skb); 2001 skb = new_skb; 2002 priv->mib.tx_realloc_tsb++; 2003 } 2004 2005 skb_push(skb, sizeof(*status)); 2006 status = (struct status_64 *)skb->data; 2007 2008 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2009 ip_ver = skb->protocol; 2010 switch (ip_ver) { 2011 case htons(ETH_P_IP): 2012 ip_proto = ip_hdr(skb)->protocol; 2013 break; 2014 case htons(ETH_P_IPV6): 2015 ip_proto = ipv6_hdr(skb)->nexthdr; 2016 break; 2017 default: 2018 /* don't use UDP flag */ 2019 ip_proto = 0; 2020 break; 2021 } 2022 2023 offset = skb_checksum_start_offset(skb) - sizeof(*status); 2024 tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) | 2025 (offset + skb->csum_offset) | 2026 STATUS_TX_CSUM_LV; 2027 2028 /* Set the special UDP flag for UDP */ 2029 if (ip_proto == IPPROTO_UDP) 2030 tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; 2031 2032 status->tx_csum_info = tx_csum_info; 2033 } 2034 2035 return skb; 2036 } 2037 2038 static void bcmgenet_hide_tsb(struct sk_buff *skb) 2039 { 2040 __skb_pull(skb, sizeof(struct status_64)); 2041 } 2042 2043 static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) 2044 { 2045 struct bcmgenet_priv *priv = netdev_priv(dev); 2046 struct device *kdev = &priv->pdev->dev; 2047 struct bcmgenet_tx_ring *ring = NULL; 2048 struct enet_cb *tx_cb_ptr; 2049 struct netdev_queue *txq; 2050 int nr_frags, index; 2051 dma_addr_t mapping; 2052 unsigned int size; 2053 skb_frag_t *frag; 2054 u32 len_stat; 2055 int ret; 2056 int i; 2057 2058 index = skb_get_queue_mapping(skb); 2059 /* Mapping strategy: 2060 * queue_mapping = 0, unclassified, packet xmited through ring16 2061 * queue_mapping = 1, goes to ring 0. (highest priority queue) 2062 * queue_mapping = 2, goes to ring 1.
2063 * queue_mapping = 3, goes to ring 2. 2064 * queue_mapping = 4, goes to ring 3. 2065 */ 2066 if (index == 0) 2067 index = DESC_INDEX; 2068 else 2069 index -= 1; 2070 2071 ring = &priv->tx_rings[index]; 2072 txq = netdev_get_tx_queue(dev, ring->queue); 2073 2074 nr_frags = skb_shinfo(skb)->nr_frags; 2075 2076 spin_lock(&ring->lock); 2077 if (ring->free_bds <= (nr_frags + 1)) { 2078 if (!netif_tx_queue_stopped(txq)) 2079 netif_tx_stop_queue(txq); 2080 ret = NETDEV_TX_BUSY; 2081 goto out; 2082 } 2083 2084 /* Retain how many bytes will be sent on the wire, without TSB inserted 2085 * by transmit checksum offload 2086 */ 2087 GENET_CB(skb)->bytes_sent = skb->len; 2088 2089 /* add the Transmit Status Block */ 2090 skb = bcmgenet_add_tsb(dev, skb); 2091 if (!skb) { 2092 ret = NETDEV_TX_OK; 2093 goto out; 2094 } 2095 2096 for (i = 0; i <= nr_frags; i++) { 2097 tx_cb_ptr = bcmgenet_get_txcb(priv, ring); 2098 2099 BUG_ON(!tx_cb_ptr); 2100 2101 if (!i) { 2102 /* Transmit single SKB or head of fragment list */ 2103 GENET_CB(skb)->first_cb = tx_cb_ptr; 2104 size = skb_headlen(skb); 2105 mapping = dma_map_single(kdev, skb->data, size, 2106 DMA_TO_DEVICE); 2107 } else { 2108 /* xmit fragment */ 2109 frag = &skb_shinfo(skb)->frags[i - 1]; 2110 size = skb_frag_size(frag); 2111 mapping = skb_frag_dma_map(kdev, frag, 0, size, 2112 DMA_TO_DEVICE); 2113 } 2114 2115 ret = dma_mapping_error(kdev, mapping); 2116 if (ret) { 2117 priv->mib.tx_dma_failed++; 2118 netif_err(priv, tx_err, dev, "Tx DMA map failed\n"); 2119 ret = NETDEV_TX_OK; 2120 goto out_unmap_frags; 2121 } 2122 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); 2123 dma_unmap_len_set(tx_cb_ptr, dma_len, size); 2124 2125 tx_cb_ptr->skb = skb; 2126 2127 len_stat = (size << DMA_BUFLENGTH_SHIFT) | 2128 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT); 2129 2130 /* Note: if we ever change from DMA_TX_APPEND_CRC below we 2131 * will need to restore software padding of "runt" packets 2132 */ 2133 len_stat |= DMA_TX_APPEND_CRC; 2134 2135 if (!i) { 2136 len_stat |= DMA_SOP; 2137 if (skb->ip_summed == CHECKSUM_PARTIAL) 2138 len_stat |= DMA_TX_DO_CSUM; 2139 } 2140 if (i == nr_frags) 2141 len_stat |= DMA_EOP; 2142 2143 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat); 2144 } 2145 2146 GENET_CB(skb)->last_cb = tx_cb_ptr; 2147 2148 bcmgenet_hide_tsb(skb); 2149 skb_tx_timestamp(skb); 2150 2151 /* Decrement total BD count and advance our write pointer */ 2152 ring->free_bds -= nr_frags + 1; 2153 ring->prod_index += nr_frags + 1; 2154 ring->prod_index &= DMA_P_INDEX_MASK; 2155 2156 netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent); 2157 2158 if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) 2159 netif_tx_stop_queue(txq); 2160 2161 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) 2162 /* Packets are ready, update producer index */ 2163 bcmgenet_tdma_ring_writel(priv, ring->index, 2164 ring->prod_index, TDMA_PROD_INDEX); 2165 out: 2166 spin_unlock(&ring->lock); 2167 2168 return ret; 2169 2170 out_unmap_frags: 2171 /* Back up for failed control block mapping */ 2172 bcmgenet_put_txcb(priv, ring); 2173 2174 /* Unmap successfully mapped control blocks */ 2175 while (i-- > 0) { 2176 tx_cb_ptr = bcmgenet_put_txcb(priv, ring); 2177 bcmgenet_free_tx_cb(kdev, tx_cb_ptr); 2178 } 2179 2180 dev_kfree_skb(skb); 2181 goto out; 2182 } 2183 2184 static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, 2185 struct enet_cb *cb) 2186 { 2187 struct device *kdev = &priv->pdev->dev; 2188 struct sk_buff *skb; 2189 struct sk_buff *rx_skb; 2190 dma_addr_t mapping; 2191 
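	/* Buffer-swap scheme: a replacement skb is allocated and DMA-mapped
	 * below before the filled skb is taken off the ring, so a descriptor
	 * is never left without a mapped buffer. If either step fails, the
	 * old skb stays on the ring and the Rx path counts the packet as
	 * dropped instead.
	 */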
2192 /* Allocate a new Rx skb */ 2193 skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT, 2194 GFP_ATOMIC | __GFP_NOWARN); 2195 if (!skb) { 2196 priv->mib.alloc_rx_buff_failed++; 2197 netif_err(priv, rx_err, priv->dev, 2198 "%s: Rx skb allocation failed\n", __func__); 2199 return NULL; 2200 } 2201 2202 /* DMA-map the new Rx skb */ 2203 mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, 2204 DMA_FROM_DEVICE); 2205 if (dma_mapping_error(kdev, mapping)) { 2206 priv->mib.rx_dma_failed++; 2207 dev_kfree_skb_any(skb); 2208 netif_err(priv, rx_err, priv->dev, 2209 "%s: Rx skb DMA mapping failed\n", __func__); 2210 return NULL; 2211 } 2212 2213 /* Grab the current Rx skb from the ring and DMA-unmap it */ 2214 rx_skb = bcmgenet_free_rx_cb(kdev, cb); 2215 2216 /* Put the new Rx skb on the ring */ 2217 cb->skb = skb; 2218 dma_unmap_addr_set(cb, dma_addr, mapping); 2219 dma_unmap_len_set(cb, dma_len, priv->rx_buf_len); 2220 dmadesc_set_addr(priv, cb->bd_addr, mapping); 2221 2222 /* Return the current Rx skb to caller */ 2223 return rx_skb; 2224 } 2225 2226 /* bcmgenet_desc_rx - descriptor based rx process. 2227 * this could be called from bottom half, or from NAPI polling method. 2228 */ 2229 static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, 2230 unsigned int budget) 2231 { 2232 struct bcmgenet_priv *priv = ring->priv; 2233 struct net_device *dev = priv->dev; 2234 struct enet_cb *cb; 2235 struct sk_buff *skb; 2236 u32 dma_length_status; 2237 unsigned long dma_flag; 2238 int len; 2239 unsigned int rxpktprocessed = 0, rxpkttoprocess; 2240 unsigned int bytes_processed = 0; 2241 unsigned int p_index, mask; 2242 unsigned int discards; 2243 2244 /* Clear status before servicing to reduce spurious interrupts */ 2245 if (ring->index == DESC_INDEX) { 2246 bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE, 2247 INTRL2_CPU_CLEAR); 2248 } else { 2249 mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index); 2250 bcmgenet_intrl2_1_writel(priv, 2251 mask, 2252 INTRL2_CPU_CLEAR); 2253 } 2254 2255 p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX); 2256 2257 discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) & 2258 DMA_P_INDEX_DISCARD_CNT_MASK; 2259 if (discards > ring->old_discards) { 2260 discards = discards - ring->old_discards; 2261 ring->errors += discards; 2262 ring->old_discards += discards; 2263 2264 /* Clear HW register when we reach 75% of maximum 0xFFFF */ 2265 if (ring->old_discards >= 0xC000) { 2266 ring->old_discards = 0; 2267 bcmgenet_rdma_ring_writel(priv, ring->index, 0, 2268 RDMA_PROD_INDEX); 2269 } 2270 } 2271 2272 p_index &= DMA_P_INDEX_MASK; 2273 rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK; 2274 2275 netif_dbg(priv, rx_status, dev, 2276 "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); 2277 2278 while ((rxpktprocessed < rxpkttoprocess) && 2279 (rxpktprocessed < budget)) { 2280 struct status_64 *status; 2281 __be16 rx_csum; 2282 2283 cb = &priv->rx_cbs[ring->read_ptr]; 2284 skb = bcmgenet_rx_refill(priv, cb); 2285 2286 if (unlikely(!skb)) { 2287 ring->dropped++; 2288 goto next; 2289 } 2290 2291 status = (struct status_64 *)skb->data; 2292 dma_length_status = status->length_status; 2293 if (dev->features & NETIF_F_RXCSUM) { 2294 rx_csum = (__force __be16)(status->rx_csum & 0xffff); 2295 if (rx_csum) { 2296 skb->csum = (__force __wsum)ntohs(rx_csum); 2297 skb->ip_summed = CHECKSUM_COMPLETE; 2298 } 2299 } 2300 2301 /* DMA flags and length are still valid no matter how 2302 * we got the Receive Status Vector (64B RSB or 
register) 2303 */ 2304 dma_flag = dma_length_status & 0xffff; 2305 len = dma_length_status >> DMA_BUFLENGTH_SHIFT; 2306 2307 netif_dbg(priv, rx_status, dev, 2308 "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", 2309 __func__, p_index, ring->c_index, 2310 ring->read_ptr, dma_length_status); 2311 2312 if (unlikely(len > RX_BUF_LENGTH)) { 2313 netif_err(priv, rx_status, dev, "oversized packet\n"); 2314 dev->stats.rx_length_errors++; 2315 dev->stats.rx_errors++; 2316 dev_kfree_skb_any(skb); 2317 goto next; 2318 } 2319 2320 if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { 2321 netif_err(priv, rx_status, dev, 2322 "dropping fragmented packet!\n"); 2323 ring->errors++; 2324 dev_kfree_skb_any(skb); 2325 goto next; 2326 } 2327 2328 /* report errors */ 2329 if (unlikely(dma_flag & (DMA_RX_CRC_ERROR | 2330 DMA_RX_OV | 2331 DMA_RX_NO | 2332 DMA_RX_LG | 2333 DMA_RX_RXER))) { 2334 netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", 2335 (unsigned int)dma_flag); 2336 if (dma_flag & DMA_RX_CRC_ERROR) 2337 dev->stats.rx_crc_errors++; 2338 if (dma_flag & DMA_RX_OV) 2339 dev->stats.rx_over_errors++; 2340 if (dma_flag & DMA_RX_NO) 2341 dev->stats.rx_frame_errors++; 2342 if (dma_flag & DMA_RX_LG) 2343 dev->stats.rx_length_errors++; 2344 dev->stats.rx_errors++; 2345 dev_kfree_skb_any(skb); 2346 goto next; 2347 } /* error packet */ 2348 2349 skb_put(skb, len); 2350 2351 /* remove RSB and hardware 2 bytes added for IP alignment */ 2352 skb_pull(skb, 66); 2353 len -= 66; 2354 2355 if (priv->crc_fwd_en) { 2356 skb_trim(skb, len - ETH_FCS_LEN); 2357 len -= ETH_FCS_LEN; 2358 } 2359 2360 bytes_processed += len; 2361 2362 /* Finish setting up the received SKB and send it to the kernel */ 2363 skb->protocol = eth_type_trans(skb, priv->dev); 2364 ring->packets++; 2365 ring->bytes += len; 2366 if (dma_flag & DMA_RX_MULT) 2367 dev->stats.multicast++; 2368 2369 /* Notify kernel */ 2370 napi_gro_receive(&ring->napi, skb); 2371 netif_dbg(priv, rx_status, dev, "pushed up to kernel\n"); 2372 2373 next: 2374 rxpktprocessed++; 2375 if (likely(ring->read_ptr < ring->end_ptr)) 2376 ring->read_ptr++; 2377 else 2378 ring->read_ptr = ring->cb_ptr; 2379 2380 ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK; 2381 bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX); 2382 } 2383 2384 ring->dim.bytes = bytes_processed; 2385 ring->dim.packets = rxpktprocessed; 2386 2387 return rxpktprocessed; 2388 } 2389 2390 /* Rx NAPI polling method */ 2391 static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) 2392 { 2393 struct bcmgenet_rx_ring *ring = container_of(napi, 2394 struct bcmgenet_rx_ring, napi); 2395 struct dim_sample dim_sample = {}; 2396 unsigned int work_done; 2397 2398 work_done = bcmgenet_desc_rx(ring, budget); 2399 2400 if (work_done < budget) { 2401 napi_complete_done(napi, work_done); 2402 ring->int_enable(ring); 2403 } 2404 2405 if (ring->dim.use_dim) { 2406 dim_update_sample(ring->dim.event_ctr, ring->dim.packets, 2407 ring->dim.bytes, &dim_sample); 2408 net_dim(&ring->dim.dim, dim_sample); 2409 } 2410 2411 return work_done; 2412 } 2413 2414 static void bcmgenet_dim_work(struct work_struct *work) 2415 { 2416 struct dim *dim = container_of(work, struct dim, work); 2417 struct bcmgenet_net_dim *ndim = 2418 container_of(dim, struct bcmgenet_net_dim, dim); 2419 struct bcmgenet_rx_ring *ring = 2420 container_of(ndim, struct bcmgenet_rx_ring, dim); 2421 struct dim_cq_moder cur_profile = 2422 net_dim_get_rx_moderation(dim->mode, dim->profile_ix); 2423 2424
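	/* Worked example with made-up numbers: if net_dim settled on a
	 * profile of { .usec = 64, .pkts = 16 }, the call below programs the
	 * ring to hold off its interrupt until 16 packets have arrived or
	 * 64 usecs have elapsed, whichever comes first, and then re-arms the
	 * DIM state machine for the next measurement cycle.
	 */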
bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts); 2425 dim->state = DIM_START_MEASURE; 2426 } 2427 2428 /* Assign skb to RX DMA descriptor. */ 2429 static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, 2430 struct bcmgenet_rx_ring *ring) 2431 { 2432 struct enet_cb *cb; 2433 struct sk_buff *skb; 2434 int i; 2435 2436 netif_dbg(priv, hw, priv->dev, "%s\n", __func__); 2437 2438 /* loop here for each buffer needing assign */ 2439 for (i = 0; i < ring->size; i++) { 2440 cb = ring->cbs + i; 2441 skb = bcmgenet_rx_refill(priv, cb); 2442 if (skb) 2443 dev_consume_skb_any(skb); 2444 if (!cb->skb) 2445 return -ENOMEM; 2446 } 2447 2448 return 0; 2449 } 2450 2451 static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) 2452 { 2453 struct sk_buff *skb; 2454 struct enet_cb *cb; 2455 int i; 2456 2457 for (i = 0; i < priv->num_rx_bds; i++) { 2458 cb = &priv->rx_cbs[i]; 2459 2460 skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb); 2461 if (skb) 2462 dev_consume_skb_any(skb); 2463 } 2464 } 2465 2466 static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) 2467 { 2468 u32 reg; 2469 2470 reg = bcmgenet_umac_readl(priv, UMAC_CMD); 2471 if (reg & CMD_SW_RESET) 2472 return; 2473 if (enable) 2474 reg |= mask; 2475 else 2476 reg &= ~mask; 2477 bcmgenet_umac_writel(priv, reg, UMAC_CMD); 2478 2479 /* UniMAC stops on a packet boundary, wait for a full-size packet 2480 * to be processed 2481 */ 2482 if (enable == 0) 2483 usleep_range(1000, 2000); 2484 } 2485 2486 static void reset_umac(struct bcmgenet_priv *priv) 2487 { 2488 /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */ 2489 bcmgenet_rbuf_ctrl_set(priv, 0); 2490 udelay(10); 2491 2492 /* issue soft reset and disable MAC while updating its registers */ 2493 bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); 2494 udelay(2); 2495 } 2496 2497 static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) 2498 { 2499 /* Mask all interrupts.*/ 2500 bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); 2501 bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); 2502 bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); 2503 bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); 2504 } 2505 2506 static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) 2507 { 2508 u32 int0_enable = 0; 2509 2510 /* Monitor cable plug/unplugged event for internal PHY, external PHY 2511 * and MoCA PHY 2512 */ 2513 if (priv->internal_phy) { 2514 int0_enable |= UMAC_IRQ_LINK_EVENT; 2515 if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) 2516 int0_enable |= UMAC_IRQ_PHY_DET_R; 2517 } else if (priv->ext_phy) { 2518 int0_enable |= UMAC_IRQ_LINK_EVENT; 2519 } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { 2520 if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) 2521 int0_enable |= UMAC_IRQ_LINK_EVENT; 2522 } 2523 bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); 2524 } 2525 2526 static void init_umac(struct bcmgenet_priv *priv) 2527 { 2528 struct device *kdev = &priv->pdev->dev; 2529 u32 reg; 2530 u32 int0_enable = 0; 2531 2532 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); 2533 2534 reset_umac(priv); 2535 2536 /* clear tx/rx counter */ 2537 bcmgenet_umac_writel(priv, 2538 MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, 2539 UMAC_MIB_CTRL); 2540 bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL); 2541 2542 bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); 2543 2544 /* init tx registers, enable TSB */ 2545 reg = 
bcmgenet_tbuf_ctrl_get(priv); 2546 reg |= TBUF_64B_EN; 2547 bcmgenet_tbuf_ctrl_set(priv, reg); 2548 2549 /* init rx registers, enable ip header optimization and RSB */ 2550 reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL); 2551 reg |= RBUF_ALIGN_2B | RBUF_64B_EN; 2552 bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL); 2553 2554 /* enable rx checksumming */ 2555 reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL); 2556 reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS; 2557 /* If UniMAC forwards CRC, we need to skip over it to get 2558 * a valid CHK bit to be set in the per-packet status word 2559 */ 2560 if (priv->crc_fwd_en) 2561 reg |= RBUF_SKIP_FCS; 2562 else 2563 reg &= ~RBUF_SKIP_FCS; 2564 bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL); 2565 2566 if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) 2567 bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); 2568 2569 bcmgenet_intr_disable(priv); 2570 2571 /* Configure backpressure vectors for MoCA */ 2572 if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { 2573 reg = bcmgenet_bp_mc_get(priv); 2574 reg |= BIT(priv->hw_params->bp_in_en_shift); 2575 2576 /* bp_mask: back pressure mask */ 2577 if (netif_is_multiqueue(priv->dev)) 2578 reg |= priv->hw_params->bp_in_mask; 2579 else 2580 reg &= ~priv->hw_params->bp_in_mask; 2581 bcmgenet_bp_mc_set(priv, reg); 2582 } 2583 2584 /* Enable MDIO interrupts on GENET v3+ */ 2585 if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) 2586 int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); 2587 2588 bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); 2589 2590 dev_dbg(kdev, "done init umac\n"); 2591 } 2592 2593 static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring, 2594 void (*cb)(struct work_struct *work)) 2595 { 2596 struct bcmgenet_net_dim *dim = &ring->dim; 2597 2598 INIT_WORK(&dim->dim.work, cb); 2599 dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 2600 dim->event_ctr = 0; 2601 dim->packets = 0; 2602 dim->bytes = 0; 2603 } 2604 2605 static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring) 2606 { 2607 struct bcmgenet_net_dim *dim = &ring->dim; 2608 struct dim_cq_moder moder; 2609 u32 usecs, pkts; 2610 2611 usecs = ring->rx_coalesce_usecs; 2612 pkts = ring->rx_max_coalesced_frames; 2613 2614 /* If DIM was enabled, re-apply default parameters */ 2615 if (dim->use_dim) { 2616 moder = net_dim_get_def_rx_moderation(dim->dim.mode); 2617 usecs = moder.usec; 2618 pkts = moder.pkts; 2619 } 2620 2621 bcmgenet_set_rx_coalesce(ring, usecs, pkts); 2622 } 2623 2624 /* Initialize a Tx ring along with corresponding hardware registers */ 2625 static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, 2626 unsigned int index, unsigned int size, 2627 unsigned int start_ptr, unsigned int end_ptr) 2628 { 2629 struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; 2630 u32 words_per_bd = WORDS_PER_BD(priv); 2631 u32 flow_period_val = 0; 2632 2633 spin_lock_init(&ring->lock); 2634 ring->priv = priv; 2635 ring->index = index; 2636 if (index == DESC_INDEX) { 2637 ring->queue = 0; 2638 ring->int_enable = bcmgenet_tx_ring16_int_enable; 2639 ring->int_disable = bcmgenet_tx_ring16_int_disable; 2640 } else { 2641 ring->queue = index + 1; 2642 ring->int_enable = bcmgenet_tx_ring_int_enable; 2643 ring->int_disable = bcmgenet_tx_ring_int_disable; 2644 } 2645 ring->cbs = priv->tx_cbs + start_ptr; 2646 ring->size = size; 2647 ring->clean_ptr = start_ptr; 2648 ring->c_index = 0; 2649 ring->free_bds = size; 2650 ring->write_ptr = start_ptr; 2651 ring->cb_ptr = start_ptr; 2652 ring->end_ptr = end_ptr - 1; 2653 
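	/* Worked example, assuming tx_bds_per_q == 32 as in the hw_params
	 * tables below: priority ring 1 is created with start_ptr == 32 and
	 * end_ptr == 64, so cb_ptr/write_ptr/clean_ptr begin at control
	 * block 32 and ring->end_ptr is stored as 63, the last descriptor
	 * this ring owns before wrapping back to cb_ptr.
	 */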
ring->prod_index = 0; 2654 2655 /* Set flow period for ring != 16 */ 2656 if (index != DESC_INDEX) 2657 flow_period_val = ENET_MAX_MTU_SIZE << 16; 2658 2659 bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX); 2660 bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX); 2661 bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); 2662 /* Disable rate control for now */ 2663 bcmgenet_tdma_ring_writel(priv, index, flow_period_val, 2664 TDMA_FLOW_PERIOD); 2665 bcmgenet_tdma_ring_writel(priv, index, 2666 ((size << DMA_RING_SIZE_SHIFT) | 2667 RX_BUF_LENGTH), DMA_RING_BUF_SIZE); 2668 2669 /* Set start and end address, read and write pointers */ 2670 bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, 2671 DMA_START_ADDR); 2672 bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, 2673 TDMA_READ_PTR); 2674 bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, 2675 TDMA_WRITE_PTR); 2676 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, 2677 DMA_END_ADDR); 2678 2679 /* Initialize Tx NAPI */ 2680 netif_napi_add_tx(priv->dev, &ring->napi, bcmgenet_tx_poll); 2681 } 2682 2683 /* Initialize a RDMA ring */ 2684 static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, 2685 unsigned int index, unsigned int size, 2686 unsigned int start_ptr, unsigned int end_ptr) 2687 { 2688 struct bcmgenet_rx_ring *ring = &priv->rx_rings[index]; 2689 u32 words_per_bd = WORDS_PER_BD(priv); 2690 int ret; 2691 2692 ring->priv = priv; 2693 ring->index = index; 2694 if (index == DESC_INDEX) { 2695 ring->int_enable = bcmgenet_rx_ring16_int_enable; 2696 ring->int_disable = bcmgenet_rx_ring16_int_disable; 2697 } else { 2698 ring->int_enable = bcmgenet_rx_ring_int_enable; 2699 ring->int_disable = bcmgenet_rx_ring_int_disable; 2700 } 2701 ring->cbs = priv->rx_cbs + start_ptr; 2702 ring->size = size; 2703 ring->c_index = 0; 2704 ring->read_ptr = start_ptr; 2705 ring->cb_ptr = start_ptr; 2706 ring->end_ptr = end_ptr - 1; 2707 2708 ret = bcmgenet_alloc_rx_buffers(priv, ring); 2709 if (ret) 2710 return ret; 2711 2712 bcmgenet_init_dim(ring, bcmgenet_dim_work); 2713 bcmgenet_init_rx_coalesce(ring); 2714 2715 /* Initialize Rx NAPI */ 2716 netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll); 2717 2718 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); 2719 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); 2720 bcmgenet_rdma_ring_writel(priv, index, 2721 ((size << DMA_RING_SIZE_SHIFT) | 2722 RX_BUF_LENGTH), DMA_RING_BUF_SIZE); 2723 bcmgenet_rdma_ring_writel(priv, index, 2724 (DMA_FC_THRESH_LO << 2725 DMA_XOFF_THRESHOLD_SHIFT) | 2726 DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH); 2727 2728 /* Set start and end address, read and write pointers */ 2729 bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, 2730 DMA_START_ADDR); 2731 bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, 2732 RDMA_READ_PTR); 2733 bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, 2734 RDMA_WRITE_PTR); 2735 bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, 2736 DMA_END_ADDR); 2737 2738 return ret; 2739 } 2740 2741 static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) 2742 { 2743 unsigned int i; 2744 struct bcmgenet_tx_ring *ring; 2745 2746 for (i = 0; i < priv->hw_params->tx_queues; ++i) { 2747 ring = &priv->tx_rings[i]; 2748 napi_enable(&ring->napi); 2749 ring->int_enable(ring); 2750 } 2751 2752 ring = &priv->tx_rings[DESC_INDEX]; 2753 napi_enable(&ring->napi); 2754 ring->int_enable(ring); 2755 } 2756 2757 
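/* A minimal, non-compiled sketch (illustrative values only) of the
 * free-running index arithmetic that __bcmgenet_tx_reclaim() and
 * bcmgenet_desc_rx() above rely on: the hardware producer/consumer
 * indices are 16-bit counters, so masking their difference with
 * DMA_C_INDEX_MASK stays correct across wrap-around.
 */
#if 0
	u16 hw_c_index = 0x0003;	/* hardware index, wrapped past 0xffff */
	u16 sw_c_index = 0xfffe;	/* last index the driver consumed */
	unsigned int txbds_ready;

	txbds_ready = (hw_c_index - sw_c_index) & DMA_C_INDEX_MASK;
	/* txbds_ready == 5: five descriptors completed despite the wrap */
#endif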
static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv) 2758 { 2759 unsigned int i; 2760 struct bcmgenet_tx_ring *ring; 2761 2762 for (i = 0; i < priv->hw_params->tx_queues; ++i) { 2763 ring = &priv->tx_rings[i]; 2764 napi_disable(&ring->napi); 2765 } 2766 2767 ring = &priv->tx_rings[DESC_INDEX]; 2768 napi_disable(&ring->napi); 2769 } 2770 2771 static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv) 2772 { 2773 unsigned int i; 2774 struct bcmgenet_tx_ring *ring; 2775 2776 for (i = 0; i < priv->hw_params->tx_queues; ++i) { 2777 ring = &priv->tx_rings[i]; 2778 netif_napi_del(&ring->napi); 2779 } 2780 2781 ring = &priv->tx_rings[DESC_INDEX]; 2782 netif_napi_del(&ring->napi); 2783 } 2784 2785 /* Initialize Tx queues 2786 * 2787 * Queues 0-3 are priority-based, each one has 32 descriptors, 2788 * with queue 0 being the highest priority queue. 2789 * 2790 * Queue 16 is the default Tx queue with 2791 * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors. 2792 * 2793 * The transmit control block pool is then partitioned as follows: 2794 * - Tx queue 0 uses tx_cbs[0..31] 2795 * - Tx queue 1 uses tx_cbs[32..63] 2796 * - Tx queue 2 uses tx_cbs[64..95] 2797 * - Tx queue 3 uses tx_cbs[96..127] 2798 * - Tx queue 16 uses tx_cbs[128..255] 2799 */ 2800 static void bcmgenet_init_tx_queues(struct net_device *dev) 2801 { 2802 struct bcmgenet_priv *priv = netdev_priv(dev); 2803 u32 i, dma_enable; 2804 u32 dma_ctrl, ring_cfg; 2805 u32 dma_priority[3] = {0, 0, 0}; 2806 2807 dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); 2808 dma_enable = dma_ctrl & DMA_EN; 2809 dma_ctrl &= ~DMA_EN; 2810 bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); 2811 2812 dma_ctrl = 0; 2813 ring_cfg = 0; 2814 2815 /* Enable strict priority arbiter mode */ 2816 bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); 2817 2818 /* Initialize Tx priority queues */ 2819 for (i = 0; i < priv->hw_params->tx_queues; i++) { 2820 bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q, 2821 i * priv->hw_params->tx_bds_per_q, 2822 (i + 1) * priv->hw_params->tx_bds_per_q); 2823 ring_cfg |= (1 << i); 2824 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); 2825 dma_priority[DMA_PRIO_REG_INDEX(i)] |= 2826 ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i)); 2827 } 2828 2829 /* Initialize Tx default queue 16 */ 2830 bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT, 2831 priv->hw_params->tx_queues * 2832 priv->hw_params->tx_bds_per_q, 2833 TOTAL_DESC); 2834 ring_cfg |= (1 << DESC_INDEX); 2835 dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); 2836 dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |= 2837 ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << 2838 DMA_PRIO_REG_SHIFT(DESC_INDEX)); 2839 2840 /* Set Tx queue priorities */ 2841 bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0); 2842 bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); 2843 bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); 2844 2845 /* Enable Tx queues */ 2846 bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG); 2847 2848 /* Enable Tx DMA */ 2849 if (dma_enable) 2850 dma_ctrl |= DMA_EN; 2851 bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); 2852 } 2853 2854 static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv) 2855 { 2856 unsigned int i; 2857 struct bcmgenet_rx_ring *ring; 2858 2859 for (i = 0; i < priv->hw_params->rx_queues; ++i) { 2860 ring = &priv->rx_rings[i]; 2861 napi_enable(&ring->napi); 2862 ring->int_enable(ring); 2863 } 2864 2865 ring = &priv->rx_rings[DESC_INDEX]; 2866 napi_enable(&ring->napi); 2867 
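	/* Note the ordering used by these enable helpers: NAPI is enabled
	 * before the ring interrupt is unmasked, so an interrupt arriving
	 * immediately afterwards can safely schedule the poller.
	 */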
ring->int_enable(ring); 2868 } 2869 2870 static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) 2871 { 2872 unsigned int i; 2873 struct bcmgenet_rx_ring *ring; 2874 2875 for (i = 0; i < priv->hw_params->rx_queues; ++i) { 2876 ring = &priv->rx_rings[i]; 2877 napi_disable(&ring->napi); 2878 cancel_work_sync(&ring->dim.dim.work); 2879 } 2880 2881 ring = &priv->rx_rings[DESC_INDEX]; 2882 napi_disable(&ring->napi); 2883 cancel_work_sync(&ring->dim.dim.work); 2884 } 2885 2886 static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv) 2887 { 2888 unsigned int i; 2889 struct bcmgenet_rx_ring *ring; 2890 2891 for (i = 0; i < priv->hw_params->rx_queues; ++i) { 2892 ring = &priv->rx_rings[i]; 2893 netif_napi_del(&ring->napi); 2894 } 2895 2896 ring = &priv->rx_rings[DESC_INDEX]; 2897 netif_napi_del(&ring->napi); 2898 } 2899 2900 /* Initialize Rx queues 2901 * 2902 * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be 2903 * used to direct traffic to these queues. 2904 * 2905 * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors. 2906 */ 2907 static int bcmgenet_init_rx_queues(struct net_device *dev) 2908 { 2909 struct bcmgenet_priv *priv = netdev_priv(dev); 2910 u32 i; 2911 u32 dma_enable; 2912 u32 dma_ctrl; 2913 u32 ring_cfg; 2914 int ret; 2915 2916 dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL); 2917 dma_enable = dma_ctrl & DMA_EN; 2918 dma_ctrl &= ~DMA_EN; 2919 bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); 2920 2921 dma_ctrl = 0; 2922 ring_cfg = 0; 2923 2924 /* Initialize Rx priority queues */ 2925 for (i = 0; i < priv->hw_params->rx_queues; i++) { 2926 ret = bcmgenet_init_rx_ring(priv, i, 2927 priv->hw_params->rx_bds_per_q, 2928 i * priv->hw_params->rx_bds_per_q, 2929 (i + 1) * 2930 priv->hw_params->rx_bds_per_q); 2931 if (ret) 2932 return ret; 2933 2934 ring_cfg |= (1 << i); 2935 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); 2936 } 2937 2938 /* Initialize Rx default queue 16 */ 2939 ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT, 2940 priv->hw_params->rx_queues * 2941 priv->hw_params->rx_bds_per_q, 2942 TOTAL_DESC); 2943 if (ret) 2944 return ret; 2945 2946 ring_cfg |= (1 << DESC_INDEX); 2947 dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); 2948 2949 /* Enable rings */ 2950 bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG); 2951 2952 /* Configure ring as descriptor ring and re-enable DMA if enabled */ 2953 if (dma_enable) 2954 dma_ctrl |= DMA_EN; 2955 bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); 2956 2957 return 0; 2958 } 2959 2960 static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) 2961 { 2962 int ret = 0; 2963 int timeout = 0; 2964 u32 reg; 2965 u32 dma_ctrl; 2966 int i; 2967 2968 /* Disable TDMA to stop adding more frames to the TX DMA */ 2969 reg = bcmgenet_tdma_readl(priv, DMA_CTRL); 2970 reg &= ~DMA_EN; 2971 bcmgenet_tdma_writel(priv, reg, DMA_CTRL); 2972 2973 /* Check TDMA status register to confirm TDMA is disabled */ 2974 while (timeout++ < DMA_TIMEOUT_VAL) { 2975 reg = bcmgenet_tdma_readl(priv, DMA_STATUS); 2976 if (reg & DMA_DISABLED) 2977 break; 2978 2979 udelay(1); 2980 } 2981 2982 if (timeout == DMA_TIMEOUT_VAL) { 2983 netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); 2984 ret = -ETIMEDOUT; 2985 } 2986 2987 /* Wait 10ms for packet drain in both tx and rx dma */ 2988 usleep_range(10000, 20000); 2989 2990 /* Disable RDMA */ 2991 reg = bcmgenet_rdma_readl(priv, DMA_CTRL); 2992 reg &= ~DMA_EN; 2993 bcmgenet_rdma_writel(priv, reg, DMA_CTRL); 2994 2995 timeout = 0; 2996 /* Check RDMA status 
register to confirm RDMA is disabled */ 2997 while (timeout++ < DMA_TIMEOUT_VAL) { 2998 reg = bcmgenet_rdma_readl(priv, DMA_STATUS); 2999 if (reg & DMA_DISABLED) 3000 break; 3001 3002 udelay(1); 3003 } 3004 3005 if (timeout == DMA_TIMEOUT_VAL) { 3006 netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); 3007 ret = -ETIMEDOUT; 3008 } 3009 3010 dma_ctrl = 0; 3011 for (i = 0; i < priv->hw_params->rx_queues; i++) 3012 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); 3013 reg = bcmgenet_rdma_readl(priv, DMA_CTRL); 3014 reg &= ~dma_ctrl; 3015 bcmgenet_rdma_writel(priv, reg, DMA_CTRL); 3016 3017 dma_ctrl = 0; 3018 for (i = 0; i < priv->hw_params->tx_queues; i++) 3019 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); 3020 reg = bcmgenet_tdma_readl(priv, DMA_CTRL); 3021 reg &= ~dma_ctrl; 3022 bcmgenet_tdma_writel(priv, reg, DMA_CTRL); 3023 3024 return ret; 3025 } 3026 3027 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) 3028 { 3029 struct netdev_queue *txq; 3030 int i; 3031 3032 bcmgenet_fini_rx_napi(priv); 3033 bcmgenet_fini_tx_napi(priv); 3034 3035 for (i = 0; i < priv->num_tx_bds; i++) 3036 dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev, 3037 priv->tx_cbs + i)); 3038 3039 for (i = 0; i < priv->hw_params->tx_queues; i++) { 3040 txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue); 3041 netdev_tx_reset_queue(txq); 3042 } 3043 3044 txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue); 3045 netdev_tx_reset_queue(txq); 3046 3047 bcmgenet_free_rx_buffers(priv); 3048 kfree(priv->rx_cbs); 3049 kfree(priv->tx_cbs); 3050 } 3051 3052 /* init_edma: Initialize DMA control register */ 3053 static int bcmgenet_init_dma(struct bcmgenet_priv *priv) 3054 { 3055 int ret; 3056 unsigned int i; 3057 struct enet_cb *cb; 3058 3059 netif_dbg(priv, hw, priv->dev, "%s\n", __func__); 3060 3061 /* Initialize common Rx ring structures */ 3062 priv->rx_bds = priv->base + priv->hw_params->rdma_offset; 3063 priv->num_rx_bds = TOTAL_DESC; 3064 priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb), 3065 GFP_KERNEL); 3066 if (!priv->rx_cbs) 3067 return -ENOMEM; 3068 3069 for (i = 0; i < priv->num_rx_bds; i++) { 3070 cb = priv->rx_cbs + i; 3071 cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE; 3072 } 3073 3074 /* Initialize common TX ring structures */ 3075 priv->tx_bds = priv->base + priv->hw_params->tdma_offset; 3076 priv->num_tx_bds = TOTAL_DESC; 3077 priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), 3078 GFP_KERNEL); 3079 if (!priv->tx_cbs) { 3080 kfree(priv->rx_cbs); 3081 return -ENOMEM; 3082 } 3083 3084 for (i = 0; i < priv->num_tx_bds; i++) { 3085 cb = priv->tx_cbs + i; 3086 cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE; 3087 } 3088 3089 /* Init rDma */ 3090 bcmgenet_rdma_writel(priv, priv->dma_max_burst_length, 3091 DMA_SCB_BURST_SIZE); 3092 3093 /* Initialize Rx queues */ 3094 ret = bcmgenet_init_rx_queues(priv->dev); 3095 if (ret) { 3096 netdev_err(priv->dev, "failed to initialize Rx queues\n"); 3097 bcmgenet_free_rx_buffers(priv); 3098 kfree(priv->rx_cbs); 3099 kfree(priv->tx_cbs); 3100 return ret; 3101 } 3102 3103 /* Init tDma */ 3104 bcmgenet_tdma_writel(priv, priv->dma_max_burst_length, 3105 DMA_SCB_BURST_SIZE); 3106 3107 /* Initialize Tx queues */ 3108 bcmgenet_init_tx_queues(priv->dev); 3109 3110 return 0; 3111 } 3112 3113 /* Interrupt bottom half */ 3114 static void bcmgenet_irq_task(struct work_struct *work) 3115 { 3116 unsigned int status; 3117 struct bcmgenet_priv *priv = container_of( 3118 work, struct bcmgenet_priv, bcmgenet_irq_work); 3119 
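	/* bcmgenet_isr0() below stashed the link and PHY-detect bits in
	 * priv->irq0_stat under priv->lock before scheduling this work;
	 * snapshot and clear them here so a racing interrupt can keep
	 * accumulating new events while the PHY handling runs.
	 */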
3120 netif_dbg(priv, intr, priv->dev, "%s\n", __func__); 3121 3122 spin_lock_irq(&priv->lock); 3123 status = priv->irq0_stat; 3124 priv->irq0_stat = 0; 3125 spin_unlock_irq(&priv->lock); 3126 3127 if (status & UMAC_IRQ_PHY_DET_R && 3128 priv->dev->phydev->autoneg != AUTONEG_ENABLE) { 3129 phy_init_hw(priv->dev->phydev); 3130 genphy_config_aneg(priv->dev->phydev); 3131 } 3132 3133 /* Link UP/DOWN event */ 3134 if (status & UMAC_IRQ_LINK_EVENT) 3135 phy_mac_interrupt(priv->dev->phydev); 3136 3137 } 3138 3139 /* bcmgenet_isr1: handle Rx and Tx priority queues */ 3140 static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) 3141 { 3142 struct bcmgenet_priv *priv = dev_id; 3143 struct bcmgenet_rx_ring *rx_ring; 3144 struct bcmgenet_tx_ring *tx_ring; 3145 unsigned int index, status; 3146 3147 /* Read irq status */ 3148 status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & 3149 ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); 3150 3151 /* clear interrupts */ 3152 bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR); 3153 3154 netif_dbg(priv, intr, priv->dev, 3155 "%s: IRQ=0x%x\n", __func__, status); 3156 3157 /* Check Rx priority queue interrupts */ 3158 for (index = 0; index < priv->hw_params->rx_queues; index++) { 3159 if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index))) 3160 continue; 3161 3162 rx_ring = &priv->rx_rings[index]; 3163 rx_ring->dim.event_ctr++; 3164 3165 if (likely(napi_schedule_prep(&rx_ring->napi))) { 3166 rx_ring->int_disable(rx_ring); 3167 __napi_schedule_irqoff(&rx_ring->napi); 3168 } 3169 } 3170 3171 /* Check Tx priority queue interrupts */ 3172 for (index = 0; index < priv->hw_params->tx_queues; index++) { 3173 if (!(status & BIT(index))) 3174 continue; 3175 3176 tx_ring = &priv->tx_rings[index]; 3177 3178 if (likely(napi_schedule_prep(&tx_ring->napi))) { 3179 tx_ring->int_disable(tx_ring); 3180 __napi_schedule_irqoff(&tx_ring->napi); 3181 } 3182 } 3183 3184 return IRQ_HANDLED; 3185 } 3186 3187 /* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */ 3188 static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) 3189 { 3190 struct bcmgenet_priv *priv = dev_id; 3191 struct bcmgenet_rx_ring *rx_ring; 3192 struct bcmgenet_tx_ring *tx_ring; 3193 unsigned int status; 3194 unsigned long flags; 3195 3196 /* Read irq status */ 3197 status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & 3198 ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); 3199 3200 /* clear interrupts */ 3201 bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR); 3202 3203 netif_dbg(priv, intr, priv->dev, 3204 "IRQ=0x%x\n", status); 3205 3206 if (status & UMAC_IRQ_RXDMA_DONE) { 3207 rx_ring = &priv->rx_rings[DESC_INDEX]; 3208 rx_ring->dim.event_ctr++; 3209 3210 if (likely(napi_schedule_prep(&rx_ring->napi))) { 3211 rx_ring->int_disable(rx_ring); 3212 __napi_schedule_irqoff(&rx_ring->napi); 3213 } 3214 } 3215 3216 if (status & UMAC_IRQ_TXDMA_DONE) { 3217 tx_ring = &priv->tx_rings[DESC_INDEX]; 3218 3219 if (likely(napi_schedule_prep(&tx_ring->napi))) { 3220 tx_ring->int_disable(tx_ring); 3221 __napi_schedule_irqoff(&tx_ring->napi); 3222 } 3223 } 3224 3225 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && 3226 status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { 3227 wake_up(&priv->wq); 3228 } 3229 3230 /* all other interested interrupts handled in bottom half */ 3231 status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R); 3232 if (status) { 3233 /* Save irq status for bottom-half processing. 
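		 * Only the link-event and PHY-detect bits are deferred:
		 * servicing them ends in MDIO accesses (phy_init_hw(),
		 * genphy_config_aneg()) that sleep and therefore must not
		 * run in hard-IRQ context.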
*/ 3234 spin_lock_irqsave(&priv->lock, flags); 3235 priv->irq0_stat |= status; 3236 spin_unlock_irqrestore(&priv->lock, flags); 3237 3238 schedule_work(&priv->bcmgenet_irq_work); 3239 } 3240 3241 return IRQ_HANDLED; 3242 } 3243 3244 static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id) 3245 { 3246 /* Acknowledge the interrupt */ 3247 return IRQ_HANDLED; 3248 } 3249 3250 static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) 3251 { 3252 u32 reg; 3253 3254 reg = bcmgenet_rbuf_ctrl_get(priv); 3255 reg |= BIT(1); 3256 bcmgenet_rbuf_ctrl_set(priv, reg); 3257 udelay(10); 3258 3259 reg &= ~BIT(1); 3260 bcmgenet_rbuf_ctrl_set(priv, reg); 3261 udelay(10); 3262 } 3263 3264 static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, 3265 const unsigned char *addr) 3266 { 3267 bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0); 3268 bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1); 3269 } 3270 3271 static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv, 3272 unsigned char *addr) 3273 { 3274 u32 addr_tmp; 3275 3276 addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0); 3277 put_unaligned_be32(addr_tmp, &addr[0]); 3278 addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1); 3279 put_unaligned_be16(addr_tmp, &addr[4]); 3280 } 3281 3282 /* Returns a reusable dma control register value */ 3283 static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) 3284 { 3285 unsigned int i; 3286 u32 reg; 3287 u32 dma_ctrl; 3288 3289 /* disable DMA */ 3290 dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; 3291 for (i = 0; i < priv->hw_params->tx_queues; i++) 3292 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); 3293 reg = bcmgenet_tdma_readl(priv, DMA_CTRL); 3294 reg &= ~dma_ctrl; 3295 bcmgenet_tdma_writel(priv, reg, DMA_CTRL); 3296 3297 dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; 3298 for (i = 0; i < priv->hw_params->rx_queues; i++) 3299 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); 3300 reg = bcmgenet_rdma_readl(priv, DMA_CTRL); 3301 reg &= ~dma_ctrl; 3302 bcmgenet_rdma_writel(priv, reg, DMA_CTRL); 3303 3304 bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH); 3305 udelay(10); 3306 bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH); 3307 3308 return dma_ctrl; 3309 } 3310 3311 static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) 3312 { 3313 u32 reg; 3314 3315 reg = bcmgenet_rdma_readl(priv, DMA_CTRL); 3316 reg |= dma_ctrl; 3317 bcmgenet_rdma_writel(priv, reg, DMA_CTRL); 3318 3319 reg = bcmgenet_tdma_readl(priv, DMA_CTRL); 3320 reg |= dma_ctrl; 3321 bcmgenet_tdma_writel(priv, reg, DMA_CTRL); 3322 } 3323 3324 static void bcmgenet_netif_start(struct net_device *dev) 3325 { 3326 struct bcmgenet_priv *priv = netdev_priv(dev); 3327 3328 /* Start the network engine */ 3329 bcmgenet_set_rx_mode(dev); 3330 bcmgenet_enable_rx_napi(priv); 3331 3332 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); 3333 3334 bcmgenet_enable_tx_napi(priv); 3335 3336 /* Monitor link interrupts now */ 3337 bcmgenet_link_intr_enable(priv); 3338 3339 phy_start(dev->phydev); 3340 } 3341 3342 static int bcmgenet_open(struct net_device *dev) 3343 { 3344 struct bcmgenet_priv *priv = netdev_priv(dev); 3345 unsigned long dma_ctrl; 3346 int ret; 3347 3348 netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); 3349 3350 /* Turn on the clock */ 3351 clk_prepare_enable(priv->clk); 3352 3353 /* If this is an internal GPHY, power it back on now, before UniMAC is 3354 * brought out of reset as absolutely no UniMAC activity is allowed 3355 */ 3356 if (priv->internal_phy) 3357 
bcmgenet_power_up(priv, GENET_POWER_PASSIVE); 3358 3359 /* take MAC out of reset */ 3360 bcmgenet_umac_reset(priv); 3361 3362 init_umac(priv); 3363 3364 /* Apply features again in case we changed them while interface was 3365 * down 3366 */ 3367 bcmgenet_set_features(dev, dev->features); 3368 3369 bcmgenet_set_hw_addr(priv, dev->dev_addr); 3370 3371 /* Disable RX/TX DMA and flush TX queues */ 3372 dma_ctrl = bcmgenet_dma_disable(priv); 3373 3374 /* Reinitialize TDMA and RDMA and SW housekeeping */ 3375 ret = bcmgenet_init_dma(priv); 3376 if (ret) { 3377 netdev_err(dev, "failed to initialize DMA\n"); 3378 goto err_clk_disable; 3379 } 3380 3381 /* Always enable ring 16 - descriptor ring */ 3382 bcmgenet_enable_dma(priv, dma_ctrl); 3383 3384 /* HFB init */ 3385 bcmgenet_hfb_init(priv); 3386 3387 ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, 3388 dev->name, priv); 3389 if (ret < 0) { 3390 netdev_err(dev, "can't request IRQ %d\n", priv->irq0); 3391 goto err_fini_dma; 3392 } 3393 3394 ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, 3395 dev->name, priv); 3396 if (ret < 0) { 3397 netdev_err(dev, "can't request IRQ %d\n", priv->irq1); 3398 goto err_irq0; 3399 } 3400 3401 ret = bcmgenet_mii_probe(dev); 3402 if (ret) { 3403 netdev_err(dev, "failed to connect to PHY\n"); 3404 goto err_irq1; 3405 } 3406 3407 bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause); 3408 3409 bcmgenet_netif_start(dev); 3410 3411 netif_tx_start_all_queues(dev); 3412 3413 return 0; 3414 3415 err_irq1: 3416 free_irq(priv->irq1, priv); 3417 err_irq0: 3418 free_irq(priv->irq0, priv); 3419 err_fini_dma: 3420 bcmgenet_dma_teardown(priv); 3421 bcmgenet_fini_dma(priv); 3422 err_clk_disable: 3423 if (priv->internal_phy) 3424 bcmgenet_power_down(priv, GENET_POWER_PASSIVE); 3425 clk_disable_unprepare(priv->clk); 3426 return ret; 3427 } 3428 3429 static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy) 3430 { 3431 struct bcmgenet_priv *priv = netdev_priv(dev); 3432 3433 bcmgenet_disable_tx_napi(priv); 3434 netif_tx_disable(dev); 3435 3436 /* Disable MAC receive */ 3437 umac_enable_set(priv, CMD_RX_EN, false); 3438 3439 bcmgenet_dma_teardown(priv); 3440 3441 /* Disable MAC transmit. TX DMA must be disabled before this point */ 3442 umac_enable_set(priv, CMD_TX_EN, false); 3443 3444 if (stop_phy) 3445 phy_stop(dev->phydev); 3446 bcmgenet_disable_rx_napi(priv); 3447 bcmgenet_intr_disable(priv); 3448 3449 /* Wait for pending work items to complete. Since interrupts are 3450 * disabled no new work will be scheduled. 
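	 * cancel_work_sync() also waits for a work item that is already
	 * executing, so once it returns the bottom half can no longer touch
	 * the hardware.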
3451 */ 3452 cancel_work_sync(&priv->bcmgenet_irq_work); 3453 3454 /* tx reclaim */ 3455 bcmgenet_tx_reclaim_all(dev); 3456 bcmgenet_fini_dma(priv); 3457 } 3458 3459 static int bcmgenet_close(struct net_device *dev) 3460 { 3461 struct bcmgenet_priv *priv = netdev_priv(dev); 3462 int ret = 0; 3463 3464 netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); 3465 3466 bcmgenet_netif_stop(dev, false); 3467 3468 /* Really kill the PHY state machine and disconnect from it */ 3469 phy_disconnect(dev->phydev); 3470 3471 free_irq(priv->irq0, priv); 3472 free_irq(priv->irq1, priv); 3473 3474 if (priv->internal_phy) 3475 ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); 3476 3477 clk_disable_unprepare(priv->clk); 3478 3479 return ret; 3480 } 3481 3482 static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) 3483 { 3484 struct bcmgenet_priv *priv = ring->priv; 3485 u32 p_index, c_index, intsts, intmsk; 3486 struct netdev_queue *txq; 3487 unsigned int free_bds; 3488 bool txq_stopped; 3489 3490 if (!netif_msg_tx_err(priv)) 3491 return; 3492 3493 txq = netdev_get_tx_queue(priv->dev, ring->queue); 3494 3495 spin_lock(&ring->lock); 3496 if (ring->index == DESC_INDEX) { 3497 intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); 3498 intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE; 3499 } else { 3500 intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); 3501 intmsk = 1 << ring->index; 3502 } 3503 c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); 3504 p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX); 3505 txq_stopped = netif_tx_queue_stopped(txq); 3506 free_bds = ring->free_bds; 3507 spin_unlock(&ring->lock); 3508 3509 netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n" 3510 "TX queue status: %s, interrupts: %s\n" 3511 "(sw)free_bds: %d (sw)size: %d\n" 3512 "(sw)p_index: %d (hw)p_index: %d\n" 3513 "(sw)c_index: %d (hw)c_index: %d\n" 3514 "(sw)clean_p: %d (sw)write_p: %d\n" 3515 "(sw)cb_ptr: %d (sw)end_ptr: %d\n", 3516 ring->index, ring->queue, 3517 txq_stopped ? "stopped" : "active", 3518 intsts & intmsk ? 
"enabled" : "disabled", 3519 free_bds, ring->size, 3520 ring->prod_index, p_index & DMA_P_INDEX_MASK, 3521 ring->c_index, c_index & DMA_C_INDEX_MASK, 3522 ring->clean_ptr, ring->write_ptr, 3523 ring->cb_ptr, ring->end_ptr); 3524 } 3525 3526 static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue) 3527 { 3528 struct bcmgenet_priv *priv = netdev_priv(dev); 3529 u32 int0_enable = 0; 3530 u32 int1_enable = 0; 3531 unsigned int q; 3532 3533 netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); 3534 3535 for (q = 0; q < priv->hw_params->tx_queues; q++) 3536 bcmgenet_dump_tx_queue(&priv->tx_rings[q]); 3537 bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]); 3538 3539 bcmgenet_tx_reclaim_all(dev); 3540 3541 for (q = 0; q < priv->hw_params->tx_queues; q++) 3542 int1_enable |= (1 << q); 3543 3544 int0_enable = UMAC_IRQ_TXDMA_DONE; 3545 3546 /* Re-enable TX interrupts if disabled */ 3547 bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); 3548 bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); 3549 3550 netif_trans_update(dev); 3551 3552 dev->stats.tx_errors++; 3553 3554 netif_tx_wake_all_queues(dev); 3555 } 3556 3557 #define MAX_MDF_FILTER 17 3558 3559 static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, 3560 const unsigned char *addr, 3561 int *i) 3562 { 3563 bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1], 3564 UMAC_MDF_ADDR + (*i * 4)); 3565 bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 | 3566 addr[4] << 8 | addr[5], 3567 UMAC_MDF_ADDR + ((*i + 1) * 4)); 3568 *i += 2; 3569 } 3570 3571 static void bcmgenet_set_rx_mode(struct net_device *dev) 3572 { 3573 struct bcmgenet_priv *priv = netdev_priv(dev); 3574 struct netdev_hw_addr *ha; 3575 int i, nfilter; 3576 u32 reg; 3577 3578 netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); 3579 3580 /* Number of filters needed */ 3581 nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2; 3582 3583 /* 3584 * Turn on promicuous mode for three scenarios 3585 * 1. IFF_PROMISC flag is set 3586 * 2. IFF_ALLMULTI flag is set 3587 * 3. The number of filters needed exceeds the number filters 3588 * supported by the hardware. 3589 */ 3590 reg = bcmgenet_umac_readl(priv, UMAC_CMD); 3591 if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) || 3592 (nfilter > MAX_MDF_FILTER)) { 3593 reg |= CMD_PROMISC; 3594 bcmgenet_umac_writel(priv, reg, UMAC_CMD); 3595 bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL); 3596 return; 3597 } else { 3598 reg &= ~CMD_PROMISC; 3599 bcmgenet_umac_writel(priv, reg, UMAC_CMD); 3600 } 3601 3602 /* update MDF filter */ 3603 i = 0; 3604 /* Broadcast */ 3605 bcmgenet_set_mdf_addr(priv, dev->broadcast, &i); 3606 /* my own address.*/ 3607 bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i); 3608 3609 /* Unicast */ 3610 netdev_for_each_uc_addr(ha, dev) 3611 bcmgenet_set_mdf_addr(priv, ha->addr, &i); 3612 3613 /* Multicast */ 3614 netdev_for_each_mc_addr(ha, dev) 3615 bcmgenet_set_mdf_addr(priv, ha->addr, &i); 3616 3617 /* Enable filters */ 3618 reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter); 3619 bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL); 3620 } 3621 3622 /* Set the hardware MAC address. */ 3623 static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) 3624 { 3625 struct sockaddr *addr = p; 3626 3627 /* Setting the MAC address at the hardware level is not possible 3628 * without disabling the UniMAC RX/TX enable bits. 
3629 */ 3630 if (netif_running(dev)) 3631 return -EBUSY; 3632 3633 eth_hw_addr_set(dev, addr->sa_data); 3634 3635 return 0; 3636 } 3637 3638 static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev) 3639 { 3640 struct bcmgenet_priv *priv = netdev_priv(dev); 3641 unsigned long tx_bytes = 0, tx_packets = 0; 3642 unsigned long rx_bytes = 0, rx_packets = 0; 3643 unsigned long rx_errors = 0, rx_dropped = 0; 3644 struct bcmgenet_tx_ring *tx_ring; 3645 struct bcmgenet_rx_ring *rx_ring; 3646 unsigned int q; 3647 3648 for (q = 0; q < priv->hw_params->tx_queues; q++) { 3649 tx_ring = &priv->tx_rings[q]; 3650 tx_bytes += tx_ring->bytes; 3651 tx_packets += tx_ring->packets; 3652 } 3653 tx_ring = &priv->tx_rings[DESC_INDEX]; 3654 tx_bytes += tx_ring->bytes; 3655 tx_packets += tx_ring->packets; 3656 3657 for (q = 0; q < priv->hw_params->rx_queues; q++) { 3658 rx_ring = &priv->rx_rings[q]; 3659 3660 rx_bytes += rx_ring->bytes; 3661 rx_packets += rx_ring->packets; 3662 rx_errors += rx_ring->errors; 3663 rx_dropped += rx_ring->dropped; 3664 } 3665 rx_ring = &priv->rx_rings[DESC_INDEX]; 3666 rx_bytes += rx_ring->bytes; 3667 rx_packets += rx_ring->packets; 3668 rx_errors += rx_ring->errors; 3669 rx_dropped += rx_ring->dropped; 3670 3671 dev->stats.tx_bytes = tx_bytes; 3672 dev->stats.tx_packets = tx_packets; 3673 dev->stats.rx_bytes = rx_bytes; 3674 dev->stats.rx_packets = rx_packets; 3675 dev->stats.rx_errors = rx_errors; 3676 dev->stats.rx_missed_errors = rx_errors; 3677 dev->stats.rx_dropped = rx_dropped; 3678 return &dev->stats; 3679 } 3680 3681 static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier) 3682 { 3683 struct bcmgenet_priv *priv = netdev_priv(dev); 3684 3685 if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) || 3686 priv->phy_interface != PHY_INTERFACE_MODE_MOCA) 3687 return -EOPNOTSUPP; 3688 3689 if (new_carrier) 3690 netif_carrier_on(dev); 3691 else 3692 netif_carrier_off(dev); 3693 3694 return 0; 3695 } 3696 3697 static const struct net_device_ops bcmgenet_netdev_ops = { 3698 .ndo_open = bcmgenet_open, 3699 .ndo_stop = bcmgenet_close, 3700 .ndo_start_xmit = bcmgenet_xmit, 3701 .ndo_tx_timeout = bcmgenet_timeout, 3702 .ndo_set_rx_mode = bcmgenet_set_rx_mode, 3703 .ndo_set_mac_address = bcmgenet_set_mac_addr, 3704 .ndo_eth_ioctl = phy_do_ioctl_running, 3705 .ndo_set_features = bcmgenet_set_features, 3706 .ndo_get_stats = bcmgenet_get_stats, 3707 .ndo_change_carrier = bcmgenet_change_carrier, 3708 }; 3709 3710 /* Array of GENET hardware parameters/characteristics */ 3711 static struct bcmgenet_hw_params bcmgenet_hw_params[] = { 3712 [GENET_V1] = { 3713 .tx_queues = 0, 3714 .tx_bds_per_q = 0, 3715 .rx_queues = 0, 3716 .rx_bds_per_q = 0, 3717 .bp_in_en_shift = 16, 3718 .bp_in_mask = 0xffff, 3719 .hfb_filter_cnt = 16, 3720 .qtag_mask = 0x1F, 3721 .hfb_offset = 0x1000, 3722 .rdma_offset = 0x2000, 3723 .tdma_offset = 0x3000, 3724 .words_per_bd = 2, 3725 }, 3726 [GENET_V2] = { 3727 .tx_queues = 4, 3728 .tx_bds_per_q = 32, 3729 .rx_queues = 0, 3730 .rx_bds_per_q = 0, 3731 .bp_in_en_shift = 16, 3732 .bp_in_mask = 0xffff, 3733 .hfb_filter_cnt = 16, 3734 .qtag_mask = 0x1F, 3735 .tbuf_offset = 0x0600, 3736 .hfb_offset = 0x1000, 3737 .hfb_reg_offset = 0x2000, 3738 .rdma_offset = 0x3000, 3739 .tdma_offset = 0x4000, 3740 .words_per_bd = 2, 3741 .flags = GENET_HAS_EXT, 3742 }, 3743 [GENET_V3] = { 3744 .tx_queues = 4, 3745 .tx_bds_per_q = 32, 3746 .rx_queues = 0, 3747 .rx_bds_per_q = 0, 3748 .bp_in_en_shift = 17, 3749 .bp_in_mask = 0x1ffff, 3750 .hfb_filter_cnt = 
48, 3751 .hfb_filter_size = 128, 3752 .qtag_mask = 0x3F, 3753 .tbuf_offset = 0x0600, 3754 .hfb_offset = 0x8000, 3755 .hfb_reg_offset = 0xfc00, 3756 .rdma_offset = 0x10000, 3757 .tdma_offset = 0x11000, 3758 .words_per_bd = 2, 3759 .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR | 3760 GENET_HAS_MOCA_LINK_DET, 3761 }, 3762 [GENET_V4] = { 3763 .tx_queues = 4, 3764 .tx_bds_per_q = 32, 3765 .rx_queues = 0, 3766 .rx_bds_per_q = 0, 3767 .bp_in_en_shift = 17, 3768 .bp_in_mask = 0x1ffff, 3769 .hfb_filter_cnt = 48, 3770 .hfb_filter_size = 128, 3771 .qtag_mask = 0x3F, 3772 .tbuf_offset = 0x0600, 3773 .hfb_offset = 0x8000, 3774 .hfb_reg_offset = 0xfc00, 3775 .rdma_offset = 0x2000, 3776 .tdma_offset = 0x4000, 3777 .words_per_bd = 3, 3778 .flags = GENET_HAS_40BITS | GENET_HAS_EXT | 3779 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, 3780 }, 3781 [GENET_V5] = { 3782 .tx_queues = 4, 3783 .tx_bds_per_q = 32, 3784 .rx_queues = 0, 3785 .rx_bds_per_q = 0, 3786 .bp_in_en_shift = 17, 3787 .bp_in_mask = 0x1ffff, 3788 .hfb_filter_cnt = 48, 3789 .hfb_filter_size = 128, 3790 .qtag_mask = 0x3F, 3791 .tbuf_offset = 0x0600, 3792 .hfb_offset = 0x8000, 3793 .hfb_reg_offset = 0xfc00, 3794 .rdma_offset = 0x2000, 3795 .tdma_offset = 0x4000, 3796 .words_per_bd = 3, 3797 .flags = GENET_HAS_40BITS | GENET_HAS_EXT | 3798 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, 3799 }, 3800 }; 3801 3802 /* Infer hardware parameters from the detected GENET version */ 3803 static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) 3804 { 3805 struct bcmgenet_hw_params *params; 3806 u32 reg; 3807 u8 major; 3808 u16 gphy_rev; 3809 3810 if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) { 3811 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; 3812 genet_dma_ring_regs = genet_dma_ring_regs_v4; 3813 } else if (GENET_IS_V3(priv)) { 3814 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; 3815 genet_dma_ring_regs = genet_dma_ring_regs_v123; 3816 } else if (GENET_IS_V2(priv)) { 3817 bcmgenet_dma_regs = bcmgenet_dma_regs_v2; 3818 genet_dma_ring_regs = genet_dma_ring_regs_v123; 3819 } else if (GENET_IS_V1(priv)) { 3820 bcmgenet_dma_regs = bcmgenet_dma_regs_v1; 3821 genet_dma_ring_regs = genet_dma_ring_regs_v123; 3822 } 3823 3824 /* enum genet_version starts at 1 */ 3825 priv->hw_params = &bcmgenet_hw_params[priv->version]; 3826 params = priv->hw_params; 3827 3828 /* Read GENET HW version */ 3829 reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL); 3830 major = (reg >> 24 & 0x0f); 3831 if (major == 6) 3832 major = 5; 3833 else if (major == 5) 3834 major = 4; 3835 else if (major == 0) 3836 major = 1; 3837 if (major != priv->version) { 3838 dev_err(&priv->pdev->dev, 3839 "GENET version mismatch, got: %d, configured for: %d\n", 3840 major, priv->version); 3841 } 3842 3843 /* Print the GENET core version */ 3844 dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, 3845 major, (reg >> 16) & 0x0f, reg & 0xffff); 3846 3847 /* Store the integrated PHY revision for the MDIO probing function 3848 * to pass this information to the PHY driver. The PHY driver expects 3849 * to find the PHY major revision in bits 15:8 while the GENET register 3850 * stores that information in bits 7:0, account for that. 3851 * 3852 * On newer chips, starting with PHY revision G0, a new scheme is 3853 * deployed similar to the Starfighter 2 switch with GPHY major 3854 * revision in bits 15:8 and patch level in bits 7:0. 
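	 * For example, an old-scheme reading of 0x0030 is stored as
	 * priv->gphy_rev == 0x3000, while a new-scheme reading such as
	 * 0x1002 is kept as-is.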
/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
	struct bcmgenet_hw_params *params;
	u32 reg;
	u8 major;
	u16 gphy_rev;

	if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v4;
	} else if (GENET_IS_V3(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
	} else if (GENET_IS_V2(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
	} else if (GENET_IS_V1(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
	}

	/* enum genet_version starts at 1 */
	priv->hw_params = &bcmgenet_hw_params[priv->version];
	params = priv->hw_params;

	/* Read GENET HW version */
	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
	major = (reg >> 24 & 0x0f);
	if (major == 6)
		major = 5;
	else if (major == 5)
		major = 4;
	else if (major == 0)
		major = 1;
	if (major != priv->version) {
		dev_err(&priv->pdev->dev,
			"GENET version mismatch, got: %d, configured for: %d\n",
			major, priv->version);
	}

	/* Print the GENET core version */
	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
		 major, (reg >> 16) & 0x0f, reg & 0xffff);

	/* Store the integrated PHY revision for the MDIO probing function
	 * to pass this information to the PHY driver. The PHY driver expects
	 * to find the PHY major revision in bits 15:8 while the GENET register
	 * stores that information in bits 7:0, account for that.
	 *
	 * On newer chips, starting with PHY revision G0, a new scheme is
	 * deployed similar to the Starfighter 2 switch with GPHY major
	 * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
	 * is reserved as well as special value 0x01ff, so we have a small
	 * heuristic to check for the new GPHY revision and re-arrange things
	 * so the GPHY driver is happy.
	 */
	gphy_rev = reg & 0xffff;

	if (GENET_IS_V5(priv)) {
		/* The EPHY revision should come from the MDIO registers of
		 * the PHY, not from GENET.
		 */
		if (gphy_rev != 0) {
			pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
				gphy_rev);
		}
	/* This is reserved so should require special treatment */
	} else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
		return;
	/* This is the good old scheme, just GPHY major, no minor nor patch */
	} else if ((gphy_rev & 0xf0) != 0) {
		priv->gphy_rev = gphy_rev << 8;
	/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
	} else if ((gphy_rev & 0xff00) != 0) {
		priv->gphy_rev = gphy_rev;
	}

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (!(params->flags & GENET_HAS_40BITS))
		pr_warn("GENET does not support 40-bit physical addressing\n");
#endif

	pr_debug("Configuration for version: %d\n"
		 "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
		 "BP << en: %2d, BP msk: 0x%05x\n"
		 "HFB count: %2d, QTAG msk: 0x%05x\n"
		 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
		 "RDMA: 0x%05x, TDMA: 0x%05x\n"
		 "Words/BD: %d\n",
		 priv->version,
		 params->tx_queues, params->tx_bds_per_q,
		 params->rx_queues, params->rx_bds_per_q,
		 params->bp_in_en_shift, params->bp_in_mask,
		 params->hfb_filter_cnt, params->qtag_mask,
		 params->tbuf_offset, params->hfb_offset,
		 params->hfb_reg_offset,
		 params->rdma_offset, params->tdma_offset,
		 params->words_per_bd);
}

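/* Per-match platform data binding a DT compatible or ACPI ID to a GENET
 * core version and its DMA tuning. Note that the BCM2711 integration uses
 * a reduced maximum DMA burst length and the BCM7712 one an internal
 * 16nm EPHY.
 */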
struct bcmgenet_plat_data {
	enum bcmgenet_version version;
	u32 dma_max_burst_length;
	bool ephy_16nm;
};

static const struct bcmgenet_plat_data v1_plat_data = {
	.version = GENET_V1,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
};

static const struct bcmgenet_plat_data v2_plat_data = {
	.version = GENET_V2,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
};

static const struct bcmgenet_plat_data v3_plat_data = {
	.version = GENET_V3,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
};

static const struct bcmgenet_plat_data v4_plat_data = {
	.version = GENET_V4,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
};

static const struct bcmgenet_plat_data v5_plat_data = {
	.version = GENET_V5,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
};

static const struct bcmgenet_plat_data bcm2711_plat_data = {
	.version = GENET_V5,
	.dma_max_burst_length = 0x08,
};

static const struct bcmgenet_plat_data bcm7712_plat_data = {
	.version = GENET_V5,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
	.ephy_16nm = true,
};

static const struct of_device_id bcmgenet_match[] = {
	{ .compatible = "brcm,genet-v1", .data = &v1_plat_data },
	{ .compatible = "brcm,genet-v2", .data = &v2_plat_data },
	{ .compatible = "brcm,genet-v3", .data = &v3_plat_data },
	{ .compatible = "brcm,genet-v4", .data = &v4_plat_data },
	{ .compatible = "brcm,genet-v5", .data = &v5_plat_data },
	{ .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data },
	{ .compatible = "brcm,bcm7712-genet-v5", .data = &bcm7712_plat_data },
	{ },
};
MODULE_DEVICE_TABLE(of, bcmgenet_match);

static int bcmgenet_probe(struct platform_device *pdev)
{
	struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
	const struct bcmgenet_plat_data *pdata;
	struct bcmgenet_priv *priv;
	struct net_device *dev;
	unsigned int i;
	int err = -EIO;

	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
				 GENET_MAX_MQ_CNT + 1);
	if (!dev) {
		dev_err(&pdev->dev, "can't allocate net device\n");
		return -ENOMEM;
	}

	priv = netdev_priv(dev);
	priv->irq0 = platform_get_irq(pdev, 0);
	if (priv->irq0 < 0) {
		err = priv->irq0;
		goto err;
	}
	priv->irq1 = platform_get_irq(pdev, 1);
	if (priv->irq1 < 0) {
		err = priv->irq1;
		goto err;
	}
	priv->wol_irq = platform_get_irq_optional(pdev, 2);
	if (priv->wol_irq == -EPROBE_DEFER) {
		err = priv->wol_irq;
		goto err;
	}

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		err = PTR_ERR(priv->base);
		goto err;
	}

	spin_lock_init(&priv->lock);

	/* Set default pause parameters */
	priv->autoneg_pause = 1;
	priv->tx_pause = 1;
	priv->rx_pause = 1;

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->watchdog_timeo = 2 * HZ;
	dev->ethtool_ops = &bcmgenet_ethtool_ops;
	dev->netdev_ops = &bcmgenet_netdev_ops;

	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);

	/* Set default features */
	dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
			 NETIF_F_RXCSUM;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = true;
	if (priv->wol_irq > 0) {
		err = devm_request_irq(&pdev->dev, priv->wol_irq,
				       bcmgenet_wol_isr, 0, dev->name, priv);
		if (!err)
			device_set_wakeup_capable(&pdev->dev, 1);
	}

	/* Set the needed headroom to account for any possible
	 * features enabling/disabling at runtime
	 */
	dev->needed_headroom += 64;

	priv->dev = dev;
	priv->pdev = pdev;

	pdata = device_get_match_data(&pdev->dev);
	if (pdata) {
		priv->version = pdata->version;
		priv->dma_max_burst_length = pdata->dma_max_burst_length;
		priv->ephy_16nm = pdata->ephy_16nm;
	} else {
		priv->version = pd->genet_version;
		priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
	}

	priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet");
	if (IS_ERR(priv->clk)) {
		dev_dbg(&priv->pdev->dev, "failed to get enet clock\n");
		err = PTR_ERR(priv->clk);
		goto err;
	}

	err = clk_prepare_enable(priv->clk);
	if (err)
		goto err;

	bcmgenet_set_hw_params(priv);

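	/* Prefer a 40-bit DMA mask when the descriptor format carries the
	 * upper address word (GENET_HAS_40BITS); otherwise, or if setting
	 * it fails, fall back to a 32-bit mask.
	 */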
	err = -EIO;
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		goto err_clk_disable;

	/* MII wait queue */
	init_waitqueue_head(&priv->wq);
	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
	priv->rx_buf_len = RX_BUF_LENGTH;
	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);

	priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol");
	if (IS_ERR(priv->clk_wol)) {
		dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n");
		err = PTR_ERR(priv->clk_wol);
		goto err_clk_disable;
	}

	priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee");
	if (IS_ERR(priv->clk_eee)) {
		dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");
		err = PTR_ERR(priv->clk_eee);
		goto err_clk_disable;
	}

	/* If this is an internal GPHY, power it on now, before UniMAC is
	 * brought out of reset as absolutely no UniMAC activity is allowed
	 */
	if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL)
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	/* MAC address precedence: platform data, then a firmware-provided
	 * (DT/ACPI) address, then, on ACPI systems, whatever is already
	 * programmed into the hardware, and finally a random address.
	 */
	if (pd && !IS_ERR_OR_NULL(pd->mac_address)) {
		eth_hw_addr_set(dev, pd->mac_address);
	} else if (device_get_ethdev_address(&pdev->dev, dev)) {
		if (has_acpi_companion(&pdev->dev)) {
			u8 addr[ETH_ALEN];

			bcmgenet_get_hw_addr(priv, addr);
			eth_hw_addr_set(dev, addr);
		}
	}

	if (!is_valid_ether_addr(dev->dev_addr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		eth_hw_addr_random(dev);
	}

	reset_umac(priv);

	err = bcmgenet_mii_init(dev);
	if (err)
		goto err_clk_disable;

	/* Set up the number of real queues + 1 (GENET_V1 has 0 hardware
	 * queues, just the ring 16 descriptor-based TX queue).
	 */
	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);

	/* Set default coalescing parameters */
	for (i = 0; i < priv->hw_params->rx_queues; i++)
		priv->rx_rings[i].rx_max_coalesced_frames = 1;
	priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1;

	/* libphy will determine the link state */
	netif_carrier_off(dev);

	/* Turn off the main clock, WOL clock is handled separately */
	clk_disable_unprepare(priv->clk);

	err = register_netdev(dev);
	if (err) {
		bcmgenet_mii_exit(dev);
		goto err;
	}

	return err;

err_clk_disable:
	clk_disable_unprepare(priv->clk);
err:
	free_netdev(dev);
	return err;
}

static void bcmgenet_remove(struct platform_device *pdev)
{
	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);

	dev_set_drvdata(&pdev->dev, NULL);
	unregister_netdev(priv->dev);
	bcmgenet_mii_exit(priv->dev);
	free_netdev(priv->dev);
}

static void bcmgenet_shutdown(struct platform_device *pdev)
{
	bcmgenet_remove(pdev);
}

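/* System sleep is split across two phases: the regular suspend/resume
 * callbacks stop and restart the network interface, while the noirq
 * callbacks (which run last on suspend and first on resume) gate the
 * main clock and arm or check the Wake-on-LAN state.
 */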
#ifdef CONFIG_PM_SLEEP
static int bcmgenet_resume_noirq(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	/* Turn on the clock */
	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	if (device_may_wakeup(d) && priv->wolopts) {
		/* Account for Wake-on-LAN events and clear those events
		 * (Some devices need more time between enabling the clocks
		 * and the interrupt register reflecting the wake event so
		 * read the register twice)
		 */
		reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
		reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
		if (reg & UMAC_IRQ_WAKE_EVENT)
			pm_wakeup_event(&priv->pdev->dev, 0);
	}

	bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR);

	return 0;
}

static int bcmgenet_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rxnfc_rule *rule;
	unsigned long dma_ctrl;
	int ret;

	if (!netif_running(dev))
		return 0;

	/* From WOL-enabled suspend, switch to regular clock */
	if (device_may_wakeup(d) && priv->wolopts)
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);

	/* If this is an internal GPHY, power it back on now, before UniMAC is
	 * brought out of reset as absolutely no UniMAC activity is allowed
	 */
	if (priv->internal_phy)
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	bcmgenet_umac_reset(priv);

	init_umac(priv);

	phy_init_hw(dev->phydev);

	/* Speed settings must be restored */
	genphy_config_aneg(dev->phydev);
	bcmgenet_mii_config(priv->dev, false);

	/* Restore enabled features */
	bcmgenet_set_features(dev, dev->features);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	/* Restore hardware filters */
	bcmgenet_hfb_clear(priv);
	list_for_each_entry(rule, &priv->rxnfc_list, list)
		if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
			bcmgenet_hfb_create_rxnfc_filter(priv, rule);

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto out_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	if (!device_may_wakeup(d))
		phy_resume(dev->phydev);

	bcmgenet_netif_start(dev);

	netif_device_attach(dev);

	return 0;

out_clk_disable:
	if (priv->internal_phy)
		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int bcmgenet_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bcmgenet_netif_stop(dev, true);

	if (!device_may_wakeup(d))
		phy_suspend(dev->phydev);

	/* Disable filtering */
	bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);

	return 0;
}

static int bcmgenet_suspend_noirq(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (!netif_running(dev))
		return 0;

	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
	if (device_may_wakeup(d) && priv->wolopts)
		ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
	else if (priv->internal_phy)
		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);

	/* Let the framework handle resumption and leave the clocks on */
	if (ret)
		return ret;

	/* Turn off the clocks */
	clk_disable_unprepare(priv->clk);

	return 0;
}
#else
#define bcmgenet_suspend	NULL
#define bcmgenet_suspend_noirq	NULL
#define bcmgenet_resume		NULL
#define bcmgenet_resume_noirq	NULL
#endif /* CONFIG_PM_SLEEP */

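/* With CONFIG_PM_SLEEP disabled the callbacks above resolve to NULL,
 * leaving the pm_ops table below empty but still safe to reference.
 */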
static const struct dev_pm_ops bcmgenet_pm_ops = {
	.suspend	= bcmgenet_suspend,
	.suspend_noirq	= bcmgenet_suspend_noirq,
	.resume		= bcmgenet_resume,
	.resume_noirq	= bcmgenet_resume_noirq,
};

static const struct acpi_device_id genet_acpi_match[] = {
	{ "BCM6E4E", (kernel_ulong_t)&bcm2711_plat_data },
	{ },
};
MODULE_DEVICE_TABLE(acpi, genet_acpi_match);

static struct platform_driver bcmgenet_driver = {
	.probe	= bcmgenet_probe,
	.remove_new = bcmgenet_remove,
	.shutdown = bcmgenet_shutdown,
	.driver	= {
		.name	= "bcmgenet",
		.of_match_table = bcmgenet_match,
		.pm	= &bcmgenet_pm_ops,
		.acpi_match_table = genet_acpi_match,
	},
};
module_platform_driver(bcmgenet_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
MODULE_ALIAS("platform:bcmgenet");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: mdio-bcm-unimac");