1 /* SPDX-License-Identifier: GPL-2.0+ */ 2 /* Copyright (C) 2018 Microchip Technology Inc. */ 3 4 #include <linux/module.h> 5 #include <linux/pci.h> 6 #include <linux/netdevice.h> 7 #include <linux/etherdevice.h> 8 #include <linux/crc32.h> 9 #include <linux/microchipphy.h> 10 #include <linux/net_tstamp.h> 11 #include <linux/of_mdio.h> 12 #include <linux/of_net.h> 13 #include <linux/phy.h> 14 #include <linux/phy_fixed.h> 15 #include <linux/rtnetlink.h> 16 #include <linux/iopoll.h> 17 #include <linux/crc16.h> 18 #include "lan743x_main.h" 19 #include "lan743x_ethtool.h" 20 21 #define MMD_ACCESS_ADDRESS 0 22 #define MMD_ACCESS_WRITE 1 23 #define MMD_ACCESS_READ 2 24 #define MMD_ACCESS_READ_INC 3 25 #define PCS_POWER_STATE_DOWN 0x6 26 #define PCS_POWER_STATE_UP 0x4 27 28 #define RFE_RD_FIFO_TH_3_DWORDS 0x3 29 30 static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter) 31 { 32 u32 chip_rev; 33 u32 cfg_load; 34 u32 hw_cfg; 35 u32 strap; 36 int ret; 37 38 /* Timeout = 100 (i.e. 1 sec (10 msce * 100)) */ 39 ret = lan743x_hs_syslock_acquire(adapter, 100); 40 if (ret < 0) { 41 netif_err(adapter, drv, adapter->netdev, 42 "Sys Lock acquire failed ret:%d\n", ret); 43 return; 44 } 45 46 cfg_load = lan743x_csr_read(adapter, ETH_SYS_CONFIG_LOAD_STARTED_REG); 47 lan743x_hs_syslock_release(adapter); 48 hw_cfg = lan743x_csr_read(adapter, HW_CFG); 49 50 if (cfg_load & GEN_SYS_LOAD_STARTED_REG_ETH_ || 51 hw_cfg & HW_CFG_RST_PROTECT_) { 52 strap = lan743x_csr_read(adapter, STRAP_READ); 53 if (strap & STRAP_READ_SGMII_EN_) 54 adapter->is_sgmii_en = true; 55 else 56 adapter->is_sgmii_en = false; 57 } else { 58 chip_rev = lan743x_csr_read(adapter, FPGA_REV); 59 if (chip_rev) { 60 if (chip_rev & FPGA_SGMII_OP) 61 adapter->is_sgmii_en = true; 62 else 63 adapter->is_sgmii_en = false; 64 } else { 65 adapter->is_sgmii_en = false; 66 } 67 } 68 netif_dbg(adapter, drv, adapter->netdev, 69 "SGMII I/F %sable\n", adapter->is_sgmii_en ? 
"En" : "Dis"); 70 } 71 72 static bool is_pci11x1x_chip(struct lan743x_adapter *adapter) 73 { 74 struct lan743x_csr *csr = &adapter->csr; 75 u32 id_rev = csr->id_rev; 76 77 if (((id_rev & 0xFFFF0000) == ID_REV_ID_A011_) || 78 ((id_rev & 0xFFFF0000) == ID_REV_ID_A041_)) { 79 return true; 80 } 81 return false; 82 } 83 84 static void lan743x_pci_cleanup(struct lan743x_adapter *adapter) 85 { 86 pci_release_selected_regions(adapter->pdev, 87 pci_select_bars(adapter->pdev, 88 IORESOURCE_MEM)); 89 pci_disable_device(adapter->pdev); 90 } 91 92 static int lan743x_pci_init(struct lan743x_adapter *adapter, 93 struct pci_dev *pdev) 94 { 95 unsigned long bars = 0; 96 int ret; 97 98 adapter->pdev = pdev; 99 ret = pci_enable_device_mem(pdev); 100 if (ret) 101 goto return_error; 102 103 netif_info(adapter, probe, adapter->netdev, 104 "PCI: Vendor ID = 0x%04X, Device ID = 0x%04X\n", 105 pdev->vendor, pdev->device); 106 bars = pci_select_bars(pdev, IORESOURCE_MEM); 107 if (!test_bit(0, &bars)) 108 goto disable_device; 109 110 ret = pci_request_selected_regions(pdev, bars, DRIVER_NAME); 111 if (ret) 112 goto disable_device; 113 114 pci_set_master(pdev); 115 return 0; 116 117 disable_device: 118 pci_disable_device(adapter->pdev); 119 120 return_error: 121 return ret; 122 } 123 124 u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset) 125 { 126 return ioread32(&adapter->csr.csr_address[offset]); 127 } 128 129 void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, 130 u32 data) 131 { 132 iowrite32(data, &adapter->csr.csr_address[offset]); 133 } 134 135 #define LAN743X_CSR_READ_OP(offset) lan743x_csr_read(adapter, offset) 136 137 static int lan743x_csr_light_reset(struct lan743x_adapter *adapter) 138 { 139 u32 data; 140 141 data = lan743x_csr_read(adapter, HW_CFG); 142 data |= HW_CFG_LRST_; 143 lan743x_csr_write(adapter, HW_CFG, data); 144 145 return readx_poll_timeout(LAN743X_CSR_READ_OP, HW_CFG, data, 146 !(data & HW_CFG_LRST_), 100000, 10000000); 147 } 148 149 
/* Busy-poll CSR @offset until (data & @bit_mask) matches @target_value.
 * Safe in atomic context (no sleeping).  The poll step is @udelay_max us
 * and the total timeout is @udelay_min * @count us, per the argument
 * order of readx_poll_timeout_atomic().
 */
static int lan743x_csr_wait_for_bit_atomic(struct lan743x_adapter *adapter,
					   int offset, u32 bit_mask,
					   int target_value, int udelay_min,
					   int udelay_max, int count)
{
	u32 data;

	return readx_poll_timeout_atomic(LAN743X_CSR_READ_OP, offset, data,
					 target_value == !!(data & bit_mask),
					 udelay_max, udelay_min * count);
}

/* Sleeping variant of lan743x_csr_wait_for_bit_atomic(): polls with
 * usleep between reads; step @usleep_max us, timeout @usleep_min * @count us.
 */
static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter,
				    int offset, u32 bit_mask,
				    int target_value, int usleep_min,
				    int usleep_max, int count)
{
	u32 data;

	return readx_poll_timeout(LAN743X_CSR_READ_OP, offset, data,
				  target_value == !!(data & bit_mask),
				  usleep_max, usleep_min * count);
}

/* Map BAR 0 into the CSR window, validate the chip ID, derive per-revision
 * capability flags and finish with a light reset.
 * Returns 0 on success or a negative errno.
 */
static int lan743x_csr_init(struct lan743x_adapter *adapter)
{
	struct lan743x_csr *csr = &adapter->csr;
	resource_size_t bar_start, bar_length;

	bar_start = pci_resource_start(adapter->pdev, 0);
	bar_length = pci_resource_len(adapter->pdev, 0);
	csr->csr_address = devm_ioremap(&adapter->pdev->dev,
					bar_start, bar_length);
	if (!csr->csr_address)
		return -ENOMEM;

	csr->id_rev = lan743x_csr_read(adapter, ID_REV);
	csr->fpga_rev = lan743x_csr_read(adapter, FPGA_REV);
	netif_info(adapter, probe, adapter->netdev,
		   "ID_REV = 0x%08X, FPGA_REV = %d.%d\n",
		   csr->id_rev, FPGA_REV_GET_MAJOR_(csr->fpga_rev),
		   FPGA_REV_GET_MINOR_(csr->fpga_rev));
	if (!ID_REV_IS_VALID_CHIP_ID_(csr->id_rev))
		return -ENODEV;

	/* Assume auto set/clear support; A0 silicon opts out below. */
	csr->flags = LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
	switch (csr->id_rev & ID_REV_CHIP_REV_MASK_) {
	case ID_REV_CHIP_REV_A0_:
		csr->flags |= LAN743X_CSR_FLAG_IS_A0;
		csr->flags &= ~LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
		break;
	case ID_REV_CHIP_REV_B0_:
		csr->flags |= LAN743X_CSR_FLAG_IS_B0;
		break;
	}

	return lan743x_csr_light_reset(adapter);
}

/* Handle the software-generated test interrupt: mask it and wake the
 * waiter in lan743x_intr_test_isr().
 */
static void lan743x_intr_software_isr(struct lan743x_adapter *adapter)
{
	struct lan743x_intr *intr = &adapter->intr;

	/* disable the interrupt to prevent repeated re-triggering */
	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
	intr->software_isr_flag = true;
	wake_up(&intr->software_isr_wq);
}

/* Per-channel TX interrupt handler.  If the channel's IOC event is both
 * pending and enabled, hand off to NAPI and leave the source masked so
 * the poll function re-enables it; otherwise re-enable immediately.
 */
static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags)
{
	struct lan743x_tx *tx = context;
	struct lan743x_adapter *adapter = tx->adapter;
	bool enable_flag = true;

	/* NOTE(review): read with discarded result - presumably flushes the
	 * posted interrupt-enable state before the clear below; confirm
	 * against the hardware manual.
	 */
	lan743x_csr_read(adapter, INT_EN_SET);
	if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
		lan743x_csr_write(adapter, INT_EN_CLR,
				  INT_BIT_DMA_TX_(tx->channel_number));
	}

	if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) {
		u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
		u32 dmac_int_sts;
		u32 dmac_int_en;

		/* When the vector does not report real status/enable state,
		 * assume the IOC bit is both pending and enabled.
		 */
		if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
			dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
		else
			dmac_int_sts = ioc_bit;
		if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
			dmac_int_en = lan743x_csr_read(adapter,
						       DMAC_INT_EN_SET);
		else
			dmac_int_en = ioc_bit;

		dmac_int_en &= ioc_bit;
		dmac_int_sts &= dmac_int_en;
		if (dmac_int_sts & ioc_bit) {
			napi_schedule(&tx->napi);
			enable_flag = false;/* poll func will enable later */
		}
	}

	if (enable_flag)
		/* enable isr */
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_TX_(tx->channel_number));
}

/* Per-channel RX interrupt handler; mirrors lan743x_tx_isr() but keyed on
 * the RX frame-received event.
 */
static void lan743x_rx_isr(void *context, u32 int_sts, u32 flags)
{
	struct lan743x_rx *rx = context;
	struct lan743x_adapter *adapter = rx->adapter;
	bool enable_flag = true;

	if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
		lan743x_csr_write(adapter, INT_EN_CLR,
				  INT_BIT_DMA_RX_(rx->channel_number));
	}

	if (int_sts & INT_BIT_DMA_RX_(rx->channel_number)) {
		u32 rx_frame_bit = DMAC_INT_BIT_RXFRM_(rx->channel_number);
		u32 dmac_int_sts;
		u32 dmac_int_en;

		if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
			dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
		else
			dmac_int_sts = rx_frame_bit;
		if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
			dmac_int_en = lan743x_csr_read(adapter,
						       DMAC_INT_EN_SET);
		else
			dmac_int_en = rx_frame_bit;

		dmac_int_en &= rx_frame_bit;
		dmac_int_sts &= dmac_int_en;
		if (dmac_int_sts & rx_frame_bit) {
			napi_schedule(&rx->napi);
			enable_flag = false;/* poll funct will enable later */
		}
	}

	if (enable_flag) {
		/* enable isr */
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_RX_(rx->channel_number));
	}
}

/* Demultiplex a shared vector: dispatch RX/TX channel bits and the
 * software/1588 events, then mask anything left unclaimed.
 */
static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
{
	struct lan743x_adapter *adapter = context;
	unsigned int channel;

	if (int_sts & INT_BIT_ALL_RX_) {
		for (channel = 0; channel < LAN743X_USED_RX_CHANNELS;
			channel++) {
			u32 int_bit = INT_BIT_DMA_RX_(channel);

			if (int_sts & int_bit) {
				lan743x_rx_isr(&adapter->rx[channel],
					       int_bit, flags);
				int_sts &= ~int_bit;
			}
		}
	}
	if (int_sts & INT_BIT_ALL_TX_) {
		for (channel = 0; channel < adapter->used_tx_channels;
			channel++) {
			u32 int_bit = INT_BIT_DMA_TX_(channel);

			if (int_sts & int_bit) {
				lan743x_tx_isr(&adapter->tx[channel],
					       int_bit, flags);
				int_sts &= ~int_bit;
			}
		}
	}
	if (int_sts & INT_BIT_ALL_OTHER_) {
		if (int_sts & INT_BIT_SW_GP_) {
			lan743x_intr_software_isr(adapter);
			int_sts &= ~INT_BIT_SW_GP_;
		}
		if (int_sts & INT_BIT_1588_) {
			lan743x_ptp_isr(adapter);
			int_sts &= ~INT_BIT_1588_;
		}
	}
	/* mask any remaining, unhandled sources */
	if (int_sts)
		lan743x_csr_write(adapter, INT_EN_CLR, int_sts);
}

/* Top-level IRQ entry point for every vector.  Reads (or infers) the
 * status word, applies the per-vector mask/enable policy encoded in
 * vector->flags, and forwards the surviving bits to vector->handler.
 */
static irqreturn_t lan743x_intr_entry_isr(int irq, void *ptr)
{
	struct lan743x_vector *vector = ptr;
	struct lan743x_adapter *adapter = vector->adapter;
	irqreturn_t result = IRQ_NONE;
	u32 int_enables;
	u32 int_sts;

	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) {
		int_sts = lan743x_csr_read(adapter, INT_STS);
	} else if (vector->flags &
		   (LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C |
		   LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)) {
		/* read-to-clear status register */
		int_sts = lan743x_csr_read(adapter, INT_STS_R2C);
	} else {
		/* use mask as implied status */
		int_sts = vector->int_mask | INT_BIT_MAS_;
	}

	/* master interrupt bit clear means nothing is really pending */
	if (!(int_sts & INT_BIT_MAS_))
		goto irq_done;

	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR)
		/* disable vector interrupt */
		lan743x_csr_write(adapter,
				  INT_VEC_EN_CLR,
				  INT_VEC_EN_(vector->vector_index));

	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR)
		/* disable master interrupt */
		lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);

	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) {
		int_enables = lan743x_csr_read(adapter, INT_EN_SET);
	} else {
		/* use vector mask as implied enable mask */
		int_enables = vector->int_mask;
	}

	int_sts &= int_enables;
	int_sts &= vector->int_mask;
	if (int_sts) {
		if (vector->handler) {
			vector->handler(vector->context,
					int_sts, vector->flags);
		} else {
			/* disable interrupts on this vector */
			lan743x_csr_write(adapter, INT_EN_CLR,
					  vector->int_mask);
		}
		result = IRQ_HANDLED;
	}

	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET)
		/* enable master interrupt */
		lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);

	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET)
		/* enable vector interrupt */
		lan743x_csr_write(adapter,
				  INT_VEC_EN_SET,
				  INT_VEC_EN_(vector->vector_index));
irq_done:
	return result;
}

/* Fire the software general-purpose interrupt and wait (up to 200 ms) for
 * lan743x_intr_software_isr() to observe it; used to validate interrupt
 * delivery after setup.  Returns 0 on success, -ENODEV on timeout.
 */
static int lan743x_intr_test_isr(struct lan743x_adapter *adapter)
{
	struct lan743x_intr *intr = &adapter->intr;
	int ret;

	intr->software_isr_flag = false;

	/* enable and activate test interrupt */
	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_SW_GP_);
	lan743x_csr_write(adapter, INT_SET, INT_BIT_SW_GP_);

	ret = wait_event_timeout(intr->software_isr_wq,
				 intr->software_isr_flag,
				 msecs_to_jiffies(200));

	/* disable test interrupt */
	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);

	return ret > 0 ? 0 : -ENODEV;
}

/* Populate a vector descriptor and request its IRQ.  On failure the
 * descriptor is cleared so it is safe to tear down later.
 */
static int lan743x_intr_register_isr(struct lan743x_adapter *adapter,
				     int vector_index, u32 flags,
				     u32 int_mask,
				     lan743x_vector_handler handler,
				     void *context)
{
	struct lan743x_vector *vector = &adapter->intr.vector_list
					[vector_index];
	int ret;

	vector->adapter = adapter;
	vector->flags = flags;
	vector->vector_index = vector_index;
	vector->int_mask = int_mask;
	vector->handler = handler;
	vector->context = context;

	ret = request_irq(vector->irq,
			  lan743x_intr_entry_isr,
			  (flags & LAN743X_VECTOR_FLAG_IRQ_SHARED) ?
			  IRQF_SHARED : 0, DRIVER_NAME, vector);
	if (ret) {
		vector->handler = NULL;
		vector->context = NULL;
		vector->int_mask = 0;
		vector->flags = 0;
	}
	return ret;
}

/* Free a vector's IRQ and reset its descriptor */
static void lan743x_intr_unregister_isr(struct lan743x_adapter *adapter,
					int vector_index)
{
	struct lan743x_vector *vector = &adapter->intr.vector_list
					[vector_index];

	free_irq(vector->irq, vector);
	vector->handler = NULL;
	vector->context = NULL;
	vector->int_mask = 0;
	vector->flags = 0;
}

/* Return the flags of the first vector whose mask overlaps @int_mask,
 * or 0 if no vector services those bits.
 */
static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter,
					 u32 int_mask)
{
	int index;

	for (index = 0; index < adapter->max_vector_count; index++) {
		if (adapter->intr.vector_list[index].int_mask & int_mask)
			return adapter->intr.vector_list[index].flags;
	}
	return 0;
}

/* Mask all interrupts, free every requested IRQ and disable MSI/MSI-X.
 * Safe to call on a partially initialized interrupt setup (used as the
 * error path of lan743x_intr_open()).
 */
static void lan743x_intr_close(struct lan743x_adapter *adapter)
{
	struct lan743x_intr *intr = &adapter->intr;
	int index = 0;

	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
	/* PCI11x1x parts expose 16 vector-enable bits, others 8 */
	if (adapter->is_pci11x1x)
		lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x0000FFFF);
	else
		lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);

	for (index = 0; index < intr->number_of_vectors; index++) {
		if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) {
			lan743x_intr_unregister_isr(adapter, index);
			intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index);
		}
	}

	if (intr->flags & INTR_FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		intr->flags &= ~INTR_FLAG_MSI_ENABLED;
	}

	if (intr->flags & INTR_FLAG_MSIX_ENABLED) {
		pci_disable_msix(adapter->pdev);
		intr->flags &= ~INTR_FLAG_MSIX_ENABLED;
	}
}

/* Bring up the interrupt subsystem: try MSI-X, then MSI, then legacy INTx;
 * register the shared vector 0 handler, verify delivery with a software
 * interrupt, then (when enough vectors are available) move each TX and RX
 * channel onto its own dedicated vector.
 * Returns 0 on success; on error everything is torn down via
 * lan743x_intr_close().
 */
static int lan743x_intr_open(struct lan743x_adapter *adapter)
{
	struct msix_entry msix_entries[PCI11X1X_MAX_VECTOR_COUNT];
	struct lan743x_intr *intr = &adapter->intr;
	unsigned int used_tx_channels;
	u32 int_vec_en_auto_clr = 0;
	u8 max_vector_count;
	u32 int_vec_map0 = 0;
	u32 int_vec_map1 = 0;
	int ret = -ENODEV;
	int index = 0;
	u32 flags = 0;

	intr->number_of_vectors = 0;

	/* Try to set up MSIX interrupts */
	max_vector_count = adapter->max_vector_count;
	memset(&msix_entries[0], 0,
	       sizeof(struct msix_entry) * max_vector_count);
	for (index = 0; index < max_vector_count; index++)
		msix_entries[index].entry = index;
	used_tx_channels = adapter->used_tx_channels;
	/* ideal layout: one shared vector + one per TX and RX channel */
	ret = pci_enable_msix_range(adapter->pdev,
				    msix_entries, 1,
				    1 + used_tx_channels +
				    LAN743X_USED_RX_CHANNELS);

	if (ret > 0) {
		intr->flags |= INTR_FLAG_MSIX_ENABLED;
		intr->number_of_vectors = ret;
		intr->using_vectors = true;
		for (index = 0; index < intr->number_of_vectors; index++)
			intr->vector_list[index].irq = msix_entries
						       [index].vector;
		netif_info(adapter, ifup, adapter->netdev,
			   "using MSIX interrupts, number of vectors = %d\n",
			   intr->number_of_vectors);
	}

	/* If MSIX failed try to setup using MSI interrupts */
	if (!intr->number_of_vectors) {
		if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
			if (!pci_enable_msi(adapter->pdev)) {
				intr->flags |= INTR_FLAG_MSI_ENABLED;
				intr->number_of_vectors = 1;
				intr->using_vectors = true;
				intr->vector_list[0].irq =
					adapter->pdev->irq;
				netif_info(adapter, ifup, adapter->netdev,
					   "using MSI interrupts, number of vectors = %d\n",
					   intr->number_of_vectors);
			}
		}
	}

	/* If MSIX, and MSI failed, setup using legacy interrupt */
	if (!intr->number_of_vectors) {
		intr->number_of_vectors = 1;
		intr->using_vectors = false;
		intr->vector_list[0].irq = intr->irq;
		netif_info(adapter, ifup, adapter->netdev,
			   "using legacy interrupts\n");
	}

	/* At this point we must have at least one irq */
	lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0xFFFFFFFF);

	/* map all interrupts to vector 0 */
	lan743x_csr_write(adapter, INT_VEC_MAP0, 0x00000000);
	lan743x_csr_write(adapter, INT_VEC_MAP1, 0x00000000);
	lan743x_csr_write(adapter, INT_VEC_MAP2, 0x00000000);
	flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
		LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;

	if (intr->using_vectors) {
		flags |= LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
			 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
	} else {
		flags |= LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR |
			 LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET |
			 LAN743X_VECTOR_FLAG_IRQ_SHARED;
	}

	if (adapter->csr.flags & LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
		/* hardware auto set/clear replaces manual status handling */
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ;
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C;
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK;
		flags |= LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C;
		flags |= LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C;
	}

	init_waitqueue_head(&intr->software_isr_wq);

	/* vector 0 initially services every interrupt source */
	ret = lan743x_intr_register_isr(adapter, 0, flags,
					INT_BIT_ALL_RX_ | INT_BIT_ALL_TX_ |
					INT_BIT_ALL_OTHER_,
					lan743x_intr_shared_isr, adapter);
	if (ret)
		goto clean_up;
	intr->flags |= INTR_FLAG_IRQ_REQUESTED(0);

	if (intr->using_vectors)
		lan743x_csr_write(adapter, INT_VEC_EN_SET,
				  INT_VEC_EN_(0));

	/* interrupt moderation is not available on A0 silicon */
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
		lan743x_csr_write(adapter, INT_MOD_CFG0, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG1, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG2, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG3, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG4, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD);
		if (adapter->is_pci11x1x) {
			lan743x_csr_write(adapter, INT_MOD_CFG8, LAN743X_INT_MOD);
			lan743x_csr_write(adapter, INT_MOD_CFG9, LAN743X_INT_MOD);
			lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00007654);
			lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00003210);
		} else {
			lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
			lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
		}
		lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF);
	}

	/* enable interrupts */
	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
	ret = lan743x_intr_test_isr(adapter);
	if (ret)
		goto clean_up;

	/* dedicate vectors 1..N to TX channels when available */
	if (intr->number_of_vectors > 1) {
		int number_of_tx_vectors = intr->number_of_vectors - 1;

		if (number_of_tx_vectors > used_tx_channels)
			number_of_tx_vectors = used_tx_channels;
		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;

		if (adapter->csr.flags &
		    LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
		}

		for (index = 0; index < number_of_tx_vectors; index++) {
			u32 int_bit = INT_BIT_DMA_TX_(index);
			int vector = index + 1;

			/* map TX interrupt to vector */
			int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
			lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);

			/* Remove TX interrupt from shared mask */
			intr->vector_list[0].int_mask &= ~int_bit;
			ret = lan743x_intr_register_isr(adapter, vector, flags,
							int_bit, lan743x_tx_isr,
							&adapter->tx[index]);
			if (ret)
				goto clean_up;
			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
			if (!(flags &
			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET))
				lan743x_csr_write(adapter, INT_VEC_EN_SET,
						  INT_VEC_EN_(vector));
		}
	}
	/* dedicate the remaining vectors to RX channels */
	if ((intr->number_of_vectors - used_tx_channels) > 1) {
		int number_of_rx_vectors = intr->number_of_vectors -
					   used_tx_channels - 1;

		if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS)
			number_of_rx_vectors = LAN743X_USED_RX_CHANNELS;

		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;

		if (adapter->csr.flags &
		    LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
		}
		for (index = 0; index < number_of_rx_vectors; index++) {
			int vector = index + 1 + used_tx_channels;
			u32 int_bit = INT_BIT_DMA_RX_(index);

			/* map RX interrupt to vector */
			int_vec_map0 |= INT_VEC_MAP0_RX_VEC_(index, vector);
			lan743x_csr_write(adapter, INT_VEC_MAP0, int_vec_map0);
			if (flags &
			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
				int_vec_en_auto_clr |= INT_VEC_EN_(vector);
				lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
						  int_vec_en_auto_clr);
			}

			/* Remove RX interrupt from shared mask */
			intr->vector_list[0].int_mask &= ~int_bit;
			ret = lan743x_intr_register_isr(adapter, vector, flags,
							int_bit, lan743x_rx_isr,
							&adapter->rx[index]);
			if (ret)
				goto clean_up;
			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);

			lan743x_csr_write(adapter, INT_VEC_EN_SET,
					  INT_VEC_EN_(vector));
		}
	}
	return 0;

clean_up:
	lan743x_intr_close(adapter);
	return ret;
}

/* Write @length dwords from @buf to internal data-port RAM @select at
 * @addr.  Uses the atomic busy-wait poller, so callable from atomic
 * context.  Returns 0 on success or -EIO if the port never became ready.
 */
static int lan743x_dp_write(struct lan743x_adapter *adapter,
			    u32 select, u32 addr, u32 length, u32 *buf)
{
	u32 dp_sel;
	int i;

	if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL, DP_SEL_DPRDY_,
					    1, 40, 100, 100))
		return -EIO;
	dp_sel = lan743x_csr_read(adapter, DP_SEL);
	dp_sel &= ~DP_SEL_MASK_;
	dp_sel |= select;
	lan743x_csr_write(adapter, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		lan743x_csr_write(adapter, DP_ADDR, addr + i);
		lan743x_csr_write(adapter, DP_DATA_0, buf[i]);
		lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
		/* wait for each dword to be accepted before the next */
		if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL,
						    DP_SEL_DPRDY_,
						    1, 40, 100, 100))
			return -EIO;
	}

	return 0;
}

/* Compose a MAC_MII_ACC command word for a Clause 22 access: PHY address,
 * register index, read/write direction, and the busy (go) bit.
 */
static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
{
	u32 ret;

	ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
	      MAC_MII_ACC_PHY_ADDR_MASK_;
	ret |= (index << MAC_MII_ACC_MIIRINDA_SHIFT_) &
	       MAC_MII_ACC_MIIRINDA_MASK_;

	if (read)
		ret |= MAC_MII_ACC_MII_READ_;
	else
		ret |= MAC_MII_ACC_MII_WRITE_;
	ret |= MAC_MII_ACC_MII_BUSY_;

	return ret;
}

/* Poll until the MII controller's busy bit clears (up to 1 s) */
static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter)
{
	u32 data;

	return readx_poll_timeout(LAN743X_CSR_READ_OP, MAC_MII_ACC, data,
				  !(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000);
}

/* mii_bus Clause 22 read callback: returns the 16-bit register value or a
 * negative errno.
 */
static int lan743x_mdiobus_read_c22(struct mii_bus *bus, int phy_id, int index)
{
	struct lan743x_adapter *adapter = bus->priv;
	u32 val, mii_access;
	int ret;

	/* confirm MII not busy */
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	/* set the address, index & direction (read from PHY) */
	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_READ);
	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	val = lan743x_csr_read(adapter, MAC_MII_DATA);
	return (int)(val & 0xFFFF);
}

/* mii_bus Clause 22 write callback: returns 0 or a negative errno */
static int lan743x_mdiobus_write_c22(struct mii_bus *bus,
				     int phy_id, int index, u16 regval)
{
	struct lan743x_adapter *adapter = bus->priv;
	u32 val, mii_access;
	int ret;

	/* confirm MII not busy */
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;
	val = (u32)regval;
	lan743x_csr_write(adapter, MAC_MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_WRITE);
	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	return ret;
}

/* Compose a MAC_MII_ACC command word for a Clause 45 (MMD) transaction
 * using one of the MMD_ACCESS_* opcodes.
 */
static u32 lan743x_mac_mmd_access(int id, int dev_addr, int op)
{
	u32 ret;

	ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
	      MAC_MII_ACC_PHY_ADDR_MASK_;
	ret |= (dev_addr << MAC_MII_ACC_MIIMMD_SHIFT_) &
	       MAC_MII_ACC_MIIMMD_MASK_;
	if (op == MMD_ACCESS_WRITE)
		ret |= MAC_MII_ACC_MIICMD_WRITE_;
	else if (op == MMD_ACCESS_READ)
		ret |= MAC_MII_ACC_MIICMD_READ_;
	else if (op == MMD_ACCESS_READ_INC)
		ret |= MAC_MII_ACC_MIICMD_READ_INC_;
	else
		ret |= MAC_MII_ACC_MIICMD_ADDR_;
	ret |= (MAC_MII_ACC_MII_BUSY_ | MAC_MII_ACC_MIICL45_);

	return ret;
}

/* mii_bus Clause 45 read callback: address phase then data phase.
 * Returns the 16-bit register value or a negative errno.
 */
static int lan743x_mdiobus_read_c45(struct mii_bus *bus, int phy_id,
				    int dev_addr, int index)
{
	struct lan743x_adapter *adapter = bus->priv;
	u32 mmd_access;
	int ret;

	/* confirm MII not busy */
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	/* Load Register Address */
	lan743x_csr_write(adapter, MAC_MII_DATA, index);
	mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
					    MMD_ACCESS_ADDRESS);
	lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	/* Read Data */
	mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
					    MMD_ACCESS_READ);
	lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	ret = lan743x_csr_read(adapter, MAC_MII_DATA);
	return (int)(ret & 0xFFFF);
}

/* mii_bus Clause 45 write callback: address phase then write phase.
 * Returns 0 or a negative errno.
 */
static int lan743x_mdiobus_write_c45(struct mii_bus *bus, int phy_id,
				     int dev_addr, int index, u16 regval)
{
	struct lan743x_adapter *adapter = bus->priv;
	u32 mmd_access;
	int ret;

	/* confirm MII not busy */
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	/* Load Register Address */
	lan743x_csr_write(adapter, MAC_MII_DATA, (u32)index);
	mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
					    MMD_ACCESS_ADDRESS);
	lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	/* Write Data */
	lan743x_csr_write(adapter, MAC_MII_DATA, (u32)regval);
	mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
					    MMD_ACCESS_WRITE);
	lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);

	return lan743x_mac_mii_wait_till_not_busy(adapter);
}

/* Poll until the SGMII access port's busy bit clears (up to 1 s),
 * logging on timeout.
 */
static int lan743x_sgmii_wait_till_not_busy(struct lan743x_adapter *adapter)
{
	u32 data;
	int ret;

	ret = readx_poll_timeout(LAN743X_CSR_READ_OP, SGMII_ACC, data,
				 !(data & SGMII_ACC_SGMII_BZY_), 100, 1000000);
	if (ret < 0)
		netif_err(adapter, drv, adapter->netdev,
			  "%s: error %d sgmii wait timeout\n", __func__, ret);

	return ret;
}

/* Read a 16-bit SGMII PCS register at @mmd:@addr, serialized by
 * sgmii_rw_lock.  Returns the register value or a negative errno.
 */
int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr)
{
	u32 mmd_access;
	int ret;
	u32 val;

	if (mmd > 31) {
		netif_err(adapter, probe, adapter->netdev,
			  "%s mmd should <= 31\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&adapter->sgmii_rw_lock);
	/* Load Register Address */
	mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_;
	mmd_access |= (addr | SGMII_ACC_SGMII_BZY_);
	lan743x_csr_write(adapter, SGMII_ACC, mmd_access);
	ret = lan743x_sgmii_wait_till_not_busy(adapter);
	if (ret < 0)
		goto sgmii_unlock;

	val = lan743x_csr_read(adapter, SGMII_DATA);
	ret = (int)(val & SGMII_DATA_MASK_);

sgmii_unlock:
	mutex_unlock(&adapter->sgmii_rw_lock);

	return ret;
}

/* Write a 16-bit SGMII PCS register at @mmd:@addr, serialized by
 * sgmii_rw_lock.  Returns 0 or a negative errno.
 */
static int lan743x_sgmii_write(struct lan743x_adapter *adapter,
			       u8 mmd, u16 addr, u16 val)
{
	u32 mmd_access;
	int ret;

	if (mmd > 31) {
		netif_err(adapter, probe, adapter->netdev,
			  "%s mmd should <= 31\n", __func__);
		return -EINVAL;
	}
	mutex_lock(&adapter->sgmii_rw_lock);
	/* Load Register Data */
	lan743x_csr_write(adapter, SGMII_DATA, (u32)(val & SGMII_DATA_MASK_));
	/* Load Register Address */
	mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_;
	mmd_access |= (addr | SGMII_ACC_SGMII_BZY_ | SGMII_ACC_SGMII_WR_);
	lan743x_csr_write(adapter, SGMII_ACC, mmd_access);
	ret = lan743x_sgmii_wait_till_not_busy(adapter);
	mutex_unlock(&adapter->sgmii_rw_lock);

	return ret;
}

/* Program the SGMII MPLL multiplier and reference divider for the
 * requested baud rate (1.25 Gbaud for 10/100/1000, otherwise 3.125 Gbaud
 * for 2.5G).  Returns 0 or a negative errno.
 */
static int lan743x_sgmii_mpll_set(struct lan743x_adapter *adapter,
				  u16 baud)
{
	int mpllctrl0;
	int mpllctrl1;
	int miscctrl1;
	int ret;

	mpllctrl0 = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
				       VR_MII_GEN2_4_MPLL_CTRL0);
	if (mpllctrl0 < 0)
		return mpllctrl0;

	mpllctrl0 &= ~VR_MII_MPLL_CTRL0_USE_REFCLK_PAD_;
	if (baud == VR_MII_BAUD_RATE_1P25GBPS) {
		mpllctrl1 = VR_MII_MPLL_MULTIPLIER_100;
		/* mpll_baud_clk/4 */
		miscctrl1 = 0xA;
	} else {
		mpllctrl1 = VR_MII_MPLL_MULTIPLIER_125;
		/* mpll_baud_clk/2 */
		miscctrl1 = 0x5;
	}

	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
				  VR_MII_GEN2_4_MPLL_CTRL0, mpllctrl0);
	if (ret < 0)
		return ret;

	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
				  VR_MII_GEN2_4_MPLL_CTRL1, mpllctrl1);
	if (ret < 0)
		return ret;

	return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
				   VR_MII_GEN2_4_MISC_CTRL1, miscctrl1);
}

/* Select between the 2.5G (3.125 Gbaud) and 1G (1.25 Gbaud) SGMII clock
 * configurations.
 */
static int lan743x_sgmii_2_5G_mode_set(struct lan743x_adapter *adapter,
				       bool enable)
{
	if (enable)
		return lan743x_sgmii_mpll_set(adapter,
					      VR_MII_BAUD_RATE_3P125GBPS);
	else
		return lan743x_sgmii_mpll_set(adapter,
					      VR_MII_BAUD_RATE_1P25GBPS);
}

/* Report via @status whether the MPLL is currently configured for 2.5G
 * operation (multiplier 125 or 50).  Returns 0 or a negative errno.
 */
static int lan743x_is_sgmii_2_5G_mode(struct lan743x_adapter *adapter,
				      bool *status)
{
	int ret;

	ret = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
				 VR_MII_GEN2_4_MPLL_CTRL1);
	if (ret < 0)
		return ret;

	if (ret == VR_MII_MPLL_MULTIPLIER_125 ||
	    ret == VR_MII_MPLL_MULTIPLIER_50)
		*status = true;
	else
		*status = false;

	return 0;
}

/* Reconfigure the SGMII PCS for the link speed cached in
 * adapter->sgmii_lsd: 2.5G links run with auto-negotiation disabled and
 * an adjusted link timer; all other speeds use Clause 37 auto-negotiation
 * with automatic MAC speed switching.  Returns 0 or a negative errno.
 */
static int lan743x_sgmii_aneg_update(struct lan743x_adapter *adapter)
{
	enum lan743x_sgmii_lsd lsd = adapter->sgmii_lsd;
	int mii_ctrl;
	int dgt_ctrl;
	int an_ctrl;
	int ret;

	if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE)
		/* Switch to 2.5 Gbps */
		ret = lan743x_sgmii_2_5G_mode_set(adapter, true);
	else
		/* Switch to 10/100/1000 Mbps clock */
		ret = lan743x_sgmii_2_5G_mode_set(adapter, false);
	if (ret < 0)
		return ret;

	/* Enable SGMII Auto NEG */
	mii_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
	if (mii_ctrl < 0)
		return mii_ctrl;

	an_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, VR_MII_AN_CTRL);
	if (an_ctrl < 0)
		return an_ctrl;

	dgt_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
				      VR_MII_DIG_CTRL1);
	if (dgt_ctrl < 0)
		return dgt_ctrl;

	if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE) {
		mii_ctrl &= ~(BMCR_ANENABLE | BMCR_ANRESTART | BMCR_SPEED100);
		mii_ctrl |= BMCR_SPEED1000;
		dgt_ctrl |= VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_;
		dgt_ctrl &= ~VR_MII_DIG_CTRL1_MAC_AUTO_SW_;
		/* In order for Auto-Negotiation to operate properly at
		 * 2.5 Gbps the 1.6ms link timer values must be adjusted
		 * The VR_MII_LINK_TIMER_CTRL Register must be set to
		 * 16'h7A1 and The CL37_TMR_OVR_RIDE bit of the
		 * VR_MII_DIG_CTRL1 Register set to 1
		 */
		ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
					  VR_MII_LINK_TIMER_CTRL, 0x7A1);
		if (ret < 0)
			return ret;
	} else {
		mii_ctrl |= (BMCR_ANENABLE | BMCR_ANRESTART);
		an_ctrl &= ~VR_MII_AN_CTRL_SGMII_LINK_STS_;
		dgt_ctrl &= ~VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_;
		dgt_ctrl |= VR_MII_DIG_CTRL1_MAC_AUTO_SW_;
	}

	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR,
				  mii_ctrl);
	if (ret < 0)
		return ret;

	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
				  VR_MII_DIG_CTRL1, dgt_ctrl);
	if (ret < 0)
		return ret;

	return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
				   VR_MII_AN_CTRL, an_ctrl);
}

/* Poll (up to ~10-20 ms) until the PCS power sequencer reports @state.
 * Returns 0 on success, -ETIMEDOUT otherwise.
 * NOTE(review): lan743x_sgmii_read() can return a negative errno, which
 * is stored into the u32 dig_sts and then masked without an error check -
 * a read failure would be misinterpreted as a state value; confirm
 * whether that path needs explicit handling.
 */
static int lan743x_pcs_seq_state(struct lan743x_adapter *adapter, u8 state)
{
	u8 wait_cnt = 0;
	u32 dig_sts;

	do {
		dig_sts = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
					     VR_MII_DIG_STS);
		if (((dig_sts & VR_MII_DIG_STS_PSEQ_STATE_MASK_) >>
		     VR_MII_DIG_STS_PSEQ_STATE_POS_) == state)
			break;
		usleep_range(1000, 2000);
	} while (wait_cnt++ < 10);

	if (wait_cnt >= 10)
		return -ETIMEDOUT;

	return 0;
}

/* Configure the SGMII PCS to match the attached PHY's resolved speed,
 * duplex and master/slave role, then power-cycle the PCS so the new
 * settings take effect.  Returns 0 or a negative errno.
 */
static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct phy_device *phydev = netdev->phydev;
	enum lan743x_sgmii_lsd lsd = POWER_DOWN;
	int mii_ctl;
	bool status;
	int ret;

	/* translate the PHY's link state into a link-speed-duplex code */
	switch (phydev->speed) {
	case SPEED_2500:
		if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
			lsd = LINK_2500_MASTER;
		else
			lsd = LINK_2500_SLAVE;
		break;
	case SPEED_1000:
		if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
			lsd = LINK_1000_MASTER;
		else
			lsd = LINK_1000_SLAVE;
		break;
	case SPEED_100:
		if (phydev->duplex)
			lsd = LINK_100FD;
		else
			lsd = LINK_100HD;
		break;
	case SPEED_10:
		if (phydev->duplex)
			lsd = LINK_10FD;
		else
			lsd = LINK_10HD;
		break;
	default:
		netif_err(adapter, drv, adapter->netdev,
			  "Invalid speed %d\n", phydev->speed);
		return -EINVAL;
	}

	adapter->sgmii_lsd = lsd;
	ret = lan743x_sgmii_aneg_update(adapter);
	if (ret < 0) {
		netif_err(adapter, drv, adapter->netdev,
			  "error %d SGMII cfg failed\n", ret);
		return ret;
	}

	/* (tail of lan743x_sgmii_config, continued from above) */
	ret = lan743x_is_sgmii_2_5G_mode(adapter, &status);
	if (ret < 0) {
		netif_err(adapter, drv, adapter->netdev,
			  "error %d SGMII get mode failed\n", ret);
		return ret;
	}

	if (status)
		netif_dbg(adapter, drv, adapter->netdev,
			  "SGMII 2.5G mode enable\n");
	else
		netif_dbg(adapter, drv, adapter->netdev,
			  "SGMII 1G mode enable\n");

	/* SGMII/1000/2500BASE-X PCS power down */
	mii_ctl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
	if (mii_ctl < 0)
		return mii_ctl;

	mii_ctl |= BMCR_PDOWN;
	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl);
	if (ret < 0)
		return ret;

	ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_DOWN);
	if (ret < 0)
		return ret;

	/* SGMII/1000/2500BASE-X PCS power up */
	mii_ctl &= ~BMCR_PDOWN;
	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl);
	if (ret < 0)
		return ret;

	ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP);
	if (ret < 0)
		return ret;

	return 0;
}

/* lan743x_mac_set_address - program @addr as the MAC unicast receive
 * address (MAC_RX_ADDRL/H, little-endian byte packing) and cache it in
 * adapter->mac_address.
 */
static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
				    u8 *addr)
{
	u32 addr_lo, addr_hi;

	addr_lo = addr[0] |
		addr[1] << 8 |
		addr[2] << 16 |
		addr[3] << 24;
	addr_hi = addr[4] |
		addr[5] << 8;
	lan743x_csr_write(adapter, MAC_RX_ADDRL, addr_lo);
	lan743x_csr_write(adapter, MAC_RX_ADDRH, addr_hi);

	ether_addr_copy(adapter->mac_address, addr);
	netif_info(adapter, drv, adapter->netdev,
		   "MAC address set to %pM\n", addr);
}

/* lan743x_mac_init - basic MAC bring-up
 *
 * Disables hardware auto-duplex/auto-speed detection (phylib drives the
 * MAC configuration instead), resets the counters, and establishes the
 * MAC address: if adapter->mac_address is not already valid it is read
 * back from MAC_RX_ADDRL/H; if that is also invalid (all-ones =
 * unprogrammed, or otherwise not a valid unicast address) a random
 * address is generated.
 *
 * Return: always 0.
 */
static int lan743x_mac_init(struct lan743x_adapter *adapter)
{
	bool mac_address_valid = true;
	struct net_device *netdev;
	u32 mac_addr_hi = 0;
	u32 mac_addr_lo = 0;
	u32 data;

	netdev = adapter->netdev;

	/* disable auto duplex, and speed detection. Phylib does that */
	data = lan743x_csr_read(adapter, MAC_CR);
	data &= ~(MAC_CR_ADD_ | MAC_CR_ASD_);
	data |= MAC_CR_CNTR_RST_;
	lan743x_csr_write(adapter, MAC_CR, data);

	if (!is_valid_ether_addr(adapter->mac_address)) {
		mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH);
		mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL);
		adapter->mac_address[0] = mac_addr_lo & 0xFF;
		adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF;
		adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF;
		adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF;
		adapter->mac_address[4] = mac_addr_hi & 0xFF;
		adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF;

		/* all-ones means the register was never programmed */
		if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) &&
		    mac_addr_lo == 0xFFFFFFFF) {
			mac_address_valid = false;
		} else if (!is_valid_ether_addr(adapter->mac_address)) {
			mac_address_valid = false;
		}

		if (!mac_address_valid)
			eth_random_addr(adapter->mac_address);
	}
	lan743x_mac_set_address(adapter, adapter->mac_address);
	eth_hw_addr_set(netdev, adapter->mac_address);

	return 0;
}

/* Enable the MAC receiver and transmitter.  Return: always 0. */
static int lan743x_mac_open(struct lan743x_adapter *adapter)
{
	u32 temp;

	temp = lan743x_csr_read(adapter, MAC_RX);
	lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_);
	temp = lan743x_csr_read(adapter, MAC_TX);
	lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_);
	return 0;
}

/* Disable the transmitter then the receiver, waiting for the
 * corresponding "disabled" status bits (TXD_/RXD_) to assert so no
 * frame is cut off mid-transfer.
 */
static void lan743x_mac_close(struct lan743x_adapter *adapter)
{
	u32 temp;

	temp = lan743x_csr_read(adapter, MAC_TX);
	temp &= ~MAC_TX_TXEN_;
	lan743x_csr_write(adapter, MAC_TX, temp);
	lan743x_csr_wait_for_bit(adapter, MAC_TX, MAC_TX_TXD_,
				 1, 1000, 20000, 100);

	temp = lan743x_csr_read(adapter, MAC_RX);
	temp &= ~MAC_RX_RXEN_;
	lan743x_csr_write(adapter, MAC_RX, temp);
	lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
				 1, 1000, 20000, 100);
}

/* lan743x_mac_flow_ctrl_set_enables - configure TX/RX pause support
 * @adapter: adapter context
 * @tx_enable: allow sending pause frames
 * @rx_enable: honor received pause frames
 */
void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
				       bool tx_enable, bool rx_enable)
{
	u32 flow_setting = 0;

	/* set maximum pause time because when fifo space frees
	 * up a zero value pause frame will be sent to release the pause
	 */
	flow_setting = MAC_FLOW_CR_FCPT_MASK_;
	if (tx_enable)
		flow_setting |= MAC_FLOW_CR_TX_FCEN_;
	if (rx_enable)
		flow_setting |= MAC_FLOW_CR_RX_FCEN_;
	lan743x_csr_write(adapter, MAC_FLOW, flow_setting);
}

/* lan743x_mac_set_mtu - program the maximum RX frame size for @new_mtu
 * (MTU + Ethernet header + FCS)
 *
 * If the receiver is currently enabled it is stopped first (waiting for
 * the RXD_ "disabled" status, then clearing it write-1-to-clear style)
 * and re-enabled afterwards, since the max-size field must not change
 * while RX is active.
 *
 * Return: always 0.
 */
static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
{
	int enabled = 0;
	u32 mac_rx = 0;

	mac_rx = lan743x_csr_read(adapter, MAC_RX);
	if (mac_rx & MAC_RX_RXEN_) {
		enabled = 1;
		if (mac_rx & MAC_RX_RXD_) {
			/* clear stale RXD_ status before disabling */
			lan743x_csr_write(adapter, MAC_RX, mac_rx);
			mac_rx &= ~MAC_RX_RXD_;
		}
		mac_rx &= ~MAC_RX_RXEN_;
		lan743x_csr_write(adapter, MAC_RX, mac_rx);
		lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
					 1, 1000, 20000, 100);
		lan743x_csr_write(adapter, MAC_RX, mac_rx | MAC_RX_RXD_);
	}

	mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_);
	mac_rx |= (((new_mtu + ETH_HLEN + ETH_FCS_LEN)
		  << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
	lan743x_csr_write(adapter, MAC_RX, mac_rx);

	if (enabled) {
		mac_rx |= MAC_RX_RXEN_;
		lan743x_csr_write(adapter, MAC_RX, mac_rx);
	}
	return 0;
}

/* PHY */
/* lan743x_phy_reset - pulse the internal PHY reset and poll until the
 * reset bit self-clears and PMT reports ready.
 *
 * Return: 0 on success, negative error from readx_poll_timeout on
 * timeout.
 */
static int lan743x_phy_reset(struct lan743x_adapter *adapter)
{
	u32 data;

	/* Only called with in probe, and before mdiobus_register */

	data = lan743x_csr_read(adapter, PMT_CTL);
	data |= PMT_CTL_ETH_PHY_RST_;
	lan743x_csr_write(adapter, PMT_CTL, data);

	return readx_poll_timeout(LAN743X_CSR_READ_OP, PMT_CTL, data,
				  (!(data & PMT_CTL_ETH_PHY_RST_) &&
				  (data & PMT_CTL_READY_)),
				  50000, 1000000);
}

static void
lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
			       u16 local_adv, u16 remote_adv)
{
	/* Resolve the TX/RX pause enables from the local and link-partner
	 * advertisements when flow-control autoneg is on, otherwise use
	 * the statically requested setting, and program the MAC.
	 */
	struct lan743x_phy *phy = &adapter->phy;
	u8 cap;

	if (phy->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
	else
		cap = phy->fc_request_control;

	lan743x_mac_flow_ctrl_set_enables(adapter,
					  cap & FLOW_CTRL_TX,
					  cap & FLOW_CTRL_RX);
}

/* PHY init is just the hardware reset of the internal PHY. */
static int lan743x_phy_init(struct lan743x_adapter *adapter)
{
	return lan743x_phy_reset(adapter);
}

/* lan743x_phy_link_status_change - phylib link-change callback
 *
 * On link-up (PHY_RUNNING) mirrors the PHY's negotiated duplex and
 * speed into MAC_CR (CFG_H/CFG_L together encode 10/100/1000/2500),
 * refreshes flow control and PTP latency, reconfigures the SGMII
 * SerDes for SGMII/1000BASE-X/2500BASE-X links, and tracks EEE
 * (tx LPI) enablement.
 */
static void lan743x_phy_link_status_change(struct net_device *netdev)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;
	u32 data;

	phy_print_status(phydev);
	if (phydev->state == PHY_RUNNING) {
		int remote_advertisement = 0;
		int local_advertisement = 0;

		data = lan743x_csr_read(adapter, MAC_CR);

		/* set duplex mode */
		if (phydev->duplex)
			data |= MAC_CR_DPX_;
		else
			data &= ~MAC_CR_DPX_;

		/* set bus speed */
		switch (phydev->speed) {
		case SPEED_10:
			data &= ~MAC_CR_CFG_H_;
			data &= ~MAC_CR_CFG_L_;
			break;
		case SPEED_100:
			data &= ~MAC_CR_CFG_H_;
			data |= MAC_CR_CFG_L_;
			break;
		case SPEED_1000:
			data |= MAC_CR_CFG_H_;
			data &= ~MAC_CR_CFG_L_;
			break;
		case SPEED_2500:
			data |= MAC_CR_CFG_H_;
			data |= MAC_CR_CFG_L_;
			break;
		}
		lan743x_csr_write(adapter, MAC_CR, data);

		local_advertisement =
			linkmode_adv_to_mii_adv_t(phydev->advertising);
		remote_advertisement =
			linkmode_adv_to_mii_adv_t(phydev->lp_advertising);

		lan743x_phy_update_flowcontrol(adapter, local_advertisement,
					       remote_advertisement);
		lan743x_ptp_update_latency(adapter, phydev->speed);
		if (phydev->interface == PHY_INTERFACE_MODE_SGMII ||
		    phydev->interface == PHY_INTERFACE_MODE_1000BASEX ||
		    phydev->interface == PHY_INTERFACE_MODE_2500BASEX)
			lan743x_sgmii_config(adapter);

		/* mirror the PHY's EEE state into the MAC */
		data = lan743x_csr_read(adapter, MAC_CR);
		if (phydev->enable_tx_lpi)
			data |= MAC_CR_EEE_EN_;
		else
			data &= ~MAC_CR_EEE_EN_;
		lan743x_csr_write(adapter, MAC_CR, data);
	}
}

/* Stop and disconnect the PHY; also unregister it when it was the
 * fixed-link pseudo PHY registered in lan743x_phy_open().
 */
static void lan743x_phy_close(struct lan743x_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct phy_device *phydev = netdev->phydev;

	phy_stop(netdev->phydev);
	phy_disconnect(netdev->phydev);

	/* using phydev here as phy_disconnect NULLs netdev->phydev */
	if (phy_is_pseudo_fixed_link(phydev))
		fixed_phy_unregister(phydev);

}

/* Choose the MAC-PHY interface mode: SGMII on PCI11x1x parts strapped
 * for it, GMII on LAN7430, MII on LAN7431 when MAC_CR says so, RGMII
 * otherwise.
 */
static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
{
	u32 id_rev;
	u32 data;

	data = lan743x_csr_read(adapter, MAC_CR);
	id_rev = adapter->csr.id_rev & ID_REV_ID_MASK_;

	if (adapter->is_pci11x1x && adapter->is_sgmii_en)
		adapter->phy_interface = PHY_INTERFACE_MODE_SGMII;
	else if (id_rev == ID_REV_ID_LAN7430_)
		adapter->phy_interface = PHY_INTERFACE_MODE_GMII;
	else if ((id_rev == ID_REV_ID_LAN7431_) && (data & MAC_CR_MII_EN_))
		adapter->phy_interface = PHY_INTERFACE_MODE_MII;
	else
		adapter->phy_interface = PHY_INTERFACE_MODE_RGMII;
}

/* lan743x_phy_open - locate and attach a PHY, then start it
 *
 * Tries, in order: a devicetree-described PHY (or DT fixed link), the
 * first PHY on the internal MDIO bus, and - on LAN7431 only - a newly
 * registered 1 Gbps full-duplex fixed PHY as a last resort.  Removes
 * the unsupported 1000T-half mode, advertises asymmetric pause, and
 * records the flow-control autoneg preference before starting the PHY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int lan743x_phy_open(struct lan743x_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct lan743x_phy *phy = &adapter->phy;
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phydev;
	int ret = -EIO;

	/* try devicetree phy, or fixed link */
	phydev = of_phy_get_and_connect(netdev, adapter->pdev->dev.of_node,
					lan743x_phy_link_status_change);

	if (!phydev) {
		/* try internal phy */
		phydev = phy_find_first(adapter->mdiobus);
		if (!phydev) {
			if ((adapter->csr.id_rev & ID_REV_ID_MASK_) ==
			    ID_REV_ID_LAN7431_) {
				phydev = fixed_phy_register(PHY_POLL,
							    &fphy_status, NULL);
				if (IS_ERR(phydev)) {
					netdev_err(netdev, "No PHY/fixed_PHY found\n");
					return PTR_ERR(phydev);
				}
			} else {
				goto return_error;
			}
		}

		lan743x_phy_interface_select(adapter);

		ret = phy_connect_direct(netdev, phydev,
					 lan743x_phy_link_status_change,
					 adapter->phy_interface);
		if (ret)
			goto return_error;
	}

	/* MAC doesn't support 1000T Half */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* support both flow controls */
	phy_support_asym_pause(phydev);
	phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	phy->fc_autoneg = phydev->autoneg;

	phy_start(phydev);
	phy_start_aneg(phydev);
	phy_attached_info(phydev);
	return 0;

return_error:
	return ret;
}

/* Enable RSS over TCP/UDP/IP v4+v6 (including extension headers) with
 * the hash stored in the RX descriptor and queue selection enabled.
 */
static void lan743x_rfe_open(struct lan743x_adapter *adapter)
{
	lan743x_csr_write(adapter, RFE_RSS_CFG,
		RFE_RSS_CFG_UDP_IPV6_EX_ |
		RFE_RSS_CFG_TCP_IPV6_EX_ |
		RFE_RSS_CFG_IPV6_EX_ |
		RFE_RSS_CFG_UDP_IPV6_ |
		RFE_RSS_CFG_TCP_IPV6_ |
		RFE_RSS_CFG_IPV6_ |
		RFE_RSS_CFG_UDP_IPV4_ |
		RFE_RSS_CFG_TCP_IPV4_ |
		RFE_RSS_CFG_IPV4_ |
		RFE_RSS_CFG_VALID_HASH_BITS_ |
		RFE_RSS_CFG_RSS_QUEUE_ENABLE_ |
		RFE_RSS_CFG_RSS_HASH_STORE_ |
		RFE_RSS_CFG_RSS_ENABLE_);
}

/* Install the unicast MAC address into RFE perfect-filter slot 0. */
static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter)
{
	u8 *mac_addr;
	u32 mac_addr_hi = 0;
	u32 mac_addr_lo = 0;

	/* Add mac address to perfect Filter */
	mac_addr = adapter->mac_address;
	mac_addr_lo = ((((u32)(mac_addr[0])) << 0) |
		      (((u32)(mac_addr[1])) << 8) |
		      (((u32)(mac_addr[2])) << 16) |
		      (((u32)(mac_addr[3])) << 24));
	mac_addr_hi = ((((u32)(mac_addr[4])) << 0) |
		      (((u32)(mac_addr[5])) << 8));

	/* (tail of lan743x_rfe_update_mac_address, continued from above) */
	lan743x_csr_write(adapter, RFE_ADDR_FILT_LO(0), mac_addr_lo);
	lan743x_csr_write(adapter, RFE_ADDR_FILT_HI(0),
			  mac_addr_hi | RFE_ADDR_FILT_HI_VALID_);
}

/* lan743x_rfe_set_multicast - rebuild the RFE filter configuration
 * from netdev flags and the multicast list
 *
 * Broadcast is always accepted.  IFF_PROMISC accepts everything,
 * IFF_ALLMULTI all multicast.  Otherwise the first 32 multicast
 * addresses go into perfect-filter slots 1..32 (slot 0 holds the
 * unicast address) and any remainder is folded into the 512-bit
 * multicast hash (top 9 bits of the CRC of the address).  Also mirrors
 * NETIF_F_RXCSUM into the IP/TCP/UDP checksum-offload enables.
 */
static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 hash_table[DP_SEL_VHF_HASH_LEN];
	u32 rfctl;
	u32 data;

	rfctl = lan743x_csr_read(adapter, RFE_CTL);
	rfctl &= ~(RFE_CTL_AU_ | RFE_CTL_AM_ |
		   RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
	rfctl |= RFE_CTL_AB_;
	if (netdev->flags & IFF_PROMISC) {
		rfctl |= RFE_CTL_AM_ | RFE_CTL_AU_;
	} else {
		if (netdev->flags & IFF_ALLMULTI)
			rfctl |= RFE_CTL_AM_;
	}

	if (netdev->features & NETIF_F_RXCSUM)
		rfctl |= RFE_CTL_IP_COE_ | RFE_CTL_TCP_UDP_COE_;

	memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32));
	if (netdev_mc_count(netdev)) {
		struct netdev_hw_addr *ha;
		int i;

		rfctl |= RFE_CTL_DA_PERFECT_;
		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				/* invalidate slot before rewriting it */
				lan743x_csr_write(adapter,
						  RFE_ADDR_FILT_HI(i), 0);
				data = ha->addr[3];
				data = ha->addr[2] | (data << 8);
				data = ha->addr[1] | (data << 8);
				data = ha->addr[0] | (data << 8);
				lan743x_csr_write(adapter,
						  RFE_ADDR_FILT_LO(i), data);
				data = ha->addr[5];
				data = ha->addr[4] | (data << 8);
				data |= RFE_ADDR_FILT_HI_VALID_;
				lan743x_csr_write(adapter,
						  RFE_ADDR_FILT_HI(i), data);
			} else {
				/* overflow: 9-bit hash bucket from CRC */
				u32 bitnum = (ether_crc(ETH_ALEN, ha->addr) >>
					      23) & 0x1FF;
				hash_table[bitnum / 32] |= (1 << (bitnum % 32));
				rfctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	/* push the hash table through the data-port RAM, then commit */
	lan743x_dp_write(adapter, DP_SEL_RFE_RAM,
			 DP_SEL_VHF_VLAN_LEN,
			 DP_SEL_VHF_HASH_LEN, hash_table);
	lan743x_csr_write(adapter, RFE_CTL, rfctl);
}

/* lan743x_dmac_init - soft-reset the DMA controller and program global
 * DMAC configuration: descriptor spacing, channel arbitration, max
 * read-request size, interrupt coalescing (non-A0 silicon only) and
 * OBFF thresholds.
 *
 * Return: 0 on success, -EPERM for an unsupported compile-time
 * descriptor spacing.
 */
static int lan743x_dmac_init(struct lan743x_adapter *adapter)
{
	u32 data = 0;

	lan743x_csr_write(adapter, DMAC_CMD, DMAC_CMD_SWR_);
	lan743x_csr_wait_for_bit(adapter, DMAC_CMD, DMAC_CMD_SWR_,
				 0, 1000, 20000, 100);
	switch (DEFAULT_DMA_DESCRIPTOR_SPACING) {
	case DMA_DESCRIPTOR_SPACING_16:
		data = DMAC_CFG_MAX_DSPACE_16_;
		break;
	case DMA_DESCRIPTOR_SPACING_32:
		data = DMAC_CFG_MAX_DSPACE_32_;
		break;
	case DMA_DESCRIPTOR_SPACING_64:
		data = DMAC_CFG_MAX_DSPACE_64_;
		break;
	case DMA_DESCRIPTOR_SPACING_128:
		data = DMAC_CFG_MAX_DSPACE_128_;
		break;
	default:
		return -EPERM;
	}
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
		data |= DMAC_CFG_COAL_EN_;
	data |= DMAC_CFG_CH_ARB_SEL_RX_HIGH_;
	data |= DMAC_CFG_MAX_READ_REQ_SET_(6);
	lan743x_csr_write(adapter, DMAC_CFG, data);
	data = DMAC_COAL_CFG_TIMER_LIMIT_SET_(1);
	data |= DMAC_COAL_CFG_TIMER_TX_START_;
	data |= DMAC_COAL_CFG_FLUSH_INTS_;
	data |= DMAC_COAL_CFG_INT_EXIT_COAL_;
	data |= DMAC_COAL_CFG_CSR_EXIT_COAL_;
	data |= DMAC_COAL_CFG_TX_THRES_SET_(0x0A);
	data |= DMAC_COAL_CFG_RX_THRES_SET_(0x0C);
	lan743x_csr_write(adapter, DMAC_COAL_CFG, data);
	data = DMAC_OBFF_TX_THRES_SET_(0x08);
	data |= DMAC_OBFF_RX_THRES_SET_(0x0A);
	lan743x_csr_write(adapter, DMAC_OBFF_CFG, data);
	return 0;
}

/* Derive the TX DMA channel state (started/stopped/pending) from the
 * start/stop bits in DMAC_CMD.
 */
static int lan743x_dmac_tx_get_state(struct lan743x_adapter *adapter,
				     int tx_channel)
{
	u32 dmac_cmd = 0;

	dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
	return DMAC_CHANNEL_STATE_SET((dmac_cmd &
				       DMAC_CMD_START_T_(tx_channel)),
				      (dmac_cmd &
				       DMAC_CMD_STOP_T_(tx_channel)));
}

/* Poll (up to 100 iterations of 1-20 ms) until the TX channel leaves
 * STOP_PENDING.  Return: the final channel state, or -ENODEV on
 * timeout.
 */
static int lan743x_dmac_tx_wait_till_stopped(struct lan743x_adapter *adapter,
					     int tx_channel)
{
	int timeout = 100;
	int result = 0;

	while (timeout &&
	       ((result = lan743x_dmac_tx_get_state(adapter, tx_channel)) ==
	       DMAC_CHANNEL_STATE_STOP_PENDING)) {
		usleep_range(1000, 20000);
		timeout--;
	}
	if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
		result = -ENODEV;
	return result;
}

/* RX-channel counterpart of lan743x_dmac_tx_get_state(). */
static int lan743x_dmac_rx_get_state(struct lan743x_adapter *adapter,
				     int rx_channel)
{
	u32 dmac_cmd = 0;

	dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
	return DMAC_CHANNEL_STATE_SET((dmac_cmd &
				       DMAC_CMD_START_R_(rx_channel)),
				      (dmac_cmd &
				       DMAC_CMD_STOP_R_(rx_channel)));
}

/* RX-channel counterpart of lan743x_dmac_tx_wait_till_stopped(). */
static int lan743x_dmac_rx_wait_till_stopped(struct lan743x_adapter *adapter,
					     int rx_channel)
{
	int timeout = 100;
	int result = 0;

	while (timeout &&
	       ((result = lan743x_dmac_rx_get_state(adapter, rx_channel)) ==
	       DMAC_CHANNEL_STATE_STOP_PENDING)) {
		usleep_range(1000, 20000);
		timeout--;
	}
	if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
		result = -ENODEV;
	return result;
}

/* lan743x_tx_release_desc - release one TX descriptor and its buffer
 * @tx: TX channel context
 * @descriptor_index: ring slot to release
 * @cleanup: true when tearing the ring down (pending timestamp
 *	     requests are cancelled instead of delivered)
 *
 * Unmaps the DMA buffer (page mapping for fragments, single mapping
 * otherwise), frees or timestamp-completes the skb held on the last
 * descriptor of a frame, then zeroes both descriptor and buffer_info.
 */
static void lan743x_tx_release_desc(struct lan743x_tx *tx,
				    int descriptor_index, bool cleanup)
{
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_tx_descriptor *descriptor = NULL;
	u32 descriptor_type = 0;
	bool ignore_sync;

	descriptor = &tx->ring_cpu_ptr[descriptor_index];
	buffer_info = &tx->buffer_info[descriptor_index];
	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE))
		goto done;

	descriptor_type = le32_to_cpu(descriptor->data0) &
			  TX_DESC_DATA0_DTYPE_MASK_;
	if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_)
		goto clean_up_data_descriptor;
	else
		goto clear_active;

clean_up_data_descriptor:
	if (buffer_info->dma_ptr) {
		if (buffer_info->flags &
		    TX_BUFFER_INFO_FLAG_SKB_FRAGMENT) {
			dma_unmap_page(&tx->adapter->pdev->dev,
				       buffer_info->dma_ptr,
				       buffer_info->buffer_length,
				       DMA_TO_DEVICE);
		} else {
			dma_unmap_single(&tx->adapter->pdev->dev,
					 buffer_info->dma_ptr,
					 buffer_info->buffer_length,
					 DMA_TO_DEVICE);
		}
		buffer_info->dma_ptr = 0;
		buffer_info->buffer_length = 0;
	}
	if (!buffer_info->skb)
		goto clear_active;

	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) {
		dev_kfree_skb_any(buffer_info->skb);
		goto clear_skb;
	}

	if (cleanup) {
		/* ring teardown: drop the pending timestamp request */
		lan743x_ptp_unrequest_tx_timestamp(tx->adapter);
		dev_kfree_skb_any(buffer_info->skb);
	} else {
		/* hand the skb to PTP for timestamp completion */
		ignore_sync = (buffer_info->flags &
			       TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0;
		lan743x_ptp_tx_timestamp_skb(tx->adapter,
					     buffer_info->skb, ignore_sync);
	}

clear_skb:
	buffer_info->skb = NULL;

clear_active:
	buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE;

done:
	memset(buffer_info, 0, sizeof(*buffer_info));
	memset(descriptor, 0, sizeof(*descriptor));
}

/* Next ring index with wrap-around. */
static int lan743x_tx_next_index(struct lan743x_tx *tx, int index)
{
	return ((++index) % tx->ring_size);
}

/* Release every descriptor the hardware has consumed, i.e. everything
 * between the driver's last_head and the device's written-back head.
 */
static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx)
{
	while (le32_to_cpu(*tx->head_cpu_ptr) != (tx->last_head)) {
		lan743x_tx_release_desc(tx, tx->last_head, false);
		tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
	}
}

/* Release all descriptors unconditionally (channel teardown) and zero
 * the ring and buffer-info arrays.
 */
static void lan743x_tx_release_all_descriptors(struct lan743x_tx *tx)
{
	u32 original_head = 0;

	original_head = tx->last_head;
	do {
		lan743x_tx_release_desc(tx, tx->last_head, true);
		tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
	} while (tx->last_head != original_head);
	memset(tx->ring_cpu_ptr, 0,
	       sizeof(*tx->ring_cpu_ptr) * (tx->ring_size));
	memset(tx->buffer_info, 0,
	       sizeof(*tx->buffer_info) * (tx->ring_size));
}

/* Number of ring descriptors needed to transmit @skb: one for the head
 * buffer, one per page fragment, plus an extension descriptor for GSO.
 */
static int lan743x_tx_get_desc_cnt(struct lan743x_tx *tx,
				   struct sk_buff *skb)
{
	int result = 1; /* 1 for the main skb buffer */
	int nr_frags = 0;

	if (skb_is_gso(skb))
		result++; /* requires an extension descriptor */
	nr_frags = skb_shinfo(skb)->nr_frags;
	result += nr_frags; /* 1 for each fragment buffer */
	return result;
}

/* Free descriptors available between software tail and hardware head;
 * one slot is always kept empty to distinguish full from empty.
 */
static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx)
{
	int last_head = tx->last_head;
	int last_tail = tx->last_tail;

	if (last_tail >= last_head)
		return tx->ring_size - last_tail + last_head - 1;
	else
		return last_head - last_tail - 1;
}

/* Apply @rx_ts_config (an RX_CFG_B_TS_* value) to every used RX
 * channel's RX_CFG_B register.
 *
 * NOTE(review): "data &= RX_CFG_B_TS_MASK_" keeps only the timestamp
 * field and clears every other RX_CFG_B bit before OR-ing in the new
 * config; if the intent was to clear just the TS field this would be
 * "~RX_CFG_B_TS_MASK_" - confirm against the register map.
 */
static void lan743x_rx_cfg_b_tstamp_config(struct lan743x_adapter *adapter,
					   int rx_ts_config)
{
	int channel_number;
	int index;
	u32 data;

	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
		channel_number = adapter->rx[index].channel_number;
		data = lan743x_csr_read(adapter, RX_CFG_B(channel_number));
		data &= RX_CFG_B_TS_MASK_;
		data |= rx_ts_config;
		lan743x_csr_write(adapter, RX_CFG_B(channel_number),
				  data);
	}
}

/* lan743x_rx_set_tstamp_mode - select the RX hardware timestamping mode
 * @adapter: adapter context
 * @rx_filter: HWTSTAMP_FILTER_* value requested by userspace
 *
 * Supports NONE, ALL, and PTP_V2_EVENT (descriptor timestamps plus the
 * PTP event-message filter).
 *
 * Return: 0 on success, -ERANGE for any other filter.
 */
int lan743x_rx_set_tstamp_mode(struct lan743x_adapter *adapter,
			       int rx_filter)
{
	u32 data;

	switch (rx_filter) {
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		lan743x_rx_cfg_b_tstamp_config(adapter,
					       RX_CFG_B_TS_DESCR_EN_);
		data = lan743x_csr_read(adapter, PTP_RX_TS_CFG);
		data |= PTP_RX_TS_CFG_EVENT_MSGS_;
		lan743x_csr_write(adapter, PTP_RX_TS_CFG, data);
		break;
	case HWTSTAMP_FILTER_NONE:
		lan743x_rx_cfg_b_tstamp_config(adapter,
					       RX_CFG_B_TS_NONE_);
		break;
	case HWTSTAMP_FILTER_ALL:
		lan743x_rx_cfg_b_tstamp_config(adapter,
					       RX_CFG_B_TS_ALL_RX_);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

/* Record the requested TX timestamping / one-step-sync mode in
 * ts_flags; consulted per-frame in lan743x_tx_xmit_frame().
 */
void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
				      bool enable_timestamping,
				      bool enable_onestep_sync)
{
	if (enable_timestamping)
		tx->ts_flags |= TX_TS_FLAG_TIMESTAMPING_ENABLED;
	else
		tx->ts_flags &= ~TX_TS_FLAG_TIMESTAMPING_ENABLED;
	if (enable_onestep_sync)
		tx->ts_flags |= TX_TS_FLAG_ONE_STEP_SYNC;
	else
		tx->ts_flags &= ~TX_TS_FLAG_ONE_STEP_SYNC;
}

/* lan743x_tx_frame_start - begin frame assembly with the first (head)
 * data descriptor
 * @tx: TX channel context (ring_lock held by caller)
 * @first_buffer: skb head data
 * @first_buffer_length: length of the head data
 * @frame_length: value for the frame-length/MSS descriptor field
 * @time_stamp: request a hardware TX timestamp
 * @check_sum: enable IP/TCP/UDP checksum insertion
 *
 * Maps the head buffer and fills descriptor data1-3; data0 is staged
 * in tx->frame_data0 and committed when the next descriptor is added
 * or the frame ends.
 *
 * Return: 0 on success, -ENOMEM if the DMA mapping fails.
 */
static int lan743x_tx_frame_start(struct lan743x_tx *tx,
				  unsigned char *first_buffer,
				  unsigned int first_buffer_length,
				  unsigned int frame_length,
				  bool time_stamp,
				  bool check_sum)
{
	/* called only from within lan743x_tx_xmit_frame.
	 * assuming tx->ring_lock has already been acquired.
	 */
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_adapter *adapter = tx->adapter;
	struct device *dev = &adapter->pdev->dev;
	dma_addr_t dma_ptr;

	tx->frame_flags |= TX_FRAME_FLAG_IN_PROGRESS;
	tx->frame_first = tx->last_tail;
	tx->frame_tail = tx->frame_first;

	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];
	dma_ptr = dma_map_single(dev, first_buffer, first_buffer_length,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_ptr))
		return -ENOMEM;

	tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
	tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
	tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
			       TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);

	buffer_info->skb = NULL;
	buffer_info->dma_ptr = dma_ptr;
	buffer_info->buffer_length = first_buffer_length;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;

	tx->frame_data0 = (first_buffer_length &
			  TX_DESC_DATA0_BUF_LENGTH_MASK_) |
			  TX_DESC_DATA0_DTYPE_DATA_ |
			  TX_DESC_DATA0_FS_ |
			  TX_DESC_DATA0_FCS_;
	if (time_stamp)
		tx->frame_data0 |= TX_DESC_DATA0_TSE_;

	if (check_sum)
		tx->frame_data0 |= TX_DESC_DATA0_ICE_ |
				   TX_DESC_DATA0_IPE_ |
				   TX_DESC_DATA0_TPE_;

	/* data0 will be
	   programmed in one of other frame assembler functions */
	return 0;
}

/* lan743x_tx_frame_add_lso - append the GSO extension descriptor
 * @tx: TX channel context (ring_lock held by caller)
 * @frame_length: total payload length for the LSO engine
 * @nr_frags: number of page fragments still to come (<= 0 means this
 *	      extension descriptor also ends the frame)
 *
 * Commits the staged data0 of the previous descriptor, advances the
 * tail, and stages an EXT/LSO descriptor whose data0 is again deferred.
 */
static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
				     unsigned int frame_length,
				     int nr_frags)
{
	/* called only from within lan743x_tx_xmit_frame.
	 * assuming tx->ring_lock has already been acquired.
	 */
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;

	/* wrap up previous descriptor */
	tx->frame_data0 |= TX_DESC_DATA0_EXT_;
	if (nr_frags <= 0) {
		tx->frame_data0 |= TX_DESC_DATA0_LS_;
		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
	}
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);

	/* move to next descriptor */
	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];

	/* add extension descriptor */
	tx_descriptor->data1 = 0;
	tx_descriptor->data2 = 0;
	tx_descriptor->data3 = 0;

	buffer_info->skb = NULL;
	buffer_info->dma_ptr = 0;
	buffer_info->buffer_length = 0;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;

	tx->frame_data0 = (frame_length & TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_) |
			  TX_DESC_DATA0_DTYPE_EXT_ |
			  TX_DESC_DATA0_EXT_LSO_;

	/* data0 will be programmed in one of other frame assembler functions */
}

/* lan743x_tx_frame_add_fragment - append one page-fragment descriptor
 * @tx: TX channel context (ring_lock held by caller)
 * @fragment: skb page fragment to map
 * @frame_length: value for the frame-length/MSS descriptor field
 *
 * Zero-length fragments are silently skipped.  On DMA-mapping failure
 * every descriptor of the partially built frame is released and the
 * frame state reset; the caller must NOT call lan743x_tx_frame_end
 * afterwards.
 *
 * Return: 0 on success, -ENOMEM on mapping failure.
 */
static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
					 const skb_frag_t *fragment,
					 unsigned int frame_length)
{
	/* called only from within lan743x_tx_xmit_frame
	 * assuming tx->ring_lock has already been acquired
	 */
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_adapter *adapter = tx->adapter;
	struct device *dev = &adapter->pdev->dev;
	unsigned int fragment_length = 0;
	dma_addr_t dma_ptr;

	fragment_length = skb_frag_size(fragment);
	if (!fragment_length)
		return 0;

	/* wrap up previous descriptor */
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);

	/* move to next descriptor */
	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];
	dma_ptr = skb_frag_dma_map(dev, fragment,
				   0, fragment_length,
				   DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_ptr)) {
		int desc_index;

		/* cleanup all previously setup descriptors */
		desc_index = tx->frame_first;
		while (desc_index != tx->frame_tail) {
			lan743x_tx_release_desc(tx, desc_index, true);
			desc_index = lan743x_tx_next_index(tx, desc_index);
		}
		dma_wmb();
		tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
		tx->frame_first = 0;
		tx->frame_data0 = 0;
		tx->frame_tail = 0;
		return -ENOMEM;
	}

	tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
	tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
	tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
			       TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);

	buffer_info->skb = NULL;
	buffer_info->dma_ptr = dma_ptr;
	buffer_info->buffer_length = fragment_length;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_SKB_FRAGMENT;

	tx->frame_data0 = (fragment_length & TX_DESC_DATA0_BUF_LENGTH_MASK_) |
			  TX_DESC_DATA0_DTYPE_DATA_ |
			  TX_DESC_DATA0_FCS_;

	/* data0 will be programmed in one of other frame assembler functions */
	return 0;
}

/* lan743x_tx_frame_end - finish frame assembly and kick the DMA
 * @tx: TX channel context (ring_lock held by caller)
 * @skb: the skb, stored on the last descriptor for later release
 * @time_stamp: mark the skb as awaiting a hardware timestamp
 * @ignore_sync: PTP one-step-sync flag for timestamp completion
 *
 * Flags the staged descriptor as last-in-frame (+ interrupt on
 * complete) when it is a data descriptor, commits it, advances the
 * tail, and writes the new tail to the TX_TAIL register (with
 * interrupt-enable side-band bits as configured) after a dma_wmb() so
 * the device only sees fully written descriptors.
 */
static void lan743x_tx_frame_end(struct lan743x_tx *tx,
				 struct sk_buff *skb,
				 bool time_stamp,
				 bool ignore_sync)
{
	/* called only from within lan743x_tx_xmit_frame
	 * assuming tx->ring_lock has already been acquired
	 */
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_adapter *adapter = tx->adapter;
	u32 tx_tail_flags = 0;

	/* wrap up previous descriptor */
	if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
	    TX_DESC_DATA0_DTYPE_DATA_) {
		tx->frame_data0 |= TX_DESC_DATA0_LS_;
		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
	}

	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];
	buffer_info->skb = skb;
	if (time_stamp)
		buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
	if (ignore_sync)
		buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;

	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
	tx->last_tail = tx->frame_tail;

	dma_wmb();

	if (tx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
		tx_tail_flags |= TX_TAIL_SET_TOP_INT_VEC_EN_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET)
		tx_tail_flags |= TX_TAIL_SET_DMAC_INT_EN_ |
				 TX_TAIL_SET_TOP_INT_EN_;

	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
			  tx_tail_flags | tx->frame_tail);
	tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
}

/* lan743x_tx_xmit_frame - transmit one skb on a TX channel
 *
 * Checks descriptor availability (stopping the queue and remembering
 * the shortfall when full, dropping frames that can never fit),
 * optionally requests a PTP TX timestamp, then assembles the frame via
 * lan743x_tx_frame_start/add_lso/add_fragment/frame_end under the ring
 * lock.
 *
 * Return: NETDEV_TX_OK or NETDEV_TX_BUSY.
 */
static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
					 struct sk_buff *skb)
{
	int required_number_of_descriptors = 0;
	unsigned int start_frame_length = 0;
	netdev_tx_t retval = NETDEV_TX_OK;
	unsigned int frame_length = 0;
	unsigned int head_length = 0;
	unsigned long irq_flags = 0;
	bool do_timestamp = false;
	bool ignore_sync = false;
	struct netdev_queue *txq;
	int nr_frags = 0;
	bool gso = false;
	int j;

	required_number_of_descriptors =
lan743x_tx_get_desc_cnt(tx, skb);

	spin_lock_irqsave(&tx->ring_lock, irq_flags);
	/* Not enough free descriptors: either the frame can never fit
	 * (drop it) or it may fit later (stop the queue and remember how
	 * many descriptors the poll routine must see free before waking).
	 */
	if (required_number_of_descriptors >
	    lan743x_tx_get_avail_desc(tx)) {
		if (required_number_of_descriptors > (tx->ring_size - 1)) {
			dev_kfree_skb_irq(skb);
		} else {
			/* save how many descriptors we needed to restart the queue */
			tx->rqd_descriptors = required_number_of_descriptors;
			retval = NETDEV_TX_BUSY;
			txq = netdev_get_tx_queue(tx->adapter->netdev,
						  tx->channel_number);
			netif_tx_stop_queue(txq);
		}
		goto unlock;
	}

	/* space available, transmit skb */
	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED) &&
	    (lan743x_ptp_request_tx_timestamp(tx->adapter))) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		do_timestamp = true;
		if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC)
			ignore_sync = true;
	}
	head_length = skb_headlen(skb);
	frame_length = skb_pagelen(skb);
	nr_frags = skb_shinfo(skb)->nr_frags;
	start_frame_length = frame_length;
	gso = skb_is_gso(skb);
	if (gso) {
		/* for GSO the hardware is given the segment size, floored
		 * at 8 bytes, instead of the full frame length
		 */
		start_frame_length = max(skb_shinfo(skb)->gso_size,
					 (unsigned short)8);
	}

	if (lan743x_tx_frame_start(tx,
				   skb->data, head_length,
				   start_frame_length,
				   do_timestamp,
				   skb->ip_summed == CHECKSUM_PARTIAL)) {
		dev_kfree_skb_irq(skb);
		goto unlock;
	}
	tx->frame_count++;

	if (gso)
		lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);

	if (nr_frags <= 0)
		goto finish;

	for (j = 0; j < nr_frags; j++) {
		const skb_frag_t *frag = &(skb_shinfo(skb)->frags[j]);

		if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) {
			/* upon error no need to call
			 * lan743x_tx_frame_end
			 * frame assembler clean up was performed inside
			 * lan743x_tx_frame_add_fragment
			 */
			dev_kfree_skb_irq(skb);
			goto unlock;
		}
	}

finish:
	lan743x_tx_frame_end(tx, skb, do_timestamp, ignore_sync);

unlock:
	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
	return retval;
}

/* NAPI poll for one TX channel: acknowledge the IOC interrupt, reap
 * completed descriptors under the ring lock, wake the queue once enough
 * descriptors are free again, then re-enable the channel interrupt.
 * Always reports 0 work so NAPI completes immediately.
 */
static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi);
	struct lan743x_adapter *adapter = tx->adapter;
	unsigned long irq_flags = 0;
	struct netdev_queue *txq;
	u32 ioc_bit = 0;

	ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
	/* read status before (optionally) clearing it write-to-clear */
	lan743x_csr_read(adapter, DMAC_INT_STS);
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C)
		lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit);
	spin_lock_irqsave(&tx->ring_lock, irq_flags);

	/* clean up tx ring */
	lan743x_tx_release_completed_descriptors(tx);
	txq = netdev_get_tx_queue(adapter->netdev, tx->channel_number);
	if (netif_tx_queue_stopped(txq)) {
		if (tx->rqd_descriptors) {
			/* only wake once the frame that stopped the queue
			 * could actually be accommodated
			 */
			if (tx->rqd_descriptors <=
			    lan743x_tx_get_avail_desc(tx)) {
				tx->rqd_descriptors = 0;
				netif_tx_wake_queue(txq);
			}
		} else {
			netif_tx_wake_queue(txq);
		}
	}
	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);

	if (!napi_complete(napi))
		goto done;

	/* enable isr */
	lan743x_csr_write(adapter, INT_EN_SET,
			  INT_BIT_DMA_TX_(tx->channel_number));
	/* read back to flush the posted write before returning */
	lan743x_csr_read(adapter, INT_STS);

done:
	return 0;
}

/* Free all DMA memory owned by a TX channel: head writeback slot,
 * per-descriptor buffer_info array, then the descriptor ring itself.
 */
static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx)
{
	if (tx->head_cpu_ptr) {
		dma_free_coherent(&tx->adapter->pdev->dev,
				  sizeof(*tx->head_cpu_ptr), tx->head_cpu_ptr,
				  tx->head_dma_ptr);
		tx->head_cpu_ptr = NULL;
		tx->head_dma_ptr = 0;
	}
	kfree(tx->buffer_info);
	tx->buffer_info = NULL;

	if (tx->ring_cpu_ptr) {
		dma_free_coherent(&tx->adapter->pdev->dev,
				  tx->ring_allocation_size, tx->ring_cpu_ptr,
				  tx->ring_dma_ptr);
		tx->ring_allocation_size = 0;
		tx->ring_cpu_ptr = NULL;
		tx->ring_dma_ptr = 0;
	}
	tx->ring_size = 0;
}

/* Allocate the TX descriptor ring, buffer_info array and head-writeback
 * word for one channel. On any failure everything already allocated is
 * released via lan743x_tx_ring_cleanup(). Returns 0 or -errno.
 */
static int lan743x_tx_ring_init(struct lan743x_tx *tx)
{
	size_t ring_allocation_size = 0;
	void *cpu_ptr = NULL;
	dma_addr_t dma_ptr;
	int ret = -ENOMEM;

	tx->ring_size = LAN743X_TX_RING_SIZE;
	/* ring length must fit in the TX_CFG_B length field */
	if (tx->ring_size & ~TX_CFG_B_TX_RING_LEN_MASK_) {
		ret = -EINVAL;
		goto cleanup;
	}
	if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
				      DMA_BIT_MASK(64))) {
		dev_warn(&tx->adapter->pdev->dev,
			 "lan743x_: No suitable DMA available\n");
		ret = -ENOMEM;
		goto cleanup;
	}
	ring_allocation_size = ALIGN(tx->ring_size *
				     sizeof(struct lan743x_tx_descriptor),
				     PAGE_SIZE);
	dma_ptr = 0;
	cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
				     ring_allocation_size, &dma_ptr, GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}

	tx->ring_allocation_size = ring_allocation_size;
	tx->ring_cpu_ptr = (struct lan743x_tx_descriptor *)cpu_ptr;
	tx->ring_dma_ptr = dma_ptr;

	cpu_ptr = kcalloc(tx->ring_size, sizeof(*tx->buffer_info), GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr;
	dma_ptr = 0;
	cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
				     sizeof(*tx->head_cpu_ptr), &dma_ptr,
				     GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}

	tx->head_cpu_ptr = cpu_ptr;
	tx->head_dma_ptr = dma_ptr;
	/* head writeback address must be 4-byte aligned for the hardware */
	if (tx->head_dma_ptr & 0x3) {
		ret = -ENOMEM;
		goto cleanup;
	}

	return 0;

cleanup:
	lan743x_tx_ring_cleanup(tx);
	return ret;
}

/* Stop a TX channel: halt the DMAC, mask its interrupts, tear down NAPI,
 * disable the FIFO, release outstanding descriptors and free ring memory.
 * Order mirrors lan743x_tx_open() in reverse.
 */
static void lan743x_tx_close(struct lan743x_tx *tx)
{
	struct lan743x_adapter *adapter = tx->adapter;

	lan743x_csr_write(adapter,
			  DMAC_CMD,
			  DMAC_CMD_STOP_T_(tx->channel_number));
	lan743x_dmac_tx_wait_till_stopped(adapter, tx->channel_number);

	lan743x_csr_write(adapter,
			  DMAC_INT_EN_CLR,
			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));
	lan743x_csr_write(adapter, INT_EN_CLR,
			  INT_BIT_DMA_TX_(tx->channel_number));
	napi_disable(&tx->napi);
	netif_napi_del(&tx->napi);

	lan743x_csr_write(adapter, FCT_TX_CTL,
			  FCT_TX_CTL_DIS_(tx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
				 FCT_TX_CTL_EN_(tx->channel_number),
				 0, 1000, 20000, 100);

	lan743x_tx_release_all_descriptors(tx);

	tx->rqd_descriptors = 0;

	lan743x_tx_ring_cleanup(tx);
}

/* Bring up a TX channel: allocate the ring, reset and enable the TX FIFO,
 * soft-reset the DMA channel, program ring/writeback addresses and the
 * TX_CFG_A/B/C registers, register NAPI, unmask interrupts and start the
 * DMAC. Returns 0 or -errno from ring init.
 */
static int lan743x_tx_open(struct lan743x_tx *tx)
{
	struct lan743x_adapter *adapter = NULL;
	u32 data = 0;
	int ret;

	adapter = tx->adapter;
	ret = lan743x_tx_ring_init(tx);
	if (ret)
		return ret;

	/* initialize fifo */
	lan743x_csr_write(adapter, FCT_TX_CTL,
			  FCT_TX_CTL_RESET_(tx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
				 FCT_TX_CTL_RESET_(tx->channel_number),
				 0, 1000, 20000, 100);

	/* enable fifo */
	lan743x_csr_write(adapter, FCT_TX_CTL,
			  FCT_TX_CTL_EN_(tx->channel_number));

	/* reset tx channel */
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_TX_SWR_(tx->channel_number));
	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
				 DMAC_CMD_TX_SWR_(tx->channel_number),
				 0, 1000, 20000, 100);

	/* Write TX_BASE_ADDR */
	lan743x_csr_write(adapter,
			  TX_BASE_ADDRH(tx->channel_number),
			  DMA_ADDR_HIGH32(tx->ring_dma_ptr));
	lan743x_csr_write(adapter,
			  TX_BASE_ADDRL(tx->channel_number),
			  DMA_ADDR_LOW32(tx->ring_dma_ptr));

	/* Write TX_CFG_B */
	data = lan743x_csr_read(adapter, TX_CFG_B(tx->channel_number));
	data &= ~TX_CFG_B_TX_RING_LEN_MASK_;
	data |= ((tx->ring_size) & TX_CFG_B_TX_RING_LEN_MASK_);
	/* larger DMA burst only on post-A0 silicon */
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
		data |= TX_CFG_B_TDMABL_512_;
	lan743x_csr_write(adapter, TX_CFG_B(tx->channel_number), data);

	/* Write TX_CFG_A */
	data = TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ | TX_CFG_A_TX_HP_WB_EN_;
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
		data |= TX_CFG_A_TX_HP_WB_ON_INT_TMR_;
		data |= TX_CFG_A_TX_PF_THRES_SET_(0x10);
		data |= TX_CFG_A_TX_PF_PRI_THRES_SET_(0x04);
		data |= TX_CFG_A_TX_HP_WB_THRES_SET_(0x07);
	}
	lan743x_csr_write(adapter, TX_CFG_A(tx->channel_number), data);

	/* Write TX_HEAD_WRITEBACK_ADDR */
	lan743x_csr_write(adapter,
			  TX_HEAD_WRITEBACK_ADDRH(tx->channel_number),
			  DMA_ADDR_HIGH32(tx->head_dma_ptr));
	lan743x_csr_write(adapter,
			  TX_HEAD_WRITEBACK_ADDRL(tx->channel_number),
			  DMA_ADDR_LOW32(tx->head_dma_ptr));

	/* set last head */
	tx->last_head = lan743x_csr_read(adapter, TX_HEAD(tx->channel_number));

	/* write TX_TAIL */
	tx->last_tail = 0;
	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
			  (u32)(tx->last_tail));
	tx->vector_flags = lan743x_intr_get_vector_flags(adapter,
							 INT_BIT_DMA_TX_
							 (tx->channel_number));
	netif_napi_add_tx_weight(adapter->netdev,
				 &tx->napi, lan743x_tx_napi_poll,
				 NAPI_POLL_WEIGHT);
	napi_enable(&tx->napi);

	/* program interrupt auto-clear/R2C behavior per vector flags */
	data = 0;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
		data |= TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
		data |= TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
		data |= TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
		data |= TX_CFG_C_TX_INT_EN_R2C_;
	lan743x_csr_write(adapter, TX_CFG_C(tx->channel_number), data);

	if (!(tx->vector_flags &
	      LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET))
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_TX_(tx->channel_number));
	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));

	/* start dmac channel */
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_START_T_(tx->channel_number));
	return 0;
}

/* Advance an RX ring index with wraparound. */
static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
{
	return ((++index) % rx->ring_size);
}

static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
{
	/* update the tail once per 8 descriptors */
	if ((index & 7) == 7)
		lan743x_csr_write(rx->adapter, RX_TAIL(rx->channel_number),
				  index);
}

/* Attach a freshly allocated, DMA-mapped skb to ring slot @index and hand
 * the descriptor back to the hardware (OWN bit set). If the slot already
 * held a mapped buffer, that old mapping is synced/unmapped first.
 * Returns 0, or -ENOMEM if allocation or mapping failed (slot unchanged).
 */
static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
					gfp_t gfp)
{
	struct net_device *netdev = rx->adapter->netdev;
	struct device *dev = &rx->adapter->pdev->dev;
	struct lan743x_rx_buffer_info *buffer_info;
	unsigned int buffer_length, used_length;
	struct lan743x_rx_descriptor *descriptor;
	struct sk_buff *skb;
	dma_addr_t dma_ptr;

	buffer_length = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + RX_HEAD_PADDING;

	descriptor = &rx->ring_cpu_ptr[index];
	buffer_info = &rx->buffer_info[index];
	skb = __netdev_alloc_skb(netdev, buffer_length, gfp);
	if (!skb)
		return -ENOMEM;
	dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_ptr)) {
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}
	if (buffer_info->dma_ptr) {
		/* sync used area of buffer only */
		if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_)
			/* frame length is valid only if LS bit is set.
			 * it's a safe upper bound for the used area in this
			 * buffer.
2537 */ 2538 used_length = min(RX_DESC_DATA0_FRAME_LENGTH_GET_ 2539 (le32_to_cpu(descriptor->data0)), 2540 buffer_info->buffer_length); 2541 else 2542 used_length = buffer_info->buffer_length; 2543 dma_sync_single_for_cpu(dev, buffer_info->dma_ptr, 2544 used_length, 2545 DMA_FROM_DEVICE); 2546 dma_unmap_single_attrs(dev, buffer_info->dma_ptr, 2547 buffer_info->buffer_length, 2548 DMA_FROM_DEVICE, 2549 DMA_ATTR_SKIP_CPU_SYNC); 2550 } 2551 2552 buffer_info->skb = skb; 2553 buffer_info->dma_ptr = dma_ptr; 2554 buffer_info->buffer_length = buffer_length; 2555 descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr)); 2556 descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr)); 2557 descriptor->data3 = 0; 2558 descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ | 2559 (buffer_length & RX_DESC_DATA0_BUF_LENGTH_MASK_))); 2560 lan743x_rx_update_tail(rx, index); 2561 2562 return 0; 2563 } 2564 2565 static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index) 2566 { 2567 struct lan743x_rx_buffer_info *buffer_info; 2568 struct lan743x_rx_descriptor *descriptor; 2569 2570 descriptor = &rx->ring_cpu_ptr[index]; 2571 buffer_info = &rx->buffer_info[index]; 2572 2573 descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr)); 2574 descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr)); 2575 descriptor->data3 = 0; 2576 descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ | 2577 ((buffer_info->buffer_length) & 2578 RX_DESC_DATA0_BUF_LENGTH_MASK_))); 2579 lan743x_rx_update_tail(rx, index); 2580 } 2581 2582 static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index) 2583 { 2584 struct lan743x_rx_buffer_info *buffer_info; 2585 struct lan743x_rx_descriptor *descriptor; 2586 2587 descriptor = &rx->ring_cpu_ptr[index]; 2588 buffer_info = &rx->buffer_info[index]; 2589 2590 memset(descriptor, 0, sizeof(*descriptor)); 2591 2592 if (buffer_info->dma_ptr) { 2593 dma_unmap_single(&rx->adapter->pdev->dev, 2594 
buffer_info->dma_ptr, 2595 buffer_info->buffer_length, 2596 DMA_FROM_DEVICE); 2597 buffer_info->dma_ptr = 0; 2598 } 2599 2600 if (buffer_info->skb) { 2601 dev_kfree_skb(buffer_info->skb); 2602 buffer_info->skb = NULL; 2603 } 2604 2605 memset(buffer_info, 0, sizeof(*buffer_info)); 2606 } 2607 2608 static struct sk_buff * 2609 lan743x_rx_trim_skb(struct sk_buff *skb, int frame_length) 2610 { 2611 if (skb_linearize(skb)) { 2612 dev_kfree_skb_irq(skb); 2613 return NULL; 2614 } 2615 frame_length = max_t(int, 0, frame_length - ETH_FCS_LEN); 2616 if (skb->len > frame_length) { 2617 skb->tail -= skb->len - frame_length; 2618 skb->len = frame_length; 2619 } 2620 return skb; 2621 } 2622 2623 static int lan743x_rx_process_buffer(struct lan743x_rx *rx) 2624 { 2625 int current_head_index = le32_to_cpu(*rx->head_cpu_ptr); 2626 struct lan743x_rx_descriptor *descriptor, *desc_ext; 2627 struct net_device *netdev = rx->adapter->netdev; 2628 int result = RX_PROCESS_RESULT_NOTHING_TO_DO; 2629 struct lan743x_rx_buffer_info *buffer_info; 2630 int frame_length, buffer_length; 2631 bool is_ice, is_tce, is_icsm; 2632 int extension_index = -1; 2633 bool is_last, is_first; 2634 struct sk_buff *skb; 2635 2636 if (current_head_index < 0 || current_head_index >= rx->ring_size) 2637 goto done; 2638 2639 if (rx->last_head < 0 || rx->last_head >= rx->ring_size) 2640 goto done; 2641 2642 if (rx->last_head == current_head_index) 2643 goto done; 2644 2645 descriptor = &rx->ring_cpu_ptr[rx->last_head]; 2646 if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_OWN_) 2647 goto done; 2648 buffer_info = &rx->buffer_info[rx->last_head]; 2649 2650 is_last = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_; 2651 is_first = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_FS_; 2652 2653 if (is_last && le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_EXT_) { 2654 /* extension is expected to follow */ 2655 int index = lan743x_rx_next_index(rx, rx->last_head); 2656 2657 if (index == current_head_index) 2658 /* 
extension not yet available */ 2659 goto done; 2660 desc_ext = &rx->ring_cpu_ptr[index]; 2661 if (le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_OWN_) 2662 /* extension not yet available */ 2663 goto done; 2664 if (!(le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_EXT_)) 2665 goto move_forward; 2666 extension_index = index; 2667 } 2668 2669 /* Only the last buffer in a multi-buffer frame contains the total frame 2670 * length. The chip occasionally sends more buffers than strictly 2671 * required to reach the total frame length. 2672 * Handle this by adding all buffers to the skb in their entirety. 2673 * Once the real frame length is known, trim the skb. 2674 */ 2675 frame_length = 2676 RX_DESC_DATA0_FRAME_LENGTH_GET_(le32_to_cpu(descriptor->data0)); 2677 buffer_length = buffer_info->buffer_length; 2678 is_ice = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICE_; 2679 is_tce = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_TCE_; 2680 is_icsm = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICSM_; 2681 2682 netdev_dbg(netdev, "%s%schunk: %d/%d", 2683 is_first ? "first " : " ", 2684 is_last ? "last " : " ", 2685 frame_length, buffer_length); 2686 2687 /* save existing skb, allocate new skb and map to dma */ 2688 skb = buffer_info->skb; 2689 if (lan743x_rx_init_ring_element(rx, rx->last_head, 2690 GFP_ATOMIC | GFP_DMA)) { 2691 /* failed to allocate next skb. 2692 * Memory is very low. 2693 * Drop this packet and reuse buffer. 
2694 */ 2695 lan743x_rx_reuse_ring_element(rx, rx->last_head); 2696 /* drop packet that was being assembled */ 2697 dev_kfree_skb_irq(rx->skb_head); 2698 rx->skb_head = NULL; 2699 goto process_extension; 2700 } 2701 2702 /* add buffers to skb via skb->frag_list */ 2703 if (is_first) { 2704 skb_reserve(skb, RX_HEAD_PADDING); 2705 skb_put(skb, buffer_length - RX_HEAD_PADDING); 2706 if (rx->skb_head) 2707 dev_kfree_skb_irq(rx->skb_head); 2708 rx->skb_head = skb; 2709 } else if (rx->skb_head) { 2710 skb_put(skb, buffer_length); 2711 if (skb_shinfo(rx->skb_head)->frag_list) 2712 rx->skb_tail->next = skb; 2713 else 2714 skb_shinfo(rx->skb_head)->frag_list = skb; 2715 rx->skb_tail = skb; 2716 rx->skb_head->len += skb->len; 2717 rx->skb_head->data_len += skb->len; 2718 rx->skb_head->truesize += skb->truesize; 2719 } else { 2720 /* packet to assemble has already been dropped because one or 2721 * more of its buffers could not be allocated 2722 */ 2723 netdev_dbg(netdev, "drop buffer intended for dropped packet"); 2724 dev_kfree_skb_irq(skb); 2725 } 2726 2727 process_extension: 2728 if (extension_index >= 0) { 2729 u32 ts_sec; 2730 u32 ts_nsec; 2731 2732 ts_sec = le32_to_cpu(desc_ext->data1); 2733 ts_nsec = (le32_to_cpu(desc_ext->data2) & 2734 RX_DESC_DATA2_TS_NS_MASK_); 2735 if (rx->skb_head) 2736 skb_hwtstamps(rx->skb_head)->hwtstamp = 2737 ktime_set(ts_sec, ts_nsec); 2738 lan743x_rx_reuse_ring_element(rx, extension_index); 2739 rx->last_head = extension_index; 2740 netdev_dbg(netdev, "process extension"); 2741 } 2742 2743 if (is_last && rx->skb_head) 2744 rx->skb_head = lan743x_rx_trim_skb(rx->skb_head, frame_length); 2745 2746 if (is_last && rx->skb_head) { 2747 rx->skb_head->protocol = eth_type_trans(rx->skb_head, 2748 rx->adapter->netdev); 2749 if (rx->adapter->netdev->features & NETIF_F_RXCSUM) { 2750 if (!is_ice && !is_tce && !is_icsm) 2751 skb->ip_summed = CHECKSUM_UNNECESSARY; 2752 } 2753 netdev_dbg(netdev, "sending %d byte frame to OS", 2754 rx->skb_head->len); 
		napi_gro_receive(&rx->napi, rx->skb_head);
		rx->skb_head = NULL;
	}

move_forward:
	/* push tail and head forward */
	rx->last_tail = rx->last_head;
	rx->last_head = lan743x_rx_next_index(rx, rx->last_head);
	result = RX_PROCESS_RESULT_BUFFER_RECEIVED;
done:
	return result;
}

/* NAPI poll for one RX channel: process up to @weight descriptors and,
 * only when the ring is fully drained, complete NAPI and re-arm the
 * interrupt (via RX_TAIL flags on chip variants that require it).
 */
static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi);
	struct lan743x_adapter *adapter = rx->adapter;
	int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
	u32 rx_tail_flags = 0;
	int count;

	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) {
		/* clear int status bit before reading packet */
		lan743x_csr_write(adapter, DMAC_INT_STS,
				  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	}
	for (count = 0; count < weight; count++) {
		result = lan743x_rx_process_buffer(rx);
		if (result == RX_PROCESS_RESULT_NOTHING_TO_DO)
			break;
	}
	rx->frame_count += count;
	/* work remains (or budget exhausted): stay in polling mode */
	if (count == weight || result == RX_PROCESS_RESULT_BUFFER_RECEIVED)
		return weight;

	if (!napi_complete_done(napi, count))
		return count;

	/* re-arm interrupts, must write to rx tail on some chip variants */
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
		rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) {
		rx_tail_flags |= RX_TAIL_SET_TOP_INT_EN_;
	} else {
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_RX_(rx->channel_number));
	}

	if (rx_tail_flags)
		lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
				  rx_tail_flags | rx->last_tail);

	return count;
}

/* Free all memory owned by an RX channel: every ring slot's skb/mapping,
 * the head writeback slot, the buffer_info array and the descriptor ring.
 */
static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx)
{
	if (rx->buffer_info && rx->ring_cpu_ptr) {
		int index;

		for (index = 0; index < rx->ring_size; index++)
			lan743x_rx_release_ring_element(rx, index);
	}

	if (rx->head_cpu_ptr) {
		dma_free_coherent(&rx->adapter->pdev->dev,
				  sizeof(*rx->head_cpu_ptr), rx->head_cpu_ptr,
				  rx->head_dma_ptr);
		rx->head_cpu_ptr = NULL;
		rx->head_dma_ptr = 0;
	}

	kfree(rx->buffer_info);
	rx->buffer_info = NULL;

	if (rx->ring_cpu_ptr) {
		dma_free_coherent(&rx->adapter->pdev->dev,
				  rx->ring_allocation_size, rx->ring_cpu_ptr,
				  rx->ring_dma_ptr);
		rx->ring_allocation_size = 0;
		rx->ring_cpu_ptr = NULL;
		rx->ring_dma_ptr = 0;
	}

	rx->ring_size = 0;
	rx->last_head = 0;
}

/* Allocate the RX descriptor ring, buffer_info array, head-writeback word
 * and an skb for every ring slot. On any failure everything is released
 * via lan743x_rx_ring_cleanup(). Returns 0 or -errno.
 */
static int lan743x_rx_ring_init(struct lan743x_rx *rx)
{
	size_t ring_allocation_size = 0;
	dma_addr_t dma_ptr = 0;
	void *cpu_ptr = NULL;
	int ret = -ENOMEM;
	int index = 0;

	rx->ring_size = LAN743X_RX_RING_SIZE;
	if (rx->ring_size <= 1) {
		ret = -EINVAL;
		goto cleanup;
	}
	/* ring length must fit in the RX_CFG_B length field */
	if (rx->ring_size & ~RX_CFG_B_RX_RING_LEN_MASK_) {
		ret = -EINVAL;
		goto cleanup;
	}
	if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
				      DMA_BIT_MASK(64))) {
		dev_warn(&rx->adapter->pdev->dev,
			 "lan743x_: No suitable DMA available\n");
		ret = -ENOMEM;
		goto cleanup;
	}
	ring_allocation_size = ALIGN(rx->ring_size *
				     sizeof(struct lan743x_rx_descriptor),
				     PAGE_SIZE);
	dma_ptr = 0;
	cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
				     ring_allocation_size, &dma_ptr, GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	rx->ring_allocation_size = ring_allocation_size;
	rx->ring_cpu_ptr = (struct lan743x_rx_descriptor *)cpu_ptr;
	rx->ring_dma_ptr = dma_ptr;

	cpu_ptr = kcalloc(rx->ring_size, sizeof(*rx->buffer_info),
			  GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr;
	dma_ptr = 0;
	cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
				     sizeof(*rx->head_cpu_ptr), &dma_ptr,
				     GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}

	rx->head_cpu_ptr = cpu_ptr;
	rx->head_dma_ptr = dma_ptr;
	/* head writeback address must be 4-byte aligned for the hardware */
	if (rx->head_dma_ptr & 0x3) {
		ret = -ENOMEM;
		goto cleanup;
	}

	rx->last_head = 0;
	/* populate every slot with a mapped receive buffer */
	for (index = 0; index < rx->ring_size; index++) {
		ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL);
		if (ret)
			goto cleanup;
	}
	return 0;

cleanup:
	netif_warn(rx->adapter, ifup, rx->adapter->netdev,
		   "Error allocating memory for LAN743x\n");

	lan743x_rx_ring_cleanup(rx);
	return ret;
}

/* Stop an RX channel: disable the FIFO, halt the DMAC, mask interrupts,
 * tear down NAPI and free ring memory. Order mirrors lan743x_rx_open()
 * in reverse.
 */
static void lan743x_rx_close(struct lan743x_rx *rx)
{
	struct lan743x_adapter *adapter = rx->adapter;

	lan743x_csr_write(adapter, FCT_RX_CTL,
			  FCT_RX_CTL_DIS_(rx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
				 FCT_RX_CTL_EN_(rx->channel_number),
				 0, 1000, 20000, 100);

	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_STOP_R_(rx->channel_number));
	lan743x_dmac_rx_wait_till_stopped(adapter, rx->channel_number);

	lan743x_csr_write(adapter, DMAC_INT_EN_CLR,
			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	lan743x_csr_write(adapter, INT_EN_CLR,
			  INT_BIT_DMA_RX_(rx->channel_number));
	napi_disable(&rx->napi);

	netif_napi_del(&rx->napi);

	lan743x_rx_ring_cleanup(rx);
}

/* Bring up an RX channel: allocate the ring, soft-reset the DMA channel,
 * program ring/writeback addresses and RX_CFG_A/B/C, arm the tail,
 * enable NAPI and interrupts, start the DMAC, then reset/configure and
 * enable the RX FIFO with flow-control thresholds. Returns 0 or -errno.
 */
static int lan743x_rx_open(struct lan743x_rx *rx)
{
	struct lan743x_adapter *adapter = rx->adapter;
	u32 data = 0;
	int ret;

	rx->frame_count = 0;
	ret = lan743x_rx_ring_init(rx);
	if (ret)
		goto return_error;

	netif_napi_add(adapter->netdev, &rx->napi, lan743x_rx_napi_poll);

	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_RX_SWR_(rx->channel_number));
	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
				 DMAC_CMD_RX_SWR_(rx->channel_number),
				 0, 1000, 20000, 100);

	/* set ring base address */
	lan743x_csr_write(adapter,
			  RX_BASE_ADDRH(rx->channel_number),
			  DMA_ADDR_HIGH32(rx->ring_dma_ptr));
	lan743x_csr_write(adapter,
			  RX_BASE_ADDRL(rx->channel_number),
			  DMA_ADDR_LOW32(rx->ring_dma_ptr));

	/* set rx write back address */
	lan743x_csr_write(adapter,
			  RX_HEAD_WRITEBACK_ADDRH(rx->channel_number),
			  DMA_ADDR_HIGH32(rx->head_dma_ptr));
	lan743x_csr_write(adapter,
			  RX_HEAD_WRITEBACK_ADDRL(rx->channel_number),
			  DMA_ADDR_LOW32(rx->head_dma_ptr));
	data = RX_CFG_A_RX_HP_WB_EN_;
	/* extra writeback/prefetch thresholds only on post-A0 silicon */
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
		data |= (RX_CFG_A_RX_WB_ON_INT_TMR_ |
			RX_CFG_A_RX_WB_THRES_SET_(0x7) |
			RX_CFG_A_RX_PF_THRES_SET_(16) |
			RX_CFG_A_RX_PF_PRI_THRES_SET_(4));
	}

	/* set RX_CFG_A */
	lan743x_csr_write(adapter,
			  RX_CFG_A(rx->channel_number), data);

	/* set RX_CFG_B */
	data = lan743x_csr_read(adapter, RX_CFG_B(rx->channel_number));
	data &= ~RX_CFG_B_RX_PAD_MASK_;
	if (!RX_HEAD_PADDING)
		data |= RX_CFG_B_RX_PAD_0_;
	else
		data |= RX_CFG_B_RX_PAD_2_;
	data &= ~RX_CFG_B_RX_RING_LEN_MASK_;
	data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_);
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
		data |= RX_CFG_B_RDMABL_512_;

	lan743x_csr_write(adapter, RX_CFG_B(rx->channel_number), data);
	rx->vector_flags = lan743x_intr_get_vector_flags(adapter,
							 INT_BIT_DMA_RX_
							 (rx->channel_number));

	/* set RX_CFG_C */
	data = 0;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
		data |= RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
		data |= RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
		data |= RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
		data |= RX_CFG_C_RX_INT_EN_R2C_;
	lan743x_csr_write(adapter, RX_CFG_C(rx->channel_number), data);

	rx->last_tail = ((u32)(rx->ring_size - 1));
	lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
			  rx->last_tail);
	rx->last_head = lan743x_csr_read(adapter, RX_HEAD(rx->channel_number));
	/* after soft reset the hardware head must read back as 0 */
	if (rx->last_head) {
		ret = -EIO;
		goto napi_delete;
	}

	napi_enable(&rx->napi);

	lan743x_csr_write(adapter, INT_EN_SET,
			  INT_BIT_DMA_RX_(rx->channel_number));
	lan743x_csr_write(adapter, DMAC_INT_STS,
			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_START_R_(rx->channel_number));

	/* initialize fifo */
	lan743x_csr_write(adapter, FCT_RX_CTL,
			  FCT_RX_CTL_RESET_(rx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
				 FCT_RX_CTL_RESET_(rx->channel_number),
				 0, 1000, 20000, 100);
	lan743x_csr_write(adapter, FCT_FLOW(rx->channel_number),
			  FCT_FLOW_CTL_REQ_EN_ |
			  FCT_FLOW_CTL_ON_THRESHOLD_SET_(0x2A) |
			  FCT_FLOW_CTL_OFF_THRESHOLD_SET_(0xA));

	/* enable fifo */
	lan743x_csr_write(adapter, FCT_RX_CTL,
			  FCT_RX_CTL_EN_(rx->channel_number));
	return 0;

napi_delete:
	netif_napi_del(&rx->napi);
	lan743x_rx_ring_cleanup(rx);

return_error:
	return ret;
}

/* ndo_stop: shut down all TX then RX channels, then PTP, PHY, MAC and
 * interrupts — the reverse of lan743x_netdev_open().
 */
static int lan743x_netdev_close(struct net_device *netdev)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	int index;

	for (index = 0; index < adapter->used_tx_channels; index++)
		lan743x_tx_close(&adapter->tx[index]);

	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
		lan743x_rx_close(&adapter->rx[index]);

lan743x_ptp_close(adapter); 3076 3077 lan743x_phy_close(adapter); 3078 3079 lan743x_mac_close(adapter); 3080 3081 lan743x_intr_close(adapter); 3082 3083 return 0; 3084 } 3085 3086 static int lan743x_netdev_open(struct net_device *netdev) 3087 { 3088 struct lan743x_adapter *adapter = netdev_priv(netdev); 3089 int index; 3090 int ret; 3091 3092 ret = lan743x_intr_open(adapter); 3093 if (ret) 3094 goto return_error; 3095 3096 ret = lan743x_mac_open(adapter); 3097 if (ret) 3098 goto close_intr; 3099 3100 ret = lan743x_phy_open(adapter); 3101 if (ret) 3102 goto close_mac; 3103 3104 ret = lan743x_ptp_open(adapter); 3105 if (ret) 3106 goto close_phy; 3107 3108 lan743x_rfe_open(adapter); 3109 3110 for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) { 3111 ret = lan743x_rx_open(&adapter->rx[index]); 3112 if (ret) 3113 goto close_rx; 3114 } 3115 3116 for (index = 0; index < adapter->used_tx_channels; index++) { 3117 ret = lan743x_tx_open(&adapter->tx[index]); 3118 if (ret) 3119 goto close_tx; 3120 } 3121 3122 #ifdef CONFIG_PM 3123 if (adapter->netdev->phydev) { 3124 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; 3125 3126 phy_ethtool_get_wol(netdev->phydev, &wol); 3127 adapter->phy_wol_supported = wol.supported; 3128 adapter->phy_wolopts = wol.wolopts; 3129 } 3130 #endif 3131 3132 return 0; 3133 3134 close_tx: 3135 for (index = 0; index < adapter->used_tx_channels; index++) { 3136 if (adapter->tx[index].ring_cpu_ptr) 3137 lan743x_tx_close(&adapter->tx[index]); 3138 } 3139 3140 close_rx: 3141 for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) { 3142 if (adapter->rx[index].ring_cpu_ptr) 3143 lan743x_rx_close(&adapter->rx[index]); 3144 } 3145 lan743x_ptp_close(adapter); 3146 3147 close_phy: 3148 lan743x_phy_close(adapter); 3149 3150 close_mac: 3151 lan743x_mac_close(adapter); 3152 3153 close_intr: 3154 lan743x_intr_close(adapter); 3155 3156 return_error: 3157 netif_warn(adapter, ifup, adapter->netdev, 3158 "Error opening LAN743x\n"); 3159 return ret; 3160 
} 3161 3162 static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb, 3163 struct net_device *netdev) 3164 { 3165 struct lan743x_adapter *adapter = netdev_priv(netdev); 3166 u8 ch = 0; 3167 3168 if (adapter->is_pci11x1x) 3169 ch = skb->queue_mapping % PCI11X1X_USED_TX_CHANNELS; 3170 3171 return lan743x_tx_xmit_frame(&adapter->tx[ch], skb); 3172 } 3173 3174 static int lan743x_netdev_ioctl(struct net_device *netdev, 3175 struct ifreq *ifr, int cmd) 3176 { 3177 if (!netif_running(netdev)) 3178 return -EINVAL; 3179 if (cmd == SIOCSHWTSTAMP) 3180 return lan743x_ptp_ioctl(netdev, ifr, cmd); 3181 return phy_mii_ioctl(netdev->phydev, ifr, cmd); 3182 } 3183 3184 static void lan743x_netdev_set_multicast(struct net_device *netdev) 3185 { 3186 struct lan743x_adapter *adapter = netdev_priv(netdev); 3187 3188 lan743x_rfe_set_multicast(adapter); 3189 } 3190 3191 static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu) 3192 { 3193 struct lan743x_adapter *adapter = netdev_priv(netdev); 3194 int ret = 0; 3195 3196 ret = lan743x_mac_set_mtu(adapter, new_mtu); 3197 if (!ret) 3198 WRITE_ONCE(netdev->mtu, new_mtu); 3199 return ret; 3200 } 3201 3202 static void lan743x_netdev_get_stats64(struct net_device *netdev, 3203 struct rtnl_link_stats64 *stats) 3204 { 3205 struct lan743x_adapter *adapter = netdev_priv(netdev); 3206 3207 stats->rx_packets = lan743x_csr_read(adapter, STAT_RX_TOTAL_FRAMES); 3208 stats->tx_packets = lan743x_csr_read(adapter, STAT_TX_TOTAL_FRAMES); 3209 stats->rx_bytes = lan743x_csr_read(adapter, 3210 STAT_RX_UNICAST_BYTE_COUNT) + 3211 lan743x_csr_read(adapter, 3212 STAT_RX_BROADCAST_BYTE_COUNT) + 3213 lan743x_csr_read(adapter, 3214 STAT_RX_MULTICAST_BYTE_COUNT); 3215 stats->tx_bytes = lan743x_csr_read(adapter, 3216 STAT_TX_UNICAST_BYTE_COUNT) + 3217 lan743x_csr_read(adapter, 3218 STAT_TX_BROADCAST_BYTE_COUNT) + 3219 lan743x_csr_read(adapter, 3220 STAT_TX_MULTICAST_BYTE_COUNT); 3221 stats->rx_errors = lan743x_csr_read(adapter, 
STAT_RX_FCS_ERRORS) + 3222 lan743x_csr_read(adapter, 3223 STAT_RX_ALIGNMENT_ERRORS) + 3224 lan743x_csr_read(adapter, STAT_RX_JABBER_ERRORS) + 3225 lan743x_csr_read(adapter, 3226 STAT_RX_UNDERSIZE_FRAME_ERRORS) + 3227 lan743x_csr_read(adapter, 3228 STAT_RX_OVERSIZE_FRAME_ERRORS); 3229 stats->tx_errors = lan743x_csr_read(adapter, STAT_TX_FCS_ERRORS) + 3230 lan743x_csr_read(adapter, 3231 STAT_TX_EXCESS_DEFERRAL_ERRORS) + 3232 lan743x_csr_read(adapter, STAT_TX_CARRIER_ERRORS); 3233 stats->rx_dropped = lan743x_csr_read(adapter, 3234 STAT_RX_DROPPED_FRAMES); 3235 stats->tx_dropped = lan743x_csr_read(adapter, 3236 STAT_TX_EXCESSIVE_COLLISION); 3237 stats->multicast = lan743x_csr_read(adapter, 3238 STAT_RX_MULTICAST_FRAMES) + 3239 lan743x_csr_read(adapter, 3240 STAT_TX_MULTICAST_FRAMES); 3241 stats->collisions = lan743x_csr_read(adapter, 3242 STAT_TX_SINGLE_COLLISIONS) + 3243 lan743x_csr_read(adapter, 3244 STAT_TX_MULTIPLE_COLLISIONS) + 3245 lan743x_csr_read(adapter, 3246 STAT_TX_LATE_COLLISIONS); 3247 } 3248 3249 static int lan743x_netdev_set_mac_address(struct net_device *netdev, 3250 void *addr) 3251 { 3252 struct lan743x_adapter *adapter = netdev_priv(netdev); 3253 struct sockaddr *sock_addr = addr; 3254 int ret; 3255 3256 ret = eth_prepare_mac_addr_change(netdev, sock_addr); 3257 if (ret) 3258 return ret; 3259 eth_hw_addr_set(netdev, sock_addr->sa_data); 3260 lan743x_mac_set_address(adapter, sock_addr->sa_data); 3261 lan743x_rfe_update_mac_address(adapter); 3262 return 0; 3263 } 3264 3265 static const struct net_device_ops lan743x_netdev_ops = { 3266 .ndo_open = lan743x_netdev_open, 3267 .ndo_stop = lan743x_netdev_close, 3268 .ndo_start_xmit = lan743x_netdev_xmit_frame, 3269 .ndo_eth_ioctl = lan743x_netdev_ioctl, 3270 .ndo_set_rx_mode = lan743x_netdev_set_multicast, 3271 .ndo_change_mtu = lan743x_netdev_change_mtu, 3272 .ndo_get_stats64 = lan743x_netdev_get_stats64, 3273 .ndo_set_mac_address = lan743x_netdev_set_mac_address, 3274 }; 3275 3276 static void 
/* Mask every interrupt source. Safe to call at any teardown stage since
 * it only writes the interrupt-enable-clear register.
 */
lan743x_hardware_cleanup(struct lan743x_adapter *adapter)
{
	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
}

/* Unregister the MDIO bus (the bus structure itself is devm-managed and
 * is freed together with the PCI device).
 */
static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter)
{
	mdiobus_unregister(adapter->mdiobus);
}

/* Full teardown used at device removal: reverse of the probe sequence —
 * netdev, MDIO bus, interrupt masking, then PCI resources.
 */
static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
{
	unregister_netdev(adapter->netdev);

	lan743x_mdiobus_cleanup(adapter);
	lan743x_hardware_cleanup(adapter);
	lan743x_pci_cleanup(adapter);
}

/* On PCI11x1x revision B0 only: set the RFE read FIFO threshold field
 * of MISC_CTL_0 to 3 DWORDs. Other chip revisions are left untouched.
 */
static void pci11x1x_set_rfe_rd_fifo_threshold(struct lan743x_adapter *adapter)
{
	u16 rev = adapter->csr.id_rev & ID_REV_CHIP_REV_MASK_;

	if (rev == ID_REV_CHIP_REV_PCI11X1X_B0_) {
		u32 misc_ctl;

		misc_ctl = lan743x_csr_read(adapter, MISC_CTL_0);
		misc_ctl &= ~MISC_CTL_0_RFE_READ_FIFO_MASK_;
		misc_ctl |= FIELD_PREP(MISC_CTL_0_RFE_READ_FIFO_MASK_,
				       RFE_RD_FIFO_TH_3_DWORDS);
		lan743x_csr_write(adapter, MISC_CTL_0, misc_ctl);
	}
}

/* Hardware bring-up shared by probe and PM resume:
 * - detect the chip family and size TX-channel / IRQ-vector limits
 * - mask all interrupts
 * - initialize the GPIO, MAC, PHY, PTP and DMA-controller sub-blocks
 * - seed the per-channel RX/TX bookkeeping structures
 *
 * Returns 0 on success or the first failing sub-block's negative error
 * code (no unwind is performed here; callers clean up).
 */
static int lan743x_hardware_init(struct lan743x_adapter *adapter,
				 struct pci_dev *pdev)
{
	struct lan743x_tx *tx;
	int index;
	int ret;

	adapter->is_pci11x1x = is_pci11x1x_chip(adapter);
	if (adapter->is_pci11x1x) {
		adapter->max_tx_channels = PCI11X1X_MAX_TX_CHANNELS;
		adapter->used_tx_channels = PCI11X1X_USED_TX_CHANNELS;
		adapter->max_vector_count = PCI11X1X_MAX_VECTOR_COUNT;
		/* PCI11x1x extras: SGMII strap detect, locks, RFE FIFO tune */
		pci11x1x_strap_get_status(adapter);
		spin_lock_init(&adapter->eth_syslock_spinlock);
		mutex_init(&adapter->sgmii_rw_lock);
		pci11x1x_set_rfe_rd_fifo_threshold(adapter);
	} else {
		adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
		adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
		adapter->max_vector_count = LAN743X_MAX_VECTOR_COUNT;
	}

	adapter->intr.irq = adapter->pdev->irq;
	/* start with every interrupt source masked */
	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);

	ret = lan743x_gpio_init(adapter);
	if (ret)
		return ret;

	ret = lan743x_mac_init(adapter);
	if (ret)
		return ret;

	ret = lan743x_phy_init(adapter);
	if (ret)
		return ret;

	ret = lan743x_ptp_init(adapter);
	if (ret)
		return ret;

	lan743x_rfe_update_mac_address(adapter);

	ret = lan743x_dmac_init(adapter);
	if (ret)
		return ret;

	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
		adapter->rx[index].adapter = adapter;
		adapter->rx[index].channel_number = index;
	}

	for (index = 0; index < adapter->used_tx_channels; index++) {
		tx = &adapter->tx[index];
		tx->adapter = adapter;
		tx->channel_number = index;
		spin_lock_init(&tx->ring_lock);
	}

	return 0;
}

/* Allocate and register the MDIO bus. On PCI11x1x the SGMII strap
 * selects between C22+C45 accessors (SGMII mode) and C22-only (RGMII
 * mode); all other chips use C22 only. LAN7430 additionally limits the
 * PHY scan mask to its internal PHY at address 1.
 */
static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
{
	u32 sgmii_ctl;
	int ret;

	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
	if (!(adapter->mdiobus)) {
		ret = -ENOMEM;
		goto return_error;
	}

	adapter->mdiobus->priv = (void *)adapter;
	if (adapter->is_pci11x1x) {
		if (adapter->is_sgmii_en) {
			/* enable and power up the SGMII block */
			sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
			sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
			sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
			lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
			netif_dbg(adapter, drv, adapter->netdev,
				  "SGMII operation\n");
			adapter->mdiobus->read = lan743x_mdiobus_read_c22;
			adapter->mdiobus->write = lan743x_mdiobus_write_c22;
			adapter->mdiobus->read_c45 = lan743x_mdiobus_read_c45;
			adapter->mdiobus->write_c45 = lan743x_mdiobus_write_c45;
			adapter->mdiobus->name = "lan743x-mdiobus-c45";
			netif_dbg(adapter, drv, adapter->netdev,
				  "lan743x-mdiobus-c45\n");
		} else {
			/* RGMII mode: disable and power down SGMII */
			sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
			sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
			sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
			lan743x_csr_write(adapter, SGMII_CTL,
					  sgmii_ctl);
			netif_dbg(adapter, drv, adapter->netdev,
				  "RGMII operation\n");
			/* Only C22 support when RGMII I/F */
			adapter->mdiobus->read = lan743x_mdiobus_read_c22;
			adapter->mdiobus->write = lan743x_mdiobus_write_c22;
			adapter->mdiobus->name = "lan743x-mdiobus";
			netif_dbg(adapter, drv, adapter->netdev,
				  "lan743x-mdiobus\n");
		}
	} else {
		/* non-PCI11x1x chips: plain C22 MDIO bus */
		adapter->mdiobus->read = lan743x_mdiobus_read_c22;
		adapter->mdiobus->write = lan743x_mdiobus_write_c22;
		adapter->mdiobus->name = "lan743x-mdiobus";
		netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus\n");
	}

	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
		 "pci-%s", pci_name(adapter->pdev));

	if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_)
		/* LAN7430 uses internal phy at address 1 */
		adapter->mdiobus->phy_mask = ~(u32)BIT(1);

	/* register mdiobus */
	ret = mdiobus_register(adapter->mdiobus);
	if (ret < 0)
		goto return_error;
	return 0;

return_error:
	return ret;
}

/**
 * lan743x_pcidev_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @id: entry in lan743x_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int lan743x_pcidev_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	struct lan743x_adapter *adapter = NULL;
	struct net_device *netdev = NULL;
	int ret = -ENODEV;

	/* PCI11x1x parts need more TX queues than LAN743x */
	if (id->device == PCI_DEVICE_ID_SMSC_A011 ||
	    id->device == PCI_DEVICE_ID_SMSC_A041) {
		netdev = devm_alloc_etherdev_mqs(&pdev->dev,
						 sizeof(struct lan743x_adapter),
						 PCI11X1X_USED_TX_CHANNELS,
						 LAN743X_USED_RX_CHANNELS);
	} else {
		netdev = devm_alloc_etherdev_mqs(&pdev->dev,
						 sizeof(struct lan743x_adapter),
						 LAN743X_USED_TX_CHANNELS,
						 LAN743X_USED_RX_CHANNELS);
	}

	if (!netdev)
		goto return_error;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
			      NETIF_MSG_LINK | NETIF_MSG_IFUP |
			      NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
	netdev->max_mtu = LAN743X_MAX_FRAME_SIZE;

	/* optional MAC address from device tree; best effort */
	of_get_mac_address(pdev->dev.of_node, adapter->mac_address);

	ret = lan743x_pci_init(adapter, pdev);
	if (ret)
		goto return_error;

	ret = lan743x_csr_init(adapter);
	if (ret)
		goto cleanup_pci;

	ret = lan743x_hardware_init(adapter, pdev);
	if (ret)
		goto cleanup_pci;

	ret = lan743x_mdiobus_init(adapter);
	if (ret)
		goto cleanup_hardware;

	adapter->netdev->netdev_ops = &lan743x_netdev_ops;
	adapter->netdev->ethtool_ops = &lan743x_ethtool_ops;
	adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO |
				    NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	adapter->netdev->hw_features = adapter->netdev->features;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = register_netdev(adapter->netdev);
	if (ret < 0)
		goto cleanup_mdiobus;
	return 0;

	/* unwind in reverse order of acquisition */
cleanup_mdiobus:
	lan743x_mdiobus_cleanup(adapter);

cleanup_hardware:
	lan743x_hardware_cleanup(adapter);

cleanup_pci:
	lan743x_pci_cleanup(adapter);

return_error:
	pr_warn("Initialization failed\n");
	return ret;
}

/**
 * lan743x_pcidev_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * this is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void lan743x_pcidev_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);

	lan743x_full_cleanup(adapter);
}

/* PCI shutdown callback: detach and quiesce the device for power-off or
 * kexec. Also reused as the first step of lan743x_pm_suspend().
 */
static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);

	rtnl_lock();
	netif_device_detach(netdev);

	/* close netdev when netdev is at running state.
	 * For instance, it is true when system goes to sleep by pm-suspend
	 * However, it is false when system goes to sleep by suspend GUI menu
	 */
	if (netif_running(netdev))
		lan743x_netdev_close(netdev);
	rtnl_unlock();

#ifdef CONFIG_PM
	pci_save_state(pdev);
#endif

	/* clean up lan743x portion */
	lan743x_hardware_cleanup(adapter);
}

#ifdef CONFIG_PM_SLEEP
/* Compute the bit-reversed CRC16 that the wake-up frame filter
 * (MAC_WUF_CFG) registers are programmed with.
 */
static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
{
	return bitrev16(crc16(0xFFFF, buf, len));
}

/* Program Wake-on-LAN before entering a sleep state, based on
 * adapter->wolopts (MAC-level wake sources) and adapter->phy_wolopts
 * (PHY wake). Register images for MAC_WUCSR, PMT_CTL and MAC_RX are
 * built in local variables, wake-up frame filters are programmed for
 * the multicast/ARP cases, and the three registers are written last.
 */
static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
{
	/* first bytes of IPv4/IPv6 multicast destination MAC prefixes */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };	/* EtherType ARP 0x0806 */
	int mask_index;
	u32 sopass;
	u32 pmtctl;
	u32 wucsr;
	u32 macrx;
	u16 crc;

	/* disable all wake-up frame filters before reprogramming */
	for (mask_index = 0; mask_index < MAC_NUM_OF_WUF_CFG; mask_index++)
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 0);

	/* clear wake settings */
	pmtctl = lan743x_csr_read(adapter, PMT_CTL);
	pmtctl |= PMT_CTL_WUPS_MASK_ | PMT_CTL_RES_CLR_WKP_MASK_;
	pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
		PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
		PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);

	macrx = lan743x_csr_read(adapter, MAC_RX);

	wucsr = 0;
	mask_index = 0;

	pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;

	/* let the PHY wake the system if PHY WoL is configured */
	if (adapter->phy_wolopts)
		pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;

	if (adapter->wolopts & WAKE_MAGIC) {
		wucsr |= MAC_WUCSR_MPEN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_UCAST) {
		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_PFDA_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_BCAST) {
		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_BCST_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_MCAST) {
		/* IPv4 multicast */
		crc = lan743x_pm_wakeframe_crc16(ipv4_multicast, 3);
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
				  (crc & MAC_WUF_CFG_CRC16_MASK_));
		/* mask 0x7: match the first 3 bytes of the frame */
		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 7);
		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
		mask_index++;

		/* IPv6 multicast */
		crc = lan743x_pm_wakeframe_crc16(ipv6_multicast, 2);
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
				  (crc & MAC_WUF_CFG_CRC16_MASK_));
		/* mask 0x3: match the first 2 bytes of the frame */
		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 3);
		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
		mask_index++;

		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_ARP) {
		/* set MAC_WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan743x_pm_wakeframe_crc16(arp_type, 2);
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_ALL_ |
				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
				  (crc & MAC_WUF_CFG_CRC16_MASK_));
		/* mask bits 12..13: the EtherType field */
		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 0x3000);
		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
		mask_index++;

		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}

	if (adapter->wolopts & WAKE_MAGICSECURE) {
		/* NOTE(review): unaligned casts assume sopass holds the
		 * 6-byte SecureOn password (4 bytes LO + 2 bytes HI)
		 */
		sopass = *(u32 *)adapter->sopass;
		lan743x_csr_write(adapter, MAC_MP_SO_LO, sopass);
		sopass = *(u16 *)&adapter->sopass[4];
		lan743x_csr_write(adapter, MAC_MP_SO_HI, sopass);
		wucsr |= MAC_MP_SO_EN_;
	}

	/* commit the register images built above */
	lan743x_csr_write(adapter, MAC_WUCSR, wucsr);
	lan743x_csr_write(adapter, PMT_CTL, pmtctl);
	lan743x_csr_write(adapter, MAC_RX, macrx);
}

/* System-sleep suspend: quiesce via lan743x_pcidev_shutdown(), clear
 * stale wake status, arm WoL when configured, protect HW_CFG on
 * PCI11x1x parts across D3, then let the PCI core arm PME and enter
 * the sleep state.
 */
static int lan743x_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	u32 data;

	lan743x_pcidev_shutdown(pdev);

	/* clear all wakes */
	lan743x_csr_write(adapter, MAC_WUCSR, 0);
	lan743x_csr_write(adapter, MAC_WUCSR2, 0);
	lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);

	if (adapter->wolopts || adapter->phy_wolopts)
		lan743x_pm_set_wol(adapter);

	if (adapter->is_pci11x1x) {
		/* Save HW_CFG to config again in PM resume */
		data = lan743x_csr_read(adapter, HW_CFG);
		adapter->hw_cfg = data;
		data |= (HW_CFG_RST_PROTECT_PCIE_ |
			 HW_CFG_D3_RESET_DIS_ |
			 HW_CFG_D3_VAUX_OVR_ |
			 HW_CFG_HOT_RESET_DIS_ |
			 HW_CFG_RST_PROTECT_);
		lan743x_csr_write(adapter, HW_CFG, data);
	}

	/* Host sets PME_En, put D3hot */
	return pci_prepare_to_sleep(pdev);
}

/* System-sleep resume: restore PCI state, re-run the hardware init
 * sequence, report and clear the latched wake sources, then reopen and
 * reattach the netdev.
 */
static int lan743x_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	u32 data;
	int ret;

	/* back to D0 before touching config space or CSRs */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	/* Restore HW_CFG that was saved during pm suspend */
	if (adapter->is_pci11x1x)
		lan743x_csr_write(adapter, HW_CFG, adapter->hw_cfg);

	ret = lan743x_hardware_init(adapter, pdev);
	if (ret) {
		netif_err(adapter, probe, adapter->netdev,
			  "lan743x_hardware_init returned %d\n", ret);
		lan743x_pci_cleanup(adapter);
		return ret;
	}

	/* log what woke us up (debug only) */
	ret = lan743x_csr_read(adapter, MAC_WK_SRC);
	netif_dbg(adapter, drv, adapter->netdev,
		  "Wakeup source : 0x%08X\n", ret);

	/* Clear the wol configuration and status bits. Note that
	 * the status bits are "Write One to Clear (W1C)"
	 */
	data = MAC_WUCSR_EEE_TX_WAKE_ | MAC_WUCSR_EEE_RX_WAKE_ |
	       MAC_WUCSR_RFE_WAKE_FR_ | MAC_WUCSR_PFDA_FR_ | MAC_WUCSR_WUFR_ |
	       MAC_WUCSR_MPR_ | MAC_WUCSR_BCAST_FR_;
	lan743x_csr_write(adapter, MAC_WUCSR, data);

	data = MAC_WUCSR2_NS_RCD_ | MAC_WUCSR2_ARP_RCD_ |
	       MAC_WUCSR2_IPV6_TCPSYN_RCD_ | MAC_WUCSR2_IPV4_TCPSYN_RCD_;
	lan743x_csr_write(adapter, MAC_WUCSR2, data);

	/* presumably W1C like MAC_WUCSR above (suspend clears it the same
	 * way) — clear all latched wake-source bits
	 */
	data = MAC_WK_SRC_ETH_PHY_WK_ | MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ |
	       MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ | MAC_WK_SRC_EEE_TX_WK_ |
	       MAC_WK_SRC_EEE_RX_WK_ | MAC_WK_SRC_RFE_FR_WK_ |
	       MAC_WK_SRC_PFDA_FR_WK_ | MAC_WK_SRC_MP_FR_WK_ |
	       MAC_WK_SRC_BCAST_FR_WK_ | MAC_WK_SRC_WU_FR_WK_ |
	       MAC_WK_SRC_WK_FR_SAVED_;
	lan743x_csr_write(adapter, MAC_WK_SRC, data);

	/* open netdev when netdev is at running state while resume.
	 * For instance, it is true when system wakesup after pm-suspend
	 * However, it is false when system wakes up after suspend GUI menu
	 */
	if (netif_running(netdev))
		lan743x_netdev_open(netdev);

	netif_device_attach(netdev);

	return 0;
}

static const struct dev_pm_ops lan743x_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
};
#endif /* CONFIG_PM_SLEEP */

/* PCI IDs this driver binds to: LAN7430/7431 and the PCI11x1x parts */
static const struct pci_device_id lan743x_pcidev_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A011) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A041) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, lan743x_pcidev_tbl);

static struct pci_driver lan743x_pcidev_driver = {
	.name = DRIVER_NAME,
	.id_table = lan743x_pcidev_tbl,
	.probe = lan743x_pcidev_probe,
	.remove = lan743x_pcidev_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &lan743x_pm_ops,
#endif
	.shutdown = lan743x_pcidev_shutdown,
};

module_pci_driver(lan743x_pcidev_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");