/*
 * Driver for (BCM4706)? GBit MAC core on BCMA bus.
 *
 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bgmac.h"

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <bcm47xx_nvram.h>

static const struct bcma_device_id bgmac_bcma_tbl[] = {
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	BCMA_CORETABLE_END
};
MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);

static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
			     u32 value, int timeout)
{
	u32 val;
	int i;

	for (i = 0; i < timeout / 10; i++) {
		val = bcma_read32(core, reg);
		if ((val & mask) == value)
			return true;
		udelay(10);
	}
	pr_err("Timeout waiting for reg 0x%X\n", reg);
	return false;
}

/**************************************************
 * DMA
 **************************************************/

static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	u32 val;
	int i;

	if (!ring->mmio_base)
		return;

	/* Suspend DMA TX ring first.
	 * bgmac_wait_value doesn't support waiting for any of a few values, so
	 * implement the whole loop here.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
		    BGMAC_DMA_TX_SUSPEND);
	for (i = 0; i < 10000 / 10; i++) {
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		val &= BGMAC_DMA_TX_STAT;
		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
		    val == BGMAC_DMA_TX_STAT_STOPPED) {
			i = 0;
			break;
		}
		udelay(10);
	}
	if (i)
		bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
			  ring->mmio_base, val);

	/* Remove SUSPEND bit */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
	if (!bgmac_wait_value(bgmac->core,
			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
			      10000)) {
		bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
			   ring->mmio_base);
		udelay(300);
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
			bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
				  ring->mmio_base);
	}
}

static void bgmac_dma_tx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
	ctl |= BGMAC_DMA_TX_ENABLE;
	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
}

static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	struct bgmac_dma_desc *dma_desc;
	struct bgmac_slot_info *slot;
	u32 ctl0, ctl1;
	int free_slots;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
		goto err_stop_drop;
	}

	if (ring->start <= ring->end)
		free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
	else
		free_slots = ring->start - ring->end;
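	/* One slot is always left unused (see the "keep one slot free" comment
	 * below), so a single remaining free slot means the ring is full and
	 * the queue should already have been stopped.
	 */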
	if (free_slots == 1) {
		bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot = &ring->slots[ring->end];
	slot->skb = skb;
	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, slot->dma_addr)) {
		bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
			  ring->mmio_base);
		goto err_stop_drop;
	}

	ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
	if (ring->end == ring->num_slots - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;

	dma_desc = ring->cpu_base;
	dma_desc += ring->end;
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* Increase ring->end to point to an empty slot. We tell the hardware
	 * the first slot it should *not* read.
	 */
	if (++ring->end >= BGMAC_TX_RING_SLOTS)
		ring->end = 0;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));

	/* Always keep one slot free to allow detecting bugged calls. */
	if (--free_slots == 1)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_stop_drop:
	netif_stop_queue(net_dev);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* Free transmitted packets */
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	int empty_slot;
	bool freed = false;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* The last slot that hardware didn't consume yet */
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != empty_slot) {
		struct bgmac_slot_info *slot = &ring->slots[ring->start];

		if (slot->skb) {
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr,
					 slot->skb->len, DMA_TO_DEVICE);
			slot->dma_addr = 0;

			bytes_compl += slot->skb->len;
			pkts_compl++;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
		} else {
			bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n",
				  ring->start, ring->end);
		}

		if (++ring->start >= BGMAC_TX_RING_SLOTS)
			ring->start = 0;
		freed = true;
	}

	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);

	if (freed && netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
}

static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	if (!ring->mmio_base)
		return;

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
	if (!bgmac_wait_value(bgmac->core,
			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
			      BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
			      10000))
		bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n",
			  ring->mmio_base);
}

static void bgmac_dma_rx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
	ctl |= BGMAC_DMA_RX_ENABLE;
	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
}

static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
				     struct bgmac_slot_info *slot)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	struct bgmac_rx_header *rx;

	/* Alloc skb */
	skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
	if (!skb)
		return -ENOMEM;

	/* Poison - if everything goes fine, hardware will overwrite it */
	rx = (struct bgmac_rx_header *)skb->data;
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);

	/* Map skb for the DMA */
	dma_addr = dma_map_single(dma_dev, skb->data,
				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr)) {
		bgmac_err(bgmac, "DMA mapping error\n");
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	/* Update the slot */
	slot->skb = skb;
	slot->dma_addr = dma_addr;

	if (slot->dma_addr & 0xC0000000)
		bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

	return 0;
}

static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring, int desc_idx)
{
	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
	u32 ctl0 = 0, ctl1 = 0;

	if (desc_idx == ring->num_slots - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
	/* Is there any BGMAC device that requires extension? */
	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
	 * B43_DMA64_DCTL1_ADDREXT_MASK;
	 */

	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);
}

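/* Each RX buffer begins with a struct bgmac_rx_header that the hardware fills
 * in with the frame length and flags; the frame itself starts at
 * BGMAC_RX_FRAME_OFFSET (programmed into the RX control register in
 * bgmac_dma_rx_enable()). bgmac_dma_rx_read() below therefore strips that
 * header with skb_put()/skb_pull() before passing the skb up the stack.
 */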
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	ring->end = end_slot;

	while (ring->start != ring->end) {
		struct device *dma_dev = bgmac->core->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct sk_buff *skb = slot->skb;
		struct bgmac_rx_header *rx;
		u16 len, flags;

		/* Unmap buffer to make it accessible to the CPU */
		dma_sync_single_for_cpu(dma_dev, slot->dma_addr,
					BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

		/* Get info from the header */
		rx = (struct bgmac_rx_header *)skb->data;
		len = le16_to_cpu(rx->len);
		flags = le16_to_cpu(rx->flags);

		do {
			dma_addr_t old_dma_addr = slot->dma_addr;
			int err;

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
					  ring->start);
				dma_sync_single_for_device(dma_dev,
							   slot->dma_addr,
							   BGMAC_RX_BUF_SIZE,
							   DMA_FROM_DEVICE);
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			/* Prepare new skb as replacement */
			err = bgmac_dma_rx_skb_for_slot(bgmac, slot);
			if (err) {
				/* Poison the old skb */
				rx->len = cpu_to_le16(0xdead);
				rx->flags = cpu_to_le16(0xbeef);

				dma_sync_single_for_device(dma_dev,
							   slot->dma_addr,
							   BGMAC_RX_BUF_SIZE,
							   DMA_FROM_DEVICE);
				break;
			}
			bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

			/* Unmap the old skb, we'll pass it to the netif */
			dma_unmap_single(dma_dev, old_dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			netif_receive_skb(skb);
			handled++;
		} while (0);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	return handled;
}

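/* When the hardware keeps the low bits written to the ring base register (see
 * bgmac_dma_unaligned()), the ring is "unaligned" and the index registers work
 * relative to the low 32 bits of the DMA base: ring->index_base is added to
 * every index written to the hardware and subtracted from every index read
 * back from the status registers.
 */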
/* Does ring support unaligned addressing? */
static bool bgmac_dma_unaligned(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring,
				enum bgmac_dma_ring_type ring_type)
{
	switch (ring_type) {
	case BGMAC_DMA_RING_TX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
			return true;
		break;
	case BGMAC_DMA_RING_RX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
			return true;
		break;
	}
	return false;
}

static void bgmac_dma_ring_free(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_slot_info *slot;
	int size;
	int i;

	for (i = 0; i < ring->num_slots; i++) {
		slot = &ring->slots[i];
		if (slot->skb) {
			if (slot->dma_addr)
				dma_unmap_single(dma_dev, slot->dma_addr,
						 slot->skb->len, DMA_TO_DEVICE);
			dev_kfree_skb(slot->skb);
		}
	}

	if (ring->cpu_base) {
		/* Free ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		dma_free_coherent(dma_dev, size, ring->cpu_base,
				  ring->dma_base);
	}
}

static void bgmac_dma_free(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]);
	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]);
}

static int bgmac_dma_alloc(struct bgmac *bgmac)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_dma_ring *ring;
	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
	int size; /* ring size: different for Tx and Rx */
	int err;
	int i;

	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));

	if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
		bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
		return -ENOTSUPP;
	}

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];
		ring->num_slots = BGMAC_TX_RING_SLOTS;
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
				  ring->mmio_base);
			goto err_dma_free;
		}
		if (ring->dma_base & 0xC0000000)
			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_TX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* No need to alloc TX slots yet */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];
		ring->num_slots = BGMAC_RX_RING_SLOTS;
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
				  ring->mmio_base);
			err = -ENOMEM;
			goto err_dma_free;
		}
		if (ring->dma_base & 0xC0000000)
			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_RX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* Alloc RX slots */
		for (j = 0; j < ring->num_slots; j++) {
			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
			if (err) {
				bgmac_err(bgmac, "Can't allocate skb for slot in RX ring\n");
				goto err_dma_free;
			}
		}
	}

	return 0;

err_dma_free:
	bgmac_dma_free(bgmac);
	return -ENOMEM;
}

static void bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points to the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);

		for (j = 0; j < ring->num_slots; j++)
			bgmac_dma_rx_setup_desc(bgmac, ring, j);

		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
			    ring->index_base +
			    ring->num_slots * sizeof(struct bgmac_dma_desc));

		ring->start = 0;
		ring->end = 0;
	}
}

/**************************************************
 * PHY ops
 **************************************************/

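/* PHY registers are accessed indirectly through a PHY access/control register
 * pair: on BCM4706 the registers of the GMAC common core are used, on other
 * chips those of the MAC core itself. A transaction is started by setting
 * BGMAC_PA_START and has completed once the hardware clears that bit again.
 */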
static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
{
	struct bcma_device *core;
	u16 phy_access_addr;
	u16 phy_ctl_addr;
	u32 tmp;

	BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
	BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
	BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
	BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
	BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
	BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
	BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
	BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
	BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
	BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
	BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
		core = bgmac->core->bus->drv_gmac_cmn.core;
		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
	} else {
		core = bgmac->core;
		phy_access_addr = BGMAC_PHY_ACCESS;
		phy_ctl_addr = BGMAC_PHY_CNTL;
	}

	tmp = bcma_read32(core, phy_ctl_addr);
	tmp &= ~BGMAC_PC_EPA_MASK;
	tmp |= phyaddr;
	bcma_write32(core, phy_ctl_addr, tmp);

	tmp = BGMAC_PA_START;
	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
	tmp |= reg << BGMAC_PA_REG_SHIFT;
	bcma_write32(core, phy_access_addr, tmp);

	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
		bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n",
			  phyaddr, reg);
		return 0xffff;
	}

	return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
{
	struct bcma_device *core;
	u16 phy_access_addr;
	u16 phy_ctl_addr;
	u32 tmp;

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
		core = bgmac->core->bus->drv_gmac_cmn.core;
		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
	} else {
		core = bgmac->core;
		phy_access_addr = BGMAC_PHY_ACCESS;
		phy_ctl_addr = BGMAC_PHY_CNTL;
	}

	tmp = bcma_read32(core, phy_ctl_addr);
	tmp &= ~BGMAC_PC_EPA_MASK;
	tmp |= phyaddr;
	bcma_write32(core, phy_ctl_addr, tmp);

	bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
		bgmac_warn(bgmac, "Error setting MDIO int\n");

	tmp = BGMAC_PA_START;
	tmp |= BGMAC_PA_WRITE;
	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
	tmp |= reg << BGMAC_PA_REG_SHIFT;
	tmp |= value;
	bcma_write32(core, phy_access_addr, tmp);

	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
		bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
			  phyaddr, reg);
		return -ETIMEDOUT;
	}

	return 0;
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
static void bgmac_phy_init(struct bgmac *bgmac)
{
	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
	struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
	u8 i;

	if (ci->id == BCMA_CHIP_ID_BCM5356) {
		for (i = 0; i < 5; i++) {
			bgmac_phy_write(bgmac, i, 0x1f, 0x008b);
			bgmac_phy_write(bgmac, i, 0x15, 0x0100);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x12, 0x2aaa);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
		}
	}
	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
		bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
		bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
		for (i = 0; i < 5; i++) {
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x16, 0x5284);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
			bgmac_phy_write(bgmac, i, 0x17, 0x0010);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x16, 0x5296);
			bgmac_phy_write(bgmac, i, 0x17, 0x1073);
			bgmac_phy_write(bgmac, i, 0x17, 0x9073);
			bgmac_phy_write(bgmac, i, 0x16, 0x52b6);
			bgmac_phy_write(bgmac, i, 0x17, 0x9273);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
		}
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
static void bgmac_phy_reset(struct bgmac *bgmac)
{
	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
		return;

	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
			BGMAC_PHY_CTL_RESET);
	udelay(100);
	if (bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) &
	    BGMAC_PHY_CTL_RESET)
		bgmac_err(bgmac, "PHY reset failed\n");
	bgmac_phy_init(bgmac);
}

/**************************************************
 * Chip ops
 **************************************************/

/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
 * there is nothing to change? Try it after stabilizing the driver.
 */
static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
				 bool force)
{
	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	u32 new_val = (cmdcfg & mask) | set;

	bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR);
	udelay(2);

	if (new_val != cmdcfg || force)
		bgmac_write(bgmac, BGMAC_CMDCFG, new_val);

	bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR);
	udelay(2);
}

static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
{
	u32 tmp;

	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
	tmp = (addr[4] << 8) | addr[5];
	bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
}

static void bgmac_set_rx_mode(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (net_dev->flags & IFF_PROMISC)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
}

#if 0 /* We don't use these registers yet */
static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
	int i;

	if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) {
		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
			bgmac->mib_tx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_TX_GOOD_OCTETS + (i * 4));
		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
			bgmac->mib_rx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_RX_GOOD_OCTETS + (i * 4));
	}

	/* TODO: what else? How to handle BCM4706? Specs are needed */
}
#endif

static void bgmac_clear_mib(struct bgmac *bgmac)
{
	int i;

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT)
		return;

	bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
	for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
		bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
	for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
		bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_mac_speed(struct bgmac *bgmac)
{
	u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
	u32 set = 0;

	switch (bgmac->mac_speed) {
	case SPEED_10:
		set |= BGMAC_CMDCFG_ES_10;
		break;
	case SPEED_100:
		set |= BGMAC_CMDCFG_ES_100;
		break;
	case SPEED_1000:
		set |= BGMAC_CMDCFG_ES_1000;
		break;
	default:
		bgmac_err(bgmac, "Unsupported speed: %d\n", bgmac->mac_speed);
	}

	if (bgmac->mac_duplex == DUPLEX_HALF)
		set |= BGMAC_CMDCFG_HD;

	bgmac_cmdcfg_maskset(bgmac, mask, set, true);
}

static void bgmac_miiconfig(struct bgmac *bgmac)
{
	u8 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
		   BGMAC_DS_MM_SHIFT;
	if (imode == 0 || imode == 1) {
		bgmac->mac_speed = SPEED_100;
		bgmac->mac_duplex = DUPLEX_FULL;
		bgmac_mac_speed(bgmac);
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
	struct bcma_device *core = bgmac->core;
	struct bcma_bus *bus = core->bus;
	struct bcma_chipinfo *ci = &bus->chipinfo;
	u32 flags = 0;
	u32 iost;
	int i;

	if (bcma_core_is_enabled(core)) {
		if (!bgmac->stats_grabbed) {
			/* bgmac_chip_stats_update(bgmac); */
			bgmac->stats_grabbed = true;
		}

		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);

		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
		udelay(1);

		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);

		/* TODO: Clear software multicast filter list */
	}

	iost = bcma_aread32(core, BCMA_IOST);
	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188))
		iost &= ~BGMAC_BCMA_IOST_ATTACHED;

	if (iost & BGMAC_BCMA_IOST_ATTACHED) {
		flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
		if (!bgmac->has_robosw)
			flags |= BGMAC_BCMA_IOCTL_SW_RESET;
	}

	bcma_core_enable(core, flags);

	if (core->id.rev > 2) {
		bgmac_set(bgmac, BCMA_CLKCTLST,
			  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
		bgmac_wait_value(bgmac->core, BCMA_CLKCTLST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 1000);
	}

	if (ci->id == BCMA_CHIP_ID_BCM5357 ||
	    ci->id == BCMA_CHIP_ID_BCM4749 ||
	    ci->id == BCMA_CHIP_ID_BCM53572) {
		struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
		u8 et_swtype = 0;
		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
					  buf);
			et_swtype &= 0x0f;
			et_swtype <<= 4;
			sw_type = et_swtype;
		} else if (ci->id == BCMA_CHIP_ID_BCM5357 &&
			   ci->pkg == BCMA_PKG_ID_BCM5358) {
			sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
		} else if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
			   (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
			   (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
		}
		bcma_chipco_chipctl_maskset(cc, 1,
					    ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
					      BGMAC_CHIPCTL_1_SW_TYPE_MASK),
					    sw_type);
	}

	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
		bcma_awrite32(core, BCMA_IOCTL,
			      bcma_aread32(core, BCMA_IOCTL) &
			      ~BGMAC_BCMA_IOCTL_SW_RESET);

	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
	 * The specs don't mention using BGMAC_CMDCFG_SR, but in this routine
	 * BGMAC_CMDCFG is read _after_ putting the chip in a reset, so it has
	 * to be kept set until taking the MAC out of the reset.
	 */
	bgmac_cmdcfg_maskset(bgmac,
			     ~(BGMAC_CMDCFG_TE |
			       BGMAC_CMDCFG_RE |
			       BGMAC_CMDCFG_RPI |
			       BGMAC_CMDCFG_TAI |
			       BGMAC_CMDCFG_HD |
			       BGMAC_CMDCFG_ML |
			       BGMAC_CMDCFG_CFE |
			       BGMAC_CMDCFG_RL |
			       BGMAC_CMDCFG_RED |
			       BGMAC_CMDCFG_PE |
			       BGMAC_CMDCFG_TPI |
			       BGMAC_CMDCFG_PAD_EN |
			       BGMAC_CMDCFG_PF),
			     BGMAC_CMDCFG_PROM |
			     BGMAC_CMDCFG_NLC |
			     BGMAC_CMDCFG_CFE |
			     BGMAC_CMDCFG_SR,
			     false);
	bgmac->mac_speed = SPEED_UNKNOWN;
	bgmac->mac_duplex = DUPLEX_UNKNOWN;

	bgmac_clear_mib(bgmac);
	if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
		bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
			       BCMA_GMAC_CMN_PC_MTE);
	else
		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
	bgmac_miiconfig(bgmac);
	bgmac_phy_init(bgmac);

	netdev_reset_queue(bgmac->net_dev);

	bgmac->int_status = 0;
}

static void bgmac_chip_intrs_on(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
}

static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
	bgmac_read(bgmac, BGMAC_INT_MASK);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
static void bgmac_enable(struct bgmac *bgmac)
{
	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
	u32 cmdcfg;
	u32 mode;
	u32 rxq_ctl;
	u32 fl_ctl;
	u16 bp_clk;
	u8 mdp;

	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
			     BGMAC_CMDCFG_SR, true);
	udelay(2);
	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);

	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
	       BGMAC_DS_MM_SHIFT;
	if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0)
		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
	if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2)
		bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0,
					    BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);

	switch (ci->id) {
	case BCMA_CHIP_ID_BCM5357:
	case BCMA_CHIP_ID_BCM4749:
	case BCMA_CHIP_ID_BCM53572:
	case BCMA_CHIP_ID_BCM4716:
	case BCMA_CHIP_ID_BCM47162:
		fl_ctl = 0x03cb04cb;
		if (ci->id == BCMA_CHIP_ID_BCM5357 ||
		    ci->id == BCMA_CHIP_ID_BCM4749 ||
		    ci->id == BCMA_CHIP_ID_BCM53572)
			fl_ctl = 0x2300e1;
		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
		bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
		break;
	}

	rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
	rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
	bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / 1000000;
	mdp = (bp_clk * 128 / 1000) - 3;
	rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
	bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
{
	struct bgmac_dma_ring *ring;
	int i;

	/* 1 interrupt per received frame */
	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);

	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
	bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);

	bgmac_set_rx_mode(bgmac->net_dev);

	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);

	if (bgmac->loopback)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);

	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);

	if (full_init) {
		bgmac_dma_init(bgmac);
		if (1) /* FIXME: is there any case we don't want IRQs? */
			bgmac_chip_intrs_on(bgmac);
	} else {
		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
			ring = &bgmac->rx_ring[i];
			bgmac_dma_rx_enable(bgmac, ring);
		}
	}

	bgmac_enable(bgmac);
}

static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
{
	struct bgmac *bgmac = netdev_priv(dev_id);

	u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
	int_status &= bgmac->int_mask;

	if (!int_status)
		return IRQ_NONE;

	/* Ack */
	bgmac_write(bgmac, BGMAC_INT_STATUS, int_status);

	/* Disable new interrupts until handling existing ones */
	bgmac_chip_intrs_off(bgmac);

	bgmac->int_status = int_status;

	napi_schedule(&bgmac->napi);

	return IRQ_HANDLED;
}

static int bgmac_poll(struct napi_struct *napi, int weight)
{
	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
	struct bgmac_dma_ring *ring;
	int handled = 0;

	if (bgmac->int_status & BGMAC_IS_TX0) {
		ring = &bgmac->tx_ring[0];
		bgmac_dma_tx_free(bgmac, ring);
		bgmac->int_status &= ~BGMAC_IS_TX0;
	}

	if (bgmac->int_status & BGMAC_IS_RX) {
		ring = &bgmac->rx_ring[0];
		handled += bgmac_dma_rx_read(bgmac, ring, weight);
		bgmac->int_status &= ~BGMAC_IS_RX;
	}

	if (bgmac->int_status) {
		bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", bgmac->int_status);
		bgmac->int_status = 0;
	}

	if (handled < weight)
		napi_complete(napi);

	bgmac_chip_intrs_on(bgmac);

	return handled;
}

/**************************************************
 * net_device_ops
 **************************************************/

static int bgmac_open(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int err = 0;

	bgmac_chip_reset(bgmac);
	/* The specs mention reclaiming the rings here, but we do that in DMA init */
	bgmac_chip_init(bgmac, true);

	err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
			  KBUILD_MODNAME, net_dev);
	if (err < 0) {
		bgmac_err(bgmac, "IRQ request error: %d!\n", err);
		goto err_out;
	}
	napi_enable(&bgmac->napi);

	phy_start(bgmac->phy_dev);

	netif_carrier_on(net_dev);

err_out:
	return err;
}

static int bgmac_stop(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	netif_carrier_off(net_dev);

	phy_stop(bgmac->phy_dev);

	napi_disable(&bgmac->napi);
	bgmac_chip_intrs_off(bgmac);
	free_irq(bgmac->core->irq, net_dev);

	bgmac_chip_reset(bgmac);

	return 0;
}

static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
				    struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct bgmac_dma_ring *ring;

	/* No QOS support yet */
	ring = &bgmac->tx_ring[0];
	return bgmac_dma_tx_add(bgmac, ring, skb);
}

static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int ret;

	ret = eth_prepare_mac_addr_change(net_dev, addr);
	if (ret < 0)
		return ret;
	bgmac_write_mac_address(bgmac, (u8 *)addr);
	eth_commit_mac_addr_change(net_dev, addr);
	return 0;
}

static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bgmac->phyaddr;
		/* fallthru */
	case SIOCGMIIREG:
		if (!netif_running(net_dev))
			return -EAGAIN;
		data->val_out = bgmac_phy_read(bgmac, data->phy_id,
					       data->reg_num & 0x1f);
		return 0;
	case SIOCSMIIREG:
		if (!netif_running(net_dev))
			return -EAGAIN;
		bgmac_phy_write(bgmac, data->phy_id, data->reg_num & 0x1f,
				data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops bgmac_netdev_ops = {
	.ndo_open		= bgmac_open,
	.ndo_stop		= bgmac_stop,
	.ndo_start_xmit		= bgmac_start_xmit,
	.ndo_set_rx_mode	= bgmac_set_rx_mode,
	.ndo_set_mac_address	= bgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bgmac_ioctl,
};

/**************************************************
 * ethtool_ops
 **************************************************/

static int bgmac_get_settings(struct net_device *net_dev,
			      struct ethtool_cmd *cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	return phy_ethtool_gset(bgmac->phy_dev, cmd);
}

static int bgmac_set_settings(struct net_device *net_dev,
			      struct ethtool_cmd *cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	return phy_ethtool_sset(bgmac->phy_dev, cmd);
}

static void bgmac_get_drvinfo(struct net_device *net_dev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info));
}

static const struct ethtool_ops bgmac_ethtool_ops = {
	.get_settings		= bgmac_get_settings,
	.set_settings		= bgmac_set_settings,
	.get_drvinfo		= bgmac_get_drvinfo,
};

/**************************************************
 * MII
 **************************************************/

static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return bgmac_phy_read(bus->priv, mii_id, regnum);
}

static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	return bgmac_phy_write(bus->priv, mii_id, regnum, value);
}

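/* The MDIO bus registered below is backed by bgmac_phy_read()/bgmac_phy_write()
 * so the PHY library can drive the PHY; bgmac_adjust_link() then propagates the
 * negotiated speed and duplex into the MAC via bgmac_mac_speed().
 */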
static void bgmac_adjust_link(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct phy_device *phy_dev = bgmac->phy_dev;
	bool update = false;

	if (phy_dev->link) {
		if (phy_dev->speed != bgmac->mac_speed) {
			bgmac->mac_speed = phy_dev->speed;
			update = true;
		}

		if (phy_dev->duplex != bgmac->mac_duplex) {
			bgmac->mac_duplex = phy_dev->duplex;
			update = true;
		}
	}

	if (update) {
		bgmac_mac_speed(bgmac);
		phy_print_status(phy_dev);
	}
}

static int bgmac_mii_register(struct bgmac *bgmac)
{
	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	char bus_id[MII_BUS_ID_SIZE + 3];
	int i, err = 0;

	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "bgmac mii bus";
	sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
		bgmac->core->core_unit);
	mii_bus->priv = bgmac;
	mii_bus->read = bgmac_mii_read;
	mii_bus->write = bgmac_mii_write;
	mii_bus->parent = &bgmac->core->dev;
	mii_bus->phy_mask = ~(1 << bgmac->phyaddr);

	mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
	if (!mii_bus->irq) {
		err = -ENOMEM;
		goto err_free_bus;
	}
	for (i = 0; i < PHY_MAX_ADDR; i++)
		mii_bus->irq[i] = PHY_POLL;

	err = mdiobus_register(mii_bus);
	if (err) {
		bgmac_err(bgmac, "Registration of mii bus failed\n");
		goto err_free_irq;
	}

	bgmac->mii_bus = mii_bus;

	/* Connect to the PHY */
	snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
		 bgmac->phyaddr);
	phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
			      PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phy_dev)) {
		bgmac_err(bgmac, "PHY connection failed\n");
		err = PTR_ERR(phy_dev);
		goto err_unregister_bus;
	}
	bgmac->phy_dev = phy_dev;

	return err;

err_unregister_bus:
	mdiobus_unregister(mii_bus);
err_free_irq:
	kfree(mii_bus->irq);
err_free_bus:
	mdiobus_free(mii_bus);
	return err;
}

static void bgmac_mii_unregister(struct bgmac *bgmac)
{
	struct mii_bus *mii_bus = bgmac->mii_bus;

	mdiobus_unregister(mii_bus);
	kfree(mii_bus->irq);
	mdiobus_free(mii_bus);
}

/**************************************************
 * BCMA bus ops
 **************************************************/

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
static int bgmac_probe(struct bcma_device *core)
{
	struct net_device *net_dev;
	struct bgmac *bgmac;
	struct ssb_sprom *sprom = &core->bus->sprom;
	u8 *mac = core->core_unit ? sprom->et1mac : sprom->et0mac;
	int err;

	/* We don't support 2nd, 3rd, ... units; the SPROM would have to be adjusted */
	if (core->core_unit > 1) {
		pr_err("Unsupported core_unit %d\n", core->core_unit);
		return -ENOTSUPP;
	}

	if (!is_valid_ether_addr(mac)) {
		dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
		eth_random_addr(mac);
		dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
	}

	/* Allocation and references */
	net_dev = alloc_etherdev(sizeof(*bgmac));
	if (!net_dev)
		return -ENOMEM;
	net_dev->netdev_ops = &bgmac_netdev_ops;
	net_dev->irq = core->irq;
	SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops);
	bgmac = netdev_priv(net_dev);
	bgmac->net_dev = net_dev;
	bgmac->core = core;
	bcma_set_drvdata(core, bgmac);

	/* Defaults */
	memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);

	/* On BCM4706 we need common core to access PHY */
	if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
	    !core->bus->drv_gmac_cmn.core) {
		bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
		err = -ENODEV;
		goto err_netdev_free;
	}
	bgmac->cmn = core->bus->drv_gmac_cmn.core;

	bgmac->phyaddr = core->core_unit ? sprom->et1phyaddr :
			 sprom->et0phyaddr;
	bgmac->phyaddr &= BGMAC_PHY_MASK;
	if (bgmac->phyaddr == BGMAC_PHY_MASK) {
		bgmac_err(bgmac, "No PHY found\n");
		err = -ENODEV;
		goto err_netdev_free;
	}
	bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
		   bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");

	if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
		bgmac_err(bgmac, "PCI setup not implemented\n");
		err = -ENOTSUPP;
		goto err_netdev_free;
	}

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_alloc(bgmac);
	if (err) {
		bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
		goto err_netdev_free;
	}

	bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;

	/* TODO: reset the external phy. Specs are needed */
	bgmac_phy_reset(bgmac);

	bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
			       BGMAC_BFL_ENETROBO);
	if (bgmac->has_robosw)
		bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");

	if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
		bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");

	err = bgmac_mii_register(bgmac);
	if (err) {
		bgmac_err(bgmac, "Cannot register MDIO\n");
		err = -ENOTSUPP;
		goto err_dma_free;
	}

	err = register_netdev(bgmac->net_dev);
	if (err) {
		bgmac_err(bgmac, "Cannot register net device\n");
		err = -ENOTSUPP;
		goto err_mii_unregister;
	}

	netif_carrier_off(net_dev);

	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);

	return 0;

err_mii_unregister:
	bgmac_mii_unregister(bgmac);
err_dma_free:
	bgmac_dma_free(bgmac);

err_netdev_free:
	bcma_set_drvdata(core, NULL);
	free_netdev(net_dev);

	return err;
}

static void bgmac_remove(struct bcma_device *core)
{
	struct bgmac *bgmac = bcma_get_drvdata(core);

	netif_napi_del(&bgmac->napi);
	unregister_netdev(bgmac->net_dev);
	bgmac_mii_unregister(bgmac);
	bgmac_dma_free(bgmac);
	bcma_set_drvdata(core, NULL);
	free_netdev(bgmac->net_dev);
}

static struct bcma_driver bgmac_bcma_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= bgmac_bcma_tbl,
	.probe		= bgmac_probe,
	.remove		= bgmac_remove,
};

static int __init bgmac_init(void)
{
	int err;

	err = bcma_driver_register(&bgmac_bcma_driver);
	if (err)
		return err;
	pr_info("Broadcom 47xx GBit MAC driver loaded\n");

	return 0;
}

static void __exit bgmac_exit(void)
{
	bcma_driver_unregister(&bgmac_bcma_driver);
}

module_init(bgmac_init)
module_exit(bgmac_exit)

MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");