/*
 * Driver for (BCM4706)? GBit MAC core on BCMA bus.
 *
 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bgmac.h"

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/bcm47xx_nvram.h>

static const struct bcma_device_id bgmac_bcma_tbl[] = {
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	{},
};
MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);

static inline bool bgmac_is_bcm4707_family(struct bgmac *bgmac)
{
	switch (bgmac->core->bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM4707:
	case BCMA_CHIP_ID_BCM47094:
	case BCMA_CHIP_ID_BCM53018:
		return true;
	default:
		return false;
	}
}

static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
			     u32 value, int timeout)
{
	u32 val;
	int i;

	for (i = 0; i < timeout / 10; i++) {
		val = bcma_read32(core, reg);
		if ((val & mask) == value)
			return true;
		udelay(10);
	}
	pr_err("Timeout waiting for reg 0x%X\n", reg);
	return false;
}

/**************************************************
 * DMA
 **************************************************/
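/* Each DMA ring is an array of 16-byte descriptors (addr_low, addr_high,
 * ctl0, ctl1) kept in coherent memory. The hardware addresses ring slots
 * as byte offsets relative to ring->index_base, so slot N corresponds to
 * offset N * sizeof(struct bgmac_dma_desc); see the BGMAC_DMA_TX_INDEX and
 * BGMAC_DMA_RX_INDEX writes below.
 */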
static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	u32 val;
	int i;

	if (!ring->mmio_base)
		return;

	/* Suspend DMA TX ring first.
	 * bgmac_wait_value() can't wait for any one of several values, so
	 * open-code the whole loop here.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
		    BGMAC_DMA_TX_SUSPEND);
	for (i = 0; i < 10000 / 10; i++) {
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		val &= BGMAC_DMA_TX_STAT;
		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
		    val == BGMAC_DMA_TX_STAT_STOPPED) {
			i = 0;
			break;
		}
		udelay(10);
	}
	if (i)
		bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
			  ring->mmio_base, val);

	/* Remove SUSPEND bit */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
	if (!bgmac_wait_value(bgmac->core,
			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
			      10000)) {
		bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
			   ring->mmio_base);
		udelay(300);
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
			bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
				  ring->mmio_base);
	}
}

static void bgmac_dma_tx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
	if (bgmac->core->id.rev >= 4) {
		ctl &= ~BGMAC_DMA_TX_BL_MASK;
		ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_TX_MR_MASK;
		ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PC_MASK;
		ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PT_MASK;
		ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_TX_ENABLE;
	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
}

static void
bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
		     int i, int len, u32 ctl0)
{
	struct bgmac_slot_info *slot;
	struct bgmac_dma_desc *dma_desc;
	u32 ctl1;

	if (i == BGMAC_TX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;

	ctl1 = len & BGMAC_DESC_CTL1_LEN;

	slot = &ring->slots[i];
	dma_desc = &ring->cpu_base[i];
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);
}
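/* ring->start and ring->end are free-running counters: slots are addressed
 * modulo BGMAC_TX_RING_SLOTS, so (ring->end - ring->start) is the number of
 * occupied slots even after the counters wrap around.
 */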
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	int index = ring->end % BGMAC_TX_RING_SLOTS;
	struct bgmac_slot_info *slot = &ring->slots[index];
	int nr_frags;
	u32 flags;
	int i;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		bgmac_err(bgmac, "skb too long (%d)\n", skb->len);
		goto err_drop;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb_checksum_help(skb);

	nr_frags = skb_shinfo(skb)->nr_frags;

	/* ring->end - ring->start will return the number of valid slots,
	 * even when ring->end overflows
	 */
	if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
		bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
					DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
		goto err_dma_head;

	flags = BGMAC_DESC_CTL0_SOF;
	if (!nr_frags)
		flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

	bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
	flags = 0;

	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);

		index = (index + 1) % BGMAC_TX_RING_SLOTS;
		slot = &ring->slots[index];
		slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
						  len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
			goto err_dma;

		if (i == nr_frags - 1)
			flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

		bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
	}

	slot->skb = skb;
	ring->end += nr_frags + 1;
	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* ring->end now points to the first empty slot; tell the hardware the
	 * first slot it should *not* read.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    (ring->end % BGMAC_TX_RING_SLOTS) *
		    sizeof(struct bgmac_dma_desc));

	if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_dma:
	/* Unmap the fragments mapped so far; frag j was mapped into slot
	 * (ring->end + 1 + j).
	 */
	while (i-- > 0) {
		int index = (ring->end + 1 + i) % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[index];
		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
		int len = ctl1 & BGMAC_DESC_CTL1_LEN;

		dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
	}

	/* Unmap the skb head; its address is still in the first used slot */
	dma_unmap_single(dma_dev,
			 ring->slots[ring->end % BGMAC_TX_RING_SLOTS].dma_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

err_dma_head:
	bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
		  ring->mmio_base);

err_drop:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
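/* The DMA engine reports the descriptor it is currently processing as a
 * byte offset in the STATUS register; subtracting ring->index_base and
 * dividing by the descriptor size converts that back into a slot index.
 */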
/* Free transmitted packets */
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	int empty_slot;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* The last slot that hardware didn't consume yet */
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != ring->end) {
		int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[slot_idx];
		u32 ctl0, ctl1;
		int len;

		if (slot_idx == empty_slot)
			break;

		ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
		ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
		len = ctl1 & BGMAC_DESC_CTL1_LEN;
		/* The SOF flag lives in ctl0, not ctl1 */
		if (ctl0 & BGMAC_DESC_CTL0_SOF)
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr, len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr, len,
				       DMA_TO_DEVICE);

		if (slot->skb) {
			bytes_compl += slot->skb->len;
			pkts_compl++;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
		}

		slot->dma_addr = 0;
		ring->start++;
	}

	if (!pkts_compl)
		return;

	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);

	if (netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
}

static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	if (!ring->mmio_base)
		return;

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
	if (!bgmac_wait_value(bgmac->core,
			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
			      BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
			      10000))
		bgmac_err(bgmac, "Reset of RX ring 0x%X failed\n",
			  ring->mmio_base);
}

static void bgmac_dma_rx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);

	/* Preserve only the address-extension bits of the current value;
	 * this has to happen before the rev >= 4 setup below, otherwise it
	 * would wipe the just-programmed BL/PC/PT fields.
	 */
	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;

	if (bgmac->core->id.rev >= 4) {
		ctl &= ~BGMAC_DMA_RX_BL_MASK;
		ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PC_MASK;
		ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PT_MASK;
		ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_RX_ENABLE;
	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
}

static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
				     struct bgmac_slot_info *slot)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	dma_addr_t dma_addr;
	struct bgmac_rx_header *rx;
	void *buf;

	/* Alloc the buffer (the skb itself is built at receive time) */
	buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
	if (!buf)
		return -ENOMEM;

	/* Poison - if everything goes fine, hardware will overwrite it */
	rx = buf + BGMAC_RX_BUF_OFFSET;
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);

	/* Map the buffer for DMA */
	dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET,
				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr)) {
		bgmac_err(bgmac, "DMA mapping error\n");
		put_page(virt_to_head_page(buf));
		return -ENOMEM;
	}

	/* Update the slot */
	slot->buf = buf;
	slot->dma_addr = dma_addr;

	return 0;
}
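/* The hardware prepends a small header (struct bgmac_rx_header) with the
 * frame length and flags to every received frame. The 0xdead/0xbeef poison
 * written by bgmac_dma_rx_skb_for_slot() marks buffers the hardware never
 * filled: if the header still holds the poison after an interrupt, the
 * descriptor is treated as invalid and dropped (see bgmac_dma_rx_read()).
 */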
static void bgmac_dma_rx_update_index(struct bgmac *bgmac,
				      struct bgmac_dma_ring *ring)
{
	dma_wmb();

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));
}

static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring, int desc_idx)
{
	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
	u32 ctl0 = 0, ctl1 = 0;

	if (desc_idx == BGMAC_RX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
	/* Is there any BGMAC device that requires extension? */
	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
	 * B43_DMA64_DCTL1_ADDREXT_MASK;
	 */

	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	ring->end = desc_idx;
}

static void bgmac_dma_rx_poison_buf(struct device *dma_dev,
				    struct bgmac_slot_info *slot)
{
	struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;

	dma_sync_single_for_cpu(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);
	dma_sync_single_for_device(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}

static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != end_slot) {
		struct device *dma_dev = bgmac->core->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
		struct sk_buff *skb;
		void *buf = slot->buf;
		dma_addr_t dma_addr = slot->dma_addr;
		u16 len, flags;

		do {
			/* Prepare new skb as replacement */
			if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
				bgmac_dma_rx_poison_buf(dma_dev, slot);
				break;
			}

			/* Unmap buffer to make it accessible to the CPU */
			dma_unmap_single(dma_dev, dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			/* Get info from the header */
			len = le16_to_cpu(rx->len);
			flags = le16_to_cpu(rx->flags);

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
					  ring->start);
				put_page(virt_to_head_page(buf));
				break;
			}

			if (len > BGMAC_RX_ALLOC_SIZE) {
				bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n",
					  ring->start);
				put_page(virt_to_head_page(buf));
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
			if (unlikely(!skb)) {
				bgmac_err(bgmac, "build_skb failed\n");
				put_page(virt_to_head_page(buf));
				break;
			}
			skb_put(skb, BGMAC_RX_FRAME_OFFSET +
				BGMAC_RX_BUF_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
				 BGMAC_RX_BUF_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			napi_gro_receive(&bgmac->napi, skb);
			handled++;
		} while (0);

		bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	bgmac_dma_rx_update_index(bgmac, ring);

	return handled;
}
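/* Some cores can use a descriptor ring base that isn't aligned to the ring
 * size. bgmac_dma_unaligned() probes for that by writing a small offset to
 * the RINGLO register and reading it back: a non-zero readback means the
 * low bits stick and unaligned addressing works. For such rings,
 * bgmac_dma_init() programs hardware indexes relative to
 * index_base = lower_32_bits(ring->dma_base).
 */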
/* Does the ring support unaligned addressing? */
static bool bgmac_dma_unaligned(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring,
				enum bgmac_dma_ring_type ring_type)
{
	switch (ring_type) {
	case BGMAC_DMA_RING_TX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
			return true;
		break;
	case BGMAC_DMA_RING_RX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
			return true;
		break;
	}
	return false;
}

static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_dma_desc *dma_desc = ring->cpu_base;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
		int len = le32_to_cpu(dma_desc[i].ctl1) & BGMAC_DESC_CTL1_LEN;

		slot = &ring->slots[i];
		dev_kfree_skb(slot->skb);

		if (!slot->dma_addr)
			continue;

		if (slot->skb)
			dma_unmap_single(dma_dev, slot->dma_addr,
					 len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr,
				       len, DMA_TO_DEVICE);
	}
}

static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_RX_RING_SLOTS; i++) {
		slot = &ring->slots[i];
		if (!slot->dma_addr)
			continue;

		dma_unmap_single(dma_dev, slot->dma_addr,
				 BGMAC_RX_BUF_SIZE,
				 DMA_FROM_DEVICE);
		put_page(virt_to_head_page(slot->buf));
		slot->dma_addr = 0;
	}
}

static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
				     struct bgmac_dma_ring *ring,
				     int num_slots)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	int size;

	if (!ring->cpu_base)
		return;

	/* Free ring of descriptors */
	size = num_slots * sizeof(struct bgmac_dma_desc);
	dma_free_coherent(dma_dev, size, ring->cpu_base,
			  ring->dma_base);
}

static void bgmac_dma_cleanup(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]);
}

static void bgmac_dma_free(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i],
					 BGMAC_TX_RING_SLOTS);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i],
					 BGMAC_RX_RING_SLOTS);
}
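/* This driver only supports the 64-bit ("DMA64") descriptor format; the
 * core advertises that capability via the BCMA_IOST_DMA64 bit checked in
 * bgmac_dma_alloc() below.
 */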
static int bgmac_dma_alloc(struct bgmac *bgmac)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_dma_ring *ring;
	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
	int size; /* ring size: different for Tx and Rx */
	int i;

	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));

	if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
		bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
		return -ENOTSUPP;
	}

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
				  ring->mmio_base);
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_TX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* No need to alloc TX slots yet */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		ring = &bgmac->rx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
				  ring->mmio_base);
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_RX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;
	}

	return 0;

err_dma_free:
	bgmac_dma_free(bgmac);
	return -ENOMEM;
}

static int bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	int i, err;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points to the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;
		for (j = 0; j < BGMAC_RX_RING_SLOTS; j++) {
			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
			if (err)
				goto error;

			bgmac_dma_rx_setup_desc(bgmac, ring, j);
		}

		bgmac_dma_rx_update_index(bgmac, ring);
	}

	return 0;

error:
	bgmac_dma_cleanup(bgmac);
	return err;
}

/**************************************************
 * PHY ops
 **************************************************/
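/* PHY registers are accessed indirectly: PHY address, register number and
 * data are packed into a single PHY_ACCESS word together with the
 * BGMAC_PA_START bit, and completion is signalled by the hardware clearing
 * that bit (polled with bgmac_wait_value()). On BCM4706 these registers
 * live in the shared GMAC common core rather than in the MAC core itself.
 */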
static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
{
	struct bcma_device *core;
	u16 phy_access_addr;
	u16 phy_ctl_addr;
	u32 tmp;

	BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
	BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
	BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
	BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
	BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
	BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
	BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
	BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
	BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
	BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
	BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
		core = bgmac->core->bus->drv_gmac_cmn.core;
		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
	} else {
		core = bgmac->core;
		phy_access_addr = BGMAC_PHY_ACCESS;
		phy_ctl_addr = BGMAC_PHY_CNTL;
	}

	tmp = bcma_read32(core, phy_ctl_addr);
	tmp &= ~BGMAC_PC_EPA_MASK;
	tmp |= phyaddr;
	bcma_write32(core, phy_ctl_addr, tmp);

	tmp = BGMAC_PA_START;
	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
	tmp |= reg << BGMAC_PA_REG_SHIFT;
	bcma_write32(core, phy_access_addr, tmp);

	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
		bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n",
			  phyaddr, reg);
		return 0xffff;
	}

	return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
{
	struct bcma_device *core;
	u16 phy_access_addr;
	u16 phy_ctl_addr;
	u32 tmp;

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
		core = bgmac->core->bus->drv_gmac_cmn.core;
		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
	} else {
		core = bgmac->core;
		phy_access_addr = BGMAC_PHY_ACCESS;
		phy_ctl_addr = BGMAC_PHY_CNTL;
	}

	tmp = bcma_read32(core, phy_ctl_addr);
	tmp &= ~BGMAC_PC_EPA_MASK;
	tmp |= phyaddr;
	bcma_write32(core, phy_ctl_addr, tmp);

	bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
		bgmac_warn(bgmac, "Error clearing MDIO interrupt\n");

	tmp = BGMAC_PA_START;
	tmp |= BGMAC_PA_WRITE;
	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
	tmp |= reg << BGMAC_PA_REG_SHIFT;
	tmp |= value;
	bcma_write32(core, phy_access_addr, tmp);

	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
		bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
			  phyaddr, reg);
		return -ETIMEDOUT;
	}

	return 0;
}
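/* The magic register/value pairs written below follow Broadcom's reference
 * initialization sequences (see the bcm-v4 specs links); they apparently
 * program undocumented, vendor-specific registers of the internal PHYs on
 * the affected chips.
 */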
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
static void bgmac_phy_init(struct bgmac *bgmac)
{
	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
	struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
	u8 i;

	if (ci->id == BCMA_CHIP_ID_BCM5356) {
		for (i = 0; i < 5; i++) {
			bgmac_phy_write(bgmac, i, 0x1f, 0x008b);
			bgmac_phy_write(bgmac, i, 0x15, 0x0100);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x12, 0x2aaa);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
		}
	}
	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
		bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
		bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
		for (i = 0; i < 5; i++) {
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x16, 0x5284);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
			bgmac_phy_write(bgmac, i, 0x17, 0x0010);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x16, 0x5296);
			bgmac_phy_write(bgmac, i, 0x17, 0x1073);
			bgmac_phy_write(bgmac, i, 0x17, 0x9073);
			bgmac_phy_write(bgmac, i, 0x16, 0x52b6);
			bgmac_phy_write(bgmac, i, 0x17, 0x9273);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
		}
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
static void bgmac_phy_reset(struct bgmac *bgmac)
{
	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
		return;

	bgmac_phy_write(bgmac, bgmac->phyaddr, MII_BMCR, BMCR_RESET);
	udelay(100);
	if (bgmac_phy_read(bgmac, bgmac->phyaddr, MII_BMCR) & BMCR_RESET)
		bgmac_err(bgmac, "PHY reset failed\n");
	bgmac_phy_init(bgmac);
}

/**************************************************
 * Chip ops
 **************************************************/
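/* Configuration changes to BGMAC_CMDCFG are made with the MAC held in soft
 * reset: bgmac_cmdcfg_maskset() sets the core-revision-dependent
 * BGMAC_CMDCFG_SR bit, applies the masked update and clears the bit again,
 * with short delays around each step, mirroring the reference gmac flow.
 */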
/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
 * there is nothing to change? Try it after stabilizing the driver.
 */
static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
				 bool force)
{
	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	u32 new_val = (cmdcfg & mask) | set;

	bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR(bgmac->core->id.rev));
	udelay(2);

	if (new_val != cmdcfg || force)
		bgmac_write(bgmac, BGMAC_CMDCFG, new_val);

	bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR(bgmac->core->id.rev));
	udelay(2);
}

static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
{
	u32 tmp;

	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
	tmp = (addr[4] << 8) | addr[5];
	bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
}

static void bgmac_set_rx_mode(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (net_dev->flags & IFF_PROMISC)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
}

#if 0 /* We don't use these registers yet */
static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
	int i;

	if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) {
		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
			bgmac->mib_tx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_TX_GOOD_OCTETS + (i * 4));
		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
			bgmac->mib_rx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_RX_GOOD_OCTETS + (i * 4));
	}

	/* TODO: what else? how to handle BCM4706? Specs are needed */
}
#endif

static void bgmac_clear_mib(struct bgmac *bgmac)
{
	int i;

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT)
		return;

	bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
	for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
		bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
	for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
		bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_mac_speed(struct bgmac *bgmac)
{
	u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
	u32 set = 0;

	switch (bgmac->mac_speed) {
	case SPEED_10:
		set |= BGMAC_CMDCFG_ES_10;
		break;
	case SPEED_100:
		set |= BGMAC_CMDCFG_ES_100;
		break;
	case SPEED_1000:
		set |= BGMAC_CMDCFG_ES_1000;
		break;
	case SPEED_2500:
		set |= BGMAC_CMDCFG_ES_2500;
		break;
	default:
		bgmac_err(bgmac, "Unsupported speed: %d\n", bgmac->mac_speed);
	}

	if (bgmac->mac_duplex == DUPLEX_HALF)
		set |= BGMAC_CMDCFG_HD;

	bgmac_cmdcfg_maskset(bgmac, mask, set, true);
}

static void bgmac_miiconfig(struct bgmac *bgmac)
{
	struct bcma_device *core = bgmac->core;
	u8 imode;

	if (bgmac_is_bcm4707_family(bgmac)) {
		bcma_awrite32(core, BCMA_IOCTL,
			      bcma_aread32(core, BCMA_IOCTL) | 0x40 |
			      BGMAC_BCMA_IOCTL_SW_CLKEN);
		bgmac->mac_speed = SPEED_2500;
		bgmac->mac_duplex = DUPLEX_FULL;
		bgmac_mac_speed(bgmac);
	} else {
		imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
			BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
		if (imode == 0 || imode == 1) {
			bgmac->mac_speed = SPEED_100;
			bgmac->mac_duplex = DUPLEX_FULL;
			bgmac_mac_speed(bgmac);
		}
	}
}
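/* bgmac_chip_reset() is called both from bgmac_probe() and on every
 * bgmac_open(), so it has to cope with a core that is already enabled and
 * has live DMA rings: those are reset first, then the core itself.
 */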
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
	struct bcma_device *core = bgmac->core;
	struct bcma_bus *bus = core->bus;
	struct bcma_chipinfo *ci = &bus->chipinfo;
	u32 flags;
	u32 iost;
	int i;

	if (bcma_core_is_enabled(core)) {
		if (!bgmac->stats_grabbed) {
			/* bgmac_chip_stats_update(bgmac); */
			bgmac->stats_grabbed = true;
		}

		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);

		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
		udelay(1);

		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);

		/* TODO: Clear software multicast filter list */
	}

	iost = bcma_aread32(core, BCMA_IOST);
	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188))
		iost &= ~BGMAC_BCMA_IOST_ATTACHED;

	/* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
	if (ci->id != BCMA_CHIP_ID_BCM4707 &&
	    ci->id != BCMA_CHIP_ID_BCM47094) {
		flags = 0;
		if (iost & BGMAC_BCMA_IOST_ATTACHED) {
			flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
			if (!bgmac->has_robosw)
				flags |= BGMAC_BCMA_IOCTL_SW_RESET;
		}
		bcma_core_enable(core, flags);
	}

	/* Request Misc PLL for corerev > 2 */
	if (core->id.rev > 2 && !bgmac_is_bcm4707_family(bgmac)) {
		bgmac_set(bgmac, BCMA_CLKCTLST,
			  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
		bgmac_wait_value(bgmac->core, BCMA_CLKCTLST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 1000);
	}

	if (ci->id == BCMA_CHIP_ID_BCM5357 ||
	    ci->id == BCMA_CHIP_ID_BCM4749 ||
	    ci->id == BCMA_CHIP_ID_BCM53572) {
		struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
		u8 et_swtype = 0;
		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
					  buf);
			et_swtype &= 0x0f;
			et_swtype <<= 4;
			sw_type = et_swtype;
		} else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358) {
			sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
		} else if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
			   (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
			   (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
		}
		bcma_chipco_chipctl_maskset(cc, 1,
					    ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
					      BGMAC_CHIPCTL_1_SW_TYPE_MASK),
					    sw_type);
	}

	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
		bcma_awrite32(core, BCMA_IOCTL,
			      bcma_aread32(core, BCMA_IOCTL) &
			      ~BGMAC_BCMA_IOCTL_SW_RESET);

	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
	 * The specs don't mention using BGMAC_CMDCFG_SR, but in this routine
	 * BGMAC_CMDCFG is read _after_ putting the chip in reset, so the bit
	 * has to be kept set until the MAC is taken out of reset.
	 */
	bgmac_cmdcfg_maskset(bgmac,
			     ~(BGMAC_CMDCFG_TE |
			       BGMAC_CMDCFG_RE |
			       BGMAC_CMDCFG_RPI |
			       BGMAC_CMDCFG_TAI |
			       BGMAC_CMDCFG_HD |
			       BGMAC_CMDCFG_ML |
			       BGMAC_CMDCFG_CFE |
			       BGMAC_CMDCFG_RL |
			       BGMAC_CMDCFG_RED |
			       BGMAC_CMDCFG_PE |
			       BGMAC_CMDCFG_TPI |
			       BGMAC_CMDCFG_PAD_EN |
			       BGMAC_CMDCFG_PF),
			     BGMAC_CMDCFG_PROM |
			     BGMAC_CMDCFG_NLC |
			     BGMAC_CMDCFG_CFE |
			     BGMAC_CMDCFG_SR(core->id.rev),
			     false);
	bgmac->mac_speed = SPEED_UNKNOWN;
	bgmac->mac_duplex = DUPLEX_UNKNOWN;

	bgmac_clear_mib(bgmac);
	if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
		bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
			       BCMA_GMAC_CMN_PC_MTE);
	else
		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
	bgmac_miiconfig(bgmac);
	bgmac_phy_init(bgmac);

	netdev_reset_queue(bgmac->net_dev);
}

static void bgmac_chip_intrs_on(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
}

static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
	bgmac_read(bgmac, BGMAC_INT_MASK);
}
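/* bgmac_enable() takes the MAC out of soft reset with TX/RX enabled, then
 * programs chip-specific flow-control thresholds and derives the
 * BGMAC_RXQ_CTL_MDP field from the backplane clock frequency.
 */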
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
static void bgmac_enable(struct bgmac *bgmac)
{
	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
	u32 cmdcfg;
	u32 mode;
	u32 rxq_ctl;
	u32 fl_ctl;
	u16 bp_clk;
	u8 mdp;

	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
			     BGMAC_CMDCFG_SR(bgmac->core->id.rev), true);
	udelay(2);
	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);

	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
		BGMAC_DS_MM_SHIFT;
	if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0)
		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
	if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2)
		bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0,
					    BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);

	switch (ci->id) {
	case BCMA_CHIP_ID_BCM5357:
	case BCMA_CHIP_ID_BCM4749:
	case BCMA_CHIP_ID_BCM53572:
	case BCMA_CHIP_ID_BCM4716:
	case BCMA_CHIP_ID_BCM47162:
		fl_ctl = 0x03cb04cb;
		if (ci->id == BCMA_CHIP_ID_BCM5357 ||
		    ci->id == BCMA_CHIP_ID_BCM4749 ||
		    ci->id == BCMA_CHIP_ID_BCM53572)
			fl_ctl = 0x2300e1;
		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
		bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
		break;
	}

	if (!bgmac_is_bcm4707_family(bgmac)) {
		rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
		rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
		bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) /
				1000000;
		mdp = (bp_clk * 128 / 1000) - 3;
		rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
		bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
static void bgmac_chip_init(struct bgmac *bgmac)
{
	/* 1 interrupt per received frame */
	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);

	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
	bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);

	bgmac_set_rx_mode(bgmac->net_dev);

	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);

	if (bgmac->loopback)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);

	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);

	bgmac_chip_intrs_on(bgmac);

	bgmac_enable(bgmac);
}

static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
{
	struct bgmac *bgmac = netdev_priv(dev_id);

	u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
	int_status &= bgmac->int_mask;

	if (!int_status)
		return IRQ_NONE;

	int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX);
	if (int_status)
		bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", int_status);

	/* Disable new interrupts until handling existing ones */
	bgmac_chip_intrs_off(bgmac);

	napi_schedule(&bgmac->napi);

	return IRQ_HANDLED;
}
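/* NAPI poll: the hard IRQ handler above has already masked the chip's
 * interrupts, so reclaim TX buffers, receive up to @weight frames and only
 * re-enable interrupts once the budget wasn't exhausted.
 */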
static int bgmac_poll(struct napi_struct *napi, int weight)
{
	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
	int handled = 0;

	/* Ack */
	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);

	bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]);
	handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight);

	/* Poll again if more events arrived in the meantime */
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
		return weight;

	if (handled < weight) {
		napi_complete(napi);
		bgmac_chip_intrs_on(bgmac);
	}

	return handled;
}

/**************************************************
 * net_device_ops
 **************************************************/

static int bgmac_open(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int err = 0;

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_init(bgmac);
	if (err)
		return err;

	/* The specs describe reclaiming rings here, but we do that in DMA init */
	bgmac_chip_init(bgmac);

	err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
			  KBUILD_MODNAME, net_dev);
	if (err < 0) {
		bgmac_err(bgmac, "IRQ request error: %d!\n", err);
		bgmac_dma_cleanup(bgmac);
		return err;
	}
	napi_enable(&bgmac->napi);

	phy_start(bgmac->phy_dev);

	netif_carrier_on(net_dev);
	return 0;
}

static int bgmac_stop(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	netif_carrier_off(net_dev);

	phy_stop(bgmac->phy_dev);

	napi_disable(&bgmac->napi);
	bgmac_chip_intrs_off(bgmac);
	free_irq(bgmac->core->irq, net_dev);

	bgmac_chip_reset(bgmac);
	bgmac_dma_cleanup(bgmac);

	return 0;
}

static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
				    struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct bgmac_dma_ring *ring;

	/* No QoS support yet */
	ring = &bgmac->tx_ring[0];
	return bgmac_dma_tx_add(bgmac, ring, skb);
}

static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int ret;

	ret = eth_prepare_mac_addr_change(net_dev, addr);
	if (ret < 0)
		return ret;
	bgmac_write_mac_address(bgmac, (u8 *)addr);
	eth_commit_mac_addr_change(net_dev, addr);
	return 0;
}

static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (!netif_running(net_dev))
		return -EINVAL;

	return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd);
}

static const struct net_device_ops bgmac_netdev_ops = {
	.ndo_open		= bgmac_open,
	.ndo_stop		= bgmac_stop,
	.ndo_start_xmit		= bgmac_start_xmit,
	.ndo_set_rx_mode	= bgmac_set_rx_mode,
	.ndo_set_mac_address	= bgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bgmac_ioctl,
};

/**************************************************
 * ethtool_ops
 **************************************************/

static int bgmac_get_settings(struct net_device *net_dev,
			      struct ethtool_cmd *cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	return phy_ethtool_gset(bgmac->phy_dev, cmd);
}

static int bgmac_set_settings(struct net_device *net_dev,
			      struct ethtool_cmd *cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	return phy_ethtool_sset(bgmac->phy_dev, cmd);
}

static void bgmac_get_drvinfo(struct net_device *net_dev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info));
}

static const struct ethtool_ops bgmac_ethtool_ops = {
	.get_settings		= bgmac_get_settings,
	.set_settings		= bgmac_set_settings,
	.get_drvinfo		= bgmac_get_drvinfo,
};

/**************************************************
 * MII
 **************************************************/

static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return bgmac_phy_read(bus->priv, mii_id, regnum);
}

static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	return bgmac_phy_write(bus->priv, mii_id, regnum, value);
}

static void bgmac_adjust_link(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct phy_device *phy_dev = bgmac->phy_dev;
	bool update = false;

	if (phy_dev->link) {
		if (phy_dev->speed != bgmac->mac_speed) {
			bgmac->mac_speed = phy_dev->speed;
			update = true;
		}

		if (phy_dev->duplex != bgmac->mac_duplex) {
			bgmac->mac_duplex = phy_dev->duplex;
			update = true;
		}
	}

	if (update) {
		bgmac_mac_speed(bgmac);
		phy_print_status(phy_dev);
	}
}
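/* The BCM4707 family has no MDIO-accessible PHY, so instead of scanning an
 * MDIO bus we register a fixed 1000 Mb/s full-duplex PHY that always
 * reports link up.
 */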
static int bgmac_fixed_phy_register(struct bgmac *bgmac)
{
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phy_dev;
	int err;

	phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
	if (!phy_dev || IS_ERR(phy_dev)) {
		bgmac_err(bgmac, "Failed to register fixed PHY device\n");
		return -ENODEV;
	}

	err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
				 PHY_INTERFACE_MODE_MII);
	if (err) {
		bgmac_err(bgmac, "Connecting PHY failed\n");
		return err;
	}

	bgmac->phy_dev = phy_dev;

	return err;
}

static int bgmac_mii_register(struct bgmac *bgmac)
{
	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	char bus_id[MII_BUS_ID_SIZE + 3];
	int err = 0;

	if (bgmac_is_bcm4707_family(bgmac))
		return bgmac_fixed_phy_register(bgmac);

	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "bgmac mii bus";
	sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
		bgmac->core->core_unit);
	mii_bus->priv = bgmac;
	mii_bus->read = bgmac_mii_read;
	mii_bus->write = bgmac_mii_write;
	mii_bus->parent = &bgmac->core->dev;
	mii_bus->phy_mask = ~(1 << bgmac->phyaddr);

	err = mdiobus_register(mii_bus);
	if (err) {
		bgmac_err(bgmac, "Registration of mii bus failed\n");
		goto err_free_bus;
	}

	bgmac->mii_bus = mii_bus;

	/* Connect to the PHY */
	snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
		 bgmac->phyaddr);
	phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
			      PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phy_dev)) {
		bgmac_err(bgmac, "PHY connection failed\n");
		err = PTR_ERR(phy_dev);
		goto err_unregister_bus;
	}
	bgmac->phy_dev = phy_dev;

	return err;

err_unregister_bus:
	mdiobus_unregister(mii_bus);
err_free_bus:
	mdiobus_free(mii_bus);
	return err;
}

static void bgmac_mii_unregister(struct bgmac *bgmac)
{
	struct mii_bus *mii_bus = bgmac->mii_bus;

	mdiobus_unregister(mii_bus);
	mdiobus_free(mii_bus);
}

/**************************************************
 * BCMA bus ops
 **************************************************/
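/* Probe binds a single GMAC core. The MAC address and PHY address for core
 * unit N come from the etNmac/etNphyaddr fields of the board's SPROM.
 */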
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
static int bgmac_probe(struct bcma_device *core)
{
	struct net_device *net_dev;
	struct bgmac *bgmac;
	struct ssb_sprom *sprom = &core->bus->sprom;
	u8 *mac;
	int err;

	switch (core->core_unit) {
	case 0:
		mac = sprom->et0mac;
		break;
	case 1:
		mac = sprom->et1mac;
		break;
	case 2:
		mac = sprom->et2mac;
		break;
	default:
		pr_err("Unsupported core_unit %d\n", core->core_unit);
		return -ENOTSUPP;
	}

	if (!is_valid_ether_addr(mac)) {
		dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
		eth_random_addr(mac);
		dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
	}

	/* This (reset &) enable is not present in the specs or reference
	 * driver, but Broadcom does it in the arch PCI code when enabling
	 * the fake PCI device.
	 */
	bcma_core_enable(core, 0);

	/* Allocation and references */
	net_dev = alloc_etherdev(sizeof(*bgmac));
	if (!net_dev)
		return -ENOMEM;
	net_dev->netdev_ops = &bgmac_netdev_ops;
	net_dev->irq = core->irq;
	net_dev->ethtool_ops = &bgmac_ethtool_ops;
	bgmac = netdev_priv(net_dev);
	bgmac->net_dev = net_dev;
	bgmac->core = core;
	bcma_set_drvdata(core, bgmac);

	/* Defaults */
	memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);

	/* On BCM4706 we need common core to access PHY */
	if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
	    !core->bus->drv_gmac_cmn.core) {
		bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
		err = -ENODEV;
		goto err_netdev_free;
	}
	bgmac->cmn = core->bus->drv_gmac_cmn.core;

	switch (core->core_unit) {
	case 0:
		bgmac->phyaddr = sprom->et0phyaddr;
		break;
	case 1:
		bgmac->phyaddr = sprom->et1phyaddr;
		break;
	case 2:
		bgmac->phyaddr = sprom->et2phyaddr;
		break;
	}
	bgmac->phyaddr &= BGMAC_PHY_MASK;
	if (bgmac->phyaddr == BGMAC_PHY_MASK) {
		bgmac_err(bgmac, "No PHY found\n");
		err = -ENODEV;
		goto err_netdev_free;
	}
	bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
		   bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");

	if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
		bgmac_err(bgmac, "PCI setup not implemented\n");
		err = -ENOTSUPP;
		goto err_netdev_free;
	}

	bgmac_chip_reset(bgmac);

	/* For Northstar, we have to take all GMAC cores out of reset */
	if (bgmac_is_bcm4707_family(bgmac)) {
		struct bcma_device *ns_core;
		int ns_gmac;

		/* Northstar has 4 GMAC cores */
		for (ns_gmac = 0; ns_gmac < 4; ns_gmac++) {
			/* Northstar requires all GMACs to be reset before
			 * any of them is accessed. bgmac_chip_reset() calls
			 * bcma_core_enable() for this core only, so enable
			 * the other three GMACs here.
			 */
			ns_core = bcma_find_core_unit(core->bus,
						      BCMA_CORE_MAC_GBIT,
						      ns_gmac);
			if (ns_core && !bcma_core_is_enabled(ns_core))
				bcma_core_enable(ns_core, 0);
		}
	}

	err = bgmac_dma_alloc(bgmac);
	if (err) {
		bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
		goto err_netdev_free;
	}

	bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
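	/* With TX interrupts masked (et0_no_txint set in NVRAM), TX reclaim
	 * piggybacks on the RX and error interrupts, since bgmac_poll()
	 * always runs bgmac_dma_tx_free().
	 */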
	/* TODO: reset the external phy. Specs are needed */
	bgmac_phy_reset(bgmac);

	bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
			       BGMAC_BFL_ENETROBO);
	if (bgmac->has_robosw)
		bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");

	if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
		bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");

	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);

	err = bgmac_mii_register(bgmac);
	if (err) {
		bgmac_err(bgmac, "Cannot register MDIO\n");
		goto err_dma_free;
	}

	net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	net_dev->hw_features = net_dev->features;
	net_dev->vlan_features = net_dev->features;

	err = register_netdev(bgmac->net_dev);
	if (err) {
		bgmac_err(bgmac, "Cannot register net device\n");
		goto err_mii_unregister;
	}

	netif_carrier_off(net_dev);

	return 0;

err_mii_unregister:
	bgmac_mii_unregister(bgmac);
err_dma_free:
	bgmac_dma_free(bgmac);

err_netdev_free:
	bcma_set_drvdata(core, NULL);
	free_netdev(net_dev);

	return err;
}

static void bgmac_remove(struct bcma_device *core)
{
	struct bgmac *bgmac = bcma_get_drvdata(core);

	unregister_netdev(bgmac->net_dev);
	bgmac_mii_unregister(bgmac);
	netif_napi_del(&bgmac->napi);
	bgmac_dma_free(bgmac);
	bcma_set_drvdata(core, NULL);
	free_netdev(bgmac->net_dev);
}

static struct bcma_driver bgmac_bcma_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= bgmac_bcma_tbl,
	.probe		= bgmac_probe,
	.remove		= bgmac_remove,
};

static int __init bgmac_init(void)
{
	int err;

	err = bcma_driver_register(&bgmac_bcma_driver);
	if (err)
		return err;
	pr_info("Broadcom 47xx GBit MAC driver loaded\n");

	return 0;
}

static void __exit bgmac_exit(void)
{
	bcma_driver_unregister(&bgmac_bcma_driver);
}

module_init(bgmac_init)
module_exit(bgmac_exit)

MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");