/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gpio.h>
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR	0
#define RES_RING_CSR	1
#define RES_RING_CMD	2

static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	if (!buf_pool)
		return;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
				SET_VAL(STASH, 3));
	}
}

static u16 xgene_enet_get_data_len(u64 bufdatalen)
{
	u16 hw_len, mask;

	hw_len = GET_VAL(BUFDATALEN, bufdatalen);

	if (unlikely(hw_len == 0x7800)) {
		return 0;
	} else if (!(hw_len & BIT(14))) {
		mask = GENMASK(13, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_16K;
	} else if (!(hw_len & GENMASK(13, 12))) {
		mask = GENMASK(11, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_4K;
	} else {
		mask = GENMASK(11, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_2K;
	}
}

static u16 xgene_enet_set_data_len(u32 size)
{
	u16 hw_len;

	hw_len = (size == SIZE_4K) ? BIT(14) : 0;

	return hw_len;
}

static int xgene_enet_refill_pagepool(struct xgene_enet_desc_ring *buf_pool,
				      u32 nbuf)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	dma_addr_t dma_addr;
	struct device *dev;
	struct page *page;
	u32 slots, tail;
	u16 hw_len;
	int i;

	if (unlikely(!buf_pool))
		return 0;

	ndev = buf_pool->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);
	slots = buf_pool->slots - 1;
	tail = buf_pool->tail;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		page = dev_alloc_page();
		if (unlikely(!page))
			return -ENOMEM;

		dma_addr = dma_map_page(dev, page, 0,
					PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, dma_addr))) {
			put_page(page);
			return -ENOMEM;
		}

		hw_len = xgene_enet_set_data_len(PAGE_SIZE);
		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, hw_len) |
					   SET_BIT(COHERENT));

		buf_pool->frag_page[tail] = page;
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}

static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	pdata = netdev_priv(ndev);

	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_STD_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		buf_pool->rx_skb[tail] = skb;

		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}

static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct device *dev = ndev_to_dev(buf_pool->ndev);
	struct xgene_enet_raw_desc16 *raw_desc;
	dma_addr_t dma_addr;
	int i;

	/* Free up the buffers held by hardware */
	for (i = 0; i < buf_pool->slots; i++) {
		if (buf_pool->rx_skb[i]) {
			dev_kfree_skb_any(buf_pool->rx_skb[i]);

			raw_desc = &buf_pool->raw_desc16[i];
			dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
			dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,
					 DMA_FROM_DEVICE);
		}
	}
}

static void xgene_enet_delete_pagepool(struct xgene_enet_desc_ring *buf_pool)
{
	struct device *dev = ndev_to_dev(buf_pool->ndev);
	dma_addr_t dma_addr;
	struct page *page;
	int i;

	/* Free up the buffers held by hardware */
	for (i = 0; i < buf_pool->slots; i++) {
		page = buf_pool->frag_page[i];
		if (page) {
			dma_addr = buf_pool->frag_dma_addr[i];
			dma_unmap_page(dev, dma_addr, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			put_page(page);
		}
	}
}

static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}

static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
	struct sk_buff *skb;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t *frag_dma_addr;
	u16 skb_index;
	u8 mss_index;
	u8 status;
	int i;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];
	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 skb_headlen(skb),
			 DMA_TO_DEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
			       DMA_TO_DEVICE);
	}

	if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
		mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
		spin_lock(&pdata->mss_lock);
		pdata->mss_refcnt[mss_index]--;
		spin_unlock(&pdata->mss_lock);
	}

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		cp_ring->tx_dropped++;
		cp_ring->tx_errors++;
	}

	if (likely(skb)) {
		dev_kfree_skb_any(skb);
	} else {
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
	}

	return 0;
}

static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int mss_index = -EBUSY;
	int i;

	spin_lock(&pdata->mss_lock);

	/* Reuse the slot if MSS matches */
	for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
		if (pdata->mss[i] == mss) {
			pdata->mss_refcnt[i]++;
			mss_index = i;
		}
	}

	/* Overwrite the slot with ref_count = 0 */
	for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
		if (!pdata->mss_refcnt[i]) {
			pdata->mss_refcnt[i]++;
			pdata->mac_ops->set_mss(pdata, mss, i);
			pdata->mss[i] = mss;
			mss_index = i;
		}
	}

	spin_unlock(&pdata->mss_lock);

	return mss_index;
}

static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
{
	struct net_device *ndev = skb->dev;
	struct iphdr *iph;
	u8 l3hlen = 0, l4hlen = 0;
	u8 ethhdr, proto = 0, csum_enable = 0;
	u32 hdr_len, mss = 0;
	u32 i, len, nr_frags;
	int mss_index;

	ethhdr = xgene_enet_hdr_len(skb->data);

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
		if (ndev->features & NETIF_F_TSO) {
			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
			mss = skb_shinfo(skb)->gso_size;

			if (skb_is_nonlinear(skb)) {
				len = skb_headlen(skb);
				nr_frags = skb_shinfo(skb)->nr_frags;

				for (i = 0; i < 2 && i < nr_frags; i++)
					len += skb_shinfo(skb)->frags[i].size;

				/* HW requires the header to reside within
				 * the first 3 buffers
				 */
				if (unlikely(hdr_len > len)) {
					if (skb_linearize(skb))
						return 0;
				}
			}

			if (!mss || ((skb->len - hdr_len) <= mss))
				goto out;

			mss_index = xgene_enet_setup_mss(ndev, mss);
			if (unlikely(mss_index < 0))
				return -EBUSY;

			*hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index);
		}
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	*hopinfo |= SET_VAL(TCPHDR, l4hlen) |
		    SET_VAL(IPHDR, l3hlen) |
		    SET_VAL(ETHHDR, ethhdr) |
		    SET_VAL(EC, csum_enable) |
		    SET_VAL(IS, proto) |
		    SET_BIT(IC) |
		    SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return 0;
}

static u16 xgene_enet_encode_len(u16 len)
{
	return (len == BUFLEN_16K) ? 0 : len;
}

static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
{
	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
				    SET_VAL(BUFDATALEN, len));
}

static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
{
	__le64 *exp_bufs;

	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);

	return exp_bufs;
}

static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
{
	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
}

static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	__le64 *exp_desc = NULL, *exp_bufs = NULL;
	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
	skb_frag_t *frag;
	u16 tail = tx_ring->tail;
	u64 hopinfo = 0;
	u32 len, hw_len;
	u8 ll = 0, nv = 0, idx = 0;
	bool split = false;
	u32 size, offset, ell_bytes = 0;
	u32 i, fidx, nr_frags, count = 1;
	int ret;

	raw_desc = &tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	ret = xgene_enet_work_msg(skb, &hopinfo);
	if (ret)
		return ret;

	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);

	len = skb_headlen(skb);
	hw_len = xgene_enet_encode_len(len);

	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, hw_len) |
				   SET_BIT(COHERENT));

	if (!skb_is_nonlinear(skb))
		goto out;

	/* scatter gather */
	nv = 1;
	exp_desc = (void *)&tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = nr_frags; i < 4; i++)
		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);

	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);

	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
		if (!split) {
			frag = &skb_shinfo(skb)->frags[fidx];
			size = skb_frag_size(frag);
			offset = 0;

			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(dev, pbuf_addr))
				return -EINVAL;

			frag_dma_addr[fidx] = pbuf_addr;
			fidx++;

			if (size > BUFLEN_16K)
				split = true;
		}

		if (size > BUFLEN_16K) {
			len = BUFLEN_16K;
			size -= BUFLEN_16K;
		} else {
			len = size;
			split = false;
		}

		dma_addr = pbuf_addr + offset;
		hw_len = xgene_enet_encode_len(len);

		switch (i) {
		case 0:
		case 1:
		case 2:
			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
			break;
		case 3:
			if (split || (fidx != nr_frags)) {
				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
				xgene_set_addr_len(exp_bufs, idx, dma_addr,
						   hw_len);
				idx++;
				ell_bytes += len;
			} else {
				xgene_set_addr_len(exp_desc, i, dma_addr,
						   hw_len);
			}
			break;
		default:
			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
			idx++;
			ell_bytes += len;
			break;
		}

		if (split)
			offset += BUFLEN_16K;
	}
	count++;

	if (idx) {
		ll = 1;
		dma_addr = dma_map_single(dev, exp_bufs,
					  sizeof(u64) * MAX_EXP_BUFFS,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}
		i = ell_bytes >> LL_BYTES_LSB_LEN;
		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					  SET_VAL(LL_BYTES_MSB, i) |
					  SET_VAL(LL_LEN, idx));
		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
	}

out:
	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
				   SET_VAL(USERINFO, tx_ring->tail));
	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
	pdata->tx_level[tx_ring->cp_ring->index] += count;
	tx_ring->tail = tail;

	return count;
}

static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring;
	int index = skb->queue_mapping;
	u32 tx_level = pdata->tx_level[index];
	int count;

	tx_ring = pdata->tx_ring[index];
	if (tx_level < pdata->txc_level[index])
		tx_level += ((typeof(pdata->tx_level[index]))~0U);

	if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
		netif_stop_subqueue(ndev, index);
		return NETDEV_TX_BUSY;
	}

	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
		return NETDEV_TX_OK;

	count = xgene_enet_setup_tx_desc(tx_ring, skb);
	if (count == -EBUSY)
		return NETDEV_TX_BUSY;

	if (count <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	skb_tx_timestamp(skb);

	tx_ring->tx_packets++;
	tx_ring->tx_bytes += skb->len;

	pdata->ring_ops->wr_cmd(tx_ring, count);
	return NETDEV_TX_OK;
}

static void xgene_enet_rx_csum(struct sk_buff *skb)
{
	struct net_device *ndev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);

	if (!(ndev->features & NETIF_F_RXCSUM))
		return;

	if (skb->protocol != htons(ETH_P_IP))
		return;

	if (ip_is_fragment(iph))
		return;

	if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
		return;

	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
				     struct xgene_enet_raw_desc *raw_desc,
				     struct xgene_enet_raw_desc *exp_desc)
{
	__le64 *desc = (void *)exp_desc;
	dma_addr_t dma_addr;
	struct device *dev;
	struct page *page;
	u16 slots, head;
	u32 frag_size;
	int i;

	if (!buf_pool || !raw_desc || !exp_desc ||
	    (!GET_VAL(NV, le64_to_cpu(raw_desc->m0))))
		return;

	dev = ndev_to_dev(buf_pool->ndev);
	slots = buf_pool->slots - 1;
	head = buf_pool->head;

	for (i = 0; i < 4; i++) {
		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
		if (!frag_size)
			break;

		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

		page = buf_pool->frag_page[head];
		put_page(page);

		buf_pool->frag_page[head] = NULL;
		head = (head + 1) & slots;
	}
	buf_pool->head = head;
}

/* Errata 10GE_10 and ENET_15 - Fix duplicated HW statistic counters */
static bool xgene_enet_errata_10GE_10(struct sk_buff *skb, u32 len, u8 status)
{
	if (status == INGRESS_CRC &&
	    len >= (ETHER_STD_PACKET + 1) &&
	    len <= (ETHER_STD_PACKET + 4) &&
	    skb->protocol == htons(ETH_P_8021Q))
		return true;

	return false;
}

/* Errata 10GE_8 and ENET_11 - allow packet with length <=64B */
static bool xgene_enet_errata_10GE_8(struct sk_buff *skb, u32 len, u8 status)
{
	if (status == INGRESS_PKT_LEN && len == ETHER_MIN_PACKET) {
		if (ntohs(eth_hdr(skb)->h_proto) < 46)
			return true;
	}

	return false;
}

static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc,
			       struct xgene_enet_raw_desc *exp_desc)
{
	struct xgene_enet_desc_ring *buf_pool, *page_pool;
	u32 datalen, frag_size, skb_index;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	struct device *dev;
	struct page *page;
	u16 slots, head;
	int i, ret = 0;
	__le64 *desc;
	u8 status;
	bool nv;

	ndev = rx_ring->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;
	page_pool = rx_ring->page_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_STD_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];
	buf_pool->rx_skb[skb_index] = NULL;

	datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
	skb_put(skb, datalen);
	prefetch(skb->data - NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, ndev);

	/* checking for error */
	status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
		  GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status)) {
		if (xgene_enet_errata_10GE_8(skb, datalen, status)) {
			pdata->false_rflr++;
		} else if (xgene_enet_errata_10GE_10(skb, datalen, status)) {
			pdata->vlan_rjbr++;
		} else {
			dev_kfree_skb_any(skb);
			xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
			xgene_enet_parse_error(rx_ring, status);
			rx_ring->rx_dropped++;
			goto out;
		}
	}

	nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
	if (!nv) {
		/* strip off CRC as HW isn't doing this */
		datalen -= 4;
		goto skip_jumbo;
	}

	slots = page_pool->slots - 1;
	head = page_pool->head;
	desc = (void *)exp_desc;

	for (i = 0; i < 4; i++) {
		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
		if (!frag_size)
			break;

		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

		page = page_pool->frag_page[head];
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
				frag_size, PAGE_SIZE);

		datalen += frag_size;

		page_pool->frag_page[head] = NULL;
		head = (head + 1) & slots;
	}

	page_pool->head = head;
	rx_ring->npagepool -= skb_shinfo(skb)->nr_frags;

skip_jumbo:
	skb_checksum_none_assert(skb);
	xgene_enet_rx_csum(skb);

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);

out:
	if (rx_ring->npagepool <= 0) {
		ret = xgene_enet_refill_pagepool(page_pool, NUM_NXTBUFPOOL);
		rx_ring->npagepool = NUM_NXTBUFPOOL;
		if (ret)
			return ret;
	}

	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}

static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct net_device *ndev = ring->ndev;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, desc_count, count = 0, processed = 0;
	bool is_completion;

	do {
		raw_desc = &ring->raw_desc[head];
		desc_count = 0;
		is_completion = false;
		exp_desc = NULL;
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		/* read fpqnum field after dataaddr field */
		dma_rmb();
		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
			head = (head + 1) & slots;
			exp_desc = &ring->raw_desc[head];

			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
				head = (head - 1) & slots;
				break;
			}
			dma_rmb();
			count++;
			desc_count++;
		}
		if (is_rx_desc(raw_desc)) {
			ret = xgene_enet_rx_frame(ring, raw_desc, exp_desc);
		} else {
			ret = xgene_enet_tx_completion(ring, raw_desc);
			is_completion = true;
		}
		xgene_enet_mark_desc_slot_empty(raw_desc);
		if (exp_desc)
			xgene_enet_mark_desc_slot_empty(exp_desc);

		head = (head + 1) & slots;
		count++;
		desc_count++;
		processed++;
		if (is_completion)
			pdata->txc_level[ring->index] += desc_count;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		pdata->ring_ops->wr_cmd(ring, -count);
		ring->head = head;

		if (__netif_subqueue_stopped(ndev, ring->index))
			netif_start_subqueue(ndev, ring->index);
	}

	return processed;
}

static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete_done(napi, processed);
		enable_irq(ring->irq);
	}

	return processed;
}

static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct netdev_queue *txq;
	int i;

	pdata->mac_ops->reset(pdata);

	for (i = 0; i < pdata->txq_cnt; i++) {
		txq = netdev_get_tx_queue(ndev, i);
		txq->trans_start = jiffies;
		netif_tx_start_queue(txq);
	}
}

static void xgene_enet_set_irq_name(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (!pdata->cq_cnt) {
			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
				 ndev->name);
		} else {
			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
				 ndev->name, i);
		}
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
			 ndev->name, i);
	}
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	int ret = 0, i;

	xgene_enet_set_irq_name(ndev);
	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
		}
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
		}
	}

	return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct xgene_enet_desc_ring *ring;
	struct device *dev;
	int i;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}
}

static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_enable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_enable(napi);
	}
}

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_disable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_disable(napi);
	}
}

static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
	if (ret)
		return ret;

	ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
	if (ret)
		return ret;

	xgene_enet_napi_enable(pdata);
	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;

	if (ndev->phydev) {
		phy_start(ndev->phydev);
	} else {
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
		netif_carrier_off(ndev);
	}

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);
	netif_tx_start_all_queues(ndev);

	return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int i;

	netif_tx_stop_all_queues(ndev);
	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	if (ndev->phydev)
		phy_stop(ndev->phydev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	xgene_enet_free_irq(ndev);
	xgene_enet_napi_disable(pdata);
	for (i = 0; i < pdata->rxq_cnt; i++)
		xgene_enet_process_ring(pdata->rx_ring[i], -1);

	return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	pdata->ring_ops->clear(ring);
	dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool, *page_pool;
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			xgene_enet_delete_ring(ring);
			pdata->port_ops->clear(pdata, ring);
			if (pdata->cq_cnt)
				xgene_enet_delete_ring(ring->cp_ring);
			pdata->tx_ring[i] = NULL;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			page_pool = ring->page_pool;
			if (page_pool) {
				xgene_enet_delete_pagepool(page_pool);
				xgene_enet_delete_ring(page_pool);
				pdata->port_ops->clear(pdata, page_pool);
			}

			buf_pool = ring->buf_pool;
			xgene_enet_delete_bufpool(buf_pool);
			xgene_enet_delete_ring(buf_pool);
			pdata->port_ops->clear(pdata, buf_pool);

			xgene_enet_delete_ring(ring);
			pdata->rx_ring[i] = NULL;
		}
	}
}

static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);
	pdata = netdev_priv(ring->ndev);

	if (ring->desc_addr) {
		pdata->ring_ops->clear(ring);
		dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *page_pool;
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;
	void *p;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			if (ring->cp_ring && ring->cp_ring->cp_skb)
				devm_kfree(dev, ring->cp_ring->cp_skb);

			if (ring->cp_ring && pdata->cq_cnt)
				xgene_enet_free_desc_ring(ring->cp_ring);

			xgene_enet_free_desc_ring(ring);
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			if (ring->buf_pool) {
				if (ring->buf_pool->rx_skb)
					devm_kfree(dev, ring->buf_pool->rx_skb);

				xgene_enet_free_desc_ring(ring->buf_pool);
			}

			page_pool = ring->page_pool;
			if (page_pool) {
				p = page_pool->frag_page;
				if (p)
					devm_kfree(dev, p);

				p = page_pool->frag_dma_addr;
				if (p)
					devm_kfree(dev, p);
			}

			xgene_enet_free_desc_ring(ring);
		}
	}
}

static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_desc_ring *ring)
{
	if ((pdata->enet_id == XGENE_ENET2) &&
	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
		return true;
	}

	return false;
}

static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
					      struct xgene_enet_desc_ring *ring)
{
	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}

static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	void *irq_mbox_addr;
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL | __GFP_ZERO);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	if (is_irq_mbox_required(pdata, ring)) {
		irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
						    &ring->irq_mbox_dma,
						    GFP_KERNEL | __GFP_ZERO);
		if (!irq_mbox_addr) {
			dmam_free_coherent(dev, size, ring->desc_addr,
					   ring->dma);
			devm_kfree(dev, ring);
			return NULL;
		}
		ring->irq_mbox_addr = irq_mbox_addr;
	}

	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = pdata->ring_ops->setup(ring);
	netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}

static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}

static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
	enum xgene_ring_owner owner;

	if (p->enet_id == XGENE_ENET1) {
		switch (p->phy_mode) {
		case PHY_INTERFACE_MODE_SGMII:
			owner = RING_OWNER_ETH0;
			break;
		default:
			owner = (!p->port_id) ? RING_OWNER_ETH0 :
						RING_OWNER_ETH1;
			break;
		}
	} else {
		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
	}

	return owner;
}

static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 cpu_bufnum;
	int ret;

	ret = device_property_read_u32(dev, "channel", &cpu_bufnum);

	return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
}

static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *page_pool = NULL;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	struct device *dev = ndev_to_dev(ndev);
	u8 eth_bufnum = pdata->eth_bufnum;
	u8 bp_bufnum = pdata->bp_bufnum;
	u16 ring_num = pdata->ring_num;
	enum xgene_ring_owner owner;
	dma_addr_t dma_exp_bufs;
	u16 ring_id, slots;
	__le64 *exp_bufs;
	int i, ret, size;
	u8 cpu_bufnum;

	cpu_bufnum = xgene_start_cpu_bufnum(pdata);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		/* allocate rx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
		rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!rx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		/* allocate buffer pool for receiving packets */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
		buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
						       RING_CFGSIZE_16KB,
						       ring_id);
		if (!buf_pool) {
			ret = -ENOMEM;
			goto err;
		}

		rx_ring->nbufpool = NUM_BUFPOOL;
		rx_ring->npagepool = NUM_NXTBUFPOOL;
		rx_ring->irq = pdata->irqs[i];
		buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!buf_pool->rx_skb) {
			ret = -ENOMEM;
			goto err;
		}

		buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
		rx_ring->buf_pool = buf_pool;
		pdata->rx_ring[i] = rx_ring;

		if ((pdata->enet_id == XGENE_ENET1 && pdata->rxq_cnt > 4) ||
		    (pdata->enet_id == XGENE_ENET2 && pdata->rxq_cnt > 16)) {
			break;
		}

		/* allocate next buffer pool for jumbo packets */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
		page_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
							RING_CFGSIZE_16KB,
							ring_id);
		if (!page_pool) {
			ret = -ENOMEM;
			goto err;
		}

		slots = page_pool->slots;
		page_pool->frag_page = devm_kcalloc(dev, slots,
						    sizeof(struct page *),
						    GFP_KERNEL);
		if (!page_pool->frag_page) {
			ret = -ENOMEM;
			goto err;
		}

		page_pool->frag_dma_addr = devm_kcalloc(dev, slots,
							sizeof(dma_addr_t),
							GFP_KERNEL);
		if (!page_pool->frag_dma_addr) {
			ret = -ENOMEM;
			goto err;
		}

		page_pool->dst_ring_num = xgene_enet_dst_ring_num(page_pool);
		rx_ring->page_pool = page_pool;
	}

	for (i = 0; i < pdata->txq_cnt; i++) {
		/* allocate tx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
		tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!tx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
		exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
					       GFP_KERNEL | __GFP_ZERO);
		if (!exp_bufs) {
			ret = -ENOMEM;
			goto err;
		}
		tx_ring->exp_bufs = exp_bufs;

		pdata->tx_ring[i] = tx_ring;

		if (!pdata->cq_cnt) {
			cp_ring = pdata->rx_ring[i];
		} else {
			/* allocate tx completion descriptor ring */
			ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
							 cpu_bufnum++);
			cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
							      RING_CFGSIZE_16KB,
							      ring_id);
			if (!cp_ring) {
				ret = -ENOMEM;
				goto err;
			}

			cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
			cp_ring->index = i;
		}

		cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
					       sizeof(struct sk_buff *),
					       GFP_KERNEL);
		if (!cp_ring->cp_skb) {
			ret = -ENOMEM;
			goto err;
		}

		size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
		cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
						      size, GFP_KERNEL);
		if (!cp_ring->frag_dma_addr) {
			devm_kfree(dev, cp_ring->cp_skb);
			ret = -ENOMEM;
			goto err;
		}

		tx_ring->cp_ring = cp_ring;
		tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
	}

	if (pdata->ring_ops->coalesce)
		pdata->ring_ops->coalesce(pdata->tx_ring[0]);
	pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}

static void xgene_enet_get_stats64(struct net_device *ndev,
				   struct rtnl_link_stats64 *stats)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			stats->tx_packets += ring->tx_packets;
			stats->tx_bytes += ring->tx_bytes;
			stats->tx_dropped += ring->tx_dropped;
			stats->tx_errors += ring->tx_errors;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			stats->rx_packets += ring->rx_packets;
			stats->rx_bytes += ring->rx_bytes;
			stats->rx_dropped += ring->rx_dropped;
			stats->rx_errors += ring->rx_errors +
				ring->rx_length_errors +
				ring->rx_crc_errors +
				ring->rx_frame_errors +
				ring->rx_fifo_errors;
			stats->rx_length_errors += ring->rx_length_errors;
			stats->rx_crc_errors += ring->rx_crc_errors;
			stats->rx_frame_errors += ring->rx_frame_errors;
			stats->rx_fifo_errors += ring->rx_fifo_errors;
		}
	}
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	pdata->mac_ops->set_mac_addr(pdata);

	return ret;
}

static int xgene_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int frame_size;

	if (!netif_running(ndev))
		return 0;

	frame_size = (new_mtu > ETH_DATA_LEN) ? (new_mtu + 18) : 0x600;

	xgene_enet_close(ndev);
	ndev->mtu = new_mtu;
	pdata->mac_ops->set_framesize(pdata, frame_size);
	xgene_enet_open(ndev);

	return 0;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = xgene_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};

#ifdef CONFIG_ACPI
static void xgene_get_port_id_acpi(struct device *dev,
				   struct xgene_enet_pdata *pdata)
{
	acpi_status status;
	u64 temp;

	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
	if (ACPI_FAILURE(status)) {
		pdata->port_id = 0;
	} else {
		pdata->port_id = temp;
	}

	return;
}
#endif

static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
{
	u32 id = 0;

	of_property_read_u32(dev->of_node, "port-id", &id);

	pdata->port_id = id & BIT(0);

	return;
}

static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = device_property_read_u32(dev, "tx-delay", &delay);
	if (ret) {
		pdata->tx_delay = 4;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid tx-delay specified\n");
		return -EINVAL;
	}

	pdata->tx_delay = delay;

	return 0;
}

static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = device_property_read_u32(dev, "rx-delay", &delay);
	if (ret) {
		pdata->rx_delay = 2;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid rx-delay specified\n");
		return -EINVAL;
	}

	pdata->rx_delay = delay;

	return 0;
}

static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev = pdata->pdev;
	struct device *dev = &pdev->dev;
	int i, ret, max_irqs;

	if (phy_interface_mode_is_rgmii(pdata->phy_mode))
		max_irqs = 1;
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
		max_irqs = 2;
	else
		max_irqs = XGENE_MAX_ENET_IRQ;

	for (i = 0; i < max_irqs; i++) {
		ret = platform_get_irq(pdev, i);
		if (ret <= 0) {
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				max_irqs = i;
				pdata->rxq_cnt = max_irqs / 2;
				pdata->txq_cnt = max_irqs / 2;
				pdata->cq_cnt = max_irqs / 2;
				break;
			}
			dev_err(dev, "Unable to get ENET IRQ\n");
			ret = ret ? : -ENXIO;
			return ret;
		}
		pdata->irqs[i] = ret;
	}

	return 0;
}

static void xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
{
	int ret;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
		return;

	if (!IS_ENABLED(CONFIG_MDIO_XGENE))
		return;

	ret = xgene_enet_phy_connect(pdata->ndev);
	if (!ret)
		pdata->mdio_driver = true;
}

static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	pdata->sfp_gpio_en = false;
	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII ||
	    (!device_property_present(dev, "sfp-gpios") &&
	     !device_property_present(dev, "rxlos-gpios")))
		return;

	pdata->sfp_gpio_en = true;
	pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
	if (IS_ERR(pdata->sfp_rdy))
		pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
}

static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	u32 offset;
	int ret = 0;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
	if (!pdata->base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_csr_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_cmd_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return -ENOMEM;
	}

	if (dev->of_node)
		xgene_get_port_id_dt(dev, pdata);
#ifdef CONFIG_ACPI
	else
		xgene_get_port_id_acpi(dev, pdata);
#endif

	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = device_get_phy_mode(dev);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return pdata->phy_mode;
	}
	if (!phy_interface_mode_is_rgmii(pdata->phy_mode) &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = xgene_get_tx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_get_rx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_get_irqs(pdata);
	if (ret)
		return ret;

	xgene_enet_gpiod_get(pdata);

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
			/* Abort if the clock is defined but couldn't be
			 * retrieved. Always abort if the clock is missing on
			 * DT system as the driver can't cope with this case.
			 */
			if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node)
				return PTR_ERR(pdata->clk);
			/* Firmware may have set up the clock already. */
			dev_info(dev, "clocks have been setup already\n");
		}
	}

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
	else
		base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	if (phy_interface_mode_is_rgmii(pdata->phy_mode) ||
	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
		pdata->mcx_stats_addr =
			pdata->base_addr + BLOCK_ETH_STATS_OFFSET;
		offset = (pdata->enet_id == XGENE_ENET1) ?
			  BLOCK_ETH_MAC_CSR_OFFSET :
			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + offset;
	} else {
		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
		pdata->mcx_stats_addr = base_addr + BLOCK_AXG_STATS_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
		pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
	}
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return 0;
}

static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *enet_cle = &pdata->cle;
	struct xgene_enet_desc_ring *page_pool;
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num, ring_id;
	int i, ret;
	u32 count;

	ret = pdata->port_ops->reset(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	for (i = 0; i < pdata->rxq_cnt; i++) {
		buf_pool = pdata->rx_ring[i]->buf_pool;
		xgene_enet_init_bufpool(buf_pool);
		page_pool = pdata->rx_ring[i]->page_pool;
		xgene_enet_init_bufpool(page_pool);

		count = pdata->rx_buff_cnt;
		ret = xgene_enet_refill_bufpool(buf_pool, count);
		if (ret)
			goto err;

		ret = xgene_enet_refill_pagepool(page_pool, count);
		if (ret)
			goto err;
	}

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
	buf_pool = pdata->rx_ring[0]->buf_pool;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		/* Initialize and Enable PreClassifier Tree */
		enet_cle->max_nodes = 512;
		enet_cle->max_dbptrs = 1024;
		enet_cle->parsers = 3;
		enet_cle->active_parser = PARSER_ALL;
		enet_cle->ptree.start_node = 0;
		enet_cle->ptree.start_dbptr = 0;
		enet_cle->jump_bytes = 8;
		ret = pdata->cle_ops->cle_init(pdata);
		if (ret) {
			netdev_err(ndev, "Preclass Tree init error\n");
			goto err;
		}
	} else {
		dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
		buf_pool = pdata->rx_ring[0]->buf_pool;
		page_pool = pdata->rx_ring[0]->page_pool;
		ring_id = (page_pool) ? page_pool->id : 0;
		pdata->port_ops->cle_bypass(pdata, dst_ring_num,
					    buf_pool->id, ring_id);
	}

	ndev->max_mtu = XGENE_ENET_MAX_MTU;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->mac_ops->init(pdata);

	return ret;

err:
	xgene_enet_delete_desc_rings(pdata);
	return ret;
}

static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
	switch (pdata->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		pdata->mac_ops = &xgene_gmac_ops;
		pdata->port_ops = &xgene_gport_ops;
		pdata->rm = RM3;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 0;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		pdata->mac_ops = &xgene_sgmac_ops;
		pdata->port_ops = &xgene_sgport_ops;
		pdata->rm = RM1;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 1;
		break;
	default:
		pdata->mac_ops = &xgene_xgmac_ops;
		pdata->port_ops = &xgene_xgport_ops;
		pdata->cle_ops = &xgene_cle3in_ops;
		pdata->rm = RM0;
		if (!pdata->rxq_cnt) {
			pdata->rxq_cnt = XGENE_NUM_RX_RING;
			pdata->txq_cnt = XGENE_NUM_TX_RING;
			pdata->cq_cnt = XGENE_NUM_TXC_RING;
		}
		break;
	}

	if (pdata->enet_id == XGENE_ENET1) {
		switch (pdata->port_id) {
		case 0:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
				pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
				pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
				pdata->ring_num = START_RING_NUM_0;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_0;
				pdata->eth_bufnum = START_ETH_BUFNUM_0;
				pdata->bp_bufnum = START_BP_BUFNUM_0;
				pdata->ring_num = START_RING_NUM_0;
			}
			break;
		case 1:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
				pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
				pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
				pdata->ring_num = XG_START_RING_NUM_1;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_1;
				pdata->eth_bufnum = START_ETH_BUFNUM_1;
				pdata->bp_bufnum = START_BP_BUFNUM_1;
				pdata->ring_num = START_RING_NUM_1;
			}
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring1_ops;
	} else {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
			pdata->ring_num = X2_START_RING_NUM_0;
			break;
		case 1:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
			pdata->ring_num = X2_START_RING_NUM_1;
			break;
		default:
			break;
		}
		pdata->rm = RM0;
		pdata->ring_ops = &xgene_ring2_ops;
	}
}

static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
	{ "APMC0D05", XGENE_ENET1},
	{ "APMC0D30", XGENE_ENET1},
	{ "APMC0D31", XGENE_ENET1},
	{ "APMC0D3F", XGENE_ENET1},
	{ "APMC0D26", XGENE_ENET2},
	{ "APMC0D25", XGENE_ENET2},
	{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif

static const struct of_device_id xgene_enet_of_match[] = {
	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
	{},
};

MODULE_DEVICE_TABLE(of, xgene_enet_of_match);

static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	void (*link_state)(struct work_struct *);
	const struct of_device_id *of_id;
	int ret;

	ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
				  XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO |
			  NETIF_F_SG;

	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
	if (of_id) {
		pdata->enet_id = (enum xgene_enet_id)of_id->data;
	}
#ifdef CONFIG_ACPI
	else {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
		if (acpi_id)
			pdata->enet_id = (enum xgene_enet_id)acpi_id->driver_data;
	}
#endif
	if (!pdata->enet_id) {
		ret = -ENODEV;
		goto err;
	}

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err;

	xgene_enet_setup_ops(pdata);
	spin_lock_init(&pdata->mac_lock);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM;
		spin_lock_init(&pdata->mss_lock);
	}
	ndev->hw_features = ndev->features;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	xgene_enet_check_phy_handle(pdata);

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err2;

	link_state = pdata->mac_ops->link_state;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		INIT_DELAYED_WORK(&pdata->link_work, link_state);
	} else if (!pdata->mdio_driver) {
		if (phy_interface_mode_is_rgmii(pdata->phy_mode))
			ret = xgene_enet_mdio_config(pdata);
		else
			INIT_DELAYED_WORK(&pdata->link_work, link_state);

		if (ret)
			goto err1;
	}

	spin_lock_init(&pdata->stats_lock);
	ret = xgene_extd_stats_init(pdata);
	if (ret)
		goto err1;

	xgene_enet_napi_add(pdata);
	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err1;
	}

	return 0;

err1:
	/*
	 * If necessary, free_netdev() will call netif_napi_del() and undo
	 * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
	 */

	xgene_enet_delete_desc_rings(pdata);

err2:
	if (pdata->mdio_driver)
		xgene_enet_phy_disconnect(pdata);
	else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
		xgene_enet_mdio_remove(pdata);
err:
	free_netdev(ndev);
	return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	ndev = pdata->ndev;

	rtnl_lock();
	if (netif_running(ndev))
		dev_close(ndev);
	rtnl_unlock();

	if (pdata->mdio_driver)
		xgene_enet_phy_disconnect(pdata);
	else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
		xgene_enet_mdio_remove(pdata);

	unregister_netdev(ndev);
	xgene_enet_delete_desc_rings(pdata);
	pdata->port_ops->shutdown(pdata);
	free_netdev(ndev);

	return 0;
}

static void xgene_enet_shutdown(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;

	pdata = platform_get_drvdata(pdev);
	if (!pdata)
		return;

	if (!pdata->ndev)
		return;

	xgene_enet_remove(pdev);
}

static struct platform_driver xgene_enet_driver = {
	.driver = {
		.name = "xgene-enet",
		.of_match_table = of_match_ptr(xgene_enet_of_match),
		.acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
	.shutdown = xgene_enet_shutdown,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");