1 /* Applied Micro X-Gene SoC Ethernet Driver 2 * 3 * Copyright (c) 2014, Applied Micro Circuits Corporation 4 * Authors: Iyappan Subramanian <isubramanian@apm.com> 5 * Ravi Patel <rapatel@apm.com> 6 * Keyur Chudgar <kchudgar@apm.com> 7 * 8 * This program is free software; you can redistribute it and/or modify it 9 * under the terms of the GNU General Public License as published by the 10 * Free Software Foundation; either version 2 of the License, or (at your 11 * option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program. If not, see <http://www.gnu.org/licenses/>. 20 */ 21 22 #include "xgene_enet_main.h" 23 #include "xgene_enet_hw.h" 24 #include "xgene_enet_sgmac.h" 25 #include "xgene_enet_xgmac.h" 26 27 #define RES_ENET_CSR 0 28 #define RES_RING_CSR 1 29 #define RES_RING_CMD 2 30 31 static const struct of_device_id xgene_enet_of_match[]; 32 static const struct acpi_device_id xgene_enet_acpi_match[]; 33 34 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool) 35 { 36 struct xgene_enet_raw_desc16 *raw_desc; 37 int i; 38 39 for (i = 0; i < buf_pool->slots; i++) { 40 raw_desc = &buf_pool->raw_desc16[i]; 41 42 /* Hardware expects descriptor in little endian format */ 43 raw_desc->m0 = cpu_to_le64(i | 44 SET_VAL(FPQNUM, buf_pool->dst_ring_num) | 45 SET_VAL(STASH, 3)); 46 } 47 } 48 49 static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool, 50 u32 nbuf) 51 { 52 struct sk_buff *skb; 53 struct xgene_enet_raw_desc16 *raw_desc; 54 struct xgene_enet_pdata *pdata; 55 struct net_device *ndev; 56 struct device *dev; 57 dma_addr_t dma_addr; 58 u32 tail = buf_pool->tail; 59 u32 slots = buf_pool->slots - 1; 60 u16 bufdatalen, len; 61 int i; 62 63 ndev = buf_pool->ndev; 64 dev = ndev_to_dev(buf_pool->ndev); 65 pdata = netdev_priv(ndev); 66 bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0)); 67 len = XGENE_ENET_MAX_MTU; 68 69 for (i = 0; i < nbuf; i++) { 70 raw_desc = &buf_pool->raw_desc16[tail]; 71 72 skb = netdev_alloc_skb_ip_align(ndev, len); 73 if (unlikely(!skb)) 74 return -ENOMEM; 75 buf_pool->rx_skb[tail] = skb; 76 77 dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE); 78 if (dma_mapping_error(dev, dma_addr)) { 79 netdev_err(ndev, "DMA mapping error\n"); 80 dev_kfree_skb_any(skb); 81 return -EINVAL; 82 } 83 84 raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) | 85 SET_VAL(BUFDATALEN, bufdatalen) | 86 SET_BIT(COHERENT)); 87 tail = (tail + 1) & slots; 88 } 89 90 pdata->ring_ops->wr_cmd(buf_pool, nbuf); 91 buf_pool->tail = tail; 92 93 return 0; 94 } 95 96 static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring) 97 { 98 struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); 99 100 return ((u16)pdata->rm << 10) | ring->num; 101 } 102 103 static u8 xgene_enet_hdr_len(const void *data) 104 { 105 const struct ethhdr *eth = data; 106 107 return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN; 108 } 109 110 static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool) 111 { 112 struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev); 113 struct xgene_enet_raw_desc16 *raw_desc; 114 u32 slots = buf_pool->slots - 1; 115 u32 tail = buf_pool->tail; 116 u32 userinfo; 117 int i, len; 118 119 len = pdata->ring_ops->len(buf_pool); 120 for (i = 0; i < len; i++) { 121 tail = (tail - 1) & slots; 122 raw_desc = &buf_pool->raw_desc16[tail]; 123 124 /* Hardware stores descriptor in little endian format */ 125 userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0)); 126 dev_kfree_skb_any(buf_pool->rx_skb[userinfo]); 127 } 128 129 pdata->ring_ops->wr_cmd(buf_pool, -len); 130 buf_pool->tail = tail; 131 } 132 133 static irqreturn_t xgene_enet_rx_irq(const int irq, void *data) 134 { 135 struct xgene_enet_desc_ring *rx_ring = data; 136 137 if (napi_schedule_prep(&rx_ring->napi)) { 138 disable_irq_nosync(irq); 139 __napi_schedule(&rx_ring->napi); 140 } 141 142 return IRQ_HANDLED; 143 } 144 145 static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring, 146 struct xgene_enet_raw_desc *raw_desc) 147 { 148 struct sk_buff *skb; 149 struct device *dev; 150 skb_frag_t *frag; 151 dma_addr_t *frag_dma_addr; 152 u16 skb_index; 153 u8 status; 154 int i, ret = 0; 155 156 skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0)); 157 skb = cp_ring->cp_skb[skb_index]; 158 frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS]; 159 160 dev = ndev_to_dev(cp_ring->ndev); 161 dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)), 162 skb_headlen(skb), 163 DMA_TO_DEVICE); 164 165 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 166 frag = &skb_shinfo(skb)->frags[i]; 167 dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag), 168 DMA_TO_DEVICE); 169 } 170 171 /* Checking for error */ 172 status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0)); 173 if (unlikely(status > 2)) { 174 xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev), 175 status); 176 ret = -EIO; 177 } 178 179 if (likely(skb)) { 180 dev_kfree_skb_any(skb); 181 } else { 182 netdev_err(cp_ring->ndev, "completion skb is NULL\n"); 183 ret = -EIO; 184 } 185 186 return ret; 187 } 188 189 static u64 xgene_enet_work_msg(struct sk_buff *skb) 190 { 191 struct net_device *ndev = skb->dev; 192 struct xgene_enet_pdata *pdata = netdev_priv(ndev); 193 struct iphdr *iph; 194 u8 l3hlen = 0, l4hlen = 0; 195 u8 ethhdr, proto = 0, csum_enable = 0; 196 u64 hopinfo = 0; 197 u32 hdr_len, mss = 0; 198 u32 i, len, nr_frags; 199 200 ethhdr = xgene_enet_hdr_len(skb->data); 201 202 if (unlikely(skb->protocol != htons(ETH_P_IP)) && 203 unlikely(skb->protocol != htons(ETH_P_8021Q))) 204 goto out; 205 206 if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM))) 207 goto out; 208 209 iph = ip_hdr(skb); 210 if (unlikely(ip_is_fragment(iph))) 211 goto out; 212 213 if (likely(iph->protocol == IPPROTO_TCP)) { 214 l4hlen = tcp_hdrlen(skb) >> 2; 215 csum_enable = 1; 216 proto = TSO_IPPROTO_TCP; 217 if (ndev->features & NETIF_F_TSO) { 218 hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb); 219 mss = skb_shinfo(skb)->gso_size; 220 221 if (skb_is_nonlinear(skb)) { 222 len = skb_headlen(skb); 223 nr_frags = skb_shinfo(skb)->nr_frags; 224 225 for (i = 0; i < 2 && i < nr_frags; i++) 226 len += skb_shinfo(skb)->frags[i].size; 227 228 /* HW requires header must reside in 3 buffer */ 229 if (unlikely(hdr_len > len)) { 230 if (skb_linearize(skb)) 231 return 0; 232 } 233 } 234 235 if (!mss || ((skb->len - hdr_len) <= mss)) 236 goto out; 237 238 if (mss != pdata->mss) { 239 pdata->mss = mss; 240 pdata->mac_ops->set_mss(pdata); 241 } 242 hopinfo |= SET_BIT(ET); 243 } 244 } else if (iph->protocol == IPPROTO_UDP) { 245 l4hlen = UDP_HDR_SIZE; 246 csum_enable = 1; 247 } 248 out: 249 l3hlen = ip_hdrlen(skb) >> 2; 250 hopinfo |= SET_VAL(TCPHDR, l4hlen) | 251 SET_VAL(IPHDR, l3hlen) | 252 SET_VAL(ETHHDR, ethhdr) | 253 SET_VAL(EC, csum_enable) | 254 SET_VAL(IS, proto) | 255 SET_BIT(IC) | 256 SET_BIT(TYPE_ETH_WORK_MESSAGE); 257 258 return hopinfo; 259 } 260 261 static u16 xgene_enet_encode_len(u16 len) 262 { 263 return (len == BUFLEN_16K) ? 0 : len; 264 } 265 266 static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len) 267 { 268 desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) | 269 SET_VAL(BUFDATALEN, len)); 270 } 271 272 static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring) 273 { 274 __le64 *exp_bufs; 275 276 exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS]; 277 memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS); 278 ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1); 279 280 return exp_bufs; 281 } 282 283 static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring) 284 { 285 return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS]; 286 } 287 288 static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring, 289 struct sk_buff *skb) 290 { 291 struct device *dev = ndev_to_dev(tx_ring->ndev); 292 struct xgene_enet_raw_desc *raw_desc; 293 __le64 *exp_desc = NULL, *exp_bufs = NULL; 294 dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr; 295 skb_frag_t *frag; 296 u16 tail = tx_ring->tail; 297 u64 hopinfo; 298 u32 len, hw_len; 299 u8 ll = 0, nv = 0, idx = 0; 300 bool split = false; 301 u32 size, offset, ell_bytes = 0; 302 u32 i, fidx, nr_frags, count = 1; 303 304 raw_desc = &tx_ring->raw_desc[tail]; 305 tail = (tail + 1) & (tx_ring->slots - 1); 306 memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc)); 307 308 hopinfo = xgene_enet_work_msg(skb); 309 if (!hopinfo) 310 return -EINVAL; 311 raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) | 312 hopinfo); 313 314 len = skb_headlen(skb); 315 hw_len = xgene_enet_encode_len(len); 316 317 dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE); 318 if (dma_mapping_error(dev, dma_addr)) { 319 netdev_err(tx_ring->ndev, "DMA mapping error\n"); 320 return -EINVAL; 321 } 322 323 /* Hardware expects descriptor in little endian format */ 324 raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) | 325 SET_VAL(BUFDATALEN, hw_len) | 326 SET_BIT(COHERENT)); 327 328 if (!skb_is_nonlinear(skb)) 329 goto out; 330 331 /* scatter gather */ 332 nv = 1; 333 exp_desc = (void *)&tx_ring->raw_desc[tail]; 334 tail = (tail + 1) & (tx_ring->slots - 1); 335 memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc)); 336 337 nr_frags = skb_shinfo(skb)->nr_frags; 338 for (i = nr_frags; i < 4 ; i++) 339 exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER); 340 341 frag_dma_addr = xgene_get_frag_dma_array(tx_ring); 342 343 for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) { 344 if (!split) { 345 frag = &skb_shinfo(skb)->frags[fidx]; 346 size = skb_frag_size(frag); 347 offset = 0; 348 349 pbuf_addr = skb_frag_dma_map(dev, frag, 0, size, 350 DMA_TO_DEVICE); 351 if (dma_mapping_error(dev, pbuf_addr)) 352 return -EINVAL; 353 354 frag_dma_addr[fidx] = pbuf_addr; 355 fidx++; 356 357 if (size > BUFLEN_16K) 358 split = true; 359 } 360 361 if (size > BUFLEN_16K) { 362 len = BUFLEN_16K; 363 size -= BUFLEN_16K; 364 } else { 365 len = size; 366 split = false; 367 } 368 369 dma_addr = pbuf_addr + offset; 370 hw_len = xgene_enet_encode_len(len); 371 372 switch (i) { 373 case 0: 374 case 1: 375 case 2: 376 xgene_set_addr_len(exp_desc, i, dma_addr, hw_len); 377 break; 378 case 3: 379 if (split || (fidx != nr_frags)) { 380 exp_bufs = xgene_enet_get_exp_bufs(tx_ring); 381 xgene_set_addr_len(exp_bufs, idx, dma_addr, 382 hw_len); 383 idx++; 384 ell_bytes += len; 385 } else { 386 xgene_set_addr_len(exp_desc, i, dma_addr, 387 hw_len); 388 } 389 break; 390 default: 391 xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len); 392 idx++; 393 ell_bytes += len; 394 break; 395 } 396 397 if (split) 398 offset += BUFLEN_16K; 399 } 400 count++; 401 402 if (idx) { 403 ll = 1; 404 dma_addr = dma_map_single(dev, exp_bufs, 405 sizeof(u64) * MAX_EXP_BUFFS, 406 DMA_TO_DEVICE); 407 if (dma_mapping_error(dev, dma_addr)) { 408 dev_kfree_skb_any(skb); 409 return -EINVAL; 410 } 411 i = ell_bytes >> LL_BYTES_LSB_LEN; 412 exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) | 413 SET_VAL(LL_BYTES_MSB, i) | 414 SET_VAL(LL_LEN, idx)); 415 raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes)); 416 } 417 418 out: 419 raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) | 420 SET_VAL(USERINFO, tx_ring->tail)); 421 tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb; 422 tx_ring->tail = tail; 423 424 return count; 425 } 426 427 static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb, 428 struct net_device *ndev) 429 { 430 struct xgene_enet_pdata *pdata = netdev_priv(ndev); 431 struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring; 432 struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring; 433 u32 tx_level, cq_level; 434 int count; 435 436 tx_level = pdata->ring_ops->len(tx_ring); 437 cq_level = pdata->ring_ops->len(cp_ring); 438 if (unlikely(tx_level > pdata->tx_qcnt_hi || 439 cq_level > pdata->cp_qcnt_hi)) { 440 netif_stop_queue(ndev); 441 return NETDEV_TX_BUSY; 442 } 443 444 if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE)) 445 return NETDEV_TX_OK; 446 447 count = xgene_enet_setup_tx_desc(tx_ring, skb); 448 if (count <= 0) { 449 dev_kfree_skb_any(skb); 450 return NETDEV_TX_OK; 451 } 452 453 pdata->ring_ops->wr_cmd(tx_ring, count); 454 skb_tx_timestamp(skb); 455 456 pdata->stats.tx_packets++; 457 pdata->stats.tx_bytes += skb->len; 458 459 return NETDEV_TX_OK; 460 } 461 462 static void xgene_enet_skip_csum(struct sk_buff *skb) 463 { 464 struct iphdr *iph = ip_hdr(skb); 465 466 if (!ip_is_fragment(iph) || 467 (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) { 468 skb->ip_summed = CHECKSUM_UNNECESSARY; 469 } 470 } 471 472 static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, 473 struct xgene_enet_raw_desc *raw_desc) 474 { 475 struct net_device *ndev; 476 struct xgene_enet_pdata *pdata; 477 struct device *dev; 478 struct xgene_enet_desc_ring *buf_pool; 479 u32 datalen, skb_index; 480 struct sk_buff *skb; 481 u8 status; 482 int ret = 0; 483 484 ndev = rx_ring->ndev; 485 pdata = netdev_priv(ndev); 486 dev = ndev_to_dev(rx_ring->ndev); 487 buf_pool = rx_ring->buf_pool; 488 489 dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)), 490 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE); 491 skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0)); 492 skb = buf_pool->rx_skb[skb_index]; 493 494 /* checking for error */ 495 status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0)); 496 if (unlikely(status > 2)) { 497 dev_kfree_skb_any(skb); 498 xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev), 499 status); 500 pdata->stats.rx_dropped++; 501 ret = -EIO; 502 goto out; 503 } 504 505 /* strip off CRC as HW isn't doing this */ 506 datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)); 507 datalen = (datalen & DATALEN_MASK) - 4; 508 prefetch(skb->data - NET_IP_ALIGN); 509 skb_put(skb, datalen); 510 511 skb_checksum_none_assert(skb); 512 skb->protocol = eth_type_trans(skb, ndev); 513 if (likely((ndev->features & NETIF_F_IP_CSUM) && 514 skb->protocol == htons(ETH_P_IP))) { 515 xgene_enet_skip_csum(skb); 516 } 517 518 pdata->stats.rx_packets++; 519 pdata->stats.rx_bytes += datalen; 520 napi_gro_receive(&rx_ring->napi, skb); 521 out: 522 if (--rx_ring->nbufpool == 0) { 523 ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL); 524 rx_ring->nbufpool = NUM_BUFPOOL; 525 } 526 527 return ret; 528 } 529 530 static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc) 531 { 532 return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false; 533 } 534 535 static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, 536 int budget) 537 { 538 struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); 539 struct xgene_enet_raw_desc *raw_desc, *exp_desc; 540 u16 head = ring->head; 541 u16 slots = ring->slots - 1; 542 int ret, count = 0, processed = 0; 543 544 do { 545 raw_desc = &ring->raw_desc[head]; 546 exp_desc = NULL; 547 if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) 548 break; 549 550 /* read fpqnum field after dataaddr field */ 551 dma_rmb(); 552 if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) { 553 head = (head + 1) & slots; 554 exp_desc = &ring->raw_desc[head]; 555 556 if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) { 557 head = (head - 1) & slots; 558 break; 559 } 560 dma_rmb(); 561 count++; 562 } 563 if (is_rx_desc(raw_desc)) 564 ret = xgene_enet_rx_frame(ring, raw_desc); 565 else 566 ret = xgene_enet_tx_completion(ring, raw_desc); 567 xgene_enet_mark_desc_slot_empty(raw_desc); 568 if (exp_desc) 569 xgene_enet_mark_desc_slot_empty(exp_desc); 570 571 head = (head + 1) & slots; 572 count++; 573 processed++; 574 575 if (ret) 576 break; 577 } while (--budget); 578 579 if (likely(count)) { 580 pdata->ring_ops->wr_cmd(ring, -count); 581 ring->head = head; 582 583 if (netif_queue_stopped(ring->ndev)) { 584 if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low) 585 netif_wake_queue(ring->ndev); 586 } 587 } 588 589 return processed; 590 } 591 592 static int xgene_enet_napi(struct napi_struct *napi, const int budget) 593 { 594 struct xgene_enet_desc_ring *ring; 595 int processed; 596 597 ring = container_of(napi, struct xgene_enet_desc_ring, napi); 598 processed = xgene_enet_process_ring(ring, budget); 599 600 if (processed != budget) { 601 napi_complete(napi); 602 enable_irq(ring->irq); 603 } 604 605 return processed; 606 } 607 608 static void xgene_enet_timeout(struct net_device *ndev) 609 { 610 struct xgene_enet_pdata *pdata = netdev_priv(ndev); 611 612 pdata->mac_ops->reset(pdata); 613 } 614 615 static int xgene_enet_register_irq(struct net_device *ndev) 616 { 617 struct xgene_enet_pdata *pdata = netdev_priv(ndev); 618 struct device *dev = ndev_to_dev(ndev); 619 struct xgene_enet_desc_ring *ring; 620 int ret; 621 622 ring = pdata->rx_ring; 623 ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, 624 IRQF_SHARED, ring->irq_name, ring); 625 if (ret) 626 netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name); 627 628 if (pdata->cq_cnt) { 629 ring = pdata->tx_ring->cp_ring; 630 ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, 631 IRQF_SHARED, ring->irq_name, ring); 632 if (ret) { 633 netdev_err(ndev, "Failed to request irq %s\n", 634 ring->irq_name); 635 } 636 } 637 638 return ret; 639 } 640 641 static void xgene_enet_free_irq(struct net_device *ndev) 642 { 643 struct xgene_enet_pdata *pdata; 644 struct device *dev; 645 646 pdata = netdev_priv(ndev); 647 dev = ndev_to_dev(ndev); 648 devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring); 649 650 if (pdata->cq_cnt) { 651 devm_free_irq(dev, pdata->tx_ring->cp_ring->irq, 652 pdata->tx_ring->cp_ring); 653 } 654 } 655 656 static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata) 657 { 658 struct napi_struct *napi; 659 660 napi = &pdata->rx_ring->napi; 661 napi_enable(napi); 662 663 if (pdata->cq_cnt) { 664 napi = &pdata->tx_ring->cp_ring->napi; 665 napi_enable(napi); 666 } 667 } 668 669 static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata) 670 { 671 struct napi_struct *napi; 672 673 napi = &pdata->rx_ring->napi; 674 napi_disable(napi); 675 676 if (pdata->cq_cnt) { 677 napi = &pdata->tx_ring->cp_ring->napi; 678 napi_disable(napi); 679 } 680 } 681 682 static int xgene_enet_open(struct net_device *ndev) 683 { 684 struct xgene_enet_pdata *pdata = netdev_priv(ndev); 685 struct xgene_mac_ops *mac_ops = pdata->mac_ops; 686 int ret; 687 688 mac_ops->tx_enable(pdata); 689 mac_ops->rx_enable(pdata); 690 691 ret = xgene_enet_register_irq(ndev); 692 if (ret) 693 return ret; 694 xgene_enet_napi_enable(pdata); 695 696 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) 697 phy_start(pdata->phy_dev); 698 else 699 schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF); 700 701 netif_carrier_off(ndev); 702 netif_start_queue(ndev); 703 704 return ret; 705 } 706 707 static int xgene_enet_close(struct net_device *ndev) 708 { 709 struct xgene_enet_pdata *pdata = netdev_priv(ndev); 710 struct xgene_mac_ops *mac_ops = pdata->mac_ops; 711 712 netif_stop_queue(ndev); 713 714 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) 715 phy_stop(pdata->phy_dev); 716 else 717 cancel_delayed_work_sync(&pdata->link_work); 718 719 xgene_enet_napi_disable(pdata); 720 xgene_enet_free_irq(ndev); 721 xgene_enet_process_ring(pdata->rx_ring, -1); 722 723 mac_ops->tx_disable(pdata); 724 mac_ops->rx_disable(pdata); 725 726 return 0; 727 } 728 729 static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring) 730 { 731 struct xgene_enet_pdata *pdata; 732 struct device *dev; 733 734 pdata = netdev_priv(ring->ndev); 735 dev = ndev_to_dev(ring->ndev); 736 737 pdata->ring_ops->clear(ring); 738 dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma); 739 } 740 741 static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata) 742 { 743 struct xgene_enet_desc_ring *buf_pool; 744 745 if (pdata->tx_ring) { 746 xgene_enet_delete_ring(pdata->tx_ring); 747 pdata->tx_ring = NULL; 748 } 749 750 if (pdata->rx_ring) { 751 buf_pool = pdata->rx_ring->buf_pool; 752 xgene_enet_delete_bufpool(buf_pool); 753 xgene_enet_delete_ring(buf_pool); 754 xgene_enet_delete_ring(pdata->rx_ring); 755 pdata->rx_ring = NULL; 756 } 757 } 758 759 static int xgene_enet_get_ring_size(struct device *dev, 760 enum xgene_enet_ring_cfgsize cfgsize) 761 { 762 int size = -EINVAL; 763 764 switch (cfgsize) { 765 case RING_CFGSIZE_512B: 766 size = 0x200; 767 break; 768 case RING_CFGSIZE_2KB: 769 size = 0x800; 770 break; 771 case RING_CFGSIZE_16KB: 772 size = 0x4000; 773 break; 774 case RING_CFGSIZE_64KB: 775 size = 0x10000; 776 break; 777 case RING_CFGSIZE_512KB: 778 size = 0x80000; 779 break; 780 default: 781 dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize); 782 break; 783 } 784 785 return size; 786 } 787 788 static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring) 789 { 790 struct xgene_enet_pdata *pdata; 791 struct device *dev; 792 793 if (!ring) 794 return; 795 796 dev = ndev_to_dev(ring->ndev); 797 pdata = netdev_priv(ring->ndev); 798 799 if (ring->desc_addr) { 800 pdata->ring_ops->clear(ring); 801 dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma); 802 } 803 devm_kfree(dev, ring); 804 } 805 806 static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata) 807 { 808 struct device *dev = &pdata->pdev->dev; 809 struct xgene_enet_desc_ring *ring; 810 811 ring = pdata->tx_ring; 812 if (ring) { 813 if (ring->cp_ring && ring->cp_ring->cp_skb) 814 devm_kfree(dev, ring->cp_ring->cp_skb); 815 if (ring->cp_ring && pdata->cq_cnt) 816 xgene_enet_free_desc_ring(ring->cp_ring); 817 xgene_enet_free_desc_ring(ring); 818 } 819 820 ring = pdata->rx_ring; 821 if (ring) { 822 if (ring->buf_pool) { 823 if (ring->buf_pool->rx_skb) 824 devm_kfree(dev, ring->buf_pool->rx_skb); 825 xgene_enet_free_desc_ring(ring->buf_pool); 826 } 827 xgene_enet_free_desc_ring(ring); 828 } 829 } 830 831 static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata, 832 struct xgene_enet_desc_ring *ring) 833 { 834 if ((pdata->enet_id == XGENE_ENET2) && 835 (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) { 836 return true; 837 } 838 839 return false; 840 } 841 842 static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata, 843 struct xgene_enet_desc_ring *ring) 844 { 845 u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift; 846 847 return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift); 848 } 849 850 static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring( 851 struct net_device *ndev, u32 ring_num, 852 enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id) 853 { 854 struct xgene_enet_desc_ring *ring; 855 struct xgene_enet_pdata *pdata = netdev_priv(ndev); 856 struct device *dev = ndev_to_dev(ndev); 857 int size; 858 859 size = xgene_enet_get_ring_size(dev, cfgsize); 860 if (size < 0) 861 return NULL; 862 863 ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring), 864 GFP_KERNEL); 865 if (!ring) 866 return NULL; 867 868 ring->ndev = ndev; 869 ring->num = ring_num; 870 ring->cfgsize = cfgsize; 871 ring->id = ring_id; 872 873 ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma, 874 GFP_KERNEL); 875 if (!ring->desc_addr) { 876 devm_kfree(dev, ring); 877 return NULL; 878 } 879 ring->size = size; 880 881 if (is_irq_mbox_required(pdata, ring)) { 882 ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE, 883 &ring->irq_mbox_dma, GFP_KERNEL); 884 if (!ring->irq_mbox_addr) { 885 dma_free_coherent(dev, size, ring->desc_addr, 886 ring->dma); 887 devm_kfree(dev, ring); 888 return NULL; 889 } 890 } 891 892 ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring); 893 ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR; 894 ring = pdata->ring_ops->setup(ring); 895 netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n", 896 ring->num, ring->size, ring->id, ring->slots); 897 898 return ring; 899 } 900 901 static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum) 902 { 903 return (owner << 6) | (bufnum & GENMASK(5, 0)); 904 } 905 906 static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p) 907 { 908 enum xgene_ring_owner owner; 909 910 if (p->enet_id == XGENE_ENET1) { 911 switch (p->phy_mode) { 912 case PHY_INTERFACE_MODE_SGMII: 913 owner = RING_OWNER_ETH0; 914 break; 915 default: 916 owner = (!p->port_id) ? RING_OWNER_ETH0 : 917 RING_OWNER_ETH1; 918 break; 919 } 920 } else { 921 owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1; 922 } 923 924 return owner; 925 } 926 927 static int xgene_enet_create_desc_rings(struct net_device *ndev) 928 { 929 struct xgene_enet_pdata *pdata = netdev_priv(ndev); 930 struct device *dev = ndev_to_dev(ndev); 931 struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring; 932 struct xgene_enet_desc_ring *buf_pool = NULL; 933 enum xgene_ring_owner owner; 934 dma_addr_t dma_exp_bufs; 935 u8 cpu_bufnum = pdata->cpu_bufnum; 936 u8 eth_bufnum = pdata->eth_bufnum; 937 u8 bp_bufnum = pdata->bp_bufnum; 938 u16 ring_num = pdata->ring_num; 939 u16 ring_id; 940 int ret, size; 941 942 /* allocate rx descriptor ring */ 943 owner = xgene_derive_ring_owner(pdata); 944 ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++); 945 rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, 946 RING_CFGSIZE_16KB, ring_id); 947 if (!rx_ring) { 948 ret = -ENOMEM; 949 goto err; 950 } 951 952 /* allocate buffer pool for receiving packets */ 953 owner = xgene_derive_ring_owner(pdata); 954 ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++); 955 buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++, 956 RING_CFGSIZE_2KB, ring_id); 957 if (!buf_pool) { 958 ret = -ENOMEM; 959 goto err; 960 } 961 962 rx_ring->nbufpool = NUM_BUFPOOL; 963 rx_ring->buf_pool = buf_pool; 964 rx_ring->irq = pdata->rx_irq; 965 if (!pdata->cq_cnt) { 966 snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc", 967 ndev->name); 968 } else { 969 snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx", ndev->name); 970 } 971 buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots, 972 sizeof(struct sk_buff *), GFP_KERNEL); 973 if (!buf_pool->rx_skb) { 974 ret = -ENOMEM; 975 goto err; 976 } 977 978 buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool); 979 rx_ring->buf_pool = buf_pool; 980 pdata->rx_ring = rx_ring; 981 982 /* allocate tx descriptor ring */ 983 owner = xgene_derive_ring_owner(pdata); 984 ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++); 985 tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, 986 RING_CFGSIZE_16KB, ring_id); 987 if (!tx_ring) { 988 ret = -ENOMEM; 989 goto err; 990 } 991 992 size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS; 993 tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, &dma_exp_bufs, 994 GFP_KERNEL); 995 if (!tx_ring->exp_bufs) { 996 ret = -ENOMEM; 997 goto err; 998 } 999 1000 pdata->tx_ring = tx_ring; 1001 1002 if (!pdata->cq_cnt) { 1003 cp_ring = pdata->rx_ring; 1004 } else { 1005 /* allocate tx completion descriptor ring */ 1006 ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++); 1007 cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++, 1008 RING_CFGSIZE_16KB, 1009 ring_id); 1010 if (!cp_ring) { 1011 ret = -ENOMEM; 1012 goto err; 1013 } 1014 cp_ring->irq = pdata->txc_irq; 1015 snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc", ndev->name); 1016 } 1017 1018 cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots, 1019 sizeof(struct sk_buff *), GFP_KERNEL); 1020 if (!cp_ring->cp_skb) { 1021 ret = -ENOMEM; 1022 goto err; 1023 } 1024 1025 size = sizeof(dma_addr_t) * MAX_SKB_FRAGS; 1026 cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots, 1027 size, GFP_KERNEL); 1028 if (!cp_ring->frag_dma_addr) { 1029 devm_kfree(dev, cp_ring->cp_skb); 1030 ret = -ENOMEM; 1031 goto err; 1032 } 1033 1034 pdata->tx_ring->cp_ring = cp_ring; 1035 pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); 1036 1037 pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2; 1038 pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2; 1039 pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2; 1040 1041 return 0; 1042 1043 err: 1044 xgene_enet_free_desc_rings(pdata); 1045 return ret; 1046 } 1047 1048 static struct rtnl_link_stats64 *xgene_enet_get_stats64( 1049 struct net_device *ndev, 1050 struct rtnl_link_stats64 *storage) 1051 { 1052 struct xgene_enet_pdata *pdata = netdev_priv(ndev); 1053 struct rtnl_link_stats64 *stats = &pdata->stats; 1054 1055 stats->rx_errors += stats->rx_length_errors + 1056 stats->rx_crc_errors + 1057 stats->rx_frame_errors + 1058 stats->rx_fifo_errors; 1059 memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64)); 1060 1061 return storage; 1062 } 1063 1064 static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr) 1065 { 1066 struct xgene_enet_pdata *pdata = netdev_priv(ndev); 1067 int ret; 1068 1069 ret = eth_mac_addr(ndev, addr); 1070 if (ret) 1071 return ret; 1072 pdata->mac_ops->set_mac_addr(pdata); 1073 1074 return ret; 1075 } 1076 1077 static const struct net_device_ops xgene_ndev_ops = { 1078 .ndo_open = xgene_enet_open, 1079 .ndo_stop = xgene_enet_close, 1080 .ndo_start_xmit = xgene_enet_start_xmit, 1081 .ndo_tx_timeout = xgene_enet_timeout, 1082 .ndo_get_stats64 = xgene_enet_get_stats64, 1083 .ndo_change_mtu = eth_change_mtu, 1084 .ndo_set_mac_address = xgene_enet_set_mac_address, 1085 }; 1086 1087 #ifdef CONFIG_ACPI 1088 static int xgene_get_port_id_acpi(struct device *dev, 1089 struct xgene_enet_pdata *pdata) 1090 { 1091 acpi_status status; 1092 u64 temp; 1093 1094 status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp); 1095 if (ACPI_FAILURE(status)) { 1096 pdata->port_id = 0; 1097 } else { 1098 pdata->port_id = temp; 1099 } 1100 1101 return 0; 1102 } 1103 #endif 1104 1105 static int xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata) 1106 { 1107 u32 id = 0; 1108 int ret; 1109 1110 ret = of_property_read_u32(dev->of_node, "port-id", &id); 1111 if (ret) { 1112 pdata->port_id = 0; 1113 ret = 0; 1114 } else { 1115 pdata->port_id = id & BIT(0); 1116 } 1117 1118 return ret; 1119 } 1120 1121 1122 static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) 1123 { 1124 struct platform_device *pdev; 1125 struct net_device *ndev; 1126 struct device *dev; 1127 struct resource *res; 1128 void __iomem *base_addr; 1129 u32 offset; 1130 int ret = 0; 1131 1132 pdev = pdata->pdev; 1133 dev = &pdev->dev; 1134 ndev = pdata->ndev; 1135 1136 res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR); 1137 if (!res) { 1138 dev_err(dev, "Resource enet_csr not defined\n"); 1139 return -ENODEV; 1140 } 1141 pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res)); 1142 if (!pdata->base_addr) { 1143 dev_err(dev, "Unable to retrieve ENET Port CSR region\n"); 1144 return -ENOMEM; 1145 } 1146 1147 res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR); 1148 if (!res) { 1149 dev_err(dev, "Resource ring_csr not defined\n"); 1150 return -ENODEV; 1151 } 1152 pdata->ring_csr_addr = devm_ioremap(dev, res->start, 1153 resource_size(res)); 1154 if (!pdata->ring_csr_addr) { 1155 dev_err(dev, "Unable to retrieve ENET Ring CSR region\n"); 1156 return -ENOMEM; 1157 } 1158 1159 res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD); 1160 if (!res) { 1161 dev_err(dev, "Resource ring_cmd not defined\n"); 1162 return -ENODEV; 1163 } 1164 pdata->ring_cmd_addr = devm_ioremap(dev, res->start, 1165 resource_size(res)); 1166 if (!pdata->ring_cmd_addr) { 1167 dev_err(dev, "Unable to retrieve ENET Ring command region\n"); 1168 return -ENOMEM; 1169 } 1170 1171 if (dev->of_node) 1172 ret = xgene_get_port_id_dt(dev, pdata); 1173 #ifdef CONFIG_ACPI 1174 else 1175 ret = xgene_get_port_id_acpi(dev, pdata); 1176 #endif 1177 if (ret) 1178 return ret; 1179 1180 if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN)) 1181 eth_hw_addr_random(ndev); 1182 1183 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); 1184 1185 pdata->phy_mode = device_get_phy_mode(dev); 1186 if (pdata->phy_mode < 0) { 1187 dev_err(dev, "Unable to get phy-connection-type\n"); 1188 return pdata->phy_mode; 1189 } 1190 if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII && 1191 pdata->phy_mode != PHY_INTERFACE_MODE_SGMII && 1192 pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) { 1193 dev_err(dev, "Incorrect phy-connection-type specified\n"); 1194 return -ENODEV; 1195 } 1196 1197 ret = platform_get_irq(pdev, 0); 1198 if (ret <= 0) { 1199 dev_err(dev, "Unable to get ENET Rx IRQ\n"); 1200 ret = ret ? : -ENXIO; 1201 return ret; 1202 } 1203 pdata->rx_irq = ret; 1204 1205 if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) { 1206 ret = platform_get_irq(pdev, 1); 1207 if (ret <= 0) { 1208 pdata->cq_cnt = 0; 1209 dev_info(dev, "Unable to get Tx completion IRQ," 1210 "using Rx IRQ instead\n"); 1211 } else { 1212 pdata->cq_cnt = XGENE_MAX_TXC_RINGS; 1213 pdata->txc_irq = ret; 1214 } 1215 } 1216 1217 pdata->clk = devm_clk_get(&pdev->dev, NULL); 1218 if (IS_ERR(pdata->clk)) { 1219 /* Firmware may have set up the clock already. */ 1220 dev_info(dev, "clocks have been setup already\n"); 1221 } 1222 1223 if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) 1224 base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET); 1225 else 1226 base_addr = pdata->base_addr; 1227 pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET; 1228 pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET; 1229 pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET; 1230 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII || 1231 pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) { 1232 pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET; 1233 offset = (pdata->enet_id == XGENE_ENET1) ? 1234 BLOCK_ETH_MAC_CSR_OFFSET : 1235 X2_BLOCK_ETH_MAC_CSR_OFFSET; 1236 pdata->mcx_mac_csr_addr = base_addr + offset; 1237 } else { 1238 pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET; 1239 pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET; 1240 } 1241 pdata->rx_buff_cnt = NUM_PKT_BUF; 1242 1243 return 0; 1244 } 1245 1246 static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) 1247 { 1248 struct net_device *ndev = pdata->ndev; 1249 struct xgene_enet_desc_ring *buf_pool; 1250 u16 dst_ring_num; 1251 int ret; 1252 1253 ret = pdata->port_ops->reset(pdata); 1254 if (ret) 1255 return ret; 1256 1257 ret = xgene_enet_create_desc_rings(ndev); 1258 if (ret) { 1259 netdev_err(ndev, "Error in ring configuration\n"); 1260 return ret; 1261 } 1262 1263 /* setup buffer pool */ 1264 buf_pool = pdata->rx_ring->buf_pool; 1265 xgene_enet_init_bufpool(buf_pool); 1266 ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt); 1267 if (ret) { 1268 xgene_enet_delete_desc_rings(pdata); 1269 return ret; 1270 } 1271 1272 dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring); 1273 pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id); 1274 pdata->mac_ops->init(pdata); 1275 1276 return ret; 1277 } 1278 1279 static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata) 1280 { 1281 switch (pdata->phy_mode) { 1282 case PHY_INTERFACE_MODE_RGMII: 1283 pdata->mac_ops = &xgene_gmac_ops; 1284 pdata->port_ops = &xgene_gport_ops; 1285 pdata->rm = RM3; 1286 break; 1287 case PHY_INTERFACE_MODE_SGMII: 1288 pdata->mac_ops = &xgene_sgmac_ops; 1289 pdata->port_ops = &xgene_sgport_ops; 1290 pdata->rm = RM1; 1291 break; 1292 default: 1293 pdata->mac_ops = &xgene_xgmac_ops; 1294 pdata->port_ops = &xgene_xgport_ops; 1295 pdata->rm = RM0; 1296 break; 1297 } 1298 1299 if (pdata->enet_id == XGENE_ENET1) { 1300 switch (pdata->port_id) { 1301 case 0: 1302 pdata->cpu_bufnum = START_CPU_BUFNUM_0; 1303 pdata->eth_bufnum = START_ETH_BUFNUM_0; 1304 pdata->bp_bufnum = START_BP_BUFNUM_0; 1305 pdata->ring_num = START_RING_NUM_0; 1306 break; 1307 case 1: 1308 pdata->cpu_bufnum = START_CPU_BUFNUM_1; 1309 pdata->eth_bufnum = START_ETH_BUFNUM_1; 1310 pdata->bp_bufnum = START_BP_BUFNUM_1; 1311 pdata->ring_num = START_RING_NUM_1; 1312 break; 1313 default: 1314 break; 1315 } 1316 pdata->ring_ops = &xgene_ring1_ops; 1317 } else { 1318 switch (pdata->port_id) { 1319 case 0: 1320 pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0; 1321 pdata->eth_bufnum = X2_START_ETH_BUFNUM_0; 1322 pdata->bp_bufnum = X2_START_BP_BUFNUM_0; 1323 pdata->ring_num = X2_START_RING_NUM_0; 1324 break; 1325 case 1: 1326 pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1; 1327 pdata->eth_bufnum = X2_START_ETH_BUFNUM_1; 1328 pdata->bp_bufnum = X2_START_BP_BUFNUM_1; 1329 pdata->ring_num = X2_START_RING_NUM_1; 1330 break; 1331 default: 1332 break; 1333 } 1334 pdata->rm = RM0; 1335 pdata->ring_ops = &xgene_ring2_ops; 1336 } 1337 } 1338 1339 static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata) 1340 { 1341 struct napi_struct *napi; 1342 1343 napi = &pdata->rx_ring->napi; 1344 netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT); 1345 1346 if (pdata->cq_cnt) { 1347 napi = &pdata->tx_ring->cp_ring->napi; 1348 netif_napi_add(pdata->ndev, napi, xgene_enet_napi, 1349 NAPI_POLL_WEIGHT); 1350 } 1351 } 1352 1353 static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata) 1354 { 1355 struct napi_struct *napi; 1356 1357 napi = &pdata->rx_ring->napi; 1358 netif_napi_del(napi); 1359 1360 if (pdata->cq_cnt) { 1361 napi = &pdata->tx_ring->cp_ring->napi; 1362 netif_napi_del(napi); 1363 } 1364 } 1365 1366 static int xgene_enet_probe(struct platform_device *pdev) 1367 { 1368 struct net_device *ndev; 1369 struct xgene_enet_pdata *pdata; 1370 struct device *dev = &pdev->dev; 1371 struct xgene_mac_ops *mac_ops; 1372 const struct of_device_id *of_id; 1373 int ret; 1374 1375 ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata)); 1376 if (!ndev) 1377 return -ENOMEM; 1378 1379 pdata = netdev_priv(ndev); 1380 1381 pdata->pdev = pdev; 1382 pdata->ndev = ndev; 1383 SET_NETDEV_DEV(ndev, dev); 1384 platform_set_drvdata(pdev, pdata); 1385 ndev->netdev_ops = &xgene_ndev_ops; 1386 xgene_enet_set_ethtool_ops(ndev); 1387 ndev->features |= NETIF_F_IP_CSUM | 1388 NETIF_F_GSO | 1389 NETIF_F_GRO | 1390 NETIF_F_SG; 1391 1392 of_id = of_match_device(xgene_enet_of_match, &pdev->dev); 1393 if (of_id) { 1394 pdata->enet_id = (enum xgene_enet_id)of_id->data; 1395 } 1396 #ifdef CONFIG_ACPI 1397 else { 1398 const struct acpi_device_id *acpi_id; 1399 1400 acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev); 1401 if (acpi_id) 1402 pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data; 1403 } 1404 #endif 1405 if (!pdata->enet_id) { 1406 free_netdev(ndev); 1407 return -ENODEV; 1408 } 1409 1410 ret = xgene_enet_get_resources(pdata); 1411 if (ret) 1412 goto err; 1413 1414 xgene_enet_setup_ops(pdata); 1415 1416 if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { 1417 ndev->features |= NETIF_F_TSO; 1418 pdata->mss = XGENE_ENET_MSS; 1419 } 1420 ndev->hw_features = ndev->features; 1421 1422 ret = register_netdev(ndev); 1423 if (ret) { 1424 netdev_err(ndev, "Failed to register netdev\n"); 1425 goto err; 1426 } 1427 1428 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); 1429 if (ret) { 1430 netdev_err(ndev, "No usable DMA configuration\n"); 1431 goto err; 1432 } 1433 1434 ret = xgene_enet_init_hw(pdata); 1435 if (ret) 1436 goto err; 1437 1438 xgene_enet_napi_add(pdata); 1439 mac_ops = pdata->mac_ops; 1440 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) 1441 ret = xgene_enet_mdio_config(pdata); 1442 else 1443 INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state); 1444 1445 return ret; 1446 err: 1447 unregister_netdev(ndev); 1448 free_netdev(ndev); 1449 return ret; 1450 } 1451 1452 static int xgene_enet_remove(struct platform_device *pdev) 1453 { 1454 struct xgene_enet_pdata *pdata; 1455 struct xgene_mac_ops *mac_ops; 1456 struct net_device *ndev; 1457 1458 pdata = platform_get_drvdata(pdev); 1459 mac_ops = pdata->mac_ops; 1460 ndev = pdata->ndev; 1461 1462 mac_ops->rx_disable(pdata); 1463 mac_ops->tx_disable(pdata); 1464 1465 xgene_enet_napi_del(pdata); 1466 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) 1467 xgene_enet_mdio_remove(pdata); 1468 unregister_netdev(ndev); 1469 xgene_enet_delete_desc_rings(pdata); 1470 pdata->port_ops->shutdown(pdata); 1471 free_netdev(ndev); 1472 1473 return 0; 1474 } 1475 1476 #ifdef CONFIG_ACPI 1477 static const struct acpi_device_id xgene_enet_acpi_match[] = { 1478 { "APMC0D05", XGENE_ENET1}, 1479 { "APMC0D30", XGENE_ENET1}, 1480 { "APMC0D31", XGENE_ENET1}, 1481 { "APMC0D26", XGENE_ENET2}, 1482 { "APMC0D25", XGENE_ENET2}, 1483 { } 1484 }; 1485 MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); 1486 #endif 1487 1488 #ifdef CONFIG_OF 1489 static const struct of_device_id xgene_enet_of_match[] = { 1490 {.compatible = "apm,xgene-enet", .data = (void *)XGENE_ENET1}, 1491 {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1}, 1492 {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1}, 1493 {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2}, 1494 {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2}, 1495 {}, 1496 }; 1497 1498 MODULE_DEVICE_TABLE(of, xgene_enet_of_match); 1499 #endif 1500 1501 static struct platform_driver xgene_enet_driver = { 1502 .driver = { 1503 .name = "xgene-enet", 1504 .of_match_table = of_match_ptr(xgene_enet_of_match), 1505 .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match), 1506 }, 1507 .probe = xgene_enet_probe, 1508 .remove = xgene_enet_remove, 1509 }; 1510 1511 module_platform_driver(xgene_enet_driver); 1512 1513 MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver"); 1514 MODULE_VERSION(XGENE_DRV_VERSION); 1515 MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>"); 1516 MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>"); 1517 MODULE_LICENSE("GPL"); 1518