1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) 2 /* Copyright 2017-2019 NXP */ 3 4 #include "enetc.h" 5 #include <linux/bpf_trace.h> 6 #include <linux/tcp.h> 7 #include <linux/udp.h> 8 #include <linux/vmalloc.h> 9 #include <linux/ptp_classify.h> 10 #include <net/ip6_checksum.h> 11 #include <net/pkt_sched.h> 12 #include <net/tso.h> 13 14 u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg) 15 { 16 return enetc_port_rd(&si->hw, reg); 17 } 18 EXPORT_SYMBOL_GPL(enetc_port_mac_rd); 19 20 void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val) 21 { 22 enetc_port_wr(&si->hw, reg, val); 23 if (si->hw_features & ENETC_SI_F_QBU) 24 enetc_port_wr(&si->hw, reg + ENETC_PMAC_OFFSET, val); 25 } 26 EXPORT_SYMBOL_GPL(enetc_port_mac_wr); 27 28 static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv) 29 { 30 int num_tx_rings = priv->num_tx_rings; 31 int i; 32 33 for (i = 0; i < priv->num_rx_rings; i++) 34 if (priv->rx_ring[i]->xdp.prog) 35 return num_tx_rings - num_possible_cpus(); 36 37 return num_tx_rings; 38 } 39 40 static struct enetc_bdr *enetc_rx_ring_from_xdp_tx_ring(struct enetc_ndev_priv *priv, 41 struct enetc_bdr *tx_ring) 42 { 43 int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring; 44 45 return priv->rx_ring[index]; 46 } 47 48 static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd) 49 { 50 if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect) 51 return NULL; 52 53 return tx_swbd->skb; 54 } 55 56 static struct xdp_frame * 57 enetc_tx_swbd_get_xdp_frame(struct enetc_tx_swbd *tx_swbd) 58 { 59 if (tx_swbd->is_xdp_redirect) 60 return tx_swbd->xdp_frame; 61 62 return NULL; 63 } 64 65 static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring, 66 struct enetc_tx_swbd *tx_swbd) 67 { 68 /* For XDP_TX, pages come from RX, whereas for the other contexts where 69 * we have is_dma_page_set, those come from skb_frag_dma_map. We need 70 * to match the DMA mapping length, so we need to differentiate those. 71 */ 72 if (tx_swbd->is_dma_page) 73 dma_unmap_page(tx_ring->dev, tx_swbd->dma, 74 tx_swbd->is_xdp_tx ? 
PAGE_SIZE : tx_swbd->len, 75 tx_swbd->dir); 76 else 77 dma_unmap_single(tx_ring->dev, tx_swbd->dma, 78 tx_swbd->len, tx_swbd->dir); 79 tx_swbd->dma = 0; 80 } 81 82 static void enetc_free_tx_frame(struct enetc_bdr *tx_ring, 83 struct enetc_tx_swbd *tx_swbd) 84 { 85 struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd); 86 struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd); 87 88 if (tx_swbd->dma) 89 enetc_unmap_tx_buff(tx_ring, tx_swbd); 90 91 if (xdp_frame) { 92 xdp_return_frame(tx_swbd->xdp_frame); 93 tx_swbd->xdp_frame = NULL; 94 } else if (skb) { 95 dev_kfree_skb_any(skb); 96 tx_swbd->skb = NULL; 97 } 98 } 99 100 /* Let H/W know BD ring has been updated */ 101 static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring) 102 { 103 /* includes wmb() */ 104 enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use); 105 } 106 107 static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp, 108 u8 *msgtype, u8 *twostep, 109 u16 *correction_offset, u16 *body_offset) 110 { 111 unsigned int ptp_class; 112 struct ptp_header *hdr; 113 unsigned int type; 114 u8 *base; 115 116 ptp_class = ptp_classify_raw(skb); 117 if (ptp_class == PTP_CLASS_NONE) 118 return -EINVAL; 119 120 hdr = ptp_parse_header(skb, ptp_class); 121 if (!hdr) 122 return -EINVAL; 123 124 type = ptp_class & PTP_CLASS_PMASK; 125 if (type == PTP_CLASS_IPV4 || type == PTP_CLASS_IPV6) 126 *udp = 1; 127 else 128 *udp = 0; 129 130 *msgtype = ptp_get_msgtype(hdr, ptp_class); 131 *twostep = hdr->flag_field[0] & 0x2; 132 133 base = skb_mac_header(skb); 134 *correction_offset = (u8 *)&hdr->correction - base; 135 *body_offset = (u8 *)hdr + sizeof(struct ptp_header) - base; 136 137 return 0; 138 } 139 140 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) 141 { 142 bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false; 143 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); 144 struct enetc_hw *hw = &priv->si->hw; 145 struct enetc_tx_swbd *tx_swbd; 146 int len = skb_headlen(skb); 147 union enetc_tx_bd temp_bd; 148 u8 msgtype, twostep, udp; 149 union enetc_tx_bd *txbd; 150 u16 offset1, offset2; 151 int i, count = 0; 152 skb_frag_t *frag; 153 unsigned int f; 154 dma_addr_t dma; 155 u8 flags = 0; 156 157 i = tx_ring->next_to_use; 158 txbd = ENETC_TXBD(*tx_ring, i); 159 prefetchw(txbd); 160 161 dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE); 162 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) 163 goto dma_err; 164 165 temp_bd.addr = cpu_to_le64(dma); 166 temp_bd.buf_len = cpu_to_le16(len); 167 temp_bd.lstatus = 0; 168 169 tx_swbd = &tx_ring->tx_swbd[i]; 170 tx_swbd->dma = dma; 171 tx_swbd->len = len; 172 tx_swbd->is_dma_page = 0; 173 tx_swbd->dir = DMA_TO_DEVICE; 174 count++; 175 176 do_vlan = skb_vlan_tag_present(skb); 177 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { 178 if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1, 179 &offset2) || 180 msgtype != PTP_MSGTYPE_SYNC || twostep) 181 WARN_ONCE(1, "Bad packet for one-step timestamping\n"); 182 else 183 do_onestep_tstamp = true; 184 } else if (skb->cb[0] & ENETC_F_TX_TSTAMP) { 185 do_twostep_tstamp = true; 186 } 187 188 tx_swbd->do_twostep_tstamp = do_twostep_tstamp; 189 tx_swbd->qbv_en = !!(priv->active_offloads & ENETC_F_QBV); 190 tx_swbd->check_wb = tx_swbd->do_twostep_tstamp || tx_swbd->qbv_en; 191 192 if (do_vlan || do_onestep_tstamp || do_twostep_tstamp) 193 flags |= ENETC_TXBD_FLAGS_EX; 194 195 if (tx_ring->tsd_enable) 196 flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART; 197 198 /* 
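* At this point every per-frame offload decision (VLAN insertion, one-step
* or two-step timestamping, the tsd_enable transmit start time) has been
* folded into 'flags';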
first BD needs frm_len and offload flags set */ 199 temp_bd.frm_len = cpu_to_le16(skb->len); 200 temp_bd.flags = flags; 201 202 if (flags & ENETC_TXBD_FLAGS_TSE) 203 temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns, 204 flags); 205 206 if (flags & ENETC_TXBD_FLAGS_EX) { 207 u8 e_flags = 0; 208 *txbd = temp_bd; 209 enetc_clear_tx_bd(&temp_bd); 210 211 /* add extension BD for VLAN and/or timestamping */ 212 flags = 0; 213 tx_swbd++; 214 txbd++; 215 i++; 216 if (unlikely(i == tx_ring->bd_count)) { 217 i = 0; 218 tx_swbd = tx_ring->tx_swbd; 219 txbd = ENETC_TXBD(*tx_ring, 0); 220 } 221 prefetchw(txbd); 222 223 if (do_vlan) { 224 temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb)); 225 temp_bd.ext.tpid = 0; /* < C-TAG */ 226 e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS; 227 } 228 229 if (do_onestep_tstamp) { 230 u32 lo, hi, val; 231 u64 sec, nsec; 232 u8 *data; 233 234 lo = enetc_rd_hot(hw, ENETC_SICTR0); 235 hi = enetc_rd_hot(hw, ENETC_SICTR1); 236 sec = (u64)hi << 32 | lo; 237 nsec = do_div(sec, 1000000000); 238 239 /* Configure extension BD */ 240 temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff); 241 e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP; 242 243 /* Update originTimestamp field of Sync packet 244 * - 48 bits seconds field 245 * - 32 bits nanseconds field 246 */ 247 data = skb_mac_header(skb); 248 *(__be16 *)(data + offset2) = 249 htons((sec >> 32) & 0xffff); 250 *(__be32 *)(data + offset2 + 2) = 251 htonl(sec & 0xffffffff); 252 *(__be32 *)(data + offset2 + 6) = htonl(nsec); 253 254 /* Configure single-step register */ 255 val = ENETC_PM0_SINGLE_STEP_EN; 256 val |= ENETC_SET_SINGLE_STEP_OFFSET(offset1); 257 if (udp) 258 val |= ENETC_PM0_SINGLE_STEP_CH; 259 260 enetc_port_mac_wr(priv->si, ENETC_PM0_SINGLE_STEP, 261 val); 262 } else if (do_twostep_tstamp) { 263 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 264 e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP; 265 } 266 267 temp_bd.ext.e_flags = e_flags; 268 count++; 269 } 270 271 frag = &skb_shinfo(skb)->frags[0]; 272 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) { 273 len = skb_frag_size(frag); 274 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, 275 DMA_TO_DEVICE); 276 if (dma_mapping_error(tx_ring->dev, dma)) 277 goto dma_err; 278 279 *txbd = temp_bd; 280 enetc_clear_tx_bd(&temp_bd); 281 282 flags = 0; 283 tx_swbd++; 284 txbd++; 285 i++; 286 if (unlikely(i == tx_ring->bd_count)) { 287 i = 0; 288 tx_swbd = tx_ring->tx_swbd; 289 txbd = ENETC_TXBD(*tx_ring, 0); 290 } 291 prefetchw(txbd); 292 293 temp_bd.addr = cpu_to_le64(dma); 294 temp_bd.buf_len = cpu_to_le16(len); 295 296 tx_swbd->dma = dma; 297 tx_swbd->len = len; 298 tx_swbd->is_dma_page = 1; 299 tx_swbd->dir = DMA_TO_DEVICE; 300 count++; 301 } 302 303 /* last BD needs 'F' bit set */ 304 flags |= ENETC_TXBD_FLAGS_F; 305 temp_bd.flags = flags; 306 *txbd = temp_bd; 307 308 tx_ring->tx_swbd[i].is_eof = true; 309 tx_ring->tx_swbd[i].skb = skb; 310 311 enetc_bdr_idx_inc(tx_ring, &i); 312 tx_ring->next_to_use = i; 313 314 skb_tx_timestamp(skb); 315 316 enetc_update_tx_ring_tail(tx_ring); 317 318 return count; 319 320 dma_err: 321 dev_err(tx_ring->dev, "DMA map error"); 322 323 do { 324 tx_swbd = &tx_ring->tx_swbd[i]; 325 enetc_free_tx_frame(tx_ring, tx_swbd); 326 if (i == 0) 327 i = tx_ring->bd_count; 328 i--; 329 } while (count--); 330 331 return 0; 332 } 333 334 static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, 335 struct enetc_tx_swbd *tx_swbd, 336 union enetc_tx_bd *txbd, int *i, int hdr_len, 337 int data_len) 338 { 339 union enetc_tx_bd 
txbd_tmp; 340 u8 flags = 0, e_flags = 0; 341 dma_addr_t addr; 342 343 enetc_clear_tx_bd(&txbd_tmp); 344 addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE; 345 346 if (skb_vlan_tag_present(skb)) 347 flags |= ENETC_TXBD_FLAGS_EX; 348 349 txbd_tmp.addr = cpu_to_le64(addr); 350 txbd_tmp.buf_len = cpu_to_le16(hdr_len); 351 352 /* first BD needs frm_len and offload flags set */ 353 txbd_tmp.frm_len = cpu_to_le16(hdr_len + data_len); 354 txbd_tmp.flags = flags; 355 356 /* For the TSO header we do not set the dma address since we do not 357 * want it unmapped when we do cleanup. We still set len so that we 358 * count the bytes sent. 359 */ 360 tx_swbd->len = hdr_len; 361 tx_swbd->do_twostep_tstamp = false; 362 tx_swbd->check_wb = false; 363 364 /* Actually write the header in the BD */ 365 *txbd = txbd_tmp; 366 367 /* Add extension BD for VLAN */ 368 if (flags & ENETC_TXBD_FLAGS_EX) { 369 /* Get the next BD */ 370 enetc_bdr_idx_inc(tx_ring, i); 371 txbd = ENETC_TXBD(*tx_ring, *i); 372 tx_swbd = &tx_ring->tx_swbd[*i]; 373 prefetchw(txbd); 374 375 /* Setup the VLAN fields */ 376 enetc_clear_tx_bd(&txbd_tmp); 377 txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb)); 378 txbd_tmp.ext.tpid = 0; /* < C-TAG */ 379 e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS; 380 381 /* Write the BD */ 382 txbd_tmp.ext.e_flags = e_flags; 383 *txbd = txbd_tmp; 384 } 385 } 386 387 static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb, 388 struct enetc_tx_swbd *tx_swbd, 389 union enetc_tx_bd *txbd, char *data, 390 int size, bool last_bd) 391 { 392 union enetc_tx_bd txbd_tmp; 393 dma_addr_t addr; 394 u8 flags = 0; 395 396 enetc_clear_tx_bd(&txbd_tmp); 397 398 addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE); 399 if (unlikely(dma_mapping_error(tx_ring->dev, addr))) { 400 netdev_err(tx_ring->ndev, "DMA map error\n"); 401 return -ENOMEM; 402 } 403 404 if (last_bd) { 405 flags |= ENETC_TXBD_FLAGS_F; 406 tx_swbd->is_eof = 1; 407 } 408 409 txbd_tmp.addr = cpu_to_le64(addr); 410 txbd_tmp.buf_len = cpu_to_le16(size); 411 txbd_tmp.flags = flags; 412 413 tx_swbd->dma = addr; 414 tx_swbd->len = size; 415 tx_swbd->dir = DMA_TO_DEVICE; 416 417 *txbd = txbd_tmp; 418 419 return 0; 420 } 421 422 static __wsum enetc_tso_hdr_csum(struct tso_t *tso, struct sk_buff *skb, 423 char *hdr, int hdr_len, int *l4_hdr_len) 424 { 425 char *l4_hdr = hdr + skb_transport_offset(skb); 426 int mac_hdr_len = skb_network_offset(skb); 427 428 if (tso->tlen != sizeof(struct udphdr)) { 429 struct tcphdr *tcph = (struct tcphdr *)(l4_hdr); 430 431 tcph->check = 0; 432 } else { 433 struct udphdr *udph = (struct udphdr *)(l4_hdr); 434 435 udph->check = 0; 436 } 437 438 /* Compute the IP checksum. This is necessary since tso_build_hdr() 439 * already incremented the IP ID field. 440 */ 441 if (!tso->ipv6) { 442 struct iphdr *iph = (void *)(hdr + mac_hdr_len); 443 444 iph->check = 0; 445 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); 446 } 447 448 /* Compute the checksum over the L4 header. */ 449 *l4_hdr_len = hdr_len - skb_transport_offset(skb); 450 return csum_partial(l4_hdr, *l4_hdr_len, 0); 451 } 452 453 static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso, 454 struct sk_buff *skb, char *hdr, int len, 455 __wsum sum) 456 { 457 char *l4_hdr = hdr + skb_transport_offset(skb); 458 __sum16 csum_final; 459 460 /* Complete the L4 checksum by appending the pseudo-header to the 461 * already computed checksum. 
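* csum_tcpudp_magic()/csum_ipv6_magic() fold the 32-bit partial sum together
* with the pseudo-header (source/destination address, protocol and length)
* into the final 16-bit one's-complement checksum; 'len' here is the number
* of bytes already summed, i.e. the L4 header plus this segment's payload.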
462 */ 463 if (!tso->ipv6) 464 csum_final = csum_tcpudp_magic(ip_hdr(skb)->saddr, 465 ip_hdr(skb)->daddr, 466 len, ip_hdr(skb)->protocol, sum); 467 else 468 csum_final = csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 469 &ipv6_hdr(skb)->daddr, 470 len, ipv6_hdr(skb)->nexthdr, sum); 471 472 if (tso->tlen != sizeof(struct udphdr)) { 473 struct tcphdr *tcph = (struct tcphdr *)(l4_hdr); 474 475 tcph->check = csum_final; 476 } else { 477 struct udphdr *udph = (struct udphdr *)(l4_hdr); 478 479 udph->check = csum_final; 480 } 481 } 482 483 static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) 484 { 485 int hdr_len, total_len, data_len; 486 struct enetc_tx_swbd *tx_swbd; 487 union enetc_tx_bd *txbd; 488 struct tso_t tso; 489 __wsum csum, csum2; 490 int count = 0, pos; 491 int err, i, bd_data_num; 492 493 /* Initialize the TSO handler, and prepare the first payload */ 494 hdr_len = tso_start(skb, &tso); 495 total_len = skb->len - hdr_len; 496 i = tx_ring->next_to_use; 497 498 while (total_len > 0) { 499 char *hdr; 500 501 /* Get the BD */ 502 txbd = ENETC_TXBD(*tx_ring, i); 503 tx_swbd = &tx_ring->tx_swbd[i]; 504 prefetchw(txbd); 505 506 /* Determine the length of this packet */ 507 data_len = min_t(int, skb_shinfo(skb)->gso_size, total_len); 508 total_len -= data_len; 509 510 /* prepare packet headers: MAC + IP + TCP */ 511 hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE; 512 tso_build_hdr(skb, hdr, &tso, data_len, total_len == 0); 513 514 /* compute the csum over the L4 header */ 515 csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos); 516 enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len); 517 bd_data_num = 0; 518 count++; 519 520 while (data_len > 0) { 521 int size; 522 523 size = min_t(int, tso.size, data_len); 524 525 /* Advance the index in the BDR */ 526 enetc_bdr_idx_inc(tx_ring, &i); 527 txbd = ENETC_TXBD(*tx_ring, i); 528 tx_swbd = &tx_ring->tx_swbd[i]; 529 prefetchw(txbd); 530 531 /* Compute the checksum over this segment of data and 532 * add it to the csum already computed (over the L4 533 * header and possible other data segments). 
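* csum_block_add() takes the running byte offset ('pos') because a block
* that starts at an odd offset must have its 16-bit sum byte-swapped before
* it can be added to the running checksum.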
534 */ 535 csum2 = csum_partial(tso.data, size, 0); 536 csum = csum_block_add(csum, csum2, pos); 537 pos += size; 538 539 err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd, 540 tso.data, size, 541 size == data_len); 542 if (err) 543 goto err_map_data; 544 545 data_len -= size; 546 count++; 547 bd_data_num++; 548 tso_build_data(skb, &tso, size); 549 550 if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len)) 551 goto err_chained_bd; 552 } 553 554 enetc_tso_complete_csum(tx_ring, &tso, skb, hdr, pos, csum); 555 556 if (total_len == 0) 557 tx_swbd->skb = skb; 558 559 /* Go to the next BD */ 560 enetc_bdr_idx_inc(tx_ring, &i); 561 } 562 563 tx_ring->next_to_use = i; 564 enetc_update_tx_ring_tail(tx_ring); 565 566 return count; 567 568 err_map_data: 569 dev_err(tx_ring->dev, "DMA map error"); 570 571 err_chained_bd: 572 do { 573 tx_swbd = &tx_ring->tx_swbd[i]; 574 enetc_free_tx_frame(tx_ring, tx_swbd); 575 if (i == 0) 576 i = tx_ring->bd_count; 577 i--; 578 } while (count--); 579 580 return 0; 581 } 582 583 static netdev_tx_t enetc_start_xmit(struct sk_buff *skb, 584 struct net_device *ndev) 585 { 586 struct enetc_ndev_priv *priv = netdev_priv(ndev); 587 struct enetc_bdr *tx_ring; 588 int count, err; 589 590 /* Queue one-step Sync packet if already locked */ 591 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { 592 if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, 593 &priv->flags)) { 594 skb_queue_tail(&priv->tx_skbs, skb); 595 return NETDEV_TX_OK; 596 } 597 } 598 599 tx_ring = priv->tx_ring[skb->queue_mapping]; 600 601 if (skb_is_gso(skb)) { 602 if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) { 603 netif_stop_subqueue(ndev, tx_ring->index); 604 return NETDEV_TX_BUSY; 605 } 606 607 enetc_lock_mdio(); 608 count = enetc_map_tx_tso_buffs(tx_ring, skb); 609 enetc_unlock_mdio(); 610 } else { 611 if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS)) 612 if (unlikely(skb_linearize(skb))) 613 goto drop_packet_err; 614 615 count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */ 616 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) { 617 netif_stop_subqueue(ndev, tx_ring->index); 618 return NETDEV_TX_BUSY; 619 } 620 621 if (skb->ip_summed == CHECKSUM_PARTIAL) { 622 err = skb_checksum_help(skb); 623 if (err) 624 goto drop_packet_err; 625 } 626 enetc_lock_mdio(); 627 count = enetc_map_tx_buffs(tx_ring, skb); 628 enetc_unlock_mdio(); 629 } 630 631 if (unlikely(!count)) 632 goto drop_packet_err; 633 634 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED) 635 netif_stop_subqueue(ndev, tx_ring->index); 636 637 return NETDEV_TX_OK; 638 639 drop_packet_err: 640 dev_kfree_skb_any(skb); 641 return NETDEV_TX_OK; 642 } 643 644 netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev) 645 { 646 struct enetc_ndev_priv *priv = netdev_priv(ndev); 647 u8 udp, msgtype, twostep; 648 u16 offset1, offset2; 649 650 /* Mark tx timestamp type on skb->cb[0] if requires */ 651 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 652 (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) { 653 skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK; 654 } else { 655 skb->cb[0] = 0; 656 } 657 658 /* Fall back to two-step timestamp if not one-step Sync packet */ 659 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { 660 if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, 661 &offset1, &offset2) || 662 msgtype != PTP_MSGTYPE_SYNC || twostep != 0) 663 skb->cb[0] = ENETC_F_TX_TSTAMP; 664 } 665 666 return enetc_start_xmit(skb, ndev); 667 } 668 
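/* Note: enetc_xmit() records the requested TX timestamp type in skb->cb[0];
 * a packet flagged for one-step timestamping that is not in fact a one-step
 * Sync message is downgraded to two-step above. Only one one-step packet may
 * be in flight at a time: enetc_start_xmit() takes the
 * ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS bit and queues follow-up packets on
 * priv->tx_skbs until the TX completion path schedules the tx_onestep_tstamp
 * work that releases the bit and submits the next queued skb.
 */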
EXPORT_SYMBOL_GPL(enetc_xmit); 669 670 static irqreturn_t enetc_msix(int irq, void *data) 671 { 672 struct enetc_int_vector *v = data; 673 int i; 674 675 enetc_lock_mdio(); 676 677 /* disable interrupts */ 678 enetc_wr_reg_hot(v->rbier, 0); 679 enetc_wr_reg_hot(v->ricr1, v->rx_ictt); 680 681 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) 682 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0); 683 684 enetc_unlock_mdio(); 685 686 napi_schedule(&v->napi); 687 688 return IRQ_HANDLED; 689 } 690 691 static void enetc_rx_dim_work(struct work_struct *w) 692 { 693 struct dim *dim = container_of(w, struct dim, work); 694 struct dim_cq_moder moder = 695 net_dim_get_rx_moderation(dim->mode, dim->profile_ix); 696 struct enetc_int_vector *v = 697 container_of(dim, struct enetc_int_vector, rx_dim); 698 699 v->rx_ictt = enetc_usecs_to_cycles(moder.usec); 700 dim->state = DIM_START_MEASURE; 701 } 702 703 static void enetc_rx_net_dim(struct enetc_int_vector *v) 704 { 705 struct dim_sample dim_sample = {}; 706 707 v->comp_cnt++; 708 709 if (!v->rx_napi_work) 710 return; 711 712 dim_update_sample(v->comp_cnt, 713 v->rx_ring.stats.packets, 714 v->rx_ring.stats.bytes, 715 &dim_sample); 716 net_dim(&v->rx_dim, dim_sample); 717 } 718 719 static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci) 720 { 721 int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK; 722 723 return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi; 724 } 725 726 static bool enetc_page_reusable(struct page *page) 727 { 728 return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1); 729 } 730 731 static void enetc_reuse_page(struct enetc_bdr *rx_ring, 732 struct enetc_rx_swbd *old) 733 { 734 struct enetc_rx_swbd *new; 735 736 new = &rx_ring->rx_swbd[rx_ring->next_to_alloc]; 737 738 /* next buf that may reuse a page */ 739 enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc); 740 741 /* copy page reference */ 742 *new = *old; 743 } 744 745 static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd, 746 u64 *tstamp) 747 { 748 u32 lo, hi, tstamp_lo; 749 750 lo = enetc_rd_hot(hw, ENETC_SICTR0); 751 hi = enetc_rd_hot(hw, ENETC_SICTR1); 752 tstamp_lo = le32_to_cpu(txbd->wb.tstamp); 753 if (lo <= tstamp_lo) 754 hi -= 1; 755 *tstamp = (u64)hi << 32 | tstamp_lo; 756 } 757 758 static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp) 759 { 760 struct skb_shared_hwtstamps shhwtstamps; 761 762 if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) { 763 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 764 shhwtstamps.hwtstamp = ns_to_ktime(tstamp); 765 skb_txtime_consumed(skb); 766 skb_tstamp_tx(skb, &shhwtstamps); 767 } 768 } 769 770 static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring, 771 struct enetc_tx_swbd *tx_swbd) 772 { 773 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); 774 struct enetc_rx_swbd rx_swbd = { 775 .dma = tx_swbd->dma, 776 .page = tx_swbd->page, 777 .page_offset = tx_swbd->page_offset, 778 .dir = tx_swbd->dir, 779 .len = tx_swbd->len, 780 }; 781 struct enetc_bdr *rx_ring; 782 783 rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring); 784 785 if (likely(enetc_swbd_unused(rx_ring))) { 786 enetc_reuse_page(rx_ring, &rx_swbd); 787 788 /* sync for use by the device */ 789 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma, 790 rx_swbd.page_offset, 791 ENETC_RXB_DMA_SIZE_XDP, 792 rx_swbd.dir); 793 794 rx_ring->stats.recycles++; 795 } else { 796 /* RX ring is already full, we need to unmap and free the 797 * page, since there's nothing useful we can do with 
it. 798 */ 799 rx_ring->stats.recycle_failures++; 800 801 dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE, 802 rx_swbd.dir); 803 __free_page(rx_swbd.page); 804 } 805 806 rx_ring->xdp.xdp_tx_in_flight--; 807 } 808 809 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) 810 { 811 int tx_frm_cnt = 0, tx_byte_cnt = 0, tx_win_drop = 0; 812 struct net_device *ndev = tx_ring->ndev; 813 struct enetc_ndev_priv *priv = netdev_priv(ndev); 814 struct enetc_tx_swbd *tx_swbd; 815 int i, bds_to_clean; 816 bool do_twostep_tstamp; 817 u64 tstamp = 0; 818 819 i = tx_ring->next_to_clean; 820 tx_swbd = &tx_ring->tx_swbd[i]; 821 822 bds_to_clean = enetc_bd_ready_count(tx_ring, i); 823 824 do_twostep_tstamp = false; 825 826 while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) { 827 struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd); 828 struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd); 829 bool is_eof = tx_swbd->is_eof; 830 831 if (unlikely(tx_swbd->check_wb)) { 832 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i); 833 834 if (txbd->flags & ENETC_TXBD_FLAGS_W && 835 tx_swbd->do_twostep_tstamp) { 836 enetc_get_tx_tstamp(&priv->si->hw, txbd, 837 &tstamp); 838 do_twostep_tstamp = true; 839 } 840 841 if (tx_swbd->qbv_en && 842 txbd->wb.status & ENETC_TXBD_STATS_WIN) 843 tx_win_drop++; 844 } 845 846 if (tx_swbd->is_xdp_tx) 847 enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd); 848 else if (likely(tx_swbd->dma)) 849 enetc_unmap_tx_buff(tx_ring, tx_swbd); 850 851 if (xdp_frame) { 852 xdp_return_frame(xdp_frame); 853 } else if (skb) { 854 if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) { 855 /* Start work to release lock for next one-step 856 * timestamping packet. And send one skb in 857 * tx_skbs queue if has. 858 */ 859 schedule_work(&priv->tx_onestep_tstamp); 860 } else if (unlikely(do_twostep_tstamp)) { 861 enetc_tstamp_tx(skb, tstamp); 862 do_twostep_tstamp = false; 863 } 864 napi_consume_skb(skb, napi_budget); 865 } 866 867 tx_byte_cnt += tx_swbd->len; 868 /* Scrub the swbd here so we don't have to do that 869 * when we reuse it during xmit 870 */ 871 memset(tx_swbd, 0, sizeof(*tx_swbd)); 872 873 bds_to_clean--; 874 tx_swbd++; 875 i++; 876 if (unlikely(i == tx_ring->bd_count)) { 877 i = 0; 878 tx_swbd = tx_ring->tx_swbd; 879 } 880 881 /* BD iteration loop end */ 882 if (is_eof) { 883 tx_frm_cnt++; 884 /* re-arm interrupt source */ 885 enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) | 886 BIT(16 + tx_ring->index)); 887 } 888 889 if (unlikely(!bds_to_clean)) 890 bds_to_clean = enetc_bd_ready_count(tx_ring, i); 891 } 892 893 tx_ring->next_to_clean = i; 894 tx_ring->stats.packets += tx_frm_cnt; 895 tx_ring->stats.bytes += tx_byte_cnt; 896 tx_ring->stats.win_drop += tx_win_drop; 897 898 if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) && 899 __netif_subqueue_stopped(ndev, tx_ring->index) && 900 (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) { 901 netif_wake_subqueue(ndev, tx_ring->index); 902 } 903 904 return tx_frm_cnt != ENETC_DEFAULT_TX_WORK; 905 } 906 907 static bool enetc_new_page(struct enetc_bdr *rx_ring, 908 struct enetc_rx_swbd *rx_swbd) 909 { 910 bool xdp = !!(rx_ring->xdp.prog); 911 struct page *page; 912 dma_addr_t addr; 913 914 page = dev_alloc_page(); 915 if (unlikely(!page)) 916 return false; 917 918 /* For XDP_TX, we forgo dma_unmap -> dma_map */ 919 rx_swbd->dir = xdp ? 
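/* With an XDP program attached, the same page may later be transmitted via
 * XDP_TX without being remapped, so the device must be able to both write
 * (RX) and read (TX) the buffer; RX-only buffers are mapped DMA_FROM_DEVICE.
 */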
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; 920 921 addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir); 922 if (unlikely(dma_mapping_error(rx_ring->dev, addr))) { 923 __free_page(page); 924 925 return false; 926 } 927 928 rx_swbd->dma = addr; 929 rx_swbd->page = page; 930 rx_swbd->page_offset = rx_ring->buffer_offset; 931 932 return true; 933 } 934 935 static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt) 936 { 937 struct enetc_rx_swbd *rx_swbd; 938 union enetc_rx_bd *rxbd; 939 int i, j; 940 941 i = rx_ring->next_to_use; 942 rx_swbd = &rx_ring->rx_swbd[i]; 943 rxbd = enetc_rxbd(rx_ring, i); 944 945 for (j = 0; j < buff_cnt; j++) { 946 /* try reuse page */ 947 if (unlikely(!rx_swbd->page)) { 948 if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) { 949 rx_ring->stats.rx_alloc_errs++; 950 break; 951 } 952 } 953 954 /* update RxBD */ 955 rxbd->w.addr = cpu_to_le64(rx_swbd->dma + 956 rx_swbd->page_offset); 957 /* clear 'R" as well */ 958 rxbd->r.lstatus = 0; 959 960 enetc_rxbd_next(rx_ring, &rxbd, &i); 961 rx_swbd = &rx_ring->rx_swbd[i]; 962 } 963 964 if (likely(j)) { 965 rx_ring->next_to_alloc = i; /* keep track from page reuse */ 966 rx_ring->next_to_use = i; 967 968 /* update ENETC's consumer index */ 969 enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use); 970 } 971 972 return j; 973 } 974 975 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK 976 static void enetc_get_rx_tstamp(struct net_device *ndev, 977 union enetc_rx_bd *rxbd, 978 struct sk_buff *skb) 979 { 980 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); 981 struct enetc_ndev_priv *priv = netdev_priv(ndev); 982 struct enetc_hw *hw = &priv->si->hw; 983 u32 lo, hi, tstamp_lo; 984 u64 tstamp; 985 986 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) { 987 lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0); 988 hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1); 989 rxbd = enetc_rxbd_ext(rxbd); 990 tstamp_lo = le32_to_cpu(rxbd->ext.tstamp); 991 if (lo <= tstamp_lo) 992 hi -= 1; 993 994 tstamp = (u64)hi << 32 | tstamp_lo; 995 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 996 shhwtstamps->hwtstamp = ns_to_ktime(tstamp); 997 } 998 } 999 #endif 1000 1001 static void enetc_get_offloads(struct enetc_bdr *rx_ring, 1002 union enetc_rx_bd *rxbd, struct sk_buff *skb) 1003 { 1004 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev); 1005 1006 /* TODO: hashing */ 1007 if (rx_ring->ndev->features & NETIF_F_RXCSUM) { 1008 u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum); 1009 1010 skb->csum = csum_unfold((__force __sum16)~htons(inet_csum)); 1011 skb->ip_summed = CHECKSUM_COMPLETE; 1012 } 1013 1014 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) { 1015 __be16 tpid = 0; 1016 1017 switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) { 1018 case 0: 1019 tpid = htons(ETH_P_8021Q); 1020 break; 1021 case 1: 1022 tpid = htons(ETH_P_8021AD); 1023 break; 1024 case 2: 1025 tpid = htons(enetc_port_rd(&priv->si->hw, 1026 ENETC_PCVLANR1)); 1027 break; 1028 case 3: 1029 tpid = htons(enetc_port_rd(&priv->si->hw, 1030 ENETC_PCVLANR2)); 1031 break; 1032 default: 1033 break; 1034 } 1035 1036 __vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt)); 1037 } 1038 1039 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK 1040 if (priv->active_offloads & ENETC_F_RX_TSTAMP) 1041 enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb); 1042 #endif 1043 } 1044 1045 /* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS, 1046 * so it needs to work with both DMA_FROM_DEVICE as well as DMA_BIDIRECTIONAL 1047 * mapped buffers. 
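* Only 'size' bytes starting at rx_swbd->page_offset are synced for the CPU,
* and the direction comes from rx_swbd->dir, so the same helper serves both
* mapping types.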
1048 */ 1049 static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring, 1050 int i, u16 size) 1051 { 1052 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; 1053 1054 dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma, 1055 rx_swbd->page_offset, 1056 size, rx_swbd->dir); 1057 return rx_swbd; 1058 } 1059 1060 /* Reuse the current page without performing half-page buffer flipping */ 1061 static void enetc_put_rx_buff(struct enetc_bdr *rx_ring, 1062 struct enetc_rx_swbd *rx_swbd) 1063 { 1064 size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset; 1065 1066 enetc_reuse_page(rx_ring, rx_swbd); 1067 1068 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, 1069 rx_swbd->page_offset, 1070 buffer_size, rx_swbd->dir); 1071 1072 rx_swbd->page = NULL; 1073 } 1074 1075 /* Reuse the current page by performing half-page buffer flipping */ 1076 static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring, 1077 struct enetc_rx_swbd *rx_swbd) 1078 { 1079 if (likely(enetc_page_reusable(rx_swbd->page))) { 1080 rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE; 1081 page_ref_inc(rx_swbd->page); 1082 1083 enetc_put_rx_buff(rx_ring, rx_swbd); 1084 } else { 1085 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, 1086 rx_swbd->dir); 1087 rx_swbd->page = NULL; 1088 } 1089 } 1090 1091 static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring, 1092 int i, u16 size) 1093 { 1094 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); 1095 struct sk_buff *skb; 1096 void *ba; 1097 1098 ba = page_address(rx_swbd->page) + rx_swbd->page_offset; 1099 skb = build_skb(ba - rx_ring->buffer_offset, ENETC_RXB_TRUESIZE); 1100 if (unlikely(!skb)) { 1101 rx_ring->stats.rx_alloc_errs++; 1102 return NULL; 1103 } 1104 1105 skb_reserve(skb, rx_ring->buffer_offset); 1106 __skb_put(skb, size); 1107 1108 enetc_flip_rx_buff(rx_ring, rx_swbd); 1109 1110 return skb; 1111 } 1112 1113 static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i, 1114 u16 size, struct sk_buff *skb) 1115 { 1116 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); 1117 1118 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page, 1119 rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE); 1120 1121 enetc_flip_rx_buff(rx_ring, rx_swbd); 1122 } 1123 1124 static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring, 1125 u32 bd_status, 1126 union enetc_rx_bd **rxbd, int *i) 1127 { 1128 if (likely(!(bd_status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK)))) 1129 return false; 1130 1131 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); 1132 enetc_rxbd_next(rx_ring, rxbd, i); 1133 1134 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { 1135 dma_rmb(); 1136 bd_status = le32_to_cpu((*rxbd)->r.lstatus); 1137 1138 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); 1139 enetc_rxbd_next(rx_ring, rxbd, i); 1140 } 1141 1142 rx_ring->ndev->stats.rx_dropped++; 1143 rx_ring->ndev->stats.rx_errors++; 1144 1145 return true; 1146 } 1147 1148 static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring, 1149 u32 bd_status, union enetc_rx_bd **rxbd, 1150 int *i, int *cleaned_cnt, int buffer_size) 1151 { 1152 struct sk_buff *skb; 1153 u16 size; 1154 1155 size = le16_to_cpu((*rxbd)->r.buf_len); 1156 skb = enetc_map_rx_buff_to_skb(rx_ring, *i, size); 1157 if (!skb) 1158 return NULL; 1159 1160 enetc_get_offloads(rx_ring, *rxbd, skb); 1161 1162 (*cleaned_cnt)++; 1163 1164 enetc_rxbd_next(rx_ring, rxbd, i); 1165 1166 /* not last BD in frame? 
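* A frame larger than the RX buffer spans several BDs: the intermediate BDs
* are assumed to hold full buffers ('buffer_size'), and only the final BD
* (F bit set) has its buf_len re-read, after a dma_rmb(), to get the length
* of the last chunk.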
*/ 1167 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { 1168 bd_status = le32_to_cpu((*rxbd)->r.lstatus); 1169 size = buffer_size; 1170 1171 if (bd_status & ENETC_RXBD_LSTATUS_F) { 1172 dma_rmb(); 1173 size = le16_to_cpu((*rxbd)->r.buf_len); 1174 } 1175 1176 enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb); 1177 1178 (*cleaned_cnt)++; 1179 1180 enetc_rxbd_next(rx_ring, rxbd, i); 1181 } 1182 1183 skb_record_rx_queue(skb, rx_ring->index); 1184 skb->protocol = eth_type_trans(skb, rx_ring->ndev); 1185 1186 return skb; 1187 } 1188 1189 #define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */ 1190 1191 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, 1192 struct napi_struct *napi, int work_limit) 1193 { 1194 int rx_frm_cnt = 0, rx_byte_cnt = 0; 1195 int cleaned_cnt, i; 1196 1197 cleaned_cnt = enetc_bd_unused(rx_ring); 1198 /* next descriptor to process */ 1199 i = rx_ring->next_to_clean; 1200 1201 while (likely(rx_frm_cnt < work_limit)) { 1202 union enetc_rx_bd *rxbd; 1203 struct sk_buff *skb; 1204 u32 bd_status; 1205 1206 if (cleaned_cnt >= ENETC_RXBD_BUNDLE) 1207 cleaned_cnt -= enetc_refill_rx_ring(rx_ring, 1208 cleaned_cnt); 1209 1210 rxbd = enetc_rxbd(rx_ring, i); 1211 bd_status = le32_to_cpu(rxbd->r.lstatus); 1212 if (!bd_status) 1213 break; 1214 1215 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index)); 1216 dma_rmb(); /* for reading other rxbd fields */ 1217 1218 if (enetc_check_bd_errors_and_consume(rx_ring, bd_status, 1219 &rxbd, &i)) 1220 break; 1221 1222 skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i, 1223 &cleaned_cnt, ENETC_RXB_DMA_SIZE); 1224 if (!skb) 1225 break; 1226 1227 rx_byte_cnt += skb->len; 1228 rx_frm_cnt++; 1229 1230 napi_gro_receive(napi, skb); 1231 } 1232 1233 rx_ring->next_to_clean = i; 1234 1235 rx_ring->stats.packets += rx_frm_cnt; 1236 rx_ring->stats.bytes += rx_byte_cnt; 1237 1238 return rx_frm_cnt; 1239 } 1240 1241 static void enetc_xdp_map_tx_buff(struct enetc_bdr *tx_ring, int i, 1242 struct enetc_tx_swbd *tx_swbd, 1243 int frm_len) 1244 { 1245 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i); 1246 1247 prefetchw(txbd); 1248 1249 enetc_clear_tx_bd(txbd); 1250 txbd->addr = cpu_to_le64(tx_swbd->dma + tx_swbd->page_offset); 1251 txbd->buf_len = cpu_to_le16(tx_swbd->len); 1252 txbd->frm_len = cpu_to_le16(frm_len); 1253 1254 memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd)); 1255 } 1256 1257 /* Puts in the TX ring one XDP frame, mapped as an array of TX software buffer 1258 * descriptors. 
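* The caller passes an already DMA-mapped array whose last element has
* is_eof set; the frame length written into every BD is the sum of the
* element lengths. If the ring does not have enough free BDs, the function
* returns false without consuming anything and the caller must unmap or
* drop the frame itself.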
1259 */ 1260 static bool enetc_xdp_tx(struct enetc_bdr *tx_ring, 1261 struct enetc_tx_swbd *xdp_tx_arr, int num_tx_swbd) 1262 { 1263 struct enetc_tx_swbd *tmp_tx_swbd = xdp_tx_arr; 1264 int i, k, frm_len = tmp_tx_swbd->len; 1265 1266 if (unlikely(enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(num_tx_swbd))) 1267 return false; 1268 1269 while (unlikely(!tmp_tx_swbd->is_eof)) { 1270 tmp_tx_swbd++; 1271 frm_len += tmp_tx_swbd->len; 1272 } 1273 1274 i = tx_ring->next_to_use; 1275 1276 for (k = 0; k < num_tx_swbd; k++) { 1277 struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[k]; 1278 1279 enetc_xdp_map_tx_buff(tx_ring, i, xdp_tx_swbd, frm_len); 1280 1281 /* last BD needs 'F' bit set */ 1282 if (xdp_tx_swbd->is_eof) { 1283 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i); 1284 1285 txbd->flags = ENETC_TXBD_FLAGS_F; 1286 } 1287 1288 enetc_bdr_idx_inc(tx_ring, &i); 1289 } 1290 1291 tx_ring->next_to_use = i; 1292 1293 return true; 1294 } 1295 1296 static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring, 1297 struct enetc_tx_swbd *xdp_tx_arr, 1298 struct xdp_frame *xdp_frame) 1299 { 1300 struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[0]; 1301 struct skb_shared_info *shinfo; 1302 void *data = xdp_frame->data; 1303 int len = xdp_frame->len; 1304 skb_frag_t *frag; 1305 dma_addr_t dma; 1306 unsigned int f; 1307 int n = 0; 1308 1309 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); 1310 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { 1311 netdev_err(tx_ring->ndev, "DMA map error\n"); 1312 return -1; 1313 } 1314 1315 xdp_tx_swbd->dma = dma; 1316 xdp_tx_swbd->dir = DMA_TO_DEVICE; 1317 xdp_tx_swbd->len = len; 1318 xdp_tx_swbd->is_xdp_redirect = true; 1319 xdp_tx_swbd->is_eof = false; 1320 xdp_tx_swbd->xdp_frame = NULL; 1321 1322 n++; 1323 1324 if (!xdp_frame_has_frags(xdp_frame)) 1325 goto out; 1326 1327 xdp_tx_swbd = &xdp_tx_arr[n]; 1328 1329 shinfo = xdp_get_shared_info_from_frame(xdp_frame); 1330 1331 for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags; 1332 f++, frag++) { 1333 data = skb_frag_address(frag); 1334 len = skb_frag_size(frag); 1335 1336 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); 1337 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { 1338 /* Undo the DMA mapping for all fragments */ 1339 while (--n >= 0) 1340 enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]); 1341 1342 netdev_err(tx_ring->ndev, "DMA map error\n"); 1343 return -1; 1344 } 1345 1346 xdp_tx_swbd->dma = dma; 1347 xdp_tx_swbd->dir = DMA_TO_DEVICE; 1348 xdp_tx_swbd->len = len; 1349 xdp_tx_swbd->is_xdp_redirect = true; 1350 xdp_tx_swbd->is_eof = false; 1351 xdp_tx_swbd->xdp_frame = NULL; 1352 1353 n++; 1354 xdp_tx_swbd = &xdp_tx_arr[n]; 1355 } 1356 out: 1357 xdp_tx_arr[n - 1].is_eof = true; 1358 xdp_tx_arr[n - 1].xdp_frame = xdp_frame; 1359 1360 return n; 1361 } 1362 1363 int enetc_xdp_xmit(struct net_device *ndev, int num_frames, 1364 struct xdp_frame **frames, u32 flags) 1365 { 1366 struct enetc_tx_swbd xdp_redirect_arr[ENETC_MAX_SKB_FRAGS] = {0}; 1367 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1368 struct enetc_bdr *tx_ring; 1369 int xdp_tx_bd_cnt, i, k; 1370 int xdp_tx_frm_cnt = 0; 1371 1372 enetc_lock_mdio(); 1373 1374 tx_ring = priv->xdp_tx_ring[smp_processor_id()]; 1375 1376 prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use)); 1377 1378 for (k = 0; k < num_frames; k++) { 1379 xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring, 1380 xdp_redirect_arr, 1381 frames[k]); 1382 if (unlikely(xdp_tx_bd_cnt < 0)) 1383 break; 1384 1385 if (unlikely(!enetc_xdp_tx(tx_ring, 
xdp_redirect_arr, 1386 xdp_tx_bd_cnt))) { 1387 for (i = 0; i < xdp_tx_bd_cnt; i++) 1388 enetc_unmap_tx_buff(tx_ring, 1389 &xdp_redirect_arr[i]); 1390 tx_ring->stats.xdp_tx_drops++; 1391 break; 1392 } 1393 1394 xdp_tx_frm_cnt++; 1395 } 1396 1397 if (unlikely((flags & XDP_XMIT_FLUSH) || k != xdp_tx_frm_cnt)) 1398 enetc_update_tx_ring_tail(tx_ring); 1399 1400 tx_ring->stats.xdp_tx += xdp_tx_frm_cnt; 1401 1402 enetc_unlock_mdio(); 1403 1404 return xdp_tx_frm_cnt; 1405 } 1406 EXPORT_SYMBOL_GPL(enetc_xdp_xmit); 1407 1408 static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i, 1409 struct xdp_buff *xdp_buff, u16 size) 1410 { 1411 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); 1412 void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset; 1413 1414 /* To be used for XDP_TX */ 1415 rx_swbd->len = size; 1416 1417 xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset, 1418 rx_ring->buffer_offset, size, false); 1419 } 1420 1421 static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i, 1422 u16 size, struct xdp_buff *xdp_buff) 1423 { 1424 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff); 1425 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); 1426 skb_frag_t *frag; 1427 1428 /* To be used for XDP_TX */ 1429 rx_swbd->len = size; 1430 1431 if (!xdp_buff_has_frags(xdp_buff)) { 1432 xdp_buff_set_frags_flag(xdp_buff); 1433 shinfo->xdp_frags_size = size; 1434 shinfo->nr_frags = 0; 1435 } else { 1436 shinfo->xdp_frags_size += size; 1437 } 1438 1439 if (page_is_pfmemalloc(rx_swbd->page)) 1440 xdp_buff_set_frag_pfmemalloc(xdp_buff); 1441 1442 frag = &shinfo->frags[shinfo->nr_frags]; 1443 skb_frag_off_set(frag, rx_swbd->page_offset); 1444 skb_frag_size_set(frag, size); 1445 __skb_frag_set_page(frag, rx_swbd->page); 1446 1447 shinfo->nr_frags++; 1448 } 1449 1450 static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status, 1451 union enetc_rx_bd **rxbd, int *i, 1452 int *cleaned_cnt, struct xdp_buff *xdp_buff) 1453 { 1454 u16 size = le16_to_cpu((*rxbd)->r.buf_len); 1455 1456 xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq); 1457 1458 enetc_map_rx_buff_to_xdp(rx_ring, *i, xdp_buff, size); 1459 (*cleaned_cnt)++; 1460 enetc_rxbd_next(rx_ring, rxbd, i); 1461 1462 /* not last BD in frame? */ 1463 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { 1464 bd_status = le32_to_cpu((*rxbd)->r.lstatus); 1465 size = ENETC_RXB_DMA_SIZE_XDP; 1466 1467 if (bd_status & ENETC_RXBD_LSTATUS_F) { 1468 dma_rmb(); 1469 size = le16_to_cpu((*rxbd)->r.buf_len); 1470 } 1471 1472 enetc_add_rx_buff_to_xdp(rx_ring, *i, size, xdp_buff); 1473 (*cleaned_cnt)++; 1474 enetc_rxbd_next(rx_ring, rxbd, i); 1475 } 1476 } 1477 1478 /* Convert RX buffer descriptors to TX buffer descriptors. These will be 1479 * recycled back into the RX ring in enetc_clean_tx_ring. 
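* No new DMA mapping is needed: XDP RX pages are already mapped
* DMA_BIDIRECTIONAL, so dma/page/page_offset are copied verbatim and
* is_xdp_tx tells the TX completion path to recycle the page through
* enetc_recycle_xdp_tx_buff() instead of unmapping it. The caller bumps
* rx_ring->xdp.xdp_tx_in_flight for every buffer handed over this way.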
1480 */ 1481 static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *xdp_tx_arr, 1482 struct enetc_bdr *rx_ring, 1483 int rx_ring_first, int rx_ring_last) 1484 { 1485 int n = 0; 1486 1487 for (; rx_ring_first != rx_ring_last; 1488 n++, enetc_bdr_idx_inc(rx_ring, &rx_ring_first)) { 1489 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; 1490 struct enetc_tx_swbd *tx_swbd = &xdp_tx_arr[n]; 1491 1492 /* No need to dma_map, we already have DMA_BIDIRECTIONAL */ 1493 tx_swbd->dma = rx_swbd->dma; 1494 tx_swbd->dir = rx_swbd->dir; 1495 tx_swbd->page = rx_swbd->page; 1496 tx_swbd->page_offset = rx_swbd->page_offset; 1497 tx_swbd->len = rx_swbd->len; 1498 tx_swbd->is_dma_page = true; 1499 tx_swbd->is_xdp_tx = true; 1500 tx_swbd->is_eof = false; 1501 } 1502 1503 /* We rely on caller providing an rx_ring_last > rx_ring_first */ 1504 xdp_tx_arr[n - 1].is_eof = true; 1505 1506 return n; 1507 } 1508 1509 static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first, 1510 int rx_ring_last) 1511 { 1512 while (rx_ring_first != rx_ring_last) { 1513 enetc_put_rx_buff(rx_ring, 1514 &rx_ring->rx_swbd[rx_ring_first]); 1515 enetc_bdr_idx_inc(rx_ring, &rx_ring_first); 1516 } 1517 rx_ring->stats.xdp_drops++; 1518 } 1519 1520 static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, 1521 struct napi_struct *napi, int work_limit, 1522 struct bpf_prog *prog) 1523 { 1524 int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0, xdp_redirect_frm_cnt = 0; 1525 struct enetc_tx_swbd xdp_tx_arr[ENETC_MAX_SKB_FRAGS] = {0}; 1526 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev); 1527 int rx_frm_cnt = 0, rx_byte_cnt = 0; 1528 struct enetc_bdr *tx_ring; 1529 int cleaned_cnt, i; 1530 u32 xdp_act; 1531 1532 cleaned_cnt = enetc_bd_unused(rx_ring); 1533 /* next descriptor to process */ 1534 i = rx_ring->next_to_clean; 1535 1536 while (likely(rx_frm_cnt < work_limit)) { 1537 union enetc_rx_bd *rxbd, *orig_rxbd; 1538 int orig_i, orig_cleaned_cnt; 1539 struct xdp_buff xdp_buff; 1540 struct sk_buff *skb; 1541 u32 bd_status; 1542 int err; 1543 1544 rxbd = enetc_rxbd(rx_ring, i); 1545 bd_status = le32_to_cpu(rxbd->r.lstatus); 1546 if (!bd_status) 1547 break; 1548 1549 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index)); 1550 dma_rmb(); /* for reading other rxbd fields */ 1551 1552 if (enetc_check_bd_errors_and_consume(rx_ring, bd_status, 1553 &rxbd, &i)) 1554 break; 1555 1556 orig_rxbd = rxbd; 1557 orig_cleaned_cnt = cleaned_cnt; 1558 orig_i = i; 1559 1560 enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i, 1561 &cleaned_cnt, &xdp_buff); 1562 1563 xdp_act = bpf_prog_run_xdp(prog, &xdp_buff); 1564 1565 switch (xdp_act) { 1566 default: 1567 bpf_warn_invalid_xdp_action(rx_ring->ndev, prog, xdp_act); 1568 fallthrough; 1569 case XDP_ABORTED: 1570 trace_xdp_exception(rx_ring->ndev, prog, xdp_act); 1571 fallthrough; 1572 case XDP_DROP: 1573 enetc_xdp_drop(rx_ring, orig_i, i); 1574 break; 1575 case XDP_PASS: 1576 rxbd = orig_rxbd; 1577 cleaned_cnt = orig_cleaned_cnt; 1578 i = orig_i; 1579 1580 skb = enetc_build_skb(rx_ring, bd_status, &rxbd, 1581 &i, &cleaned_cnt, 1582 ENETC_RXB_DMA_SIZE_XDP); 1583 if (unlikely(!skb)) 1584 goto out; 1585 1586 napi_gro_receive(napi, skb); 1587 break; 1588 case XDP_TX: 1589 tx_ring = priv->xdp_tx_ring[rx_ring->index]; 1590 xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr, 1591 rx_ring, 1592 orig_i, i); 1593 1594 if (!enetc_xdp_tx(tx_ring, xdp_tx_arr, xdp_tx_bd_cnt)) { 1595 enetc_xdp_drop(rx_ring, orig_i, i); 1596 tx_ring->stats.xdp_tx_drops++; 1597 } else { 1598 tx_ring->stats.xdp_tx 
+= xdp_tx_bd_cnt; 1599 rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt; 1600 xdp_tx_frm_cnt++; 1601 /* The XDP_TX enqueue was successful, so we 1602 * need to scrub the RX software BDs because 1603 * the ownership of the buffers no longer 1604 * belongs to the RX ring, and we must prevent 1605 * enetc_refill_rx_ring() from reusing 1606 * rx_swbd->page. 1607 */ 1608 while (orig_i != i) { 1609 rx_ring->rx_swbd[orig_i].page = NULL; 1610 enetc_bdr_idx_inc(rx_ring, &orig_i); 1611 } 1612 } 1613 break; 1614 case XDP_REDIRECT: 1615 err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog); 1616 if (unlikely(err)) { 1617 enetc_xdp_drop(rx_ring, orig_i, i); 1618 rx_ring->stats.xdp_redirect_failures++; 1619 } else { 1620 while (orig_i != i) { 1621 enetc_flip_rx_buff(rx_ring, 1622 &rx_ring->rx_swbd[orig_i]); 1623 enetc_bdr_idx_inc(rx_ring, &orig_i); 1624 } 1625 xdp_redirect_frm_cnt++; 1626 rx_ring->stats.xdp_redirect++; 1627 } 1628 } 1629 1630 rx_frm_cnt++; 1631 } 1632 1633 out: 1634 rx_ring->next_to_clean = i; 1635 1636 rx_ring->stats.packets += rx_frm_cnt; 1637 rx_ring->stats.bytes += rx_byte_cnt; 1638 1639 if (xdp_redirect_frm_cnt) 1640 xdp_do_flush_map(); 1641 1642 if (xdp_tx_frm_cnt) 1643 enetc_update_tx_ring_tail(tx_ring); 1644 1645 if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight) 1646 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) - 1647 rx_ring->xdp.xdp_tx_in_flight); 1648 1649 return rx_frm_cnt; 1650 } 1651 1652 static int enetc_poll(struct napi_struct *napi, int budget) 1653 { 1654 struct enetc_int_vector 1655 *v = container_of(napi, struct enetc_int_vector, napi); 1656 struct enetc_bdr *rx_ring = &v->rx_ring; 1657 struct bpf_prog *prog; 1658 bool complete = true; 1659 int work_done; 1660 int i; 1661 1662 enetc_lock_mdio(); 1663 1664 for (i = 0; i < v->count_tx_rings; i++) 1665 if (!enetc_clean_tx_ring(&v->tx_ring[i], budget)) 1666 complete = false; 1667 1668 prog = rx_ring->xdp.prog; 1669 if (prog) 1670 work_done = enetc_clean_rx_ring_xdp(rx_ring, napi, budget, prog); 1671 else 1672 work_done = enetc_clean_rx_ring(rx_ring, napi, budget); 1673 if (work_done == budget) 1674 complete = false; 1675 if (work_done) 1676 v->rx_napi_work = true; 1677 1678 if (!complete) { 1679 enetc_unlock_mdio(); 1680 return budget; 1681 } 1682 1683 napi_complete_done(napi, work_done); 1684 1685 if (likely(v->rx_dim_en)) 1686 enetc_rx_net_dim(v); 1687 1688 v->rx_napi_work = false; 1689 1690 /* enable interrupts */ 1691 enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE); 1692 1693 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) 1694 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 1695 ENETC_TBIER_TXTIE); 1696 1697 enetc_unlock_mdio(); 1698 1699 return work_done; 1700 } 1701 1702 /* Probing and Init */ 1703 #define ENETC_MAX_RFS_SIZE 64 1704 void enetc_get_si_caps(struct enetc_si *si) 1705 { 1706 struct enetc_hw *hw = &si->hw; 1707 u32 val; 1708 1709 /* find out how many of various resources we have to work with */ 1710 val = enetc_rd(hw, ENETC_SICAPR0); 1711 si->num_rx_rings = (val >> 16) & 0xff; 1712 si->num_tx_rings = val & 0xff; 1713 1714 val = enetc_rd(hw, ENETC_SIRFSCAPR); 1715 si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val); 1716 si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE); 1717 1718 si->num_rss = 0; 1719 val = enetc_rd(hw, ENETC_SIPCAPR0); 1720 if (val & ENETC_SIPCAPR0_RSS) { 1721 u32 rss; 1722 1723 rss = enetc_rd(hw, ENETC_SIRSSCAPR); 1724 si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss); 1725 } 1726 1727 if (val & ENETC_SIPCAPR0_QBV) 1728 si->hw_features |= 
ENETC_SI_F_QBV; 1729 1730 if (val & ENETC_SIPCAPR0_QBU) 1731 si->hw_features |= ENETC_SI_F_QBU; 1732 1733 if (val & ENETC_SIPCAPR0_PSFP) 1734 si->hw_features |= ENETC_SI_F_PSFP; 1735 } 1736 EXPORT_SYMBOL_GPL(enetc_get_si_caps); 1737 1738 static int enetc_dma_alloc_bdr(struct enetc_bdr_resource *res) 1739 { 1740 size_t bd_base_size = res->bd_count * res->bd_size; 1741 1742 res->bd_base = dma_alloc_coherent(res->dev, bd_base_size, 1743 &res->bd_dma_base, GFP_KERNEL); 1744 if (!res->bd_base) 1745 return -ENOMEM; 1746 1747 /* h/w requires 128B alignment */ 1748 if (!IS_ALIGNED(res->bd_dma_base, 128)) { 1749 dma_free_coherent(res->dev, bd_base_size, res->bd_base, 1750 res->bd_dma_base); 1751 return -EINVAL; 1752 } 1753 1754 return 0; 1755 } 1756 1757 static void enetc_dma_free_bdr(const struct enetc_bdr_resource *res) 1758 { 1759 size_t bd_base_size = res->bd_count * res->bd_size; 1760 1761 dma_free_coherent(res->dev, bd_base_size, res->bd_base, 1762 res->bd_dma_base); 1763 } 1764 1765 static int enetc_alloc_tx_resource(struct enetc_bdr_resource *res, 1766 struct device *dev, size_t bd_count) 1767 { 1768 int err; 1769 1770 res->dev = dev; 1771 res->bd_count = bd_count; 1772 res->bd_size = sizeof(union enetc_tx_bd); 1773 1774 res->tx_swbd = vzalloc(bd_count * sizeof(*res->tx_swbd)); 1775 if (!res->tx_swbd) 1776 return -ENOMEM; 1777 1778 err = enetc_dma_alloc_bdr(res); 1779 if (err) 1780 goto err_alloc_bdr; 1781 1782 res->tso_headers = dma_alloc_coherent(dev, bd_count * TSO_HEADER_SIZE, 1783 &res->tso_headers_dma, 1784 GFP_KERNEL); 1785 if (!res->tso_headers) { 1786 err = -ENOMEM; 1787 goto err_alloc_tso; 1788 } 1789 1790 return 0; 1791 1792 err_alloc_tso: 1793 enetc_dma_free_bdr(res); 1794 err_alloc_bdr: 1795 vfree(res->tx_swbd); 1796 res->tx_swbd = NULL; 1797 1798 return err; 1799 } 1800 1801 static void enetc_free_tx_resource(const struct enetc_bdr_resource *res) 1802 { 1803 dma_free_coherent(res->dev, res->bd_count * TSO_HEADER_SIZE, 1804 res->tso_headers, res->tso_headers_dma); 1805 enetc_dma_free_bdr(res); 1806 vfree(res->tx_swbd); 1807 } 1808 1809 static struct enetc_bdr_resource * 1810 enetc_alloc_tx_resources(struct enetc_ndev_priv *priv) 1811 { 1812 struct enetc_bdr_resource *tx_res; 1813 int i, err; 1814 1815 tx_res = kcalloc(priv->num_tx_rings, sizeof(*tx_res), GFP_KERNEL); 1816 if (!tx_res) 1817 return ERR_PTR(-ENOMEM); 1818 1819 for (i = 0; i < priv->num_tx_rings; i++) { 1820 struct enetc_bdr *tx_ring = priv->tx_ring[i]; 1821 1822 err = enetc_alloc_tx_resource(&tx_res[i], tx_ring->dev, 1823 tx_ring->bd_count); 1824 if (err) 1825 goto fail; 1826 } 1827 1828 return tx_res; 1829 1830 fail: 1831 while (i-- > 0) 1832 enetc_free_tx_resource(&tx_res[i]); 1833 1834 kfree(tx_res); 1835 1836 return ERR_PTR(err); 1837 } 1838 1839 static void enetc_free_tx_resources(const struct enetc_bdr_resource *tx_res, 1840 size_t num_resources) 1841 { 1842 size_t i; 1843 1844 for (i = 0; i < num_resources; i++) 1845 enetc_free_tx_resource(&tx_res[i]); 1846 1847 kfree(tx_res); 1848 } 1849 1850 static int enetc_alloc_rx_resource(struct enetc_bdr_resource *res, 1851 struct device *dev, size_t bd_count, 1852 bool extended) 1853 { 1854 int err; 1855 1856 res->dev = dev; 1857 res->bd_count = bd_count; 1858 res->bd_size = sizeof(union enetc_rx_bd); 1859 if (extended) 1860 res->bd_size *= 2; 1861 1862 res->rx_swbd = vzalloc(bd_count * sizeof(struct enetc_rx_swbd)); 1863 if (!res->rx_swbd) 1864 return -ENOMEM; 1865 1866 err = enetc_dma_alloc_bdr(res); 1867 if (err) { 1868 vfree(res->rx_swbd); 1869 return err; 1870 
} 1871 1872 return 0; 1873 } 1874 1875 static void enetc_free_rx_resource(const struct enetc_bdr_resource *res) 1876 { 1877 enetc_dma_free_bdr(res); 1878 vfree(res->rx_swbd); 1879 } 1880 1881 static struct enetc_bdr_resource * 1882 enetc_alloc_rx_resources(struct enetc_ndev_priv *priv, bool extended) 1883 { 1884 struct enetc_bdr_resource *rx_res; 1885 int i, err; 1886 1887 rx_res = kcalloc(priv->num_rx_rings, sizeof(*rx_res), GFP_KERNEL); 1888 if (!rx_res) 1889 return ERR_PTR(-ENOMEM); 1890 1891 for (i = 0; i < priv->num_rx_rings; i++) { 1892 struct enetc_bdr *rx_ring = priv->rx_ring[i]; 1893 1894 err = enetc_alloc_rx_resource(&rx_res[i], rx_ring->dev, 1895 rx_ring->bd_count, extended); 1896 if (err) 1897 goto fail; 1898 } 1899 1900 return rx_res; 1901 1902 fail: 1903 while (i-- > 0) 1904 enetc_free_rx_resource(&rx_res[i]); 1905 1906 kfree(rx_res); 1907 1908 return ERR_PTR(err); 1909 } 1910 1911 static void enetc_free_rx_resources(const struct enetc_bdr_resource *rx_res, 1912 size_t num_resources) 1913 { 1914 size_t i; 1915 1916 for (i = 0; i < num_resources; i++) 1917 enetc_free_rx_resource(&rx_res[i]); 1918 1919 kfree(rx_res); 1920 } 1921 1922 static void enetc_assign_tx_resource(struct enetc_bdr *tx_ring, 1923 const struct enetc_bdr_resource *res) 1924 { 1925 tx_ring->bd_base = res ? res->bd_base : NULL; 1926 tx_ring->bd_dma_base = res ? res->bd_dma_base : 0; 1927 tx_ring->tx_swbd = res ? res->tx_swbd : NULL; 1928 tx_ring->tso_headers = res ? res->tso_headers : NULL; 1929 tx_ring->tso_headers_dma = res ? res->tso_headers_dma : 0; 1930 } 1931 1932 static void enetc_assign_rx_resource(struct enetc_bdr *rx_ring, 1933 const struct enetc_bdr_resource *res) 1934 { 1935 rx_ring->bd_base = res ? res->bd_base : NULL; 1936 rx_ring->bd_dma_base = res ? res->bd_dma_base : 0; 1937 rx_ring->rx_swbd = res ? res->rx_swbd : NULL; 1938 } 1939 1940 static void enetc_assign_tx_resources(struct enetc_ndev_priv *priv, 1941 const struct enetc_bdr_resource *res) 1942 { 1943 int i; 1944 1945 if (priv->tx_res) 1946 enetc_free_tx_resources(priv->tx_res, priv->num_tx_rings); 1947 1948 for (i = 0; i < priv->num_tx_rings; i++) { 1949 enetc_assign_tx_resource(priv->tx_ring[i], 1950 res ? &res[i] : NULL); 1951 } 1952 1953 priv->tx_res = res; 1954 } 1955 1956 static void enetc_assign_rx_resources(struct enetc_ndev_priv *priv, 1957 const struct enetc_bdr_resource *res) 1958 { 1959 int i; 1960 1961 if (priv->rx_res) 1962 enetc_free_rx_resources(priv->rx_res, priv->num_rx_rings); 1963 1964 for (i = 0; i < priv->num_rx_rings; i++) { 1965 enetc_assign_rx_resource(priv->rx_ring[i], 1966 res ? 
&res[i] : NULL); 1967 } 1968 1969 priv->rx_res = res; 1970 } 1971 1972 static void enetc_free_tx_ring(struct enetc_bdr *tx_ring) 1973 { 1974 int i; 1975 1976 for (i = 0; i < tx_ring->bd_count; i++) { 1977 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; 1978 1979 enetc_free_tx_frame(tx_ring, tx_swbd); 1980 } 1981 } 1982 1983 static void enetc_free_rx_ring(struct enetc_bdr *rx_ring) 1984 { 1985 int i; 1986 1987 for (i = 0; i < rx_ring->bd_count; i++) { 1988 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; 1989 1990 if (!rx_swbd->page) 1991 continue; 1992 1993 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, 1994 rx_swbd->dir); 1995 __free_page(rx_swbd->page); 1996 rx_swbd->page = NULL; 1997 } 1998 } 1999 2000 static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv) 2001 { 2002 int i; 2003 2004 for (i = 0; i < priv->num_rx_rings; i++) 2005 enetc_free_rx_ring(priv->rx_ring[i]); 2006 2007 for (i = 0; i < priv->num_tx_rings; i++) 2008 enetc_free_tx_ring(priv->tx_ring[i]); 2009 } 2010 2011 static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups) 2012 { 2013 int *rss_table; 2014 int i; 2015 2016 rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL); 2017 if (!rss_table) 2018 return -ENOMEM; 2019 2020 /* Set up RSS table defaults */ 2021 for (i = 0; i < si->num_rss; i++) 2022 rss_table[i] = i % num_groups; 2023 2024 enetc_set_rss_table(si, rss_table, si->num_rss); 2025 2026 kfree(rss_table); 2027 2028 return 0; 2029 } 2030 2031 int enetc_configure_si(struct enetc_ndev_priv *priv) 2032 { 2033 struct enetc_si *si = priv->si; 2034 struct enetc_hw *hw = &si->hw; 2035 int err; 2036 2037 /* set SI cache attributes */ 2038 enetc_wr(hw, ENETC_SICAR0, 2039 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); 2040 enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI); 2041 /* enable SI */ 2042 enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN); 2043 2044 if (si->num_rss) { 2045 err = enetc_setup_default_rss_table(si, priv->num_rx_rings); 2046 if (err) 2047 return err; 2048 } 2049 2050 return 0; 2051 } 2052 EXPORT_SYMBOL_GPL(enetc_configure_si); 2053 2054 void enetc_init_si_rings_params(struct enetc_ndev_priv *priv) 2055 { 2056 struct enetc_si *si = priv->si; 2057 int cpus = num_online_cpus(); 2058 2059 priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE; 2060 priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE; 2061 2062 /* Enable all available TX rings in order to configure as many 2063 * priorities as possible, when needed. 
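* RX rings, by contrast, are capped at one per online CPU (and at what the
* station interface provides), with one ring interrupt vector per CPU.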
2064 * TODO: Make # of TX rings run-time configurable 2065 */ 2066 priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings); 2067 priv->num_tx_rings = si->num_tx_rings; 2068 priv->bdr_int_num = cpus; 2069 priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL; 2070 priv->tx_ictt = ENETC_TXIC_TIMETHR; 2071 } 2072 EXPORT_SYMBOL_GPL(enetc_init_si_rings_params); 2073 2074 int enetc_alloc_si_resources(struct enetc_ndev_priv *priv) 2075 { 2076 struct enetc_si *si = priv->si; 2077 2078 priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules), 2079 GFP_KERNEL); 2080 if (!priv->cls_rules) 2081 return -ENOMEM; 2082 2083 return 0; 2084 } 2085 EXPORT_SYMBOL_GPL(enetc_alloc_si_resources); 2086 2087 void enetc_free_si_resources(struct enetc_ndev_priv *priv) 2088 { 2089 kfree(priv->cls_rules); 2090 } 2091 EXPORT_SYMBOL_GPL(enetc_free_si_resources); 2092 2093 static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) 2094 { 2095 int idx = tx_ring->index; 2096 u32 tbmr; 2097 2098 enetc_txbdr_wr(hw, idx, ENETC_TBBAR0, 2099 lower_32_bits(tx_ring->bd_dma_base)); 2100 2101 enetc_txbdr_wr(hw, idx, ENETC_TBBAR1, 2102 upper_32_bits(tx_ring->bd_dma_base)); 2103 2104 WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */ 2105 enetc_txbdr_wr(hw, idx, ENETC_TBLENR, 2106 ENETC_RTBLENR_LEN(tx_ring->bd_count)); 2107 2108 /* clearing PI/CI registers for Tx not supported, adjust sw indexes */ 2109 tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR); 2110 tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR); 2111 2112 /* enable Tx ints by setting pkt thr to 1 */ 2113 enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1); 2114 2115 tbmr = ENETC_TBMR_SET_PRIO(tx_ring->prio); 2116 if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) 2117 tbmr |= ENETC_TBMR_VIH; 2118 2119 /* enable ring */ 2120 enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr); 2121 2122 tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR); 2123 tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR); 2124 tx_ring->idr = hw->reg + ENETC_SITXIDR; 2125 } 2126 2127 static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring, 2128 bool extended) 2129 { 2130 int idx = rx_ring->index; 2131 u32 rbmr = 0; 2132 2133 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0, 2134 lower_32_bits(rx_ring->bd_dma_base)); 2135 2136 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1, 2137 upper_32_bits(rx_ring->bd_dma_base)); 2138 2139 WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */ 2140 enetc_rxbdr_wr(hw, idx, ENETC_RBLENR, 2141 ENETC_RTBLENR_LEN(rx_ring->bd_count)); 2142 2143 if (rx_ring->xdp.prog) 2144 enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE_XDP); 2145 else 2146 enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE); 2147 2148 /* Also prepare the consumer index in case page allocation never 2149 * succeeds. In that case, hardware will never advance producer index 2150 * to match consumer index, and will drop all frames. 
2151 */ 2152 enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0); 2153 enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1); 2154 2155 /* enable Rx ints by setting pkt thr to 1 */ 2156 enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1); 2157 2158 rx_ring->ext_en = extended; 2159 if (rx_ring->ext_en) 2160 rbmr |= ENETC_RBMR_BDS; 2161 2162 if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) 2163 rbmr |= ENETC_RBMR_VTE; 2164 2165 rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR); 2166 rx_ring->idr = hw->reg + ENETC_SIRXIDR; 2167 2168 rx_ring->next_to_clean = 0; 2169 rx_ring->next_to_use = 0; 2170 rx_ring->next_to_alloc = 0; 2171 2172 enetc_lock_mdio(); 2173 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring)); 2174 enetc_unlock_mdio(); 2175 2176 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); 2177 } 2178 2179 static void enetc_setup_bdrs(struct enetc_ndev_priv *priv, bool extended) 2180 { 2181 struct enetc_hw *hw = &priv->si->hw; 2182 int i; 2183 2184 for (i = 0; i < priv->num_tx_rings; i++) 2185 enetc_setup_txbdr(hw, priv->tx_ring[i]); 2186 2187 for (i = 0; i < priv->num_rx_rings; i++) 2188 enetc_setup_rxbdr(hw, priv->rx_ring[i], extended); 2189 } 2190 2191 static void enetc_enable_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) 2192 { 2193 int idx = tx_ring->index; 2194 u32 tbmr; 2195 2196 tbmr = enetc_txbdr_rd(hw, idx, ENETC_TBMR); 2197 tbmr |= ENETC_TBMR_EN; 2198 enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr); 2199 } 2200 2201 static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) 2202 { 2203 int idx = rx_ring->index; 2204 u32 rbmr; 2205 2206 rbmr = enetc_rxbdr_rd(hw, idx, ENETC_RBMR); 2207 rbmr |= ENETC_RBMR_EN; 2208 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); 2209 } 2210 2211 static void enetc_enable_bdrs(struct enetc_ndev_priv *priv) 2212 { 2213 struct enetc_hw *hw = &priv->si->hw; 2214 int i; 2215 2216 for (i = 0; i < priv->num_tx_rings; i++) 2217 enetc_enable_txbdr(hw, priv->tx_ring[i]); 2218 2219 for (i = 0; i < priv->num_rx_rings; i++) 2220 enetc_enable_rxbdr(hw, priv->rx_ring[i]); 2221 } 2222 2223 static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) 2224 { 2225 int idx = rx_ring->index; 2226 2227 /* disable EN bit on ring */ 2228 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0); 2229 } 2230 2231 static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) 2232 { 2233 int idx = rx_ring->index; 2234 2235 /* disable EN bit on ring */ 2236 enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0); 2237 } 2238 2239 static void enetc_disable_bdrs(struct enetc_ndev_priv *priv) 2240 { 2241 struct enetc_hw *hw = &priv->si->hw; 2242 int i; 2243 2244 for (i = 0; i < priv->num_tx_rings; i++) 2245 enetc_disable_txbdr(hw, priv->tx_ring[i]); 2246 2247 for (i = 0; i < priv->num_rx_rings; i++) 2248 enetc_disable_rxbdr(hw, priv->rx_ring[i]); 2249 } 2250 2251 static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) 2252 { 2253 int delay = 8, timeout = 100; 2254 int idx = tx_ring->index; 2255 2256 /* wait for busy to clear */ 2257 while (delay < timeout && 2258 enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) { 2259 msleep(delay); 2260 delay *= 2; 2261 } 2262 2263 if (delay >= timeout) 2264 netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n", 2265 idx); 2266 } 2267 2268 static void enetc_wait_bdrs(struct enetc_ndev_priv *priv) 2269 { 2270 struct enetc_hw *hw = &priv->si->hw; 2271 int i; 2272 2273 for (i = 0; i < priv->num_tx_rings; i++) 2274 enetc_wait_txbdr(hw, priv->tx_ring[i]); 2275 } 2276 2277 static int 
enetc_setup_irqs(struct enetc_ndev_priv *priv) 2278 { 2279 struct pci_dev *pdev = priv->si->pdev; 2280 struct enetc_hw *hw = &priv->si->hw; 2281 int i, j, err; 2282 2283 for (i = 0; i < priv->bdr_int_num; i++) { 2284 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); 2285 struct enetc_int_vector *v = priv->int_vector[i]; 2286 int entry = ENETC_BDR_INT_BASE_IDX + i; 2287 2288 snprintf(v->name, sizeof(v->name), "%s-rxtx%d", 2289 priv->ndev->name, i); 2290 err = request_irq(irq, enetc_msix, 0, v->name, v); 2291 if (err) { 2292 dev_err(priv->dev, "request_irq() failed!\n"); 2293 goto irq_err; 2294 } 2295 disable_irq(irq); 2296 2297 v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER); 2298 v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER); 2299 v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1); 2300 2301 enetc_wr(hw, ENETC_SIMSIRRV(i), entry); 2302 2303 for (j = 0; j < v->count_tx_rings; j++) { 2304 int idx = v->tx_ring[j].index; 2305 2306 enetc_wr(hw, ENETC_SIMSITRV(idx), entry); 2307 } 2308 irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus())); 2309 } 2310 2311 return 0; 2312 2313 irq_err: 2314 while (i--) { 2315 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); 2316 2317 irq_set_affinity_hint(irq, NULL); 2318 free_irq(irq, priv->int_vector[i]); 2319 } 2320 2321 return err; 2322 } 2323 2324 static void enetc_free_irqs(struct enetc_ndev_priv *priv) 2325 { 2326 struct pci_dev *pdev = priv->si->pdev; 2327 int i; 2328 2329 for (i = 0; i < priv->bdr_int_num; i++) { 2330 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); 2331 2332 irq_set_affinity_hint(irq, NULL); 2333 free_irq(irq, priv->int_vector[i]); 2334 } 2335 } 2336 2337 static void enetc_setup_interrupts(struct enetc_ndev_priv *priv) 2338 { 2339 struct enetc_hw *hw = &priv->si->hw; 2340 u32 icpt, ictt; 2341 int i; 2342 2343 /* enable Tx & Rx event indication */ 2344 if (priv->ic_mode & 2345 (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) { 2346 icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR); 2347 /* init to non-0 minimum, will be adjusted later */ 2348 ictt = 0x1; 2349 } else { 2350 icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */ 2351 ictt = 0; 2352 } 2353 2354 for (i = 0; i < priv->num_rx_rings; i++) { 2355 enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt); 2356 enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt); 2357 enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE); 2358 } 2359 2360 if (priv->ic_mode & ENETC_IC_TX_MANUAL) 2361 icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR); 2362 else 2363 icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */ 2364 2365 for (i = 0; i < priv->num_tx_rings; i++) { 2366 enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt); 2367 enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt); 2368 enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE); 2369 } 2370 } 2371 2372 static void enetc_clear_interrupts(struct enetc_ndev_priv *priv) 2373 { 2374 struct enetc_hw *hw = &priv->si->hw; 2375 int i; 2376 2377 for (i = 0; i < priv->num_tx_rings; i++) 2378 enetc_txbdr_wr(hw, i, ENETC_TBIER, 0); 2379 2380 for (i = 0; i < priv->num_rx_rings; i++) 2381 enetc_rxbdr_wr(hw, i, ENETC_RBIER, 0); 2382 } 2383 2384 static int enetc_phylink_connect(struct net_device *ndev) 2385 { 2386 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2387 struct ethtool_eee edata; 2388 int err; 2389 2390 if (!priv->phylink) { 2391 /* phy-less mode */ 2392 netif_carrier_on(ndev); 2393 return 0; 2394 } 2395 2396 err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0); 2397 
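	/* Added note, not in the original driver: phylink_of_phy_connect()
	 * attaches the MAC to the PHY or fixed-link described by the port's
	 * OF node. On failure the error is simply propagated; enetc_open()
	 * below unwinds through its err_phy_connect label and releases the
	 * IRQs that were requested just before this call.
	 */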
if (err) { 2398 dev_err(&ndev->dev, "could not attach to PHY\n"); 2399 return err; 2400 } 2401 2402 /* disable EEE autoneg, until ENETC driver supports it */ 2403 memset(&edata, 0, sizeof(struct ethtool_eee)); 2404 phylink_ethtool_set_eee(priv->phylink, &edata); 2405 2406 phylink_start(priv->phylink); 2407 2408 return 0; 2409 } 2410 2411 static void enetc_tx_onestep_tstamp(struct work_struct *work) 2412 { 2413 struct enetc_ndev_priv *priv; 2414 struct sk_buff *skb; 2415 2416 priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp); 2417 2418 netif_tx_lock_bh(priv->ndev); 2419 2420 clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags); 2421 skb = skb_dequeue(&priv->tx_skbs); 2422 if (skb) 2423 enetc_start_xmit(skb, priv->ndev); 2424 2425 netif_tx_unlock_bh(priv->ndev); 2426 } 2427 2428 static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv) 2429 { 2430 INIT_WORK(&priv->tx_onestep_tstamp, enetc_tx_onestep_tstamp); 2431 skb_queue_head_init(&priv->tx_skbs); 2432 } 2433 2434 void enetc_start(struct net_device *ndev) 2435 { 2436 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2437 int i; 2438 2439 enetc_setup_interrupts(priv); 2440 2441 for (i = 0; i < priv->bdr_int_num; i++) { 2442 int irq = pci_irq_vector(priv->si->pdev, 2443 ENETC_BDR_INT_BASE_IDX + i); 2444 2445 napi_enable(&priv->int_vector[i]->napi); 2446 enable_irq(irq); 2447 } 2448 2449 enetc_enable_bdrs(priv); 2450 2451 netif_tx_start_all_queues(ndev); 2452 } 2453 EXPORT_SYMBOL_GPL(enetc_start); 2454 2455 int enetc_open(struct net_device *ndev) 2456 { 2457 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2458 struct enetc_bdr_resource *tx_res, *rx_res; 2459 int num_stack_tx_queues; 2460 bool extended; 2461 int err; 2462 2463 extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP); 2464 2465 err = enetc_setup_irqs(priv); 2466 if (err) 2467 return err; 2468 2469 err = enetc_phylink_connect(ndev); 2470 if (err) 2471 goto err_phy_connect; 2472 2473 tx_res = enetc_alloc_tx_resources(priv); 2474 if (IS_ERR(tx_res)) { 2475 err = PTR_ERR(tx_res); 2476 goto err_alloc_tx; 2477 } 2478 2479 rx_res = enetc_alloc_rx_resources(priv, extended); 2480 if (IS_ERR(rx_res)) { 2481 err = PTR_ERR(rx_res); 2482 goto err_alloc_rx; 2483 } 2484 2485 num_stack_tx_queues = enetc_num_stack_tx_queues(priv); 2486 2487 err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues); 2488 if (err) 2489 goto err_set_queues; 2490 2491 err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings); 2492 if (err) 2493 goto err_set_queues; 2494 2495 enetc_tx_onestep_tstamp_init(priv); 2496 enetc_assign_tx_resources(priv, tx_res); 2497 enetc_assign_rx_resources(priv, rx_res); 2498 enetc_setup_bdrs(priv, extended); 2499 enetc_start(ndev); 2500 2501 return 0; 2502 2503 err_set_queues: 2504 enetc_free_rx_resources(rx_res, priv->num_rx_rings); 2505 err_alloc_rx: 2506 enetc_free_tx_resources(tx_res, priv->num_tx_rings); 2507 err_alloc_tx: 2508 if (priv->phylink) 2509 phylink_disconnect_phy(priv->phylink); 2510 err_phy_connect: 2511 enetc_free_irqs(priv); 2512 2513 return err; 2514 } 2515 EXPORT_SYMBOL_GPL(enetc_open); 2516 2517 void enetc_stop(struct net_device *ndev) 2518 { 2519 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2520 int i; 2521 2522 netif_tx_stop_all_queues(ndev); 2523 2524 enetc_disable_bdrs(priv); 2525 2526 for (i = 0; i < priv->bdr_int_num; i++) { 2527 int irq = pci_irq_vector(priv->si->pdev, 2528 ENETC_BDR_INT_BASE_IDX + i); 2529 2530 disable_irq(irq); 2531 napi_synchronize(&priv->int_vector[i]->napi); 2532 
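		/* Added note, not in the original driver: the shutdown order
		 * matters here. disable_irq() stops further MSI-X events for
		 * this vector, napi_synchronize() waits for a poll that may
		 * already be running on another CPU, and only then is the
		 * NAPI instance disabled below so it cannot be rescheduled.
		 */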
napi_disable(&priv->int_vector[i]->napi); 2533 } 2534 2535 enetc_wait_bdrs(priv); 2536 2537 enetc_clear_interrupts(priv); 2538 } 2539 EXPORT_SYMBOL_GPL(enetc_stop); 2540 2541 int enetc_close(struct net_device *ndev) 2542 { 2543 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2544 2545 enetc_stop(ndev); 2546 2547 if (priv->phylink) { 2548 phylink_stop(priv->phylink); 2549 phylink_disconnect_phy(priv->phylink); 2550 } else { 2551 netif_carrier_off(ndev); 2552 } 2553 2554 enetc_free_rxtx_rings(priv); 2555 2556 /* Avoids dangling pointers and also frees old resources */ 2557 enetc_assign_rx_resources(priv, NULL); 2558 enetc_assign_tx_resources(priv, NULL); 2559 2560 enetc_free_irqs(priv); 2561 2562 return 0; 2563 } 2564 EXPORT_SYMBOL_GPL(enetc_close); 2565 2566 static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended, 2567 int (*cb)(struct enetc_ndev_priv *priv, void *ctx), 2568 void *ctx) 2569 { 2570 struct enetc_bdr_resource *tx_res, *rx_res; 2571 int err; 2572 2573 ASSERT_RTNL(); 2574 2575 /* If the interface is down, run the callback right away, 2576 * without reconfiguration. 2577 */ 2578 if (!netif_running(priv->ndev)) { 2579 if (cb) 2580 cb(priv, ctx); 2581 2582 return 0; 2583 } 2584 2585 tx_res = enetc_alloc_tx_resources(priv); 2586 if (IS_ERR(tx_res)) { 2587 err = PTR_ERR(tx_res); 2588 goto out; 2589 } 2590 2591 rx_res = enetc_alloc_rx_resources(priv, extended); 2592 if (IS_ERR(rx_res)) { 2593 err = PTR_ERR(rx_res); 2594 goto out_free_tx_res; 2595 } 2596 2597 enetc_stop(priv->ndev); 2598 enetc_free_rxtx_rings(priv); 2599 2600 /* Interface is down, run optional callback now */ 2601 if (cb) 2602 cb(priv, ctx); 2603 2604 enetc_assign_tx_resources(priv, tx_res); 2605 enetc_assign_rx_resources(priv, rx_res); 2606 enetc_setup_bdrs(priv, extended); 2607 enetc_start(priv->ndev); 2608 2609 return 0; 2610 2611 out_free_tx_res: 2612 enetc_free_tx_resources(tx_res, priv->num_tx_rings); 2613 out: 2614 return err; 2615 } 2616 2617 int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) 2618 { 2619 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2620 struct tc_mqprio_qopt *mqprio = type_data; 2621 struct enetc_hw *hw = &priv->si->hw; 2622 struct enetc_bdr *tx_ring; 2623 int num_stack_tx_queues; 2624 u8 num_tc; 2625 int i; 2626 2627 num_stack_tx_queues = enetc_num_stack_tx_queues(priv); 2628 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 2629 num_tc = mqprio->num_tc; 2630 2631 if (!num_tc) { 2632 netdev_reset_tc(ndev); 2633 netif_set_real_num_tx_queues(ndev, num_stack_tx_queues); 2634 2635 /* Reset all ring priorities to 0 */ 2636 for (i = 0; i < priv->num_tx_rings; i++) { 2637 tx_ring = priv->tx_ring[i]; 2638 tx_ring->prio = 0; 2639 enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); 2640 } 2641 2642 return 0; 2643 } 2644 2645 /* Check if we have enough BD rings available to accommodate all TCs */ 2646 if (num_tc > num_stack_tx_queues) { 2647 netdev_err(ndev, "Max %d traffic classes supported\n", 2648 priv->num_tx_rings); 2649 return -EINVAL; 2650 } 2651 2652 /* For the moment, we use only one BD ring per TC. 2653 * 2654 * Configure num_tc BD rings with increasing priorities. 
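	 * (Added illustration, not part of the original comment: with
	 * num_tc = 3, the loop below gives tx_ring 0/1/2 priorities 0/1/2,
	 * the netdev is trimmed to 3 real TX queues, and netdev_set_tc_queue()
	 * maps each TC to exactly one of those queues, i.e. TC i -> queue i.)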
2655 */ 2656 for (i = 0; i < num_tc; i++) { 2657 tx_ring = priv->tx_ring[i]; 2658 tx_ring->prio = i; 2659 enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); 2660 } 2661 2662 /* Reset the number of netdev queues based on the TC count */ 2663 netif_set_real_num_tx_queues(ndev, num_tc); 2664 2665 netdev_set_num_tc(ndev, num_tc); 2666 2667 /* Each TC is associated with one netdev queue */ 2668 for (i = 0; i < num_tc; i++) 2669 netdev_set_tc_queue(ndev, i, 1, i); 2670 2671 return 0; 2672 } 2673 EXPORT_SYMBOL_GPL(enetc_setup_tc_mqprio); 2674 2675 static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx) 2676 { 2677 struct bpf_prog *old_prog, *prog = ctx; 2678 int i; 2679 2680 old_prog = xchg(&priv->xdp_prog, prog); 2681 if (old_prog) 2682 bpf_prog_put(old_prog); 2683 2684 for (i = 0; i < priv->num_rx_rings; i++) { 2685 struct enetc_bdr *rx_ring = priv->rx_ring[i]; 2686 2687 rx_ring->xdp.prog = prog; 2688 2689 if (prog) 2690 rx_ring->buffer_offset = XDP_PACKET_HEADROOM; 2691 else 2692 rx_ring->buffer_offset = ENETC_RXB_PAD; 2693 } 2694 2695 return 0; 2696 } 2697 2698 static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog, 2699 struct netlink_ext_ack *extack) 2700 { 2701 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2702 bool extended; 2703 2704 extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP); 2705 2706 /* The buffer layout is changing, so we need to drain the old 2707 * RX buffers and seed new ones. 2708 */ 2709 return enetc_reconfigure(priv, extended, enetc_reconfigure_xdp_cb, prog); 2710 } 2711 2712 int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf) 2713 { 2714 switch (bpf->command) { 2715 case XDP_SETUP_PROG: 2716 return enetc_setup_xdp_prog(ndev, bpf->prog, bpf->extack); 2717 default: 2718 return -EINVAL; 2719 } 2720 2721 return 0; 2722 } 2723 EXPORT_SYMBOL_GPL(enetc_setup_bpf); 2724 2725 struct net_device_stats *enetc_get_stats(struct net_device *ndev) 2726 { 2727 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2728 struct net_device_stats *stats = &ndev->stats; 2729 unsigned long packets = 0, bytes = 0; 2730 unsigned long tx_dropped = 0; 2731 int i; 2732 2733 for (i = 0; i < priv->num_rx_rings; i++) { 2734 packets += priv->rx_ring[i]->stats.packets; 2735 bytes += priv->rx_ring[i]->stats.bytes; 2736 } 2737 2738 stats->rx_packets = packets; 2739 stats->rx_bytes = bytes; 2740 bytes = 0; 2741 packets = 0; 2742 2743 for (i = 0; i < priv->num_tx_rings; i++) { 2744 packets += priv->tx_ring[i]->stats.packets; 2745 bytes += priv->tx_ring[i]->stats.bytes; 2746 tx_dropped += priv->tx_ring[i]->stats.win_drop; 2747 } 2748 2749 stats->tx_packets = packets; 2750 stats->tx_bytes = bytes; 2751 stats->tx_dropped = tx_dropped; 2752 2753 return stats; 2754 } 2755 EXPORT_SYMBOL_GPL(enetc_get_stats); 2756 2757 static int enetc_set_rss(struct net_device *ndev, int en) 2758 { 2759 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2760 struct enetc_hw *hw = &priv->si->hw; 2761 u32 reg; 2762 2763 enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings); 2764 2765 reg = enetc_rd(hw, ENETC_SIMR); 2766 reg &= ~ENETC_SIMR_RSSE; 2767 reg |= (en) ? 
ENETC_SIMR_RSSE : 0; 2768 enetc_wr(hw, ENETC_SIMR, reg); 2769 2770 return 0; 2771 } 2772 2773 static void enetc_enable_rxvlan(struct net_device *ndev, bool en) 2774 { 2775 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2776 struct enetc_hw *hw = &priv->si->hw; 2777 int i; 2778 2779 for (i = 0; i < priv->num_rx_rings; i++) 2780 enetc_bdr_enable_rxvlan(hw, i, en); 2781 } 2782 2783 static void enetc_enable_txvlan(struct net_device *ndev, bool en) 2784 { 2785 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2786 struct enetc_hw *hw = &priv->si->hw; 2787 int i; 2788 2789 for (i = 0; i < priv->num_tx_rings; i++) 2790 enetc_bdr_enable_txvlan(hw, i, en); 2791 } 2792 2793 void enetc_set_features(struct net_device *ndev, netdev_features_t features) 2794 { 2795 netdev_features_t changed = ndev->features ^ features; 2796 2797 if (changed & NETIF_F_RXHASH) 2798 enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH)); 2799 2800 if (changed & NETIF_F_HW_VLAN_CTAG_RX) 2801 enetc_enable_rxvlan(ndev, 2802 !!(features & NETIF_F_HW_VLAN_CTAG_RX)); 2803 2804 if (changed & NETIF_F_HW_VLAN_CTAG_TX) 2805 enetc_enable_txvlan(ndev, 2806 !!(features & NETIF_F_HW_VLAN_CTAG_TX)); 2807 } 2808 EXPORT_SYMBOL_GPL(enetc_set_features); 2809 2810 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK 2811 static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr) 2812 { 2813 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2814 int err, new_offloads = priv->active_offloads; 2815 struct hwtstamp_config config; 2816 2817 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 2818 return -EFAULT; 2819 2820 switch (config.tx_type) { 2821 case HWTSTAMP_TX_OFF: 2822 new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; 2823 break; 2824 case HWTSTAMP_TX_ON: 2825 new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; 2826 new_offloads |= ENETC_F_TX_TSTAMP; 2827 break; 2828 case HWTSTAMP_TX_ONESTEP_SYNC: 2829 new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; 2830 new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP; 2831 break; 2832 default: 2833 return -ERANGE; 2834 } 2835 2836 switch (config.rx_filter) { 2837 case HWTSTAMP_FILTER_NONE: 2838 new_offloads &= ~ENETC_F_RX_TSTAMP; 2839 break; 2840 default: 2841 new_offloads |= ENETC_F_RX_TSTAMP; 2842 config.rx_filter = HWTSTAMP_FILTER_ALL; 2843 } 2844 2845 if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) { 2846 bool extended = !!(new_offloads & ENETC_F_RX_TSTAMP); 2847 2848 err = enetc_reconfigure(priv, extended, NULL, NULL); 2849 if (err) 2850 return err; 2851 } 2852 2853 priv->active_offloads = new_offloads; 2854 2855 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 2856 -EFAULT : 0; 2857 } 2858 2859 static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr) 2860 { 2861 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2862 struct hwtstamp_config config; 2863 2864 config.flags = 0; 2865 2866 if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) 2867 config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC; 2868 else if (priv->active_offloads & ENETC_F_TX_TSTAMP) 2869 config.tx_type = HWTSTAMP_TX_ON; 2870 else 2871 config.tx_type = HWTSTAMP_TX_OFF; 2872 2873 config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ? 2874 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; 2875 2876 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
2877 -EFAULT : 0; 2878 } 2879 #endif 2880 2881 int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 2882 { 2883 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2884 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK 2885 if (cmd == SIOCSHWTSTAMP) 2886 return enetc_hwtstamp_set(ndev, rq); 2887 if (cmd == SIOCGHWTSTAMP) 2888 return enetc_hwtstamp_get(ndev, rq); 2889 #endif 2890 2891 if (!priv->phylink) 2892 return -EOPNOTSUPP; 2893 2894 return phylink_mii_ioctl(priv->phylink, rq, cmd); 2895 } 2896 EXPORT_SYMBOL_GPL(enetc_ioctl); 2897 2898 int enetc_alloc_msix(struct enetc_ndev_priv *priv) 2899 { 2900 struct pci_dev *pdev = priv->si->pdev; 2901 int first_xdp_tx_ring; 2902 int i, n, err, nvec; 2903 int v_tx_rings; 2904 2905 nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num; 2906 /* allocate MSIX for both messaging and Rx/Tx interrupts */ 2907 n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX); 2908 2909 if (n < 0) 2910 return n; 2911 2912 if (n != nvec) 2913 return -EPERM; 2914 2915 /* # of tx rings per int vector */ 2916 v_tx_rings = priv->num_tx_rings / priv->bdr_int_num; 2917 2918 for (i = 0; i < priv->bdr_int_num; i++) { 2919 struct enetc_int_vector *v; 2920 struct enetc_bdr *bdr; 2921 int j; 2922 2923 v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL); 2924 if (!v) { 2925 err = -ENOMEM; 2926 goto fail; 2927 } 2928 2929 priv->int_vector[i] = v; 2930 2931 bdr = &v->rx_ring; 2932 bdr->index = i; 2933 bdr->ndev = priv->ndev; 2934 bdr->dev = priv->dev; 2935 bdr->bd_count = priv->rx_bd_count; 2936 bdr->buffer_offset = ENETC_RXB_PAD; 2937 priv->rx_ring[i] = bdr; 2938 2939 err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0); 2940 if (err) { 2941 kfree(v); 2942 goto fail; 2943 } 2944 2945 err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq, 2946 MEM_TYPE_PAGE_SHARED, NULL); 2947 if (err) { 2948 xdp_rxq_info_unreg(&bdr->xdp.rxq); 2949 kfree(v); 2950 goto fail; 2951 } 2952 2953 /* init defaults for adaptive IC */ 2954 if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) { 2955 v->rx_ictt = 0x1; 2956 v->rx_dim_en = true; 2957 } 2958 INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work); 2959 netif_napi_add(priv->ndev, &v->napi, enetc_poll); 2960 v->count_tx_rings = v_tx_rings; 2961 2962 for (j = 0; j < v_tx_rings; j++) { 2963 int idx; 2964 2965 /* default tx ring mapping policy */ 2966 idx = priv->bdr_int_num * j + i; 2967 __set_bit(idx, &v->tx_rings_map); 2968 bdr = &v->tx_ring[j]; 2969 bdr->index = idx; 2970 bdr->ndev = priv->ndev; 2971 bdr->dev = priv->dev; 2972 bdr->bd_count = priv->tx_bd_count; 2973 priv->tx_ring[idx] = bdr; 2974 } 2975 } 2976 2977 first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus(); 2978 priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring]; 2979 2980 return 0; 2981 2982 fail: 2983 while (i--) { 2984 struct enetc_int_vector *v = priv->int_vector[i]; 2985 struct enetc_bdr *rx_ring = &v->rx_ring; 2986 2987 xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq); 2988 xdp_rxq_info_unreg(&rx_ring->xdp.rxq); 2989 netif_napi_del(&v->napi); 2990 cancel_work_sync(&v->rx_dim.work); 2991 kfree(v); 2992 } 2993 2994 pci_free_irq_vectors(pdev); 2995 2996 return err; 2997 } 2998 EXPORT_SYMBOL_GPL(enetc_alloc_msix); 2999 3000 void enetc_free_msix(struct enetc_ndev_priv *priv) 3001 { 3002 int i; 3003 3004 for (i = 0; i < priv->bdr_int_num; i++) { 3005 struct enetc_int_vector *v = priv->int_vector[i]; 3006 struct enetc_bdr *rx_ring = &v->rx_ring; 3007 3008 xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq); 3009 xdp_rxq_info_unreg(&rx_ring->xdp.rxq); 3010 netif_napi_del(&v->napi); 3011 
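		/* Added note, not in the original driver: this mirrors
		 * enetc_alloc_msix() in reverse for each vector. The XDP RXQ
		 * info is unregistered, the NAPI context deleted and the
		 * rx_dim work cancelled before the per-ring pointers are
		 * cleared and the vector memory itself is freed further below.
		 */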
cancel_work_sync(&v->rx_dim.work); 3012 } 3013 3014 for (i = 0; i < priv->num_rx_rings; i++) 3015 priv->rx_ring[i] = NULL; 3016 3017 for (i = 0; i < priv->num_tx_rings; i++) 3018 priv->tx_ring[i] = NULL; 3019 3020 for (i = 0; i < priv->bdr_int_num; i++) { 3021 kfree(priv->int_vector[i]); 3022 priv->int_vector[i] = NULL; 3023 } 3024 3025 /* disable all MSIX for this device */ 3026 pci_free_irq_vectors(priv->si->pdev); 3027 } 3028 EXPORT_SYMBOL_GPL(enetc_free_msix); 3029 3030 static void enetc_kfree_si(struct enetc_si *si) 3031 { 3032 char *p = (char *)si - si->pad; 3033 3034 kfree(p); 3035 } 3036 3037 static void enetc_detect_errata(struct enetc_si *si) 3038 { 3039 if (si->pdev->revision == ENETC_REV1) 3040 si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP; 3041 } 3042 3043 int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv) 3044 { 3045 struct enetc_si *si, *p; 3046 struct enetc_hw *hw; 3047 size_t alloc_size; 3048 int err, len; 3049 3050 pcie_flr(pdev); 3051 err = pci_enable_device_mem(pdev); 3052 if (err) 3053 return dev_err_probe(&pdev->dev, err, "device enable failed\n"); 3054 3055 /* set up for high or low dma */ 3056 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 3057 if (err) { 3058 dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); 3059 goto err_dma; 3060 } 3061 3062 err = pci_request_mem_regions(pdev, name); 3063 if (err) { 3064 dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err); 3065 goto err_pci_mem_reg; 3066 } 3067 3068 pci_set_master(pdev); 3069 3070 alloc_size = sizeof(struct enetc_si); 3071 if (sizeof_priv) { 3072 /* align priv to 32B */ 3073 alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN); 3074 alloc_size += sizeof_priv; 3075 } 3076 /* force 32B alignment for enetc_si */ 3077 alloc_size += ENETC_SI_ALIGN - 1; 3078 3079 p = kzalloc(alloc_size, GFP_KERNEL); 3080 if (!p) { 3081 err = -ENOMEM; 3082 goto err_alloc_si; 3083 } 3084 3085 si = PTR_ALIGN(p, ENETC_SI_ALIGN); 3086 si->pad = (char *)si - (char *)p; 3087 3088 pci_set_drvdata(pdev, si); 3089 si->pdev = pdev; 3090 hw = &si->hw; 3091 3092 len = pci_resource_len(pdev, ENETC_BAR_REGS); 3093 hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len); 3094 if (!hw->reg) { 3095 err = -ENXIO; 3096 dev_err(&pdev->dev, "ioremap() failed\n"); 3097 goto err_ioremap; 3098 } 3099 if (len > ENETC_PORT_BASE) 3100 hw->port = hw->reg + ENETC_PORT_BASE; 3101 if (len > ENETC_GLOBAL_BASE) 3102 hw->global = hw->reg + ENETC_GLOBAL_BASE; 3103 3104 enetc_detect_errata(si); 3105 3106 return 0; 3107 3108 err_ioremap: 3109 enetc_kfree_si(si); 3110 err_alloc_si: 3111 pci_release_mem_regions(pdev); 3112 err_pci_mem_reg: 3113 err_dma: 3114 pci_disable_device(pdev); 3115 3116 return err; 3117 } 3118 EXPORT_SYMBOL_GPL(enetc_pci_probe); 3119 3120 void enetc_pci_remove(struct pci_dev *pdev) 3121 { 3122 struct enetc_si *si = pci_get_drvdata(pdev); 3123 struct enetc_hw *hw = &si->hw; 3124 3125 iounmap(hw->reg); 3126 enetc_kfree_si(si); 3127 pci_release_mem_regions(pdev); 3128 pci_disable_device(pdev); 3129 } 3130 EXPORT_SYMBOL_GPL(enetc_pci_remove); 3131 3132 MODULE_LICENSE("Dual BSD/GPL"); 3133
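/*
 * Usage sketch (added commentary, not part of the original file): internal
 * callers that need to change the ring layout at runtime are expected to go
 * through enetc_reconfigure() above rather than open-coding a stop/start
 * cycle. A hypothetical callback-based caller, with enetc_apply_foo_cb() and
 * foo_ctx being made-up names for illustration only, would look roughly like:
 *
 *	static int enetc_apply_foo_cb(struct enetc_ndev_priv *priv, void *ctx)
 *	{
 *		// adjust priv state here while the rings are quiesced
 *		return 0;
 *	}
 *
 *	// caller holds rtnl_lock(), which enetc_reconfigure() asserts
 *	err = enetc_reconfigure(priv, extended, enetc_apply_foo_cb, &foo_ctx);
 *
 * If the interface is down, the callback runs immediately. Otherwise new
 * ring resources are allocated first, the interface is stopped, the callback
 * runs, and the new resources are swapped in before restarting, which is how
 * enetc_reconfigure_xdp_cb() and the hwtstamp path use it above.
 */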