1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) 2 /* Copyright 2017-2019 NXP */ 3 4 #include "enetc.h" 5 #include <linux/bpf_trace.h> 6 #include <linux/clk.h> 7 #include <linux/tcp.h> 8 #include <linux/udp.h> 9 #include <linux/vmalloc.h> 10 #include <linux/ptp_classify.h> 11 #include <net/ip6_checksum.h> 12 #include <net/pkt_sched.h> 13 #include <net/tso.h> 14 15 u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg) 16 { 17 return enetc_port_rd(&si->hw, reg); 18 } 19 EXPORT_SYMBOL_GPL(enetc_port_mac_rd); 20 21 void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val) 22 { 23 enetc_port_wr(&si->hw, reg, val); 24 if (si->hw_features & ENETC_SI_F_QBU) 25 enetc_port_wr(&si->hw, reg + si->drvdata->pmac_offset, val); 26 } 27 EXPORT_SYMBOL_GPL(enetc_port_mac_wr); 28 29 static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv, 30 u8 preemptible_tcs) 31 { 32 if (!(priv->si->hw_features & ENETC_SI_F_QBU)) 33 return; 34 35 priv->preemptible_tcs = preemptible_tcs; 36 enetc_mm_commit_preemptible_tcs(priv); 37 } 38 39 static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv) 40 { 41 int num_tx_rings = priv->num_tx_rings; 42 43 if (priv->xdp_prog) 44 return num_tx_rings - num_possible_cpus(); 45 46 return num_tx_rings; 47 } 48 49 static struct enetc_bdr *enetc_rx_ring_from_xdp_tx_ring(struct enetc_ndev_priv *priv, 50 struct enetc_bdr *tx_ring) 51 { 52 int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring; 53 54 return priv->rx_ring[index]; 55 } 56 57 static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd) 58 { 59 if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect) 60 return NULL; 61 62 return tx_swbd->skb; 63 } 64 65 static struct xdp_frame * 66 enetc_tx_swbd_get_xdp_frame(struct enetc_tx_swbd *tx_swbd) 67 { 68 if (tx_swbd->is_xdp_redirect) 69 return tx_swbd->xdp_frame; 70 71 return NULL; 72 } 73 74 static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring, 75 struct enetc_tx_swbd *tx_swbd) 76 { 77 /* For XDP_TX, pages come from RX, whereas for the other contexts where 78 * we have is_dma_page_set, those come from skb_frag_dma_map. We need 79 * to match the DMA mapping length, so we need to differentiate those. 80 */ 81 if (tx_swbd->is_dma_page) 82 dma_unmap_page(tx_ring->dev, tx_swbd->dma, 83 tx_swbd->is_xdp_tx ? 
PAGE_SIZE : tx_swbd->len, 84 tx_swbd->dir); 85 else 86 dma_unmap_single(tx_ring->dev, tx_swbd->dma, 87 tx_swbd->len, tx_swbd->dir); 88 tx_swbd->dma = 0; 89 } 90 91 static void enetc_free_tx_frame(struct enetc_bdr *tx_ring, 92 struct enetc_tx_swbd *tx_swbd) 93 { 94 struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd); 95 struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd); 96 97 if (tx_swbd->dma) 98 enetc_unmap_tx_buff(tx_ring, tx_swbd); 99 100 if (xdp_frame) { 101 xdp_return_frame(tx_swbd->xdp_frame); 102 tx_swbd->xdp_frame = NULL; 103 } else if (skb) { 104 dev_kfree_skb_any(skb); 105 tx_swbd->skb = NULL; 106 } 107 } 108 109 /* Let H/W know BD ring has been updated */ 110 static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring) 111 { 112 /* includes wmb() */ 113 enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use); 114 } 115 116 static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp, 117 u8 *msgtype, u8 *twostep, 118 u16 *correction_offset, u16 *body_offset) 119 { 120 unsigned int ptp_class; 121 struct ptp_header *hdr; 122 unsigned int type; 123 u8 *base; 124 125 ptp_class = ptp_classify_raw(skb); 126 if (ptp_class == PTP_CLASS_NONE) 127 return -EINVAL; 128 129 hdr = ptp_parse_header(skb, ptp_class); 130 if (!hdr) 131 return -EINVAL; 132 133 type = ptp_class & PTP_CLASS_PMASK; 134 if (type == PTP_CLASS_IPV4 || type == PTP_CLASS_IPV6) 135 *udp = 1; 136 else 137 *udp = 0; 138 139 *msgtype = ptp_get_msgtype(hdr, ptp_class); 140 *twostep = hdr->flag_field[0] & 0x2; 141 142 base = skb_mac_header(skb); 143 *correction_offset = (u8 *)&hdr->correction - base; 144 *body_offset = (u8 *)hdr + sizeof(struct ptp_header) - base; 145 146 return 0; 147 } 148 149 static bool enetc_tx_csum_offload_check(struct sk_buff *skb) 150 { 151 switch (skb->csum_offset) { 152 case offsetof(struct tcphdr, check): 153 case offsetof(struct udphdr, check): 154 return true; 155 default: 156 return false; 157 } 158 } 159 160 static bool enetc_skb_is_ipv6(struct sk_buff *skb) 161 { 162 return vlan_get_protocol(skb) == htons(ETH_P_IPV6); 163 } 164 165 static bool enetc_skb_is_tcp(struct sk_buff *skb) 166 { 167 return skb->csum_offset == offsetof(struct tcphdr, check); 168 } 169 170 /** 171 * enetc_unwind_tx_frame() - Unwind the DMA mappings of a multi-buffer Tx frame 172 * @tx_ring: Pointer to the Tx ring on which the buffer descriptors are located 173 * @count: Number of Tx buffer descriptors which need to be unmapped 174 * @i: Index of the last successfully mapped Tx buffer descriptor 175 */ 176 static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i) 177 { 178 while (count--) { 179 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; 180 181 enetc_free_tx_frame(tx_ring, tx_swbd); 182 if (i == 0) 183 i = tx_ring->bd_count; 184 i--; 185 } 186 } 187 188 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) 189 { 190 bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false; 191 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); 192 struct enetc_hw *hw = &priv->si->hw; 193 struct enetc_tx_swbd *tx_swbd; 194 int len = skb_headlen(skb); 195 union enetc_tx_bd temp_bd; 196 u8 msgtype, twostep, udp; 197 union enetc_tx_bd *txbd; 198 u16 offset1, offset2; 199 int i, count = 0; 200 skb_frag_t *frag; 201 unsigned int f; 202 dma_addr_t dma; 203 u8 flags = 0; 204 205 enetc_clear_tx_bd(&temp_bd); 206 if (skb->ip_summed == CHECKSUM_PARTIAL) { 207 /* Can not support TSD and checksum offload at the same time */ 208 if (priv->active_offloads & 
ENETC_F_TXCSUM && 209 enetc_tx_csum_offload_check(skb) && !tx_ring->tsd_enable) { 210 temp_bd.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START, 211 skb_network_offset(skb)); 212 temp_bd.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN, 213 skb_network_header_len(skb) / 4); 214 temp_bd.l3_aux1 |= FIELD_PREP(ENETC_TX_BD_L3T, 215 enetc_skb_is_ipv6(skb)); 216 if (enetc_skb_is_tcp(skb)) 217 temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T, 218 ENETC_TXBD_L4T_TCP); 219 else 220 temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T, 221 ENETC_TXBD_L4T_UDP); 222 flags |= ENETC_TXBD_FLAGS_CSUM_LSO | ENETC_TXBD_FLAGS_L4CS; 223 } else if (skb_checksum_help(skb)) { 224 return 0; 225 } 226 } 227 228 i = tx_ring->next_to_use; 229 txbd = ENETC_TXBD(*tx_ring, i); 230 prefetchw(txbd); 231 232 dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE); 233 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) 234 goto dma_err; 235 236 temp_bd.addr = cpu_to_le64(dma); 237 temp_bd.buf_len = cpu_to_le16(len); 238 239 tx_swbd = &tx_ring->tx_swbd[i]; 240 tx_swbd->dma = dma; 241 tx_swbd->len = len; 242 tx_swbd->is_dma_page = 0; 243 tx_swbd->dir = DMA_TO_DEVICE; 244 count++; 245 246 do_vlan = skb_vlan_tag_present(skb); 247 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { 248 if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1, 249 &offset2) || 250 msgtype != PTP_MSGTYPE_SYNC || twostep) 251 WARN_ONCE(1, "Bad packet for one-step timestamping\n"); 252 else 253 do_onestep_tstamp = true; 254 } else if (skb->cb[0] & ENETC_F_TX_TSTAMP) { 255 do_twostep_tstamp = true; 256 } 257 258 tx_swbd->do_twostep_tstamp = do_twostep_tstamp; 259 tx_swbd->qbv_en = !!(priv->active_offloads & ENETC_F_QBV); 260 tx_swbd->check_wb = tx_swbd->do_twostep_tstamp || tx_swbd->qbv_en; 261 262 if (do_vlan || do_onestep_tstamp || do_twostep_tstamp) 263 flags |= ENETC_TXBD_FLAGS_EX; 264 265 if (tx_ring->tsd_enable) 266 flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART; 267 268 /* first BD needs frm_len and offload flags set */ 269 temp_bd.frm_len = cpu_to_le16(skb->len); 270 temp_bd.flags = flags; 271 272 if (flags & ENETC_TXBD_FLAGS_TSE) 273 temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns, 274 flags); 275 276 if (flags & ENETC_TXBD_FLAGS_EX) { 277 u8 e_flags = 0; 278 *txbd = temp_bd; 279 enetc_clear_tx_bd(&temp_bd); 280 281 /* add extension BD for VLAN and/or timestamping */ 282 flags = 0; 283 tx_swbd++; 284 txbd++; 285 i++; 286 if (unlikely(i == tx_ring->bd_count)) { 287 i = 0; 288 tx_swbd = tx_ring->tx_swbd; 289 txbd = ENETC_TXBD(*tx_ring, 0); 290 } 291 prefetchw(txbd); 292 293 if (do_vlan) { 294 temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb)); 295 temp_bd.ext.tpid = 0; /* < C-TAG */ 296 e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS; 297 } 298 299 if (do_onestep_tstamp) { 300 __be32 new_sec_l, new_nsec; 301 u32 lo, hi, nsec, val; 302 __be16 new_sec_h; 303 u8 *data; 304 u64 sec; 305 306 lo = enetc_rd_hot(hw, ENETC_SICTR0); 307 hi = enetc_rd_hot(hw, ENETC_SICTR1); 308 sec = (u64)hi << 32 | lo; 309 nsec = do_div(sec, 1000000000); 310 311 /* Configure extension BD */ 312 temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff); 313 e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP; 314 315 /* Update originTimestamp field of Sync packet 316 * - 48 bits seconds field 317 * - 32 bits nanseconds field 318 * 319 * In addition, the UDP checksum needs to be updated 320 * by software after updating originTimestamp field, 321 * otherwise the hardware will calculate the wrong 322 * checksum when updating the correction field and 323 * update it to the packet. 
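			 *
			 * The stores below imply this layout at offset2 (the
			 * start of originTimestamp in the PTP message body):
			 *   offset2 + 0: __be16 seconds, bits 47..32
			 *   offset2 + 2: __be32 seconds, bits 31..0
			 *   offset2 + 6: __be32 nanoseconds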
324 */ 325 data = skb_mac_header(skb); 326 new_sec_h = htons((sec >> 32) & 0xffff); 327 new_sec_l = htonl(sec & 0xffffffff); 328 new_nsec = htonl(nsec); 329 if (udp) { 330 struct udphdr *uh = udp_hdr(skb); 331 __be32 old_sec_l, old_nsec; 332 __be16 old_sec_h; 333 334 old_sec_h = *(__be16 *)(data + offset2); 335 inet_proto_csum_replace2(&uh->check, skb, old_sec_h, 336 new_sec_h, false); 337 338 old_sec_l = *(__be32 *)(data + offset2 + 2); 339 inet_proto_csum_replace4(&uh->check, skb, old_sec_l, 340 new_sec_l, false); 341 342 old_nsec = *(__be32 *)(data + offset2 + 6); 343 inet_proto_csum_replace4(&uh->check, skb, old_nsec, 344 new_nsec, false); 345 } 346 347 *(__be16 *)(data + offset2) = new_sec_h; 348 *(__be32 *)(data + offset2 + 2) = new_sec_l; 349 *(__be32 *)(data + offset2 + 6) = new_nsec; 350 351 /* Configure single-step register */ 352 val = ENETC_PM0_SINGLE_STEP_EN; 353 val |= ENETC_SET_SINGLE_STEP_OFFSET(offset1); 354 if (udp) 355 val |= ENETC_PM0_SINGLE_STEP_CH; 356 357 enetc_port_mac_wr(priv->si, ENETC_PM0_SINGLE_STEP, 358 val); 359 } else if (do_twostep_tstamp) { 360 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 361 e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP; 362 } 363 364 temp_bd.ext.e_flags = e_flags; 365 count++; 366 } 367 368 frag = &skb_shinfo(skb)->frags[0]; 369 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) { 370 len = skb_frag_size(frag); 371 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, 372 DMA_TO_DEVICE); 373 if (dma_mapping_error(tx_ring->dev, dma)) 374 goto dma_err; 375 376 *txbd = temp_bd; 377 enetc_clear_tx_bd(&temp_bd); 378 379 flags = 0; 380 tx_swbd++; 381 txbd++; 382 i++; 383 if (unlikely(i == tx_ring->bd_count)) { 384 i = 0; 385 tx_swbd = tx_ring->tx_swbd; 386 txbd = ENETC_TXBD(*tx_ring, 0); 387 } 388 prefetchw(txbd); 389 390 temp_bd.addr = cpu_to_le64(dma); 391 temp_bd.buf_len = cpu_to_le16(len); 392 393 tx_swbd->dma = dma; 394 tx_swbd->len = len; 395 tx_swbd->is_dma_page = 1; 396 tx_swbd->dir = DMA_TO_DEVICE; 397 count++; 398 } 399 400 /* last BD needs 'F' bit set */ 401 flags |= ENETC_TXBD_FLAGS_F; 402 temp_bd.flags = flags; 403 *txbd = temp_bd; 404 405 tx_ring->tx_swbd[i].is_eof = true; 406 tx_ring->tx_swbd[i].skb = skb; 407 408 enetc_bdr_idx_inc(tx_ring, &i); 409 tx_ring->next_to_use = i; 410 411 skb_tx_timestamp(skb); 412 413 enetc_update_tx_ring_tail(tx_ring); 414 415 return count; 416 417 dma_err: 418 dev_err(tx_ring->dev, "DMA map error"); 419 420 enetc_unwind_tx_frame(tx_ring, count, i); 421 422 return 0; 423 } 424 425 static int enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, 426 struct enetc_tx_swbd *tx_swbd, 427 union enetc_tx_bd *txbd, int *i, int hdr_len, 428 int data_len) 429 { 430 union enetc_tx_bd txbd_tmp; 431 u8 flags = 0, e_flags = 0; 432 dma_addr_t addr; 433 int count = 1; 434 435 enetc_clear_tx_bd(&txbd_tmp); 436 addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE; 437 438 if (skb_vlan_tag_present(skb)) 439 flags |= ENETC_TXBD_FLAGS_EX; 440 441 txbd_tmp.addr = cpu_to_le64(addr); 442 txbd_tmp.buf_len = cpu_to_le16(hdr_len); 443 444 /* first BD needs frm_len and offload flags set */ 445 txbd_tmp.frm_len = cpu_to_le16(hdr_len + data_len); 446 txbd_tmp.flags = flags; 447 448 /* For the TSO header we do not set the dma address since we do not 449 * want it unmapped when we do cleanup. We still set len so that we 450 * count the bytes sent. 
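	 *
	 * (Cleanup keys off tx_swbd->dma: enetc_free_tx_frame() and
	 * enetc_clean_tx_ring() only unmap buffers whose dma is non-zero,
	 * so the coherent tso_headers region is never unmapped here.)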
451 */ 452 tx_swbd->len = hdr_len; 453 tx_swbd->do_twostep_tstamp = false; 454 tx_swbd->check_wb = false; 455 456 /* Actually write the header in the BD */ 457 *txbd = txbd_tmp; 458 459 /* Add extension BD for VLAN */ 460 if (flags & ENETC_TXBD_FLAGS_EX) { 461 /* Get the next BD */ 462 enetc_bdr_idx_inc(tx_ring, i); 463 txbd = ENETC_TXBD(*tx_ring, *i); 464 tx_swbd = &tx_ring->tx_swbd[*i]; 465 prefetchw(txbd); 466 467 /* Setup the VLAN fields */ 468 enetc_clear_tx_bd(&txbd_tmp); 469 txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb)); 470 txbd_tmp.ext.tpid = 0; /* < C-TAG */ 471 e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS; 472 473 /* Write the BD */ 474 txbd_tmp.ext.e_flags = e_flags; 475 *txbd = txbd_tmp; 476 count++; 477 } 478 479 return count; 480 } 481 482 static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb, 483 struct enetc_tx_swbd *tx_swbd, 484 union enetc_tx_bd *txbd, char *data, 485 int size, bool last_bd) 486 { 487 union enetc_tx_bd txbd_tmp; 488 dma_addr_t addr; 489 u8 flags = 0; 490 491 enetc_clear_tx_bd(&txbd_tmp); 492 493 addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE); 494 if (unlikely(dma_mapping_error(tx_ring->dev, addr))) { 495 netdev_err(tx_ring->ndev, "DMA map error\n"); 496 return -ENOMEM; 497 } 498 499 if (last_bd) { 500 flags |= ENETC_TXBD_FLAGS_F; 501 tx_swbd->is_eof = 1; 502 } 503 504 txbd_tmp.addr = cpu_to_le64(addr); 505 txbd_tmp.buf_len = cpu_to_le16(size); 506 txbd_tmp.flags = flags; 507 508 tx_swbd->dma = addr; 509 tx_swbd->len = size; 510 tx_swbd->dir = DMA_TO_DEVICE; 511 512 *txbd = txbd_tmp; 513 514 return 0; 515 } 516 517 static __wsum enetc_tso_hdr_csum(struct tso_t *tso, struct sk_buff *skb, 518 char *hdr, int hdr_len, int *l4_hdr_len) 519 { 520 char *l4_hdr = hdr + skb_transport_offset(skb); 521 int mac_hdr_len = skb_network_offset(skb); 522 523 if (tso->tlen != sizeof(struct udphdr)) { 524 struct tcphdr *tcph = (struct tcphdr *)(l4_hdr); 525 526 tcph->check = 0; 527 } else { 528 struct udphdr *udph = (struct udphdr *)(l4_hdr); 529 530 udph->check = 0; 531 } 532 533 /* Compute the IP checksum. This is necessary since tso_build_hdr() 534 * already incremented the IP ID field. 535 */ 536 if (!tso->ipv6) { 537 struct iphdr *iph = (void *)(hdr + mac_hdr_len); 538 539 iph->check = 0; 540 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); 541 } 542 543 /* Compute the checksum over the L4 header. */ 544 *l4_hdr_len = hdr_len - skb_transport_offset(skb); 545 return csum_partial(l4_hdr, *l4_hdr_len, 0); 546 } 547 548 static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso, 549 struct sk_buff *skb, char *hdr, int len, 550 __wsum sum) 551 { 552 char *l4_hdr = hdr + skb_transport_offset(skb); 553 __sum16 csum_final; 554 555 /* Complete the L4 checksum by appending the pseudo-header to the 556 * already computed checksum. 
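	 *
	 * Roughly: the final check value folds together the pseudo-header,
	 * the L4 header sum returned by enetc_tso_hdr_csum() and the payload
	 * sums the caller accumulated with csum_block_add().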
557 */ 558 if (!tso->ipv6) 559 csum_final = csum_tcpudp_magic(ip_hdr(skb)->saddr, 560 ip_hdr(skb)->daddr, 561 len, ip_hdr(skb)->protocol, sum); 562 else 563 csum_final = csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 564 &ipv6_hdr(skb)->daddr, 565 len, ipv6_hdr(skb)->nexthdr, sum); 566 567 if (tso->tlen != sizeof(struct udphdr)) { 568 struct tcphdr *tcph = (struct tcphdr *)(l4_hdr); 569 570 tcph->check = csum_final; 571 } else { 572 struct udphdr *udph = (struct udphdr *)(l4_hdr); 573 574 udph->check = csum_final; 575 } 576 } 577 578 static int enetc_lso_count_descs(const struct sk_buff *skb) 579 { 580 /* 4 BDs: 1 BD for LSO header + 1 BD for extended BD + 1 BD 581 * for linear area data but not include LSO header, namely 582 * skb_headlen(skb) - lso_hdr_len (it may be 0, but that's 583 * okay, we only need to consider the worst case). And 1 BD 584 * for gap. 585 */ 586 return skb_shinfo(skb)->nr_frags + 4; 587 } 588 589 static int enetc_lso_get_hdr_len(const struct sk_buff *skb) 590 { 591 int hdr_len, tlen; 592 593 tlen = skb_is_gso_tcp(skb) ? tcp_hdrlen(skb) : sizeof(struct udphdr); 594 hdr_len = skb_transport_offset(skb) + tlen; 595 596 return hdr_len; 597 } 598 599 static void enetc_lso_start(struct sk_buff *skb, struct enetc_lso_t *lso) 600 { 601 lso->lso_seg_size = skb_shinfo(skb)->gso_size; 602 lso->ipv6 = enetc_skb_is_ipv6(skb); 603 lso->tcp = skb_is_gso_tcp(skb); 604 lso->l3_hdr_len = skb_network_header_len(skb); 605 lso->l3_start = skb_network_offset(skb); 606 lso->hdr_len = enetc_lso_get_hdr_len(skb); 607 lso->total_len = skb->len - lso->hdr_len; 608 } 609 610 static void enetc_lso_map_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, 611 int *i, struct enetc_lso_t *lso) 612 { 613 union enetc_tx_bd txbd_tmp, *txbd; 614 struct enetc_tx_swbd *tx_swbd; 615 u16 frm_len, frm_len_ext; 616 u8 flags, e_flags = 0; 617 dma_addr_t addr; 618 char *hdr; 619 620 /* Get the first BD of the LSO BDs chain */ 621 txbd = ENETC_TXBD(*tx_ring, *i); 622 tx_swbd = &tx_ring->tx_swbd[*i]; 623 prefetchw(txbd); 624 625 /* Prepare LSO header: MAC + IP + TCP/UDP */ 626 hdr = tx_ring->tso_headers + *i * TSO_HEADER_SIZE; 627 memcpy(hdr, skb->data, lso->hdr_len); 628 addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE; 629 630 /* {frm_len_ext, frm_len} indicates the total length of 631 * large transmit data unit. frm_len contains the 16 least 632 * significant bits and frm_len_ext contains the 4 most 633 * significant bits. 634 */ 635 frm_len = lso->total_len & 0xffff; 636 frm_len_ext = (lso->total_len >> 16) & 0xf; 637 638 /* Set the flags of the first BD */ 639 flags = ENETC_TXBD_FLAGS_EX | ENETC_TXBD_FLAGS_CSUM_LSO | 640 ENETC_TXBD_FLAGS_LSO | ENETC_TXBD_FLAGS_L4CS; 641 642 enetc_clear_tx_bd(&txbd_tmp); 643 txbd_tmp.addr = cpu_to_le64(addr); 644 txbd_tmp.hdr_len = cpu_to_le16(lso->hdr_len); 645 646 /* first BD needs frm_len and offload flags set */ 647 txbd_tmp.frm_len = cpu_to_le16(frm_len); 648 txbd_tmp.flags = flags; 649 650 txbd_tmp.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START, lso->l3_start); 651 /* l3_hdr_size in 32-bits (4 bytes) */ 652 txbd_tmp.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN, 653 lso->l3_hdr_len / 4); 654 if (lso->ipv6) 655 txbd_tmp.l3_aux1 |= ENETC_TX_BD_L3T; 656 else 657 txbd_tmp.l3_aux0 |= ENETC_TX_BD_IPCS; 658 659 txbd_tmp.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T, lso->tcp ? 660 ENETC_TXBD_L4T_TCP : ENETC_TXBD_L4T_UDP); 661 662 /* For the LSO header we do not set the dma address since 663 * we do not want it unmapped when we do cleanup. We still 664 * set len so that we count the bytes sent. 
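	 * As in enetc_map_tx_tso_hdr(), a zero tx_swbd->dma makes the
	 * cleanup path skip unmapping this header, which lives in the
	 * coherent tso_headers area.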
665 */ 666 tx_swbd->len = lso->hdr_len; 667 tx_swbd->do_twostep_tstamp = false; 668 tx_swbd->check_wb = false; 669 670 /* Actually write the header in the BD */ 671 *txbd = txbd_tmp; 672 673 /* Get the next BD, and the next BD is extended BD */ 674 enetc_bdr_idx_inc(tx_ring, i); 675 txbd = ENETC_TXBD(*tx_ring, *i); 676 tx_swbd = &tx_ring->tx_swbd[*i]; 677 prefetchw(txbd); 678 679 enetc_clear_tx_bd(&txbd_tmp); 680 if (skb_vlan_tag_present(skb)) { 681 /* Setup the VLAN fields */ 682 txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb)); 683 txbd_tmp.ext.tpid = ENETC_TPID_8021Q; 684 e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS; 685 } 686 687 /* Write the BD */ 688 txbd_tmp.ext.e_flags = e_flags; 689 txbd_tmp.ext.lso_sg_size = cpu_to_le16(lso->lso_seg_size); 690 txbd_tmp.ext.frm_len_ext = cpu_to_le16(frm_len_ext); 691 *txbd = txbd_tmp; 692 } 693 694 static int enetc_lso_map_data(struct enetc_bdr *tx_ring, struct sk_buff *skb, 695 int *i, struct enetc_lso_t *lso, int *count) 696 { 697 union enetc_tx_bd txbd_tmp, *txbd = NULL; 698 struct enetc_tx_swbd *tx_swbd; 699 skb_frag_t *frag; 700 dma_addr_t dma; 701 u8 flags = 0; 702 int len, f; 703 704 len = skb_headlen(skb) - lso->hdr_len; 705 if (len > 0) { 706 dma = dma_map_single(tx_ring->dev, skb->data + lso->hdr_len, 707 len, DMA_TO_DEVICE); 708 if (dma_mapping_error(tx_ring->dev, dma)) 709 return -ENOMEM; 710 711 enetc_bdr_idx_inc(tx_ring, i); 712 txbd = ENETC_TXBD(*tx_ring, *i); 713 tx_swbd = &tx_ring->tx_swbd[*i]; 714 prefetchw(txbd); 715 *count += 1; 716 717 enetc_clear_tx_bd(&txbd_tmp); 718 txbd_tmp.addr = cpu_to_le64(dma); 719 txbd_tmp.buf_len = cpu_to_le16(len); 720 721 tx_swbd->dma = dma; 722 tx_swbd->len = len; 723 tx_swbd->is_dma_page = 0; 724 tx_swbd->dir = DMA_TO_DEVICE; 725 } 726 727 frag = &skb_shinfo(skb)->frags[0]; 728 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) { 729 if (txbd) 730 *txbd = txbd_tmp; 731 732 len = skb_frag_size(frag); 733 dma = skb_frag_dma_map(tx_ring->dev, frag); 734 if (dma_mapping_error(tx_ring->dev, dma)) 735 return -ENOMEM; 736 737 /* Get the next BD */ 738 enetc_bdr_idx_inc(tx_ring, i); 739 txbd = ENETC_TXBD(*tx_ring, *i); 740 tx_swbd = &tx_ring->tx_swbd[*i]; 741 prefetchw(txbd); 742 *count += 1; 743 744 enetc_clear_tx_bd(&txbd_tmp); 745 txbd_tmp.addr = cpu_to_le64(dma); 746 txbd_tmp.buf_len = cpu_to_le16(len); 747 748 tx_swbd->dma = dma; 749 tx_swbd->len = len; 750 tx_swbd->is_dma_page = 1; 751 tx_swbd->dir = DMA_TO_DEVICE; 752 } 753 754 /* Last BD needs 'F' bit set */ 755 flags |= ENETC_TXBD_FLAGS_F; 756 txbd_tmp.flags = flags; 757 *txbd = txbd_tmp; 758 759 tx_swbd->is_eof = 1; 760 tx_swbd->skb = skb; 761 762 return 0; 763 } 764 765 static int enetc_lso_hw_offload(struct enetc_bdr *tx_ring, struct sk_buff *skb) 766 { 767 struct enetc_tx_swbd *tx_swbd; 768 struct enetc_lso_t lso = {0}; 769 int err, i, count = 0; 770 771 /* Initialize the LSO handler */ 772 enetc_lso_start(skb, &lso); 773 i = tx_ring->next_to_use; 774 775 enetc_lso_map_hdr(tx_ring, skb, &i, &lso); 776 /* First BD and an extend BD */ 777 count += 2; 778 779 err = enetc_lso_map_data(tx_ring, skb, &i, &lso, &count); 780 if (err) 781 goto dma_err; 782 783 /* Go to the next BD */ 784 enetc_bdr_idx_inc(tx_ring, &i); 785 tx_ring->next_to_use = i; 786 enetc_update_tx_ring_tail(tx_ring); 787 788 return count; 789 790 dma_err: 791 do { 792 tx_swbd = &tx_ring->tx_swbd[i]; 793 enetc_free_tx_frame(tx_ring, tx_swbd); 794 if (i == 0) 795 i = tx_ring->bd_count; 796 i--; 797 } while (--count); 798 799 return 0; 800 } 801 802 static int 
enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) 803 { 804 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); 805 int hdr_len, total_len, data_len; 806 struct enetc_tx_swbd *tx_swbd; 807 union enetc_tx_bd *txbd; 808 struct tso_t tso; 809 __wsum csum, csum2; 810 int count = 0, pos; 811 int err, i, bd_data_num; 812 813 /* Initialize the TSO handler, and prepare the first payload */ 814 hdr_len = tso_start(skb, &tso); 815 total_len = skb->len - hdr_len; 816 i = tx_ring->next_to_use; 817 818 while (total_len > 0) { 819 char *hdr; 820 821 /* Get the BD */ 822 txbd = ENETC_TXBD(*tx_ring, i); 823 tx_swbd = &tx_ring->tx_swbd[i]; 824 prefetchw(txbd); 825 826 /* Determine the length of this packet */ 827 data_len = min_t(int, skb_shinfo(skb)->gso_size, total_len); 828 total_len -= data_len; 829 830 /* prepare packet headers: MAC + IP + TCP */ 831 hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE; 832 tso_build_hdr(skb, hdr, &tso, data_len, total_len == 0); 833 834 /* compute the csum over the L4 header */ 835 csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos); 836 count += enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, 837 &i, hdr_len, data_len); 838 bd_data_num = 0; 839 840 while (data_len > 0) { 841 int size; 842 843 size = min_t(int, tso.size, data_len); 844 845 /* Advance the index in the BDR */ 846 enetc_bdr_idx_inc(tx_ring, &i); 847 txbd = ENETC_TXBD(*tx_ring, i); 848 tx_swbd = &tx_ring->tx_swbd[i]; 849 prefetchw(txbd); 850 851 /* Compute the checksum over this segment of data and 852 * add it to the csum already computed (over the L4 853 * header and possible other data segments). 854 */ 855 csum2 = csum_partial(tso.data, size, 0); 856 csum = csum_block_add(csum, csum2, pos); 857 pos += size; 858 859 err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd, 860 tso.data, size, 861 size == data_len); 862 if (err) { 863 if (i == 0) 864 i = tx_ring->bd_count; 865 i--; 866 867 goto err_map_data; 868 } 869 870 data_len -= size; 871 count++; 872 bd_data_num++; 873 tso_build_data(skb, &tso, size); 874 875 if (unlikely(bd_data_num >= priv->max_frags && data_len)) 876 goto err_chained_bd; 877 } 878 879 enetc_tso_complete_csum(tx_ring, &tso, skb, hdr, pos, csum); 880 881 if (total_len == 0) 882 tx_swbd->skb = skb; 883 884 /* Go to the next BD */ 885 enetc_bdr_idx_inc(tx_ring, &i); 886 } 887 888 tx_ring->next_to_use = i; 889 enetc_update_tx_ring_tail(tx_ring); 890 891 return count; 892 893 err_map_data: 894 dev_err(tx_ring->dev, "DMA map error"); 895 896 err_chained_bd: 897 enetc_unwind_tx_frame(tx_ring, count, i); 898 899 return 0; 900 } 901 902 static netdev_tx_t enetc_start_xmit(struct sk_buff *skb, 903 struct net_device *ndev) 904 { 905 struct enetc_ndev_priv *priv = netdev_priv(ndev); 906 struct enetc_bdr *tx_ring; 907 int count; 908 909 /* Queue one-step Sync packet if already locked */ 910 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { 911 if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, 912 &priv->flags)) { 913 skb_queue_tail(&priv->tx_skbs, skb); 914 return NETDEV_TX_OK; 915 } 916 } 917 918 tx_ring = priv->tx_ring[skb->queue_mapping]; 919 920 if (skb_is_gso(skb)) { 921 /* LSO data unit lengths of up to 256KB are supported */ 922 if (priv->active_offloads & ENETC_F_LSO && 923 (skb->len - enetc_lso_get_hdr_len(skb)) <= 924 ENETC_LSO_MAX_DATA_LEN) { 925 if (enetc_bd_unused(tx_ring) < enetc_lso_count_descs(skb)) { 926 netif_stop_subqueue(ndev, tx_ring->index); 927 return NETDEV_TX_BUSY; 928 } 929 930 count = enetc_lso_hw_offload(tx_ring, skb); 
931 } else { 932 if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) { 933 netif_stop_subqueue(ndev, tx_ring->index); 934 return NETDEV_TX_BUSY; 935 } 936 937 enetc_lock_mdio(); 938 count = enetc_map_tx_tso_buffs(tx_ring, skb); 939 enetc_unlock_mdio(); 940 } 941 } else { 942 if (unlikely(skb_shinfo(skb)->nr_frags > priv->max_frags)) 943 if (unlikely(skb_linearize(skb))) 944 goto drop_packet_err; 945 946 count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */ 947 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) { 948 netif_stop_subqueue(ndev, tx_ring->index); 949 return NETDEV_TX_BUSY; 950 } 951 952 enetc_lock_mdio(); 953 count = enetc_map_tx_buffs(tx_ring, skb); 954 enetc_unlock_mdio(); 955 } 956 957 if (unlikely(!count)) 958 goto drop_packet_err; 959 960 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED(priv->max_frags)) 961 netif_stop_subqueue(ndev, tx_ring->index); 962 963 return NETDEV_TX_OK; 964 965 drop_packet_err: 966 dev_kfree_skb_any(skb); 967 return NETDEV_TX_OK; 968 } 969 970 netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev) 971 { 972 struct enetc_ndev_priv *priv = netdev_priv(ndev); 973 u8 udp, msgtype, twostep; 974 u16 offset1, offset2; 975 976 /* Mark tx timestamp type on skb->cb[0] if requires */ 977 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 978 (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) { 979 skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK; 980 } else { 981 skb->cb[0] = 0; 982 } 983 984 /* Fall back to two-step timestamp if not one-step Sync packet */ 985 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { 986 if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, 987 &offset1, &offset2) || 988 msgtype != PTP_MSGTYPE_SYNC || twostep != 0) 989 skb->cb[0] = ENETC_F_TX_TSTAMP; 990 } 991 992 return enetc_start_xmit(skb, ndev); 993 } 994 EXPORT_SYMBOL_GPL(enetc_xmit); 995 996 static irqreturn_t enetc_msix(int irq, void *data) 997 { 998 struct enetc_int_vector *v = data; 999 int i; 1000 1001 enetc_lock_mdio(); 1002 1003 /* disable interrupts */ 1004 enetc_wr_reg_hot(v->rbier, 0); 1005 enetc_wr_reg_hot(v->ricr1, v->rx_ictt); 1006 1007 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) 1008 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0); 1009 1010 enetc_unlock_mdio(); 1011 1012 napi_schedule(&v->napi); 1013 1014 return IRQ_HANDLED; 1015 } 1016 1017 static void enetc_rx_dim_work(struct work_struct *w) 1018 { 1019 struct dim *dim = container_of(w, struct dim, work); 1020 struct dim_cq_moder moder = 1021 net_dim_get_rx_moderation(dim->mode, dim->profile_ix); 1022 struct enetc_int_vector *v = 1023 container_of(dim, struct enetc_int_vector, rx_dim); 1024 struct enetc_ndev_priv *priv = netdev_priv(v->rx_ring.ndev); 1025 1026 v->rx_ictt = enetc_usecs_to_cycles(moder.usec, priv->sysclk_freq); 1027 dim->state = DIM_START_MEASURE; 1028 } 1029 1030 static void enetc_rx_net_dim(struct enetc_int_vector *v) 1031 { 1032 struct dim_sample dim_sample = {}; 1033 1034 v->comp_cnt++; 1035 1036 if (!v->rx_napi_work) 1037 return; 1038 1039 dim_update_sample(v->comp_cnt, 1040 v->rx_ring.stats.packets, 1041 v->rx_ring.stats.bytes, 1042 &dim_sample); 1043 net_dim(&v->rx_dim, &dim_sample); 1044 } 1045 1046 static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci) 1047 { 1048 int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK; 1049 1050 return pi >= ci ? 
pi - ci : tx_ring->bd_count - ci + pi; 1051 } 1052 1053 static bool enetc_page_reusable(struct page *page) 1054 { 1055 return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1); 1056 } 1057 1058 static void enetc_reuse_page(struct enetc_bdr *rx_ring, 1059 struct enetc_rx_swbd *old) 1060 { 1061 struct enetc_rx_swbd *new; 1062 1063 new = &rx_ring->rx_swbd[rx_ring->next_to_alloc]; 1064 1065 /* next buf that may reuse a page */ 1066 enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc); 1067 1068 /* copy page reference */ 1069 *new = *old; 1070 } 1071 1072 static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd, 1073 u64 *tstamp) 1074 { 1075 u32 lo, hi, tstamp_lo; 1076 1077 lo = enetc_rd_hot(hw, ENETC_SICTR0); 1078 hi = enetc_rd_hot(hw, ENETC_SICTR1); 1079 tstamp_lo = le32_to_cpu(txbd->wb.tstamp); 1080 if (lo <= tstamp_lo) 1081 hi -= 1; 1082 *tstamp = (u64)hi << 32 | tstamp_lo; 1083 } 1084 1085 static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp) 1086 { 1087 struct skb_shared_hwtstamps shhwtstamps; 1088 1089 if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) { 1090 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 1091 shhwtstamps.hwtstamp = ns_to_ktime(tstamp); 1092 skb_txtime_consumed(skb); 1093 skb_tstamp_tx(skb, &shhwtstamps); 1094 } 1095 } 1096 1097 static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring, 1098 struct enetc_tx_swbd *tx_swbd) 1099 { 1100 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); 1101 struct enetc_rx_swbd rx_swbd = { 1102 .dma = tx_swbd->dma, 1103 .page = tx_swbd->page, 1104 .page_offset = tx_swbd->page_offset, 1105 .dir = tx_swbd->dir, 1106 .len = tx_swbd->len, 1107 }; 1108 struct enetc_bdr *rx_ring; 1109 1110 rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring); 1111 1112 if (likely(enetc_swbd_unused(rx_ring))) { 1113 enetc_reuse_page(rx_ring, &rx_swbd); 1114 1115 /* sync for use by the device */ 1116 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma, 1117 rx_swbd.page_offset, 1118 ENETC_RXB_DMA_SIZE_XDP, 1119 rx_swbd.dir); 1120 1121 rx_ring->stats.recycles++; 1122 } else { 1123 /* RX ring is already full, we need to unmap and free the 1124 * page, since there's nothing useful we can do with it. 
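		 * The RX slot this page came from was already scrubbed in
		 * enetc_clean_rx_ring_xdp() when the XDP_TX frame was queued,
		 * so enetc_refill_rx_ring() will allocate a replacement page
		 * via enetc_new_page() later on.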
	 */
		rx_ring->stats.recycle_failures++;

		dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE,
			       rx_swbd.dir);
		__free_page(rx_swbd.page);
	}

	rx_ring->xdp.xdp_tx_in_flight--;
}

static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
{
	int tx_frm_cnt = 0, tx_byte_cnt = 0, tx_win_drop = 0;
	struct net_device *ndev = tx_ring->ndev;
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_tx_swbd *tx_swbd;
	int i, bds_to_clean;
	bool do_twostep_tstamp;
	u64 tstamp = 0;

	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->tx_swbd[i];

	bds_to_clean = enetc_bd_ready_count(tx_ring, i);

	do_twostep_tstamp = false;

	while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
		struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
		struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
		bool is_eof = tx_swbd->is_eof;

		if (unlikely(tx_swbd->check_wb)) {
			union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);

			if (txbd->flags & ENETC_TXBD_FLAGS_W &&
			    tx_swbd->do_twostep_tstamp) {
				enetc_get_tx_tstamp(&priv->si->hw, txbd,
						    &tstamp);
				do_twostep_tstamp = true;
			}

			if (tx_swbd->qbv_en &&
			    txbd->wb.status & ENETC_TXBD_STATS_WIN)
				tx_win_drop++;
		}

		if (tx_swbd->is_xdp_tx)
			enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd);
		else if (likely(tx_swbd->dma))
			enetc_unmap_tx_buff(tx_ring, tx_swbd);

		if (xdp_frame) {
			xdp_return_frame(xdp_frame);
		} else if (skb) {
			if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
				/* Start work to release the lock for the next
				 * one-step timestamping packet, and to send one
				 * skb from the tx_skbs queue, if any is pending.
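				 * (These skbs were deferred by enetc_start_xmit()
				 * while the ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS
				 * bit was held; the tx_onestep_tstamp worker
				 * presumably sends the next queued skb, if any,
				 * or releases the bit. The worker itself is
				 * defined elsewhere.)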
1185 */ 1186 schedule_work(&priv->tx_onestep_tstamp); 1187 } else if (unlikely(do_twostep_tstamp)) { 1188 enetc_tstamp_tx(skb, tstamp); 1189 do_twostep_tstamp = false; 1190 } 1191 napi_consume_skb(skb, napi_budget); 1192 } 1193 1194 tx_byte_cnt += tx_swbd->len; 1195 /* Scrub the swbd here so we don't have to do that 1196 * when we reuse it during xmit 1197 */ 1198 memset(tx_swbd, 0, sizeof(*tx_swbd)); 1199 1200 bds_to_clean--; 1201 tx_swbd++; 1202 i++; 1203 if (unlikely(i == tx_ring->bd_count)) { 1204 i = 0; 1205 tx_swbd = tx_ring->tx_swbd; 1206 } 1207 1208 /* BD iteration loop end */ 1209 if (is_eof) { 1210 tx_frm_cnt++; 1211 /* re-arm interrupt source */ 1212 enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) | 1213 BIT(16 + tx_ring->index)); 1214 } 1215 1216 if (unlikely(!bds_to_clean)) 1217 bds_to_clean = enetc_bd_ready_count(tx_ring, i); 1218 } 1219 1220 tx_ring->next_to_clean = i; 1221 tx_ring->stats.packets += tx_frm_cnt; 1222 tx_ring->stats.bytes += tx_byte_cnt; 1223 tx_ring->stats.win_drop += tx_win_drop; 1224 1225 if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) && 1226 __netif_subqueue_stopped(ndev, tx_ring->index) && 1227 !test_bit(ENETC_TX_DOWN, &priv->flags) && 1228 (enetc_bd_unused(tx_ring) >= 1229 ENETC_TXBDS_MAX_NEEDED(priv->max_frags)))) { 1230 netif_wake_subqueue(ndev, tx_ring->index); 1231 } 1232 1233 return tx_frm_cnt != ENETC_DEFAULT_TX_WORK; 1234 } 1235 1236 static bool enetc_new_page(struct enetc_bdr *rx_ring, 1237 struct enetc_rx_swbd *rx_swbd) 1238 { 1239 bool xdp = !!(rx_ring->xdp.prog); 1240 struct page *page; 1241 dma_addr_t addr; 1242 1243 page = dev_alloc_page(); 1244 if (unlikely(!page)) 1245 return false; 1246 1247 /* For XDP_TX, we forgo dma_unmap -> dma_map */ 1248 rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; 1249 1250 addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir); 1251 if (unlikely(dma_mapping_error(rx_ring->dev, addr))) { 1252 __free_page(page); 1253 1254 return false; 1255 } 1256 1257 rx_swbd->dma = addr; 1258 rx_swbd->page = page; 1259 rx_swbd->page_offset = rx_ring->buffer_offset; 1260 1261 return true; 1262 } 1263 1264 static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt) 1265 { 1266 struct enetc_rx_swbd *rx_swbd; 1267 union enetc_rx_bd *rxbd; 1268 int i, j; 1269 1270 i = rx_ring->next_to_use; 1271 rx_swbd = &rx_ring->rx_swbd[i]; 1272 rxbd = enetc_rxbd(rx_ring, i); 1273 1274 for (j = 0; j < buff_cnt; j++) { 1275 /* try reuse page */ 1276 if (unlikely(!rx_swbd->page)) { 1277 if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) { 1278 rx_ring->stats.rx_alloc_errs++; 1279 break; 1280 } 1281 } 1282 1283 /* update RxBD */ 1284 rxbd->w.addr = cpu_to_le64(rx_swbd->dma + 1285 rx_swbd->page_offset); 1286 /* clear 'R" as well */ 1287 rxbd->r.lstatus = 0; 1288 1289 enetc_rxbd_next(rx_ring, &rxbd, &i); 1290 rx_swbd = &rx_ring->rx_swbd[i]; 1291 } 1292 1293 if (likely(j)) { 1294 rx_ring->next_to_alloc = i; /* keep track from page reuse */ 1295 rx_ring->next_to_use = i; 1296 1297 /* update ENETC's consumer index */ 1298 enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use); 1299 } 1300 1301 return j; 1302 } 1303 1304 static void enetc_get_rx_tstamp(struct net_device *ndev, 1305 union enetc_rx_bd *rxbd, 1306 struct sk_buff *skb) 1307 { 1308 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); 1309 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1310 struct enetc_hw *hw = &priv->si->hw; 1311 u32 lo, hi, tstamp_lo; 1312 u64 tstamp; 1313 1314 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) { 
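		/* The BD carries only the low 32 bits of the timestamp
		 * counter. Reconstruct the full value from the current
		 * SICTR0/1 reading: if the current low word is already <=
		 * the stamped low word, the high word rolled over after the
		 * packet was stamped, so pair tstamp_lo with hi - 1.
		 *
		 * e.g. tstamp_lo = 0xffffff00 taken just before a rollover
		 * and a later read of lo = 0x00000040 means hi was
		 * incremented in between and must be decremented here.
		 */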
1315 lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0); 1316 hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1); 1317 rxbd = enetc_rxbd_ext(rxbd); 1318 tstamp_lo = le32_to_cpu(rxbd->ext.tstamp); 1319 if (lo <= tstamp_lo) 1320 hi -= 1; 1321 1322 tstamp = (u64)hi << 32 | tstamp_lo; 1323 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 1324 shhwtstamps->hwtstamp = ns_to_ktime(tstamp); 1325 } 1326 } 1327 1328 static void enetc_get_offloads(struct enetc_bdr *rx_ring, 1329 union enetc_rx_bd *rxbd, struct sk_buff *skb) 1330 { 1331 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev); 1332 1333 /* TODO: hashing */ 1334 if (rx_ring->ndev->features & NETIF_F_RXCSUM) { 1335 u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum); 1336 1337 skb->csum = csum_unfold((__force __sum16)~htons(inet_csum)); 1338 skb->ip_summed = CHECKSUM_COMPLETE; 1339 } 1340 1341 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) { 1342 __be16 tpid = 0; 1343 1344 switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) { 1345 case 0: 1346 tpid = htons(ETH_P_8021Q); 1347 break; 1348 case 1: 1349 tpid = htons(ETH_P_8021AD); 1350 break; 1351 case 2: 1352 tpid = htons(enetc_port_rd(&priv->si->hw, 1353 ENETC_PCVLANR1)); 1354 break; 1355 case 3: 1356 tpid = htons(enetc_port_rd(&priv->si->hw, 1357 ENETC_PCVLANR2)); 1358 break; 1359 default: 1360 break; 1361 } 1362 1363 __vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt)); 1364 } 1365 1366 if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && 1367 (priv->active_offloads & ENETC_F_RX_TSTAMP)) 1368 enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb); 1369 } 1370 1371 /* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS, 1372 * so it needs to work with both DMA_FROM_DEVICE as well as DMA_BIDIRECTIONAL 1373 * mapped buffers. 1374 */ 1375 static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring, 1376 int i, u16 size) 1377 { 1378 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; 1379 1380 dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma, 1381 rx_swbd->page_offset, 1382 size, rx_swbd->dir); 1383 return rx_swbd; 1384 } 1385 1386 /* Reuse the current page without performing half-page buffer flipping */ 1387 static void enetc_put_rx_buff(struct enetc_bdr *rx_ring, 1388 struct enetc_rx_swbd *rx_swbd) 1389 { 1390 size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset; 1391 1392 enetc_reuse_page(rx_ring, rx_swbd); 1393 1394 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, 1395 rx_swbd->page_offset, 1396 buffer_size, rx_swbd->dir); 1397 1398 rx_swbd->page = NULL; 1399 } 1400 1401 /* Reuse the current page by performing half-page buffer flipping */ 1402 static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring, 1403 struct enetc_rx_swbd *rx_swbd) 1404 { 1405 if (likely(enetc_page_reusable(rx_swbd->page))) { 1406 rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE; 1407 page_ref_inc(rx_swbd->page); 1408 1409 enetc_put_rx_buff(rx_ring, rx_swbd); 1410 } else { 1411 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, 1412 rx_swbd->dir); 1413 rx_swbd->page = NULL; 1414 } 1415 } 1416 1417 static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring, 1418 int i, u16 size) 1419 { 1420 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); 1421 struct sk_buff *skb; 1422 void *ba; 1423 1424 ba = page_address(rx_swbd->page) + rx_swbd->page_offset; 1425 skb = build_skb(ba - rx_ring->buffer_offset, ENETC_RXB_TRUESIZE); 1426 if (unlikely(!skb)) { 1427 rx_ring->stats.rx_alloc_errs++; 1428 return NULL; 1429 } 1430 1431 
skb_reserve(skb, rx_ring->buffer_offset); 1432 __skb_put(skb, size); 1433 1434 enetc_flip_rx_buff(rx_ring, rx_swbd); 1435 1436 return skb; 1437 } 1438 1439 static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i, 1440 u16 size, struct sk_buff *skb) 1441 { 1442 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); 1443 1444 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page, 1445 rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE); 1446 1447 enetc_flip_rx_buff(rx_ring, rx_swbd); 1448 } 1449 1450 static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring, 1451 u32 bd_status, 1452 union enetc_rx_bd **rxbd, int *i) 1453 { 1454 if (likely(!(bd_status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK)))) 1455 return false; 1456 1457 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); 1458 enetc_rxbd_next(rx_ring, rxbd, i); 1459 1460 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { 1461 dma_rmb(); 1462 bd_status = le32_to_cpu((*rxbd)->r.lstatus); 1463 1464 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); 1465 enetc_rxbd_next(rx_ring, rxbd, i); 1466 } 1467 1468 rx_ring->ndev->stats.rx_dropped++; 1469 rx_ring->ndev->stats.rx_errors++; 1470 1471 return true; 1472 } 1473 1474 static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring, 1475 u32 bd_status, union enetc_rx_bd **rxbd, 1476 int *i, int *cleaned_cnt, int buffer_size) 1477 { 1478 struct sk_buff *skb; 1479 u16 size; 1480 1481 size = le16_to_cpu((*rxbd)->r.buf_len); 1482 skb = enetc_map_rx_buff_to_skb(rx_ring, *i, size); 1483 if (!skb) 1484 return NULL; 1485 1486 enetc_get_offloads(rx_ring, *rxbd, skb); 1487 1488 (*cleaned_cnt)++; 1489 1490 enetc_rxbd_next(rx_ring, rxbd, i); 1491 1492 /* not last BD in frame? */ 1493 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { 1494 bd_status = le32_to_cpu((*rxbd)->r.lstatus); 1495 size = buffer_size; 1496 1497 if (bd_status & ENETC_RXBD_LSTATUS_F) { 1498 dma_rmb(); 1499 size = le16_to_cpu((*rxbd)->r.buf_len); 1500 } 1501 1502 enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb); 1503 1504 (*cleaned_cnt)++; 1505 1506 enetc_rxbd_next(rx_ring, rxbd, i); 1507 } 1508 1509 skb_record_rx_queue(skb, rx_ring->index); 1510 skb->protocol = eth_type_trans(skb, rx_ring->ndev); 1511 1512 return skb; 1513 } 1514 1515 #define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */ 1516 1517 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, 1518 struct napi_struct *napi, int work_limit) 1519 { 1520 int rx_frm_cnt = 0, rx_byte_cnt = 0; 1521 int cleaned_cnt, i; 1522 1523 cleaned_cnt = enetc_bd_unused(rx_ring); 1524 /* next descriptor to process */ 1525 i = rx_ring->next_to_clean; 1526 1527 while (likely(rx_frm_cnt < work_limit)) { 1528 union enetc_rx_bd *rxbd; 1529 struct sk_buff *skb; 1530 u32 bd_status; 1531 1532 if (cleaned_cnt >= ENETC_RXBD_BUNDLE) 1533 cleaned_cnt -= enetc_refill_rx_ring(rx_ring, 1534 cleaned_cnt); 1535 1536 rxbd = enetc_rxbd(rx_ring, i); 1537 bd_status = le32_to_cpu(rxbd->r.lstatus); 1538 if (!bd_status) 1539 break; 1540 1541 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index)); 1542 dma_rmb(); /* for reading other rxbd fields */ 1543 1544 if (enetc_check_bd_errors_and_consume(rx_ring, bd_status, 1545 &rxbd, &i)) 1546 break; 1547 1548 skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i, 1549 &cleaned_cnt, ENETC_RXB_DMA_SIZE); 1550 if (!skb) 1551 break; 1552 1553 /* When set, the outer VLAN header is extracted and reported 1554 * in the receive buffer descriptor. So rx_byte_cnt should 1555 * add the length of the extracted VLAN header. 
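		 * skb->len at this point excludes both the extracted VLAN tag
		 * and the Ethernet header pulled off by eth_type_trans(),
		 * hence the VLAN_HLEN and ETH_HLEN adjustments below.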
1556 */ 1557 if (bd_status & ENETC_RXBD_FLAG_VLAN) 1558 rx_byte_cnt += VLAN_HLEN; 1559 rx_byte_cnt += skb->len + ETH_HLEN; 1560 rx_frm_cnt++; 1561 1562 napi_gro_receive(napi, skb); 1563 } 1564 1565 rx_ring->next_to_clean = i; 1566 1567 rx_ring->stats.packets += rx_frm_cnt; 1568 rx_ring->stats.bytes += rx_byte_cnt; 1569 1570 return rx_frm_cnt; 1571 } 1572 1573 static void enetc_xdp_map_tx_buff(struct enetc_bdr *tx_ring, int i, 1574 struct enetc_tx_swbd *tx_swbd, 1575 int frm_len) 1576 { 1577 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i); 1578 1579 prefetchw(txbd); 1580 1581 enetc_clear_tx_bd(txbd); 1582 txbd->addr = cpu_to_le64(tx_swbd->dma + tx_swbd->page_offset); 1583 txbd->buf_len = cpu_to_le16(tx_swbd->len); 1584 txbd->frm_len = cpu_to_le16(frm_len); 1585 1586 memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd)); 1587 } 1588 1589 /* Puts in the TX ring one XDP frame, mapped as an array of TX software buffer 1590 * descriptors. 1591 */ 1592 static bool enetc_xdp_tx(struct enetc_bdr *tx_ring, 1593 struct enetc_tx_swbd *xdp_tx_arr, int num_tx_swbd) 1594 { 1595 struct enetc_tx_swbd *tmp_tx_swbd = xdp_tx_arr; 1596 int i, k, frm_len = tmp_tx_swbd->len; 1597 1598 if (unlikely(enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(num_tx_swbd))) 1599 return false; 1600 1601 while (unlikely(!tmp_tx_swbd->is_eof)) { 1602 tmp_tx_swbd++; 1603 frm_len += tmp_tx_swbd->len; 1604 } 1605 1606 i = tx_ring->next_to_use; 1607 1608 for (k = 0; k < num_tx_swbd; k++) { 1609 struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[k]; 1610 1611 enetc_xdp_map_tx_buff(tx_ring, i, xdp_tx_swbd, frm_len); 1612 1613 /* last BD needs 'F' bit set */ 1614 if (xdp_tx_swbd->is_eof) { 1615 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i); 1616 1617 txbd->flags = ENETC_TXBD_FLAGS_F; 1618 } 1619 1620 enetc_bdr_idx_inc(tx_ring, &i); 1621 } 1622 1623 tx_ring->next_to_use = i; 1624 1625 return true; 1626 } 1627 1628 static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring, 1629 struct enetc_tx_swbd *xdp_tx_arr, 1630 struct xdp_frame *xdp_frame) 1631 { 1632 struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[0]; 1633 struct skb_shared_info *shinfo; 1634 void *data = xdp_frame->data; 1635 int len = xdp_frame->len; 1636 skb_frag_t *frag; 1637 dma_addr_t dma; 1638 unsigned int f; 1639 int n = 0; 1640 1641 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); 1642 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { 1643 netdev_err(tx_ring->ndev, "DMA map error\n"); 1644 return -1; 1645 } 1646 1647 xdp_tx_swbd->dma = dma; 1648 xdp_tx_swbd->dir = DMA_TO_DEVICE; 1649 xdp_tx_swbd->len = len; 1650 xdp_tx_swbd->is_xdp_redirect = true; 1651 xdp_tx_swbd->is_eof = false; 1652 xdp_tx_swbd->xdp_frame = NULL; 1653 1654 n++; 1655 1656 if (!xdp_frame_has_frags(xdp_frame)) 1657 goto out; 1658 1659 xdp_tx_swbd = &xdp_tx_arr[n]; 1660 1661 shinfo = xdp_get_shared_info_from_frame(xdp_frame); 1662 1663 for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags; 1664 f++, frag++) { 1665 data = skb_frag_address(frag); 1666 len = skb_frag_size(frag); 1667 1668 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); 1669 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { 1670 /* Undo the DMA mapping for all fragments */ 1671 while (--n >= 0) 1672 enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]); 1673 1674 netdev_err(tx_ring->ndev, "DMA map error\n"); 1675 return -1; 1676 } 1677 1678 xdp_tx_swbd->dma = dma; 1679 xdp_tx_swbd->dir = DMA_TO_DEVICE; 1680 xdp_tx_swbd->len = len; 1681 xdp_tx_swbd->is_xdp_redirect = true; 1682 xdp_tx_swbd->is_eof = 
false; 1683 xdp_tx_swbd->xdp_frame = NULL; 1684 1685 n++; 1686 xdp_tx_swbd = &xdp_tx_arr[n]; 1687 } 1688 out: 1689 xdp_tx_arr[n - 1].is_eof = true; 1690 xdp_tx_arr[n - 1].xdp_frame = xdp_frame; 1691 1692 return n; 1693 } 1694 1695 int enetc_xdp_xmit(struct net_device *ndev, int num_frames, 1696 struct xdp_frame **frames, u32 flags) 1697 { 1698 struct enetc_tx_swbd xdp_redirect_arr[ENETC_MAX_SKB_FRAGS] = {0}; 1699 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1700 struct enetc_bdr *tx_ring; 1701 int xdp_tx_bd_cnt, i, k; 1702 int xdp_tx_frm_cnt = 0; 1703 1704 if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) 1705 return -ENETDOWN; 1706 1707 enetc_lock_mdio(); 1708 1709 tx_ring = priv->xdp_tx_ring[smp_processor_id()]; 1710 1711 prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use)); 1712 1713 for (k = 0; k < num_frames; k++) { 1714 xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring, 1715 xdp_redirect_arr, 1716 frames[k]); 1717 if (unlikely(xdp_tx_bd_cnt < 0)) 1718 break; 1719 1720 if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr, 1721 xdp_tx_bd_cnt))) { 1722 for (i = 0; i < xdp_tx_bd_cnt; i++) 1723 enetc_unmap_tx_buff(tx_ring, 1724 &xdp_redirect_arr[i]); 1725 tx_ring->stats.xdp_tx_drops++; 1726 break; 1727 } 1728 1729 xdp_tx_frm_cnt++; 1730 } 1731 1732 if (unlikely((flags & XDP_XMIT_FLUSH) || k != xdp_tx_frm_cnt)) 1733 enetc_update_tx_ring_tail(tx_ring); 1734 1735 tx_ring->stats.xdp_tx += xdp_tx_frm_cnt; 1736 1737 enetc_unlock_mdio(); 1738 1739 return xdp_tx_frm_cnt; 1740 } 1741 EXPORT_SYMBOL_GPL(enetc_xdp_xmit); 1742 1743 static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i, 1744 struct xdp_buff *xdp_buff, u16 size) 1745 { 1746 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); 1747 void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset; 1748 1749 /* To be used for XDP_TX */ 1750 rx_swbd->len = size; 1751 1752 xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset, 1753 rx_ring->buffer_offset, size, false); 1754 } 1755 1756 static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i, 1757 u16 size, struct xdp_buff *xdp_buff) 1758 { 1759 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff); 1760 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); 1761 skb_frag_t *frag; 1762 1763 /* To be used for XDP_TX */ 1764 rx_swbd->len = size; 1765 1766 if (!xdp_buff_has_frags(xdp_buff)) { 1767 xdp_buff_set_frags_flag(xdp_buff); 1768 shinfo->xdp_frags_size = size; 1769 shinfo->nr_frags = 0; 1770 } else { 1771 shinfo->xdp_frags_size += size; 1772 } 1773 1774 if (page_is_pfmemalloc(rx_swbd->page)) 1775 xdp_buff_set_frag_pfmemalloc(xdp_buff); 1776 1777 frag = &shinfo->frags[shinfo->nr_frags]; 1778 skb_frag_fill_page_desc(frag, rx_swbd->page, rx_swbd->page_offset, 1779 size); 1780 1781 shinfo->nr_frags++; 1782 } 1783 1784 static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status, 1785 union enetc_rx_bd **rxbd, int *i, 1786 int *cleaned_cnt, struct xdp_buff *xdp_buff) 1787 { 1788 u16 size = le16_to_cpu((*rxbd)->r.buf_len); 1789 1790 xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq); 1791 1792 enetc_map_rx_buff_to_xdp(rx_ring, *i, xdp_buff, size); 1793 (*cleaned_cnt)++; 1794 enetc_rxbd_next(rx_ring, rxbd, i); 1795 1796 /* not last BD in frame? 
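	 * Trailing BDs are attached as frags until the final (F) bit shows
	 * up. Non-final BDs are assumed to fill a whole buffer
	 * (ENETC_RXB_DMA_SIZE_XDP); only the final BD's buf_len is read,
	 * after a dma_rmb(), mirroring enetc_build_skb() above.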
*/ 1797 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { 1798 bd_status = le32_to_cpu((*rxbd)->r.lstatus); 1799 size = ENETC_RXB_DMA_SIZE_XDP; 1800 1801 if (bd_status & ENETC_RXBD_LSTATUS_F) { 1802 dma_rmb(); 1803 size = le16_to_cpu((*rxbd)->r.buf_len); 1804 } 1805 1806 enetc_add_rx_buff_to_xdp(rx_ring, *i, size, xdp_buff); 1807 (*cleaned_cnt)++; 1808 enetc_rxbd_next(rx_ring, rxbd, i); 1809 } 1810 } 1811 1812 /* Convert RX buffer descriptors to TX buffer descriptors. These will be 1813 * recycled back into the RX ring in enetc_clean_tx_ring. 1814 */ 1815 static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *xdp_tx_arr, 1816 struct enetc_bdr *rx_ring, 1817 int rx_ring_first, int rx_ring_last) 1818 { 1819 int n = 0; 1820 1821 for (; rx_ring_first != rx_ring_last; 1822 n++, enetc_bdr_idx_inc(rx_ring, &rx_ring_first)) { 1823 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; 1824 struct enetc_tx_swbd *tx_swbd = &xdp_tx_arr[n]; 1825 1826 /* No need to dma_map, we already have DMA_BIDIRECTIONAL */ 1827 tx_swbd->dma = rx_swbd->dma; 1828 tx_swbd->dir = rx_swbd->dir; 1829 tx_swbd->page = rx_swbd->page; 1830 tx_swbd->page_offset = rx_swbd->page_offset; 1831 tx_swbd->len = rx_swbd->len; 1832 tx_swbd->is_dma_page = true; 1833 tx_swbd->is_xdp_tx = true; 1834 tx_swbd->is_eof = false; 1835 } 1836 1837 /* We rely on caller providing an rx_ring_last > rx_ring_first */ 1838 xdp_tx_arr[n - 1].is_eof = true; 1839 1840 return n; 1841 } 1842 1843 static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first, 1844 int rx_ring_last) 1845 { 1846 while (rx_ring_first != rx_ring_last) { 1847 enetc_put_rx_buff(rx_ring, 1848 &rx_ring->rx_swbd[rx_ring_first]); 1849 enetc_bdr_idx_inc(rx_ring, &rx_ring_first); 1850 } 1851 } 1852 1853 static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, 1854 struct napi_struct *napi, int work_limit, 1855 struct bpf_prog *prog) 1856 { 1857 int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0, xdp_redirect_frm_cnt = 0; 1858 struct enetc_tx_swbd xdp_tx_arr[ENETC_MAX_SKB_FRAGS] = {0}; 1859 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev); 1860 int rx_frm_cnt = 0, rx_byte_cnt = 0; 1861 struct enetc_bdr *tx_ring; 1862 int cleaned_cnt, i; 1863 u32 xdp_act; 1864 1865 cleaned_cnt = enetc_bd_unused(rx_ring); 1866 /* next descriptor to process */ 1867 i = rx_ring->next_to_clean; 1868 1869 while (likely(rx_frm_cnt < work_limit)) { 1870 union enetc_rx_bd *rxbd, *orig_rxbd; 1871 int orig_i, orig_cleaned_cnt; 1872 struct xdp_buff xdp_buff; 1873 struct sk_buff *skb; 1874 u32 bd_status; 1875 int err; 1876 1877 rxbd = enetc_rxbd(rx_ring, i); 1878 bd_status = le32_to_cpu(rxbd->r.lstatus); 1879 if (!bd_status) 1880 break; 1881 1882 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index)); 1883 dma_rmb(); /* for reading other rxbd fields */ 1884 1885 if (enetc_check_bd_errors_and_consume(rx_ring, bd_status, 1886 &rxbd, &i)) 1887 break; 1888 1889 orig_rxbd = rxbd; 1890 orig_cleaned_cnt = cleaned_cnt; 1891 orig_i = i; 1892 1893 enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i, 1894 &cleaned_cnt, &xdp_buff); 1895 1896 /* When set, the outer VLAN header is extracted and reported 1897 * in the receive buffer descriptor. So rx_byte_cnt should 1898 * add the length of the extracted VLAN header. 
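		 * Unlike the skb path, xdp_get_buff_len() below already
		 * accounts for the Ethernet header and any frags, so only
		 * the extracted VLAN tag needs to be added back here.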
1899 */ 1900 if (bd_status & ENETC_RXBD_FLAG_VLAN) 1901 rx_byte_cnt += VLAN_HLEN; 1902 rx_byte_cnt += xdp_get_buff_len(&xdp_buff); 1903 1904 xdp_act = bpf_prog_run_xdp(prog, &xdp_buff); 1905 1906 switch (xdp_act) { 1907 default: 1908 bpf_warn_invalid_xdp_action(rx_ring->ndev, prog, xdp_act); 1909 fallthrough; 1910 case XDP_ABORTED: 1911 trace_xdp_exception(rx_ring->ndev, prog, xdp_act); 1912 fallthrough; 1913 case XDP_DROP: 1914 enetc_xdp_drop(rx_ring, orig_i, i); 1915 rx_ring->stats.xdp_drops++; 1916 break; 1917 case XDP_PASS: 1918 rxbd = orig_rxbd; 1919 cleaned_cnt = orig_cleaned_cnt; 1920 i = orig_i; 1921 1922 skb = enetc_build_skb(rx_ring, bd_status, &rxbd, 1923 &i, &cleaned_cnt, 1924 ENETC_RXB_DMA_SIZE_XDP); 1925 if (unlikely(!skb)) 1926 goto out; 1927 1928 napi_gro_receive(napi, skb); 1929 break; 1930 case XDP_TX: 1931 tx_ring = priv->xdp_tx_ring[rx_ring->index]; 1932 if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) { 1933 enetc_xdp_drop(rx_ring, orig_i, i); 1934 tx_ring->stats.xdp_tx_drops++; 1935 break; 1936 } 1937 1938 xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr, 1939 rx_ring, 1940 orig_i, i); 1941 1942 if (!enetc_xdp_tx(tx_ring, xdp_tx_arr, xdp_tx_bd_cnt)) { 1943 enetc_xdp_drop(rx_ring, orig_i, i); 1944 tx_ring->stats.xdp_tx_drops++; 1945 } else { 1946 tx_ring->stats.xdp_tx++; 1947 rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt; 1948 xdp_tx_frm_cnt++; 1949 /* The XDP_TX enqueue was successful, so we 1950 * need to scrub the RX software BDs because 1951 * the ownership of the buffers no longer 1952 * belongs to the RX ring, and we must prevent 1953 * enetc_refill_rx_ring() from reusing 1954 * rx_swbd->page. 1955 */ 1956 while (orig_i != i) { 1957 rx_ring->rx_swbd[orig_i].page = NULL; 1958 enetc_bdr_idx_inc(rx_ring, &orig_i); 1959 } 1960 } 1961 break; 1962 case XDP_REDIRECT: 1963 err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog); 1964 if (unlikely(err)) { 1965 enetc_xdp_drop(rx_ring, orig_i, i); 1966 rx_ring->stats.xdp_redirect_failures++; 1967 } else { 1968 while (orig_i != i) { 1969 enetc_flip_rx_buff(rx_ring, 1970 &rx_ring->rx_swbd[orig_i]); 1971 enetc_bdr_idx_inc(rx_ring, &orig_i); 1972 } 1973 xdp_redirect_frm_cnt++; 1974 rx_ring->stats.xdp_redirect++; 1975 } 1976 } 1977 1978 rx_frm_cnt++; 1979 } 1980 1981 out: 1982 rx_ring->next_to_clean = i; 1983 1984 rx_ring->stats.packets += rx_frm_cnt; 1985 rx_ring->stats.bytes += rx_byte_cnt; 1986 1987 if (xdp_redirect_frm_cnt) 1988 xdp_do_flush(); 1989 1990 if (xdp_tx_frm_cnt) 1991 enetc_update_tx_ring_tail(tx_ring); 1992 1993 if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight) 1994 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) - 1995 rx_ring->xdp.xdp_tx_in_flight); 1996 1997 return rx_frm_cnt; 1998 } 1999 2000 static int enetc_poll(struct napi_struct *napi, int budget) 2001 { 2002 struct enetc_int_vector 2003 *v = container_of(napi, struct enetc_int_vector, napi); 2004 struct enetc_bdr *rx_ring = &v->rx_ring; 2005 struct bpf_prog *prog; 2006 bool complete = true; 2007 int work_done; 2008 int i; 2009 2010 enetc_lock_mdio(); 2011 2012 for (i = 0; i < v->count_tx_rings; i++) 2013 if (!enetc_clean_tx_ring(&v->tx_ring[i], budget)) 2014 complete = false; 2015 2016 prog = rx_ring->xdp.prog; 2017 if (prog) 2018 work_done = enetc_clean_rx_ring_xdp(rx_ring, napi, budget, prog); 2019 else 2020 work_done = enetc_clean_rx_ring(rx_ring, napi, budget); 2021 if (work_done == budget) 2022 complete = false; 2023 if (work_done) 2024 v->rx_napi_work = true; 2025 2026 if (!complete) { 2027 enetc_unlock_mdio(); 2028 return budget; 
2029 } 2030 2031 napi_complete_done(napi, work_done); 2032 2033 if (likely(v->rx_dim_en)) 2034 enetc_rx_net_dim(v); 2035 2036 v->rx_napi_work = false; 2037 2038 /* enable interrupts */ 2039 enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE); 2040 2041 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) 2042 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 2043 ENETC_TBIER_TXTIE); 2044 2045 enetc_unlock_mdio(); 2046 2047 return work_done; 2048 } 2049 2050 /* Probing and Init */ 2051 #define ENETC_MAX_RFS_SIZE 64 2052 void enetc_get_si_caps(struct enetc_si *si) 2053 { 2054 struct enetc_hw *hw = &si->hw; 2055 u32 val; 2056 2057 /* find out how many of various resources we have to work with */ 2058 val = enetc_rd(hw, ENETC_SICAPR0); 2059 si->num_rx_rings = (val >> 16) & 0xff; 2060 si->num_tx_rings = val & 0xff; 2061 2062 val = enetc_rd(hw, ENETC_SIPCAPR0); 2063 if (val & ENETC_SIPCAPR0_RFS) { 2064 val = enetc_rd(hw, ENETC_SIRFSCAPR); 2065 si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val); 2066 si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE); 2067 } else { 2068 /* ENETC that does not support RFS */ 2069 si->num_fs_entries = 0; 2070 } 2071 2072 si->num_rss = 0; 2073 val = enetc_rd(hw, ENETC_SIPCAPR0); 2074 if (val & ENETC_SIPCAPR0_RSS) { 2075 u32 rss; 2076 2077 rss = enetc_rd(hw, ENETC_SIRSSCAPR); 2078 si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss); 2079 } 2080 2081 if (val & ENETC_SIPCAPR0_LSO) 2082 si->hw_features |= ENETC_SI_F_LSO; 2083 } 2084 EXPORT_SYMBOL_GPL(enetc_get_si_caps); 2085 2086 static int enetc_dma_alloc_bdr(struct enetc_bdr_resource *res) 2087 { 2088 size_t bd_base_size = res->bd_count * res->bd_size; 2089 2090 res->bd_base = dma_alloc_coherent(res->dev, bd_base_size, 2091 &res->bd_dma_base, GFP_KERNEL); 2092 if (!res->bd_base) 2093 return -ENOMEM; 2094 2095 /* h/w requires 128B alignment */ 2096 if (!IS_ALIGNED(res->bd_dma_base, 128)) { 2097 dma_free_coherent(res->dev, bd_base_size, res->bd_base, 2098 res->bd_dma_base); 2099 return -EINVAL; 2100 } 2101 2102 return 0; 2103 } 2104 2105 static void enetc_dma_free_bdr(const struct enetc_bdr_resource *res) 2106 { 2107 size_t bd_base_size = res->bd_count * res->bd_size; 2108 2109 dma_free_coherent(res->dev, bd_base_size, res->bd_base, 2110 res->bd_dma_base); 2111 } 2112 2113 static int enetc_alloc_tx_resource(struct enetc_bdr_resource *res, 2114 struct device *dev, size_t bd_count) 2115 { 2116 int err; 2117 2118 res->dev = dev; 2119 res->bd_count = bd_count; 2120 res->bd_size = sizeof(union enetc_tx_bd); 2121 2122 res->tx_swbd = vcalloc(bd_count, sizeof(*res->tx_swbd)); 2123 if (!res->tx_swbd) 2124 return -ENOMEM; 2125 2126 err = enetc_dma_alloc_bdr(res); 2127 if (err) 2128 goto err_alloc_bdr; 2129 2130 res->tso_headers = dma_alloc_coherent(dev, bd_count * TSO_HEADER_SIZE, 2131 &res->tso_headers_dma, 2132 GFP_KERNEL); 2133 if (!res->tso_headers) { 2134 err = -ENOMEM; 2135 goto err_alloc_tso; 2136 } 2137 2138 return 0; 2139 2140 err_alloc_tso: 2141 enetc_dma_free_bdr(res); 2142 err_alloc_bdr: 2143 vfree(res->tx_swbd); 2144 res->tx_swbd = NULL; 2145 2146 return err; 2147 } 2148 2149 static void enetc_free_tx_resource(const struct enetc_bdr_resource *res) 2150 { 2151 dma_free_coherent(res->dev, res->bd_count * TSO_HEADER_SIZE, 2152 res->tso_headers, res->tso_headers_dma); 2153 enetc_dma_free_bdr(res); 2154 vfree(res->tx_swbd); 2155 } 2156 2157 static struct enetc_bdr_resource * 2158 enetc_alloc_tx_resources(struct enetc_ndev_priv *priv) 2159 { 2160 struct enetc_bdr_resource *tx_res; 2161 int i, err; 2162 2163 tx_res
= kcalloc(priv->num_tx_rings, sizeof(*tx_res), GFP_KERNEL); 2164 if (!tx_res) 2165 return ERR_PTR(-ENOMEM); 2166 2167 for (i = 0; i < priv->num_tx_rings; i++) { 2168 struct enetc_bdr *tx_ring = priv->tx_ring[i]; 2169 2170 err = enetc_alloc_tx_resource(&tx_res[i], tx_ring->dev, 2171 tx_ring->bd_count); 2172 if (err) 2173 goto fail; 2174 } 2175 2176 return tx_res; 2177 2178 fail: 2179 while (i-- > 0) 2180 enetc_free_tx_resource(&tx_res[i]); 2181 2182 kfree(tx_res); 2183 2184 return ERR_PTR(err); 2185 } 2186 2187 static void enetc_free_tx_resources(const struct enetc_bdr_resource *tx_res, 2188 size_t num_resources) 2189 { 2190 size_t i; 2191 2192 for (i = 0; i < num_resources; i++) 2193 enetc_free_tx_resource(&tx_res[i]); 2194 2195 kfree(tx_res); 2196 } 2197 2198 static int enetc_alloc_rx_resource(struct enetc_bdr_resource *res, 2199 struct device *dev, size_t bd_count, 2200 bool extended) 2201 { 2202 int err; 2203 2204 res->dev = dev; 2205 res->bd_count = bd_count; 2206 res->bd_size = sizeof(union enetc_rx_bd); 2207 if (extended) 2208 res->bd_size *= 2; 2209 2210 res->rx_swbd = vcalloc(bd_count, sizeof(struct enetc_rx_swbd)); 2211 if (!res->rx_swbd) 2212 return -ENOMEM; 2213 2214 err = enetc_dma_alloc_bdr(res); 2215 if (err) { 2216 vfree(res->rx_swbd); 2217 return err; 2218 } 2219 2220 return 0; 2221 } 2222 2223 static void enetc_free_rx_resource(const struct enetc_bdr_resource *res) 2224 { 2225 enetc_dma_free_bdr(res); 2226 vfree(res->rx_swbd); 2227 } 2228 2229 static struct enetc_bdr_resource * 2230 enetc_alloc_rx_resources(struct enetc_ndev_priv *priv, bool extended) 2231 { 2232 struct enetc_bdr_resource *rx_res; 2233 int i, err; 2234 2235 rx_res = kcalloc(priv->num_rx_rings, sizeof(*rx_res), GFP_KERNEL); 2236 if (!rx_res) 2237 return ERR_PTR(-ENOMEM); 2238 2239 for (i = 0; i < priv->num_rx_rings; i++) { 2240 struct enetc_bdr *rx_ring = priv->rx_ring[i]; 2241 2242 err = enetc_alloc_rx_resource(&rx_res[i], rx_ring->dev, 2243 rx_ring->bd_count, extended); 2244 if (err) 2245 goto fail; 2246 } 2247 2248 return rx_res; 2249 2250 fail: 2251 while (i-- > 0) 2252 enetc_free_rx_resource(&rx_res[i]); 2253 2254 kfree(rx_res); 2255 2256 return ERR_PTR(err); 2257 } 2258 2259 static void enetc_free_rx_resources(const struct enetc_bdr_resource *rx_res, 2260 size_t num_resources) 2261 { 2262 size_t i; 2263 2264 for (i = 0; i < num_resources; i++) 2265 enetc_free_rx_resource(&rx_res[i]); 2266 2267 kfree(rx_res); 2268 } 2269 2270 static void enetc_assign_tx_resource(struct enetc_bdr *tx_ring, 2271 const struct enetc_bdr_resource *res) 2272 { 2273 tx_ring->bd_base = res ? res->bd_base : NULL; 2274 tx_ring->bd_dma_base = res ? res->bd_dma_base : 0; 2275 tx_ring->tx_swbd = res ? res->tx_swbd : NULL; 2276 tx_ring->tso_headers = res ? res->tso_headers : NULL; 2277 tx_ring->tso_headers_dma = res ? res->tso_headers_dma : 0; 2278 } 2279 2280 static void enetc_assign_rx_resource(struct enetc_bdr *rx_ring, 2281 const struct enetc_bdr_resource *res) 2282 { 2283 rx_ring->bd_base = res ? res->bd_base : NULL; 2284 rx_ring->bd_dma_base = res ? res->bd_dma_base : 0; 2285 rx_ring->rx_swbd = res ? res->rx_swbd : NULL; 2286 } 2287 2288 static void enetc_assign_tx_resources(struct enetc_ndev_priv *priv, 2289 const struct enetc_bdr_resource *res) 2290 { 2291 int i; 2292 2293 if (priv->tx_res) 2294 enetc_free_tx_resources(priv->tx_res, priv->num_tx_rings); 2295 2296 for (i = 0; i < priv->num_tx_rings; i++) { 2297 enetc_assign_tx_resource(priv->tx_ring[i], 2298 res ? 
&res[i] : NULL); 2299 } 2300 2301 priv->tx_res = res; 2302 } 2303 2304 static void enetc_assign_rx_resources(struct enetc_ndev_priv *priv, 2305 const struct enetc_bdr_resource *res) 2306 { 2307 int i; 2308 2309 if (priv->rx_res) 2310 enetc_free_rx_resources(priv->rx_res, priv->num_rx_rings); 2311 2312 for (i = 0; i < priv->num_rx_rings; i++) { 2313 enetc_assign_rx_resource(priv->rx_ring[i], 2314 res ? &res[i] : NULL); 2315 } 2316 2317 priv->rx_res = res; 2318 } 2319 2320 static void enetc_free_tx_ring(struct enetc_bdr *tx_ring) 2321 { 2322 int i; 2323 2324 for (i = 0; i < tx_ring->bd_count; i++) { 2325 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; 2326 2327 enetc_free_tx_frame(tx_ring, tx_swbd); 2328 } 2329 } 2330 2331 static void enetc_free_rx_ring(struct enetc_bdr *rx_ring) 2332 { 2333 int i; 2334 2335 for (i = 0; i < rx_ring->bd_count; i++) { 2336 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; 2337 2338 if (!rx_swbd->page) 2339 continue; 2340 2341 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, 2342 rx_swbd->dir); 2343 __free_page(rx_swbd->page); 2344 rx_swbd->page = NULL; 2345 } 2346 } 2347 2348 static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv) 2349 { 2350 int i; 2351 2352 for (i = 0; i < priv->num_rx_rings; i++) 2353 enetc_free_rx_ring(priv->rx_ring[i]); 2354 2355 for (i = 0; i < priv->num_tx_rings; i++) 2356 enetc_free_tx_ring(priv->tx_ring[i]); 2357 } 2358 2359 static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups) 2360 { 2361 int *rss_table; 2362 int i; 2363 2364 rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL); 2365 if (!rss_table) 2366 return -ENOMEM; 2367 2368 /* Set up RSS table defaults */ 2369 for (i = 0; i < si->num_rss; i++) 2370 rss_table[i] = i % num_groups; 2371 2372 enetc_set_rss_table(si, rss_table, si->num_rss); 2373 2374 kfree(rss_table); 2375 2376 return 0; 2377 } 2378 2379 static void enetc_set_lso_flags_mask(struct enetc_hw *hw) 2380 { 2381 enetc_wr(hw, ENETC4_SILSOSFMR0, 2382 SILSOSFMR0_VAL_SET(ENETC4_TCP_NL_SEG_FLAGS_DMASK, 2383 ENETC4_TCP_NL_SEG_FLAGS_DMASK)); 2384 enetc_wr(hw, ENETC4_SILSOSFMR1, 0); 2385 } 2386 2387 int enetc_configure_si(struct enetc_ndev_priv *priv) 2388 { 2389 struct enetc_si *si = priv->si; 2390 struct enetc_hw *hw = &si->hw; 2391 int err; 2392 2393 /* set SI cache attributes */ 2394 enetc_wr(hw, ENETC_SICAR0, 2395 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); 2396 enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI); 2397 /* enable SI */ 2398 enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN); 2399 2400 if (si->hw_features & ENETC_SI_F_LSO) 2401 enetc_set_lso_flags_mask(hw); 2402 2403 /* TODO: RSS support for i.MX95 will be supported later, and the 2404 * is_enetc_rev1() condition will be removed 2405 */ 2406 if (si->num_rss && is_enetc_rev1(si)) { 2407 err = enetc_setup_default_rss_table(si, priv->num_rx_rings); 2408 if (err) 2409 return err; 2410 } 2411 2412 return 0; 2413 } 2414 EXPORT_SYMBOL_GPL(enetc_configure_si); 2415 2416 void enetc_init_si_rings_params(struct enetc_ndev_priv *priv) 2417 { 2418 struct enetc_si *si = priv->si; 2419 int cpus = num_online_cpus(); 2420 2421 priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE; 2422 priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE; 2423 2424 /* Enable all available TX rings in order to configure as many 2425 * priorities as possible, when needed. 
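	 * Rx rings, by contrast, are capped below at one per online CPU,
	 * and the number of Rx/Tx interrupt vectors follows the Rx ring
	 * count.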
2426 * TODO: Make # of TX rings run-time configurable 2427 */ 2428 priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings); 2429 priv->num_tx_rings = si->num_tx_rings; 2430 priv->bdr_int_num = priv->num_rx_rings; 2431 priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL; 2432 priv->tx_ictt = enetc_usecs_to_cycles(600, priv->sysclk_freq); 2433 } 2434 EXPORT_SYMBOL_GPL(enetc_init_si_rings_params); 2435 2436 int enetc_alloc_si_resources(struct enetc_ndev_priv *priv) 2437 { 2438 struct enetc_si *si = priv->si; 2439 2440 priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules), 2441 GFP_KERNEL); 2442 if (!priv->cls_rules) 2443 return -ENOMEM; 2444 2445 return 0; 2446 } 2447 EXPORT_SYMBOL_GPL(enetc_alloc_si_resources); 2448 2449 void enetc_free_si_resources(struct enetc_ndev_priv *priv) 2450 { 2451 kfree(priv->cls_rules); 2452 } 2453 EXPORT_SYMBOL_GPL(enetc_free_si_resources); 2454 2455 static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) 2456 { 2457 int idx = tx_ring->index; 2458 u32 tbmr; 2459 2460 enetc_txbdr_wr(hw, idx, ENETC_TBBAR0, 2461 lower_32_bits(tx_ring->bd_dma_base)); 2462 2463 enetc_txbdr_wr(hw, idx, ENETC_TBBAR1, 2464 upper_32_bits(tx_ring->bd_dma_base)); 2465 2466 WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */ 2467 enetc_txbdr_wr(hw, idx, ENETC_TBLENR, 2468 ENETC_RTBLENR_LEN(tx_ring->bd_count)); 2469 2470 /* clearing PI/CI registers for Tx not supported, adjust sw indexes */ 2471 tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR); 2472 tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR); 2473 2474 /* enable Tx ints by setting pkt thr to 1 */ 2475 enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1); 2476 2477 tbmr = ENETC_TBMR_SET_PRIO(tx_ring->prio); 2478 if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) 2479 tbmr |= ENETC_TBMR_VIH; 2480 2481 /* enable ring */ 2482 enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr); 2483 2484 tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR); 2485 tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR); 2486 tx_ring->idr = hw->reg + ENETC_SITXIDR; 2487 } 2488 2489 static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring, 2490 bool extended) 2491 { 2492 int idx = rx_ring->index; 2493 u32 rbmr = 0; 2494 2495 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0, 2496 lower_32_bits(rx_ring->bd_dma_base)); 2497 2498 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1, 2499 upper_32_bits(rx_ring->bd_dma_base)); 2500 2501 WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */ 2502 enetc_rxbdr_wr(hw, idx, ENETC_RBLENR, 2503 ENETC_RTBLENR_LEN(rx_ring->bd_count)); 2504 2505 if (rx_ring->xdp.prog) 2506 enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE_XDP); 2507 else 2508 enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE); 2509 2510 /* Also prepare the consumer index in case page allocation never 2511 * succeeds. In that case, hardware will never advance producer index 2512 * to match consumer index, and will drop all frames. 
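	 * The producer/consumer index registers are programmed just below;
	 * the ring is only populated with pages later, by the
	 * enetc_refill_rx_ring() call at the end of this function.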
2513 */ 2514 enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0); 2515 enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1); 2516 2517 /* enable Rx ints by setting pkt thr to 1 */ 2518 enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1); 2519 2520 rx_ring->ext_en = extended; 2521 if (rx_ring->ext_en) 2522 rbmr |= ENETC_RBMR_BDS; 2523 2524 if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) 2525 rbmr |= ENETC_RBMR_VTE; 2526 2527 rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR); 2528 rx_ring->idr = hw->reg + ENETC_SIRXIDR; 2529 2530 rx_ring->next_to_clean = 0; 2531 rx_ring->next_to_use = 0; 2532 rx_ring->next_to_alloc = 0; 2533 2534 enetc_lock_mdio(); 2535 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring)); 2536 enetc_unlock_mdio(); 2537 2538 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); 2539 } 2540 2541 static void enetc_setup_bdrs(struct enetc_ndev_priv *priv, bool extended) 2542 { 2543 struct enetc_hw *hw = &priv->si->hw; 2544 int i; 2545 2546 for (i = 0; i < priv->num_tx_rings; i++) 2547 enetc_setup_txbdr(hw, priv->tx_ring[i]); 2548 2549 for (i = 0; i < priv->num_rx_rings; i++) 2550 enetc_setup_rxbdr(hw, priv->rx_ring[i], extended); 2551 } 2552 2553 static void enetc_enable_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) 2554 { 2555 int idx = tx_ring->index; 2556 u32 tbmr; 2557 2558 tbmr = enetc_txbdr_rd(hw, idx, ENETC_TBMR); 2559 tbmr |= ENETC_TBMR_EN; 2560 enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr); 2561 } 2562 2563 static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) 2564 { 2565 int idx = rx_ring->index; 2566 u32 rbmr; 2567 2568 rbmr = enetc_rxbdr_rd(hw, idx, ENETC_RBMR); 2569 rbmr |= ENETC_RBMR_EN; 2570 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); 2571 } 2572 2573 static void enetc_enable_rx_bdrs(struct enetc_ndev_priv *priv) 2574 { 2575 struct enetc_hw *hw = &priv->si->hw; 2576 int i; 2577 2578 for (i = 0; i < priv->num_rx_rings; i++) 2579 enetc_enable_rxbdr(hw, priv->rx_ring[i]); 2580 } 2581 2582 static void enetc_enable_tx_bdrs(struct enetc_ndev_priv *priv) 2583 { 2584 struct enetc_hw *hw = &priv->si->hw; 2585 int i; 2586 2587 for (i = 0; i < priv->num_tx_rings; i++) 2588 enetc_enable_txbdr(hw, priv->tx_ring[i]); 2589 } 2590 2591 static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) 2592 { 2593 int idx = rx_ring->index; 2594 2595 /* disable EN bit on ring */ 2596 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0); 2597 } 2598 2599 static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) 2600 { 2601 int idx = rx_ring->index; 2602 2603 /* disable EN bit on ring */ 2604 enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0); 2605 } 2606 2607 static void enetc_disable_rx_bdrs(struct enetc_ndev_priv *priv) 2608 { 2609 struct enetc_hw *hw = &priv->si->hw; 2610 int i; 2611 2612 for (i = 0; i < priv->num_rx_rings; i++) 2613 enetc_disable_rxbdr(hw, priv->rx_ring[i]); 2614 } 2615 2616 static void enetc_disable_tx_bdrs(struct enetc_ndev_priv *priv) 2617 { 2618 struct enetc_hw *hw = &priv->si->hw; 2619 int i; 2620 2621 for (i = 0; i < priv->num_tx_rings; i++) 2622 enetc_disable_txbdr(hw, priv->tx_ring[i]); 2623 } 2624 2625 static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) 2626 { 2627 int delay = 8, timeout = 100; 2628 int idx = tx_ring->index; 2629 2630 /* wait for busy to clear */ 2631 while (delay < timeout && 2632 enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) { 2633 msleep(delay); 2634 delay *= 2; 2635 } 2636 2637 if (delay >= timeout) 2638 netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n", 
2639 idx); 2640 } 2641 2642 static void enetc_wait_bdrs(struct enetc_ndev_priv *priv) 2643 { 2644 struct enetc_hw *hw = &priv->si->hw; 2645 int i; 2646 2647 for (i = 0; i < priv->num_tx_rings; i++) 2648 enetc_wait_txbdr(hw, priv->tx_ring[i]); 2649 } 2650 2651 static int enetc_setup_irqs(struct enetc_ndev_priv *priv) 2652 { 2653 struct pci_dev *pdev = priv->si->pdev; 2654 struct enetc_hw *hw = &priv->si->hw; 2655 int i, j, err; 2656 2657 for (i = 0; i < priv->bdr_int_num; i++) { 2658 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); 2659 struct enetc_int_vector *v = priv->int_vector[i]; 2660 int entry = ENETC_BDR_INT_BASE_IDX + i; 2661 2662 snprintf(v->name, sizeof(v->name), "%s-rxtx%d", 2663 priv->ndev->name, i); 2664 err = request_irq(irq, enetc_msix, IRQF_NO_AUTOEN, v->name, v); 2665 if (err) { 2666 dev_err(priv->dev, "request_irq() failed!\n"); 2667 goto irq_err; 2668 } 2669 2670 v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER); 2671 v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER); 2672 v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1); 2673 2674 enetc_wr(hw, ENETC_SIMSIRRV(i), entry); 2675 2676 for (j = 0; j < v->count_tx_rings; j++) { 2677 int idx = v->tx_ring[j].index; 2678 2679 enetc_wr(hw, ENETC_SIMSITRV(idx), entry); 2680 } 2681 irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus())); 2682 } 2683 2684 return 0; 2685 2686 irq_err: 2687 while (i--) { 2688 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); 2689 2690 irq_set_affinity_hint(irq, NULL); 2691 free_irq(irq, priv->int_vector[i]); 2692 } 2693 2694 return err; 2695 } 2696 2697 static void enetc_free_irqs(struct enetc_ndev_priv *priv) 2698 { 2699 struct pci_dev *pdev = priv->si->pdev; 2700 int i; 2701 2702 for (i = 0; i < priv->bdr_int_num; i++) { 2703 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); 2704 2705 irq_set_affinity_hint(irq, NULL); 2706 free_irq(irq, priv->int_vector[i]); 2707 } 2708 } 2709 2710 static void enetc_setup_interrupts(struct enetc_ndev_priv *priv) 2711 { 2712 struct enetc_hw *hw = &priv->si->hw; 2713 u32 icpt, ictt; 2714 int i; 2715 2716 /* enable Tx & Rx event indication */ 2717 if (priv->ic_mode & 2718 (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) { 2719 icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR); 2720 /* init to non-0 minimum, will be adjusted later */ 2721 ictt = 0x1; 2722 } else { 2723 icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */ 2724 ictt = 0; 2725 } 2726 2727 for (i = 0; i < priv->num_rx_rings; i++) { 2728 enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt); 2729 enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt); 2730 enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE); 2731 } 2732 2733 if (priv->ic_mode & ENETC_IC_TX_MANUAL) 2734 icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR); 2735 else 2736 icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */ 2737 2738 for (i = 0; i < priv->num_tx_rings; i++) { 2739 enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt); 2740 enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt); 2741 enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE); 2742 } 2743 } 2744 2745 static void enetc_clear_interrupts(struct enetc_ndev_priv *priv) 2746 { 2747 struct enetc_hw *hw = &priv->si->hw; 2748 int i; 2749 2750 for (i = 0; i < priv->num_tx_rings; i++) 2751 enetc_txbdr_wr(hw, i, ENETC_TBIER, 0); 2752 2753 for (i = 0; i < priv->num_rx_rings; i++) 2754 enetc_rxbdr_wr(hw, i, ENETC_RBIER, 0); 2755 } 2756 2757 static int enetc_phylink_connect(struct net_device *ndev) 2758 { 2759 struct enetc_ndev_priv 
*priv = netdev_priv(ndev); 2760 struct ethtool_keee edata; 2761 int err; 2762 2763 if (!priv->phylink) { 2764 /* phy-less mode */ 2765 netif_carrier_on(ndev); 2766 return 0; 2767 } 2768 2769 err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0); 2770 if (err) { 2771 dev_err(&ndev->dev, "could not attach to PHY\n"); 2772 return err; 2773 } 2774 2775 /* disable EEE autoneg, until ENETC driver supports it */ 2776 memset(&edata, 0, sizeof(struct ethtool_keee)); 2777 phylink_ethtool_set_eee(priv->phylink, &edata); 2778 2779 phylink_start(priv->phylink); 2780 2781 return 0; 2782 } 2783 2784 static void enetc_tx_onestep_tstamp(struct work_struct *work) 2785 { 2786 struct enetc_ndev_priv *priv; 2787 struct sk_buff *skb; 2788 2789 priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp); 2790 2791 netif_tx_lock_bh(priv->ndev); 2792 2793 clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags); 2794 skb = skb_dequeue(&priv->tx_skbs); 2795 if (skb) 2796 enetc_start_xmit(skb, priv->ndev); 2797 2798 netif_tx_unlock_bh(priv->ndev); 2799 } 2800 2801 static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv) 2802 { 2803 INIT_WORK(&priv->tx_onestep_tstamp, enetc_tx_onestep_tstamp); 2804 skb_queue_head_init(&priv->tx_skbs); 2805 } 2806 2807 void enetc_start(struct net_device *ndev) 2808 { 2809 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2810 int i; 2811 2812 enetc_setup_interrupts(priv); 2813 2814 for (i = 0; i < priv->bdr_int_num; i++) { 2815 int irq = pci_irq_vector(priv->si->pdev, 2816 ENETC_BDR_INT_BASE_IDX + i); 2817 2818 napi_enable(&priv->int_vector[i]->napi); 2819 enable_irq(irq); 2820 } 2821 2822 enetc_enable_tx_bdrs(priv); 2823 2824 enetc_enable_rx_bdrs(priv); 2825 2826 netif_tx_start_all_queues(ndev); 2827 2828 clear_bit(ENETC_TX_DOWN, &priv->flags); 2829 } 2830 EXPORT_SYMBOL_GPL(enetc_start); 2831 2832 int enetc_open(struct net_device *ndev) 2833 { 2834 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2835 struct enetc_bdr_resource *tx_res, *rx_res; 2836 bool extended; 2837 int err; 2838 2839 extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP); 2840 2841 err = clk_prepare_enable(priv->ref_clk); 2842 if (err) 2843 return err; 2844 2845 err = enetc_setup_irqs(priv); 2846 if (err) 2847 goto err_setup_irqs; 2848 2849 err = enetc_phylink_connect(ndev); 2850 if (err) 2851 goto err_phy_connect; 2852 2853 tx_res = enetc_alloc_tx_resources(priv); 2854 if (IS_ERR(tx_res)) { 2855 err = PTR_ERR(tx_res); 2856 goto err_alloc_tx; 2857 } 2858 2859 rx_res = enetc_alloc_rx_resources(priv, extended); 2860 if (IS_ERR(rx_res)) { 2861 err = PTR_ERR(rx_res); 2862 goto err_alloc_rx; 2863 } 2864 2865 enetc_tx_onestep_tstamp_init(priv); 2866 enetc_assign_tx_resources(priv, tx_res); 2867 enetc_assign_rx_resources(priv, rx_res); 2868 enetc_setup_bdrs(priv, extended); 2869 enetc_start(ndev); 2870 2871 return 0; 2872 2873 err_alloc_rx: 2874 enetc_free_tx_resources(tx_res, priv->num_tx_rings); 2875 err_alloc_tx: 2876 if (priv->phylink) 2877 phylink_disconnect_phy(priv->phylink); 2878 err_phy_connect: 2879 enetc_free_irqs(priv); 2880 err_setup_irqs: 2881 clk_disable_unprepare(priv->ref_clk); 2882 2883 return err; 2884 } 2885 EXPORT_SYMBOL_GPL(enetc_open); 2886 2887 void enetc_stop(struct net_device *ndev) 2888 { 2889 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2890 int i; 2891 2892 set_bit(ENETC_TX_DOWN, &priv->flags); 2893 2894 netif_tx_stop_all_queues(ndev); 2895 2896 enetc_disable_rx_bdrs(priv); 2897 2898 enetc_wait_bdrs(priv); 2899 2900 
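	/* The Tx rings have been drained above (enetc_wait_bdrs() polls the
	 * TBSR busy bits), so it should now be safe to clear their enable
	 * bits below.
	 */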
enetc_disable_tx_bdrs(priv); 2901 2902 for (i = 0; i < priv->bdr_int_num; i++) { 2903 int irq = pci_irq_vector(priv->si->pdev, 2904 ENETC_BDR_INT_BASE_IDX + i); 2905 2906 disable_irq(irq); 2907 napi_synchronize(&priv->int_vector[i]->napi); 2908 napi_disable(&priv->int_vector[i]->napi); 2909 } 2910 2911 enetc_clear_interrupts(priv); 2912 } 2913 EXPORT_SYMBOL_GPL(enetc_stop); 2914 2915 int enetc_close(struct net_device *ndev) 2916 { 2917 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2918 2919 enetc_stop(ndev); 2920 2921 if (priv->phylink) { 2922 phylink_stop(priv->phylink); 2923 phylink_disconnect_phy(priv->phylink); 2924 } else { 2925 netif_carrier_off(ndev); 2926 } 2927 2928 enetc_free_rxtx_rings(priv); 2929 2930 /* Avoids dangling pointers and also frees old resources */ 2931 enetc_assign_rx_resources(priv, NULL); 2932 enetc_assign_tx_resources(priv, NULL); 2933 2934 enetc_free_irqs(priv); 2935 clk_disable_unprepare(priv->ref_clk); 2936 2937 return 0; 2938 } 2939 EXPORT_SYMBOL_GPL(enetc_close); 2940 2941 static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended, 2942 int (*cb)(struct enetc_ndev_priv *priv, void *ctx), 2943 void *ctx) 2944 { 2945 struct enetc_bdr_resource *tx_res, *rx_res; 2946 int err; 2947 2948 ASSERT_RTNL(); 2949 2950 /* If the interface is down, run the callback right away, 2951 * without reconfiguration. 2952 */ 2953 if (!netif_running(priv->ndev)) { 2954 if (cb) { 2955 err = cb(priv, ctx); 2956 if (err) 2957 return err; 2958 } 2959 2960 return 0; 2961 } 2962 2963 tx_res = enetc_alloc_tx_resources(priv); 2964 if (IS_ERR(tx_res)) { 2965 err = PTR_ERR(tx_res); 2966 goto out; 2967 } 2968 2969 rx_res = enetc_alloc_rx_resources(priv, extended); 2970 if (IS_ERR(rx_res)) { 2971 err = PTR_ERR(rx_res); 2972 goto out_free_tx_res; 2973 } 2974 2975 enetc_stop(priv->ndev); 2976 enetc_free_rxtx_rings(priv); 2977 2978 /* Interface is down, run optional callback now */ 2979 if (cb) { 2980 err = cb(priv, ctx); 2981 if (err) 2982 goto out_restart; 2983 } 2984 2985 enetc_assign_tx_resources(priv, tx_res); 2986 enetc_assign_rx_resources(priv, rx_res); 2987 enetc_setup_bdrs(priv, extended); 2988 enetc_start(priv->ndev); 2989 2990 return 0; 2991 2992 out_restart: 2993 enetc_setup_bdrs(priv, extended); 2994 enetc_start(priv->ndev); 2995 enetc_free_rx_resources(rx_res, priv->num_rx_rings); 2996 out_free_tx_res: 2997 enetc_free_tx_resources(tx_res, priv->num_tx_rings); 2998 out: 2999 return err; 3000 } 3001 3002 static void enetc_debug_tx_ring_prios(struct enetc_ndev_priv *priv) 3003 { 3004 int i; 3005 3006 for (i = 0; i < priv->num_tx_rings; i++) 3007 netdev_dbg(priv->ndev, "TX ring %d prio %d\n", i, 3008 priv->tx_ring[i]->prio); 3009 } 3010 3011 void enetc_reset_tc_mqprio(struct net_device *ndev) 3012 { 3013 struct enetc_ndev_priv *priv = netdev_priv(ndev); 3014 struct enetc_hw *hw = &priv->si->hw; 3015 struct enetc_bdr *tx_ring; 3016 int num_stack_tx_queues; 3017 int i; 3018 3019 num_stack_tx_queues = enetc_num_stack_tx_queues(priv); 3020 3021 netdev_reset_tc(ndev); 3022 netif_set_real_num_tx_queues(ndev, num_stack_tx_queues); 3023 priv->min_num_stack_tx_queues = num_possible_cpus(); 3024 3025 /* Reset all ring priorities to 0 */ 3026 for (i = 0; i < priv->num_tx_rings; i++) { 3027 tx_ring = priv->tx_ring[i]; 3028 tx_ring->prio = 0; 3029 enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); 3030 } 3031 3032 enetc_debug_tx_ring_prios(priv); 3033 3034 enetc_change_preemptible_tcs(priv, 0); 3035 } 3036 EXPORT_SYMBOL_GPL(enetc_reset_tc_mqprio); 3037 3038 int 
enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) 3039 { 3040 struct tc_mqprio_qopt_offload *mqprio = type_data; 3041 struct enetc_ndev_priv *priv = netdev_priv(ndev); 3042 struct tc_mqprio_qopt *qopt = &mqprio->qopt; 3043 struct enetc_hw *hw = &priv->si->hw; 3044 int num_stack_tx_queues = 0; 3045 struct enetc_bdr *tx_ring; 3046 u8 num_tc = qopt->num_tc; 3047 int offset, count; 3048 int err, tc, q; 3049 3050 if (!num_tc) { 3051 enetc_reset_tc_mqprio(ndev); 3052 return 0; 3053 } 3054 3055 err = netdev_set_num_tc(ndev, num_tc); 3056 if (err) 3057 return err; 3058 3059 for (tc = 0; tc < num_tc; tc++) { 3060 offset = qopt->offset[tc]; 3061 count = qopt->count[tc]; 3062 num_stack_tx_queues += count; 3063 3064 err = netdev_set_tc_queue(ndev, tc, count, offset); 3065 if (err) 3066 goto err_reset_tc; 3067 3068 for (q = offset; q < offset + count; q++) { 3069 tx_ring = priv->tx_ring[q]; 3070 /* The prio_tc_map is skb_tx_hash()'s way of selecting 3071 * between TX queues based on skb->priority. As such, 3072 * there's nothing to offload based on it. 3073 * Make the mqprio "traffic class" be the priority of 3074 * this ring group, and leave the Tx IPV to traffic 3075 * class mapping as its default mapping value of 1:1. 3076 */ 3077 tx_ring->prio = tc; 3078 enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); 3079 } 3080 } 3081 3082 err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues); 3083 if (err) 3084 goto err_reset_tc; 3085 3086 priv->min_num_stack_tx_queues = num_stack_tx_queues; 3087 3088 enetc_debug_tx_ring_prios(priv); 3089 3090 enetc_change_preemptible_tcs(priv, mqprio->preemptible_tcs); 3091 3092 return 0; 3093 3094 err_reset_tc: 3095 enetc_reset_tc_mqprio(ndev); 3096 return err; 3097 } 3098 EXPORT_SYMBOL_GPL(enetc_setup_tc_mqprio); 3099 3100 static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx) 3101 { 3102 struct bpf_prog *old_prog, *prog = ctx; 3103 int num_stack_tx_queues; 3104 int err, i; 3105 3106 old_prog = xchg(&priv->xdp_prog, prog); 3107 3108 num_stack_tx_queues = enetc_num_stack_tx_queues(priv); 3109 err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues); 3110 if (err) { 3111 xchg(&priv->xdp_prog, old_prog); 3112 return err; 3113 } 3114 3115 if (old_prog) 3116 bpf_prog_put(old_prog); 3117 3118 for (i = 0; i < priv->num_rx_rings; i++) { 3119 struct enetc_bdr *rx_ring = priv->rx_ring[i]; 3120 3121 rx_ring->xdp.prog = prog; 3122 3123 if (prog) 3124 rx_ring->buffer_offset = XDP_PACKET_HEADROOM; 3125 else 3126 rx_ring->buffer_offset = ENETC_RXB_PAD; 3127 } 3128 3129 return 0; 3130 } 3131 3132 static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog, 3133 struct netlink_ext_ack *extack) 3134 { 3135 int num_xdp_tx_queues = prog ? num_possible_cpus() : 0; 3136 struct enetc_ndev_priv *priv = netdev_priv(ndev); 3137 bool extended; 3138 3139 if (priv->min_num_stack_tx_queues + num_xdp_tx_queues > 3140 priv->num_tx_rings) { 3141 NL_SET_ERR_MSG_FMT_MOD(extack, 3142 "Reserving %d XDP TXQs leaves under %d for stack (total %d)", 3143 num_xdp_tx_queues, 3144 priv->min_num_stack_tx_queues, 3145 priv->num_tx_rings); 3146 return -EBUSY; 3147 } 3148 3149 extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP); 3150 3151 /* The buffer layout is changing, so we need to drain the old 3152 * RX buffers and seed new ones. 
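	 * enetc_reconfigure() stops the interface, installs the new program
	 * via enetc_reconfigure_xdp_cb() (which also switches each ring's
	 * buffer_offset), and then rebuilds the rings.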
3153 */ 3154 return enetc_reconfigure(priv, extended, enetc_reconfigure_xdp_cb, prog); 3155 } 3156 3157 int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf) 3158 { 3159 switch (bpf->command) { 3160 case XDP_SETUP_PROG: 3161 return enetc_setup_xdp_prog(ndev, bpf->prog, bpf->extack); 3162 default: 3163 return -EINVAL; 3164 } 3165 3166 return 0; 3167 } 3168 EXPORT_SYMBOL_GPL(enetc_setup_bpf); 3169 3170 struct net_device_stats *enetc_get_stats(struct net_device *ndev) 3171 { 3172 struct enetc_ndev_priv *priv = netdev_priv(ndev); 3173 struct net_device_stats *stats = &ndev->stats; 3174 unsigned long packets = 0, bytes = 0; 3175 unsigned long tx_dropped = 0; 3176 int i; 3177 3178 for (i = 0; i < priv->num_rx_rings; i++) { 3179 packets += priv->rx_ring[i]->stats.packets; 3180 bytes += priv->rx_ring[i]->stats.bytes; 3181 } 3182 3183 stats->rx_packets = packets; 3184 stats->rx_bytes = bytes; 3185 bytes = 0; 3186 packets = 0; 3187 3188 for (i = 0; i < priv->num_tx_rings; i++) { 3189 packets += priv->tx_ring[i]->stats.packets; 3190 bytes += priv->tx_ring[i]->stats.bytes; 3191 tx_dropped += priv->tx_ring[i]->stats.win_drop; 3192 } 3193 3194 stats->tx_packets = packets; 3195 stats->tx_bytes = bytes; 3196 stats->tx_dropped = tx_dropped; 3197 3198 return stats; 3199 } 3200 EXPORT_SYMBOL_GPL(enetc_get_stats); 3201 3202 static int enetc_set_rss(struct net_device *ndev, int en) 3203 { 3204 struct enetc_ndev_priv *priv = netdev_priv(ndev); 3205 struct enetc_hw *hw = &priv->si->hw; 3206 u32 reg; 3207 3208 enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings); 3209 3210 reg = enetc_rd(hw, ENETC_SIMR); 3211 reg &= ~ENETC_SIMR_RSSE; 3212 reg |= (en) ? ENETC_SIMR_RSSE : 0; 3213 enetc_wr(hw, ENETC_SIMR, reg); 3214 3215 return 0; 3216 } 3217 3218 static void enetc_enable_rxvlan(struct net_device *ndev, bool en) 3219 { 3220 struct enetc_ndev_priv *priv = netdev_priv(ndev); 3221 struct enetc_hw *hw = &priv->si->hw; 3222 int i; 3223 3224 for (i = 0; i < priv->num_rx_rings; i++) 3225 enetc_bdr_enable_rxvlan(hw, i, en); 3226 } 3227 3228 static void enetc_enable_txvlan(struct net_device *ndev, bool en) 3229 { 3230 struct enetc_ndev_priv *priv = netdev_priv(ndev); 3231 struct enetc_hw *hw = &priv->si->hw; 3232 int i; 3233 3234 for (i = 0; i < priv->num_tx_rings; i++) 3235 enetc_bdr_enable_txvlan(hw, i, en); 3236 } 3237 3238 void enetc_set_features(struct net_device *ndev, netdev_features_t features) 3239 { 3240 netdev_features_t changed = ndev->features ^ features; 3241 3242 if (changed & NETIF_F_RXHASH) 3243 enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH)); 3244 3245 if (changed & NETIF_F_HW_VLAN_CTAG_RX) 3246 enetc_enable_rxvlan(ndev, 3247 !!(features & NETIF_F_HW_VLAN_CTAG_RX)); 3248 3249 if (changed & NETIF_F_HW_VLAN_CTAG_TX) 3250 enetc_enable_txvlan(ndev, 3251 !!(features & NETIF_F_HW_VLAN_CTAG_TX)); 3252 } 3253 EXPORT_SYMBOL_GPL(enetc_set_features); 3254 3255 static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr) 3256 { 3257 struct enetc_ndev_priv *priv = netdev_priv(ndev); 3258 int err, new_offloads = priv->active_offloads; 3259 struct hwtstamp_config config; 3260 3261 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 3262 return -EFAULT; 3263 3264 switch (config.tx_type) { 3265 case HWTSTAMP_TX_OFF: 3266 new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; 3267 break; 3268 case HWTSTAMP_TX_ON: 3269 new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; 3270 new_offloads |= ENETC_F_TX_TSTAMP; 3271 break; 3272 case HWTSTAMP_TX_ONESTEP_SYNC: 3273 if (!enetc_si_is_pf(priv->si)) 3274 return -EOPNOTSUPP; 
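		/* Only one Tx timestamping mode is kept active at a time:
		 * clear the whole mask before selecting one-step SYNC
		 * (PF only, see the check above).
		 */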
3275 3276 new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; 3277 new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP; 3278 break; 3279 default: 3280 return -ERANGE; 3281 } 3282 3283 switch (config.rx_filter) { 3284 case HWTSTAMP_FILTER_NONE: 3285 new_offloads &= ~ENETC_F_RX_TSTAMP; 3286 break; 3287 default: 3288 new_offloads |= ENETC_F_RX_TSTAMP; 3289 config.rx_filter = HWTSTAMP_FILTER_ALL; 3290 } 3291 3292 if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) { 3293 bool extended = !!(new_offloads & ENETC_F_RX_TSTAMP); 3294 3295 err = enetc_reconfigure(priv, extended, NULL, NULL); 3296 if (err) 3297 return err; 3298 } 3299 3300 priv->active_offloads = new_offloads; 3301 3302 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 3303 -EFAULT : 0; 3304 } 3305 3306 static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr) 3307 { 3308 struct enetc_ndev_priv *priv = netdev_priv(ndev); 3309 struct hwtstamp_config config; 3310 3311 config.flags = 0; 3312 3313 if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) 3314 config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC; 3315 else if (priv->active_offloads & ENETC_F_TX_TSTAMP) 3316 config.tx_type = HWTSTAMP_TX_ON; 3317 else 3318 config.tx_type = HWTSTAMP_TX_OFF; 3319 3320 config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ? 3321 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; 3322 3323 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 3324 -EFAULT : 0; 3325 } 3326 3327 int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 3328 { 3329 struct enetc_ndev_priv *priv = netdev_priv(ndev); 3330 3331 if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) { 3332 if (cmd == SIOCSHWTSTAMP) 3333 return enetc_hwtstamp_set(ndev, rq); 3334 if (cmd == SIOCGHWTSTAMP) 3335 return enetc_hwtstamp_get(ndev, rq); 3336 } 3337 3338 if (!priv->phylink) 3339 return -EOPNOTSUPP; 3340 3341 return phylink_mii_ioctl(priv->phylink, rq, cmd); 3342 } 3343 EXPORT_SYMBOL_GPL(enetc_ioctl); 3344 3345 static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i, 3346 int v_tx_rings) 3347 { 3348 struct enetc_int_vector *v; 3349 struct enetc_bdr *bdr; 3350 int j, err; 3351 3352 v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL); 3353 if (!v) 3354 return -ENOMEM; 3355 3356 priv->int_vector[i] = v; 3357 bdr = &v->rx_ring; 3358 bdr->index = i; 3359 bdr->ndev = priv->ndev; 3360 bdr->dev = priv->dev; 3361 bdr->bd_count = priv->rx_bd_count; 3362 bdr->buffer_offset = ENETC_RXB_PAD; 3363 priv->rx_ring[i] = bdr; 3364 3365 err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0); 3366 if (err) 3367 goto free_vector; 3368 3369 err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq, MEM_TYPE_PAGE_SHARED, 3370 NULL); 3371 if (err) { 3372 xdp_rxq_info_unreg(&bdr->xdp.rxq); 3373 goto free_vector; 3374 } 3375 3376 /* init defaults for adaptive IC */ 3377 if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) { 3378 v->rx_ictt = 0x1; 3379 v->rx_dim_en = true; 3380 } 3381 3382 INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work); 3383 netif_napi_add(priv->ndev, &v->napi, enetc_poll); 3384 v->count_tx_rings = v_tx_rings; 3385 3386 for (j = 0; j < v_tx_rings; j++) { 3387 int idx; 3388 3389 /* default tx ring mapping policy */ 3390 idx = priv->bdr_int_num * j + i; 3391 __set_bit(idx, &v->tx_rings_map); 3392 bdr = &v->tx_ring[j]; 3393 bdr->index = idx; 3394 bdr->ndev = priv->ndev; 3395 bdr->dev = priv->dev; 3396 bdr->bd_count = priv->tx_bd_count; 3397 priv->tx_ring[idx] = bdr; 3398 } 3399 3400 return 0; 3401 3402 free_vector: 3403 priv->rx_ring[i] = NULL; 3404 priv->int_vector[i] 
= NULL; 3405 kfree(v); 3406 3407 return err; 3408 } 3409 3410 static void enetc_int_vector_destroy(struct enetc_ndev_priv *priv, int i) 3411 { 3412 struct enetc_int_vector *v = priv->int_vector[i]; 3413 struct enetc_bdr *rx_ring = &v->rx_ring; 3414 int j, tx_ring_index; 3415 3416 xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq); 3417 xdp_rxq_info_unreg(&rx_ring->xdp.rxq); 3418 netif_napi_del(&v->napi); 3419 cancel_work_sync(&v->rx_dim.work); 3420 3421 for (j = 0; j < v->count_tx_rings; j++) { 3422 tx_ring_index = priv->bdr_int_num * j + i; 3423 priv->tx_ring[tx_ring_index] = NULL; 3424 } 3425 3426 priv->rx_ring[i] = NULL; 3427 priv->int_vector[i] = NULL; 3428 kfree(v); 3429 } 3430 3431 int enetc_alloc_msix(struct enetc_ndev_priv *priv) 3432 { 3433 struct pci_dev *pdev = priv->si->pdev; 3434 int v_tx_rings, v_remainder; 3435 int num_stack_tx_queues; 3436 int first_xdp_tx_ring; 3437 int i, n, err, nvec; 3438 3439 nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num; 3440 /* allocate MSIX for both messaging and Rx/Tx interrupts */ 3441 n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX); 3442 3443 if (n < 0) 3444 return n; 3445 3446 if (n != nvec) 3447 return -EPERM; 3448 3449 /* # of tx rings per int vector */ 3450 v_tx_rings = priv->num_tx_rings / priv->bdr_int_num; 3451 v_remainder = priv->num_tx_rings % priv->bdr_int_num; 3452 3453 for (i = 0; i < priv->bdr_int_num; i++) { 3454 /* Distribute the remaining TX rings to the first v_remainder 3455 * interrupt vectors 3456 */ 3457 int num_tx_rings = i < v_remainder ? v_tx_rings + 1 : v_tx_rings; 3458 3459 err = enetc_int_vector_init(priv, i, num_tx_rings); 3460 if (err) 3461 goto fail; 3462 } 3463 3464 num_stack_tx_queues = enetc_num_stack_tx_queues(priv); 3465 3466 err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues); 3467 if (err) 3468 goto fail; 3469 3470 err = netif_set_real_num_rx_queues(priv->ndev, priv->num_rx_rings); 3471 if (err) 3472 goto fail; 3473 3474 priv->min_num_stack_tx_queues = num_possible_cpus(); 3475 first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus(); 3476 priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring]; 3477 3478 return 0; 3479 3480 fail: 3481 while (i--) 3482 enetc_int_vector_destroy(priv, i); 3483 3484 pci_free_irq_vectors(pdev); 3485 3486 return err; 3487 } 3488 EXPORT_SYMBOL_GPL(enetc_alloc_msix); 3489 3490 void enetc_free_msix(struct enetc_ndev_priv *priv) 3491 { 3492 int i; 3493 3494 for (i = 0; i < priv->bdr_int_num; i++) 3495 enetc_int_vector_destroy(priv, i); 3496 3497 /* disable all MSIX for this device */ 3498 pci_free_irq_vectors(priv->si->pdev); 3499 } 3500 EXPORT_SYMBOL_GPL(enetc_free_msix); 3501 3502 static void enetc_kfree_si(struct enetc_si *si) 3503 { 3504 char *p = (char *)si - si->pad; 3505 3506 kfree(p); 3507 } 3508 3509 static void enetc_detect_errata(struct enetc_si *si) 3510 { 3511 if (si->pdev->revision == ENETC_REV1) 3512 si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP; 3513 } 3514 3515 int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv) 3516 { 3517 struct enetc_si *si, *p; 3518 struct enetc_hw *hw; 3519 size_t alloc_size; 3520 int err, len; 3521 3522 pcie_flr(pdev); 3523 err = pci_enable_device_mem(pdev); 3524 if (err) 3525 return dev_err_probe(&pdev->dev, err, "device enable failed\n"); 3526 3527 /* set up for high or low dma */ 3528 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 3529 if (err) { 3530 dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); 3531 goto err_dma; 3532 } 3533 3534 err = 
pci_request_mem_regions(pdev, name); 3535 if (err) { 3536 dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err); 3537 goto err_pci_mem_reg; 3538 } 3539 3540 pci_set_master(pdev); 3541 3542 alloc_size = sizeof(struct enetc_si); 3543 if (sizeof_priv) { 3544 /* align priv to 32B */ 3545 alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN); 3546 alloc_size += sizeof_priv; 3547 } 3548 /* force 32B alignment for enetc_si */ 3549 alloc_size += ENETC_SI_ALIGN - 1; 3550 3551 p = kzalloc(alloc_size, GFP_KERNEL); 3552 if (!p) { 3553 err = -ENOMEM; 3554 goto err_alloc_si; 3555 } 3556 3557 si = PTR_ALIGN(p, ENETC_SI_ALIGN); 3558 si->pad = (char *)si - (char *)p; 3559 3560 pci_set_drvdata(pdev, si); 3561 si->pdev = pdev; 3562 hw = &si->hw; 3563 3564 len = pci_resource_len(pdev, ENETC_BAR_REGS); 3565 hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len); 3566 if (!hw->reg) { 3567 err = -ENXIO; 3568 dev_err(&pdev->dev, "ioremap() failed\n"); 3569 goto err_ioremap; 3570 } 3571 if (len > ENETC_PORT_BASE) 3572 hw->port = hw->reg + ENETC_PORT_BASE; 3573 if (len > ENETC_GLOBAL_BASE) 3574 hw->global = hw->reg + ENETC_GLOBAL_BASE; 3575 3576 enetc_detect_errata(si); 3577 3578 return 0; 3579 3580 err_ioremap: 3581 enetc_kfree_si(si); 3582 err_alloc_si: 3583 pci_release_mem_regions(pdev); 3584 err_pci_mem_reg: 3585 err_dma: 3586 pci_disable_device(pdev); 3587 3588 return err; 3589 } 3590 EXPORT_SYMBOL_GPL(enetc_pci_probe); 3591 3592 void enetc_pci_remove(struct pci_dev *pdev) 3593 { 3594 struct enetc_si *si = pci_get_drvdata(pdev); 3595 struct enetc_hw *hw = &si->hw; 3596 3597 iounmap(hw->reg); 3598 enetc_kfree_si(si); 3599 pci_release_mem_regions(pdev); 3600 pci_disable_device(pdev); 3601 } 3602 EXPORT_SYMBOL_GPL(enetc_pci_remove); 3603 3604 static const struct enetc_drvdata enetc_pf_data = { 3605 .sysclk_freq = ENETC_CLK_400M, 3606 .pmac_offset = ENETC_PMAC_OFFSET, 3607 .max_frags = ENETC_MAX_SKB_FRAGS, 3608 .eth_ops = &enetc_pf_ethtool_ops, 3609 }; 3610 3611 static const struct enetc_drvdata enetc4_pf_data = { 3612 .sysclk_freq = ENETC_CLK_333M, 3613 .tx_csum = true, 3614 .max_frags = ENETC4_MAX_SKB_FRAGS, 3615 .pmac_offset = ENETC4_PMAC_OFFSET, 3616 .eth_ops = &enetc4_pf_ethtool_ops, 3617 }; 3618 3619 static const struct enetc_drvdata enetc_vf_data = { 3620 .sysclk_freq = ENETC_CLK_400M, 3621 .max_frags = ENETC_MAX_SKB_FRAGS, 3622 .eth_ops = &enetc_vf_ethtool_ops, 3623 }; 3624 3625 static const struct enetc_platform_info enetc_info[] = { 3626 { .revision = ENETC_REV_1_0, 3627 .dev_id = ENETC_DEV_ID_PF, 3628 .data = &enetc_pf_data, 3629 }, 3630 { .revision = ENETC_REV_4_1, 3631 .dev_id = NXP_ENETC_PF_DEV_ID, 3632 .data = &enetc4_pf_data, 3633 }, 3634 { .revision = ENETC_REV_1_0, 3635 .dev_id = ENETC_DEV_ID_VF, 3636 .data = &enetc_vf_data, 3637 }, 3638 }; 3639 3640 int enetc_get_driver_data(struct enetc_si *si) 3641 { 3642 u16 dev_id = si->pdev->device; 3643 int i; 3644 3645 for (i = 0; i < ARRAY_SIZE(enetc_info); i++) { 3646 if (si->revision == enetc_info[i].revision && 3647 dev_id == enetc_info[i].dev_id) { 3648 si->drvdata = enetc_info[i].data; 3649 3650 return 0; 3651 } 3652 } 3653 3654 return -ERANGE; 3655 } 3656 EXPORT_SYMBOL_GPL(enetc_get_driver_data); 3657 3658 MODULE_DESCRIPTION("NXP ENETC Ethernet driver"); 3659 MODULE_LICENSE("Dual BSD/GPL"); 3660