// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */

#include "enetc.h"
#include <linux/bpf_trace.h>
#include <linux/clk.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/vmalloc.h>
#include <linux/ptp_classify.h>
#include <net/ip6_checksum.h>
#include <net/pkt_sched.h>
#include <net/tso.h>

u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg)
{
	return enetc_port_rd(&si->hw, reg);
}
EXPORT_SYMBOL_GPL(enetc_port_mac_rd);

void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val)
{
	enetc_port_wr(&si->hw, reg, val);
	if (si->hw_features & ENETC_SI_F_QBU)
		enetc_port_wr(&si->hw, reg + si->drvdata->pmac_offset, val);
}
EXPORT_SYMBOL_GPL(enetc_port_mac_wr);

static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv,
					 u8 preemptible_tcs)
{
	if (!(priv->si->hw_features & ENETC_SI_F_QBU))
		return;

	priv->preemptible_tcs = preemptible_tcs;
	enetc_mm_commit_preemptible_tcs(priv);
}

static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
	int num_tx_rings = priv->num_tx_rings;

	if (priv->xdp_prog)
		return num_tx_rings - num_possible_cpus();

	return num_tx_rings;
}

static struct enetc_bdr *enetc_rx_ring_from_xdp_tx_ring(struct enetc_ndev_priv *priv,
							struct enetc_bdr *tx_ring)
{
	int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring;

	return priv->rx_ring[index];
}

static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd)
{
	if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect)
		return NULL;

	return tx_swbd->skb;
}

static struct xdp_frame *
enetc_tx_swbd_get_xdp_frame(struct enetc_tx_swbd *tx_swbd)
{
	if (tx_swbd->is_xdp_redirect)
		return tx_swbd->xdp_frame;

	return NULL;
}

static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
				struct enetc_tx_swbd *tx_swbd)
{
	/* For XDP_TX, pages come from RX, whereas for the other contexts where
	 * we have is_dma_page set, those come from skb_frag_dma_map. We need
	 * to match the DMA mapping length, so we need to differentiate those.
	 */
	if (tx_swbd->is_dma_page)
		dma_unmap_page(tx_ring->dev, tx_swbd->dma,
			       tx_swbd->is_xdp_tx ?
PAGE_SIZE : tx_swbd->len, 84 tx_swbd->dir); 85 else 86 dma_unmap_single(tx_ring->dev, tx_swbd->dma, 87 tx_swbd->len, tx_swbd->dir); 88 tx_swbd->dma = 0; 89 } 90 91 static void enetc_free_tx_frame(struct enetc_bdr *tx_ring, 92 struct enetc_tx_swbd *tx_swbd) 93 { 94 struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd); 95 struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd); 96 97 if (tx_swbd->dma) 98 enetc_unmap_tx_buff(tx_ring, tx_swbd); 99 100 if (xdp_frame) { 101 xdp_return_frame(tx_swbd->xdp_frame); 102 tx_swbd->xdp_frame = NULL; 103 } else if (skb) { 104 dev_kfree_skb_any(skb); 105 tx_swbd->skb = NULL; 106 } 107 } 108 109 /* Let H/W know BD ring has been updated */ 110 static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring) 111 { 112 /* includes wmb() */ 113 enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use); 114 } 115 116 static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp, 117 u8 *msgtype, u8 *twostep, 118 u16 *correction_offset, u16 *body_offset) 119 { 120 unsigned int ptp_class; 121 struct ptp_header *hdr; 122 unsigned int type; 123 u8 *base; 124 125 ptp_class = ptp_classify_raw(skb); 126 if (ptp_class == PTP_CLASS_NONE) 127 return -EINVAL; 128 129 hdr = ptp_parse_header(skb, ptp_class); 130 if (!hdr) 131 return -EINVAL; 132 133 type = ptp_class & PTP_CLASS_PMASK; 134 if (type == PTP_CLASS_IPV4 || type == PTP_CLASS_IPV6) 135 *udp = 1; 136 else 137 *udp = 0; 138 139 *msgtype = ptp_get_msgtype(hdr, ptp_class); 140 *twostep = hdr->flag_field[0] & 0x2; 141 142 base = skb_mac_header(skb); 143 *correction_offset = (u8 *)&hdr->correction - base; 144 *body_offset = (u8 *)hdr + sizeof(struct ptp_header) - base; 145 146 return 0; 147 } 148 149 static bool enetc_tx_csum_offload_check(struct sk_buff *skb) 150 { 151 switch (skb->csum_offset) { 152 case offsetof(struct tcphdr, check): 153 case offsetof(struct udphdr, check): 154 return true; 155 default: 156 return false; 157 } 158 } 159 160 static bool enetc_skb_is_ipv6(struct sk_buff *skb) 161 { 162 return vlan_get_protocol(skb) == htons(ETH_P_IPV6); 163 } 164 165 static bool enetc_skb_is_tcp(struct sk_buff *skb) 166 { 167 return skb->csum_offset == offsetof(struct tcphdr, check); 168 } 169 170 /** 171 * enetc_unwind_tx_frame() - Unwind the DMA mappings of a multi-buffer Tx frame 172 * @tx_ring: Pointer to the Tx ring on which the buffer descriptors are located 173 * @count: Number of Tx buffer descriptors which need to be unmapped 174 * @i: Index of the last successfully mapped Tx buffer descriptor 175 */ 176 static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i) 177 { 178 while (count--) { 179 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; 180 181 enetc_free_tx_frame(tx_ring, tx_swbd); 182 if (i == 0) 183 i = tx_ring->bd_count; 184 i--; 185 } 186 } 187 188 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) 189 { 190 bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false; 191 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); 192 struct enetc_hw *hw = &priv->si->hw; 193 struct enetc_tx_swbd *tx_swbd; 194 int len = skb_headlen(skb); 195 union enetc_tx_bd temp_bd; 196 u8 msgtype, twostep, udp; 197 union enetc_tx_bd *txbd; 198 u16 offset1, offset2; 199 int i, count = 0; 200 skb_frag_t *frag; 201 unsigned int f; 202 dma_addr_t dma; 203 u8 flags = 0; 204 205 enetc_clear_tx_bd(&temp_bd); 206 if (skb->ip_summed == CHECKSUM_PARTIAL) { 207 /* Can not support TSD and checksum offload at the same time */ 208 if (priv->active_offloads & 
ENETC_F_TXCSUM &&
		    enetc_tx_csum_offload_check(skb) && !tx_ring->tsd_enable) {
			temp_bd.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START,
						     skb_network_offset(skb));
			temp_bd.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN,
						     skb_network_header_len(skb) / 4);
			temp_bd.l3_aux1 |= FIELD_PREP(ENETC_TX_BD_L3T,
						      enetc_skb_is_ipv6(skb));
			if (enetc_skb_is_tcp(skb))
				temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T,
							    ENETC_TXBD_L4T_TCP);
			else
				temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T,
							    ENETC_TXBD_L4T_UDP);
			flags |= ENETC_TXBD_FLAGS_CSUM_LSO | ENETC_TXBD_FLAGS_L4CS;
		} else if (skb_checksum_help(skb)) {
			return 0;
		}
	}

	i = tx_ring->next_to_use;
	txbd = ENETC_TXBD(*tx_ring, i);
	prefetchw(txbd);

	dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
		goto dma_err;

	temp_bd.addr = cpu_to_le64(dma);
	temp_bd.buf_len = cpu_to_le16(len);

	tx_swbd = &tx_ring->tx_swbd[i];
	tx_swbd->dma = dma;
	tx_swbd->len = len;
	tx_swbd->is_dma_page = 0;
	tx_swbd->dir = DMA_TO_DEVICE;
	count++;

	do_vlan = skb_vlan_tag_present(skb);
	if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
		if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1,
				    &offset2) ||
		    msgtype != PTP_MSGTYPE_SYNC || twostep)
			WARN_ONCE(1, "Bad packet for one-step timestamping\n");
		else
			do_onestep_tstamp = true;
	} else if (skb->cb[0] & ENETC_F_TX_TSTAMP) {
		do_twostep_tstamp = true;
	}

	tx_swbd->do_twostep_tstamp = do_twostep_tstamp;
	tx_swbd->qbv_en = !!(priv->active_offloads & ENETC_F_QBV);
	tx_swbd->check_wb = tx_swbd->do_twostep_tstamp || tx_swbd->qbv_en;

	if (do_vlan || do_onestep_tstamp || do_twostep_tstamp)
		flags |= ENETC_TXBD_FLAGS_EX;

	if (tx_ring->tsd_enable)
		flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;

	/* first BD needs frm_len and offload flags set */
	temp_bd.frm_len = cpu_to_le16(skb->len);
	temp_bd.flags = flags;

	if (flags & ENETC_TXBD_FLAGS_TSE)
		temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns,
							  flags);

	if (flags & ENETC_TXBD_FLAGS_EX) {
		u8 e_flags = 0;
		*txbd = temp_bd;
		enetc_clear_tx_bd(&temp_bd);

		/* add extension BD for VLAN and/or timestamping */
		flags = 0;
		tx_swbd++;
		txbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = tx_ring->tx_swbd;
			txbd = ENETC_TXBD(*tx_ring, 0);
		}
		prefetchw(txbd);

		if (do_vlan) {
			temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
			temp_bd.ext.tpid = 0; /* < C-TAG */
			e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
		}

		if (do_onestep_tstamp) {
			__be32 new_sec_l, new_nsec;
			u32 lo, hi, nsec, val;
			__be16 new_sec_h;
			u8 *data;
			u64 sec;

			lo = enetc_rd_hot(hw, ENETC_SICTR0);
			hi = enetc_rd_hot(hw, ENETC_SICTR1);
			sec = (u64)hi << 32 | lo;
			nsec = do_div(sec, 1000000000);

			/* Configure extension BD */
			temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff);
			e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP;

			/* Update originTimestamp field of Sync packet
			 * - 48 bits seconds field
			 * - 32 bits nanoseconds field
			 *
			 * In addition, the UDP checksum needs to be updated
			 * by software after updating the originTimestamp field,
			 * otherwise the hardware will calculate the wrong
			 * checksum when updating the correction field and
			 * writing it back to the packet.
324 */ 325 data = skb_mac_header(skb); 326 new_sec_h = htons((sec >> 32) & 0xffff); 327 new_sec_l = htonl(sec & 0xffffffff); 328 new_nsec = htonl(nsec); 329 if (udp) { 330 struct udphdr *uh = udp_hdr(skb); 331 __be32 old_sec_l, old_nsec; 332 __be16 old_sec_h; 333 334 old_sec_h = *(__be16 *)(data + offset2); 335 inet_proto_csum_replace2(&uh->check, skb, old_sec_h, 336 new_sec_h, false); 337 338 old_sec_l = *(__be32 *)(data + offset2 + 2); 339 inet_proto_csum_replace4(&uh->check, skb, old_sec_l, 340 new_sec_l, false); 341 342 old_nsec = *(__be32 *)(data + offset2 + 6); 343 inet_proto_csum_replace4(&uh->check, skb, old_nsec, 344 new_nsec, false); 345 } 346 347 *(__be16 *)(data + offset2) = new_sec_h; 348 *(__be32 *)(data + offset2 + 2) = new_sec_l; 349 *(__be32 *)(data + offset2 + 6) = new_nsec; 350 351 /* Configure single-step register */ 352 val = ENETC_PM0_SINGLE_STEP_EN; 353 val |= ENETC_SET_SINGLE_STEP_OFFSET(offset1); 354 if (udp) 355 val |= ENETC_PM0_SINGLE_STEP_CH; 356 357 enetc_port_mac_wr(priv->si, ENETC_PM0_SINGLE_STEP, 358 val); 359 } else if (do_twostep_tstamp) { 360 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 361 e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP; 362 } 363 364 temp_bd.ext.e_flags = e_flags; 365 count++; 366 } 367 368 frag = &skb_shinfo(skb)->frags[0]; 369 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) { 370 len = skb_frag_size(frag); 371 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, 372 DMA_TO_DEVICE); 373 if (dma_mapping_error(tx_ring->dev, dma)) 374 goto dma_err; 375 376 *txbd = temp_bd; 377 enetc_clear_tx_bd(&temp_bd); 378 379 flags = 0; 380 tx_swbd++; 381 txbd++; 382 i++; 383 if (unlikely(i == tx_ring->bd_count)) { 384 i = 0; 385 tx_swbd = tx_ring->tx_swbd; 386 txbd = ENETC_TXBD(*tx_ring, 0); 387 } 388 prefetchw(txbd); 389 390 temp_bd.addr = cpu_to_le64(dma); 391 temp_bd.buf_len = cpu_to_le16(len); 392 393 tx_swbd->dma = dma; 394 tx_swbd->len = len; 395 tx_swbd->is_dma_page = 1; 396 tx_swbd->dir = DMA_TO_DEVICE; 397 count++; 398 } 399 400 /* last BD needs 'F' bit set */ 401 flags |= ENETC_TXBD_FLAGS_F; 402 temp_bd.flags = flags; 403 *txbd = temp_bd; 404 405 tx_ring->tx_swbd[i].is_eof = true; 406 tx_ring->tx_swbd[i].skb = skb; 407 408 enetc_bdr_idx_inc(tx_ring, &i); 409 tx_ring->next_to_use = i; 410 411 skb_tx_timestamp(skb); 412 413 enetc_update_tx_ring_tail(tx_ring); 414 415 return count; 416 417 dma_err: 418 dev_err(tx_ring->dev, "DMA map error"); 419 420 enetc_unwind_tx_frame(tx_ring, count, i); 421 422 return 0; 423 } 424 425 static int enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, 426 struct enetc_tx_swbd *tx_swbd, 427 union enetc_tx_bd *txbd, int *i, int hdr_len, 428 int data_len) 429 { 430 union enetc_tx_bd txbd_tmp; 431 u8 flags = 0, e_flags = 0; 432 dma_addr_t addr; 433 int count = 1; 434 435 enetc_clear_tx_bd(&txbd_tmp); 436 addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE; 437 438 if (skb_vlan_tag_present(skb)) 439 flags |= ENETC_TXBD_FLAGS_EX; 440 441 txbd_tmp.addr = cpu_to_le64(addr); 442 txbd_tmp.buf_len = cpu_to_le16(hdr_len); 443 444 /* first BD needs frm_len and offload flags set */ 445 txbd_tmp.frm_len = cpu_to_le16(hdr_len + data_len); 446 txbd_tmp.flags = flags; 447 448 /* For the TSO header we do not set the dma address since we do not 449 * want it unmapped when we do cleanup. We still set len so that we 450 * count the bytes sent. 
451 */ 452 tx_swbd->len = hdr_len; 453 tx_swbd->do_twostep_tstamp = false; 454 tx_swbd->check_wb = false; 455 456 /* Actually write the header in the BD */ 457 *txbd = txbd_tmp; 458 459 /* Add extension BD for VLAN */ 460 if (flags & ENETC_TXBD_FLAGS_EX) { 461 /* Get the next BD */ 462 enetc_bdr_idx_inc(tx_ring, i); 463 txbd = ENETC_TXBD(*tx_ring, *i); 464 tx_swbd = &tx_ring->tx_swbd[*i]; 465 prefetchw(txbd); 466 467 /* Setup the VLAN fields */ 468 enetc_clear_tx_bd(&txbd_tmp); 469 txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb)); 470 txbd_tmp.ext.tpid = 0; /* < C-TAG */ 471 e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS; 472 473 /* Write the BD */ 474 txbd_tmp.ext.e_flags = e_flags; 475 *txbd = txbd_tmp; 476 count++; 477 } 478 479 return count; 480 } 481 482 static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb, 483 struct enetc_tx_swbd *tx_swbd, 484 union enetc_tx_bd *txbd, char *data, 485 int size, bool last_bd) 486 { 487 union enetc_tx_bd txbd_tmp; 488 dma_addr_t addr; 489 u8 flags = 0; 490 491 enetc_clear_tx_bd(&txbd_tmp); 492 493 addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE); 494 if (unlikely(dma_mapping_error(tx_ring->dev, addr))) { 495 netdev_err(tx_ring->ndev, "DMA map error\n"); 496 return -ENOMEM; 497 } 498 499 if (last_bd) { 500 flags |= ENETC_TXBD_FLAGS_F; 501 tx_swbd->is_eof = 1; 502 } 503 504 txbd_tmp.addr = cpu_to_le64(addr); 505 txbd_tmp.buf_len = cpu_to_le16(size); 506 txbd_tmp.flags = flags; 507 508 tx_swbd->dma = addr; 509 tx_swbd->len = size; 510 tx_swbd->dir = DMA_TO_DEVICE; 511 512 *txbd = txbd_tmp; 513 514 return 0; 515 } 516 517 static __wsum enetc_tso_hdr_csum(struct tso_t *tso, struct sk_buff *skb, 518 char *hdr, int hdr_len, int *l4_hdr_len) 519 { 520 char *l4_hdr = hdr + skb_transport_offset(skb); 521 int mac_hdr_len = skb_network_offset(skb); 522 523 if (tso->tlen != sizeof(struct udphdr)) { 524 struct tcphdr *tcph = (struct tcphdr *)(l4_hdr); 525 526 tcph->check = 0; 527 } else { 528 struct udphdr *udph = (struct udphdr *)(l4_hdr); 529 530 udph->check = 0; 531 } 532 533 /* Compute the IP checksum. This is necessary since tso_build_hdr() 534 * already incremented the IP ID field. 535 */ 536 if (!tso->ipv6) { 537 struct iphdr *iph = (void *)(hdr + mac_hdr_len); 538 539 iph->check = 0; 540 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); 541 } 542 543 /* Compute the checksum over the L4 header. */ 544 *l4_hdr_len = hdr_len - skb_transport_offset(skb); 545 return csum_partial(l4_hdr, *l4_hdr_len, 0); 546 } 547 548 static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso, 549 struct sk_buff *skb, char *hdr, int len, 550 __wsum sum) 551 { 552 char *l4_hdr = hdr + skb_transport_offset(skb); 553 __sum16 csum_final; 554 555 /* Complete the L4 checksum by appending the pseudo-header to the 556 * already computed checksum. 
557 */ 558 if (!tso->ipv6) 559 csum_final = csum_tcpudp_magic(ip_hdr(skb)->saddr, 560 ip_hdr(skb)->daddr, 561 len, ip_hdr(skb)->protocol, sum); 562 else 563 csum_final = csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 564 &ipv6_hdr(skb)->daddr, 565 len, ipv6_hdr(skb)->nexthdr, sum); 566 567 if (tso->tlen != sizeof(struct udphdr)) { 568 struct tcphdr *tcph = (struct tcphdr *)(l4_hdr); 569 570 tcph->check = csum_final; 571 } else { 572 struct udphdr *udph = (struct udphdr *)(l4_hdr); 573 574 udph->check = csum_final; 575 } 576 } 577 578 static int enetc_lso_count_descs(const struct sk_buff *skb) 579 { 580 /* 4 BDs: 1 BD for LSO header + 1 BD for extended BD + 1 BD 581 * for linear area data but not include LSO header, namely 582 * skb_headlen(skb) - lso_hdr_len (it may be 0, but that's 583 * okay, we only need to consider the worst case). And 1 BD 584 * for gap. 585 */ 586 return skb_shinfo(skb)->nr_frags + 4; 587 } 588 589 static int enetc_lso_get_hdr_len(const struct sk_buff *skb) 590 { 591 int hdr_len, tlen; 592 593 tlen = skb_is_gso_tcp(skb) ? tcp_hdrlen(skb) : sizeof(struct udphdr); 594 hdr_len = skb_transport_offset(skb) + tlen; 595 596 return hdr_len; 597 } 598 599 static void enetc_lso_start(struct sk_buff *skb, struct enetc_lso_t *lso) 600 { 601 lso->lso_seg_size = skb_shinfo(skb)->gso_size; 602 lso->ipv6 = enetc_skb_is_ipv6(skb); 603 lso->tcp = skb_is_gso_tcp(skb); 604 lso->l3_hdr_len = skb_network_header_len(skb); 605 lso->l3_start = skb_network_offset(skb); 606 lso->hdr_len = enetc_lso_get_hdr_len(skb); 607 lso->total_len = skb->len - lso->hdr_len; 608 } 609 610 static void enetc_lso_map_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, 611 int *i, struct enetc_lso_t *lso) 612 { 613 union enetc_tx_bd txbd_tmp, *txbd; 614 struct enetc_tx_swbd *tx_swbd; 615 u16 frm_len, frm_len_ext; 616 u8 flags, e_flags = 0; 617 dma_addr_t addr; 618 char *hdr; 619 620 /* Get the first BD of the LSO BDs chain */ 621 txbd = ENETC_TXBD(*tx_ring, *i); 622 tx_swbd = &tx_ring->tx_swbd[*i]; 623 prefetchw(txbd); 624 625 /* Prepare LSO header: MAC + IP + TCP/UDP */ 626 hdr = tx_ring->tso_headers + *i * TSO_HEADER_SIZE; 627 memcpy(hdr, skb->data, lso->hdr_len); 628 addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE; 629 630 /* {frm_len_ext, frm_len} indicates the total length of 631 * large transmit data unit. frm_len contains the 16 least 632 * significant bits and frm_len_ext contains the 4 most 633 * significant bits. 634 */ 635 frm_len = lso->total_len & 0xffff; 636 frm_len_ext = (lso->total_len >> 16) & 0xf; 637 638 /* Set the flags of the first BD */ 639 flags = ENETC_TXBD_FLAGS_EX | ENETC_TXBD_FLAGS_CSUM_LSO | 640 ENETC_TXBD_FLAGS_LSO | ENETC_TXBD_FLAGS_L4CS; 641 642 enetc_clear_tx_bd(&txbd_tmp); 643 txbd_tmp.addr = cpu_to_le64(addr); 644 txbd_tmp.hdr_len = cpu_to_le16(lso->hdr_len); 645 646 /* first BD needs frm_len and offload flags set */ 647 txbd_tmp.frm_len = cpu_to_le16(frm_len); 648 txbd_tmp.flags = flags; 649 650 txbd_tmp.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START, lso->l3_start); 651 /* l3_hdr_size in 32-bits (4 bytes) */ 652 txbd_tmp.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN, 653 lso->l3_hdr_len / 4); 654 if (lso->ipv6) 655 txbd_tmp.l3_aux1 |= ENETC_TX_BD_L3T; 656 else 657 txbd_tmp.l3_aux0 |= ENETC_TX_BD_IPCS; 658 659 txbd_tmp.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T, lso->tcp ? 660 ENETC_TXBD_L4T_TCP : ENETC_TXBD_L4T_UDP); 661 662 /* For the LSO header we do not set the dma address since 663 * we do not want it unmapped when we do cleanup. We still 664 * set len so that we count the bytes sent. 
665 */ 666 tx_swbd->len = lso->hdr_len; 667 tx_swbd->do_twostep_tstamp = false; 668 tx_swbd->check_wb = false; 669 670 /* Actually write the header in the BD */ 671 *txbd = txbd_tmp; 672 673 /* Get the next BD, and the next BD is extended BD */ 674 enetc_bdr_idx_inc(tx_ring, i); 675 txbd = ENETC_TXBD(*tx_ring, *i); 676 tx_swbd = &tx_ring->tx_swbd[*i]; 677 prefetchw(txbd); 678 679 enetc_clear_tx_bd(&txbd_tmp); 680 if (skb_vlan_tag_present(skb)) { 681 /* Setup the VLAN fields */ 682 txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb)); 683 txbd_tmp.ext.tpid = ENETC_TPID_8021Q; 684 e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS; 685 } 686 687 /* Write the BD */ 688 txbd_tmp.ext.e_flags = e_flags; 689 txbd_tmp.ext.lso_sg_size = cpu_to_le16(lso->lso_seg_size); 690 txbd_tmp.ext.frm_len_ext = cpu_to_le16(frm_len_ext); 691 *txbd = txbd_tmp; 692 } 693 694 static int enetc_lso_map_data(struct enetc_bdr *tx_ring, struct sk_buff *skb, 695 int *i, struct enetc_lso_t *lso, int *count) 696 { 697 union enetc_tx_bd txbd_tmp, *txbd = NULL; 698 struct enetc_tx_swbd *tx_swbd; 699 skb_frag_t *frag; 700 dma_addr_t dma; 701 u8 flags = 0; 702 int len, f; 703 704 len = skb_headlen(skb) - lso->hdr_len; 705 if (len > 0) { 706 dma = dma_map_single(tx_ring->dev, skb->data + lso->hdr_len, 707 len, DMA_TO_DEVICE); 708 if (dma_mapping_error(tx_ring->dev, dma)) 709 return -ENOMEM; 710 711 enetc_bdr_idx_inc(tx_ring, i); 712 txbd = ENETC_TXBD(*tx_ring, *i); 713 tx_swbd = &tx_ring->tx_swbd[*i]; 714 prefetchw(txbd); 715 *count += 1; 716 717 enetc_clear_tx_bd(&txbd_tmp); 718 txbd_tmp.addr = cpu_to_le64(dma); 719 txbd_tmp.buf_len = cpu_to_le16(len); 720 721 tx_swbd->dma = dma; 722 tx_swbd->len = len; 723 tx_swbd->is_dma_page = 0; 724 tx_swbd->dir = DMA_TO_DEVICE; 725 } 726 727 frag = &skb_shinfo(skb)->frags[0]; 728 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) { 729 if (txbd) 730 *txbd = txbd_tmp; 731 732 len = skb_frag_size(frag); 733 dma = skb_frag_dma_map(tx_ring->dev, frag); 734 if (dma_mapping_error(tx_ring->dev, dma)) 735 return -ENOMEM; 736 737 /* Get the next BD */ 738 enetc_bdr_idx_inc(tx_ring, i); 739 txbd = ENETC_TXBD(*tx_ring, *i); 740 tx_swbd = &tx_ring->tx_swbd[*i]; 741 prefetchw(txbd); 742 *count += 1; 743 744 enetc_clear_tx_bd(&txbd_tmp); 745 txbd_tmp.addr = cpu_to_le64(dma); 746 txbd_tmp.buf_len = cpu_to_le16(len); 747 748 tx_swbd->dma = dma; 749 tx_swbd->len = len; 750 tx_swbd->is_dma_page = 1; 751 tx_swbd->dir = DMA_TO_DEVICE; 752 } 753 754 /* Last BD needs 'F' bit set */ 755 flags |= ENETC_TXBD_FLAGS_F; 756 txbd_tmp.flags = flags; 757 *txbd = txbd_tmp; 758 759 tx_swbd->is_eof = 1; 760 tx_swbd->skb = skb; 761 762 return 0; 763 } 764 765 static int enetc_lso_hw_offload(struct enetc_bdr *tx_ring, struct sk_buff *skb) 766 { 767 struct enetc_tx_swbd *tx_swbd; 768 struct enetc_lso_t lso = {0}; 769 int err, i, count = 0; 770 771 /* Initialize the LSO handler */ 772 enetc_lso_start(skb, &lso); 773 i = tx_ring->next_to_use; 774 775 enetc_lso_map_hdr(tx_ring, skb, &i, &lso); 776 /* First BD and an extend BD */ 777 count += 2; 778 779 err = enetc_lso_map_data(tx_ring, skb, &i, &lso, &count); 780 if (err) 781 goto dma_err; 782 783 /* Go to the next BD */ 784 enetc_bdr_idx_inc(tx_ring, &i); 785 tx_ring->next_to_use = i; 786 enetc_update_tx_ring_tail(tx_ring); 787 788 return count; 789 790 dma_err: 791 do { 792 tx_swbd = &tx_ring->tx_swbd[i]; 793 enetc_free_tx_frame(tx_ring, tx_swbd); 794 if (i == 0) 795 i = tx_ring->bd_count; 796 i--; 797 } while (--count); 798 799 return 0; 800 } 801 802 static int 
enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) 803 { 804 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); 805 int hdr_len, total_len, data_len; 806 struct enetc_tx_swbd *tx_swbd; 807 union enetc_tx_bd *txbd; 808 struct tso_t tso; 809 __wsum csum, csum2; 810 int count = 0, pos; 811 int err, i, bd_data_num; 812 813 /* Initialize the TSO handler, and prepare the first payload */ 814 hdr_len = tso_start(skb, &tso); 815 total_len = skb->len - hdr_len; 816 i = tx_ring->next_to_use; 817 818 while (total_len > 0) { 819 char *hdr; 820 821 /* Get the BD */ 822 txbd = ENETC_TXBD(*tx_ring, i); 823 tx_swbd = &tx_ring->tx_swbd[i]; 824 prefetchw(txbd); 825 826 /* Determine the length of this packet */ 827 data_len = min_t(int, skb_shinfo(skb)->gso_size, total_len); 828 total_len -= data_len; 829 830 /* prepare packet headers: MAC + IP + TCP */ 831 hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE; 832 tso_build_hdr(skb, hdr, &tso, data_len, total_len == 0); 833 834 /* compute the csum over the L4 header */ 835 csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos); 836 count += enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, 837 &i, hdr_len, data_len); 838 bd_data_num = 0; 839 840 while (data_len > 0) { 841 int size; 842 843 size = min_t(int, tso.size, data_len); 844 845 /* Advance the index in the BDR */ 846 enetc_bdr_idx_inc(tx_ring, &i); 847 txbd = ENETC_TXBD(*tx_ring, i); 848 tx_swbd = &tx_ring->tx_swbd[i]; 849 prefetchw(txbd); 850 851 /* Compute the checksum over this segment of data and 852 * add it to the csum already computed (over the L4 853 * header and possible other data segments). 854 */ 855 csum2 = csum_partial(tso.data, size, 0); 856 csum = csum_block_add(csum, csum2, pos); 857 pos += size; 858 859 err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd, 860 tso.data, size, 861 size == data_len); 862 if (err) { 863 if (i == 0) 864 i = tx_ring->bd_count; 865 i--; 866 867 goto err_map_data; 868 } 869 870 data_len -= size; 871 count++; 872 bd_data_num++; 873 tso_build_data(skb, &tso, size); 874 875 if (unlikely(bd_data_num >= priv->max_frags && data_len)) 876 goto err_chained_bd; 877 } 878 879 enetc_tso_complete_csum(tx_ring, &tso, skb, hdr, pos, csum); 880 881 if (total_len == 0) 882 tx_swbd->skb = skb; 883 884 /* Go to the next BD */ 885 enetc_bdr_idx_inc(tx_ring, &i); 886 } 887 888 tx_ring->next_to_use = i; 889 enetc_update_tx_ring_tail(tx_ring); 890 891 return count; 892 893 err_map_data: 894 dev_err(tx_ring->dev, "DMA map error"); 895 896 err_chained_bd: 897 enetc_unwind_tx_frame(tx_ring, count, i); 898 899 return 0; 900 } 901 902 static netdev_tx_t enetc_start_xmit(struct sk_buff *skb, 903 struct net_device *ndev) 904 { 905 struct enetc_ndev_priv *priv = netdev_priv(ndev); 906 struct enetc_bdr *tx_ring; 907 int count; 908 909 /* Queue one-step Sync packet if already locked */ 910 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { 911 if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, 912 &priv->flags)) { 913 skb_queue_tail(&priv->tx_skbs, skb); 914 return NETDEV_TX_OK; 915 } 916 } 917 918 tx_ring = priv->tx_ring[skb->queue_mapping]; 919 920 if (skb_is_gso(skb)) { 921 /* LSO data unit lengths of up to 256KB are supported */ 922 if (priv->active_offloads & ENETC_F_LSO && 923 (skb->len - enetc_lso_get_hdr_len(skb)) <= 924 ENETC_LSO_MAX_DATA_LEN) { 925 if (enetc_bd_unused(tx_ring) < enetc_lso_count_descs(skb)) { 926 netif_stop_subqueue(ndev, tx_ring->index); 927 return NETDEV_TX_BUSY; 928 } 929 930 count = enetc_lso_hw_offload(tx_ring, skb); 
931 } else { 932 if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) { 933 netif_stop_subqueue(ndev, tx_ring->index); 934 return NETDEV_TX_BUSY; 935 } 936 937 enetc_lock_mdio(); 938 count = enetc_map_tx_tso_buffs(tx_ring, skb); 939 enetc_unlock_mdio(); 940 } 941 } else { 942 if (unlikely(skb_shinfo(skb)->nr_frags > priv->max_frags)) 943 if (unlikely(skb_linearize(skb))) 944 goto drop_packet_err; 945 946 count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */ 947 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) { 948 netif_stop_subqueue(ndev, tx_ring->index); 949 return NETDEV_TX_BUSY; 950 } 951 952 enetc_lock_mdio(); 953 count = enetc_map_tx_buffs(tx_ring, skb); 954 enetc_unlock_mdio(); 955 } 956 957 if (unlikely(!count)) 958 goto drop_packet_err; 959 960 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED(priv->max_frags)) 961 netif_stop_subqueue(ndev, tx_ring->index); 962 963 return NETDEV_TX_OK; 964 965 drop_packet_err: 966 dev_kfree_skb_any(skb); 967 return NETDEV_TX_OK; 968 } 969 970 netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev) 971 { 972 struct enetc_ndev_priv *priv = netdev_priv(ndev); 973 u8 udp, msgtype, twostep; 974 u16 offset1, offset2; 975 976 /* Mark tx timestamp type on skb->cb[0] if requires */ 977 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 978 (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) { 979 skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK; 980 } else { 981 skb->cb[0] = 0; 982 } 983 984 /* Fall back to two-step timestamp if not one-step Sync packet */ 985 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { 986 if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, 987 &offset1, &offset2) || 988 msgtype != PTP_MSGTYPE_SYNC || twostep != 0) 989 skb->cb[0] = ENETC_F_TX_TSTAMP; 990 } 991 992 return enetc_start_xmit(skb, ndev); 993 } 994 EXPORT_SYMBOL_GPL(enetc_xmit); 995 996 static irqreturn_t enetc_msix(int irq, void *data) 997 { 998 struct enetc_int_vector *v = data; 999 int i; 1000 1001 enetc_lock_mdio(); 1002 1003 /* disable interrupts */ 1004 enetc_wr_reg_hot(v->rbier, 0); 1005 enetc_wr_reg_hot(v->ricr1, v->rx_ictt); 1006 1007 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) 1008 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0); 1009 1010 enetc_unlock_mdio(); 1011 1012 napi_schedule(&v->napi); 1013 1014 return IRQ_HANDLED; 1015 } 1016 1017 static void enetc_rx_dim_work(struct work_struct *w) 1018 { 1019 struct dim *dim = container_of(w, struct dim, work); 1020 struct dim_cq_moder moder = 1021 net_dim_get_rx_moderation(dim->mode, dim->profile_ix); 1022 struct enetc_int_vector *v = 1023 container_of(dim, struct enetc_int_vector, rx_dim); 1024 struct enetc_ndev_priv *priv = netdev_priv(v->rx_ring.ndev); 1025 1026 v->rx_ictt = enetc_usecs_to_cycles(moder.usec, priv->sysclk_freq); 1027 dim->state = DIM_START_MEASURE; 1028 } 1029 1030 static void enetc_rx_net_dim(struct enetc_int_vector *v) 1031 { 1032 struct dim_sample dim_sample = {}; 1033 1034 v->comp_cnt++; 1035 1036 if (!v->rx_napi_work) 1037 return; 1038 1039 dim_update_sample(v->comp_cnt, 1040 v->rx_ring.stats.packets, 1041 v->rx_ring.stats.bytes, 1042 &dim_sample); 1043 net_dim(&v->rx_dim, &dim_sample); 1044 } 1045 1046 static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci) 1047 { 1048 int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK; 1049 1050 return pi >= ci ? 
pi - ci : tx_ring->bd_count - ci + pi; 1051 } 1052 1053 static bool enetc_page_reusable(struct page *page) 1054 { 1055 return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1); 1056 } 1057 1058 static void enetc_reuse_page(struct enetc_bdr *rx_ring, 1059 struct enetc_rx_swbd *old) 1060 { 1061 struct enetc_rx_swbd *new; 1062 1063 new = &rx_ring->rx_swbd[rx_ring->next_to_alloc]; 1064 1065 /* next buf that may reuse a page */ 1066 enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc); 1067 1068 /* copy page reference */ 1069 *new = *old; 1070 } 1071 1072 static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd, 1073 u64 *tstamp) 1074 { 1075 u32 lo, hi, tstamp_lo; 1076 1077 lo = enetc_rd_hot(hw, ENETC_SICTR0); 1078 hi = enetc_rd_hot(hw, ENETC_SICTR1); 1079 tstamp_lo = le32_to_cpu(txbd->wb.tstamp); 1080 if (lo <= tstamp_lo) 1081 hi -= 1; 1082 *tstamp = (u64)hi << 32 | tstamp_lo; 1083 } 1084 1085 static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp) 1086 { 1087 struct skb_shared_hwtstamps shhwtstamps; 1088 1089 if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) { 1090 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 1091 shhwtstamps.hwtstamp = ns_to_ktime(tstamp); 1092 skb_txtime_consumed(skb); 1093 skb_tstamp_tx(skb, &shhwtstamps); 1094 } 1095 } 1096 1097 static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring, 1098 struct enetc_tx_swbd *tx_swbd) 1099 { 1100 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); 1101 struct enetc_rx_swbd rx_swbd = { 1102 .dma = tx_swbd->dma, 1103 .page = tx_swbd->page, 1104 .page_offset = tx_swbd->page_offset, 1105 .dir = tx_swbd->dir, 1106 .len = tx_swbd->len, 1107 }; 1108 struct enetc_bdr *rx_ring; 1109 1110 rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring); 1111 1112 if (likely(enetc_swbd_unused(rx_ring))) { 1113 enetc_reuse_page(rx_ring, &rx_swbd); 1114 1115 /* sync for use by the device */ 1116 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma, 1117 rx_swbd.page_offset, 1118 ENETC_RXB_DMA_SIZE_XDP, 1119 rx_swbd.dir); 1120 1121 rx_ring->stats.recycles++; 1122 } else { 1123 /* RX ring is already full, we need to unmap and free the 1124 * page, since there's nothing useful we can do with it. 
		 */
		rx_ring->stats.recycle_failures++;

		dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE,
			       rx_swbd.dir);
		__free_page(rx_swbd.page);
	}

	rx_ring->xdp.xdp_tx_in_flight--;
}

static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
{
	int tx_frm_cnt = 0, tx_byte_cnt = 0, tx_win_drop = 0;
	struct net_device *ndev = tx_ring->ndev;
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_tx_swbd *tx_swbd;
	int i, bds_to_clean;
	bool do_twostep_tstamp;
	u64 tstamp = 0;

	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->tx_swbd[i];

	bds_to_clean = enetc_bd_ready_count(tx_ring, i);

	do_twostep_tstamp = false;

	while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
		struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
		struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
		bool is_eof = tx_swbd->is_eof;

		if (unlikely(tx_swbd->check_wb)) {
			union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);

			if (txbd->flags & ENETC_TXBD_FLAGS_W &&
			    tx_swbd->do_twostep_tstamp) {
				enetc_get_tx_tstamp(&priv->si->hw, txbd,
						    &tstamp);
				do_twostep_tstamp = true;
			}

			if (tx_swbd->qbv_en &&
			    txbd->wb.status & ENETC_TXBD_STATS_WIN)
				tx_win_drop++;
		}

		if (tx_swbd->is_xdp_tx)
			enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd);
		else if (likely(tx_swbd->dma))
			enetc_unmap_tx_buff(tx_ring, tx_swbd);

		if (xdp_frame) {
			xdp_return_frame(xdp_frame);
		} else if (skb) {
			if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
				/* Start work to release the lock for the next
				 * one-step timestamping packet, and send one
				 * skb from the tx_skbs queue if any is pending.
				 */
				schedule_work(&priv->tx_onestep_tstamp);
			} else if (unlikely(do_twostep_tstamp)) {
				enetc_tstamp_tx(skb, tstamp);
				do_twostep_tstamp = false;
			}
			napi_consume_skb(skb, napi_budget);
		}

		tx_byte_cnt += tx_swbd->len;
		/* Scrub the swbd here so we don't have to do that
		 * when we reuse it during xmit
		 */
		memset(tx_swbd, 0, sizeof(*tx_swbd));

		bds_to_clean--;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = tx_ring->tx_swbd;
		}

		/* BD iteration loop end */
		if (is_eof) {
			tx_frm_cnt++;
			/* re-arm interrupt source */
			enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) |
					 BIT(16 + tx_ring->index));
		}

		if (unlikely(!bds_to_clean))
			bds_to_clean = enetc_bd_ready_count(tx_ring, i);
	}

	tx_ring->next_to_clean = i;
	tx_ring->stats.packets += tx_frm_cnt;
	tx_ring->stats.bytes += tx_byte_cnt;
	tx_ring->stats.win_drop += tx_win_drop;

	if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
		     __netif_subqueue_stopped(ndev, tx_ring->index) &&
		     !test_bit(ENETC_TX_DOWN, &priv->flags) &&
		     (enetc_bd_unused(tx_ring) >=
		      ENETC_TXBDS_MAX_NEEDED(priv->max_frags)))) {
		netif_wake_subqueue(ndev, tx_ring->index);
	}

	return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
}

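/* Descriptive comment (added): enetc_new_page() allocates a fresh page for an
 * RX software BD and DMA-maps it. When an XDP program is attached, the mapping
 * is bidirectional so that XDP_TX can transmit directly from RX pages without
 * a dma_unmap/dma_map cycle.
 */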
static bool enetc_new_page(struct enetc_bdr *rx_ring,
			   struct enetc_rx_swbd *rx_swbd)
{
	bool xdp = !!(rx_ring->xdp.prog);
	struct page *page;
	dma_addr_t addr;

	page = dev_alloc_page();
	if (unlikely(!page))
		return false;

	/* For XDP_TX, we forgo dma_unmap -> dma_map */
	rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;

	addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir);
	if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
		__free_page(page);

		return false;
	}

	rx_swbd->dma = addr;
	rx_swbd->page = page;
	rx_swbd->page_offset = rx_ring->buffer_offset;

	return true;
}

static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
	struct enetc_rx_swbd *rx_swbd;
	union enetc_rx_bd *rxbd;
	int i, j;

	i = rx_ring->next_to_use;
	rx_swbd = &rx_ring->rx_swbd[i];
	rxbd = enetc_rxbd(rx_ring, i);

	for (j = 0; j < buff_cnt; j++) {
		/* try reuse page */
		if (unlikely(!rx_swbd->page)) {
			if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
				rx_ring->stats.rx_alloc_errs++;
				break;
			}
		}

		/* update RxBD */
		rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
					   rx_swbd->page_offset);
		/* clear 'R' as well */
		rxbd->r.lstatus = 0;

		enetc_rxbd_next(rx_ring, &rxbd, &i);
		rx_swbd = &rx_ring->rx_swbd[i];
	}

	if (likely(j)) {
		rx_ring->next_to_alloc = i; /* keep track from page reuse */
		rx_ring->next_to_use = i;

		/* update ENETC's consumer index */
		enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
	}

	return j;
}

static void enetc_get_rx_tstamp(struct net_device *ndev,
				union enetc_rx_bd *rxbd,
				struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	u32 lo, hi, tstamp_lo;
	u64 tstamp;

	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
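		/* Descriptive comment (added): the extended BD only carries the
		 * low 32 bits of the timestamp, so rebuild the full 64-bit value
		 * from the free-running counter; if the low word has already
		 * wrapped past the BD value, borrow one from the high word.
		 */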
1315 lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0); 1316 hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1); 1317 rxbd = enetc_rxbd_ext(rxbd); 1318 tstamp_lo = le32_to_cpu(rxbd->ext.tstamp); 1319 if (lo <= tstamp_lo) 1320 hi -= 1; 1321 1322 tstamp = (u64)hi << 32 | tstamp_lo; 1323 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 1324 shhwtstamps->hwtstamp = ns_to_ktime(tstamp); 1325 } 1326 } 1327 1328 static void enetc_get_offloads(struct enetc_bdr *rx_ring, 1329 union enetc_rx_bd *rxbd, struct sk_buff *skb) 1330 { 1331 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev); 1332 1333 /* TODO: hashing */ 1334 if (rx_ring->ndev->features & NETIF_F_RXCSUM) { 1335 u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum); 1336 1337 skb->csum = csum_unfold((__force __sum16)~htons(inet_csum)); 1338 skb->ip_summed = CHECKSUM_COMPLETE; 1339 } 1340 1341 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) { 1342 __be16 tpid = 0; 1343 1344 switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) { 1345 case 0: 1346 tpid = htons(ETH_P_8021Q); 1347 break; 1348 case 1: 1349 tpid = htons(ETH_P_8021AD); 1350 break; 1351 case 2: 1352 tpid = htons(enetc_port_rd(&priv->si->hw, 1353 ENETC_PCVLANR1)); 1354 break; 1355 case 3: 1356 tpid = htons(enetc_port_rd(&priv->si->hw, 1357 ENETC_PCVLANR2)); 1358 break; 1359 default: 1360 break; 1361 } 1362 1363 __vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt)); 1364 } 1365 1366 if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && 1367 (priv->active_offloads & ENETC_F_RX_TSTAMP)) 1368 enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb); 1369 } 1370 1371 /* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS, 1372 * so it needs to work with both DMA_FROM_DEVICE as well as DMA_BIDIRECTIONAL 1373 * mapped buffers. 1374 */ 1375 static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring, 1376 int i, u16 size) 1377 { 1378 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; 1379 1380 dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma, 1381 rx_swbd->page_offset, 1382 size, rx_swbd->dir); 1383 return rx_swbd; 1384 } 1385 1386 /* Reuse the current page without performing half-page buffer flipping */ 1387 static void enetc_put_rx_buff(struct enetc_bdr *rx_ring, 1388 struct enetc_rx_swbd *rx_swbd) 1389 { 1390 size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset; 1391 1392 enetc_reuse_page(rx_ring, rx_swbd); 1393 1394 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, 1395 rx_swbd->page_offset, 1396 buffer_size, rx_swbd->dir); 1397 1398 rx_swbd->page = NULL; 1399 } 1400 1401 /* Reuse the current page by performing half-page buffer flipping */ 1402 static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring, 1403 struct enetc_rx_swbd *rx_swbd) 1404 { 1405 if (likely(enetc_page_reusable(rx_swbd->page))) { 1406 rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE; 1407 page_ref_inc(rx_swbd->page); 1408 1409 enetc_put_rx_buff(rx_ring, rx_swbd); 1410 } else { 1411 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, 1412 rx_swbd->dir); 1413 rx_swbd->page = NULL; 1414 } 1415 } 1416 1417 static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring, 1418 int i, u16 size) 1419 { 1420 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); 1421 struct sk_buff *skb; 1422 void *ba; 1423 1424 ba = page_address(rx_swbd->page) + rx_swbd->page_offset; 1425 skb = build_skb(ba - rx_ring->buffer_offset, ENETC_RXB_TRUESIZE); 1426 if (unlikely(!skb)) { 1427 rx_ring->stats.rx_alloc_errs++; 1428 return NULL; 1429 } 1430 1431 
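	/* Descriptive comment (added): hand the buffer half to the stack by
	 * reserving the configured headroom and recording the frame length,
	 * then flip the page so the other half can be reused for future RX.
	 */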
skb_reserve(skb, rx_ring->buffer_offset); 1432 __skb_put(skb, size); 1433 1434 enetc_flip_rx_buff(rx_ring, rx_swbd); 1435 1436 return skb; 1437 } 1438 1439 static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i, 1440 u16 size, struct sk_buff *skb) 1441 { 1442 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); 1443 1444 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page, 1445 rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE); 1446 1447 enetc_flip_rx_buff(rx_ring, rx_swbd); 1448 } 1449 1450 static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring, 1451 u32 bd_status, 1452 union enetc_rx_bd **rxbd, int *i) 1453 { 1454 if (likely(!(bd_status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK)))) 1455 return false; 1456 1457 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); 1458 enetc_rxbd_next(rx_ring, rxbd, i); 1459 1460 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { 1461 dma_rmb(); 1462 bd_status = le32_to_cpu((*rxbd)->r.lstatus); 1463 1464 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); 1465 enetc_rxbd_next(rx_ring, rxbd, i); 1466 } 1467 1468 rx_ring->ndev->stats.rx_dropped++; 1469 rx_ring->ndev->stats.rx_errors++; 1470 1471 return true; 1472 } 1473 1474 static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring, 1475 u32 bd_status, union enetc_rx_bd **rxbd, 1476 int *i, int *cleaned_cnt, int buffer_size) 1477 { 1478 struct sk_buff *skb; 1479 u16 size; 1480 1481 size = le16_to_cpu((*rxbd)->r.buf_len); 1482 skb = enetc_map_rx_buff_to_skb(rx_ring, *i, size); 1483 if (!skb) 1484 return NULL; 1485 1486 enetc_get_offloads(rx_ring, *rxbd, skb); 1487 1488 (*cleaned_cnt)++; 1489 1490 enetc_rxbd_next(rx_ring, rxbd, i); 1491 1492 /* not last BD in frame? */ 1493 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { 1494 bd_status = le32_to_cpu((*rxbd)->r.lstatus); 1495 size = buffer_size; 1496 1497 if (bd_status & ENETC_RXBD_LSTATUS_F) { 1498 dma_rmb(); 1499 size = le16_to_cpu((*rxbd)->r.buf_len); 1500 } 1501 1502 enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb); 1503 1504 (*cleaned_cnt)++; 1505 1506 enetc_rxbd_next(rx_ring, rxbd, i); 1507 } 1508 1509 skb_record_rx_queue(skb, rx_ring->index); 1510 skb->protocol = eth_type_trans(skb, rx_ring->ndev); 1511 1512 return skb; 1513 } 1514 1515 #define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */ 1516 1517 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, 1518 struct napi_struct *napi, int work_limit) 1519 { 1520 int rx_frm_cnt = 0, rx_byte_cnt = 0; 1521 int cleaned_cnt, i; 1522 1523 cleaned_cnt = enetc_bd_unused(rx_ring); 1524 /* next descriptor to process */ 1525 i = rx_ring->next_to_clean; 1526 1527 while (likely(rx_frm_cnt < work_limit)) { 1528 union enetc_rx_bd *rxbd; 1529 struct sk_buff *skb; 1530 u32 bd_status; 1531 1532 if (cleaned_cnt >= ENETC_RXBD_BUNDLE) 1533 cleaned_cnt -= enetc_refill_rx_ring(rx_ring, 1534 cleaned_cnt); 1535 1536 rxbd = enetc_rxbd(rx_ring, i); 1537 bd_status = le32_to_cpu(rxbd->r.lstatus); 1538 if (!bd_status) 1539 break; 1540 1541 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index)); 1542 dma_rmb(); /* for reading other rxbd fields */ 1543 1544 if (enetc_check_bd_errors_and_consume(rx_ring, bd_status, 1545 &rxbd, &i)) 1546 break; 1547 1548 skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i, 1549 &cleaned_cnt, ENETC_RXB_DMA_SIZE); 1550 if (!skb) 1551 break; 1552 1553 /* When set, the outer VLAN header is extracted and reported 1554 * in the receive buffer descriptor. So rx_byte_cnt should 1555 * add the length of the extracted VLAN header. 
1556 */ 1557 if (bd_status & ENETC_RXBD_FLAG_VLAN) 1558 rx_byte_cnt += VLAN_HLEN; 1559 rx_byte_cnt += skb->len + ETH_HLEN; 1560 rx_frm_cnt++; 1561 1562 napi_gro_receive(napi, skb); 1563 } 1564 1565 rx_ring->next_to_clean = i; 1566 1567 rx_ring->stats.packets += rx_frm_cnt; 1568 rx_ring->stats.bytes += rx_byte_cnt; 1569 1570 return rx_frm_cnt; 1571 } 1572 1573 static void enetc_xdp_map_tx_buff(struct enetc_bdr *tx_ring, int i, 1574 struct enetc_tx_swbd *tx_swbd, 1575 int frm_len) 1576 { 1577 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i); 1578 1579 prefetchw(txbd); 1580 1581 enetc_clear_tx_bd(txbd); 1582 txbd->addr = cpu_to_le64(tx_swbd->dma + tx_swbd->page_offset); 1583 txbd->buf_len = cpu_to_le16(tx_swbd->len); 1584 txbd->frm_len = cpu_to_le16(frm_len); 1585 1586 memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd)); 1587 } 1588 1589 /* Puts in the TX ring one XDP frame, mapped as an array of TX software buffer 1590 * descriptors. 1591 */ 1592 static bool enetc_xdp_tx(struct enetc_bdr *tx_ring, 1593 struct enetc_tx_swbd *xdp_tx_arr, int num_tx_swbd) 1594 { 1595 struct enetc_tx_swbd *tmp_tx_swbd = xdp_tx_arr; 1596 int i, k, frm_len = tmp_tx_swbd->len; 1597 1598 if (unlikely(enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(num_tx_swbd))) 1599 return false; 1600 1601 while (unlikely(!tmp_tx_swbd->is_eof)) { 1602 tmp_tx_swbd++; 1603 frm_len += tmp_tx_swbd->len; 1604 } 1605 1606 i = tx_ring->next_to_use; 1607 1608 for (k = 0; k < num_tx_swbd; k++) { 1609 struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[k]; 1610 1611 enetc_xdp_map_tx_buff(tx_ring, i, xdp_tx_swbd, frm_len); 1612 1613 /* last BD needs 'F' bit set */ 1614 if (xdp_tx_swbd->is_eof) { 1615 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i); 1616 1617 txbd->flags = ENETC_TXBD_FLAGS_F; 1618 } 1619 1620 enetc_bdr_idx_inc(tx_ring, &i); 1621 } 1622 1623 tx_ring->next_to_use = i; 1624 1625 return true; 1626 } 1627 1628 static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring, 1629 struct enetc_tx_swbd *xdp_tx_arr, 1630 struct xdp_frame *xdp_frame) 1631 { 1632 struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[0]; 1633 struct skb_shared_info *shinfo; 1634 void *data = xdp_frame->data; 1635 int len = xdp_frame->len; 1636 skb_frag_t *frag; 1637 dma_addr_t dma; 1638 unsigned int f; 1639 int n = 0; 1640 1641 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); 1642 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { 1643 netdev_err(tx_ring->ndev, "DMA map error\n"); 1644 return -1; 1645 } 1646 1647 xdp_tx_swbd->dma = dma; 1648 xdp_tx_swbd->dir = DMA_TO_DEVICE; 1649 xdp_tx_swbd->len = len; 1650 xdp_tx_swbd->is_xdp_redirect = true; 1651 xdp_tx_swbd->is_eof = false; 1652 xdp_tx_swbd->xdp_frame = NULL; 1653 1654 n++; 1655 1656 if (!xdp_frame_has_frags(xdp_frame)) 1657 goto out; 1658 1659 xdp_tx_swbd = &xdp_tx_arr[n]; 1660 1661 shinfo = xdp_get_shared_info_from_frame(xdp_frame); 1662 1663 for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags; 1664 f++, frag++) { 1665 data = skb_frag_address(frag); 1666 len = skb_frag_size(frag); 1667 1668 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); 1669 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { 1670 /* Undo the DMA mapping for all fragments */ 1671 while (--n >= 0) 1672 enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]); 1673 1674 netdev_err(tx_ring->ndev, "DMA map error\n"); 1675 return -1; 1676 } 1677 1678 xdp_tx_swbd->dma = dma; 1679 xdp_tx_swbd->dir = DMA_TO_DEVICE; 1680 xdp_tx_swbd->len = len; 1681 xdp_tx_swbd->is_xdp_redirect = true; 1682 xdp_tx_swbd->is_eof = 
false; 1683 xdp_tx_swbd->xdp_frame = NULL; 1684 1685 n++; 1686 xdp_tx_swbd = &xdp_tx_arr[n]; 1687 } 1688 out: 1689 xdp_tx_arr[n - 1].is_eof = true; 1690 xdp_tx_arr[n - 1].xdp_frame = xdp_frame; 1691 1692 return n; 1693 } 1694 1695 int enetc_xdp_xmit(struct net_device *ndev, int num_frames, 1696 struct xdp_frame **frames, u32 flags) 1697 { 1698 struct enetc_tx_swbd xdp_redirect_arr[ENETC_MAX_SKB_FRAGS] = {0}; 1699 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1700 struct enetc_bdr *tx_ring; 1701 int xdp_tx_bd_cnt, i, k; 1702 int xdp_tx_frm_cnt = 0; 1703 1704 if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) 1705 return -ENETDOWN; 1706 1707 enetc_lock_mdio(); 1708 1709 tx_ring = priv->xdp_tx_ring[smp_processor_id()]; 1710 1711 prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use)); 1712 1713 for (k = 0; k < num_frames; k++) { 1714 xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring, 1715 xdp_redirect_arr, 1716 frames[k]); 1717 if (unlikely(xdp_tx_bd_cnt < 0)) 1718 break; 1719 1720 if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr, 1721 xdp_tx_bd_cnt))) { 1722 for (i = 0; i < xdp_tx_bd_cnt; i++) 1723 enetc_unmap_tx_buff(tx_ring, 1724 &xdp_redirect_arr[i]); 1725 tx_ring->stats.xdp_tx_drops++; 1726 break; 1727 } 1728 1729 xdp_tx_frm_cnt++; 1730 } 1731 1732 if (unlikely((flags & XDP_XMIT_FLUSH) || k != xdp_tx_frm_cnt)) 1733 enetc_update_tx_ring_tail(tx_ring); 1734 1735 tx_ring->stats.xdp_tx += xdp_tx_frm_cnt; 1736 1737 enetc_unlock_mdio(); 1738 1739 return xdp_tx_frm_cnt; 1740 } 1741 EXPORT_SYMBOL_GPL(enetc_xdp_xmit); 1742 1743 static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i, 1744 struct xdp_buff *xdp_buff, u16 size) 1745 { 1746 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); 1747 void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset; 1748 1749 /* To be used for XDP_TX */ 1750 rx_swbd->len = size; 1751 1752 xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset, 1753 rx_ring->buffer_offset, size, false); 1754 } 1755 1756 static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i, 1757 u16 size, struct xdp_buff *xdp_buff) 1758 { 1759 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff); 1760 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); 1761 skb_frag_t *frag; 1762 1763 /* To be used for XDP_TX */ 1764 rx_swbd->len = size; 1765 1766 if (!xdp_buff_has_frags(xdp_buff)) { 1767 xdp_buff_set_frags_flag(xdp_buff); 1768 shinfo->xdp_frags_size = size; 1769 shinfo->nr_frags = 0; 1770 } else { 1771 shinfo->xdp_frags_size += size; 1772 } 1773 1774 if (page_is_pfmemalloc(rx_swbd->page)) 1775 xdp_buff_set_frag_pfmemalloc(xdp_buff); 1776 1777 frag = &shinfo->frags[shinfo->nr_frags]; 1778 skb_frag_fill_page_desc(frag, rx_swbd->page, rx_swbd->page_offset, 1779 size); 1780 1781 shinfo->nr_frags++; 1782 } 1783 1784 static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status, 1785 union enetc_rx_bd **rxbd, int *i, 1786 int *cleaned_cnt, struct xdp_buff *xdp_buff) 1787 { 1788 u16 size = le16_to_cpu((*rxbd)->r.buf_len); 1789 1790 xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq); 1791 1792 enetc_map_rx_buff_to_xdp(rx_ring, *i, xdp_buff, size); 1793 (*cleaned_cnt)++; 1794 enetc_rxbd_next(rx_ring, rxbd, i); 1795 1796 /* not last BD in frame? 
*/ 1797 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { 1798 bd_status = le32_to_cpu((*rxbd)->r.lstatus); 1799 size = ENETC_RXB_DMA_SIZE_XDP; 1800 1801 if (bd_status & ENETC_RXBD_LSTATUS_F) { 1802 dma_rmb(); 1803 size = le16_to_cpu((*rxbd)->r.buf_len); 1804 } 1805 1806 enetc_add_rx_buff_to_xdp(rx_ring, *i, size, xdp_buff); 1807 (*cleaned_cnt)++; 1808 enetc_rxbd_next(rx_ring, rxbd, i); 1809 } 1810 } 1811 1812 /* Convert RX buffer descriptors to TX buffer descriptors. These will be 1813 * recycled back into the RX ring in enetc_clean_tx_ring. 1814 */ 1815 static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *xdp_tx_arr, 1816 struct enetc_bdr *rx_ring, 1817 int rx_ring_first, int rx_ring_last) 1818 { 1819 int n = 0; 1820 1821 for (; rx_ring_first != rx_ring_last; 1822 n++, enetc_bdr_idx_inc(rx_ring, &rx_ring_first)) { 1823 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; 1824 struct enetc_tx_swbd *tx_swbd = &xdp_tx_arr[n]; 1825 1826 /* No need to dma_map, we already have DMA_BIDIRECTIONAL */ 1827 tx_swbd->dma = rx_swbd->dma; 1828 tx_swbd->dir = rx_swbd->dir; 1829 tx_swbd->page = rx_swbd->page; 1830 tx_swbd->page_offset = rx_swbd->page_offset; 1831 tx_swbd->len = rx_swbd->len; 1832 tx_swbd->is_dma_page = true; 1833 tx_swbd->is_xdp_tx = true; 1834 tx_swbd->is_eof = false; 1835 } 1836 1837 /* We rely on caller providing an rx_ring_last > rx_ring_first */ 1838 xdp_tx_arr[n - 1].is_eof = true; 1839 1840 return n; 1841 } 1842 1843 static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first, 1844 int rx_ring_last) 1845 { 1846 while (rx_ring_first != rx_ring_last) { 1847 enetc_put_rx_buff(rx_ring, 1848 &rx_ring->rx_swbd[rx_ring_first]); 1849 enetc_bdr_idx_inc(rx_ring, &rx_ring_first); 1850 } 1851 } 1852 1853 static void enetc_bulk_flip_buff(struct enetc_bdr *rx_ring, int rx_ring_first, 1854 int rx_ring_last) 1855 { 1856 while (rx_ring_first != rx_ring_last) { 1857 enetc_flip_rx_buff(rx_ring, 1858 &rx_ring->rx_swbd[rx_ring_first]); 1859 enetc_bdr_idx_inc(rx_ring, &rx_ring_first); 1860 } 1861 } 1862 1863 static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, 1864 struct napi_struct *napi, int work_limit, 1865 struct bpf_prog *prog) 1866 { 1867 int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0, xdp_redirect_frm_cnt = 0; 1868 struct enetc_tx_swbd xdp_tx_arr[ENETC_MAX_SKB_FRAGS] = {0}; 1869 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev); 1870 int rx_frm_cnt = 0, rx_byte_cnt = 0; 1871 struct enetc_bdr *tx_ring; 1872 int cleaned_cnt, i; 1873 u32 xdp_act; 1874 1875 cleaned_cnt = enetc_bd_unused(rx_ring); 1876 /* next descriptor to process */ 1877 i = rx_ring->next_to_clean; 1878 1879 while (likely(rx_frm_cnt < work_limit)) { 1880 union enetc_rx_bd *rxbd, *orig_rxbd; 1881 struct xdp_buff xdp_buff; 1882 struct sk_buff *skb; 1883 int orig_i, err; 1884 u32 bd_status; 1885 1886 rxbd = enetc_rxbd(rx_ring, i); 1887 bd_status = le32_to_cpu(rxbd->r.lstatus); 1888 if (!bd_status) 1889 break; 1890 1891 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index)); 1892 dma_rmb(); /* for reading other rxbd fields */ 1893 1894 if (enetc_check_bd_errors_and_consume(rx_ring, bd_status, 1895 &rxbd, &i)) 1896 break; 1897 1898 orig_rxbd = rxbd; 1899 orig_i = i; 1900 1901 enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i, 1902 &cleaned_cnt, &xdp_buff); 1903 1904 /* When set, the outer VLAN header is extracted and reported 1905 * in the receive buffer descriptor. So rx_byte_cnt should 1906 * add the length of the extracted VLAN header. 
1907 */ 1908 if (bd_status & ENETC_RXBD_FLAG_VLAN) 1909 rx_byte_cnt += VLAN_HLEN; 1910 rx_byte_cnt += xdp_get_buff_len(&xdp_buff); 1911 1912 xdp_act = bpf_prog_run_xdp(prog, &xdp_buff); 1913 1914 switch (xdp_act) { 1915 default: 1916 bpf_warn_invalid_xdp_action(rx_ring->ndev, prog, xdp_act); 1917 fallthrough; 1918 case XDP_ABORTED: 1919 trace_xdp_exception(rx_ring->ndev, prog, xdp_act); 1920 fallthrough; 1921 case XDP_DROP: 1922 enetc_xdp_drop(rx_ring, orig_i, i); 1923 rx_ring->stats.xdp_drops++; 1924 break; 1925 case XDP_PASS: 1926 skb = xdp_build_skb_from_buff(&xdp_buff); 1927 /* Probably under memory pressure, stop NAPI */ 1928 if (unlikely(!skb)) { 1929 enetc_xdp_drop(rx_ring, orig_i, i); 1930 rx_ring->stats.xdp_drops++; 1931 goto out; 1932 } 1933 1934 enetc_get_offloads(rx_ring, orig_rxbd, skb); 1935 1936 /* These buffers are about to be owned by the stack. 1937 * Update our buffer cache (the rx_swbd array elements) 1938 * with their other page halves. 1939 */ 1940 enetc_bulk_flip_buff(rx_ring, orig_i, i); 1941 1942 napi_gro_receive(napi, skb); 1943 break; 1944 case XDP_TX: 1945 tx_ring = priv->xdp_tx_ring[rx_ring->index]; 1946 if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) { 1947 enetc_xdp_drop(rx_ring, orig_i, i); 1948 tx_ring->stats.xdp_tx_drops++; 1949 break; 1950 } 1951 1952 xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr, 1953 rx_ring, 1954 orig_i, i); 1955 1956 if (!enetc_xdp_tx(tx_ring, xdp_tx_arr, xdp_tx_bd_cnt)) { 1957 enetc_xdp_drop(rx_ring, orig_i, i); 1958 tx_ring->stats.xdp_tx_drops++; 1959 } else { 1960 tx_ring->stats.xdp_tx++; 1961 rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt; 1962 xdp_tx_frm_cnt++; 1963 /* The XDP_TX enqueue was successful, so we 1964 * need to scrub the RX software BDs because 1965 * the ownership of the buffers no longer 1966 * belongs to the RX ring, and we must prevent 1967 * enetc_refill_rx_ring() from reusing 1968 * rx_swbd->page. 
				 */
				while (orig_i != i) {
					rx_ring->rx_swbd[orig_i].page = NULL;
					enetc_bdr_idx_inc(rx_ring, &orig_i);
				}
			}
			break;
		case XDP_REDIRECT:
			err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
			if (unlikely(err)) {
				enetc_xdp_drop(rx_ring, orig_i, i);
				rx_ring->stats.xdp_redirect_failures++;
			} else {
				enetc_bulk_flip_buff(rx_ring, orig_i, i);
				xdp_redirect_frm_cnt++;
				rx_ring->stats.xdp_redirect++;
			}
		}

		rx_frm_cnt++;
	}

out:
	rx_ring->next_to_clean = i;

	rx_ring->stats.packets += rx_frm_cnt;
	rx_ring->stats.bytes += rx_byte_cnt;

	if (xdp_redirect_frm_cnt)
		xdp_do_flush();

	if (xdp_tx_frm_cnt)
		enetc_update_tx_ring_tail(tx_ring);

	if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight)
		enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
				     rx_ring->xdp.xdp_tx_in_flight);

	return rx_frm_cnt;
}

static int enetc_poll(struct napi_struct *napi, int budget)
{
	struct enetc_int_vector
		*v = container_of(napi, struct enetc_int_vector, napi);
	struct enetc_bdr *rx_ring = &v->rx_ring;
	struct bpf_prog *prog;
	bool complete = true;
	int work_done;
	int i;

	enetc_lock_mdio();

	for (i = 0; i < v->count_tx_rings; i++)
		if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
			complete = false;

	prog = rx_ring->xdp.prog;
	if (prog)
		work_done = enetc_clean_rx_ring_xdp(rx_ring, napi, budget, prog);
	else
		work_done = enetc_clean_rx_ring(rx_ring, napi, budget);
	if (work_done == budget)
		complete = false;
	if (work_done)
		v->rx_napi_work = true;

	if (!complete) {
		enetc_unlock_mdio();
		return budget;
	}

	napi_complete_done(napi, work_done);

	if (likely(v->rx_dim_en))
		enetc_rx_net_dim(v);

	v->rx_napi_work = false;

	/* enable interrupts */
	enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);

	for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
		enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
				 ENETC_TBIER_TXTIE);

	enetc_unlock_mdio();

	return work_done;
}

/* Probing and Init */
#define ENETC_MAX_RFS_SIZE 64
void enetc_get_si_caps(struct enetc_si *si)
{
	struct enetc_hw *hw = &si->hw;
	u32 val;

	/* find out how many of various resources we have to work with */
	val = enetc_rd(hw, ENETC_SICAPR0);
	si->num_rx_rings = (val >> 16) & 0xff;
	si->num_tx_rings = val & 0xff;

	val = enetc_rd(hw, ENETC_SIPCAPR0);
	if (val & ENETC_SIPCAPR0_RFS) {
		val = enetc_rd(hw, ENETC_SIRFSCAPR);
		si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
		si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
	} else {
		/* This ENETC instance does not support RFS */
		si->num_fs_entries = 0;
	}

	si->num_rss = 0;
	val = enetc_rd(hw, ENETC_SIPCAPR0);
	if (val & ENETC_SIPCAPR0_RSS) {
		u32 rss;

		rss = enetc_rd(hw, ENETC_SIRSSCAPR);
		si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
	}

	if (val & ENETC_SIPCAPR0_LSO)
		si->hw_features |= ENETC_SI_F_LSO;
}
EXPORT_SYMBOL_GPL(enetc_get_si_caps);

static int enetc_dma_alloc_bdr(struct enetc_bdr_resource *res)
{
	size_t bd_base_size = res->bd_count * res->bd_size;

	res->bd_base = dma_alloc_coherent(res->dev, bd_base_size,
					  &res->bd_dma_base,
GFP_KERNEL); 2102 if (!res->bd_base) 2103 return -ENOMEM; 2104 2105 /* h/w requires 128B alignment */ 2106 if (!IS_ALIGNED(res->bd_dma_base, 128)) { 2107 dma_free_coherent(res->dev, bd_base_size, res->bd_base, 2108 res->bd_dma_base); 2109 return -EINVAL; 2110 } 2111 2112 return 0; 2113 } 2114 2115 static void enetc_dma_free_bdr(const struct enetc_bdr_resource *res) 2116 { 2117 size_t bd_base_size = res->bd_count * res->bd_size; 2118 2119 dma_free_coherent(res->dev, bd_base_size, res->bd_base, 2120 res->bd_dma_base); 2121 } 2122 2123 static int enetc_alloc_tx_resource(struct enetc_bdr_resource *res, 2124 struct device *dev, size_t bd_count) 2125 { 2126 int err; 2127 2128 res->dev = dev; 2129 res->bd_count = bd_count; 2130 res->bd_size = sizeof(union enetc_tx_bd); 2131 2132 res->tx_swbd = vcalloc(bd_count, sizeof(*res->tx_swbd)); 2133 if (!res->tx_swbd) 2134 return -ENOMEM; 2135 2136 err = enetc_dma_alloc_bdr(res); 2137 if (err) 2138 goto err_alloc_bdr; 2139 2140 res->tso_headers = dma_alloc_coherent(dev, bd_count * TSO_HEADER_SIZE, 2141 &res->tso_headers_dma, 2142 GFP_KERNEL); 2143 if (!res->tso_headers) { 2144 err = -ENOMEM; 2145 goto err_alloc_tso; 2146 } 2147 2148 return 0; 2149 2150 err_alloc_tso: 2151 enetc_dma_free_bdr(res); 2152 err_alloc_bdr: 2153 vfree(res->tx_swbd); 2154 res->tx_swbd = NULL; 2155 2156 return err; 2157 } 2158 2159 static void enetc_free_tx_resource(const struct enetc_bdr_resource *res) 2160 { 2161 dma_free_coherent(res->dev, res->bd_count * TSO_HEADER_SIZE, 2162 res->tso_headers, res->tso_headers_dma); 2163 enetc_dma_free_bdr(res); 2164 vfree(res->tx_swbd); 2165 } 2166 2167 static struct enetc_bdr_resource * 2168 enetc_alloc_tx_resources(struct enetc_ndev_priv *priv) 2169 { 2170 struct enetc_bdr_resource *tx_res; 2171 int i, err; 2172 2173 tx_res = kcalloc(priv->num_tx_rings, sizeof(*tx_res), GFP_KERNEL); 2174 if (!tx_res) 2175 return ERR_PTR(-ENOMEM); 2176 2177 for (i = 0; i < priv->num_tx_rings; i++) { 2178 struct enetc_bdr *tx_ring = priv->tx_ring[i]; 2179 2180 err = enetc_alloc_tx_resource(&tx_res[i], tx_ring->dev, 2181 tx_ring->bd_count); 2182 if (err) 2183 goto fail; 2184 } 2185 2186 return tx_res; 2187 2188 fail: 2189 while (i-- > 0) 2190 enetc_free_tx_resource(&tx_res[i]); 2191 2192 kfree(tx_res); 2193 2194 return ERR_PTR(err); 2195 } 2196 2197 static void enetc_free_tx_resources(const struct enetc_bdr_resource *tx_res, 2198 size_t num_resources) 2199 { 2200 size_t i; 2201 2202 for (i = 0; i < num_resources; i++) 2203 enetc_free_tx_resource(&tx_res[i]); 2204 2205 kfree(tx_res); 2206 } 2207 2208 static int enetc_alloc_rx_resource(struct enetc_bdr_resource *res, 2209 struct device *dev, size_t bd_count, 2210 bool extended) 2211 { 2212 int err; 2213 2214 res->dev = dev; 2215 res->bd_count = bd_count; 2216 res->bd_size = sizeof(union enetc_rx_bd); 2217 if (extended) 2218 res->bd_size *= 2; 2219 2220 res->rx_swbd = vcalloc(bd_count, sizeof(struct enetc_rx_swbd)); 2221 if (!res->rx_swbd) 2222 return -ENOMEM; 2223 2224 err = enetc_dma_alloc_bdr(res); 2225 if (err) { 2226 vfree(res->rx_swbd); 2227 return err; 2228 } 2229 2230 return 0; 2231 } 2232 2233 static void enetc_free_rx_resource(const struct enetc_bdr_resource *res) 2234 { 2235 enetc_dma_free_bdr(res); 2236 vfree(res->rx_swbd); 2237 } 2238 2239 static struct enetc_bdr_resource * 2240 enetc_alloc_rx_resources(struct enetc_ndev_priv *priv, bool extended) 2241 { 2242 struct enetc_bdr_resource *rx_res; 2243 int i, err; 2244 2245 rx_res = kcalloc(priv->num_rx_rings, sizeof(*rx_res), GFP_KERNEL); 2246 if 
(!rx_res) 2247 return ERR_PTR(-ENOMEM); 2248 2249 for (i = 0; i < priv->num_rx_rings; i++) { 2250 struct enetc_bdr *rx_ring = priv->rx_ring[i]; 2251 2252 err = enetc_alloc_rx_resource(&rx_res[i], rx_ring->dev, 2253 rx_ring->bd_count, extended); 2254 if (err) 2255 goto fail; 2256 } 2257 2258 return rx_res; 2259 2260 fail: 2261 while (i-- > 0) 2262 enetc_free_rx_resource(&rx_res[i]); 2263 2264 kfree(rx_res); 2265 2266 return ERR_PTR(err); 2267 } 2268 2269 static void enetc_free_rx_resources(const struct enetc_bdr_resource *rx_res, 2270 size_t num_resources) 2271 { 2272 size_t i; 2273 2274 for (i = 0; i < num_resources; i++) 2275 enetc_free_rx_resource(&rx_res[i]); 2276 2277 kfree(rx_res); 2278 } 2279 2280 static void enetc_assign_tx_resource(struct enetc_bdr *tx_ring, 2281 const struct enetc_bdr_resource *res) 2282 { 2283 tx_ring->bd_base = res ? res->bd_base : NULL; 2284 tx_ring->bd_dma_base = res ? res->bd_dma_base : 0; 2285 tx_ring->tx_swbd = res ? res->tx_swbd : NULL; 2286 tx_ring->tso_headers = res ? res->tso_headers : NULL; 2287 tx_ring->tso_headers_dma = res ? res->tso_headers_dma : 0; 2288 } 2289 2290 static void enetc_assign_rx_resource(struct enetc_bdr *rx_ring, 2291 const struct enetc_bdr_resource *res) 2292 { 2293 rx_ring->bd_base = res ? res->bd_base : NULL; 2294 rx_ring->bd_dma_base = res ? res->bd_dma_base : 0; 2295 rx_ring->rx_swbd = res ? res->rx_swbd : NULL; 2296 } 2297 2298 static void enetc_assign_tx_resources(struct enetc_ndev_priv *priv, 2299 const struct enetc_bdr_resource *res) 2300 { 2301 int i; 2302 2303 if (priv->tx_res) 2304 enetc_free_tx_resources(priv->tx_res, priv->num_tx_rings); 2305 2306 for (i = 0; i < priv->num_tx_rings; i++) { 2307 enetc_assign_tx_resource(priv->tx_ring[i], 2308 res ? &res[i] : NULL); 2309 } 2310 2311 priv->tx_res = res; 2312 } 2313 2314 static void enetc_assign_rx_resources(struct enetc_ndev_priv *priv, 2315 const struct enetc_bdr_resource *res) 2316 { 2317 int i; 2318 2319 if (priv->rx_res) 2320 enetc_free_rx_resources(priv->rx_res, priv->num_rx_rings); 2321 2322 for (i = 0; i < priv->num_rx_rings; i++) { 2323 enetc_assign_rx_resource(priv->rx_ring[i], 2324 res ? 
&res[i] : NULL); 2325 } 2326 2327 priv->rx_res = res; 2328 } 2329 2330 static void enetc_free_tx_ring(struct enetc_bdr *tx_ring) 2331 { 2332 int i; 2333 2334 for (i = 0; i < tx_ring->bd_count; i++) { 2335 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; 2336 2337 enetc_free_tx_frame(tx_ring, tx_swbd); 2338 } 2339 } 2340 2341 static void enetc_free_rx_ring(struct enetc_bdr *rx_ring) 2342 { 2343 int i; 2344 2345 for (i = 0; i < rx_ring->bd_count; i++) { 2346 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; 2347 2348 if (!rx_swbd->page) 2349 continue; 2350 2351 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, 2352 rx_swbd->dir); 2353 __free_page(rx_swbd->page); 2354 rx_swbd->page = NULL; 2355 } 2356 } 2357 2358 static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv) 2359 { 2360 int i; 2361 2362 for (i = 0; i < priv->num_rx_rings; i++) 2363 enetc_free_rx_ring(priv->rx_ring[i]); 2364 2365 for (i = 0; i < priv->num_tx_rings; i++) 2366 enetc_free_tx_ring(priv->tx_ring[i]); 2367 } 2368 2369 static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups) 2370 { 2371 int *rss_table; 2372 int i; 2373 2374 rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL); 2375 if (!rss_table) 2376 return -ENOMEM; 2377 2378 /* Set up RSS table defaults */ 2379 for (i = 0; i < si->num_rss; i++) 2380 rss_table[i] = i % num_groups; 2381 2382 enetc_set_rss_table(si, rss_table, si->num_rss); 2383 2384 kfree(rss_table); 2385 2386 return 0; 2387 } 2388 2389 static void enetc_set_lso_flags_mask(struct enetc_hw *hw) 2390 { 2391 enetc_wr(hw, ENETC4_SILSOSFMR0, 2392 SILSOSFMR0_VAL_SET(ENETC4_TCP_NL_SEG_FLAGS_DMASK, 2393 ENETC4_TCP_NL_SEG_FLAGS_DMASK)); 2394 enetc_wr(hw, ENETC4_SILSOSFMR1, 0); 2395 } 2396 2397 int enetc_configure_si(struct enetc_ndev_priv *priv) 2398 { 2399 struct enetc_si *si = priv->si; 2400 struct enetc_hw *hw = &si->hw; 2401 int err; 2402 2403 /* set SI cache attributes */ 2404 enetc_wr(hw, ENETC_SICAR0, 2405 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); 2406 enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI); 2407 /* enable SI */ 2408 enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN); 2409 2410 if (si->hw_features & ENETC_SI_F_LSO) 2411 enetc_set_lso_flags_mask(hw); 2412 2413 /* TODO: RSS support for i.MX95 will be supported later, and the 2414 * is_enetc_rev1() condition will be removed 2415 */ 2416 if (si->num_rss && is_enetc_rev1(si)) { 2417 err = enetc_setup_default_rss_table(si, priv->num_rx_rings); 2418 if (err) 2419 return err; 2420 } 2421 2422 return 0; 2423 } 2424 EXPORT_SYMBOL_GPL(enetc_configure_si); 2425 2426 void enetc_init_si_rings_params(struct enetc_ndev_priv *priv) 2427 { 2428 struct enetc_si *si = priv->si; 2429 int cpus = num_online_cpus(); 2430 2431 priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE; 2432 priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE; 2433 2434 /* Enable all available TX rings in order to configure as many 2435 * priorities as possible, when needed. 
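* Rx rings, by contrast, are capped at one per online CPU below, so
* each Rx ring maps 1:1 onto an interrupt vector and its NAPI context.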
2436 * TODO: Make # of TX rings run-time configurable 2437 */ 2438 priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings); 2439 priv->num_tx_rings = si->num_tx_rings; 2440 priv->bdr_int_num = priv->num_rx_rings; 2441 priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL; 2442 priv->tx_ictt = enetc_usecs_to_cycles(600, priv->sysclk_freq); 2443 } 2444 EXPORT_SYMBOL_GPL(enetc_init_si_rings_params); 2445 2446 int enetc_alloc_si_resources(struct enetc_ndev_priv *priv) 2447 { 2448 struct enetc_si *si = priv->si; 2449 2450 priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules), 2451 GFP_KERNEL); 2452 if (!priv->cls_rules) 2453 return -ENOMEM; 2454 2455 return 0; 2456 } 2457 EXPORT_SYMBOL_GPL(enetc_alloc_si_resources); 2458 2459 void enetc_free_si_resources(struct enetc_ndev_priv *priv) 2460 { 2461 kfree(priv->cls_rules); 2462 } 2463 EXPORT_SYMBOL_GPL(enetc_free_si_resources); 2464 2465 static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) 2466 { 2467 int idx = tx_ring->index; 2468 u32 tbmr; 2469 2470 enetc_txbdr_wr(hw, idx, ENETC_TBBAR0, 2471 lower_32_bits(tx_ring->bd_dma_base)); 2472 2473 enetc_txbdr_wr(hw, idx, ENETC_TBBAR1, 2474 upper_32_bits(tx_ring->bd_dma_base)); 2475 2476 WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */ 2477 enetc_txbdr_wr(hw, idx, ENETC_TBLENR, 2478 ENETC_RTBLENR_LEN(tx_ring->bd_count)); 2479 2480 /* clearing PI/CI registers for Tx not supported, adjust sw indexes */ 2481 tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR); 2482 tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR); 2483 2484 /* enable Tx ints by setting pkt thr to 1 */ 2485 enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1); 2486 2487 tbmr = ENETC_TBMR_SET_PRIO(tx_ring->prio); 2488 if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) 2489 tbmr |= ENETC_TBMR_VIH; 2490 2491 /* enable ring */ 2492 enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr); 2493 2494 tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR); 2495 tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR); 2496 tx_ring->idr = hw->reg + ENETC_SITXIDR; 2497 } 2498 2499 static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring, 2500 bool extended) 2501 { 2502 int idx = rx_ring->index; 2503 u32 rbmr = 0; 2504 2505 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0, 2506 lower_32_bits(rx_ring->bd_dma_base)); 2507 2508 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1, 2509 upper_32_bits(rx_ring->bd_dma_base)); 2510 2511 WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */ 2512 enetc_rxbdr_wr(hw, idx, ENETC_RBLENR, 2513 ENETC_RTBLENR_LEN(rx_ring->bd_count)); 2514 2515 if (rx_ring->xdp.prog) 2516 enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE_XDP); 2517 else 2518 enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE); 2519 2520 /* Also prepare the consumer index in case page allocation never 2521 * succeeds. In that case, hardware will never advance producer index 2522 * to match consumer index, and will drop all frames. 
2523 */ 2524 enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0); 2525 enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1); 2526 2527 /* enable Rx ints by setting pkt thr to 1 */ 2528 enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1); 2529 2530 rx_ring->ext_en = extended; 2531 if (rx_ring->ext_en) 2532 rbmr |= ENETC_RBMR_BDS; 2533 2534 if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) 2535 rbmr |= ENETC_RBMR_VTE; 2536 2537 rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR); 2538 rx_ring->idr = hw->reg + ENETC_SIRXIDR; 2539 2540 rx_ring->next_to_clean = 0; 2541 rx_ring->next_to_use = 0; 2542 rx_ring->next_to_alloc = 0; 2543 2544 enetc_lock_mdio(); 2545 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring)); 2546 enetc_unlock_mdio(); 2547 2548 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); 2549 } 2550 2551 static void enetc_setup_bdrs(struct enetc_ndev_priv *priv, bool extended) 2552 { 2553 struct enetc_hw *hw = &priv->si->hw; 2554 int i; 2555 2556 for (i = 0; i < priv->num_tx_rings; i++) 2557 enetc_setup_txbdr(hw, priv->tx_ring[i]); 2558 2559 for (i = 0; i < priv->num_rx_rings; i++) 2560 enetc_setup_rxbdr(hw, priv->rx_ring[i], extended); 2561 } 2562 2563 static void enetc_enable_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) 2564 { 2565 int idx = tx_ring->index; 2566 u32 tbmr; 2567 2568 tbmr = enetc_txbdr_rd(hw, idx, ENETC_TBMR); 2569 tbmr |= ENETC_TBMR_EN; 2570 enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr); 2571 } 2572 2573 static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) 2574 { 2575 int idx = rx_ring->index; 2576 u32 rbmr; 2577 2578 rbmr = enetc_rxbdr_rd(hw, idx, ENETC_RBMR); 2579 rbmr |= ENETC_RBMR_EN; 2580 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); 2581 } 2582 2583 static void enetc_enable_rx_bdrs(struct enetc_ndev_priv *priv) 2584 { 2585 struct enetc_hw *hw = &priv->si->hw; 2586 int i; 2587 2588 for (i = 0; i < priv->num_rx_rings; i++) 2589 enetc_enable_rxbdr(hw, priv->rx_ring[i]); 2590 } 2591 2592 static void enetc_enable_tx_bdrs(struct enetc_ndev_priv *priv) 2593 { 2594 struct enetc_hw *hw = &priv->si->hw; 2595 int i; 2596 2597 for (i = 0; i < priv->num_tx_rings; i++) 2598 enetc_enable_txbdr(hw, priv->tx_ring[i]); 2599 } 2600 2601 static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) 2602 { 2603 int idx = rx_ring->index; 2604 2605 /* disable EN bit on ring */ 2606 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0); 2607 } 2608 2609 static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) 2610 { 2611 int idx = rx_ring->index; 2612 2613 /* disable EN bit on ring */ 2614 enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0); 2615 } 2616 2617 static void enetc_disable_rx_bdrs(struct enetc_ndev_priv *priv) 2618 { 2619 struct enetc_hw *hw = &priv->si->hw; 2620 int i; 2621 2622 for (i = 0; i < priv->num_rx_rings; i++) 2623 enetc_disable_rxbdr(hw, priv->rx_ring[i]); 2624 } 2625 2626 static void enetc_disable_tx_bdrs(struct enetc_ndev_priv *priv) 2627 { 2628 struct enetc_hw *hw = &priv->si->hw; 2629 int i; 2630 2631 for (i = 0; i < priv->num_tx_rings; i++) 2632 enetc_disable_txbdr(hw, priv->tx_ring[i]); 2633 } 2634 2635 static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) 2636 { 2637 int delay = 8, timeout = 100; 2638 int idx = tx_ring->index; 2639 2640 /* wait for busy to clear */ 2641 while (delay < timeout && 2642 enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) { 2643 msleep(delay); 2644 delay *= 2; 2645 } 2646 2647 if (delay >= timeout) 2648 netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n", 
2649 idx); 2650 } 2651 2652 static void enetc_wait_bdrs(struct enetc_ndev_priv *priv) 2653 { 2654 struct enetc_hw *hw = &priv->si->hw; 2655 int i; 2656 2657 for (i = 0; i < priv->num_tx_rings; i++) 2658 enetc_wait_txbdr(hw, priv->tx_ring[i]); 2659 } 2660 2661 static int enetc_setup_irqs(struct enetc_ndev_priv *priv) 2662 { 2663 struct pci_dev *pdev = priv->si->pdev; 2664 struct enetc_hw *hw = &priv->si->hw; 2665 int i, j, err; 2666 2667 for (i = 0; i < priv->bdr_int_num; i++) { 2668 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); 2669 struct enetc_int_vector *v = priv->int_vector[i]; 2670 int entry = ENETC_BDR_INT_BASE_IDX + i; 2671 2672 snprintf(v->name, sizeof(v->name), "%s-rxtx%d", 2673 priv->ndev->name, i); 2674 err = request_irq(irq, enetc_msix, IRQF_NO_AUTOEN, v->name, v); 2675 if (err) { 2676 dev_err(priv->dev, "request_irq() failed!\n"); 2677 goto irq_err; 2678 } 2679 2680 v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER); 2681 v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER); 2682 v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1); 2683 2684 enetc_wr(hw, ENETC_SIMSIRRV(i), entry); 2685 2686 for (j = 0; j < v->count_tx_rings; j++) { 2687 int idx = v->tx_ring[j].index; 2688 2689 enetc_wr(hw, ENETC_SIMSITRV(idx), entry); 2690 } 2691 irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus())); 2692 } 2693 2694 return 0; 2695 2696 irq_err: 2697 while (i--) { 2698 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); 2699 2700 irq_set_affinity_hint(irq, NULL); 2701 free_irq(irq, priv->int_vector[i]); 2702 } 2703 2704 return err; 2705 } 2706 2707 static void enetc_free_irqs(struct enetc_ndev_priv *priv) 2708 { 2709 struct pci_dev *pdev = priv->si->pdev; 2710 int i; 2711 2712 for (i = 0; i < priv->bdr_int_num; i++) { 2713 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); 2714 2715 irq_set_affinity_hint(irq, NULL); 2716 free_irq(irq, priv->int_vector[i]); 2717 } 2718 } 2719 2720 static void enetc_setup_interrupts(struct enetc_ndev_priv *priv) 2721 { 2722 struct enetc_hw *hw = &priv->si->hw; 2723 u32 icpt, ictt; 2724 int i; 2725 2726 /* enable Tx & Rx event indication */ 2727 if (priv->ic_mode & 2728 (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) { 2729 icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR); 2730 /* init to non-0 minimum, will be adjusted later */ 2731 ictt = 0x1; 2732 } else { 2733 icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */ 2734 ictt = 0; 2735 } 2736 2737 for (i = 0; i < priv->num_rx_rings; i++) { 2738 enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt); 2739 enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt); 2740 enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE); 2741 } 2742 2743 if (priv->ic_mode & ENETC_IC_TX_MANUAL) 2744 icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR); 2745 else 2746 icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */ 2747 2748 for (i = 0; i < priv->num_tx_rings; i++) { 2749 enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt); 2750 enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt); 2751 enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE); 2752 } 2753 } 2754 2755 static void enetc_clear_interrupts(struct enetc_ndev_priv *priv) 2756 { 2757 struct enetc_hw *hw = &priv->si->hw; 2758 int i; 2759 2760 for (i = 0; i < priv->num_tx_rings; i++) 2761 enetc_txbdr_wr(hw, i, ENETC_TBIER, 0); 2762 2763 for (i = 0; i < priv->num_rx_rings; i++) 2764 enetc_rxbdr_wr(hw, i, ENETC_RBIER, 0); 2765 } 2766 2767 static int enetc_phylink_connect(struct net_device *ndev) 2768 { 2769 struct enetc_ndev_priv 
*priv = netdev_priv(ndev); 2770 struct ethtool_keee edata; 2771 int err; 2772 2773 if (!priv->phylink) { 2774 /* phy-less mode */ 2775 netif_carrier_on(ndev); 2776 return 0; 2777 } 2778 2779 err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0); 2780 if (err) { 2781 dev_err(&ndev->dev, "could not attach to PHY\n"); 2782 return err; 2783 } 2784 2785 /* disable EEE autoneg, until ENETC driver supports it */ 2786 memset(&edata, 0, sizeof(struct ethtool_keee)); 2787 phylink_ethtool_set_eee(priv->phylink, &edata); 2788 2789 phylink_start(priv->phylink); 2790 2791 return 0; 2792 } 2793 2794 static void enetc_tx_onestep_tstamp(struct work_struct *work) 2795 { 2796 struct enetc_ndev_priv *priv; 2797 struct sk_buff *skb; 2798 2799 priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp); 2800 2801 netif_tx_lock_bh(priv->ndev); 2802 2803 clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags); 2804 skb = skb_dequeue(&priv->tx_skbs); 2805 if (skb) 2806 enetc_start_xmit(skb, priv->ndev); 2807 2808 netif_tx_unlock_bh(priv->ndev); 2809 } 2810 2811 static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv) 2812 { 2813 INIT_WORK(&priv->tx_onestep_tstamp, enetc_tx_onestep_tstamp); 2814 skb_queue_head_init(&priv->tx_skbs); 2815 } 2816 2817 void enetc_start(struct net_device *ndev) 2818 { 2819 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2820 int i; 2821 2822 enetc_setup_interrupts(priv); 2823 2824 for (i = 0; i < priv->bdr_int_num; i++) { 2825 int irq = pci_irq_vector(priv->si->pdev, 2826 ENETC_BDR_INT_BASE_IDX + i); 2827 2828 napi_enable(&priv->int_vector[i]->napi); 2829 enable_irq(irq); 2830 } 2831 2832 enetc_enable_tx_bdrs(priv); 2833 2834 enetc_enable_rx_bdrs(priv); 2835 2836 netif_tx_start_all_queues(ndev); 2837 2838 clear_bit(ENETC_TX_DOWN, &priv->flags); 2839 } 2840 EXPORT_SYMBOL_GPL(enetc_start); 2841 2842 int enetc_open(struct net_device *ndev) 2843 { 2844 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2845 struct enetc_bdr_resource *tx_res, *rx_res; 2846 bool extended; 2847 int err; 2848 2849 extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP); 2850 2851 err = clk_prepare_enable(priv->ref_clk); 2852 if (err) 2853 return err; 2854 2855 err = enetc_setup_irqs(priv); 2856 if (err) 2857 goto err_setup_irqs; 2858 2859 err = enetc_phylink_connect(ndev); 2860 if (err) 2861 goto err_phy_connect; 2862 2863 tx_res = enetc_alloc_tx_resources(priv); 2864 if (IS_ERR(tx_res)) { 2865 err = PTR_ERR(tx_res); 2866 goto err_alloc_tx; 2867 } 2868 2869 rx_res = enetc_alloc_rx_resources(priv, extended); 2870 if (IS_ERR(rx_res)) { 2871 err = PTR_ERR(rx_res); 2872 goto err_alloc_rx; 2873 } 2874 2875 enetc_tx_onestep_tstamp_init(priv); 2876 enetc_assign_tx_resources(priv, tx_res); 2877 enetc_assign_rx_resources(priv, rx_res); 2878 enetc_setup_bdrs(priv, extended); 2879 enetc_start(ndev); 2880 2881 return 0; 2882 2883 err_alloc_rx: 2884 enetc_free_tx_resources(tx_res, priv->num_tx_rings); 2885 err_alloc_tx: 2886 if (priv->phylink) 2887 phylink_disconnect_phy(priv->phylink); 2888 err_phy_connect: 2889 enetc_free_irqs(priv); 2890 err_setup_irqs: 2891 clk_disable_unprepare(priv->ref_clk); 2892 2893 return err; 2894 } 2895 EXPORT_SYMBOL_GPL(enetc_open); 2896 2897 void enetc_stop(struct net_device *ndev) 2898 { 2899 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2900 int i; 2901 2902 set_bit(ENETC_TX_DOWN, &priv->flags); 2903 2904 netif_tx_stop_all_queues(ndev); 2905 2906 enetc_disable_rx_bdrs(priv); 2907 2908 enetc_wait_bdrs(priv); 2909 2910 
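/* Tx traffic was stopped above (ENETC_TX_DOWN is set and the stack
 * queues are frozen) and enetc_wait_bdrs() waited for the Tx BD rings
 * to go idle, so they can now be disabled before the interrupt vectors
 * and NAPI contexts are quiesced below.
 */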
enetc_disable_tx_bdrs(priv); 2911 2912 for (i = 0; i < priv->bdr_int_num; i++) { 2913 int irq = pci_irq_vector(priv->si->pdev, 2914 ENETC_BDR_INT_BASE_IDX + i); 2915 2916 disable_irq(irq); 2917 napi_synchronize(&priv->int_vector[i]->napi); 2918 napi_disable(&priv->int_vector[i]->napi); 2919 } 2920 2921 enetc_clear_interrupts(priv); 2922 } 2923 EXPORT_SYMBOL_GPL(enetc_stop); 2924 2925 int enetc_close(struct net_device *ndev) 2926 { 2927 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2928 2929 enetc_stop(ndev); 2930 2931 if (priv->phylink) { 2932 phylink_stop(priv->phylink); 2933 phylink_disconnect_phy(priv->phylink); 2934 } else { 2935 netif_carrier_off(ndev); 2936 } 2937 2938 enetc_free_rxtx_rings(priv); 2939 2940 /* Avoids dangling pointers and also frees old resources */ 2941 enetc_assign_rx_resources(priv, NULL); 2942 enetc_assign_tx_resources(priv, NULL); 2943 2944 enetc_free_irqs(priv); 2945 clk_disable_unprepare(priv->ref_clk); 2946 2947 return 0; 2948 } 2949 EXPORT_SYMBOL_GPL(enetc_close); 2950 2951 static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended, 2952 int (*cb)(struct enetc_ndev_priv *priv, void *ctx), 2953 void *ctx) 2954 { 2955 struct enetc_bdr_resource *tx_res, *rx_res; 2956 int err; 2957 2958 ASSERT_RTNL(); 2959 2960 /* If the interface is down, run the callback right away, 2961 * without reconfiguration. 2962 */ 2963 if (!netif_running(priv->ndev)) { 2964 if (cb) { 2965 err = cb(priv, ctx); 2966 if (err) 2967 return err; 2968 } 2969 2970 return 0; 2971 } 2972 2973 tx_res = enetc_alloc_tx_resources(priv); 2974 if (IS_ERR(tx_res)) { 2975 err = PTR_ERR(tx_res); 2976 goto out; 2977 } 2978 2979 rx_res = enetc_alloc_rx_resources(priv, extended); 2980 if (IS_ERR(rx_res)) { 2981 err = PTR_ERR(rx_res); 2982 goto out_free_tx_res; 2983 } 2984 2985 enetc_stop(priv->ndev); 2986 enetc_free_rxtx_rings(priv); 2987 2988 /* Interface is down, run optional callback now */ 2989 if (cb) { 2990 err = cb(priv, ctx); 2991 if (err) 2992 goto out_restart; 2993 } 2994 2995 enetc_assign_tx_resources(priv, tx_res); 2996 enetc_assign_rx_resources(priv, rx_res); 2997 enetc_setup_bdrs(priv, extended); 2998 enetc_start(priv->ndev); 2999 3000 return 0; 3001 3002 out_restart: 3003 enetc_setup_bdrs(priv, extended); 3004 enetc_start(priv->ndev); 3005 enetc_free_rx_resources(rx_res, priv->num_rx_rings); 3006 out_free_tx_res: 3007 enetc_free_tx_resources(tx_res, priv->num_tx_rings); 3008 out: 3009 return err; 3010 } 3011 3012 static void enetc_debug_tx_ring_prios(struct enetc_ndev_priv *priv) 3013 { 3014 int i; 3015 3016 for (i = 0; i < priv->num_tx_rings; i++) 3017 netdev_dbg(priv->ndev, "TX ring %d prio %d\n", i, 3018 priv->tx_ring[i]->prio); 3019 } 3020 3021 void enetc_reset_tc_mqprio(struct net_device *ndev) 3022 { 3023 struct enetc_ndev_priv *priv = netdev_priv(ndev); 3024 struct enetc_hw *hw = &priv->si->hw; 3025 struct enetc_bdr *tx_ring; 3026 int num_stack_tx_queues; 3027 int i; 3028 3029 num_stack_tx_queues = enetc_num_stack_tx_queues(priv); 3030 3031 netdev_reset_tc(ndev); 3032 netif_set_real_num_tx_queues(ndev, num_stack_tx_queues); 3033 priv->min_num_stack_tx_queues = num_possible_cpus(); 3034 3035 /* Reset all ring priorities to 0 */ 3036 for (i = 0; i < priv->num_tx_rings; i++) { 3037 tx_ring = priv->tx_ring[i]; 3038 tx_ring->prio = 0; 3039 enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); 3040 } 3041 3042 enetc_debug_tx_ring_prios(priv); 3043 3044 enetc_change_preemptible_tcs(priv, 0); 3045 } 3046 EXPORT_SYMBOL_GPL(enetc_reset_tc_mqprio); 3047 3048 int 
enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) 3049 { 3050 struct tc_mqprio_qopt_offload *mqprio = type_data; 3051 struct enetc_ndev_priv *priv = netdev_priv(ndev); 3052 struct tc_mqprio_qopt *qopt = &mqprio->qopt; 3053 struct enetc_hw *hw = &priv->si->hw; 3054 int num_stack_tx_queues = 0; 3055 struct enetc_bdr *tx_ring; 3056 u8 num_tc = qopt->num_tc; 3057 int offset, count; 3058 int err, tc, q; 3059 3060 if (!num_tc) { 3061 enetc_reset_tc_mqprio(ndev); 3062 return 0; 3063 } 3064 3065 err = netdev_set_num_tc(ndev, num_tc); 3066 if (err) 3067 return err; 3068 3069 for (tc = 0; tc < num_tc; tc++) { 3070 offset = qopt->offset[tc]; 3071 count = qopt->count[tc]; 3072 num_stack_tx_queues += count; 3073 3074 err = netdev_set_tc_queue(ndev, tc, count, offset); 3075 if (err) 3076 goto err_reset_tc; 3077 3078 for (q = offset; q < offset + count; q++) { 3079 tx_ring = priv->tx_ring[q]; 3080 /* The prio_tc_map is skb_tx_hash()'s way of selecting 3081 * between TX queues based on skb->priority. As such, 3082 * there's nothing to offload based on it. 3083 * Make the mqprio "traffic class" be the priority of 3084 * this ring group, and leave the Tx IPV to traffic 3085 * class mapping as its default mapping value of 1:1. 3086 */ 3087 tx_ring->prio = tc; 3088 enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); 3089 } 3090 } 3091 3092 err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues); 3093 if (err) 3094 goto err_reset_tc; 3095 3096 priv->min_num_stack_tx_queues = num_stack_tx_queues; 3097 3098 enetc_debug_tx_ring_prios(priv); 3099 3100 enetc_change_preemptible_tcs(priv, mqprio->preemptible_tcs); 3101 3102 return 0; 3103 3104 err_reset_tc: 3105 enetc_reset_tc_mqprio(ndev); 3106 return err; 3107 } 3108 EXPORT_SYMBOL_GPL(enetc_setup_tc_mqprio); 3109 3110 static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx) 3111 { 3112 struct bpf_prog *old_prog, *prog = ctx; 3113 int num_stack_tx_queues; 3114 int err, i; 3115 3116 old_prog = xchg(&priv->xdp_prog, prog); 3117 3118 num_stack_tx_queues = enetc_num_stack_tx_queues(priv); 3119 err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues); 3120 if (err) { 3121 xchg(&priv->xdp_prog, old_prog); 3122 return err; 3123 } 3124 3125 if (old_prog) 3126 bpf_prog_put(old_prog); 3127 3128 for (i = 0; i < priv->num_rx_rings; i++) { 3129 struct enetc_bdr *rx_ring = priv->rx_ring[i]; 3130 3131 rx_ring->xdp.prog = prog; 3132 3133 if (prog) 3134 rx_ring->buffer_offset = XDP_PACKET_HEADROOM; 3135 else 3136 rx_ring->buffer_offset = ENETC_RXB_PAD; 3137 } 3138 3139 return 0; 3140 } 3141 3142 static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog, 3143 struct netlink_ext_ack *extack) 3144 { 3145 int num_xdp_tx_queues = prog ? num_possible_cpus() : 0; 3146 struct enetc_ndev_priv *priv = netdev_priv(ndev); 3147 bool extended; 3148 3149 if (priv->min_num_stack_tx_queues + num_xdp_tx_queues > 3150 priv->num_tx_rings) { 3151 NL_SET_ERR_MSG_FMT_MOD(extack, 3152 "Reserving %d XDP TXQs leaves under %d for stack (total %d)", 3153 num_xdp_tx_queues, 3154 priv->min_num_stack_tx_queues, 3155 priv->num_tx_rings); 3156 return -EBUSY; 3157 } 3158 3159 extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP); 3160 3161 /* The buffer layout is changing, so we need to drain the old 3162 * RX buffers and seed new ones. 
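* (enetc_reconfigure_xdp_cb() flips rx_ring->buffer_offset between
* ENETC_RXB_PAD and XDP_PACKET_HEADROOM, and enetc_setup_rxbdr() then
* programs the matching ENETC_RBBSR buffer size.)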
3163 */ 3164 return enetc_reconfigure(priv, extended, enetc_reconfigure_xdp_cb, prog); 3165 } 3166 3167 int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf) 3168 { 3169 switch (bpf->command) { 3170 case XDP_SETUP_PROG: 3171 return enetc_setup_xdp_prog(ndev, bpf->prog, bpf->extack); 3172 default: 3173 return -EINVAL; 3174 } 3175 3176 return 0; 3177 } 3178 EXPORT_SYMBOL_GPL(enetc_setup_bpf); 3179 3180 struct net_device_stats *enetc_get_stats(struct net_device *ndev) 3181 { 3182 struct enetc_ndev_priv *priv = netdev_priv(ndev); 3183 struct net_device_stats *stats = &ndev->stats; 3184 unsigned long packets = 0, bytes = 0; 3185 unsigned long tx_dropped = 0; 3186 int i; 3187 3188 for (i = 0; i < priv->num_rx_rings; i++) { 3189 packets += priv->rx_ring[i]->stats.packets; 3190 bytes += priv->rx_ring[i]->stats.bytes; 3191 } 3192 3193 stats->rx_packets = packets; 3194 stats->rx_bytes = bytes; 3195 bytes = 0; 3196 packets = 0; 3197 3198 for (i = 0; i < priv->num_tx_rings; i++) { 3199 packets += priv->tx_ring[i]->stats.packets; 3200 bytes += priv->tx_ring[i]->stats.bytes; 3201 tx_dropped += priv->tx_ring[i]->stats.win_drop; 3202 } 3203 3204 stats->tx_packets = packets; 3205 stats->tx_bytes = bytes; 3206 stats->tx_dropped = tx_dropped; 3207 3208 return stats; 3209 } 3210 EXPORT_SYMBOL_GPL(enetc_get_stats); 3211 3212 static int enetc_set_rss(struct net_device *ndev, int en) 3213 { 3214 struct enetc_ndev_priv *priv = netdev_priv(ndev); 3215 struct enetc_hw *hw = &priv->si->hw; 3216 u32 reg; 3217 3218 enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings); 3219 3220 reg = enetc_rd(hw, ENETC_SIMR); 3221 reg &= ~ENETC_SIMR_RSSE; 3222 reg |= (en) ? ENETC_SIMR_RSSE : 0; 3223 enetc_wr(hw, ENETC_SIMR, reg); 3224 3225 return 0; 3226 } 3227 3228 static void enetc_enable_rxvlan(struct net_device *ndev, bool en) 3229 { 3230 struct enetc_ndev_priv *priv = netdev_priv(ndev); 3231 struct enetc_hw *hw = &priv->si->hw; 3232 int i; 3233 3234 for (i = 0; i < priv->num_rx_rings; i++) 3235 enetc_bdr_enable_rxvlan(hw, i, en); 3236 } 3237 3238 static void enetc_enable_txvlan(struct net_device *ndev, bool en) 3239 { 3240 struct enetc_ndev_priv *priv = netdev_priv(ndev); 3241 struct enetc_hw *hw = &priv->si->hw; 3242 int i; 3243 3244 for (i = 0; i < priv->num_tx_rings; i++) 3245 enetc_bdr_enable_txvlan(hw, i, en); 3246 } 3247 3248 void enetc_set_features(struct net_device *ndev, netdev_features_t features) 3249 { 3250 netdev_features_t changed = ndev->features ^ features; 3251 3252 if (changed & NETIF_F_RXHASH) 3253 enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH)); 3254 3255 if (changed & NETIF_F_HW_VLAN_CTAG_RX) 3256 enetc_enable_rxvlan(ndev, 3257 !!(features & NETIF_F_HW_VLAN_CTAG_RX)); 3258 3259 if (changed & NETIF_F_HW_VLAN_CTAG_TX) 3260 enetc_enable_txvlan(ndev, 3261 !!(features & NETIF_F_HW_VLAN_CTAG_TX)); 3262 } 3263 EXPORT_SYMBOL_GPL(enetc_set_features); 3264 3265 static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr) 3266 { 3267 struct enetc_ndev_priv *priv = netdev_priv(ndev); 3268 int err, new_offloads = priv->active_offloads; 3269 struct hwtstamp_config config; 3270 3271 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 3272 return -EFAULT; 3273 3274 switch (config.tx_type) { 3275 case HWTSTAMP_TX_OFF: 3276 new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; 3277 break; 3278 case HWTSTAMP_TX_ON: 3279 new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; 3280 new_offloads |= ENETC_F_TX_TSTAMP; 3281 break; 3282 case HWTSTAMP_TX_ONESTEP_SYNC: 3283 if (!enetc_si_is_pf(priv->si)) 3284 return -EOPNOTSUPP; 
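/* One-step sync frames are serialized by the driver: they are queued
 * on priv->tx_skbs and transmitted one at a time from the
 * tx_onestep_tstamp work item (see enetc_tx_onestep_tstamp()).
 */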
3285 3286 new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; 3287 new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP; 3288 break; 3289 default: 3290 return -ERANGE; 3291 } 3292 3293 switch (config.rx_filter) { 3294 case HWTSTAMP_FILTER_NONE: 3295 new_offloads &= ~ENETC_F_RX_TSTAMP; 3296 break; 3297 default: 3298 new_offloads |= ENETC_F_RX_TSTAMP; 3299 config.rx_filter = HWTSTAMP_FILTER_ALL; 3300 } 3301 3302 if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) { 3303 bool extended = !!(new_offloads & ENETC_F_RX_TSTAMP); 3304 3305 err = enetc_reconfigure(priv, extended, NULL, NULL); 3306 if (err) 3307 return err; 3308 } 3309 3310 priv->active_offloads = new_offloads; 3311 3312 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 3313 -EFAULT : 0; 3314 } 3315 3316 static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr) 3317 { 3318 struct enetc_ndev_priv *priv = netdev_priv(ndev); 3319 struct hwtstamp_config config; 3320 3321 config.flags = 0; 3322 3323 if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) 3324 config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC; 3325 else if (priv->active_offloads & ENETC_F_TX_TSTAMP) 3326 config.tx_type = HWTSTAMP_TX_ON; 3327 else 3328 config.tx_type = HWTSTAMP_TX_OFF; 3329 3330 config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ? 3331 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; 3332 3333 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 3334 -EFAULT : 0; 3335 } 3336 3337 int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 3338 { 3339 struct enetc_ndev_priv *priv = netdev_priv(ndev); 3340 3341 if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) { 3342 if (cmd == SIOCSHWTSTAMP) 3343 return enetc_hwtstamp_set(ndev, rq); 3344 if (cmd == SIOCGHWTSTAMP) 3345 return enetc_hwtstamp_get(ndev, rq); 3346 } 3347 3348 if (!priv->phylink) 3349 return -EOPNOTSUPP; 3350 3351 return phylink_mii_ioctl(priv->phylink, rq, cmd); 3352 } 3353 EXPORT_SYMBOL_GPL(enetc_ioctl); 3354 3355 static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i, 3356 int v_tx_rings) 3357 { 3358 struct enetc_int_vector *v; 3359 struct enetc_bdr *bdr; 3360 int j, err; 3361 3362 v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL); 3363 if (!v) 3364 return -ENOMEM; 3365 3366 priv->int_vector[i] = v; 3367 bdr = &v->rx_ring; 3368 bdr->index = i; 3369 bdr->ndev = priv->ndev; 3370 bdr->dev = priv->dev; 3371 bdr->bd_count = priv->rx_bd_count; 3372 bdr->buffer_offset = ENETC_RXB_PAD; 3373 priv->rx_ring[i] = bdr; 3374 3375 err = __xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0, 3376 ENETC_RXB_DMA_SIZE_XDP); 3377 if (err) 3378 goto free_vector; 3379 3380 err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq, MEM_TYPE_PAGE_SHARED, 3381 NULL); 3382 if (err) { 3383 xdp_rxq_info_unreg(&bdr->xdp.rxq); 3384 goto free_vector; 3385 } 3386 3387 /* init defaults for adaptive IC */ 3388 if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) { 3389 v->rx_ictt = 0x1; 3390 v->rx_dim_en = true; 3391 } 3392 3393 INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work); 3394 netif_napi_add(priv->ndev, &v->napi, enetc_poll); 3395 v->count_tx_rings = v_tx_rings; 3396 3397 for (j = 0; j < v_tx_rings; j++) { 3398 int idx; 3399 3400 /* default tx ring mapping policy */ 3401 idx = priv->bdr_int_num * j + i; 3402 __set_bit(idx, &v->tx_rings_map); 3403 bdr = &v->tx_ring[j]; 3404 bdr->index = idx; 3405 bdr->ndev = priv->ndev; 3406 bdr->dev = priv->dev; 3407 bdr->bd_count = priv->tx_bd_count; 3408 priv->tx_ring[idx] = bdr; 3409 } 3410 3411 return 0; 3412 3413 free_vector: 3414 priv->rx_ring[i] = 
NULL; 3415 priv->int_vector[i] = NULL; 3416 kfree(v); 3417 3418 return err; 3419 } 3420 3421 static void enetc_int_vector_destroy(struct enetc_ndev_priv *priv, int i) 3422 { 3423 struct enetc_int_vector *v = priv->int_vector[i]; 3424 struct enetc_bdr *rx_ring = &v->rx_ring; 3425 int j, tx_ring_index; 3426 3427 xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq); 3428 xdp_rxq_info_unreg(&rx_ring->xdp.rxq); 3429 netif_napi_del(&v->napi); 3430 cancel_work_sync(&v->rx_dim.work); 3431 3432 for (j = 0; j < v->count_tx_rings; j++) { 3433 tx_ring_index = priv->bdr_int_num * j + i; 3434 priv->tx_ring[tx_ring_index] = NULL; 3435 } 3436 3437 priv->rx_ring[i] = NULL; 3438 priv->int_vector[i] = NULL; 3439 kfree(v); 3440 } 3441 3442 int enetc_alloc_msix(struct enetc_ndev_priv *priv) 3443 { 3444 struct pci_dev *pdev = priv->si->pdev; 3445 int v_tx_rings, v_remainder; 3446 int num_stack_tx_queues; 3447 int first_xdp_tx_ring; 3448 int i, n, err, nvec; 3449 3450 nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num; 3451 /* allocate MSIX for both messaging and Rx/Tx interrupts */ 3452 n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX); 3453 3454 if (n < 0) 3455 return n; 3456 3457 if (n != nvec) 3458 return -EPERM; 3459 3460 /* # of tx rings per int vector */ 3461 v_tx_rings = priv->num_tx_rings / priv->bdr_int_num; 3462 v_remainder = priv->num_tx_rings % priv->bdr_int_num; 3463 3464 for (i = 0; i < priv->bdr_int_num; i++) { 3465 /* Distribute the remaining TX rings to the first v_remainder 3466 * interrupt vectors 3467 */ 3468 int num_tx_rings = i < v_remainder ? v_tx_rings + 1 : v_tx_rings; 3469 3470 err = enetc_int_vector_init(priv, i, num_tx_rings); 3471 if (err) 3472 goto fail; 3473 } 3474 3475 num_stack_tx_queues = enetc_num_stack_tx_queues(priv); 3476 3477 err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues); 3478 if (err) 3479 goto fail; 3480 3481 err = netif_set_real_num_rx_queues(priv->ndev, priv->num_rx_rings); 3482 if (err) 3483 goto fail; 3484 3485 priv->min_num_stack_tx_queues = num_possible_cpus(); 3486 first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus(); 3487 priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring]; 3488 3489 return 0; 3490 3491 fail: 3492 while (i--) 3493 enetc_int_vector_destroy(priv, i); 3494 3495 pci_free_irq_vectors(pdev); 3496 3497 return err; 3498 } 3499 EXPORT_SYMBOL_GPL(enetc_alloc_msix); 3500 3501 void enetc_free_msix(struct enetc_ndev_priv *priv) 3502 { 3503 int i; 3504 3505 for (i = 0; i < priv->bdr_int_num; i++) 3506 enetc_int_vector_destroy(priv, i); 3507 3508 /* disable all MSIX for this device */ 3509 pci_free_irq_vectors(priv->si->pdev); 3510 } 3511 EXPORT_SYMBOL_GPL(enetc_free_msix); 3512 3513 static void enetc_kfree_si(struct enetc_si *si) 3514 { 3515 char *p = (char *)si - si->pad; 3516 3517 kfree(p); 3518 } 3519 3520 static void enetc_detect_errata(struct enetc_si *si) 3521 { 3522 if (si->pdev->revision == ENETC_REV1) 3523 si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP; 3524 } 3525 3526 int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv) 3527 { 3528 struct enetc_si *si, *p; 3529 struct enetc_hw *hw; 3530 size_t alloc_size; 3531 int err, len; 3532 3533 pcie_flr(pdev); 3534 err = pci_enable_device_mem(pdev); 3535 if (err) 3536 return dev_err_probe(&pdev->dev, err, "device enable failed\n"); 3537 3538 /* set up for high or low dma */ 3539 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 3540 if (err) { 3541 dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); 3542 goto err_dma; 3543 } 
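/* Reserve the BAR ranges before mapping ENETC_BAR_REGS below; the port
 * and global register blocks are only mapped when the BAR is large
 * enough (see the ENETC_PORT_BASE / ENETC_GLOBAL_BASE checks).
 */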
3544 3545 err = pci_request_mem_regions(pdev, name); 3546 if (err) { 3547 dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err); 3548 goto err_pci_mem_reg; 3549 } 3550 3551 pci_set_master(pdev); 3552 3553 alloc_size = sizeof(struct enetc_si); 3554 if (sizeof_priv) { 3555 /* align priv to 32B */ 3556 alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN); 3557 alloc_size += sizeof_priv; 3558 } 3559 /* force 32B alignment for enetc_si */ 3560 alloc_size += ENETC_SI_ALIGN - 1; 3561 3562 p = kzalloc(alloc_size, GFP_KERNEL); 3563 if (!p) { 3564 err = -ENOMEM; 3565 goto err_alloc_si; 3566 } 3567 3568 si = PTR_ALIGN(p, ENETC_SI_ALIGN); 3569 si->pad = (char *)si - (char *)p; 3570 3571 pci_set_drvdata(pdev, si); 3572 si->pdev = pdev; 3573 hw = &si->hw; 3574 3575 len = pci_resource_len(pdev, ENETC_BAR_REGS); 3576 hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len); 3577 if (!hw->reg) { 3578 err = -ENXIO; 3579 dev_err(&pdev->dev, "ioremap() failed\n"); 3580 goto err_ioremap; 3581 } 3582 if (len > ENETC_PORT_BASE) 3583 hw->port = hw->reg + ENETC_PORT_BASE; 3584 if (len > ENETC_GLOBAL_BASE) 3585 hw->global = hw->reg + ENETC_GLOBAL_BASE; 3586 3587 enetc_detect_errata(si); 3588 3589 return 0; 3590 3591 err_ioremap: 3592 enetc_kfree_si(si); 3593 err_alloc_si: 3594 pci_release_mem_regions(pdev); 3595 err_pci_mem_reg: 3596 err_dma: 3597 pci_disable_device(pdev); 3598 3599 return err; 3600 } 3601 EXPORT_SYMBOL_GPL(enetc_pci_probe); 3602 3603 void enetc_pci_remove(struct pci_dev *pdev) 3604 { 3605 struct enetc_si *si = pci_get_drvdata(pdev); 3606 struct enetc_hw *hw = &si->hw; 3607 3608 iounmap(hw->reg); 3609 enetc_kfree_si(si); 3610 pci_release_mem_regions(pdev); 3611 pci_disable_device(pdev); 3612 } 3613 EXPORT_SYMBOL_GPL(enetc_pci_remove); 3614 3615 static const struct enetc_drvdata enetc_pf_data = { 3616 .sysclk_freq = ENETC_CLK_400M, 3617 .pmac_offset = ENETC_PMAC_OFFSET, 3618 .max_frags = ENETC_MAX_SKB_FRAGS, 3619 .eth_ops = &enetc_pf_ethtool_ops, 3620 }; 3621 3622 static const struct enetc_drvdata enetc4_pf_data = { 3623 .sysclk_freq = ENETC_CLK_333M, 3624 .tx_csum = true, 3625 .max_frags = ENETC4_MAX_SKB_FRAGS, 3626 .pmac_offset = ENETC4_PMAC_OFFSET, 3627 .eth_ops = &enetc4_pf_ethtool_ops, 3628 }; 3629 3630 static const struct enetc_drvdata enetc_vf_data = { 3631 .sysclk_freq = ENETC_CLK_400M, 3632 .max_frags = ENETC_MAX_SKB_FRAGS, 3633 .eth_ops = &enetc_vf_ethtool_ops, 3634 }; 3635 3636 static const struct enetc_platform_info enetc_info[] = { 3637 { .revision = ENETC_REV_1_0, 3638 .dev_id = ENETC_DEV_ID_PF, 3639 .data = &enetc_pf_data, 3640 }, 3641 { .revision = ENETC_REV_4_1, 3642 .dev_id = NXP_ENETC_PF_DEV_ID, 3643 .data = &enetc4_pf_data, 3644 }, 3645 { .revision = ENETC_REV_1_0, 3646 .dev_id = ENETC_DEV_ID_VF, 3647 .data = &enetc_vf_data, 3648 }, 3649 }; 3650 3651 int enetc_get_driver_data(struct enetc_si *si) 3652 { 3653 u16 dev_id = si->pdev->device; 3654 int i; 3655 3656 for (i = 0; i < ARRAY_SIZE(enetc_info); i++) { 3657 if (si->revision == enetc_info[i].revision && 3658 dev_id == enetc_info[i].dev_id) { 3659 si->drvdata = enetc_info[i].data; 3660 3661 return 0; 3662 } 3663 } 3664 3665 return -ERANGE; 3666 } 3667 EXPORT_SYMBOL_GPL(enetc_get_driver_data); 3668 3669 MODULE_DESCRIPTION("NXP ENETC Ethernet driver"); 3670 MODULE_LICENSE("Dual BSD/GPL"); 3671
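/* Usage sketch (illustrative only, not compiled as part of this file): a
 * minimal PCI glue driver would chain the exported helpers above roughly as
 * follows. The field wiring and call order shown here are assumptions
 * modelled on the PF/VF glue drivers, not a verbatim copy of them; error
 * handling is abbreviated.
 */
#if 0
static int example_enetc_glue_probe(struct pci_dev *pdev,
				    const struct pci_device_id *id)
{
	struct enetc_ndev_priv *priv;
	struct net_device *ndev;
	struct enetc_si *si;
	int err;

	/* Enable the device, map ENETC_BAR_REGS, allocate struct enetc_si */
	err = enetc_pci_probe(pdev, KBUILD_MODNAME, 0);
	if (err)
		return err;

	si = pci_get_drvdata(pdev);

	/* Discover ring/RSS/flow-steering capabilities, match drvdata */
	enetc_get_si_caps(si);
	err = enetc_get_driver_data(si);
	if (err)
		goto err_unwind_pci;

	ndev = alloc_etherdev_mq(sizeof(*priv), si->num_tx_rings);
	if (!ndev) {
		err = -ENOMEM;
		goto err_unwind_pci;
	}

	priv = netdev_priv(ndev);
	priv->si = si;
	priv->ndev = ndev;
	priv->dev = &pdev->dev;
	priv->sysclk_freq = si->drvdata->sysclk_freq;
	SET_NETDEV_DEV(ndev, &pdev->dev);
	/* ndev->netdev_ops would point at enetc_open/enetc_close/... */

	enetc_init_si_rings_params(priv);

	err = enetc_alloc_si_resources(priv);
	if (err)
		goto err_free_netdev;

	err = enetc_configure_si(priv);
	if (err)
		goto err_free_si;

	err = enetc_alloc_msix(priv);
	if (err)
		goto err_free_si;

	err = register_netdev(ndev);
	if (err)
		goto err_free_msix;

	return 0;

err_free_msix:
	enetc_free_msix(priv);
err_free_si:
	enetc_free_si_resources(priv);
err_free_netdev:
	free_netdev(ndev);
err_unwind_pci:
	enetc_pci_remove(pdev);
	return err;
}
#endif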