1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2017-2019 NXP */
3
4 #include "enetc.h"
5 #include <linux/bpf_trace.h>
6 #include <linux/clk.h>
7 #include <linux/tcp.h>
8 #include <linux/udp.h>
9 #include <linux/vmalloc.h>
10 #include <linux/ptp_classify.h>
11 #include <net/ip6_checksum.h>
12 #include <net/pkt_sched.h>
13 #include <net/tso.h>
14
u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg)
16 {
17 return enetc_port_rd(&si->hw, reg);
18 }
19 EXPORT_SYMBOL_GPL(enetc_port_mac_rd);
20
void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val)
22 {
23 enetc_port_wr(&si->hw, reg, val);
24 if (si->hw_features & ENETC_SI_F_QBU)
25 enetc_port_wr(&si->hw, reg + si->drvdata->pmac_offset, val);
26 }
27 EXPORT_SYMBOL_GPL(enetc_port_mac_wr);
28
static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv,
30 u8 preemptible_tcs)
31 {
32 if (!(priv->si->hw_features & ENETC_SI_F_QBU))
33 return;
34
35 priv->preemptible_tcs = preemptible_tcs;
36 enetc_mm_commit_preemptible_tcs(priv);
37 }
38
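/* Descriptive note: when an XDP program is attached, one TX ring per possible
 * CPU is set aside for XDP_TX/XDP_REDIRECT (see the per-CPU
 * priv->xdp_tx_ring lookup in enetc_xdp_xmit() below), so only the remaining
 * rings are exposed to the network stack as TX queues. E.g. with 8 TX rings
 * and 2 possible CPUs the stack is left with 6 queues; the actual ring
 * partitioning is done at setup time, outside this excerpt.
 */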
static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
40 {
41 int num_tx_rings = priv->num_tx_rings;
42
43 if (priv->xdp_prog)
44 return num_tx_rings - num_possible_cpus();
45
46 return num_tx_rings;
47 }
48
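/* Map an XDP TX ring back to its paired RX ring: the pointer arithmetic below
 * recovers the position of tx_ring within the priv->xdp_tx_ring array, and
 * since XDP_TX from RX ring N is enqueued on xdp_tx_ring[N] (see the
 * priv->xdp_tx_ring[rx_ring->index] lookup in enetc_clean_rx_ring_xdp()),
 * that offset doubles as the RX ring index.
 */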
static struct enetc_bdr *enetc_rx_ring_from_xdp_tx_ring(struct enetc_ndev_priv *priv,
50 struct enetc_bdr *tx_ring)
51 {
52 int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring;
53
54 return priv->rx_ring[index];
55 }
56
static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd)
58 {
59 if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect)
60 return NULL;
61
62 return tx_swbd->skb;
63 }
64
65 static struct xdp_frame *
enetc_tx_swbd_get_xdp_frame(struct enetc_tx_swbd *tx_swbd)
67 {
68 if (tx_swbd->is_xdp_redirect)
69 return tx_swbd->xdp_frame;
70
71 return NULL;
72 }
73
static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
75 struct enetc_tx_swbd *tx_swbd)
76 {
	/* For XDP_TX, pages come from RX, whereas for the other contexts where
	 * we have is_dma_page set, those come from skb_frag_dma_map. We need
	 * to match the DMA mapping length, so we need to differentiate those.
	 */
81 if (tx_swbd->is_dma_page)
82 dma_unmap_page(tx_ring->dev, tx_swbd->dma,
83 tx_swbd->is_xdp_tx ? PAGE_SIZE : tx_swbd->len,
84 tx_swbd->dir);
85 else
86 dma_unmap_single(tx_ring->dev, tx_swbd->dma,
87 tx_swbd->len, tx_swbd->dir);
88 tx_swbd->dma = 0;
89 }
90
static void enetc_free_tx_frame(struct enetc_bdr *tx_ring,
92 struct enetc_tx_swbd *tx_swbd)
93 {
94 struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
95 struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
96
97 if (tx_swbd->dma)
98 enetc_unmap_tx_buff(tx_ring, tx_swbd);
99
100 if (xdp_frame) {
101 xdp_return_frame(tx_swbd->xdp_frame);
102 tx_swbd->xdp_frame = NULL;
103 } else if (skb) {
104 dev_kfree_skb_any(skb);
105 tx_swbd->skb = NULL;
106 }
107 }
108
109 /* Let H/W know BD ring has been updated */
static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring)
111 {
112 /* includes wmb() */
113 enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use);
114 }
115
static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
117 u8 *msgtype, u8 *twostep,
118 u16 *correction_offset, u16 *body_offset)
119 {
120 unsigned int ptp_class;
121 struct ptp_header *hdr;
122 unsigned int type;
123 u8 *base;
124
125 ptp_class = ptp_classify_raw(skb);
126 if (ptp_class == PTP_CLASS_NONE)
127 return -EINVAL;
128
129 hdr = ptp_parse_header(skb, ptp_class);
130 if (!hdr)
131 return -EINVAL;
132
133 type = ptp_class & PTP_CLASS_PMASK;
134 if (type == PTP_CLASS_IPV4 || type == PTP_CLASS_IPV6)
135 *udp = 1;
136 else
137 *udp = 0;
138
139 *msgtype = ptp_get_msgtype(hdr, ptp_class);
140 *twostep = hdr->flag_field[0] & 0x2;
141
142 base = skb_mac_header(skb);
143 *correction_offset = (u8 *)&hdr->correction - base;
144 *body_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;
145
146 return 0;
147 }
148
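/* Map a non-TSO skb to TX BDs: the first BD carries the head fragment plus
 * frm_len and the offload flags, an optional extension BD is appended for
 * VLAN insertion and/or PTP timestamping, and one BD is added per page
 * fragment, with the final BD getting the 'F' bit. Returns the number of BDs
 * used, or 0 if a DMA mapping failed (in which case all BDs mapped so far
 * are unwound).
 */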
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
150 {
151 bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
152 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
153 struct enetc_hw *hw = &priv->si->hw;
154 struct enetc_tx_swbd *tx_swbd;
155 int len = skb_headlen(skb);
156 union enetc_tx_bd temp_bd;
157 u8 msgtype, twostep, udp;
158 union enetc_tx_bd *txbd;
159 u16 offset1, offset2;
160 int i, count = 0;
161 skb_frag_t *frag;
162 unsigned int f;
163 dma_addr_t dma;
164 u8 flags = 0;
165
166 i = tx_ring->next_to_use;
167 txbd = ENETC_TXBD(*tx_ring, i);
168 prefetchw(txbd);
169
170 dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
171 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
172 goto dma_err;
173
174 temp_bd.addr = cpu_to_le64(dma);
175 temp_bd.buf_len = cpu_to_le16(len);
176 temp_bd.lstatus = 0;
177
178 tx_swbd = &tx_ring->tx_swbd[i];
179 tx_swbd->dma = dma;
180 tx_swbd->len = len;
181 tx_swbd->is_dma_page = 0;
182 tx_swbd->dir = DMA_TO_DEVICE;
183 count++;
184
185 do_vlan = skb_vlan_tag_present(skb);
186 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
187 if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1,
188 &offset2) ||
189 msgtype != PTP_MSGTYPE_SYNC || twostep)
190 WARN_ONCE(1, "Bad packet for one-step timestamping\n");
191 else
192 do_onestep_tstamp = true;
193 } else if (skb->cb[0] & ENETC_F_TX_TSTAMP) {
194 do_twostep_tstamp = true;
195 }
196
197 tx_swbd->do_twostep_tstamp = do_twostep_tstamp;
198 tx_swbd->qbv_en = !!(priv->active_offloads & ENETC_F_QBV);
199 tx_swbd->check_wb = tx_swbd->do_twostep_tstamp || tx_swbd->qbv_en;
200
201 if (do_vlan || do_onestep_tstamp || do_twostep_tstamp)
202 flags |= ENETC_TXBD_FLAGS_EX;
203
204 if (tx_ring->tsd_enable)
205 flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;
206
207 /* first BD needs frm_len and offload flags set */
208 temp_bd.frm_len = cpu_to_le16(skb->len);
209 temp_bd.flags = flags;
210
211 if (flags & ENETC_TXBD_FLAGS_TSE)
212 temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns,
213 flags);
214
215 if (flags & ENETC_TXBD_FLAGS_EX) {
216 u8 e_flags = 0;
217 *txbd = temp_bd;
218 enetc_clear_tx_bd(&temp_bd);
219
220 /* add extension BD for VLAN and/or timestamping */
221 flags = 0;
222 tx_swbd++;
223 txbd++;
224 i++;
225 if (unlikely(i == tx_ring->bd_count)) {
226 i = 0;
227 tx_swbd = tx_ring->tx_swbd;
228 txbd = ENETC_TXBD(*tx_ring, 0);
229 }
230 prefetchw(txbd);
231
232 if (do_vlan) {
233 temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
234 temp_bd.ext.tpid = 0; /* < C-TAG */
235 e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
236 }
237
238 if (do_onestep_tstamp) {
239 u32 lo, hi, val;
240 u64 sec, nsec;
241 u8 *data;
242
243 lo = enetc_rd_hot(hw, ENETC_SICTR0);
244 hi = enetc_rd_hot(hw, ENETC_SICTR1);
245 sec = (u64)hi << 32 | lo;
246 nsec = do_div(sec, 1000000000);
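			/* do_div() divides 'sec' in place by 10^9 and returns
			 * the remainder, so from here on 'sec' holds whole
			 * seconds and 'nsec' the leftover nanoseconds of the
			 * free-running SICTR counter.
			 */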
247
248 /* Configure extension BD */
249 temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff);
250 e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP;
251
			/* Update originTimestamp field of Sync packet
			 * - 48 bits seconds field
			 * - 32 bits nanoseconds field
			 */
256 data = skb_mac_header(skb);
257 *(__be16 *)(data + offset2) =
258 htons((sec >> 32) & 0xffff);
259 *(__be32 *)(data + offset2 + 2) =
260 htonl(sec & 0xffffffff);
261 *(__be32 *)(data + offset2 + 6) = htonl(nsec);
262
263 /* Configure single-step register */
264 val = ENETC_PM0_SINGLE_STEP_EN;
265 val |= ENETC_SET_SINGLE_STEP_OFFSET(offset1);
266 if (udp)
267 val |= ENETC_PM0_SINGLE_STEP_CH;
268
269 enetc_port_mac_wr(priv->si, ENETC_PM0_SINGLE_STEP,
270 val);
271 } else if (do_twostep_tstamp) {
272 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
273 e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
274 }
275
276 temp_bd.ext.e_flags = e_flags;
277 count++;
278 }
279
280 frag = &skb_shinfo(skb)->frags[0];
281 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
282 len = skb_frag_size(frag);
283 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
284 DMA_TO_DEVICE);
285 if (dma_mapping_error(tx_ring->dev, dma))
286 goto dma_err;
287
288 *txbd = temp_bd;
289 enetc_clear_tx_bd(&temp_bd);
290
291 flags = 0;
292 tx_swbd++;
293 txbd++;
294 i++;
295 if (unlikely(i == tx_ring->bd_count)) {
296 i = 0;
297 tx_swbd = tx_ring->tx_swbd;
298 txbd = ENETC_TXBD(*tx_ring, 0);
299 }
300 prefetchw(txbd);
301
302 temp_bd.addr = cpu_to_le64(dma);
303 temp_bd.buf_len = cpu_to_le16(len);
304
305 tx_swbd->dma = dma;
306 tx_swbd->len = len;
307 tx_swbd->is_dma_page = 1;
308 tx_swbd->dir = DMA_TO_DEVICE;
309 count++;
310 }
311
312 /* last BD needs 'F' bit set */
313 flags |= ENETC_TXBD_FLAGS_F;
314 temp_bd.flags = flags;
315 *txbd = temp_bd;
316
317 tx_ring->tx_swbd[i].is_eof = true;
318 tx_ring->tx_swbd[i].skb = skb;
319
320 enetc_bdr_idx_inc(tx_ring, &i);
321 tx_ring->next_to_use = i;
322
323 skb_tx_timestamp(skb);
324
325 enetc_update_tx_ring_tail(tx_ring);
326
327 return count;
328
329 dma_err:
330 dev_err(tx_ring->dev, "DMA map error");
331
332 do {
333 tx_swbd = &tx_ring->tx_swbd[i];
334 enetc_free_tx_frame(tx_ring, tx_swbd);
335 if (i == 0)
336 i = tx_ring->bd_count;
337 i--;
338 } while (count--);
339
340 return 0;
341 }
342
static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
344 struct enetc_tx_swbd *tx_swbd,
345 union enetc_tx_bd *txbd, int *i, int hdr_len,
346 int data_len)
347 {
348 union enetc_tx_bd txbd_tmp;
349 u8 flags = 0, e_flags = 0;
350 dma_addr_t addr;
351
352 enetc_clear_tx_bd(&txbd_tmp);
353 addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
354
355 if (skb_vlan_tag_present(skb))
356 flags |= ENETC_TXBD_FLAGS_EX;
357
358 txbd_tmp.addr = cpu_to_le64(addr);
359 txbd_tmp.buf_len = cpu_to_le16(hdr_len);
360
361 /* first BD needs frm_len and offload flags set */
362 txbd_tmp.frm_len = cpu_to_le16(hdr_len + data_len);
363 txbd_tmp.flags = flags;
364
365 /* For the TSO header we do not set the dma address since we do not
366 * want it unmapped when we do cleanup. We still set len so that we
367 * count the bytes sent.
368 */
369 tx_swbd->len = hdr_len;
370 tx_swbd->do_twostep_tstamp = false;
371 tx_swbd->check_wb = false;
372
373 /* Actually write the header in the BD */
374 *txbd = txbd_tmp;
375
376 /* Add extension BD for VLAN */
377 if (flags & ENETC_TXBD_FLAGS_EX) {
378 /* Get the next BD */
379 enetc_bdr_idx_inc(tx_ring, i);
380 txbd = ENETC_TXBD(*tx_ring, *i);
381 tx_swbd = &tx_ring->tx_swbd[*i];
382 prefetchw(txbd);
383
384 /* Setup the VLAN fields */
385 enetc_clear_tx_bd(&txbd_tmp);
386 txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
387 txbd_tmp.ext.tpid = 0; /* < C-TAG */
388 e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
389
390 /* Write the BD */
391 txbd_tmp.ext.e_flags = e_flags;
392 *txbd = txbd_tmp;
393 }
394 }
395
static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
397 struct enetc_tx_swbd *tx_swbd,
398 union enetc_tx_bd *txbd, char *data,
399 int size, bool last_bd)
400 {
401 union enetc_tx_bd txbd_tmp;
402 dma_addr_t addr;
403 u8 flags = 0;
404
405 enetc_clear_tx_bd(&txbd_tmp);
406
407 addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
408 if (unlikely(dma_mapping_error(tx_ring->dev, addr))) {
409 netdev_err(tx_ring->ndev, "DMA map error\n");
410 return -ENOMEM;
411 }
412
413 if (last_bd) {
414 flags |= ENETC_TXBD_FLAGS_F;
415 tx_swbd->is_eof = 1;
416 }
417
418 txbd_tmp.addr = cpu_to_le64(addr);
419 txbd_tmp.buf_len = cpu_to_le16(size);
420 txbd_tmp.flags = flags;
421
422 tx_swbd->dma = addr;
423 tx_swbd->len = size;
424 tx_swbd->dir = DMA_TO_DEVICE;
425
426 *txbd = txbd_tmp;
427
428 return 0;
429 }
430
static __wsum enetc_tso_hdr_csum(struct tso_t *tso, struct sk_buff *skb,
432 char *hdr, int hdr_len, int *l4_hdr_len)
433 {
434 char *l4_hdr = hdr + skb_transport_offset(skb);
435 int mac_hdr_len = skb_network_offset(skb);
436
437 if (tso->tlen != sizeof(struct udphdr)) {
438 struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);
439
440 tcph->check = 0;
441 } else {
442 struct udphdr *udph = (struct udphdr *)(l4_hdr);
443
444 udph->check = 0;
445 }
446
447 /* Compute the IP checksum. This is necessary since tso_build_hdr()
448 * already incremented the IP ID field.
449 */
450 if (!tso->ipv6) {
451 struct iphdr *iph = (void *)(hdr + mac_hdr_len);
452
453 iph->check = 0;
454 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
455 }
456
457 /* Compute the checksum over the L4 header. */
458 *l4_hdr_len = hdr_len - skb_transport_offset(skb);
459 return csum_partial(l4_hdr, *l4_hdr_len, 0);
460 }
461
static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso,
463 struct sk_buff *skb, char *hdr, int len,
464 __wsum sum)
465 {
466 char *l4_hdr = hdr + skb_transport_offset(skb);
467 __sum16 csum_final;
468
469 /* Complete the L4 checksum by appending the pseudo-header to the
470 * already computed checksum.
471 */
472 if (!tso->ipv6)
473 csum_final = csum_tcpudp_magic(ip_hdr(skb)->saddr,
474 ip_hdr(skb)->daddr,
475 len, ip_hdr(skb)->protocol, sum);
476 else
477 csum_final = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
478 &ipv6_hdr(skb)->daddr,
479 len, ipv6_hdr(skb)->nexthdr, sum);
480
481 if (tso->tlen != sizeof(struct udphdr)) {
482 struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);
483
484 tcph->check = csum_final;
485 } else {
486 struct udphdr *udph = (struct udphdr *)(l4_hdr);
487
488 udph->check = csum_final;
489 }
490 }
491
static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
493 {
494 int hdr_len, total_len, data_len;
495 struct enetc_tx_swbd *tx_swbd;
496 union enetc_tx_bd *txbd;
497 struct tso_t tso;
498 __wsum csum, csum2;
499 int count = 0, pos;
500 int err, i, bd_data_num;
501
502 /* Initialize the TSO handler, and prepare the first payload */
503 hdr_len = tso_start(skb, &tso);
504 total_len = skb->len - hdr_len;
505 i = tx_ring->next_to_use;
506
507 while (total_len > 0) {
508 char *hdr;
509
510 /* Get the BD */
511 txbd = ENETC_TXBD(*tx_ring, i);
512 tx_swbd = &tx_ring->tx_swbd[i];
513 prefetchw(txbd);
514
515 /* Determine the length of this packet */
516 data_len = min_t(int, skb_shinfo(skb)->gso_size, total_len);
517 total_len -= data_len;
518
519 /* prepare packet headers: MAC + IP + TCP */
520 hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE;
521 tso_build_hdr(skb, hdr, &tso, data_len, total_len == 0);
522
523 /* compute the csum over the L4 header */
524 csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
525 enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
526 bd_data_num = 0;
527 count++;
528
529 while (data_len > 0) {
530 int size;
531
532 size = min_t(int, tso.size, data_len);
533
534 /* Advance the index in the BDR */
535 enetc_bdr_idx_inc(tx_ring, &i);
536 txbd = ENETC_TXBD(*tx_ring, i);
537 tx_swbd = &tx_ring->tx_swbd[i];
538 prefetchw(txbd);
539
540 /* Compute the checksum over this segment of data and
541 * add it to the csum already computed (over the L4
542 * header and possible other data segments).
543 */
544 csum2 = csum_partial(tso.data, size, 0);
545 csum = csum_block_add(csum, csum2, pos);
546 pos += size;
547
548 err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
549 tso.data, size,
550 size == data_len);
551 if (err)
552 goto err_map_data;
553
554 data_len -= size;
555 count++;
556 bd_data_num++;
557 tso_build_data(skb, &tso, size);
558
559 if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len))
560 goto err_chained_bd;
561 }
562
563 enetc_tso_complete_csum(tx_ring, &tso, skb, hdr, pos, csum);
564
565 if (total_len == 0)
566 tx_swbd->skb = skb;
567
568 /* Go to the next BD */
569 enetc_bdr_idx_inc(tx_ring, &i);
570 }
571
572 tx_ring->next_to_use = i;
573 enetc_update_tx_ring_tail(tx_ring);
574
575 return count;
576
577 err_map_data:
578 dev_err(tx_ring->dev, "DMA map error");
579
580 err_chained_bd:
581 do {
582 tx_swbd = &tx_ring->tx_swbd[i];
583 enetc_free_tx_frame(tx_ring, tx_swbd);
584 if (i == 0)
585 i = tx_ring->bd_count;
586 i--;
587 } while (count--);
588
589 return 0;
590 }
591
static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
593 struct net_device *ndev)
594 {
595 struct enetc_ndev_priv *priv = netdev_priv(ndev);
596 struct enetc_bdr *tx_ring;
597 int count, err;
598
599 /* Queue one-step Sync packet if already locked */
600 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
601 if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS,
602 &priv->flags)) {
603 skb_queue_tail(&priv->tx_skbs, skb);
604 return NETDEV_TX_OK;
605 }
606 }
607
608 tx_ring = priv->tx_ring[skb->queue_mapping];
609
610 if (skb_is_gso(skb)) {
611 if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
612 netif_stop_subqueue(ndev, tx_ring->index);
613 return NETDEV_TX_BUSY;
614 }
615
616 enetc_lock_mdio();
617 count = enetc_map_tx_tso_buffs(tx_ring, skb);
618 enetc_unlock_mdio();
619 } else {
620 if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
621 if (unlikely(skb_linearize(skb)))
622 goto drop_packet_err;
623
624 count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
625 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
626 netif_stop_subqueue(ndev, tx_ring->index);
627 return NETDEV_TX_BUSY;
628 }
629
630 if (skb->ip_summed == CHECKSUM_PARTIAL) {
631 err = skb_checksum_help(skb);
632 if (err)
633 goto drop_packet_err;
634 }
635 enetc_lock_mdio();
636 count = enetc_map_tx_buffs(tx_ring, skb);
637 enetc_unlock_mdio();
638 }
639
640 if (unlikely(!count))
641 goto drop_packet_err;
642
643 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
644 netif_stop_subqueue(ndev, tx_ring->index);
645
646 return NETDEV_TX_OK;
647
648 drop_packet_err:
649 dev_kfree_skb_any(skb);
650 return NETDEV_TX_OK;
651 }
652
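/* Top-level transmit path (exported, presumably wired up as the driver's
 * .ndo_start_xmit hook elsewhere): it records the requested TX timestamp type
 * in skb->cb[0], falls back from one-step to two-step timestamping for
 * anything that is not a one-step Sync message, and then hands the skb to
 * enetc_start_xmit().
 */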
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
654 {
655 struct enetc_ndev_priv *priv = netdev_priv(ndev);
656 u8 udp, msgtype, twostep;
657 u16 offset1, offset2;
658
	/* Mark tx timestamp type on skb->cb[0] if required */
660 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
661 (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) {
662 skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
663 } else {
664 skb->cb[0] = 0;
665 }
666
667 /* Fall back to two-step timestamp if not one-step Sync packet */
668 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
669 if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep,
670 &offset1, &offset2) ||
671 msgtype != PTP_MSGTYPE_SYNC || twostep != 0)
672 skb->cb[0] = ENETC_F_TX_TSTAMP;
673 }
674
675 return enetc_start_xmit(skb, ndev);
676 }
677 EXPORT_SYMBOL_GPL(enetc_xmit);
678
static irqreturn_t enetc_msix(int irq, void *data)
680 {
681 struct enetc_int_vector *v = data;
682 int i;
683
684 enetc_lock_mdio();
685
686 /* disable interrupts */
687 enetc_wr_reg_hot(v->rbier, 0);
688 enetc_wr_reg_hot(v->ricr1, v->rx_ictt);
689
690 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
691 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0);
692
693 enetc_unlock_mdio();
694
695 napi_schedule(&v->napi);
696
697 return IRQ_HANDLED;
698 }
699
static void enetc_rx_dim_work(struct work_struct *w)
701 {
702 struct dim *dim = container_of(w, struct dim, work);
703 struct dim_cq_moder moder =
704 net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
705 struct enetc_int_vector *v =
706 container_of(dim, struct enetc_int_vector, rx_dim);
707 struct enetc_ndev_priv *priv = netdev_priv(v->rx_ring.ndev);
708
709 v->rx_ictt = enetc_usecs_to_cycles(moder.usec, priv->sysclk_freq);
710 dim->state = DIM_START_MEASURE;
711 }
712
static void enetc_rx_net_dim(struct enetc_int_vector *v)
714 {
715 struct dim_sample dim_sample = {};
716
717 v->comp_cnt++;
718
719 if (!v->rx_napi_work)
720 return;
721
722 dim_update_sample(v->comp_cnt,
723 v->rx_ring.stats.packets,
724 v->rx_ring.stats.bytes,
725 &dim_sample);
726 net_dim(&v->rx_dim, &dim_sample);
727 }
728
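/* Number of completed TX BDs between the software consumer index 'ci' and the
 * hardware index read from TBCIR, accounting for ring wrap-around. E.g. with
 * bd_count = 256, ci = 250 and a hardware index of 4, this returns
 * 256 - 250 + 4 = 10.
 */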
static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
730 {
731 int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
732
733 return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
734 }
735
static bool enetc_page_reusable(struct page *page)
737 {
738 return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
739 }
740
static void enetc_reuse_page(struct enetc_bdr *rx_ring,
742 struct enetc_rx_swbd *old)
743 {
744 struct enetc_rx_swbd *new;
745
746 new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
747
748 /* next buf that may reuse a page */
749 enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
750
751 /* copy page reference */
752 *new = *old;
753 }
754
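/* Reconstruct a 64-bit TX timestamp from the 32-bit value written back into
 * the BD: the current SICTR0/SICTR1 counter is read, and if its low word is
 * already <= the captured low word, the low counter is assumed to have
 * wrapped since the timestamp was taken, so the high word is decremented by
 * one before being combined with the captured low word.
 */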
static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
756 u64 *tstamp)
757 {
758 u32 lo, hi, tstamp_lo;
759
760 lo = enetc_rd_hot(hw, ENETC_SICTR0);
761 hi = enetc_rd_hot(hw, ENETC_SICTR1);
762 tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
763 if (lo <= tstamp_lo)
764 hi -= 1;
765 *tstamp = (u64)hi << 32 | tstamp_lo;
766 }
767
static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
769 {
770 struct skb_shared_hwtstamps shhwtstamps;
771
772 if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
773 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
774 shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
775 skb_txtime_consumed(skb);
776 skb_tstamp_tx(skb, &shhwtstamps);
777 }
778 }
779
static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring,
781 struct enetc_tx_swbd *tx_swbd)
782 {
783 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
784 struct enetc_rx_swbd rx_swbd = {
785 .dma = tx_swbd->dma,
786 .page = tx_swbd->page,
787 .page_offset = tx_swbd->page_offset,
788 .dir = tx_swbd->dir,
789 .len = tx_swbd->len,
790 };
791 struct enetc_bdr *rx_ring;
792
793 rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring);
794
795 if (likely(enetc_swbd_unused(rx_ring))) {
796 enetc_reuse_page(rx_ring, &rx_swbd);
797
798 /* sync for use by the device */
799 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma,
800 rx_swbd.page_offset,
801 ENETC_RXB_DMA_SIZE_XDP,
802 rx_swbd.dir);
803
804 rx_ring->stats.recycles++;
805 } else {
806 /* RX ring is already full, we need to unmap and free the
807 * page, since there's nothing useful we can do with it.
808 */
809 rx_ring->stats.recycle_failures++;
810
811 dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE,
812 rx_swbd.dir);
813 __free_page(rx_swbd.page);
814 }
815
816 rx_ring->xdp.xdp_tx_in_flight--;
817 }
818
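/* TX completion handler: walks the finished BDs (up to ENETC_DEFAULT_TX_WORK
 * frames per call), collects two-step timestamps from write-back BDs,
 * recycles XDP_TX pages back to the paired RX ring, frees skbs and XDP
 * frames, and re-wakes the TX subqueue once enough descriptors are free.
 * Returns true when it finished below its work budget.
 */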
static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
820 {
821 int tx_frm_cnt = 0, tx_byte_cnt = 0, tx_win_drop = 0;
822 struct net_device *ndev = tx_ring->ndev;
823 struct enetc_ndev_priv *priv = netdev_priv(ndev);
824 struct enetc_tx_swbd *tx_swbd;
825 int i, bds_to_clean;
826 bool do_twostep_tstamp;
827 u64 tstamp = 0;
828
829 i = tx_ring->next_to_clean;
830 tx_swbd = &tx_ring->tx_swbd[i];
831
832 bds_to_clean = enetc_bd_ready_count(tx_ring, i);
833
834 do_twostep_tstamp = false;
835
836 while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
837 struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
838 struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
839 bool is_eof = tx_swbd->is_eof;
840
841 if (unlikely(tx_swbd->check_wb)) {
842 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
843
844 if (txbd->flags & ENETC_TXBD_FLAGS_W &&
845 tx_swbd->do_twostep_tstamp) {
846 enetc_get_tx_tstamp(&priv->si->hw, txbd,
847 &tstamp);
848 do_twostep_tstamp = true;
849 }
850
851 if (tx_swbd->qbv_en &&
852 txbd->wb.status & ENETC_TXBD_STATS_WIN)
853 tx_win_drop++;
854 }
855
856 if (tx_swbd->is_xdp_tx)
857 enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd);
858 else if (likely(tx_swbd->dma))
859 enetc_unmap_tx_buff(tx_ring, tx_swbd);
860
861 if (xdp_frame) {
862 xdp_return_frame(xdp_frame);
863 } else if (skb) {
864 if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
				/* Start work to release the lock for the next
				 * one-step timestamping packet, and to send one
				 * skb from the tx_skbs queue if any is pending.
				 */
869 schedule_work(&priv->tx_onestep_tstamp);
870 } else if (unlikely(do_twostep_tstamp)) {
871 enetc_tstamp_tx(skb, tstamp);
872 do_twostep_tstamp = false;
873 }
874 napi_consume_skb(skb, napi_budget);
875 }
876
877 tx_byte_cnt += tx_swbd->len;
878 /* Scrub the swbd here so we don't have to do that
879 * when we reuse it during xmit
880 */
881 memset(tx_swbd, 0, sizeof(*tx_swbd));
882
883 bds_to_clean--;
884 tx_swbd++;
885 i++;
886 if (unlikely(i == tx_ring->bd_count)) {
887 i = 0;
888 tx_swbd = tx_ring->tx_swbd;
889 }
890
891 /* BD iteration loop end */
892 if (is_eof) {
893 tx_frm_cnt++;
894 /* re-arm interrupt source */
895 enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) |
896 BIT(16 + tx_ring->index));
897 }
898
899 if (unlikely(!bds_to_clean))
900 bds_to_clean = enetc_bd_ready_count(tx_ring, i);
901 }
902
903 tx_ring->next_to_clean = i;
904 tx_ring->stats.packets += tx_frm_cnt;
905 tx_ring->stats.bytes += tx_byte_cnt;
906 tx_ring->stats.win_drop += tx_win_drop;
907
908 if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
909 __netif_subqueue_stopped(ndev, tx_ring->index) &&
910 !test_bit(ENETC_TX_DOWN, &priv->flags) &&
911 (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
912 netif_wake_subqueue(ndev, tx_ring->index);
913 }
914
915 return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
916 }
917
static bool enetc_new_page(struct enetc_bdr *rx_ring,
919 struct enetc_rx_swbd *rx_swbd)
920 {
921 bool xdp = !!(rx_ring->xdp.prog);
922 struct page *page;
923 dma_addr_t addr;
924
925 page = dev_alloc_page();
926 if (unlikely(!page))
927 return false;
928
929 /* For XDP_TX, we forgo dma_unmap -> dma_map */
930 rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
931
932 addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir);
933 if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
934 __free_page(page);
935
936 return false;
937 }
938
939 rx_swbd->dma = addr;
940 rx_swbd->page = page;
941 rx_swbd->page_offset = rx_ring->buffer_offset;
942
943 return true;
944 }
945
static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
947 {
948 struct enetc_rx_swbd *rx_swbd;
949 union enetc_rx_bd *rxbd;
950 int i, j;
951
952 i = rx_ring->next_to_use;
953 rx_swbd = &rx_ring->rx_swbd[i];
954 rxbd = enetc_rxbd(rx_ring, i);
955
956 for (j = 0; j < buff_cnt; j++) {
957 /* try reuse page */
958 if (unlikely(!rx_swbd->page)) {
959 if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
960 rx_ring->stats.rx_alloc_errs++;
961 break;
962 }
963 }
964
965 /* update RxBD */
966 rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
967 rx_swbd->page_offset);
		/* clear 'R' as well */
969 rxbd->r.lstatus = 0;
970
971 enetc_rxbd_next(rx_ring, &rxbd, &i);
972 rx_swbd = &rx_ring->rx_swbd[i];
973 }
974
975 if (likely(j)) {
976 rx_ring->next_to_alloc = i; /* keep track from page reuse */
977 rx_ring->next_to_use = i;
978
979 /* update ENETC's consumer index */
980 enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
981 }
982
983 return j;
984 }
985
static void enetc_get_rx_tstamp(struct net_device *ndev,
987 union enetc_rx_bd *rxbd,
988 struct sk_buff *skb)
989 {
990 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
991 struct enetc_ndev_priv *priv = netdev_priv(ndev);
992 struct enetc_hw *hw = &priv->si->hw;
993 u32 lo, hi, tstamp_lo;
994 u64 tstamp;
995
996 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
997 lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0);
998 hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1);
999 rxbd = enetc_rxbd_ext(rxbd);
1000 tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
1001 if (lo <= tstamp_lo)
1002 hi -= 1;
1003
1004 tstamp = (u64)hi << 32 | tstamp_lo;
1005 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
1006 shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
1007 }
1008 }
1009
static void enetc_get_offloads(struct enetc_bdr *rx_ring,
1011 union enetc_rx_bd *rxbd, struct sk_buff *skb)
1012 {
1013 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
1014
1015 /* TODO: hashing */
1016 if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
1017 u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
1018
1019 skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
1020 skb->ip_summed = CHECKSUM_COMPLETE;
1021 }
1022
1023 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) {
1024 __be16 tpid = 0;
1025
1026 switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) {
1027 case 0:
1028 tpid = htons(ETH_P_8021Q);
1029 break;
1030 case 1:
1031 tpid = htons(ETH_P_8021AD);
1032 break;
1033 case 2:
1034 tpid = htons(enetc_port_rd(&priv->si->hw,
1035 ENETC_PCVLANR1));
1036 break;
1037 case 3:
1038 tpid = htons(enetc_port_rd(&priv->si->hw,
1039 ENETC_PCVLANR2));
1040 break;
1041 default:
1042 break;
1043 }
1044
1045 __vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
1046 }
1047
1048 if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) &&
1049 (priv->active_offloads & ENETC_F_RX_TSTAMP))
1050 enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
1051 }
1052
1053 /* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS,
1054 * so it needs to work with both DMA_FROM_DEVICE as well as DMA_BIDIRECTIONAL
1055 * mapped buffers.
1056 */
static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
1058 int i, u16 size)
1059 {
1060 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
1061
1062 dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
1063 rx_swbd->page_offset,
1064 size, rx_swbd->dir);
1065 return rx_swbd;
1066 }
1067
1068 /* Reuse the current page without performing half-page buffer flipping */
static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
1070 struct enetc_rx_swbd *rx_swbd)
1071 {
1072 size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset;
1073
1074 enetc_reuse_page(rx_ring, rx_swbd);
1075
1076 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
1077 rx_swbd->page_offset,
1078 buffer_size, rx_swbd->dir);
1079
1080 rx_swbd->page = NULL;
1081 }
1082
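/* Each RX page is split into two half-page buffers of ENETC_RXB_TRUESIZE
 * bytes (presumably PAGE_SIZE / 2). Flipping hands the current half to the
 * stack, toggles page_offset to the other half with the XOR below, and takes
 * an extra page reference so the page stays alive until both halves are
 * consumed; a page that is no longer exclusively owned is simply unmapped
 * instead of being reused.
 */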
1083 /* Reuse the current page by performing half-page buffer flipping */
static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring,
1085 struct enetc_rx_swbd *rx_swbd)
1086 {
1087 if (likely(enetc_page_reusable(rx_swbd->page))) {
1088 rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
1089 page_ref_inc(rx_swbd->page);
1090
1091 enetc_put_rx_buff(rx_ring, rx_swbd);
1092 } else {
1093 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
1094 rx_swbd->dir);
1095 rx_swbd->page = NULL;
1096 }
1097 }
1098
static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
1100 int i, u16 size)
1101 {
1102 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
1103 struct sk_buff *skb;
1104 void *ba;
1105
1106 ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
1107 skb = build_skb(ba - rx_ring->buffer_offset, ENETC_RXB_TRUESIZE);
1108 if (unlikely(!skb)) {
1109 rx_ring->stats.rx_alloc_errs++;
1110 return NULL;
1111 }
1112
1113 skb_reserve(skb, rx_ring->buffer_offset);
1114 __skb_put(skb, size);
1115
1116 enetc_flip_rx_buff(rx_ring, rx_swbd);
1117
1118 return skb;
1119 }
1120
static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
1122 u16 size, struct sk_buff *skb)
1123 {
1124 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
1125
1126 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
1127 rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
1128
1129 enetc_flip_rx_buff(rx_ring, rx_swbd);
1130 }
1131
static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring,
1133 u32 bd_status,
1134 union enetc_rx_bd **rxbd, int *i)
1135 {
1136 if (likely(!(bd_status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))))
1137 return false;
1138
1139 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
1140 enetc_rxbd_next(rx_ring, rxbd, i);
1141
1142 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
1143 dma_rmb();
1144 bd_status = le32_to_cpu((*rxbd)->r.lstatus);
1145
1146 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
1147 enetc_rxbd_next(rx_ring, rxbd, i);
1148 }
1149
1150 rx_ring->ndev->stats.rx_dropped++;
1151 rx_ring->ndev->stats.rx_errors++;
1152
1153 return true;
1154 }
1155
static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring,
1157 u32 bd_status, union enetc_rx_bd **rxbd,
1158 int *i, int *cleaned_cnt, int buffer_size)
1159 {
1160 struct sk_buff *skb;
1161 u16 size;
1162
1163 size = le16_to_cpu((*rxbd)->r.buf_len);
1164 skb = enetc_map_rx_buff_to_skb(rx_ring, *i, size);
1165 if (!skb)
1166 return NULL;
1167
1168 enetc_get_offloads(rx_ring, *rxbd, skb);
1169
1170 (*cleaned_cnt)++;
1171
1172 enetc_rxbd_next(rx_ring, rxbd, i);
1173
1174 /* not last BD in frame? */
1175 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
1176 bd_status = le32_to_cpu((*rxbd)->r.lstatus);
1177 size = buffer_size;
1178
1179 if (bd_status & ENETC_RXBD_LSTATUS_F) {
1180 dma_rmb();
1181 size = le16_to_cpu((*rxbd)->r.buf_len);
1182 }
1183
1184 enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb);
1185
1186 (*cleaned_cnt)++;
1187
1188 enetc_rxbd_next(rx_ring, rxbd, i);
1189 }
1190
1191 skb_record_rx_queue(skb, rx_ring->index);
1192 skb->protocol = eth_type_trans(skb, rx_ring->ndev);
1193
1194 return skb;
1195 }
1196
1197 #define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
1198
static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
1200 struct napi_struct *napi, int work_limit)
1201 {
1202 int rx_frm_cnt = 0, rx_byte_cnt = 0;
1203 int cleaned_cnt, i;
1204
1205 cleaned_cnt = enetc_bd_unused(rx_ring);
1206 /* next descriptor to process */
1207 i = rx_ring->next_to_clean;
1208
1209 while (likely(rx_frm_cnt < work_limit)) {
1210 union enetc_rx_bd *rxbd;
1211 struct sk_buff *skb;
1212 u32 bd_status;
1213
1214 if (cleaned_cnt >= ENETC_RXBD_BUNDLE)
1215 cleaned_cnt -= enetc_refill_rx_ring(rx_ring,
1216 cleaned_cnt);
1217
1218 rxbd = enetc_rxbd(rx_ring, i);
1219 bd_status = le32_to_cpu(rxbd->r.lstatus);
1220 if (!bd_status)
1221 break;
1222
1223 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
1224 dma_rmb(); /* for reading other rxbd fields */
1225
1226 if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
1227 &rxbd, &i))
1228 break;
1229
1230 skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i,
1231 &cleaned_cnt, ENETC_RXB_DMA_SIZE);
1232 if (!skb)
1233 break;
1234
1235 /* When set, the outer VLAN header is extracted and reported
1236 * in the receive buffer descriptor. So rx_byte_cnt should
1237 * add the length of the extracted VLAN header.
1238 */
1239 if (bd_status & ENETC_RXBD_FLAG_VLAN)
1240 rx_byte_cnt += VLAN_HLEN;
1241 rx_byte_cnt += skb->len + ETH_HLEN;
1242 rx_frm_cnt++;
1243
1244 napi_gro_receive(napi, skb);
1245 }
1246
1247 rx_ring->next_to_clean = i;
1248
1249 rx_ring->stats.packets += rx_frm_cnt;
1250 rx_ring->stats.bytes += rx_byte_cnt;
1251
1252 return rx_frm_cnt;
1253 }
1254
static void enetc_xdp_map_tx_buff(struct enetc_bdr *tx_ring, int i,
1256 struct enetc_tx_swbd *tx_swbd,
1257 int frm_len)
1258 {
1259 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
1260
1261 prefetchw(txbd);
1262
1263 enetc_clear_tx_bd(txbd);
1264 txbd->addr = cpu_to_le64(tx_swbd->dma + tx_swbd->page_offset);
1265 txbd->buf_len = cpu_to_le16(tx_swbd->len);
1266 txbd->frm_len = cpu_to_le16(frm_len);
1267
1268 memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd));
1269 }
1270
1271 /* Puts in the TX ring one XDP frame, mapped as an array of TX software buffer
1272 * descriptors.
1273 */
static bool enetc_xdp_tx(struct enetc_bdr *tx_ring,
1275 struct enetc_tx_swbd *xdp_tx_arr, int num_tx_swbd)
1276 {
1277 struct enetc_tx_swbd *tmp_tx_swbd = xdp_tx_arr;
1278 int i, k, frm_len = tmp_tx_swbd->len;
1279
1280 if (unlikely(enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(num_tx_swbd)))
1281 return false;
1282
1283 while (unlikely(!tmp_tx_swbd->is_eof)) {
1284 tmp_tx_swbd++;
1285 frm_len += tmp_tx_swbd->len;
1286 }
1287
1288 i = tx_ring->next_to_use;
1289
1290 for (k = 0; k < num_tx_swbd; k++) {
1291 struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[k];
1292
1293 enetc_xdp_map_tx_buff(tx_ring, i, xdp_tx_swbd, frm_len);
1294
1295 /* last BD needs 'F' bit set */
1296 if (xdp_tx_swbd->is_eof) {
1297 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
1298
1299 txbd->flags = ENETC_TXBD_FLAGS_F;
1300 }
1301
1302 enetc_bdr_idx_inc(tx_ring, &i);
1303 }
1304
1305 tx_ring->next_to_use = i;
1306
1307 return true;
1308 }
1309
static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
1311 struct enetc_tx_swbd *xdp_tx_arr,
1312 struct xdp_frame *xdp_frame)
1313 {
1314 struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[0];
1315 struct skb_shared_info *shinfo;
1316 void *data = xdp_frame->data;
1317 int len = xdp_frame->len;
1318 skb_frag_t *frag;
1319 dma_addr_t dma;
1320 unsigned int f;
1321 int n = 0;
1322
1323 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
1324 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
1325 netdev_err(tx_ring->ndev, "DMA map error\n");
1326 return -1;
1327 }
1328
1329 xdp_tx_swbd->dma = dma;
1330 xdp_tx_swbd->dir = DMA_TO_DEVICE;
1331 xdp_tx_swbd->len = len;
1332 xdp_tx_swbd->is_xdp_redirect = true;
1333 xdp_tx_swbd->is_eof = false;
1334 xdp_tx_swbd->xdp_frame = NULL;
1335
1336 n++;
1337
1338 if (!xdp_frame_has_frags(xdp_frame))
1339 goto out;
1340
1341 xdp_tx_swbd = &xdp_tx_arr[n];
1342
1343 shinfo = xdp_get_shared_info_from_frame(xdp_frame);
1344
1345 for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags;
1346 f++, frag++) {
1347 data = skb_frag_address(frag);
1348 len = skb_frag_size(frag);
1349
1350 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
1351 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
1352 /* Undo the DMA mapping for all fragments */
1353 while (--n >= 0)
1354 enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]);
1355
1356 netdev_err(tx_ring->ndev, "DMA map error\n");
1357 return -1;
1358 }
1359
1360 xdp_tx_swbd->dma = dma;
1361 xdp_tx_swbd->dir = DMA_TO_DEVICE;
1362 xdp_tx_swbd->len = len;
1363 xdp_tx_swbd->is_xdp_redirect = true;
1364 xdp_tx_swbd->is_eof = false;
1365 xdp_tx_swbd->xdp_frame = NULL;
1366
1367 n++;
1368 xdp_tx_swbd = &xdp_tx_arr[n];
1369 }
1370 out:
1371 xdp_tx_arr[n - 1].is_eof = true;
1372 xdp_tx_arr[n - 1].xdp_frame = xdp_frame;
1373
1374 return n;
1375 }
1376
int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
1378 struct xdp_frame **frames, u32 flags)
1379 {
1380 struct enetc_tx_swbd xdp_redirect_arr[ENETC_MAX_SKB_FRAGS] = {0};
1381 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1382 struct enetc_bdr *tx_ring;
1383 int xdp_tx_bd_cnt, i, k;
1384 int xdp_tx_frm_cnt = 0;
1385
1386 if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags)))
1387 return -ENETDOWN;
1388
1389 enetc_lock_mdio();
1390
1391 tx_ring = priv->xdp_tx_ring[smp_processor_id()];
1392
1393 prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use));
1394
1395 for (k = 0; k < num_frames; k++) {
1396 xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring,
1397 xdp_redirect_arr,
1398 frames[k]);
1399 if (unlikely(xdp_tx_bd_cnt < 0))
1400 break;
1401
1402 if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr,
1403 xdp_tx_bd_cnt))) {
1404 for (i = 0; i < xdp_tx_bd_cnt; i++)
1405 enetc_unmap_tx_buff(tx_ring,
1406 &xdp_redirect_arr[i]);
1407 tx_ring->stats.xdp_tx_drops++;
1408 break;
1409 }
1410
1411 xdp_tx_frm_cnt++;
1412 }
1413
1414 if (unlikely((flags & XDP_XMIT_FLUSH) || k != xdp_tx_frm_cnt))
1415 enetc_update_tx_ring_tail(tx_ring);
1416
1417 tx_ring->stats.xdp_tx += xdp_tx_frm_cnt;
1418
1419 enetc_unlock_mdio();
1420
1421 return xdp_tx_frm_cnt;
1422 }
1423 EXPORT_SYMBOL_GPL(enetc_xdp_xmit);
1424
static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
1426 struct xdp_buff *xdp_buff, u16 size)
1427 {
1428 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
1429 void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset;
1430
1431 /* To be used for XDP_TX */
1432 rx_swbd->len = size;
1433
1434 xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset,
1435 rx_ring->buffer_offset, size, false);
1436 }
1437
static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
1439 u16 size, struct xdp_buff *xdp_buff)
1440 {
1441 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff);
1442 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
1443 skb_frag_t *frag;
1444
1445 /* To be used for XDP_TX */
1446 rx_swbd->len = size;
1447
1448 if (!xdp_buff_has_frags(xdp_buff)) {
1449 xdp_buff_set_frags_flag(xdp_buff);
1450 shinfo->xdp_frags_size = size;
1451 shinfo->nr_frags = 0;
1452 } else {
1453 shinfo->xdp_frags_size += size;
1454 }
1455
1456 if (page_is_pfmemalloc(rx_swbd->page))
1457 xdp_buff_set_frag_pfmemalloc(xdp_buff);
1458
1459 frag = &shinfo->frags[shinfo->nr_frags];
1460 skb_frag_fill_page_desc(frag, rx_swbd->page, rx_swbd->page_offset,
1461 size);
1462
1463 shinfo->nr_frags++;
1464 }
1465
static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status,
1467 union enetc_rx_bd **rxbd, int *i,
1468 int *cleaned_cnt, struct xdp_buff *xdp_buff)
1469 {
1470 u16 size = le16_to_cpu((*rxbd)->r.buf_len);
1471
1472 xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq);
1473
1474 enetc_map_rx_buff_to_xdp(rx_ring, *i, xdp_buff, size);
1475 (*cleaned_cnt)++;
1476 enetc_rxbd_next(rx_ring, rxbd, i);
1477
1478 /* not last BD in frame? */
1479 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
1480 bd_status = le32_to_cpu((*rxbd)->r.lstatus);
1481 size = ENETC_RXB_DMA_SIZE_XDP;
1482
1483 if (bd_status & ENETC_RXBD_LSTATUS_F) {
1484 dma_rmb();
1485 size = le16_to_cpu((*rxbd)->r.buf_len);
1486 }
1487
1488 enetc_add_rx_buff_to_xdp(rx_ring, *i, size, xdp_buff);
1489 (*cleaned_cnt)++;
1490 enetc_rxbd_next(rx_ring, rxbd, i);
1491 }
1492 }
1493
1494 /* Convert RX buffer descriptors to TX buffer descriptors. These will be
1495 * recycled back into the RX ring in enetc_clean_tx_ring.
1496 */
static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *xdp_tx_arr,
1498 struct enetc_bdr *rx_ring,
1499 int rx_ring_first, int rx_ring_last)
1500 {
1501 int n = 0;
1502
1503 for (; rx_ring_first != rx_ring_last;
1504 n++, enetc_bdr_idx_inc(rx_ring, &rx_ring_first)) {
1505 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
1506 struct enetc_tx_swbd *tx_swbd = &xdp_tx_arr[n];
1507
1508 /* No need to dma_map, we already have DMA_BIDIRECTIONAL */
1509 tx_swbd->dma = rx_swbd->dma;
1510 tx_swbd->dir = rx_swbd->dir;
1511 tx_swbd->page = rx_swbd->page;
1512 tx_swbd->page_offset = rx_swbd->page_offset;
1513 tx_swbd->len = rx_swbd->len;
1514 tx_swbd->is_dma_page = true;
1515 tx_swbd->is_xdp_tx = true;
1516 tx_swbd->is_eof = false;
1517 }
1518
1519 /* We rely on caller providing an rx_ring_last > rx_ring_first */
1520 xdp_tx_arr[n - 1].is_eof = true;
1521
1522 return n;
1523 }
1524
static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
1526 int rx_ring_last)
1527 {
1528 while (rx_ring_first != rx_ring_last) {
1529 enetc_put_rx_buff(rx_ring,
1530 &rx_ring->rx_swbd[rx_ring_first]);
1531 enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
1532 }
1533 }
1534
static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
1536 struct napi_struct *napi, int work_limit,
1537 struct bpf_prog *prog)
1538 {
1539 int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0, xdp_redirect_frm_cnt = 0;
1540 struct enetc_tx_swbd xdp_tx_arr[ENETC_MAX_SKB_FRAGS] = {0};
1541 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
1542 int rx_frm_cnt = 0, rx_byte_cnt = 0;
1543 struct enetc_bdr *tx_ring;
1544 int cleaned_cnt, i;
1545 u32 xdp_act;
1546
1547 cleaned_cnt = enetc_bd_unused(rx_ring);
1548 /* next descriptor to process */
1549 i = rx_ring->next_to_clean;
1550
1551 while (likely(rx_frm_cnt < work_limit)) {
1552 union enetc_rx_bd *rxbd, *orig_rxbd;
1553 int orig_i, orig_cleaned_cnt;
1554 struct xdp_buff xdp_buff;
1555 struct sk_buff *skb;
1556 u32 bd_status;
1557 int err;
1558
1559 rxbd = enetc_rxbd(rx_ring, i);
1560 bd_status = le32_to_cpu(rxbd->r.lstatus);
1561 if (!bd_status)
1562 break;
1563
1564 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
1565 dma_rmb(); /* for reading other rxbd fields */
1566
1567 if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
1568 &rxbd, &i))
1569 break;
1570
1571 orig_rxbd = rxbd;
1572 orig_cleaned_cnt = cleaned_cnt;
1573 orig_i = i;
1574
1575 enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
1576 &cleaned_cnt, &xdp_buff);
1577
1578 /* When set, the outer VLAN header is extracted and reported
1579 * in the receive buffer descriptor. So rx_byte_cnt should
1580 * add the length of the extracted VLAN header.
1581 */
1582 if (bd_status & ENETC_RXBD_FLAG_VLAN)
1583 rx_byte_cnt += VLAN_HLEN;
1584 rx_byte_cnt += xdp_get_buff_len(&xdp_buff);
1585
1586 xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);
1587
1588 switch (xdp_act) {
1589 default:
1590 bpf_warn_invalid_xdp_action(rx_ring->ndev, prog, xdp_act);
1591 fallthrough;
1592 case XDP_ABORTED:
1593 trace_xdp_exception(rx_ring->ndev, prog, xdp_act);
1594 fallthrough;
1595 case XDP_DROP:
1596 enetc_xdp_drop(rx_ring, orig_i, i);
1597 rx_ring->stats.xdp_drops++;
1598 break;
1599 case XDP_PASS:
1600 rxbd = orig_rxbd;
1601 cleaned_cnt = orig_cleaned_cnt;
1602 i = orig_i;
1603
1604 skb = enetc_build_skb(rx_ring, bd_status, &rxbd,
1605 &i, &cleaned_cnt,
1606 ENETC_RXB_DMA_SIZE_XDP);
1607 if (unlikely(!skb))
1608 goto out;
1609
1610 napi_gro_receive(napi, skb);
1611 break;
1612 case XDP_TX:
1613 tx_ring = priv->xdp_tx_ring[rx_ring->index];
1614 if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) {
1615 enetc_xdp_drop(rx_ring, orig_i, i);
1616 tx_ring->stats.xdp_tx_drops++;
1617 break;
1618 }
1619
1620 xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr,
1621 rx_ring,
1622 orig_i, i);
1623
1624 if (!enetc_xdp_tx(tx_ring, xdp_tx_arr, xdp_tx_bd_cnt)) {
1625 enetc_xdp_drop(rx_ring, orig_i, i);
1626 tx_ring->stats.xdp_tx_drops++;
1627 } else {
1628 tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
1629 rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
1630 xdp_tx_frm_cnt++;
1631 /* The XDP_TX enqueue was successful, so we
1632 * need to scrub the RX software BDs because
1633 * the ownership of the buffers no longer
1634 * belongs to the RX ring, and we must prevent
1635 * enetc_refill_rx_ring() from reusing
1636 * rx_swbd->page.
1637 */
1638 while (orig_i != i) {
1639 rx_ring->rx_swbd[orig_i].page = NULL;
1640 enetc_bdr_idx_inc(rx_ring, &orig_i);
1641 }
1642 }
1643 break;
1644 case XDP_REDIRECT:
1645 err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
1646 if (unlikely(err)) {
1647 enetc_xdp_drop(rx_ring, orig_i, i);
1648 rx_ring->stats.xdp_redirect_failures++;
1649 } else {
1650 while (orig_i != i) {
1651 enetc_flip_rx_buff(rx_ring,
1652 &rx_ring->rx_swbd[orig_i]);
1653 enetc_bdr_idx_inc(rx_ring, &orig_i);
1654 }
1655 xdp_redirect_frm_cnt++;
1656 rx_ring->stats.xdp_redirect++;
1657 }
1658 }
1659
1660 rx_frm_cnt++;
1661 }
1662
1663 out:
1664 rx_ring->next_to_clean = i;
1665
1666 rx_ring->stats.packets += rx_frm_cnt;
1667 rx_ring->stats.bytes += rx_byte_cnt;
1668
1669 if (xdp_redirect_frm_cnt)
1670 xdp_do_flush();
1671
1672 if (xdp_tx_frm_cnt)
1673 enetc_update_tx_ring_tail(tx_ring);
1674
1675 if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight)
1676 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
1677 rx_ring->xdp.xdp_tx_in_flight);
1678
1679 return rx_frm_cnt;
1680 }
1681
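/* NAPI poll handler for one interrupt vector: cleans every associated TX
 * ring, then the RX ring (via the XDP path when a program is attached), and
 * only re-enables the per-ring interrupts once all rings completed under
 * budget; otherwise it returns the full budget so NAPI keeps polling. RX
 * interrupt moderation is updated through net_dim when enabled.
 */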
static int enetc_poll(struct napi_struct *napi, int budget)
1683 {
1684 struct enetc_int_vector
1685 *v = container_of(napi, struct enetc_int_vector, napi);
1686 struct enetc_bdr *rx_ring = &v->rx_ring;
1687 struct bpf_prog *prog;
1688 bool complete = true;
1689 int work_done;
1690 int i;
1691
1692 enetc_lock_mdio();
1693
1694 for (i = 0; i < v->count_tx_rings; i++)
1695 if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
1696 complete = false;
1697
1698 prog = rx_ring->xdp.prog;
1699 if (prog)
1700 work_done = enetc_clean_rx_ring_xdp(rx_ring, napi, budget, prog);
1701 else
1702 work_done = enetc_clean_rx_ring(rx_ring, napi, budget);
1703 if (work_done == budget)
1704 complete = false;
1705 if (work_done)
1706 v->rx_napi_work = true;
1707
1708 if (!complete) {
1709 enetc_unlock_mdio();
1710 return budget;
1711 }
1712
1713 napi_complete_done(napi, work_done);
1714
1715 if (likely(v->rx_dim_en))
1716 enetc_rx_net_dim(v);
1717
1718 v->rx_napi_work = false;
1719
1720 /* enable interrupts */
1721 enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
1722
1723 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
1724 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
1725 ENETC_TBIER_TXTIE);
1726
1727 enetc_unlock_mdio();
1728
1729 return work_done;
1730 }
1731
1732 /* Probing and Init */
1733 #define ENETC_MAX_RFS_SIZE 64
void enetc_get_si_caps(struct enetc_si *si)
1735 {
1736 struct enetc_hw *hw = &si->hw;
1737 u32 val;
1738
1739 /* find out how many of various resources we have to work with */
1740 val = enetc_rd(hw, ENETC_SICAPR0);
1741 si->num_rx_rings = (val >> 16) & 0xff;
1742 si->num_tx_rings = val & 0xff;
1743
1744 val = enetc_rd(hw, ENETC_SIPCAPR0);
1745 if (val & ENETC_SIPCAPR0_RFS) {
1746 val = enetc_rd(hw, ENETC_SIRFSCAPR);
1747 si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
1748 si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
1749 } else {
		/* ENETC that does not support RFS */
1751 si->num_fs_entries = 0;
1752 }
1753
1754 si->num_rss = 0;
1755 val = enetc_rd(hw, ENETC_SIPCAPR0);
1756 if (val & ENETC_SIPCAPR0_RSS) {
1757 u32 rss;
1758
1759 rss = enetc_rd(hw, ENETC_SIRSSCAPR);
1760 si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
1761 }
1762 }
1763 EXPORT_SYMBOL_GPL(enetc_get_si_caps);
1764
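/* Allocate the coherent DMA memory backing a BD ring. The hardware expects
 * the ring base to be 128-byte aligned; dma_alloc_coherent() is typically at
 * least page aligned, so the alignment check below is a defensive sanity
 * check rather than an expected failure path.
 */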
static int enetc_dma_alloc_bdr(struct enetc_bdr_resource *res)
1766 {
1767 size_t bd_base_size = res->bd_count * res->bd_size;
1768
1769 res->bd_base = dma_alloc_coherent(res->dev, bd_base_size,
1770 &res->bd_dma_base, GFP_KERNEL);
1771 if (!res->bd_base)
1772 return -ENOMEM;
1773
1774 /* h/w requires 128B alignment */
1775 if (!IS_ALIGNED(res->bd_dma_base, 128)) {
1776 dma_free_coherent(res->dev, bd_base_size, res->bd_base,
1777 res->bd_dma_base);
1778 return -EINVAL;
1779 }
1780
1781 return 0;
1782 }
1783
static void enetc_dma_free_bdr(const struct enetc_bdr_resource *res)
1785 {
1786 size_t bd_base_size = res->bd_count * res->bd_size;
1787
1788 dma_free_coherent(res->dev, bd_base_size, res->bd_base,
1789 res->bd_dma_base);
1790 }
1791
static int enetc_alloc_tx_resource(struct enetc_bdr_resource *res,
1793 struct device *dev, size_t bd_count)
1794 {
1795 int err;
1796
1797 res->dev = dev;
1798 res->bd_count = bd_count;
1799 res->bd_size = sizeof(union enetc_tx_bd);
1800
1801 res->tx_swbd = vcalloc(bd_count, sizeof(*res->tx_swbd));
1802 if (!res->tx_swbd)
1803 return -ENOMEM;
1804
1805 err = enetc_dma_alloc_bdr(res);
1806 if (err)
1807 goto err_alloc_bdr;
1808
1809 res->tso_headers = dma_alloc_coherent(dev, bd_count * TSO_HEADER_SIZE,
1810 &res->tso_headers_dma,
1811 GFP_KERNEL);
1812 if (!res->tso_headers) {
1813 err = -ENOMEM;
1814 goto err_alloc_tso;
1815 }
1816
1817 return 0;
1818
1819 err_alloc_tso:
1820 enetc_dma_free_bdr(res);
1821 err_alloc_bdr:
1822 vfree(res->tx_swbd);
1823 res->tx_swbd = NULL;
1824
1825 return err;
1826 }
1827
static void enetc_free_tx_resource(const struct enetc_bdr_resource *res)
1829 {
1830 dma_free_coherent(res->dev, res->bd_count * TSO_HEADER_SIZE,
1831 res->tso_headers, res->tso_headers_dma);
1832 enetc_dma_free_bdr(res);
1833 vfree(res->tx_swbd);
1834 }
1835
1836 static struct enetc_bdr_resource *
enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
1838 {
1839 struct enetc_bdr_resource *tx_res;
1840 int i, err;
1841
1842 tx_res = kcalloc(priv->num_tx_rings, sizeof(*tx_res), GFP_KERNEL);
1843 if (!tx_res)
1844 return ERR_PTR(-ENOMEM);
1845
1846 for (i = 0; i < priv->num_tx_rings; i++) {
1847 struct enetc_bdr *tx_ring = priv->tx_ring[i];
1848
1849 err = enetc_alloc_tx_resource(&tx_res[i], tx_ring->dev,
1850 tx_ring->bd_count);
1851 if (err)
1852 goto fail;
1853 }
1854
1855 return tx_res;
1856
1857 fail:
1858 while (i-- > 0)
1859 enetc_free_tx_resource(&tx_res[i]);
1860
1861 kfree(tx_res);
1862
1863 return ERR_PTR(err);
1864 }
1865
static void enetc_free_tx_resources(const struct enetc_bdr_resource *tx_res,
1867 size_t num_resources)
1868 {
1869 size_t i;
1870
1871 for (i = 0; i < num_resources; i++)
1872 enetc_free_tx_resource(&tx_res[i]);
1873
1874 kfree(tx_res);
1875 }
1876
1877 static int enetc_alloc_rx_resource(struct enetc_bdr_resource *res,
1878 struct device *dev, size_t bd_count,
1879 bool extended)
1880 {
1881 int err;
1882
1883 res->dev = dev;
1884 res->bd_count = bd_count;
1885 res->bd_size = sizeof(union enetc_rx_bd);
1886 if (extended)
1887 res->bd_size *= 2;
1888
1889 res->rx_swbd = vcalloc(bd_count, sizeof(struct enetc_rx_swbd));
1890 if (!res->rx_swbd)
1891 return -ENOMEM;
1892
1893 err = enetc_dma_alloc_bdr(res);
1894 if (err) {
1895 vfree(res->rx_swbd);
1896 return err;
1897 }
1898
1899 return 0;
1900 }
1901
1902 static void enetc_free_rx_resource(const struct enetc_bdr_resource *res)
1903 {
1904 enetc_dma_free_bdr(res);
1905 vfree(res->rx_swbd);
1906 }
1907
1908 static struct enetc_bdr_resource *
1909 enetc_alloc_rx_resources(struct enetc_ndev_priv *priv, bool extended)
1910 {
1911 struct enetc_bdr_resource *rx_res;
1912 int i, err;
1913
1914 rx_res = kcalloc(priv->num_rx_rings, sizeof(*rx_res), GFP_KERNEL);
1915 if (!rx_res)
1916 return ERR_PTR(-ENOMEM);
1917
1918 for (i = 0; i < priv->num_rx_rings; i++) {
1919 struct enetc_bdr *rx_ring = priv->rx_ring[i];
1920
1921 err = enetc_alloc_rx_resource(&rx_res[i], rx_ring->dev,
1922 rx_ring->bd_count, extended);
1923 if (err)
1924 goto fail;
1925 }
1926
1927 return rx_res;
1928
1929 fail:
1930 while (i-- > 0)
1931 enetc_free_rx_resource(&rx_res[i]);
1932
1933 kfree(rx_res);
1934
1935 return ERR_PTR(err);
1936 }
1937
1938 static void enetc_free_rx_resources(const struct enetc_bdr_resource *rx_res,
1939 size_t num_resources)
1940 {
1941 size_t i;
1942
1943 for (i = 0; i < num_resources; i++)
1944 enetc_free_rx_resource(&rx_res[i]);
1945
1946 kfree(rx_res);
1947 }
1948
1949 static void enetc_assign_tx_resource(struct enetc_bdr *tx_ring,
1950 const struct enetc_bdr_resource *res)
1951 {
1952 tx_ring->bd_base = res ? res->bd_base : NULL;
1953 tx_ring->bd_dma_base = res ? res->bd_dma_base : 0;
1954 tx_ring->tx_swbd = res ? res->tx_swbd : NULL;
1955 tx_ring->tso_headers = res ? res->tso_headers : NULL;
1956 tx_ring->tso_headers_dma = res ? res->tso_headers_dma : 0;
1957 }
1958
1959 static void enetc_assign_rx_resource(struct enetc_bdr *rx_ring,
1960 const struct enetc_bdr_resource *res)
1961 {
1962 rx_ring->bd_base = res ? res->bd_base : NULL;
1963 rx_ring->bd_dma_base = res ? res->bd_dma_base : 0;
1964 rx_ring->rx_swbd = res ? res->rx_swbd : NULL;
1965 }
1966
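/* The "assign" helpers publish a freshly allocated resource array to the
 * rings and free whatever array was previously in use. Passing res == NULL
 * detaches the rings from their buffers (see enetc_close()), avoiding
 * dangling pointers once the resources are gone.
 */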
1967 static void enetc_assign_tx_resources(struct enetc_ndev_priv *priv,
1968 const struct enetc_bdr_resource *res)
1969 {
1970 int i;
1971
1972 if (priv->tx_res)
1973 enetc_free_tx_resources(priv->tx_res, priv->num_tx_rings);
1974
1975 for (i = 0; i < priv->num_tx_rings; i++) {
1976 enetc_assign_tx_resource(priv->tx_ring[i],
1977 res ? &res[i] : NULL);
1978 }
1979
1980 priv->tx_res = res;
1981 }
1982
1983 static void enetc_assign_rx_resources(struct enetc_ndev_priv *priv,
1984 const struct enetc_bdr_resource *res)
1985 {
1986 int i;
1987
1988 if (priv->rx_res)
1989 enetc_free_rx_resources(priv->rx_res, priv->num_rx_rings);
1990
1991 for (i = 0; i < priv->num_rx_rings; i++) {
1992 enetc_assign_rx_resource(priv->rx_ring[i],
1993 res ? &res[i] : NULL);
1994 }
1995
1996 priv->rx_res = res;
1997 }
1998
1999 static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
2000 {
2001 int i;
2002
2003 for (i = 0; i < tx_ring->bd_count; i++) {
2004 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
2005
2006 enetc_free_tx_frame(tx_ring, tx_swbd);
2007 }
2008 }
2009
2010 static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
2011 {
2012 int i;
2013
2014 for (i = 0; i < rx_ring->bd_count; i++) {
2015 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
2016
2017 if (!rx_swbd->page)
2018 continue;
2019
2020 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
2021 rx_swbd->dir);
2022 __free_page(rx_swbd->page);
2023 rx_swbd->page = NULL;
2024 }
2025 }
2026
2027 static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
2028 {
2029 int i;
2030
2031 for (i = 0; i < priv->num_rx_rings; i++)
2032 enetc_free_rx_ring(priv->rx_ring[i]);
2033
2034 for (i = 0; i < priv->num_tx_rings; i++)
2035 enetc_free_tx_ring(priv->tx_ring[i]);
2036 }
2037
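/* Populate the RSS indirection table with a round-robin spread over the RX
 * rings. For example, with si->num_rss == 64 and num_groups == 4, the table
 * becomes 0, 1, 2, 3, 0, 1, 2, 3, ... so flows are distributed evenly across
 * the four rings.
 */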
2038 static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
2039 {
2040 int *rss_table;
2041 int i;
2042
2043 rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
2044 if (!rss_table)
2045 return -ENOMEM;
2046
2047 /* Set up RSS table defaults */
2048 for (i = 0; i < si->num_rss; i++)
2049 rss_table[i] = i % num_groups;
2050
2051 enetc_set_rss_table(si, rss_table, si->num_rss);
2052
2053 kfree(rss_table);
2054
2055 return 0;
2056 }
2057
2058 int enetc_configure_si(struct enetc_ndev_priv *priv)
2059 {
2060 struct enetc_si *si = priv->si;
2061 struct enetc_hw *hw = &si->hw;
2062 int err;
2063
2064 /* set SI cache attributes */
2065 enetc_wr(hw, ENETC_SICAR0,
2066 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
2067 enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
2068 /* enable SI */
2069 enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
2070
2071 /* TODO: RSS support for i.MX95 will be added later, and the
2072 * is_enetc_rev1() condition will be removed
2073 */
2074 if (si->num_rss && is_enetc_rev1(si)) {
2075 err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
2076 if (err)
2077 return err;
2078 }
2079
2080 return 0;
2081 }
2082 EXPORT_SYMBOL_GPL(enetc_configure_si);
2083
2084 void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
2085 {
2086 struct enetc_si *si = priv->si;
2087 int cpus = num_online_cpus();
2088
2089 priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE;
2090 priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE;
2091
2092 /* Enable all available TX rings in order to configure as many
2093 * priorities as possible, when needed.
2094 * TODO: Make # of TX rings run-time configurable
2095 */
2096 priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
2097 priv->num_tx_rings = si->num_tx_rings;
2098 priv->bdr_int_num = priv->num_rx_rings;
2099 priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
2100 priv->tx_ictt = enetc_usecs_to_cycles(600, priv->sysclk_freq);
2101 }
2102 EXPORT_SYMBOL_GPL(enetc_init_si_rings_params);
2103
2104 int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
2105 {
2106 struct enetc_si *si = priv->si;
2107
2108 priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
2109 GFP_KERNEL);
2110 if (!priv->cls_rules)
2111 return -ENOMEM;
2112
2113 return 0;
2114 }
2115 EXPORT_SYMBOL_GPL(enetc_alloc_si_resources);
2116
2117 void enetc_free_si_resources(struct enetc_ndev_priv *priv)
2118 {
2119 kfree(priv->cls_rules);
2120 }
2121 EXPORT_SYMBOL_GPL(enetc_free_si_resources);
2122
2123 static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
2124 {
2125 int idx = tx_ring->index;
2126 u32 tbmr;
2127
2128 enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
2129 lower_32_bits(tx_ring->bd_dma_base));
2130
2131 enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
2132 upper_32_bits(tx_ring->bd_dma_base));
2133
2134 WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
2135 enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
2136 ENETC_RTBLENR_LEN(tx_ring->bd_count));
2137
2138 /* clearing PI/CI registers for Tx not supported, adjust sw indexes */
2139 tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
2140 tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
2141
2142 /* enable Tx ints by setting pkt thr to 1 */
2143 enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
2144
2145 tbmr = ENETC_TBMR_SET_PRIO(tx_ring->prio);
2146 if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
2147 tbmr |= ENETC_TBMR_VIH;
2148
2149 /* enable ring */
2150 enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
2151
2152 tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
2153 tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
2154 tx_ring->idr = hw->reg + ENETC_SITXIDR;
2155 }
2156
2157 static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
2158 bool extended)
2159 {
2160 int idx = rx_ring->index;
2161 u32 rbmr = 0;
2162
2163 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
2164 lower_32_bits(rx_ring->bd_dma_base));
2165
2166 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
2167 upper_32_bits(rx_ring->bd_dma_base));
2168
2169 WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
2170 enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
2171 ENETC_RTBLENR_LEN(rx_ring->bd_count));
2172
2173 if (rx_ring->xdp.prog)
2174 enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE_XDP);
2175 else
2176 enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
2177
2178 /* Also prepare the consumer index in case page allocation never
2179 * succeeds. In that case, hardware will never advance producer index
2180 * to match consumer index, and will drop all frames.
2181 */
2182 enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
2183 enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1);
2184
2185 /* enable Rx ints by setting pkt thr to 1 */
2186 enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
2187
2188 rx_ring->ext_en = extended;
2189 if (rx_ring->ext_en)
2190 rbmr |= ENETC_RBMR_BDS;
2191
2192 if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
2193 rbmr |= ENETC_RBMR_VTE;
2194
2195 rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
2196 rx_ring->idr = hw->reg + ENETC_SIRXIDR;
2197
2198 rx_ring->next_to_clean = 0;
2199 rx_ring->next_to_use = 0;
2200 rx_ring->next_to_alloc = 0;
2201
2202 enetc_lock_mdio();
2203 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
2204 enetc_unlock_mdio();
2205
2206 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
2207 }
2208
2209 static void enetc_setup_bdrs(struct enetc_ndev_priv *priv, bool extended)
2210 {
2211 struct enetc_hw *hw = &priv->si->hw;
2212 int i;
2213
2214 for (i = 0; i < priv->num_tx_rings; i++)
2215 enetc_setup_txbdr(hw, priv->tx_ring[i]);
2216
2217 for (i = 0; i < priv->num_rx_rings; i++)
2218 enetc_setup_rxbdr(hw, priv->rx_ring[i], extended);
2219 }
2220
2221 static void enetc_enable_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
2222 {
2223 int idx = tx_ring->index;
2224 u32 tbmr;
2225
2226 tbmr = enetc_txbdr_rd(hw, idx, ENETC_TBMR);
2227 tbmr |= ENETC_TBMR_EN;
2228 enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
2229 }
2230
2231 static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
2232 {
2233 int idx = rx_ring->index;
2234 u32 rbmr;
2235
2236 rbmr = enetc_rxbdr_rd(hw, idx, ENETC_RBMR);
2237 rbmr |= ENETC_RBMR_EN;
2238 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
2239 }
2240
2241 static void enetc_enable_rx_bdrs(struct enetc_ndev_priv *priv)
2242 {
2243 struct enetc_hw *hw = &priv->si->hw;
2244 int i;
2245
2246 for (i = 0; i < priv->num_rx_rings; i++)
2247 enetc_enable_rxbdr(hw, priv->rx_ring[i]);
2248 }
2249
2250 static void enetc_enable_tx_bdrs(struct enetc_ndev_priv *priv)
2251 {
2252 struct enetc_hw *hw = &priv->si->hw;
2253 int i;
2254
2255 for (i = 0; i < priv->num_tx_rings; i++)
2256 enetc_enable_txbdr(hw, priv->tx_ring[i]);
2257 }
2258
2259 static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
2260 {
2261 int idx = rx_ring->index;
2262
2263 /* disable EN bit on ring */
2264 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
2265 }
2266
2267 static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
2268 {
2269 int idx = rx_ring->index;
2270
2271 /* disable EN bit on ring */
2272 enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
2273 }
2274
2275 static void enetc_disable_rx_bdrs(struct enetc_ndev_priv *priv)
2276 {
2277 struct enetc_hw *hw = &priv->si->hw;
2278 int i;
2279
2280 for (i = 0; i < priv->num_rx_rings; i++)
2281 enetc_disable_rxbdr(hw, priv->rx_ring[i]);
2282 }
2283
2284 static void enetc_disable_tx_bdrs(struct enetc_ndev_priv *priv)
2285 {
2286 struct enetc_hw *hw = &priv->si->hw;
2287 int i;
2288
2289 for (i = 0; i < priv->num_tx_rings; i++)
2290 enetc_disable_txbdr(hw, priv->tx_ring[i]);
2291 }
2292
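/* Poll the TX ring busy flag with exponential backoff (8, 16, 32 and 64 ms,
 * roughly 120 ms in total) and warn if the ring has not gone idle by then.
 */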
2293 static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
2294 {
2295 int delay = 8, timeout = 100;
2296 int idx = tx_ring->index;
2297
2298 /* wait for busy to clear */
2299 while (delay < timeout &&
2300 enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
2301 msleep(delay);
2302 delay *= 2;
2303 }
2304
2305 if (delay >= timeout)
2306 netdev_warn(tx_ring->ndev, "timeout waiting for tx ring #%d to clear\n",
2307 idx);
2308 }
2309
2310 static void enetc_wait_bdrs(struct enetc_ndev_priv *priv)
2311 {
2312 struct enetc_hw *hw = &priv->si->hw;
2313 int i;
2314
2315 for (i = 0; i < priv->num_tx_rings; i++)
2316 enetc_wait_txbdr(hw, priv->tx_ring[i]);
2317 }
2318
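/* Request the IRQ for each Rx/Tx interrupt vector. IRQF_NO_AUTOEN leaves the
 * lines disabled at request time; they are only enabled from enetc_start(),
 * once NAPI is up. The affinity hint spreads the vectors round-robin across
 * the online CPUs.
 */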
2319 static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
2320 {
2321 struct pci_dev *pdev = priv->si->pdev;
2322 struct enetc_hw *hw = &priv->si->hw;
2323 int i, j, err;
2324
2325 for (i = 0; i < priv->bdr_int_num; i++) {
2326 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
2327 struct enetc_int_vector *v = priv->int_vector[i];
2328 int entry = ENETC_BDR_INT_BASE_IDX + i;
2329
2330 snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
2331 priv->ndev->name, i);
2332 err = request_irq(irq, enetc_msix, IRQF_NO_AUTOEN, v->name, v);
2333 if (err) {
2334 dev_err(priv->dev, "request_irq() failed!\n");
2335 goto irq_err;
2336 }
2337
2338 v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
2339 v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
2340 v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1);
2341
2342 enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
2343
2344 for (j = 0; j < v->count_tx_rings; j++) {
2345 int idx = v->tx_ring[j].index;
2346
2347 enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
2348 }
2349 irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus()));
2350 }
2351
2352 return 0;
2353
2354 irq_err:
2355 while (i--) {
2356 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
2357
2358 irq_set_affinity_hint(irq, NULL);
2359 free_irq(irq, priv->int_vector[i]);
2360 }
2361
2362 return err;
2363 }
2364
2365 static void enetc_free_irqs(struct enetc_ndev_priv *priv)
2366 {
2367 struct pci_dev *pdev = priv->si->pdev;
2368 int i;
2369
2370 for (i = 0; i < priv->bdr_int_num; i++) {
2371 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
2372
2373 irq_set_affinity_hint(irq, NULL);
2374 free_irq(irq, priv->int_vector[i]);
2375 }
2376 }
2377
2378 static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
2379 {
2380 struct enetc_hw *hw = &priv->si->hw;
2381 u32 icpt, ictt;
2382 int i;
2383
2384 /* enable Tx & Rx event indication */
2385 if (priv->ic_mode &
2386 (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) {
2387 icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR);
2388 /* init to non-0 minimum, will be adjusted later */
2389 ictt = 0x1;
2390 } else {
2391 icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */
2392 ictt = 0;
2393 }
2394
2395 for (i = 0; i < priv->num_rx_rings; i++) {
2396 enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt);
2397 enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt);
2398 enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
2399 }
2400
2401 if (priv->ic_mode & ENETC_IC_TX_MANUAL)
2402 icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR);
2403 else
2404 icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */
2405
2406 for (i = 0; i < priv->num_tx_rings; i++) {
2407 enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt);
2408 enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt);
2409 enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE);
2410 }
2411 }
2412
2413 static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
2414 {
2415 struct enetc_hw *hw = &priv->si->hw;
2416 int i;
2417
2418 for (i = 0; i < priv->num_tx_rings; i++)
2419 enetc_txbdr_wr(hw, i, ENETC_TBIER, 0);
2420
2421 for (i = 0; i < priv->num_rx_rings; i++)
2422 enetc_rxbdr_wr(hw, i, ENETC_RBIER, 0);
2423 }
2424
2425 static int enetc_phylink_connect(struct net_device *ndev)
2426 {
2427 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2428 struct ethtool_keee edata;
2429 int err;
2430
2431 if (!priv->phylink) {
2432 /* phy-less mode */
2433 netif_carrier_on(ndev);
2434 return 0;
2435 }
2436
2437 err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
2438 if (err) {
2439 dev_err(&ndev->dev, "could not attach to PHY\n");
2440 return err;
2441 }
2442
2443 /* disable EEE autoneg, until ENETC driver supports it */
2444 memset(&edata, 0, sizeof(struct ethtool_keee));
2445 phylink_ethtool_set_eee(priv->phylink, &edata);
2446
2447 phylink_start(priv->phylink);
2448
2449 return 0;
2450 }
2451
2452 static void enetc_tx_onestep_tstamp(struct work_struct *work)
2453 {
2454 struct enetc_ndev_priv *priv;
2455 struct sk_buff *skb;
2456
2457 priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp);
2458
2459 netif_tx_lock_bh(priv->ndev);
2460
2461 clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags);
2462 skb = skb_dequeue(&priv->tx_skbs);
2463 if (skb)
2464 enetc_start_xmit(skb, priv->ndev);
2465
2466 netif_tx_unlock_bh(priv->ndev);
2467 }
2468
2469 static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv)
2470 {
2471 INIT_WORK(&priv->tx_onestep_tstamp, enetc_tx_onestep_tstamp);
2472 skb_queue_head_init(&priv->tx_skbs);
2473 }
2474
2475 void enetc_start(struct net_device *ndev)
2476 {
2477 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2478 int i;
2479
2480 enetc_setup_interrupts(priv);
2481
2482 for (i = 0; i < priv->bdr_int_num; i++) {
2483 int irq = pci_irq_vector(priv->si->pdev,
2484 ENETC_BDR_INT_BASE_IDX + i);
2485
2486 napi_enable(&priv->int_vector[i]->napi);
2487 enable_irq(irq);
2488 }
2489
2490 enetc_enable_tx_bdrs(priv);
2491
2492 enetc_enable_rx_bdrs(priv);
2493
2494 netif_tx_start_all_queues(ndev);
2495
2496 clear_bit(ENETC_TX_DOWN, &priv->flags);
2497 }
2498 EXPORT_SYMBOL_GPL(enetc_start);
2499
2500 int enetc_open(struct net_device *ndev)
2501 {
2502 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2503 struct enetc_bdr_resource *tx_res, *rx_res;
2504 bool extended;
2505 int err;
2506
2507 extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
2508
2509 err = clk_prepare_enable(priv->ref_clk);
2510 if (err)
2511 return err;
2512
2513 err = enetc_setup_irqs(priv);
2514 if (err)
2515 goto err_setup_irqs;
2516
2517 err = enetc_phylink_connect(ndev);
2518 if (err)
2519 goto err_phy_connect;
2520
2521 tx_res = enetc_alloc_tx_resources(priv);
2522 if (IS_ERR(tx_res)) {
2523 err = PTR_ERR(tx_res);
2524 goto err_alloc_tx;
2525 }
2526
2527 rx_res = enetc_alloc_rx_resources(priv, extended);
2528 if (IS_ERR(rx_res)) {
2529 err = PTR_ERR(rx_res);
2530 goto err_alloc_rx;
2531 }
2532
2533 enetc_tx_onestep_tstamp_init(priv);
2534 enetc_assign_tx_resources(priv, tx_res);
2535 enetc_assign_rx_resources(priv, rx_res);
2536 enetc_setup_bdrs(priv, extended);
2537 enetc_start(ndev);
2538
2539 return 0;
2540
2541 err_alloc_rx:
2542 enetc_free_tx_resources(tx_res, priv->num_tx_rings);
2543 err_alloc_tx:
2544 if (priv->phylink)
2545 phylink_disconnect_phy(priv->phylink);
2546 err_phy_connect:
2547 enetc_free_irqs(priv);
2548 err_setup_irqs:
2549 clk_disable_unprepare(priv->ref_clk);
2550
2551 return err;
2552 }
2553 EXPORT_SYMBOL_GPL(enetc_open);
2554
2555 void enetc_stop(struct net_device *ndev)
2556 {
2557 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2558 int i;
2559
2560 set_bit(ENETC_TX_DOWN, &priv->flags);
2561
2562 netif_tx_stop_all_queues(ndev);
2563
2564 enetc_disable_rx_bdrs(priv);
2565
2566 enetc_wait_bdrs(priv);
2567
2568 enetc_disable_tx_bdrs(priv);
2569
2570 for (i = 0; i < priv->bdr_int_num; i++) {
2571 int irq = pci_irq_vector(priv->si->pdev,
2572 ENETC_BDR_INT_BASE_IDX + i);
2573
2574 disable_irq(irq);
2575 napi_synchronize(&priv->int_vector[i]->napi);
2576 napi_disable(&priv->int_vector[i]->napi);
2577 }
2578
2579 enetc_clear_interrupts(priv);
2580 }
2581 EXPORT_SYMBOL_GPL(enetc_stop);
2582
2583 int enetc_close(struct net_device *ndev)
2584 {
2585 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2586
2587 enetc_stop(ndev);
2588
2589 if (priv->phylink) {
2590 phylink_stop(priv->phylink);
2591 phylink_disconnect_phy(priv->phylink);
2592 } else {
2593 netif_carrier_off(ndev);
2594 }
2595
2596 enetc_free_rxtx_rings(priv);
2597
2598 /* Avoids dangling pointers and also frees old resources */
2599 enetc_assign_rx_resources(priv, NULL);
2600 enetc_assign_tx_resources(priv, NULL);
2601
2602 enetc_free_irqs(priv);
2603 clk_disable_unprepare(priv->ref_clk);
2604
2605 return 0;
2606 }
2607 EXPORT_SYMBOL_GPL(enetc_close);
2608
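/* enetc_reconfigure() implements a make-before-break ring swap: the new BD
 * resources are allocated while the old rings are still live, the interface
 * is stopped only once allocation has succeeded, an optional callback applies
 * the configuration change while the rings are quiesced, and the rings are
 * then restarted (on the old resources if the callback fails). A hedged usage
 * sketch, with my_apply_cfg() and my_ctx as purely illustrative names (the
 * in-tree user is enetc_reconfigure_xdp_cb() below):
 *
 *	static int my_apply_cfg(struct enetc_ndev_priv *priv, void *ctx)
 *	{
 *		// apply the new setting; returning an error restores the
 *		// old rings
 *		return 0;
 *	}
 *
 *	err = enetc_reconfigure(priv, extended, my_apply_cfg, &my_ctx);
 */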
2609 static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended,
2610 int (*cb)(struct enetc_ndev_priv *priv, void *ctx),
2611 void *ctx)
2612 {
2613 struct enetc_bdr_resource *tx_res, *rx_res;
2614 int err;
2615
2616 ASSERT_RTNL();
2617
2618 /* If the interface is down, run the callback right away,
2619 * without reconfiguration.
2620 */
2621 if (!netif_running(priv->ndev)) {
2622 if (cb) {
2623 err = cb(priv, ctx);
2624 if (err)
2625 return err;
2626 }
2627
2628 return 0;
2629 }
2630
2631 tx_res = enetc_alloc_tx_resources(priv);
2632 if (IS_ERR(tx_res)) {
2633 err = PTR_ERR(tx_res);
2634 goto out;
2635 }
2636
2637 rx_res = enetc_alloc_rx_resources(priv, extended);
2638 if (IS_ERR(rx_res)) {
2639 err = PTR_ERR(rx_res);
2640 goto out_free_tx_res;
2641 }
2642
2643 enetc_stop(priv->ndev);
2644 enetc_free_rxtx_rings(priv);
2645
2646 /* Interface is down, run optional callback now */
2647 if (cb) {
2648 err = cb(priv, ctx);
2649 if (err)
2650 goto out_restart;
2651 }
2652
2653 enetc_assign_tx_resources(priv, tx_res);
2654 enetc_assign_rx_resources(priv, rx_res);
2655 enetc_setup_bdrs(priv, extended);
2656 enetc_start(priv->ndev);
2657
2658 return 0;
2659
2660 out_restart:
2661 enetc_setup_bdrs(priv, extended);
2662 enetc_start(priv->ndev);
2663 enetc_free_rx_resources(rx_res, priv->num_rx_rings);
2664 out_free_tx_res:
2665 enetc_free_tx_resources(tx_res, priv->num_tx_rings);
2666 out:
2667 return err;
2668 }
2669
2670 static void enetc_debug_tx_ring_prios(struct enetc_ndev_priv *priv)
2671 {
2672 int i;
2673
2674 for (i = 0; i < priv->num_tx_rings; i++)
2675 netdev_dbg(priv->ndev, "TX ring %d prio %d\n", i,
2676 priv->tx_ring[i]->prio);
2677 }
2678
2679 void enetc_reset_tc_mqprio(struct net_device *ndev)
2680 {
2681 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2682 struct enetc_hw *hw = &priv->si->hw;
2683 struct enetc_bdr *tx_ring;
2684 int num_stack_tx_queues;
2685 int i;
2686
2687 num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
2688
2689 netdev_reset_tc(ndev);
2690 netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
2691 priv->min_num_stack_tx_queues = num_possible_cpus();
2692
2693 /* Reset all ring priorities to 0 */
2694 for (i = 0; i < priv->num_tx_rings; i++) {
2695 tx_ring = priv->tx_ring[i];
2696 tx_ring->prio = 0;
2697 enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
2698 }
2699
2700 enetc_debug_tx_ring_prios(priv);
2701
2702 enetc_change_preemptible_tcs(priv, 0);
2703 }
2704 EXPORT_SYMBOL_GPL(enetc_reset_tc_mqprio);
2705
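/* Offload an mqprio configuration by mapping each traffic class to a group
 * of TX rings and using the TC number as the ring priority. For example,
 * num_tc = 2 with offset = {0, 4} and count = {4, 4} gives rings 0-3
 * priority 0, rings 4-7 priority 1, and exposes 8 stack TX queues.
 */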
2706 int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
2707 {
2708 struct tc_mqprio_qopt_offload *mqprio = type_data;
2709 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2710 struct tc_mqprio_qopt *qopt = &mqprio->qopt;
2711 struct enetc_hw *hw = &priv->si->hw;
2712 int num_stack_tx_queues = 0;
2713 struct enetc_bdr *tx_ring;
2714 u8 num_tc = qopt->num_tc;
2715 int offset, count;
2716 int err, tc, q;
2717
2718 if (!num_tc) {
2719 enetc_reset_tc_mqprio(ndev);
2720 return 0;
2721 }
2722
2723 err = netdev_set_num_tc(ndev, num_tc);
2724 if (err)
2725 return err;
2726
2727 for (tc = 0; tc < num_tc; tc++) {
2728 offset = qopt->offset[tc];
2729 count = qopt->count[tc];
2730 num_stack_tx_queues += count;
2731
2732 err = netdev_set_tc_queue(ndev, tc, count, offset);
2733 if (err)
2734 goto err_reset_tc;
2735
2736 for (q = offset; q < offset + count; q++) {
2737 tx_ring = priv->tx_ring[q];
2738 /* The prio_tc_map is skb_tx_hash()'s way of selecting
2739 * between TX queues based on skb->priority. As such,
2740 * there's nothing to offload based on it.
2741 * Make the mqprio "traffic class" be the priority of
2742 * this ring group, and leave the Tx IPV to traffic
2743 * class mapping as its default mapping value of 1:1.
2744 */
2745 tx_ring->prio = tc;
2746 enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
2747 }
2748 }
2749
2750 err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
2751 if (err)
2752 goto err_reset_tc;
2753
2754 priv->min_num_stack_tx_queues = num_stack_tx_queues;
2755
2756 enetc_debug_tx_ring_prios(priv);
2757
2758 enetc_change_preemptible_tcs(priv, mqprio->preemptible_tcs);
2759
2760 return 0;
2761
2762 err_reset_tc:
2763 enetc_reset_tc_mqprio(ndev);
2764 return err;
2765 }
2766 EXPORT_SYMBOL_GPL(enetc_setup_tc_mqprio);
2767
2768 static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx)
2769 {
2770 struct bpf_prog *old_prog, *prog = ctx;
2771 int num_stack_tx_queues;
2772 int err, i;
2773
2774 old_prog = xchg(&priv->xdp_prog, prog);
2775
2776 num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
2777 err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
2778 if (err) {
2779 xchg(&priv->xdp_prog, old_prog);
2780 return err;
2781 }
2782
2783 if (old_prog)
2784 bpf_prog_put(old_prog);
2785
2786 for (i = 0; i < priv->num_rx_rings; i++) {
2787 struct enetc_bdr *rx_ring = priv->rx_ring[i];
2788
2789 rx_ring->xdp.prog = prog;
2790
2791 if (prog)
2792 rx_ring->buffer_offset = XDP_PACKET_HEADROOM;
2793 else
2794 rx_ring->buffer_offset = ENETC_RXB_PAD;
2795 }
2796
2797 return 0;
2798 }
2799
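/* Attaching an XDP program reserves one TX ring per possible CPU for
 * XDP_TX/XDP_REDIRECT (see enetc_num_stack_tx_queues()), so the attach is
 * rejected if that would leave the stack with fewer queues than
 * min_num_stack_tx_queues. The buffer layout also changes (XDP headroom), so
 * the rings are drained and reseeded via enetc_reconfigure().
 */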
2800 static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
2801 struct netlink_ext_ack *extack)
2802 {
2803 int num_xdp_tx_queues = prog ? num_possible_cpus() : 0;
2804 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2805 bool extended;
2806
2807 if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
2808 priv->num_tx_rings) {
2809 NL_SET_ERR_MSG_FMT_MOD(extack,
2810 "Reserving %d XDP TXQs leaves under %d for stack (total %d)",
2811 num_xdp_tx_queues,
2812 priv->min_num_stack_tx_queues,
2813 priv->num_tx_rings);
2814 return -EBUSY;
2815 }
2816
2817 extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
2818
2819 /* The buffer layout is changing, so we need to drain the old
2820 * RX buffers and seed new ones.
2821 */
2822 return enetc_reconfigure(priv, extended, enetc_reconfigure_xdp_cb, prog);
2823 }
2824
2825 int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
2826 {
2827 switch (bpf->command) {
2828 case XDP_SETUP_PROG:
2829 return enetc_setup_xdp_prog(ndev, bpf->prog, bpf->extack);
2830 default:
2831 return -EINVAL;
2832 }
2833
2834 return 0;
2835 }
2836 EXPORT_SYMBOL_GPL(enetc_setup_bpf);
2837
2838 struct net_device_stats *enetc_get_stats(struct net_device *ndev)
2839 {
2840 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2841 struct net_device_stats *stats = &ndev->stats;
2842 unsigned long packets = 0, bytes = 0;
2843 unsigned long tx_dropped = 0;
2844 int i;
2845
2846 for (i = 0; i < priv->num_rx_rings; i++) {
2847 packets += priv->rx_ring[i]->stats.packets;
2848 bytes += priv->rx_ring[i]->stats.bytes;
2849 }
2850
2851 stats->rx_packets = packets;
2852 stats->rx_bytes = bytes;
2853 bytes = 0;
2854 packets = 0;
2855
2856 for (i = 0; i < priv->num_tx_rings; i++) {
2857 packets += priv->tx_ring[i]->stats.packets;
2858 bytes += priv->tx_ring[i]->stats.bytes;
2859 tx_dropped += priv->tx_ring[i]->stats.win_drop;
2860 }
2861
2862 stats->tx_packets = packets;
2863 stats->tx_bytes = bytes;
2864 stats->tx_dropped = tx_dropped;
2865
2866 return stats;
2867 }
2868 EXPORT_SYMBOL_GPL(enetc_get_stats);
2869
2870 static int enetc_set_rss(struct net_device *ndev, int en)
2871 {
2872 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2873 struct enetc_hw *hw = &priv->si->hw;
2874 u32 reg;
2875
2876 enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
2877
2878 reg = enetc_rd(hw, ENETC_SIMR);
2879 reg &= ~ENETC_SIMR_RSSE;
2880 reg |= (en) ? ENETC_SIMR_RSSE : 0;
2881 enetc_wr(hw, ENETC_SIMR, reg);
2882
2883 return 0;
2884 }
2885
2886 static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
2887 {
2888 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2889 struct enetc_hw *hw = &priv->si->hw;
2890 int i;
2891
2892 for (i = 0; i < priv->num_rx_rings; i++)
2893 enetc_bdr_enable_rxvlan(hw, i, en);
2894 }
2895
2896 static void enetc_enable_txvlan(struct net_device *ndev, bool en)
2897 {
2898 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2899 struct enetc_hw *hw = &priv->si->hw;
2900 int i;
2901
2902 for (i = 0; i < priv->num_tx_rings; i++)
2903 enetc_bdr_enable_txvlan(hw, i, en);
2904 }
2905
2906 void enetc_set_features(struct net_device *ndev, netdev_features_t features)
2907 {
2908 netdev_features_t changed = ndev->features ^ features;
2909
2910 if (changed & NETIF_F_RXHASH)
2911 enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
2912
2913 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2914 enetc_enable_rxvlan(ndev,
2915 !!(features & NETIF_F_HW_VLAN_CTAG_RX));
2916
2917 if (changed & NETIF_F_HW_VLAN_CTAG_TX)
2918 enetc_enable_txvlan(ndev,
2919 !!(features & NETIF_F_HW_VLAN_CTAG_TX));
2920 }
2921 EXPORT_SYMBOL_GPL(enetc_set_features);
2922
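/* RX timestamping needs extended (double-size) RX BDs, so toggling
 * ENETC_F_RX_TSTAMP goes through enetc_reconfigure() to rebuild the RX rings
 * with the matching BD format before the new offload flags are committed.
 */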
2923 static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
2924 {
2925 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2926 int err, new_offloads = priv->active_offloads;
2927 struct hwtstamp_config config;
2928
2929 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2930 return -EFAULT;
2931
2932 switch (config.tx_type) {
2933 case HWTSTAMP_TX_OFF:
2934 new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
2935 break;
2936 case HWTSTAMP_TX_ON:
2937 new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
2938 new_offloads |= ENETC_F_TX_TSTAMP;
2939 break;
2940 case HWTSTAMP_TX_ONESTEP_SYNC:
2941 new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
2942 new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
2943 break;
2944 default:
2945 return -ERANGE;
2946 }
2947
2948 switch (config.rx_filter) {
2949 case HWTSTAMP_FILTER_NONE:
2950 new_offloads &= ~ENETC_F_RX_TSTAMP;
2951 break;
2952 default:
2953 new_offloads |= ENETC_F_RX_TSTAMP;
2954 config.rx_filter = HWTSTAMP_FILTER_ALL;
2955 }
2956
2957 if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) {
2958 bool extended = !!(new_offloads & ENETC_F_RX_TSTAMP);
2959
2960 err = enetc_reconfigure(priv, extended, NULL, NULL);
2961 if (err)
2962 return err;
2963 }
2964
2965 priv->active_offloads = new_offloads;
2966
2967 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2968 -EFAULT : 0;
2969 }
2970
2971 static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
2972 {
2973 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2974 struct hwtstamp_config config;
2975
2976 config.flags = 0;
2977
2978 if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)
2979 config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
2980 else if (priv->active_offloads & ENETC_F_TX_TSTAMP)
2981 config.tx_type = HWTSTAMP_TX_ON;
2982 else
2983 config.tx_type = HWTSTAMP_TX_OFF;
2984
2985 config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
2986 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
2987
2988 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2989 -EFAULT : 0;
2990 }
2991
2992 int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2993 {
2994 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2995
2996 if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) {
2997 if (cmd == SIOCSHWTSTAMP)
2998 return enetc_hwtstamp_set(ndev, rq);
2999 if (cmd == SIOCGHWTSTAMP)
3000 return enetc_hwtstamp_get(ndev, rq);
3001 }
3002
3003 if (!priv->phylink)
3004 return -EOPNOTSUPP;
3005
3006 return phylink_mii_ioctl(priv->phylink, rq, cmd);
3007 }
3008 EXPORT_SYMBOL_GPL(enetc_ioctl);
3009
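/* Set up interrupt vector i: its RX ring is ring i, and its TX rings are
 * spread with stride bdr_int_num (idx = bdr_int_num * j + i). For example,
 * with 2 vectors and 8 TX rings, vector 0 services TX rings 0, 2, 4 and 6,
 * while vector 1 services TX rings 1, 3, 5 and 7.
 */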
3010 static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
3011 int v_tx_rings)
3012 {
3013 struct enetc_int_vector *v;
3014 struct enetc_bdr *bdr;
3015 int j, err;
3016
3017 v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
3018 if (!v)
3019 return -ENOMEM;
3020
3021 priv->int_vector[i] = v;
3022 bdr = &v->rx_ring;
3023 bdr->index = i;
3024 bdr->ndev = priv->ndev;
3025 bdr->dev = priv->dev;
3026 bdr->bd_count = priv->rx_bd_count;
3027 bdr->buffer_offset = ENETC_RXB_PAD;
3028 priv->rx_ring[i] = bdr;
3029
3030 err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
3031 if (err)
3032 goto free_vector;
3033
3034 err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq, MEM_TYPE_PAGE_SHARED,
3035 NULL);
3036 if (err) {
3037 xdp_rxq_info_unreg(&bdr->xdp.rxq);
3038 goto free_vector;
3039 }
3040
3041 /* init defaults for adaptive IC */
3042 if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
3043 v->rx_ictt = 0x1;
3044 v->rx_dim_en = true;
3045 }
3046
3047 INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
3048 netif_napi_add(priv->ndev, &v->napi, enetc_poll);
3049 v->count_tx_rings = v_tx_rings;
3050
3051 for (j = 0; j < v_tx_rings; j++) {
3052 int idx;
3053
3054 /* default tx ring mapping policy */
3055 idx = priv->bdr_int_num * j + i;
3056 __set_bit(idx, &v->tx_rings_map);
3057 bdr = &v->tx_ring[j];
3058 bdr->index = idx;
3059 bdr->ndev = priv->ndev;
3060 bdr->dev = priv->dev;
3061 bdr->bd_count = priv->tx_bd_count;
3062 priv->tx_ring[idx] = bdr;
3063 }
3064
3065 return 0;
3066
3067 free_vector:
3068 priv->rx_ring[i] = NULL;
3069 priv->int_vector[i] = NULL;
3070 kfree(v);
3071
3072 return err;
3073 }
3074
3075 static void enetc_int_vector_destroy(struct enetc_ndev_priv *priv, int i)
3076 {
3077 struct enetc_int_vector *v = priv->int_vector[i];
3078 struct enetc_bdr *rx_ring = &v->rx_ring;
3079 int j, tx_ring_index;
3080
3081 xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
3082 xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
3083 netif_napi_del(&v->napi);
3084 cancel_work_sync(&v->rx_dim.work);
3085
3086 for (j = 0; j < v->count_tx_rings; j++) {
3087 tx_ring_index = priv->bdr_int_num * j + i;
3088 priv->tx_ring[tx_ring_index] = NULL;
3089 }
3090
3091 priv->rx_ring[i] = NULL;
3092 priv->int_vector[i] = NULL;
3093 kfree(v);
3094 }
3095
3096 int enetc_alloc_msix(struct enetc_ndev_priv *priv)
3097 {
3098 struct pci_dev *pdev = priv->si->pdev;
3099 int v_tx_rings, v_remainder;
3100 int num_stack_tx_queues;
3101 int first_xdp_tx_ring;
3102 int i, n, err, nvec;
3103
3104 nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
3105 /* allocate MSIX for both messaging and Rx/Tx interrupts */
3106 n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
3107
3108 if (n < 0)
3109 return n;
3110
3111 if (n != nvec)
3112 return -EPERM;
3113
3114 /* # of tx rings per int vector */
3115 v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
3116 v_remainder = priv->num_tx_rings % priv->bdr_int_num;
3117
3118 for (i = 0; i < priv->bdr_int_num; i++) {
3119 /* Distribute the remaining TX rings to the first v_remainder
3120 * interrupt vectors
3121 */
3122 int num_tx_rings = i < v_remainder ? v_tx_rings + 1 : v_tx_rings;
3123
3124 err = enetc_int_vector_init(priv, i, num_tx_rings);
3125 if (err)
3126 goto fail;
3127 }
3128
3129 num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
3130
3131 err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
3132 if (err)
3133 goto fail;
3134
3135 err = netif_set_real_num_rx_queues(priv->ndev, priv->num_rx_rings);
3136 if (err)
3137 goto fail;
3138
3139 priv->min_num_stack_tx_queues = num_possible_cpus();
3140 first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus();
3141 priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring];
3142
3143 return 0;
3144
3145 fail:
3146 while (i--)
3147 enetc_int_vector_destroy(priv, i);
3148
3149 pci_free_irq_vectors(pdev);
3150
3151 return err;
3152 }
3153 EXPORT_SYMBOL_GPL(enetc_alloc_msix);
3154
3155 void enetc_free_msix(struct enetc_ndev_priv *priv)
3156 {
3157 int i;
3158
3159 for (i = 0; i < priv->bdr_int_num; i++)
3160 enetc_int_vector_destroy(priv, i);
3161
3162 /* disable all MSIX for this device */
3163 pci_free_irq_vectors(priv->si->pdev);
3164 }
3165 EXPORT_SYMBOL_GPL(enetc_free_msix);
3166
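/* The SI structure is over-allocated and manually aligned to ENETC_SI_ALIGN
 * in enetc_pci_probe(); si->pad records the alignment offset so the original
 * kmalloc pointer can be recovered here for kfree().
 */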
3167 static void enetc_kfree_si(struct enetc_si *si)
3168 {
3169 char *p = (char *)si - si->pad;
3170
3171 kfree(p);
3172 }
3173
3174 static void enetc_detect_errata(struct enetc_si *si)
3175 {
3176 if (si->pdev->revision == ENETC_REV1)
3177 si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP;
3178 }
3179
3180 int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
3181 {
3182 struct enetc_si *si, *p;
3183 struct enetc_hw *hw;
3184 size_t alloc_size;
3185 int err, len;
3186
3187 pcie_flr(pdev);
3188 err = pci_enable_device_mem(pdev);
3189 if (err)
3190 return dev_err_probe(&pdev->dev, err, "device enable failed\n");
3191
3192 /* set up for high or low dma */
3193 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3194 if (err) {
3195 dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
3196 goto err_dma;
3197 }
3198
3199 err = pci_request_mem_regions(pdev, name);
3200 if (err) {
3201 dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
3202 goto err_pci_mem_reg;
3203 }
3204
3205 pci_set_master(pdev);
3206
3207 alloc_size = sizeof(struct enetc_si);
3208 if (sizeof_priv) {
3209 /* align priv to 32B */
3210 alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
3211 alloc_size += sizeof_priv;
3212 }
3213 /* force 32B alignment for enetc_si */
3214 alloc_size += ENETC_SI_ALIGN - 1;
3215
3216 p = kzalloc(alloc_size, GFP_KERNEL);
3217 if (!p) {
3218 err = -ENOMEM;
3219 goto err_alloc_si;
3220 }
3221
3222 si = PTR_ALIGN(p, ENETC_SI_ALIGN);
3223 si->pad = (char *)si - (char *)p;
3224
3225 pci_set_drvdata(pdev, si);
3226 si->pdev = pdev;
3227 hw = &si->hw;
3228
3229 len = pci_resource_len(pdev, ENETC_BAR_REGS);
3230 hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
3231 if (!hw->reg) {
3232 err = -ENXIO;
3233 dev_err(&pdev->dev, "ioremap() failed\n");
3234 goto err_ioremap;
3235 }
3236 if (len > ENETC_PORT_BASE)
3237 hw->port = hw->reg + ENETC_PORT_BASE;
3238 if (len > ENETC_GLOBAL_BASE)
3239 hw->global = hw->reg + ENETC_GLOBAL_BASE;
3240
3241 enetc_detect_errata(si);
3242
3243 return 0;
3244
3245 err_ioremap:
3246 enetc_kfree_si(si);
3247 err_alloc_si:
3248 pci_release_mem_regions(pdev);
3249 err_pci_mem_reg:
3250 err_dma:
3251 pci_disable_device(pdev);
3252
3253 return err;
3254 }
3255 EXPORT_SYMBOL_GPL(enetc_pci_probe);
3256
3257 void enetc_pci_remove(struct pci_dev *pdev)
3258 {
3259 struct enetc_si *si = pci_get_drvdata(pdev);
3260 struct enetc_hw *hw = &si->hw;
3261
3262 iounmap(hw->reg);
3263 enetc_kfree_si(si);
3264 pci_release_mem_regions(pdev);
3265 pci_disable_device(pdev);
3266 }
3267 EXPORT_SYMBOL_GPL(enetc_pci_remove);
3268
3269 static const struct enetc_drvdata enetc_pf_data = {
3270 .sysclk_freq = ENETC_CLK_400M,
3271 .pmac_offset = ENETC_PMAC_OFFSET,
3272 .eth_ops = &enetc_pf_ethtool_ops,
3273 };
3274
3275 static const struct enetc_drvdata enetc4_pf_data = {
3276 .sysclk_freq = ENETC_CLK_333M,
3277 .pmac_offset = ENETC4_PMAC_OFFSET,
3278 .eth_ops = &enetc4_pf_ethtool_ops,
3279 };
3280
3281 static const struct enetc_drvdata enetc_vf_data = {
3282 .sysclk_freq = ENETC_CLK_400M,
3283 .eth_ops = &enetc_vf_ethtool_ops,
3284 };
3285
3286 static const struct enetc_platform_info enetc_info[] = {
3287 { .revision = ENETC_REV_1_0,
3288 .dev_id = ENETC_DEV_ID_PF,
3289 .data = &enetc_pf_data,
3290 },
3291 { .revision = ENETC_REV_4_1,
3292 .dev_id = NXP_ENETC_PF_DEV_ID,
3293 .data = &enetc4_pf_data,
3294 },
3295 { .revision = ENETC_REV_1_0,
3296 .dev_id = ENETC_DEV_ID_VF,
3297 .data = &enetc_vf_data,
3298 },
3299 };
3300
3301 int enetc_get_driver_data(struct enetc_si *si)
3302 {
3303 u16 dev_id = si->pdev->device;
3304 int i;
3305
3306 for (i = 0; i < ARRAY_SIZE(enetc_info); i++) {
3307 if (si->revision == enetc_info[i].revision &&
3308 dev_id == enetc_info[i].dev_id) {
3309 si->drvdata = enetc_info[i].data;
3310
3311 return 0;
3312 }
3313 }
3314
3315 return -ERANGE;
3316 }
3317 EXPORT_SYMBOL_GPL(enetc_get_driver_data);
3318
3319 MODULE_DESCRIPTION("NXP ENETC Ethernet driver");
3320 MODULE_LICENSE("Dual BSD/GPL");
3321