// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */

#include "enetc.h"
#include <linux/bpf_trace.h>
#include <linux/clk.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/vmalloc.h>
#include <linux/ptp_classify.h>
#include <net/ip6_checksum.h>
#include <net/pkt_sched.h>
#include <net/tso.h>

u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg)
{
	/* ENETC with pseudo MAC does not have Ethernet MAC
	 * port registers.
	 */
	if (enetc_is_pseudo_mac(si))
		return 0;

	return enetc_port_rd(&si->hw, reg);
}
EXPORT_SYMBOL_GPL(enetc_port_mac_rd);

void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val)
{
	if (enetc_is_pseudo_mac(si))
		return;

	enetc_port_wr(&si->hw, reg, val);
	if (si->hw_features & ENETC_SI_F_QBU)
		enetc_port_wr(&si->hw, reg + si->drvdata->pmac_offset, val);
}
EXPORT_SYMBOL_GPL(enetc_port_mac_wr);

static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv,
					 u8 preemptible_tcs)
{
	if (!(priv->si->hw_features & ENETC_SI_F_QBU))
		return;

	priv->preemptible_tcs = preemptible_tcs;
	enetc_mm_commit_preemptible_tcs(priv);
}

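/* Compute a 6-bit index into the 64-entry MAC hash filter: bit i of the
 * result is the parity (XOR fold) of every 6th bit of the byte-reversed
 * 48-bit MAC address.
 */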
static int enetc_mac_addr_hash_idx(const u8 *addr)
{
	u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16;
	u64 mask = 0;
	int res = 0;
	int i;

	for (i = 0; i < 8; i++)
		mask |= BIT_ULL(i * 6);

	for (i = 0; i < 6; i++)
		res |= (hweight64(fold & (mask << i)) & 0x1) << i;

	return res;
}

void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
				  const unsigned char *addr)
{
	int idx = enetc_mac_addr_hash_idx(addr);

	/* add hash table entry */
	__set_bit(idx, filter->mac_hash_table);
	filter->mac_addr_cnt++;
}
EXPORT_SYMBOL_GPL(enetc_add_mac_addr_ht_filter);

void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter)
{
	filter->mac_addr_cnt = 0;

	bitmap_zero(filter->mac_hash_table,
		    ENETC_MADDR_HASH_TBL_SZ);
}
EXPORT_SYMBOL_GPL(enetc_reset_mac_addr_filter);

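/* When an XDP program is attached, the last num_possible_cpus() Tx rings
 * are reserved for XDP_TX/XDP_REDIRECT; only the rest are exposed to the
 * network stack.
 */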
static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
	int num_tx_rings = priv->num_tx_rings;

	if (priv->xdp_prog)
		return num_tx_rings - num_possible_cpus();

	return num_tx_rings;
}

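/* XDP Tx rings are paired 1:1 with Rx rings: the position of tx_ring in
 * the priv->xdp_tx_ring array selects the Rx ring whose pages it recycles.
 */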
static struct enetc_bdr *enetc_rx_ring_from_xdp_tx_ring(struct enetc_ndev_priv *priv,
							struct enetc_bdr *tx_ring)
{
	int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring;

	return priv->rx_ring[index];
}

static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd)
{
	if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect)
		return NULL;

	return tx_swbd->skb;
}

static struct xdp_frame *
enetc_tx_swbd_get_xdp_frame(struct enetc_tx_swbd *tx_swbd)
{
	if (tx_swbd->is_xdp_redirect)
		return tx_swbd->xdp_frame;

	return NULL;
}

static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
				struct enetc_tx_swbd *tx_swbd)
{
	/* For XDP_TX, pages come from RX, whereas for the other contexts
	 * where we have is_dma_page set, those come from skb_frag_dma_map.
	 * We need to match the DMA mapping length, so we need to
	 * differentiate those.
	 */
	if (tx_swbd->is_dma_page)
		dma_unmap_page(tx_ring->dev, tx_swbd->dma,
			       tx_swbd->is_xdp_tx ? PAGE_SIZE : tx_swbd->len,
			       tx_swbd->dir);
	else
		dma_unmap_single(tx_ring->dev, tx_swbd->dma,
				 tx_swbd->len, tx_swbd->dir);
	tx_swbd->dma = 0;
}

static void enetc_free_tx_frame(struct enetc_bdr *tx_ring,
				struct enetc_tx_swbd *tx_swbd)
{
	struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
	struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);

	if (tx_swbd->dma)
		enetc_unmap_tx_buff(tx_ring, tx_swbd);

	if (xdp_frame) {
		xdp_return_frame(tx_swbd->xdp_frame);
		tx_swbd->xdp_frame = NULL;
	} else if (skb) {
		dev_kfree_skb_any(skb);
		tx_swbd->skb = NULL;
	}
}

/* Let H/W know BD ring has been updated */
static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring)
{
	/* includes wmb() */
	enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use);
}

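/* Classify a PTP packet: report whether it is UDP-encapsulated, its message
 * type and two-step flag, plus the offsets of the correctionField and of the
 * header body relative to the MAC header. Returns -EINVAL for non-PTP frames.
 */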
static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
			   u8 *msgtype, u8 *twostep,
			   u16 *correction_offset, u16 *body_offset)
{
	unsigned int ptp_class;
	struct ptp_header *hdr;
	unsigned int type;
	u8 *base;

	ptp_class = ptp_classify_raw(skb);
	if (ptp_class == PTP_CLASS_NONE)
		return -EINVAL;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		return -EINVAL;

	type = ptp_class & PTP_CLASS_PMASK;
	if (type == PTP_CLASS_IPV4 || type == PTP_CLASS_IPV6)
		*udp = 1;
	else
		*udp = 0;

	*msgtype = ptp_get_msgtype(hdr, ptp_class);
	*twostep = hdr->flag_field[0] & 0x2;

	base = skb_mac_header(skb);
	*correction_offset = (u8 *)&hdr->correction - base;
	*body_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;

	return 0;
}

static bool enetc_tx_csum_offload_check(struct sk_buff *skb)
{
	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
	case offsetof(struct udphdr, check):
		return true;
	default:
		return false;
	}
}

static bool enetc_skb_is_ipv6(struct sk_buff *skb)
{
	return vlan_get_protocol(skb) == htons(ETH_P_IPV6);
}

static bool enetc_skb_is_tcp(struct sk_buff *skb)
{
	return skb->csum_offset == offsetof(struct tcphdr, check);
}

/**
 * enetc_unwind_tx_frame() - Unwind the DMA mappings of a multi-buffer Tx frame
 * @tx_ring: Pointer to the Tx ring on which the buffer descriptors are located
 * @count: Number of Tx buffer descriptors which need to be unmapped
 * @i: Index of the last successfully mapped Tx buffer descriptor
 */
static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i)
{
	while (count--) {
		struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];

		enetc_free_tx_frame(tx_ring, tx_swbd);
		if (i == 0)
			i = tx_ring->bd_count;
		i--;
	}
}

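/* Program the one-step timestamping register: enable single-step mode,
 * set the correctionField offset and flag UDP transport so the hardware
 * can patch the checksum accordingly.
 */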
static void enetc_set_one_step_ts(struct enetc_si *si, bool udp, int offset)
{
	u32 val = ENETC_PM0_SINGLE_STEP_EN;

	val |= ENETC_SET_SINGLE_STEP_OFFSET(offset);
	if (udp)
		val |= ENETC_PM0_SINGLE_STEP_CH;

	/* The "Correction" field of a packet is updated based on the
	 * current time and the timestamp provided
	 */
	enetc_port_mac_wr(si, ENETC_PM0_SINGLE_STEP, val);
}

static void enetc4_set_one_step_ts(struct enetc_si *si, bool udp, int offset)
{
	u32 val = PM_SINGLE_STEP_EN;

	val |= PM_SINGLE_STEP_OFFSET_SET(offset);
	if (udp)
		val |= PM_SINGLE_STEP_CH;

	enetc_port_mac_wr(si, ENETC4_PM_SINGLE_STEP(0), val);
}

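/* Patch the originTimestamp field of an outgoing one-step Sync packet with
 * the current SI time and arm the single-step register. Returns the low
 * timestamp bits to be echoed in the Tx BD extension.
 */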
static u32 enetc_update_ptp_sync_msg(struct enetc_ndev_priv *priv,
				     struct sk_buff *skb, bool csum_offload)
{
	struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
	u16 tstamp_off = enetc_cb->origin_tstamp_off;
	u16 corr_off = enetc_cb->correction_off;
	struct enetc_si *si = priv->si;
	struct enetc_hw *hw = &si->hw;
	__be32 new_sec_l, new_nsec;
	__be16 new_sec_h;
	u32 lo, hi, nsec;
	u8 *data;
	u64 sec;

	lo = enetc_rd_hot(hw, ENETC_SICTR0);
	hi = enetc_rd_hot(hw, ENETC_SICTR1);
	sec = (u64)hi << 32 | lo;
	nsec = do_div(sec, 1000000000);

	/* Update originTimestamp field of Sync packet
	 * - 48 bits seconds field
	 * - 32 bits nanoseconds field
	 *
	 * In addition, if csum_offload is false, the UDP checksum needs
	 * to be updated by software after updating the originTimestamp
	 * field, otherwise the hardware will calculate the wrong checksum
	 * when updating the correction field and writing it to the packet.
	 */

	data = skb_mac_header(skb);
	new_sec_h = htons((sec >> 32) & 0xffff);
	new_sec_l = htonl(sec & 0xffffffff);
	new_nsec = htonl(nsec);
	if (enetc_cb->udp && !csum_offload) {
		struct udphdr *uh = udp_hdr(skb);
		__be32 old_sec_l, old_nsec;
		__be16 old_sec_h;

		old_sec_h = *(__be16 *)(data + tstamp_off);
		inet_proto_csum_replace2(&uh->check, skb, old_sec_h,
					 new_sec_h, false);

		old_sec_l = *(__be32 *)(data + tstamp_off + 2);
		inet_proto_csum_replace4(&uh->check, skb, old_sec_l,
					 new_sec_l, false);

		old_nsec = *(__be32 *)(data + tstamp_off + 6);
		inet_proto_csum_replace4(&uh->check, skb, old_nsec,
					 new_nsec, false);
	}

	*(__be16 *)(data + tstamp_off) = new_sec_h;
	*(__be32 *)(data + tstamp_off + 2) = new_sec_l;
	*(__be32 *)(data + tstamp_off + 6) = new_nsec;

	/* Configure single-step register */
	if (is_enetc_rev1(si))
		enetc_set_one_step_ts(si, enetc_cb->udp, corr_off);
	else
		enetc4_set_one_step_ts(si, enetc_cb->udp, corr_off);

	return lo & ENETC_TXBD_TSTAMP;
}

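/* Map the skb head and fragments to Tx BDs, optionally adding an extension
 * BD for VLAN insertion and/or PTP timestamping. Returns the number of BDs
 * used, or 0 on DMA mapping error (in which case all mappings are unwound).
 */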
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
	bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
	struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
	struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
	struct enetc_tx_swbd *tx_swbd;
	int len = skb_headlen(skb);
	union enetc_tx_bd temp_bd;
	bool csum_offload = false;
	union enetc_tx_bd *txbd;
	int i, count = 0;
	skb_frag_t *frag;
	unsigned int f;
	dma_addr_t dma;
	u8 flags = 0;
	u32 tstamp;

	enetc_clear_tx_bd(&temp_bd);
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Cannot support TSD and checksum offload at the same time */
		if (priv->active_offloads & ENETC_F_TXCSUM &&
		    enetc_tx_csum_offload_check(skb) && !tx_ring->tsd_enable) {
			temp_bd.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START,
						     skb_network_offset(skb));
			temp_bd.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN,
						     skb_network_header_len(skb) / 4);
			temp_bd.l3_aux1 |= FIELD_PREP(ENETC_TX_BD_L3T,
						      enetc_skb_is_ipv6(skb));
			if (enetc_skb_is_tcp(skb))
				temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T,
							    ENETC_TXBD_L4T_TCP);
			else
				temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T,
							    ENETC_TXBD_L4T_UDP);
			flags |= ENETC_TXBD_FLAGS_CSUM_LSO | ENETC_TXBD_FLAGS_L4CS;
			csum_offload = true;
		} else if (skb_checksum_help(skb)) {
			return 0;
		}
	}

	if (enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
		do_onestep_tstamp = true;
		tstamp = enetc_update_ptp_sync_msg(priv, skb, csum_offload);
	} else if (enetc_cb->flag & ENETC_F_TX_TSTAMP) {
		do_twostep_tstamp = true;
	}

	i = tx_ring->next_to_use;
	txbd = ENETC_TXBD(*tx_ring, i);
	prefetchw(txbd);

	dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
		goto dma_err;

	temp_bd.addr = cpu_to_le64(dma);
	temp_bd.buf_len = cpu_to_le16(len);

	tx_swbd = &tx_ring->tx_swbd[i];
	tx_swbd->dma = dma;
	tx_swbd->len = len;
	tx_swbd->is_dma_page = 0;
	tx_swbd->dir = DMA_TO_DEVICE;
	count++;

	do_vlan = skb_vlan_tag_present(skb);
	tx_swbd->do_twostep_tstamp = do_twostep_tstamp;
	tx_swbd->qbv_en = !!(priv->active_offloads & ENETC_F_QBV);
	tx_swbd->check_wb = tx_swbd->do_twostep_tstamp || tx_swbd->qbv_en;

	if (do_vlan || do_onestep_tstamp || do_twostep_tstamp)
		flags |= ENETC_TXBD_FLAGS_EX;

	if (tx_ring->tsd_enable)
		flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;

	/* first BD needs frm_len and offload flags set */
	temp_bd.frm_len = cpu_to_le16(skb->len);
	temp_bd.flags = flags;

	if (flags & ENETC_TXBD_FLAGS_TSE)
		temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns,
							  flags);

	if (flags & ENETC_TXBD_FLAGS_EX) {
		u8 e_flags = 0;
		*txbd = temp_bd;
		enetc_clear_tx_bd(&temp_bd);

		/* add extension BD for VLAN and/or timestamping */
		flags = 0;
		tx_swbd++;
		txbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = tx_ring->tx_swbd;
			txbd = ENETC_TXBD(*tx_ring, 0);
		}
		prefetchw(txbd);

		if (do_vlan) {
			temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
			temp_bd.ext.tpid = 0; /* < C-TAG */
			e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
		}

		if (do_onestep_tstamp) {
			/* Configure extension BD */
			temp_bd.ext.tstamp = cpu_to_le32(tstamp);
			e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP;
		} else if (do_twostep_tstamp) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
		}

		temp_bd.ext.e_flags = e_flags;
		count++;
	}

	frag = &skb_shinfo(skb)->frags[0];
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
		len = skb_frag_size(frag);
		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_err;

		*txbd = temp_bd;
		enetc_clear_tx_bd(&temp_bd);

		flags = 0;
		tx_swbd++;
		txbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = tx_ring->tx_swbd;
			txbd = ENETC_TXBD(*tx_ring, 0);
		}
		prefetchw(txbd);

		temp_bd.addr = cpu_to_le64(dma);
		temp_bd.buf_len = cpu_to_le16(len);

		tx_swbd->dma = dma;
		tx_swbd->len = len;
		tx_swbd->is_dma_page = 1;
		tx_swbd->dir = DMA_TO_DEVICE;
		count++;
	}

	/* last BD needs 'F' bit set */
	flags |= ENETC_TXBD_FLAGS_F;
	temp_bd.flags = flags;
	*txbd = temp_bd;

	tx_ring->tx_swbd[i].is_eof = true;
	tx_ring->tx_swbd[i].skb = skb;

	enetc_bdr_idx_inc(tx_ring, &i);
	tx_ring->next_to_use = i;

	skb_tx_timestamp(skb);

	enetc_update_tx_ring_tail(tx_ring);

	return count;

dma_err:
	dev_err(tx_ring->dev, "DMA map error");

	enetc_unwind_tx_frame(tx_ring, count, i);

	return 0;
}

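/* Map one segment header from the ring's preallocated TSO header area and
 * optionally add a VLAN extension BD. Returns the number of BDs used (1 or 2).
 */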
static int enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
				struct enetc_tx_swbd *tx_swbd,
				union enetc_tx_bd *txbd, int *i, int hdr_len,
				int data_len)
{
	union enetc_tx_bd txbd_tmp;
	u8 flags = 0, e_flags = 0;
	dma_addr_t addr;
	int count = 1;

	enetc_clear_tx_bd(&txbd_tmp);
	addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;

	if (skb_vlan_tag_present(skb))
		flags |= ENETC_TXBD_FLAGS_EX;

	txbd_tmp.addr = cpu_to_le64(addr);
	txbd_tmp.buf_len = cpu_to_le16(hdr_len);

	/* first BD needs frm_len and offload flags set */
	txbd_tmp.frm_len = cpu_to_le16(hdr_len + data_len);
	txbd_tmp.flags = flags;

	/* For the TSO header we do not set the dma address since we do not
	 * want it unmapped when we do cleanup. We still set len so that we
	 * count the bytes sent.
	 */
	tx_swbd->len = hdr_len;
	tx_swbd->do_twostep_tstamp = false;
	tx_swbd->check_wb = false;

	/* Actually write the header in the BD */
	*txbd = txbd_tmp;

	/* Add extension BD for VLAN */
	if (flags & ENETC_TXBD_FLAGS_EX) {
		/* Get the next BD */
		enetc_bdr_idx_inc(tx_ring, i);
		txbd = ENETC_TXBD(*tx_ring, *i);
		tx_swbd = &tx_ring->tx_swbd[*i];
		prefetchw(txbd);

		/* Setup the VLAN fields */
		enetc_clear_tx_bd(&txbd_tmp);
		txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
		txbd_tmp.ext.tpid = 0; /* < C-TAG */
		e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;

		/* Write the BD */
		txbd_tmp.ext.e_flags = e_flags;
		*txbd = txbd_tmp;
		count++;
	}

	return count;
}

static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
				 struct enetc_tx_swbd *tx_swbd,
				 union enetc_tx_bd *txbd, char *data,
				 int size, bool last_bd)
{
	union enetc_tx_bd txbd_tmp;
	dma_addr_t addr;
	u8 flags = 0;

	enetc_clear_tx_bd(&txbd_tmp);

	addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx_ring->dev, addr))) {
		netdev_err(tx_ring->ndev, "DMA map error\n");
		return -ENOMEM;
	}

	if (last_bd) {
		flags |= ENETC_TXBD_FLAGS_F;
		tx_swbd->is_eof = 1;
	}

	txbd_tmp.addr = cpu_to_le64(addr);
	txbd_tmp.buf_len = cpu_to_le16(size);
	txbd_tmp.flags = flags;

	tx_swbd->dma = addr;
	tx_swbd->len = size;
	tx_swbd->dir = DMA_TO_DEVICE;

	*txbd = txbd_tmp;

	return 0;
}

static __wsum enetc_tso_hdr_csum(struct tso_t *tso, struct sk_buff *skb,
				 char *hdr, int hdr_len, int *l4_hdr_len)
{
	char *l4_hdr = hdr + skb_transport_offset(skb);
	int mac_hdr_len = skb_network_offset(skb);

	if (tso->tlen != sizeof(struct udphdr)) {
		struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);

		tcph->check = 0;
	} else {
		struct udphdr *udph = (struct udphdr *)(l4_hdr);

		udph->check = 0;
	}

	/* Compute the IP checksum. This is necessary since tso_build_hdr()
	 * already incremented the IP ID field.
	 */
	if (!tso->ipv6) {
		struct iphdr *iph = (void *)(hdr + mac_hdr_len);

		iph->check = 0;
		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
	}

	/* Compute the checksum over the L4 header. */
	*l4_hdr_len = hdr_len - skb_transport_offset(skb);
	return csum_partial(l4_hdr, *l4_hdr_len, 0);
}

static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso,
				    struct sk_buff *skb, char *hdr, int len,
				    __wsum sum)
{
	char *l4_hdr = hdr + skb_transport_offset(skb);
	__sum16 csum_final;

	/* Complete the L4 checksum by appending the pseudo-header to the
	 * already computed checksum.
	 */
	if (!tso->ipv6)
		csum_final = csum_tcpudp_magic(ip_hdr(skb)->saddr,
					       ip_hdr(skb)->daddr,
					       len, ip_hdr(skb)->protocol, sum);
	else
		csum_final = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     len, ipv6_hdr(skb)->nexthdr, sum);

	if (tso->tlen != sizeof(struct udphdr)) {
		struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);

		tcph->check = csum_final;
	} else {
		struct udphdr *udph = (struct udphdr *)(l4_hdr);

		udph->check = csum_final;
	}
}

static int enetc_lso_count_descs(const struct sk_buff *skb)
{
	/* 4 BDs: 1 BD for the LSO header, 1 BD for the extension BD, 1 BD
	 * for the linear area data excluding the LSO header, namely
	 * skb_headlen(skb) - lso_hdr_len (it may be 0, but that's okay,
	 * we only need to consider the worst case), and 1 BD for a gap.
	 */
	return skb_shinfo(skb)->nr_frags + 4;
}

static int enetc_lso_get_hdr_len(const struct sk_buff *skb)
{
	int hdr_len, tlen;

	tlen = skb_is_gso_tcp(skb) ? tcp_hdrlen(skb) : sizeof(struct udphdr);
	hdr_len = skb_transport_offset(skb) + tlen;

	return hdr_len;
}

static void enetc_lso_start(struct sk_buff *skb, struct enetc_lso_t *lso)
{
	lso->lso_seg_size = skb_shinfo(skb)->gso_size;
	lso->ipv6 = enetc_skb_is_ipv6(skb);
	lso->tcp = skb_is_gso_tcp(skb);
	lso->l3_hdr_len = skb_network_header_len(skb);
	lso->l3_start = skb_network_offset(skb);
	lso->hdr_len = enetc_lso_get_hdr_len(skb);
	lso->total_len = skb->len - lso->hdr_len;
}

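/* Build the first (header) BD of an LSO chain plus its extension BD:
 * program the total frame length, the L3/L4 offload fields, the LSO
 * segment size and optional VLAN insertion.
 */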
static void enetc_lso_map_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
			      int *i, struct enetc_lso_t *lso)
{
	union enetc_tx_bd txbd_tmp, *txbd;
	struct enetc_tx_swbd *tx_swbd;
	u16 frm_len, frm_len_ext;
	u8 flags, e_flags = 0;
	dma_addr_t addr;
	char *hdr;

	/* Get the first BD of the LSO BDs chain */
	txbd = ENETC_TXBD(*tx_ring, *i);
	tx_swbd = &tx_ring->tx_swbd[*i];
	prefetchw(txbd);

	/* Prepare LSO header: MAC + IP + TCP/UDP */
	hdr = tx_ring->tso_headers + *i * TSO_HEADER_SIZE;
	memcpy(hdr, skb->data, lso->hdr_len);
	addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;

	/* {frm_len_ext, frm_len} indicates the total length of
	 * large transmit data unit. frm_len contains the 16 least
	 * significant bits and frm_len_ext contains the 4 most
	 * significant bits.
	 */
	frm_len = lso->total_len & 0xffff;
	frm_len_ext = (lso->total_len >> 16) & 0xf;

	/* Set the flags of the first BD */
	flags = ENETC_TXBD_FLAGS_EX | ENETC_TXBD_FLAGS_CSUM_LSO |
		ENETC_TXBD_FLAGS_LSO | ENETC_TXBD_FLAGS_L4CS;

	enetc_clear_tx_bd(&txbd_tmp);
	txbd_tmp.addr = cpu_to_le64(addr);
	txbd_tmp.hdr_len = cpu_to_le16(lso->hdr_len);

	/* first BD needs frm_len and offload flags set */
	txbd_tmp.frm_len = cpu_to_le16(frm_len);
	txbd_tmp.flags = flags;

	txbd_tmp.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START, lso->l3_start);
	/* l3_hdr_size in 32-bits (4 bytes) */
	txbd_tmp.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN,
				      lso->l3_hdr_len / 4);
	if (lso->ipv6)
		txbd_tmp.l3_aux1 |= ENETC_TX_BD_L3T;
	else
		txbd_tmp.l3_aux0 |= ENETC_TX_BD_IPCS;

	txbd_tmp.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T, lso->tcp ?
				     ENETC_TXBD_L4T_TCP : ENETC_TXBD_L4T_UDP);

	/* For the LSO header we do not set the dma address since
	 * we do not want it unmapped when we do cleanup. We still
	 * set len so that we count the bytes sent.
	 */
	tx_swbd->len = lso->hdr_len;
	tx_swbd->do_twostep_tstamp = false;
	tx_swbd->check_wb = false;

	/* Actually write the header in the BD */
	*txbd = txbd_tmp;

	/* Get the next BD, and the next BD is extended BD */
	enetc_bdr_idx_inc(tx_ring, i);
	txbd = ENETC_TXBD(*tx_ring, *i);
	tx_swbd = &tx_ring->tx_swbd[*i];
	prefetchw(txbd);

	enetc_clear_tx_bd(&txbd_tmp);
	if (skb_vlan_tag_present(skb)) {
		/* Setup the VLAN fields */
		txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
		txbd_tmp.ext.tpid = ENETC_TPID_8021Q;
		e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS;
	}

	/* Write the BD */
	txbd_tmp.ext.e_flags = e_flags;
	txbd_tmp.ext.lso_sg_size = cpu_to_le16(lso->lso_seg_size);
	txbd_tmp.ext.frm_len_ext = cpu_to_le16(frm_len_ext);
	*txbd = txbd_tmp;
}

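/* Map the remaining linear data and all page fragments of an LSO frame to
 * data BDs; the last BD gets the 'F' (final) bit and owns the skb.
 */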
static int enetc_lso_map_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
			      int *i, struct enetc_lso_t *lso, int *count)
{
	union enetc_tx_bd txbd_tmp, *txbd = NULL;
	struct enetc_tx_swbd *tx_swbd;
	skb_frag_t *frag;
	dma_addr_t dma;
	u8 flags = 0;
	int len, f;

	len = skb_headlen(skb) - lso->hdr_len;
	if (len > 0) {
		dma = dma_map_single(tx_ring->dev, skb->data + lso->hdr_len,
				     len, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			return -ENOMEM;

		enetc_bdr_idx_inc(tx_ring, i);
		txbd = ENETC_TXBD(*tx_ring, *i);
		tx_swbd = &tx_ring->tx_swbd[*i];
		prefetchw(txbd);
		*count += 1;

		enetc_clear_tx_bd(&txbd_tmp);
		txbd_tmp.addr = cpu_to_le64(dma);
		txbd_tmp.buf_len = cpu_to_le16(len);

		tx_swbd->dma = dma;
		tx_swbd->len = len;
		tx_swbd->is_dma_page = 0;
		tx_swbd->dir = DMA_TO_DEVICE;
	}

	frag = &skb_shinfo(skb)->frags[0];
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
		if (txbd)
			*txbd = txbd_tmp;

		len = skb_frag_size(frag);
		dma = skb_frag_dma_map(tx_ring->dev, frag);
		if (dma_mapping_error(tx_ring->dev, dma))
			return -ENOMEM;

		/* Get the next BD */
		enetc_bdr_idx_inc(tx_ring, i);
		txbd = ENETC_TXBD(*tx_ring, *i);
		tx_swbd = &tx_ring->tx_swbd[*i];
		prefetchw(txbd);
		*count += 1;

		enetc_clear_tx_bd(&txbd_tmp);
		txbd_tmp.addr = cpu_to_le64(dma);
		txbd_tmp.buf_len = cpu_to_le16(len);

		tx_swbd->dma = dma;
		tx_swbd->len = len;
		tx_swbd->is_dma_page = 1;
		tx_swbd->dir = DMA_TO_DEVICE;
	}

	/* Last BD needs 'F' bit set */
	flags |= ENETC_TXBD_FLAGS_F;
	txbd_tmp.flags = flags;
	*txbd = txbd_tmp;

	tx_swbd->is_eof = 1;
	tx_swbd->skb = skb;

	return 0;
}

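/* Transmit one frame through the hardware LSO engine: a header BD and an
 * extension BD, followed by the data BDs. Returns the number of BDs used,
 * or 0 on DMA mapping error.
 */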
static int enetc_lso_hw_offload(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
	struct enetc_tx_swbd *tx_swbd;
	struct enetc_lso_t lso = {0};
	int err, i, count = 0;

	/* Initialize the LSO handler */
	enetc_lso_start(skb, &lso);
	i = tx_ring->next_to_use;

	enetc_lso_map_hdr(tx_ring, skb, &i, &lso);
	/* First BD and an extend BD */
	count += 2;

	err = enetc_lso_map_data(tx_ring, skb, &i, &lso, &count);
	if (err)
		goto dma_err;

	/* Go to the next BD */
	enetc_bdr_idx_inc(tx_ring, &i);
	tx_ring->next_to_use = i;
	enetc_update_tx_ring_tail(tx_ring);

	return count;

dma_err:
	do {
		tx_swbd = &tx_ring->tx_swbd[i];
		enetc_free_tx_frame(tx_ring, tx_swbd);
		if (i == 0)
			i = tx_ring->bd_count;
		i--;
	} while (--count);

	return 0;
}

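/* Software TSO fallback (based on net/core/tso.c): build one header per
 * segment in the ring's preallocated header buffer, compute the L4 checksum
 * in software, and map each segment's payload to data BDs.
 */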
static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
	struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
	int hdr_len, total_len, data_len;
	struct enetc_tx_swbd *tx_swbd;
	union enetc_tx_bd *txbd;
	struct tso_t tso;
	__wsum csum, csum2;
	int count = 0, pos;
	int err, i, bd_data_num;

	/* Initialize the TSO handler, and prepare the first payload */
	hdr_len = tso_start(skb, &tso);
	total_len = skb->len - hdr_len;
	i = tx_ring->next_to_use;

	while (total_len > 0) {
		char *hdr;

		/* Get the BD */
		txbd = ENETC_TXBD(*tx_ring, i);
		tx_swbd = &tx_ring->tx_swbd[i];
		prefetchw(txbd);

		/* Determine the length of this packet */
		data_len = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_len;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_len, total_len == 0);

		/* compute the csum over the L4 header */
		csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
		count += enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd,
					      &i, hdr_len, data_len);
		bd_data_num = 0;

		while (data_len > 0) {
			int size;

			size = min_t(int, tso.size, data_len);

			/* Advance the index in the BDR */
			enetc_bdr_idx_inc(tx_ring, &i);
			txbd = ENETC_TXBD(*tx_ring, i);
			tx_swbd = &tx_ring->tx_swbd[i];
			prefetchw(txbd);

			/* Compute the checksum over this segment of data and
			 * add it to the csum already computed (over the L4
			 * header and possible other data segments).
			 */
			csum2 = csum_partial(tso.data, size, 0);
			csum = csum_block_add(csum, csum2, pos);
			pos += size;

			err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
						    tso.data, size,
						    size == data_len);
			if (err) {
				if (i == 0)
					i = tx_ring->bd_count;
				i--;

				goto err_map_data;
			}

			data_len -= size;
			count++;
			bd_data_num++;
			tso_build_data(skb, &tso, size);

			if (unlikely(bd_data_num >= priv->max_frags && data_len))
				goto err_chained_bd;
		}

		enetc_tso_complete_csum(tx_ring, &tso, skb, hdr, pos, csum);

		if (total_len == 0)
			tx_swbd->skb = skb;

		/* Go to the next BD */
		enetc_bdr_idx_inc(tx_ring, &i);
	}

	tx_ring->next_to_use = i;
	enetc_update_tx_ring_tail(tx_ring);

	return count;

err_map_data:
	dev_err(tx_ring->dev, "DMA map error");

err_chained_bd:
	enetc_unwind_tx_frame(tx_ring, count, i);

	return 0;
}

static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_bdr *tx_ring;
	int count;

	/* Queue one-step Sync packet if already locked */
	if (enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
		if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS,
					  &priv->flags)) {
			skb_queue_tail(&priv->tx_skbs, skb);
			return NETDEV_TX_OK;
		}
	}

	tx_ring = priv->tx_ring[skb->queue_mapping];

	if (skb_is_gso(skb)) {
		/* LSO data unit lengths of up to 256KB are supported */
		if (priv->active_offloads & ENETC_F_LSO &&
		    (skb->len - enetc_lso_get_hdr_len(skb)) <=
		    ENETC_LSO_MAX_DATA_LEN) {
			if (enetc_bd_unused(tx_ring) < enetc_lso_count_descs(skb)) {
				netif_stop_subqueue(ndev, tx_ring->index);
				return NETDEV_TX_BUSY;
			}

			count = enetc_lso_hw_offload(tx_ring, skb);
		} else {
			if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
				netif_stop_subqueue(ndev, tx_ring->index);
				return NETDEV_TX_BUSY;
			}

			enetc_lock_mdio();
			count = enetc_map_tx_tso_buffs(tx_ring, skb);
			enetc_unlock_mdio();
		}
	} else {
		if (unlikely(skb_shinfo(skb)->nr_frags > priv->max_frags))
			if (unlikely(skb_linearize(skb)))
				goto drop_packet_err;

		count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
		if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
			netif_stop_subqueue(ndev, tx_ring->index);
			return NETDEV_TX_BUSY;
		}

		enetc_lock_mdio();
		count = enetc_map_tx_buffs(tx_ring, skb);
		enetc_unlock_mdio();
	}

	if (unlikely(!count))
		goto drop_packet_err;

	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED(priv->max_frags))
		netif_stop_subqueue(ndev, tx_ring->index);

	return NETDEV_TX_OK;

drop_packet_err:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	u8 udp, msgtype, twostep;
	u16 offset1, offset2;

	/* Mark the Tx timestamp type on enetc_cb->flag if required */
	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK))
		enetc_cb->flag = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
	else
		enetc_cb->flag = 0;

	/* Fall back to two-step timestamp if not one-step Sync packet */
	if (enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
		if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep,
				    &offset1, &offset2) ||
		    msgtype != PTP_MSGTYPE_SYNC || twostep != 0) {
			enetc_cb->flag = ENETC_F_TX_TSTAMP;
		} else {
			enetc_cb->udp = !!udp;
			enetc_cb->correction_off = offset1;
			enetc_cb->origin_tstamp_off = offset2;
		}
	}

	return enetc_start_xmit(skb, ndev);
}
EXPORT_SYMBOL_GPL(enetc_xmit);

static irqreturn_t enetc_msix(int irq, void *data)
{
	struct enetc_int_vector *v = data;
	int i;

	enetc_lock_mdio();

	/* disable interrupts */
	enetc_wr_reg_hot(v->rbier, 0);
	enetc_wr_reg_hot(v->ricr1, v->rx_ictt);

	for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
		enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0);

	enetc_unlock_mdio();

	napi_schedule(&v->napi);

	return IRQ_HANDLED;
}

static void enetc_rx_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct dim_cq_moder moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	struct enetc_int_vector *v =
		container_of(dim, struct enetc_int_vector, rx_dim);
	struct enetc_ndev_priv *priv = netdev_priv(v->rx_ring.ndev);

	v->rx_ictt = enetc_usecs_to_cycles(moder.usec, priv->sysclk_freq);
	dim->state = DIM_START_MEASURE;
}

static void enetc_rx_net_dim(struct enetc_int_vector *v)
{
	struct dim_sample dim_sample = {};

	v->comp_cnt++;

	if (!v->rx_napi_work)
		return;

	dim_update_sample(v->comp_cnt,
			  v->rx_ring.stats.packets,
			  v->rx_ring.stats.bytes,
			  &dim_sample);
	net_dim(&v->rx_dim, &dim_sample);
}

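/* Number of Tx BDs completed by hardware: the distance from index ci to the
 * consumer index read back from the tcir register, modulo the ring size.
 */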
static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
{
	int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;

	return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
}

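/* A page can be recycled if it is not pfmemalloc-backed and the driver
 * holds the only reference to it.
 */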
static bool enetc_page_reusable(struct page *page)
{
	return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
}

static void enetc_reuse_page(struct enetc_bdr *rx_ring,
			     struct enetc_rx_swbd *old)
{
	struct enetc_rx_swbd *new;

	new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];

	/* next buf that may reuse a page */
	enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);

	/* copy page reference */
	*new = *old;
}

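/* Reconstruct the 64-bit Tx timestamp from the 32-bit value written back in
 * the BD: take the upper 32 bits from the free-running SI counter, minus one
 * if the counter's low word has already wrapped past the BD timestamp.
 */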
static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
				u64 *tstamp)
{
	u32 lo, hi, tstamp_lo;

	lo = enetc_rd_hot(hw, ENETC_SICTR0);
	hi = enetc_rd_hot(hw, ENETC_SICTR1);
	tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
	if (lo <= tstamp_lo)
		hi -= 1;
	*tstamp = (u64)hi << 32 | tstamp_lo;
}

static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
{
	struct skb_shared_hwtstamps shhwtstamps;

	if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
		skb_txtime_consumed(skb);
		skb_tstamp_tx(skb, &shhwtstamps);
	}
}

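/* Return an XDP_TX page to the RX ring it came from, re-syncing it for
 * device use; if that ring has no free slot, unmap and free the page.
 */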
static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring,
				      struct enetc_tx_swbd *tx_swbd)
{
	struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
	struct enetc_rx_swbd rx_swbd = {
		.dma = tx_swbd->dma,
		.page = tx_swbd->page,
		.page_offset = tx_swbd->page_offset,
		.dir = tx_swbd->dir,
		.len = tx_swbd->len,
	};
	struct enetc_bdr *rx_ring;

	rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring);

	if (likely(enetc_swbd_unused(rx_ring))) {
		enetc_reuse_page(rx_ring, &rx_swbd);

		/* sync for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma,
						 rx_swbd.page_offset,
						 ENETC_RXB_DMA_SIZE_XDP,
						 rx_swbd.dir);

		rx_ring->stats.recycles++;
	} else {
		/* RX ring is already full, we need to unmap and free the
		 * page, since there's nothing useful we can do with it.
		 */
		rx_ring->stats.recycle_failures++;

		dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE,
			       rx_swbd.dir);
		__free_page(rx_swbd.page);
	}

	rx_ring->xdp.xdp_tx_in_flight--;
}

static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
{
	int tx_frm_cnt = 0, tx_byte_cnt = 0, tx_win_drop = 0;
	struct net_device *ndev = tx_ring->ndev;
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_tx_swbd *tx_swbd;
	int i, bds_to_clean;
	bool do_twostep_tstamp;
	u64 tstamp = 0;

	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->tx_swbd[i];

	bds_to_clean = enetc_bd_ready_count(tx_ring, i);

	do_twostep_tstamp = false;

	while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
		struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
		struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
		bool is_eof = tx_swbd->is_eof;

		if (unlikely(tx_swbd->check_wb)) {
			union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);

			if (txbd->flags & ENETC_TXBD_FLAGS_W &&
			    tx_swbd->do_twostep_tstamp) {
				enetc_get_tx_tstamp(&priv->si->hw, txbd,
						    &tstamp);
				do_twostep_tstamp = true;
			}

			if (tx_swbd->qbv_en &&
			    txbd->wb.status & ENETC_TXBD_STATS_WIN)
				tx_win_drop++;
		}

		if (tx_swbd->is_xdp_tx)
			enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd);
		else if (likely(tx_swbd->dma))
			enetc_unmap_tx_buff(tx_ring, tx_swbd);

		if (xdp_frame) {
			xdp_return_frame(xdp_frame);
		} else if (skb) {
			struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);

			if (unlikely(enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
				/* Start work to release the lock for the next
				 * one-step timestamping packet, and send one
				 * skb from the tx_skbs queue if any is
				 * pending.
				 */
				schedule_work(&priv->tx_onestep_tstamp);
			} else if (unlikely(do_twostep_tstamp)) {
				enetc_tstamp_tx(skb, tstamp);
				do_twostep_tstamp = false;
			}
			napi_consume_skb(skb, napi_budget);
		}

		tx_byte_cnt += tx_swbd->len;
		/* Scrub the swbd here so we don't have to do that
		 * when we reuse it during xmit
		 */
		memset(tx_swbd, 0, sizeof(*tx_swbd));

		bds_to_clean--;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = tx_ring->tx_swbd;
		}

		/* BD iteration loop end */
		if (is_eof) {
			tx_frm_cnt++;
			/* re-arm interrupt source */
			enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) |
					 BIT(16 + tx_ring->index));
		}

		if (unlikely(!bds_to_clean))
			bds_to_clean = enetc_bd_ready_count(tx_ring, i);
	}

	tx_ring->next_to_clean = i;
	tx_ring->stats.packets += tx_frm_cnt;
	tx_ring->stats.bytes += tx_byte_cnt;
	tx_ring->stats.win_drop += tx_win_drop;

	if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
		     __netif_subqueue_stopped(ndev, tx_ring->index) &&
		     !test_bit(ENETC_TX_DOWN, &priv->flags) &&
		     (enetc_bd_unused(tx_ring) >=
		      ENETC_TXBDS_MAX_NEEDED(priv->max_frags)))) {
		netif_wake_subqueue(ndev, tx_ring->index);
	}

	return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
}

static bool enetc_new_page(struct enetc_bdr *rx_ring,
			   struct enetc_rx_swbd *rx_swbd)
{
	bool xdp = !!(rx_ring->xdp.prog);
	struct page *page;
	dma_addr_t addr;

	page = dev_alloc_page();
	if (unlikely(!page))
		return false;

	/* For XDP_TX, we forgo dma_unmap -> dma_map */
	rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;

	addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir);
	if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
		__free_page(page);

		return false;
	}

	rx_swbd->dma = addr;
	rx_swbd->page = page;
	rx_swbd->page_offset = rx_ring->buffer_offset;

	return true;
}

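/* Allocate (or reuse) pages for up to buff_cnt Rx BDs starting at
 * next_to_use and publish them to hardware by bumping the ring's consumer
 * index. Returns the number of BDs actually refilled.
 */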
static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
	struct enetc_rx_swbd *rx_swbd;
	union enetc_rx_bd *rxbd;
	int i, j;

	i = rx_ring->next_to_use;
	rx_swbd = &rx_ring->rx_swbd[i];
	rxbd = enetc_rxbd(rx_ring, i);

	for (j = 0; j < buff_cnt; j++) {
		/* try reuse page */
		if (unlikely(!rx_swbd->page)) {
			if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
				rx_ring->stats.rx_alloc_errs++;
				break;
			}
		}

		/* update RxBD */
		rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
					   rx_swbd->page_offset);
		/* clear 'R' as well */
		rxbd->r.lstatus = 0;

		enetc_rxbd_next(rx_ring, &rxbd, &i);
		rx_swbd = &rx_ring->rx_swbd[i];
	}

	if (likely(j)) {
		rx_ring->next_to_alloc = i; /* keep track from page reuse */
		rx_ring->next_to_use = i;

		/* update ENETC's consumer index */
		enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
	}

	return j;
}

static void enetc_get_rx_tstamp(struct net_device *ndev,
				union enetc_rx_bd *rxbd,
				struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	u32 lo, hi, tstamp_lo;
	u64 tstamp;

	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
		lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0);
		hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1);
		rxbd = enetc_rxbd_ext(rxbd);
		tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
		if (lo <= tstamp_lo)
			hi -= 1;

		tstamp = (u64)hi << 32 | tstamp_lo;
		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
	}
}

static void enetc_get_offloads(struct enetc_bdr *rx_ring,
			       union enetc_rx_bd *rxbd, struct sk_buff *skb)
{
	struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);

	/* TODO: hashing */
	if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
		u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);

		skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}

	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) {
		struct enetc_hw *hw = &priv->si->hw;
		__be16 tpid = 0;

		switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) {
		case 0:
			tpid = htons(ETH_P_8021Q);
			break;
		case 1:
			tpid = htons(ETH_P_8021AD);
			break;
		case 2:
			tpid = htons(enetc_rd_hot(hw, ENETC_SICVLANR1) &
				     SICVLANR_ETYPE);
			break;
		case 3:
			tpid = htons(enetc_rd_hot(hw, ENETC_SICVLANR2) &
				     SICVLANR_ETYPE);
		}

		__vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
	}

	if (priv->active_offloads & ENETC_F_RX_TSTAMP)
		enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
}

/* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS,
 * so it needs to work with both DMA_FROM_DEVICE as well as DMA_BIDIRECTIONAL
 * mapped buffers.
 */
static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
					       int i, u16 size)
{
	struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];

	dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
				      rx_swbd->page_offset,
				      size, rx_swbd->dir);
	return rx_swbd;
}

/* Reuse the current page without performing half-page buffer flipping */
static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
			      struct enetc_rx_swbd *rx_swbd)
{
	size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset;

	enetc_reuse_page(rx_ring, rx_swbd);

	dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
					 rx_swbd->page_offset,
					 buffer_size, rx_swbd->dir);

	rx_swbd->page = NULL;
}

/* Reuse the current page by performing half-page buffer flipping */
static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring,
			       struct enetc_rx_swbd *rx_swbd)
{
	if (likely(enetc_page_reusable(rx_swbd->page))) {
		rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
		page_ref_inc(rx_swbd->page);

		enetc_put_rx_buff(rx_ring, rx_swbd);
	} else {
		dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
			       rx_swbd->dir);
		rx_swbd->page = NULL;
	}
}

static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
						int i, u16 size)
{
	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
	struct sk_buff *skb;
	void *ba;

	ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
	skb = build_skb(ba - rx_ring->buffer_offset, ENETC_RXB_TRUESIZE);
	if (unlikely(!skb)) {
		rx_ring->stats.rx_alloc_errs++;
		return NULL;
	}

	skb_reserve(skb, rx_ring->buffer_offset);
	__skb_put(skb, size);

	enetc_flip_rx_buff(rx_ring, rx_swbd);

	return skb;
}

static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
				     u16 size, struct sk_buff *skb)
{
	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
			rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);

	enetc_flip_rx_buff(rx_ring, rx_swbd);
}

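/* On a BD error, drop the whole frame: return each buffer for reuse and
 * advance past all BDs up to and including the final one. Returns true if
 * the frame was consumed as an error.
 */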
static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring,
					      u32 bd_status,
					      union enetc_rx_bd **rxbd, int *i)
{
	if (likely(!(bd_status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))))
		return false;

	enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
	enetc_rxbd_next(rx_ring, rxbd, i);

	while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
		dma_rmb();
		bd_status = le32_to_cpu((*rxbd)->r.lstatus);

		enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
		enetc_rxbd_next(rx_ring, rxbd, i);
	}

	rx_ring->ndev->stats.rx_dropped++;
	rx_ring->ndev->stats.rx_errors++;

	return true;
}

static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring,
				       u32 bd_status, union enetc_rx_bd **rxbd,
				       int *i, int *cleaned_cnt, int buffer_size)
{
	struct sk_buff *skb;
	u16 size;

	size = le16_to_cpu((*rxbd)->r.buf_len);
	skb = enetc_map_rx_buff_to_skb(rx_ring, *i, size);
	if (!skb)
		return NULL;

	enetc_get_offloads(rx_ring, *rxbd, skb);

	(*cleaned_cnt)++;

	enetc_rxbd_next(rx_ring, rxbd, i);

	/* not last BD in frame? */
	while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
		bd_status = le32_to_cpu((*rxbd)->r.lstatus);
		size = buffer_size;

		if (bd_status & ENETC_RXBD_LSTATUS_F) {
			dma_rmb();
			size = le16_to_cpu((*rxbd)->r.buf_len);
		}

		enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb);

		(*cleaned_cnt)++;

		enetc_rxbd_next(rx_ring, rxbd, i);
	}

	skb_record_rx_queue(skb, rx_ring->index);
	skb->protocol = eth_type_trans(skb, rx_ring->ndev);

	return skb;
}

#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */

static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
			       struct napi_struct *napi, int work_limit)
{
	int rx_frm_cnt = 0, rx_byte_cnt = 0;
	int cleaned_cnt, i;

	cleaned_cnt = enetc_bd_unused(rx_ring);
	/* next descriptor to process */
	i = rx_ring->next_to_clean;

	enetc_lock_mdio();

	while (likely(rx_frm_cnt < work_limit)) {
		union enetc_rx_bd *rxbd;
		struct sk_buff *skb;
		u32 bd_status;

		if (cleaned_cnt >= ENETC_RXBD_BUNDLE)
			cleaned_cnt -= enetc_refill_rx_ring(rx_ring,
							    cleaned_cnt);

		rxbd = enetc_rxbd(rx_ring, i);
		bd_status = le32_to_cpu(rxbd->r.lstatus);
		if (!bd_status)
			break;

		enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
		dma_rmb(); /* for reading other rxbd fields */

		if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
						      &rxbd, &i))
			break;

		skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i,
				      &cleaned_cnt, ENETC_RXB_DMA_SIZE);
		if (!skb)
			break;

		/* When set, the outer VLAN header is extracted and reported
		 * in the receive buffer descriptor, so rx_byte_cnt must also
		 * account for the length of the extracted VLAN header.
		 */
1639 if (bd_status & ENETC_RXBD_FLAG_VLAN)
1640 rx_byte_cnt += VLAN_HLEN;
1641 rx_byte_cnt += skb->len + ETH_HLEN;
1642 rx_frm_cnt++;
1643
1644 enetc_unlock_mdio();
1645 napi_gro_receive(napi, skb);
1646 enetc_lock_mdio();
1647 }
1648
1649 rx_ring->next_to_clean = i;
1650
1651 rx_ring->stats.packets += rx_frm_cnt;
1652 rx_ring->stats.bytes += rx_byte_cnt;
1653
1654 enetc_unlock_mdio();
1655
1656 return rx_frm_cnt;
1657 }
1658
enetc_xdp_map_tx_buff(struct enetc_bdr * tx_ring,int i,struct enetc_tx_swbd * tx_swbd,int frm_len)1659 static void enetc_xdp_map_tx_buff(struct enetc_bdr *tx_ring, int i,
1660 struct enetc_tx_swbd *tx_swbd,
1661 int frm_len)
1662 {
1663 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
1664
1665 prefetchw(txbd);
1666
1667 enetc_clear_tx_bd(txbd);
1668 txbd->addr = cpu_to_le64(tx_swbd->dma + tx_swbd->page_offset);
1669 txbd->buf_len = cpu_to_le16(tx_swbd->len);
1670 txbd->frm_len = cpu_to_le16(frm_len);
1671
1672 memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd));
1673 }
1674
1675 /* Puts in the TX ring one XDP frame, mapped as an array of TX software buffer
1676 * descriptors.
1677 */
enetc_xdp_tx(struct enetc_bdr * tx_ring,struct enetc_tx_swbd * xdp_tx_arr,int num_tx_swbd)1678 static bool enetc_xdp_tx(struct enetc_bdr *tx_ring,
1679 struct enetc_tx_swbd *xdp_tx_arr, int num_tx_swbd)
1680 {
1681 struct enetc_tx_swbd *tmp_tx_swbd = xdp_tx_arr;
1682 int i, k, frm_len = tmp_tx_swbd->len;
1683
1684 if (unlikely(enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(num_tx_swbd)))
1685 return false;
1686
1687 while (unlikely(!tmp_tx_swbd->is_eof)) {
1688 tmp_tx_swbd++;
1689 frm_len += tmp_tx_swbd->len;
1690 }
1691
1692 i = tx_ring->next_to_use;
1693
1694 for (k = 0; k < num_tx_swbd; k++) {
1695 struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[k];
1696
1697 enetc_xdp_map_tx_buff(tx_ring, i, xdp_tx_swbd, frm_len);
1698
1699 /* last BD needs 'F' bit set */
1700 if (xdp_tx_swbd->is_eof) {
1701 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
1702
1703 txbd->flags = ENETC_TXBD_FLAGS_F;
1704 }
1705
1706 enetc_bdr_idx_inc(tx_ring, &i);
1707 }
1708
1709 tx_ring->next_to_use = i;
1710
1711 return true;
1712 }
1713
enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr * tx_ring,struct enetc_tx_swbd * xdp_tx_arr,struct xdp_frame * xdp_frame)1714 static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
1715 struct enetc_tx_swbd *xdp_tx_arr,
1716 struct xdp_frame *xdp_frame)
1717 {
1718 struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[0];
1719 struct skb_shared_info *shinfo;
1720 void *data = xdp_frame->data;
1721 int len = xdp_frame->len;
1722 skb_frag_t *frag;
1723 dma_addr_t dma;
1724 unsigned int f;
1725 int n = 0;
1726
1727 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
1728 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
1729 netdev_err(tx_ring->ndev, "DMA map error\n");
1730 return -1;
1731 }
1732
1733 xdp_tx_swbd->dma = dma;
1734 xdp_tx_swbd->dir = DMA_TO_DEVICE;
1735 xdp_tx_swbd->len = len;
1736 xdp_tx_swbd->is_xdp_redirect = true;
1737 xdp_tx_swbd->is_eof = false;
1738 xdp_tx_swbd->xdp_frame = NULL;
1739
1740 n++;
1741
1742 if (!xdp_frame_has_frags(xdp_frame))
1743 goto out;
1744
1745 xdp_tx_swbd = &xdp_tx_arr[n];
1746
1747 shinfo = xdp_get_shared_info_from_frame(xdp_frame);
1748
1749 for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags;
1750 f++, frag++) {
1751 data = skb_frag_address(frag);
1752 len = skb_frag_size(frag);
1753
1754 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
1755 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
1756 /* Undo the DMA mapping for all fragments */
1757 while (--n >= 0)
1758 enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]);
1759
1760 netdev_err(tx_ring->ndev, "DMA map error\n");
1761 return -1;
1762 }
1763
1764 xdp_tx_swbd->dma = dma;
1765 xdp_tx_swbd->dir = DMA_TO_DEVICE;
1766 xdp_tx_swbd->len = len;
1767 xdp_tx_swbd->is_xdp_redirect = true;
1768 xdp_tx_swbd->is_eof = false;
1769 xdp_tx_swbd->xdp_frame = NULL;
1770
1771 n++;
1772 xdp_tx_swbd = &xdp_tx_arr[n];
1773 }
1774 out:
1775 xdp_tx_arr[n - 1].is_eof = true;
1776 xdp_tx_arr[n - 1].xdp_frame = xdp_frame;
1777
1778 return n;
1779 }
1780
enetc_xdp_xmit(struct net_device * ndev,int num_frames,struct xdp_frame ** frames,u32 flags)1781 int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
1782 struct xdp_frame **frames, u32 flags)
1783 {
1784 struct enetc_tx_swbd xdp_redirect_arr[ENETC_MAX_SKB_FRAGS] = {0};
1785 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1786 struct enetc_bdr *tx_ring;
1787 int xdp_tx_bd_cnt, i, k;
1788 int xdp_tx_frm_cnt = 0;
1789
1790 if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags)))
1791 return -ENETDOWN;
1792
1793 enetc_lock_mdio();
1794
1795 tx_ring = priv->xdp_tx_ring[smp_processor_id()];
1796
1797 prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use));
1798
1799 for (k = 0; k < num_frames; k++) {
1800 xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring,
1801 xdp_redirect_arr,
1802 frames[k]);
1803 if (unlikely(xdp_tx_bd_cnt < 0))
1804 break;
1805
1806 if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr,
1807 xdp_tx_bd_cnt))) {
1808 for (i = 0; i < xdp_tx_bd_cnt; i++)
1809 enetc_unmap_tx_buff(tx_ring,
1810 &xdp_redirect_arr[i]);
1811 tx_ring->stats.xdp_tx_drops++;
1812 break;
1813 }
1814
1815 xdp_tx_frm_cnt++;
1816 }
1817
1818 if (unlikely((flags & XDP_XMIT_FLUSH) || k != xdp_tx_frm_cnt))
1819 enetc_update_tx_ring_tail(tx_ring);
1820
1821 tx_ring->stats.xdp_tx += xdp_tx_frm_cnt;
1822
1823 enetc_unlock_mdio();
1824
1825 return xdp_tx_frm_cnt;
1826 }
1827 EXPORT_SYMBOL_GPL(enetc_xdp_xmit);
1828
enetc_map_rx_buff_to_xdp(struct enetc_bdr * rx_ring,int i,struct xdp_buff * xdp_buff,u16 size)1829 static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
1830 struct xdp_buff *xdp_buff, u16 size)
1831 {
1832 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
1833 void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset;
1834
1835 /* To be used for XDP_TX */
1836 rx_swbd->len = size;
1837
1838 xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset,
1839 rx_ring->buffer_offset, size, false);
1840 }
1841
1842 static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
1843 u16 size, struct xdp_buff *xdp_buff)
1844 {
1845 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff);
1846 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
1847 skb_frag_t *frag;
1848
1849 /* To be used for XDP_TX */
1850 rx_swbd->len = size;
1851
1852 if (!xdp_buff_has_frags(xdp_buff)) {
1853 xdp_buff_set_frags_flag(xdp_buff);
1854 shinfo->xdp_frags_size = size;
1855 shinfo->nr_frags = 0;
1856 } else {
1857 shinfo->xdp_frags_size += size;
1858 }
1859
1860 if (page_is_pfmemalloc(rx_swbd->page))
1861 xdp_buff_set_frag_pfmemalloc(xdp_buff);
1862
1863 frag = &shinfo->frags[shinfo->nr_frags];
1864 skb_frag_fill_page_desc(frag, rx_swbd->page, rx_swbd->page_offset,
1865 size);
1866
1867 shinfo->nr_frags++;
1868 }
1869
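/* Assemble a (possibly multi-buffer) frame into an xdp_buff: the first
 * BD becomes the linear area, and each subsequent BD is appended as a
 * frag until a BD with the final (F) bit set in lstatus closes the
 * frame.
 */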
1870 static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status,
1871 union enetc_rx_bd **rxbd, int *i,
1872 int *cleaned_cnt, struct xdp_buff *xdp_buff)
1873 {
1874 u16 size = le16_to_cpu((*rxbd)->r.buf_len);
1875
1876 xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq);
1877
1878 enetc_map_rx_buff_to_xdp(rx_ring, *i, xdp_buff, size);
1879 (*cleaned_cnt)++;
1880 enetc_rxbd_next(rx_ring, rxbd, i);
1881
1882 /* not last BD in frame? */
1883 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
1884 bd_status = le32_to_cpu((*rxbd)->r.lstatus);
1885 size = ENETC_RXB_DMA_SIZE_XDP;
1886
1887 if (bd_status & ENETC_RXBD_LSTATUS_F) {
1888 dma_rmb();
1889 size = le16_to_cpu((*rxbd)->r.buf_len);
1890 }
1891
1892 enetc_add_rx_buff_to_xdp(rx_ring, *i, size, xdp_buff);
1893 (*cleaned_cnt)++;
1894 enetc_rxbd_next(rx_ring, rxbd, i);
1895 }
1896 }
1897
1898 /* Convert RX buffer descriptors to TX buffer descriptors. These will be
1899 * recycled back into the RX ring in enetc_clean_tx_ring.
1900 */
1901 static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *xdp_tx_arr,
1902 struct enetc_bdr *rx_ring,
1903 int rx_ring_first, int rx_ring_last)
1904 {
1905 int n = 0;
1906
1907 for (; rx_ring_first != rx_ring_last;
1908 n++, enetc_bdr_idx_inc(rx_ring, &rx_ring_first)) {
1909 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
1910 struct enetc_tx_swbd *tx_swbd = &xdp_tx_arr[n];
1911
1912 /* No need to dma_map, we already have DMA_BIDIRECTIONAL */
1913 tx_swbd->dma = rx_swbd->dma;
1914 tx_swbd->dir = rx_swbd->dir;
1915 tx_swbd->page = rx_swbd->page;
1916 tx_swbd->page_offset = rx_swbd->page_offset;
1917 tx_swbd->len = rx_swbd->len;
1918 tx_swbd->is_dma_page = true;
1919 tx_swbd->is_xdp_tx = true;
1920 tx_swbd->is_eof = false;
1921 }
1922
1923 /* We rely on the caller providing rx_ring_last > rx_ring_first */
1924 xdp_tx_arr[n - 1].is_eof = true;
1925
1926 return n;
1927 }
1928
1929 static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
1930 int rx_ring_last)
1931 {
1932 while (rx_ring_first != rx_ring_last) {
1933 enetc_put_rx_buff(rx_ring,
1934 &rx_ring->rx_swbd[rx_ring_first]);
1935 enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
1936 }
1937 }
1938
1939 static void enetc_bulk_flip_buff(struct enetc_bdr *rx_ring, int rx_ring_first,
1940 int rx_ring_last)
1941 {
1942 while (rx_ring_first != rx_ring_last) {
1943 enetc_flip_rx_buff(rx_ring,
1944 &rx_ring->rx_swbd[rx_ring_first]);
1945 enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
1946 }
1947 }
1948
1949 static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
1950 struct napi_struct *napi, int work_limit,
1951 struct bpf_prog *prog)
1952 {
1953 int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0, xdp_redirect_frm_cnt = 0;
1954 struct enetc_tx_swbd xdp_tx_arr[ENETC_MAX_SKB_FRAGS] = {0};
1955 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
1956 int rx_frm_cnt = 0, rx_byte_cnt = 0;
1957 struct enetc_bdr *tx_ring;
1958 int cleaned_cnt, i;
1959 u32 xdp_act;
1960
1961 cleaned_cnt = enetc_bd_unused(rx_ring);
1962 /* next descriptor to process */
1963 i = rx_ring->next_to_clean;
1964
1965 enetc_lock_mdio();
1966
1967 while (likely(rx_frm_cnt < work_limit)) {
1968 union enetc_rx_bd *rxbd, *orig_rxbd;
1969 struct xdp_buff xdp_buff;
1970 struct sk_buff *skb;
1971 int orig_i, err;
1972 u32 bd_status;
1973
1974 rxbd = enetc_rxbd(rx_ring, i);
1975 bd_status = le32_to_cpu(rxbd->r.lstatus);
1976 if (!bd_status)
1977 break;
1978
1979 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
1980 dma_rmb(); /* for reading other rxbd fields */
1981
1982 if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
1983 &rxbd, &i))
1984 break;
1985
1986 orig_rxbd = rxbd;
1987 orig_i = i;
1988
1989 enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
1990 &cleaned_cnt, &xdp_buff);
1991
1992 /* When the VLAN flag is set, the outer VLAN header has been
1993 * extracted and is reported in the receive buffer descriptor,
1994 * so rx_byte_cnt must also account for the length of the
1995 * stripped VLAN header. */
1996 if (bd_status & ENETC_RXBD_FLAG_VLAN)
1997 rx_byte_cnt += VLAN_HLEN;
1998 rx_byte_cnt += xdp_get_buff_len(&xdp_buff);
1999
2000 xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);
2001
2002 switch (xdp_act) {
2003 default:
2004 bpf_warn_invalid_xdp_action(rx_ring->ndev, prog, xdp_act);
2005 fallthrough;
2006 case XDP_ABORTED:
2007 trace_xdp_exception(rx_ring->ndev, prog, xdp_act);
2008 fallthrough;
2009 case XDP_DROP:
2010 enetc_xdp_drop(rx_ring, orig_i, i);
2011 rx_ring->stats.xdp_drops++;
2012 break;
2013 case XDP_PASS:
2014 skb = xdp_build_skb_from_buff(&xdp_buff);
2015 /* Probably under memory pressure, stop NAPI */
2016 if (unlikely(!skb)) {
2017 enetc_xdp_drop(rx_ring, orig_i, i);
2018 rx_ring->stats.xdp_drops++;
2019 goto out;
2020 }
2021
2022 enetc_get_offloads(rx_ring, orig_rxbd, skb);
2023
2024 /* These buffers are about to be owned by the stack.
2025 * Update our buffer cache (the rx_swbd array elements)
2026 * with their other page halves.
2027 */
2028 enetc_bulk_flip_buff(rx_ring, orig_i, i);
2029
2030 enetc_unlock_mdio();
2031 napi_gro_receive(napi, skb);
2032 enetc_lock_mdio();
2033 break;
2034 case XDP_TX:
2035 tx_ring = priv->xdp_tx_ring[rx_ring->index];
2036 if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) {
2037 enetc_xdp_drop(rx_ring, orig_i, i);
2038 tx_ring->stats.xdp_tx_drops++;
2039 break;
2040 }
2041
2042 xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr,
2043 rx_ring,
2044 orig_i, i);
2045
2046 if (!enetc_xdp_tx(tx_ring, xdp_tx_arr, xdp_tx_bd_cnt)) {
2047 enetc_xdp_drop(rx_ring, orig_i, i);
2048 tx_ring->stats.xdp_tx_drops++;
2049 } else {
2050 tx_ring->stats.xdp_tx++;
2051 rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
2052 xdp_tx_frm_cnt++;
2053 /* The XDP_TX enqueue was successful, so we
2054 * need to scrub the RX software BDs because
2055 * the ownership of the buffers no longer
2056 * belongs to the RX ring, and we must prevent
2057 * enetc_refill_rx_ring() from reusing
2058 * rx_swbd->page.
2059 */
2060 while (orig_i != i) {
2061 rx_ring->rx_swbd[orig_i].page = NULL;
2062 enetc_bdr_idx_inc(rx_ring, &orig_i);
2063 }
2064 }
2065 break;
2066 case XDP_REDIRECT:
2067 enetc_unlock_mdio();
2068 err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
2069 enetc_lock_mdio();
2070 if (unlikely(err)) {
2071 enetc_xdp_drop(rx_ring, orig_i, i);
2072 rx_ring->stats.xdp_redirect_failures++;
2073 } else {
2074 enetc_bulk_flip_buff(rx_ring, orig_i, i);
2075 xdp_redirect_frm_cnt++;
2076 rx_ring->stats.xdp_redirect++;
2077 }
2078 }
2079
2080 rx_frm_cnt++;
2081 }
2082
2083 out:
2084 rx_ring->next_to_clean = i;
2085
2086 rx_ring->stats.packets += rx_frm_cnt;
2087 rx_ring->stats.bytes += rx_byte_cnt;
2088
2089 if (xdp_redirect_frm_cnt) {
2090 enetc_unlock_mdio();
2091 xdp_do_flush();
2092 enetc_lock_mdio();
2093 }
2094
2095 if (xdp_tx_frm_cnt)
2096 enetc_update_tx_ring_tail(tx_ring);
2097
2098 if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight)
2099 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
2100 rx_ring->xdp.xdp_tx_in_flight);
2101
2102 enetc_unlock_mdio();
2103
2104 return rx_frm_cnt;
2105 }
2106
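/* NAPI poll handler. Per the NAPI contract, return the full budget
 * without calling napi_complete_done() while TX cleanup or RX
 * processing still has work left; interrupts are re-enabled only once
 * all rings serviced by this vector are quiescent.
 */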
2107 static int enetc_poll(struct napi_struct *napi, int budget)
2108 {
2109 struct enetc_int_vector
2110 *v = container_of(napi, struct enetc_int_vector, napi);
2111 struct enetc_bdr *rx_ring = &v->rx_ring;
2112 struct bpf_prog *prog;
2113 bool complete = true;
2114 int work_done;
2115 int i;
2116
2117 enetc_lock_mdio();
2118
2119 for (i = 0; i < v->count_tx_rings; i++)
2120 if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
2121 complete = false;
2122 enetc_unlock_mdio();
2123
2124 prog = rx_ring->xdp.prog;
2125 if (prog)
2126 work_done = enetc_clean_rx_ring_xdp(rx_ring, napi, budget, prog);
2127 else
2128 work_done = enetc_clean_rx_ring(rx_ring, napi, budget);
2129 if (work_done == budget)
2130 complete = false;
2131 if (work_done)
2132 v->rx_napi_work = true;
2133
2134 if (!complete)
2135 return budget;
2136
2137 napi_complete_done(napi, work_done);
2138
2139 if (likely(v->rx_dim_en))
2140 enetc_rx_net_dim(v);
2141
2142 v->rx_napi_work = false;
2143
2144 enetc_lock_mdio();
2145 /* enable interrupts */
2146 enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
2147
2148 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
2149 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
2150 ENETC_TBIER_TXTIE);
2151
2152 enetc_unlock_mdio();
2153
2154 return work_done;
2155 }
2156
2157 /* Probing and Init */
2158 #define ENETC_MAX_RFS_SIZE 64
2159 void enetc_get_si_caps(struct enetc_si *si)
2160 {
2161 struct enetc_hw *hw = &si->hw;
2162 u32 val;
2163
2164 /* find out how many of various resources we have to work with */
2165 val = enetc_rd(hw, ENETC_SICAPR0);
2166 si->num_rx_rings = (val >> 16) & 0xff;
2167 si->num_tx_rings = val & 0xff;
2168
2169 val = enetc_rd(hw, ENETC_SIPCAPR0);
2170 if (val & ENETC_SIPCAPR0_RFS) {
2171 val = enetc_rd(hw, ENETC_SIRFSCAPR);
2172 si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
2173 si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
2174 } else {
2175 /* ENETC that does not support RFS */
2176 si->num_fs_entries = 0;
2177 }
2178
2179 si->num_rss = 0;
2180 val = enetc_rd(hw, ENETC_SIPCAPR0);
2181 if (val & ENETC_SIPCAPR0_RSS) {
2182 u32 rss;
2183
2184 rss = enetc_rd(hw, ENETC_SIRSSCAPR);
2185 si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
2186 }
2187
2188 if (val & ENETC_SIPCAPR0_LSO)
2189 si->hw_features |= ENETC_SI_F_LSO;
2190 }
2191 EXPORT_SYMBOL_GPL(enetc_get_si_caps);
2192
2193 static int enetc_dma_alloc_bdr(struct enetc_bdr_resource *res)
2194 {
2195 size_t bd_base_size = res->bd_count * res->bd_size;
2196
2197 res->bd_base = dma_alloc_coherent(res->dev, bd_base_size,
2198 &res->bd_dma_base, GFP_KERNEL);
2199 if (!res->bd_base)
2200 return -ENOMEM;
2201
2202 /* h/w requires 128B alignment */
2203 if (!IS_ALIGNED(res->bd_dma_base, 128)) {
2204 dma_free_coherent(res->dev, bd_base_size, res->bd_base,
2205 res->bd_dma_base);
2206 return -EINVAL;
2207 }
2208
2209 return 0;
2210 }
2211
2212 static void enetc_dma_free_bdr(const struct enetc_bdr_resource *res)
2213 {
2214 size_t bd_base_size = res->bd_count * res->bd_size;
2215
2216 dma_free_coherent(res->dev, bd_base_size, res->bd_base,
2217 res->bd_dma_base);
2218 }
2219
2220 static int enetc_alloc_tx_resource(struct enetc_bdr_resource *res,
2221 struct device *dev, size_t bd_count)
2222 {
2223 int err;
2224
2225 res->dev = dev;
2226 res->bd_count = bd_count;
2227 res->bd_size = sizeof(union enetc_tx_bd);
2228
2229 res->tx_swbd = vcalloc(bd_count, sizeof(*res->tx_swbd));
2230 if (!res->tx_swbd)
2231 return -ENOMEM;
2232
2233 err = enetc_dma_alloc_bdr(res);
2234 if (err)
2235 goto err_alloc_bdr;
2236
2237 res->tso_headers = dma_alloc_coherent(dev, bd_count * TSO_HEADER_SIZE,
2238 &res->tso_headers_dma,
2239 GFP_KERNEL);
2240 if (!res->tso_headers) {
2241 err = -ENOMEM;
2242 goto err_alloc_tso;
2243 }
2244
2245 return 0;
2246
2247 err_alloc_tso:
2248 enetc_dma_free_bdr(res);
2249 err_alloc_bdr:
2250 vfree(res->tx_swbd);
2251 res->tx_swbd = NULL;
2252
2253 return err;
2254 }
2255
2256 static void enetc_free_tx_resource(const struct enetc_bdr_resource *res)
2257 {
2258 dma_free_coherent(res->dev, res->bd_count * TSO_HEADER_SIZE,
2259 res->tso_headers, res->tso_headers_dma);
2260 enetc_dma_free_bdr(res);
2261 vfree(res->tx_swbd);
2262 }
2263
2264 static struct enetc_bdr_resource *
2265 enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
2266 {
2267 struct enetc_bdr_resource *tx_res;
2268 int i, err;
2269
2270 tx_res = kcalloc(priv->num_tx_rings, sizeof(*tx_res), GFP_KERNEL);
2271 if (!tx_res)
2272 return ERR_PTR(-ENOMEM);
2273
2274 for (i = 0; i < priv->num_tx_rings; i++) {
2275 struct enetc_bdr *tx_ring = priv->tx_ring[i];
2276
2277 err = enetc_alloc_tx_resource(&tx_res[i], tx_ring->dev,
2278 tx_ring->bd_count);
2279 if (err)
2280 goto fail;
2281 }
2282
2283 return tx_res;
2284
2285 fail:
2286 while (i-- > 0)
2287 enetc_free_tx_resource(&tx_res[i]);
2288
2289 kfree(tx_res);
2290
2291 return ERR_PTR(err);
2292 }
2293
2294 static void enetc_free_tx_resources(const struct enetc_bdr_resource *tx_res,
2295 size_t num_resources)
2296 {
2297 size_t i;
2298
2299 for (i = 0; i < num_resources; i++)
2300 enetc_free_tx_resource(&tx_res[i]);
2301
2302 kfree(tx_res);
2303 }
2304
2305 static int enetc_alloc_rx_resource(struct enetc_bdr_resource *res,
2306 struct device *dev, size_t bd_count,
2307 bool extended)
2308 {
2309 int err;
2310
2311 res->dev = dev;
2312 res->bd_count = bd_count;
2313 res->bd_size = sizeof(union enetc_rx_bd);
2314 if (extended)
2315 res->bd_size *= 2;
2316
2317 res->rx_swbd = vcalloc(bd_count, sizeof(struct enetc_rx_swbd));
2318 if (!res->rx_swbd)
2319 return -ENOMEM;
2320
2321 err = enetc_dma_alloc_bdr(res);
2322 if (err) {
2323 vfree(res->rx_swbd);
2324 return err;
2325 }
2326
2327 return 0;
2328 }
2329
2330 static void enetc_free_rx_resource(const struct enetc_bdr_resource *res)
2331 {
2332 enetc_dma_free_bdr(res);
2333 vfree(res->rx_swbd);
2334 }
2335
2336 static struct enetc_bdr_resource *
2337 enetc_alloc_rx_resources(struct enetc_ndev_priv *priv, bool extended)
2338 {
2339 struct enetc_bdr_resource *rx_res;
2340 int i, err;
2341
2342 rx_res = kcalloc(priv->num_rx_rings, sizeof(*rx_res), GFP_KERNEL);
2343 if (!rx_res)
2344 return ERR_PTR(-ENOMEM);
2345
2346 for (i = 0; i < priv->num_rx_rings; i++) {
2347 struct enetc_bdr *rx_ring = priv->rx_ring[i];
2348
2349 err = enetc_alloc_rx_resource(&rx_res[i], rx_ring->dev,
2350 rx_ring->bd_count, extended);
2351 if (err)
2352 goto fail;
2353 }
2354
2355 return rx_res;
2356
2357 fail:
2358 while (i-- > 0)
2359 enetc_free_rx_resource(&rx_res[i]);
2360
2361 kfree(rx_res);
2362
2363 return ERR_PTR(err);
2364 }
2365
2366 static void enetc_free_rx_resources(const struct enetc_bdr_resource *rx_res,
2367 size_t num_resources)
2368 {
2369 size_t i;
2370
2371 for (i = 0; i < num_resources; i++)
2372 enetc_free_rx_resource(&rx_res[i]);
2373
2374 kfree(rx_res);
2375 }
2376
2377 static void enetc_assign_tx_resource(struct enetc_bdr *tx_ring,
2378 const struct enetc_bdr_resource *res)
2379 {
2380 tx_ring->bd_base = res ? res->bd_base : NULL;
2381 tx_ring->bd_dma_base = res ? res->bd_dma_base : 0;
2382 tx_ring->tx_swbd = res ? res->tx_swbd : NULL;
2383 tx_ring->tso_headers = res ? res->tso_headers : NULL;
2384 tx_ring->tso_headers_dma = res ? res->tso_headers_dma : 0;
2385 }
2386
2387 static void enetc_assign_rx_resource(struct enetc_bdr *rx_ring,
2388 const struct enetc_bdr_resource *res)
2389 {
2390 rx_ring->bd_base = res ? res->bd_base : NULL;
2391 rx_ring->bd_dma_base = res ? res->bd_dma_base : 0;
2392 rx_ring->rx_swbd = res ? res->rx_swbd : NULL;
2393 }
2394
2395 static void enetc_assign_tx_resources(struct enetc_ndev_priv *priv,
2396 const struct enetc_bdr_resource *res)
2397 {
2398 int i;
2399
2400 if (priv->tx_res)
2401 enetc_free_tx_resources(priv->tx_res, priv->num_tx_rings);
2402
2403 for (i = 0; i < priv->num_tx_rings; i++) {
2404 enetc_assign_tx_resource(priv->tx_ring[i],
2405 res ? &res[i] : NULL);
2406 }
2407
2408 priv->tx_res = res;
2409 }
2410
2411 static void enetc_assign_rx_resources(struct enetc_ndev_priv *priv,
2412 const struct enetc_bdr_resource *res)
2413 {
2414 int i;
2415
2416 if (priv->rx_res)
2417 enetc_free_rx_resources(priv->rx_res, priv->num_rx_rings);
2418
2419 for (i = 0; i < priv->num_rx_rings; i++) {
2420 enetc_assign_rx_resource(priv->rx_ring[i],
2421 res ? &res[i] : NULL);
2422 }
2423
2424 priv->rx_res = res;
2425 }
2426
2427 static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
2428 {
2429 int i;
2430
2431 for (i = 0; i < tx_ring->bd_count; i++) {
2432 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
2433
2434 enetc_free_tx_frame(tx_ring, tx_swbd);
2435 }
2436 }
2437
2438 static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
2439 {
2440 int i;
2441
2442 for (i = 0; i < rx_ring->bd_count; i++) {
2443 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
2444
2445 if (!rx_swbd->page)
2446 continue;
2447
2448 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
2449 rx_swbd->dir);
2450 __free_page(rx_swbd->page);
2451 rx_swbd->page = NULL;
2452 }
2453 }
2454
2455 static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
2456 {
2457 int i;
2458
2459 for (i = 0; i < priv->num_rx_rings; i++)
2460 enetc_free_rx_ring(priv->rx_ring[i]);
2461
2462 for (i = 0; i < priv->num_tx_rings; i++)
2463 enetc_free_tx_ring(priv->tx_ring[i]);
2464 }
2465
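/* Distribute the RSS indirection table entries round-robin across the
 * RX rings, e.g. with num_groups == 4 the table reads 0, 1, 2, 3,
 * 0, 1, ... so flows hash evenly onto all enabled rings.
 */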
2466 static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
2467 {
2468 int *rss_table;
2469 int i;
2470
2471 rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
2472 if (!rss_table)
2473 return -ENOMEM;
2474
2475 /* Set up RSS table defaults */
2476 for (i = 0; i < si->num_rss; i++)
2477 rss_table[i] = i % num_groups;
2478
2479 si->ops->set_rss_table(si, rss_table, si->num_rss);
2480
2481 kfree(rss_table);
2482
2483 return 0;
2484 }
2485
2486 static void enetc_set_lso_flags_mask(struct enetc_hw *hw)
2487 {
2488 enetc_wr(hw, ENETC4_SILSOSFMR0,
2489 SILSOSFMR0_VAL_SET(ENETC4_TCP_NL_SEG_FLAGS_DMASK,
2490 ENETC4_TCP_NL_SEG_FLAGS_DMASK));
2491 enetc_wr(hw, ENETC4_SILSOSFMR1, 0);
2492 }
2493
2494 static void enetc_set_rss(struct net_device *ndev, int en)
2495 {
2496 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2497 struct enetc_hw *hw = &priv->si->hw;
2498 u32 reg;
2499
2500 enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
2501
2502 reg = enetc_rd(hw, ENETC_SIMR);
2503 reg &= ~ENETC_SIMR_RSSE;
2504 reg |= (en) ? ENETC_SIMR_RSSE : 0;
2505 enetc_wr(hw, ENETC_SIMR, reg);
2506 }
2507
2508 int enetc_configure_si(struct enetc_ndev_priv *priv)
2509 {
2510 struct enetc_si *si = priv->si;
2511 struct enetc_hw *hw = &si->hw;
2512 int err;
2513
2514 /* set SI cache attributes */
2515 enetc_wr(hw, ENETC_SICAR0,
2516 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
2517 enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
2518 /* enable SI */
2519 enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
2520
2521 if (si->hw_features & ENETC_SI_F_LSO)
2522 enetc_set_lso_flags_mask(hw);
2523
2524 if (si->num_rss) {
2525 err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
2526 if (err)
2527 return err;
2528
2529 if (priv->ndev->features & NETIF_F_RXHASH)
2530 enetc_set_rss(priv->ndev, true);
2531 }
2532
2533 return 0;
2534 }
2535 EXPORT_SYMBOL_GPL(enetc_configure_si);
2536
2537 void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
2538 {
2539 struct enetc_si *si = priv->si;
2540 int cpus = num_online_cpus();
2541
2542 priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE;
2543 priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE;
2544
2545 /* Enable all available TX rings in order to configure as many
2546 * priorities as possible, when needed.
2547 * TODO: Make # of TX rings run-time configurable
2548 */
2549 priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
2550 priv->num_tx_rings = si->num_tx_rings;
2551 priv->bdr_int_num = priv->num_rx_rings;
2552 priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
2553 priv->tx_ictt = enetc_usecs_to_cycles(600, priv->sysclk_freq);
2554 }
2555 EXPORT_SYMBOL_GPL(enetc_init_si_rings_params);
2556
2557 int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
2558 {
2559 struct enetc_si *si = priv->si;
2560
2561 priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
2562 GFP_KERNEL);
2563 if (!priv->cls_rules)
2564 return -ENOMEM;
2565
2566 return 0;
2567 }
2568 EXPORT_SYMBOL_GPL(enetc_alloc_si_resources);
2569
2570 void enetc_free_si_resources(struct enetc_ndev_priv *priv)
2571 {
2572 kfree(priv->cls_rules);
2573 }
2574 EXPORT_SYMBOL_GPL(enetc_free_si_resources);
2575
2576 static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
2577 {
2578 int idx = tx_ring->index;
2579 u32 tbmr;
2580
2581 enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
2582 lower_32_bits(tx_ring->bd_dma_base));
2583
2584 enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
2585 upper_32_bits(tx_ring->bd_dma_base));
2586
2587 WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
2588 enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
2589 ENETC_RTBLENR_LEN(tx_ring->bd_count));
2590
2591 /* clearing PI/CI registers for Tx not supported, adjust sw indexes */
2592 tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
2593 tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
2594
2595 /* enable Tx ints by setting pkt thr to 1 */
2596 enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
2597
2598 tbmr = ENETC_TBMR_SET_PRIO(tx_ring->prio);
2599 if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
2600 tbmr |= ENETC_TBMR_VIH;
2601
2602 /* enable ring */
2603 enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
2604
2605 tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
2606 tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
2607 tx_ring->idr = hw->reg + ENETC_SITXIDR;
2608 }
2609
2610 static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
2611 bool extended)
2612 {
2613 int idx = rx_ring->index;
2614 u32 rbmr = 0;
2615
2616 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
2617 lower_32_bits(rx_ring->bd_dma_base));
2618
2619 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
2620 upper_32_bits(rx_ring->bd_dma_base));
2621
2622 WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
2623 enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
2624 ENETC_RTBLENR_LEN(rx_ring->bd_count));
2625
2626 if (rx_ring->xdp.prog)
2627 enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE_XDP);
2628 else
2629 enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
2630
2631 /* Also prepare the consumer index in case page allocation never
2632 * succeeds. In that case, hardware will never advance producer index
2633 * to match consumer index, and will drop all frames.
2634 */
2635 enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
2636 enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1);
2637
2638 /* enable Rx ints by setting pkt thr to 1 */
2639 enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
2640
2641 rx_ring->ext_en = extended;
2642 if (rx_ring->ext_en)
2643 rbmr |= ENETC_RBMR_BDS;
2644
2645 if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
2646 rbmr |= ENETC_RBMR_VTE;
2647
2648 rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
2649 rx_ring->idr = hw->reg + ENETC_SIRXIDR;
2650
2651 rx_ring->next_to_clean = 0;
2652 rx_ring->next_to_use = 0;
2653 rx_ring->next_to_alloc = 0;
2654
2655 enetc_lock_mdio();
2656 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
2657 enetc_unlock_mdio();
2658
2659 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
2660 }
2661
2662 static void enetc_setup_bdrs(struct enetc_ndev_priv *priv, bool extended)
2663 {
2664 struct enetc_hw *hw = &priv->si->hw;
2665 int i;
2666
2667 for (i = 0; i < priv->num_tx_rings; i++)
2668 enetc_setup_txbdr(hw, priv->tx_ring[i]);
2669
2670 for (i = 0; i < priv->num_rx_rings; i++)
2671 enetc_setup_rxbdr(hw, priv->rx_ring[i], extended);
2672 }
2673
2674 static void enetc_enable_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
2675 {
2676 int idx = tx_ring->index;
2677 u32 tbmr;
2678
2679 tbmr = enetc_txbdr_rd(hw, idx, ENETC_TBMR);
2680 tbmr |= ENETC_TBMR_EN;
2681 enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
2682 }
2683
2684 static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
2685 {
2686 int idx = rx_ring->index;
2687 u32 rbmr;
2688
2689 rbmr = enetc_rxbdr_rd(hw, idx, ENETC_RBMR);
2690 rbmr |= ENETC_RBMR_EN;
2691 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
2692 }
2693
2694 static void enetc_enable_rx_bdrs(struct enetc_ndev_priv *priv)
2695 {
2696 struct enetc_hw *hw = &priv->si->hw;
2697 int i;
2698
2699 for (i = 0; i < priv->num_rx_rings; i++)
2700 enetc_enable_rxbdr(hw, priv->rx_ring[i]);
2701 }
2702
2703 static void enetc_enable_tx_bdrs(struct enetc_ndev_priv *priv)
2704 {
2705 struct enetc_hw *hw = &priv->si->hw;
2706 int i;
2707
2708 for (i = 0; i < priv->num_tx_rings; i++)
2709 enetc_enable_txbdr(hw, priv->tx_ring[i]);
2710 }
2711
2712 static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
2713 {
2714 int idx = rx_ring->index;
2715
2716 /* disable EN bit on ring */
2717 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
2718 }
2719
2720 static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
2721 {
2722 int idx = tx_ring->index;
2723
2724 /* disable EN bit on ring */
2725 enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
2726 }
2727
2728 static void enetc_disable_rx_bdrs(struct enetc_ndev_priv *priv)
2729 {
2730 struct enetc_hw *hw = &priv->si->hw;
2731 int i;
2732
2733 for (i = 0; i < priv->num_rx_rings; i++)
2734 enetc_disable_rxbdr(hw, priv->rx_ring[i]);
2735 }
2736
2737 static void enetc_disable_tx_bdrs(struct enetc_ndev_priv *priv)
2738 {
2739 struct enetc_hw *hw = &priv->si->hw;
2740 int i;
2741
2742 for (i = 0; i < priv->num_tx_rings; i++)
2743 enetc_disable_txbdr(hw, priv->tx_ring[i]);
2744 }
2745
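/* Wait for a TX ring to finish draining by polling its busy bit with
 * exponential backoff: sleeps of 8, 16, 32 and 64 ms (~120 ms total)
 * before giving up and warning.
 */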
2746 static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
2747 {
2748 int delay = 8, timeout = 100;
2749 int idx = tx_ring->index;
2750
2751 /* wait for busy to clear */
2752 while (delay < timeout &&
2753 enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
2754 msleep(delay);
2755 delay *= 2;
2756 }
2757
2758 if (delay >= timeout)
2759 netdev_warn(tx_ring->ndev, "timed out waiting for tx ring #%d to clear\n",
2760 idx);
2761 }
2762
2763 static void enetc_wait_bdrs(struct enetc_ndev_priv *priv)
2764 {
2765 struct enetc_hw *hw = &priv->si->hw;
2766 int i;
2767
2768 for (i = 0; i < priv->num_tx_rings; i++)
2769 enetc_wait_txbdr(hw, priv->tx_ring[i]);
2770 }
2771
2772 static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
2773 {
2774 struct pci_dev *pdev = priv->si->pdev;
2775 struct enetc_hw *hw = &priv->si->hw;
2776 int i, j, err;
2777
2778 for (i = 0; i < priv->bdr_int_num; i++) {
2779 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
2780 struct enetc_int_vector *v = priv->int_vector[i];
2781 int entry = ENETC_BDR_INT_BASE_IDX + i;
2782
2783 snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
2784 priv->ndev->name, i);
2785 err = request_irq(irq, enetc_msix, IRQF_NO_AUTOEN, v->name, v);
2786 if (err) {
2787 dev_err(priv->dev, "request_irq() failed!\n");
2788 goto irq_err;
2789 }
2790
2791 v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
2792 v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
2793 v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1);
2794
2795 enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
2796
2797 for (j = 0; j < v->count_tx_rings; j++) {
2798 int idx = v->tx_ring[j].index;
2799
2800 enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
2801 }
2802 irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus()));
2803 }
2804
2805 return 0;
2806
2807 irq_err:
2808 while (i--) {
2809 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
2810
2811 irq_set_affinity_hint(irq, NULL);
2812 free_irq(irq, priv->int_vector[i]);
2813 }
2814
2815 return err;
2816 }
2817
2818 static void enetc_free_irqs(struct enetc_ndev_priv *priv)
2819 {
2820 struct pci_dev *pdev = priv->si->pdev;
2821 int i;
2822
2823 for (i = 0; i < priv->bdr_int_num; i++) {
2824 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
2825
2826 irq_set_affinity_hint(irq, NULL);
2827 free_irq(irq, priv->int_vector[i]);
2828 }
2829 }
2830
2831 static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
2832 {
2833 struct enetc_hw *hw = &priv->si->hw;
2834 u32 icpt, ictt;
2835 int i;
2836
2837 /* enable Tx & Rx event indication */
2838 if (priv->ic_mode &
2839 (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) {
2840 icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR);
2841 /* init to non-0 minimum, will be adjusted later */
2842 ictt = 0x1;
2843 } else {
2844 icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */
2845 ictt = 0;
2846 }
2847
2848 for (i = 0; i < priv->num_rx_rings; i++) {
2849 enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt);
2850 enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt);
2851 enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
2852 }
2853
2854 if (priv->ic_mode & ENETC_IC_TX_MANUAL)
2855 icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR);
2856 else
2857 icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */
2858
2859 for (i = 0; i < priv->num_tx_rings; i++) {
2860 enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt);
2861 enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt);
2862 enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE);
2863 }
2864 }
2865
2866 static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
2867 {
2868 struct enetc_hw *hw = &priv->si->hw;
2869 int i;
2870
2871 for (i = 0; i < priv->num_tx_rings; i++)
2872 enetc_txbdr_wr(hw, i, ENETC_TBIER, 0);
2873
2874 for (i = 0; i < priv->num_rx_rings; i++)
2875 enetc_rxbdr_wr(hw, i, ENETC_RBIER, 0);
2876 }
2877
2878 static int enetc_phylink_connect(struct net_device *ndev)
2879 {
2880 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2881 struct ethtool_keee edata;
2882 int err;
2883
2884 if (!priv->phylink) {
2885 /* phy-less mode */
2886 netif_carrier_on(ndev);
2887 return 0;
2888 }
2889
2890 err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
2891 if (err) {
2892 dev_err(&ndev->dev, "could not attach to PHY\n");
2893 return err;
2894 }
2895
2896 /* disable EEE autoneg, until ENETC driver supports it */
2897 memset(&edata, 0, sizeof(struct ethtool_keee));
2898 phylink_ethtool_set_eee(priv->phylink, &edata);
2899
2900 phylink_start(priv->phylink);
2901
2902 return 0;
2903 }
2904
2905 static void enetc_tx_onestep_tstamp(struct work_struct *work)
2906 {
2907 struct enetc_ndev_priv *priv;
2908 struct sk_buff *skb;
2909
2910 priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp);
2911
2912 netif_tx_lock_bh(priv->ndev);
2913
2914 clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags);
2915 skb = skb_dequeue(&priv->tx_skbs);
2916 if (skb)
2917 enetc_start_xmit(skb, priv->ndev);
2918
2919 netif_tx_unlock_bh(priv->ndev);
2920 }
2921
2922 static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv)
2923 {
2924 INIT_WORK(&priv->tx_onestep_tstamp, enetc_tx_onestep_tstamp);
2925 skb_queue_head_init(&priv->tx_skbs);
2926 }
2927
2928 void enetc_start(struct net_device *ndev)
2929 {
2930 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2931 int i;
2932
2933 enetc_setup_interrupts(priv);
2934
2935 for (i = 0; i < priv->bdr_int_num; i++) {
2936 int irq = pci_irq_vector(priv->si->pdev,
2937 ENETC_BDR_INT_BASE_IDX + i);
2938
2939 napi_enable(&priv->int_vector[i]->napi);
2940 enable_irq(irq);
2941 }
2942
2943 enetc_enable_tx_bdrs(priv);
2944
2945 enetc_enable_rx_bdrs(priv);
2946
2947 netif_tx_start_all_queues(ndev);
2948
2949 clear_bit(ENETC_TX_DOWN, &priv->flags);
2950 }
2951 EXPORT_SYMBOL_GPL(enetc_start);
2952
2953 int enetc_open(struct net_device *ndev)
2954 {
2955 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2956 struct enetc_bdr_resource *tx_res, *rx_res;
2957 bool extended;
2958 int err;
2959
2960 extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
2961
2962 err = clk_prepare_enable(priv->ref_clk);
2963 if (err)
2964 return err;
2965
2966 err = enetc_setup_irqs(priv);
2967 if (err)
2968 goto err_setup_irqs;
2969
2970 err = enetc_phylink_connect(ndev);
2971 if (err)
2972 goto err_phy_connect;
2973
2974 tx_res = enetc_alloc_tx_resources(priv);
2975 if (IS_ERR(tx_res)) {
2976 err = PTR_ERR(tx_res);
2977 goto err_alloc_tx;
2978 }
2979
2980 rx_res = enetc_alloc_rx_resources(priv, extended);
2981 if (IS_ERR(rx_res)) {
2982 err = PTR_ERR(rx_res);
2983 goto err_alloc_rx;
2984 }
2985
2986 enetc_tx_onestep_tstamp_init(priv);
2987 enetc_assign_tx_resources(priv, tx_res);
2988 enetc_assign_rx_resources(priv, rx_res);
2989 enetc_setup_bdrs(priv, extended);
2990 enetc_start(ndev);
2991
2992 return 0;
2993
2994 err_alloc_rx:
2995 enetc_free_tx_resources(tx_res, priv->num_tx_rings);
2996 err_alloc_tx:
2997 if (priv->phylink)
2998 phylink_disconnect_phy(priv->phylink);
2999 err_phy_connect:
3000 enetc_free_irqs(priv);
3001 err_setup_irqs:
3002 clk_disable_unprepare(priv->ref_clk);
3003
3004 return err;
3005 }
3006 EXPORT_SYMBOL_GPL(enetc_open);
3007
3008 void enetc_stop(struct net_device *ndev)
3009 {
3010 struct enetc_ndev_priv *priv = netdev_priv(ndev);
3011 int i;
3012
3013 set_bit(ENETC_TX_DOWN, &priv->flags);
3014
3015 netif_tx_stop_all_queues(ndev);
3016
3017 enetc_disable_rx_bdrs(priv);
3018
3019 enetc_wait_bdrs(priv);
3020
3021 enetc_disable_tx_bdrs(priv);
3022
3023 for (i = 0; i < priv->bdr_int_num; i++) {
3024 int irq = pci_irq_vector(priv->si->pdev,
3025 ENETC_BDR_INT_BASE_IDX + i);
3026
3027 disable_irq(irq);
3028 napi_synchronize(&priv->int_vector[i]->napi);
3029 napi_disable(&priv->int_vector[i]->napi);
3030 }
3031
3032 enetc_clear_interrupts(priv);
3033 }
3034 EXPORT_SYMBOL_GPL(enetc_stop);
3035
3036 int enetc_close(struct net_device *ndev)
3037 {
3038 struct enetc_ndev_priv *priv = netdev_priv(ndev);
3039
3040 enetc_stop(ndev);
3041
3042 if (priv->phylink) {
3043 phylink_stop(priv->phylink);
3044 phylink_disconnect_phy(priv->phylink);
3045 } else {
3046 netif_carrier_off(ndev);
3047 }
3048
3049 enetc_free_rxtx_rings(priv);
3050
3051 /* Avoids dangling pointers and also frees old resources */
3052 enetc_assign_rx_resources(priv, NULL);
3053 enetc_assign_tx_resources(priv, NULL);
3054
3055 enetc_free_irqs(priv);
3056 clk_disable_unprepare(priv->ref_clk);
3057
3058 return 0;
3059 }
3060 EXPORT_SYMBOL_GPL(enetc_close);
3061
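/* Reconfigure the interface while minimizing downtime: allocate the
 * new TX/RX resources up front so that an allocation failure leaves
 * the running configuration untouched, then stop the rings, run the
 * optional callback, swap in the new resources and restart. If the
 * callback fails, the rings are restarted on the old resources and
 * the new ones are freed.
 */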
3062 static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended,
3063 int (*cb)(struct enetc_ndev_priv *priv, void *ctx),
3064 void *ctx)
3065 {
3066 struct enetc_bdr_resource *tx_res, *rx_res;
3067 int err;
3068
3069 ASSERT_RTNL();
3070
3071 /* If the interface is down, run the callback right away,
3072 * without reconfiguration.
3073 */
3074 if (!netif_running(priv->ndev)) {
3075 if (cb) {
3076 err = cb(priv, ctx);
3077 if (err)
3078 return err;
3079 }
3080
3081 return 0;
3082 }
3083
3084 tx_res = enetc_alloc_tx_resources(priv);
3085 if (IS_ERR(tx_res)) {
3086 err = PTR_ERR(tx_res);
3087 goto out;
3088 }
3089
3090 rx_res = enetc_alloc_rx_resources(priv, extended);
3091 if (IS_ERR(rx_res)) {
3092 err = PTR_ERR(rx_res);
3093 goto out_free_tx_res;
3094 }
3095
3096 enetc_stop(priv->ndev);
3097 enetc_free_rxtx_rings(priv);
3098
3099 /* Interface is down, run optional callback now */
3100 if (cb) {
3101 err = cb(priv, ctx);
3102 if (err)
3103 goto out_restart;
3104 }
3105
3106 enetc_assign_tx_resources(priv, tx_res);
3107 enetc_assign_rx_resources(priv, rx_res);
3108 enetc_setup_bdrs(priv, extended);
3109 enetc_start(priv->ndev);
3110
3111 return 0;
3112
3113 out_restart:
3114 enetc_setup_bdrs(priv, extended);
3115 enetc_start(priv->ndev);
3116 enetc_free_rx_resources(rx_res, priv->num_rx_rings);
3117 out_free_tx_res:
3118 enetc_free_tx_resources(tx_res, priv->num_tx_rings);
3119 out:
3120 return err;
3121 }
3122
3123 static void enetc_debug_tx_ring_prios(struct enetc_ndev_priv *priv)
3124 {
3125 int i;
3126
3127 for (i = 0; i < priv->num_tx_rings; i++)
3128 netdev_dbg(priv->ndev, "TX ring %d prio %d\n", i,
3129 priv->tx_ring[i]->prio);
3130 }
3131
3132 void enetc_reset_tc_mqprio(struct net_device *ndev)
3133 {
3134 struct enetc_ndev_priv *priv = netdev_priv(ndev);
3135 struct enetc_hw *hw = &priv->si->hw;
3136 struct enetc_bdr *tx_ring;
3137 int num_stack_tx_queues;
3138 int i;
3139
3140 num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
3141
3142 netdev_reset_tc(ndev);
3143 netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
3144 priv->min_num_stack_tx_queues = num_possible_cpus();
3145
3146 /* Reset all ring priorities to 0 */
3147 for (i = 0; i < priv->num_tx_rings; i++) {
3148 tx_ring = priv->tx_ring[i];
3149 tx_ring->prio = 0;
3150 enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
3151 }
3152
3153 enetc_debug_tx_ring_prios(priv);
3154
3155 enetc_change_preemptible_tcs(priv, 0);
3156 }
3157 EXPORT_SYMBOL_GPL(enetc_reset_tc_mqprio);
3158
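/* Offload an mqprio configuration by mapping each traffic class onto
 * its contiguous group of TX rings and using the class number as the
 * ring priority. For example, qopt->count = {2, 2} with
 * qopt->offset = {0, 2} gives rings 0-1 priority 0 and rings 2-3
 * priority 1.
 */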
3159 int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
3160 {
3161 struct tc_mqprio_qopt_offload *mqprio = type_data;
3162 struct enetc_ndev_priv *priv = netdev_priv(ndev);
3163 struct tc_mqprio_qopt *qopt = &mqprio->qopt;
3164 struct enetc_hw *hw = &priv->si->hw;
3165 int num_stack_tx_queues = 0;
3166 struct enetc_bdr *tx_ring;
3167 u8 num_tc = qopt->num_tc;
3168 int offset, count;
3169 int err, tc, q;
3170
3171 if (!num_tc) {
3172 enetc_reset_tc_mqprio(ndev);
3173 return 0;
3174 }
3175
3176 err = netdev_set_num_tc(ndev, num_tc);
3177 if (err)
3178 return err;
3179
3180 for (tc = 0; tc < num_tc; tc++) {
3181 offset = qopt->offset[tc];
3182 count = qopt->count[tc];
3183 num_stack_tx_queues += count;
3184
3185 err = netdev_set_tc_queue(ndev, tc, count, offset);
3186 if (err)
3187 goto err_reset_tc;
3188
3189 for (q = offset; q < offset + count; q++) {
3190 tx_ring = priv->tx_ring[q];
3191 /* The prio_tc_map is skb_tx_hash()'s way of selecting
3192 * between TX queues based on skb->priority. As such,
3193 * there's nothing to offload based on it.
3194 * Make the mqprio "traffic class" be the priority of
3195 * this ring group, and leave the Tx IPV to traffic
3196 * class mapping as its default mapping value of 1:1.
3197 */
3198 tx_ring->prio = tc;
3199 enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
3200 }
3201 }
3202
3203 err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
3204 if (err)
3205 goto err_reset_tc;
3206
3207 priv->min_num_stack_tx_queues = num_stack_tx_queues;
3208
3209 enetc_debug_tx_ring_prios(priv);
3210
3211 enetc_change_preemptible_tcs(priv, mqprio->preemptible_tcs);
3212
3213 return 0;
3214
3215 err_reset_tc:
3216 enetc_reset_tc_mqprio(ndev);
3217 return err;
3218 }
3219 EXPORT_SYMBOL_GPL(enetc_setup_tc_mqprio);
3220
3221 static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx)
3222 {
3223 struct bpf_prog *old_prog, *prog = ctx;
3224 int num_stack_tx_queues;
3225 int err, i;
3226
3227 old_prog = xchg(&priv->xdp_prog, prog);
3228
3229 num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
3230 err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
3231 if (err) {
3232 xchg(&priv->xdp_prog, old_prog);
3233 return err;
3234 }
3235
3236 if (old_prog)
3237 bpf_prog_put(old_prog);
3238
3239 for (i = 0; i < priv->num_rx_rings; i++) {
3240 struct enetc_bdr *rx_ring = priv->rx_ring[i];
3241
3242 rx_ring->xdp.prog = prog;
3243
3244 if (prog)
3245 rx_ring->buffer_offset = XDP_PACKET_HEADROOM;
3246 else
3247 rx_ring->buffer_offset = ENETC_RXB_PAD;
3248 }
3249
3250 return 0;
3251 }
3252
3253 static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
3254 struct netlink_ext_ack *extack)
3255 {
3256 int num_xdp_tx_queues = prog ? num_possible_cpus() : 0;
3257 struct enetc_ndev_priv *priv = netdev_priv(ndev);
3258 bool extended;
3259
3260 if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
3261 priv->num_tx_rings) {
3262 NL_SET_ERR_MSG_FMT_MOD(extack,
3263 "Reserving %d XDP TXQs leaves under %d for stack (total %d)",
3264 num_xdp_tx_queues,
3265 priv->min_num_stack_tx_queues,
3266 priv->num_tx_rings);
3267 return -EBUSY;
3268 }
3269
3270 extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
3271
3272 /* The buffer layout is changing, so we need to drain the old
3273 * RX buffers and seed new ones.
3274 */
3275 return enetc_reconfigure(priv, extended, enetc_reconfigure_xdp_cb, prog);
3276 }
3277
3278 int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
3279 {
3280 switch (bpf->command) {
3281 case XDP_SETUP_PROG:
3282 return enetc_setup_xdp_prog(ndev, bpf->prog, bpf->extack);
3283 default:
3284 return -EINVAL;
3285 }
3286
3287 return 0;
3288 }
3289 EXPORT_SYMBOL_GPL(enetc_setup_bpf);
3290
3291 struct net_device_stats *enetc_get_stats(struct net_device *ndev)
3292 {
3293 struct enetc_ndev_priv *priv = netdev_priv(ndev);
3294 struct net_device_stats *stats = &ndev->stats;
3295 unsigned long packets = 0, bytes = 0;
3296 unsigned long tx_dropped = 0;
3297 int i;
3298
3299 for (i = 0; i < priv->num_rx_rings; i++) {
3300 packets += priv->rx_ring[i]->stats.packets;
3301 bytes += priv->rx_ring[i]->stats.bytes;
3302 }
3303
3304 stats->rx_packets = packets;
3305 stats->rx_bytes = bytes;
3306 bytes = 0;
3307 packets = 0;
3308
3309 for (i = 0; i < priv->num_tx_rings; i++) {
3310 packets += priv->tx_ring[i]->stats.packets;
3311 bytes += priv->tx_ring[i]->stats.bytes;
3312 tx_dropped += priv->tx_ring[i]->stats.win_drop;
3313 }
3314
3315 stats->tx_packets = packets;
3316 stats->tx_bytes = bytes;
3317 stats->tx_dropped = tx_dropped;
3318
3319 return stats;
3320 }
3321 EXPORT_SYMBOL_GPL(enetc_get_stats);
3322
3323 static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
3324 {
3325 struct enetc_ndev_priv *priv = netdev_priv(ndev);
3326 struct enetc_hw *hw = &priv->si->hw;
3327 int i;
3328
3329 for (i = 0; i < priv->num_rx_rings; i++)
3330 enetc_bdr_enable_rxvlan(hw, i, en);
3331 }
3332
3333 static void enetc_enable_txvlan(struct net_device *ndev, bool en)
3334 {
3335 struct enetc_ndev_priv *priv = netdev_priv(ndev);
3336 struct enetc_hw *hw = &priv->si->hw;
3337 int i;
3338
3339 for (i = 0; i < priv->num_tx_rings; i++)
3340 enetc_bdr_enable_txvlan(hw, i, en);
3341 }
3342
3343 void enetc_set_features(struct net_device *ndev, netdev_features_t features)
3344 {
3345 netdev_features_t changed = ndev->features ^ features;
3346
3347 if (changed & NETIF_F_RXHASH)
3348 enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
3349
3350 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
3351 enetc_enable_rxvlan(ndev,
3352 !!(features & NETIF_F_HW_VLAN_CTAG_RX));
3353
3354 if (changed & NETIF_F_HW_VLAN_CTAG_TX)
3355 enetc_enable_txvlan(ndev,
3356 !!(features & NETIF_F_HW_VLAN_CTAG_TX));
3357 }
3358 EXPORT_SYMBOL_GPL(enetc_set_features);
3359
3360 int enetc_hwtstamp_set(struct net_device *ndev,
3361 struct kernel_hwtstamp_config *config,
3362 struct netlink_ext_ack *extack)
3363 {
3364 struct enetc_ndev_priv *priv = netdev_priv(ndev);
3365 int err, new_offloads = priv->active_offloads;
3366
3367 if (!enetc_ptp_clock_is_enabled(priv->si))
3368 return -EOPNOTSUPP;
3369
3370 switch (config->tx_type) {
3371 case HWTSTAMP_TX_OFF:
3372 new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
3373 break;
3374 case HWTSTAMP_TX_ON:
3375 new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
3376 new_offloads |= ENETC_F_TX_TSTAMP;
3377 break;
3378 case HWTSTAMP_TX_ONESTEP_SYNC:
3379 if (!enetc_si_is_pf(priv->si) ||
3380 enetc_is_pseudo_mac(priv->si))
3381 return -EOPNOTSUPP;
3382
3383 new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
3384 new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
3385 break;
3386 default:
3387 return -ERANGE;
3388 }
3389
3390 switch (config->rx_filter) {
3391 case HWTSTAMP_FILTER_NONE:
3392 new_offloads &= ~ENETC_F_RX_TSTAMP;
3393 break;
3394 default:
3395 new_offloads |= ENETC_F_RX_TSTAMP;
3396 config->rx_filter = HWTSTAMP_FILTER_ALL;
3397 }
3398
3399 if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) {
3400 bool extended = !!(new_offloads & ENETC_F_RX_TSTAMP);
3401
3402 err = enetc_reconfigure(priv, extended, NULL, NULL);
3403 if (err)
3404 return err;
3405 }
3406
3407 priv->active_offloads = new_offloads;
3408
3409 return 0;
3410 }
3411 EXPORT_SYMBOL_GPL(enetc_hwtstamp_set);
3412
3413 int enetc_hwtstamp_get(struct net_device *ndev,
3414 struct kernel_hwtstamp_config *config)
3415 {
3416 struct enetc_ndev_priv *priv = netdev_priv(ndev);
3417
3418 if (!enetc_ptp_clock_is_enabled(priv->si))
3419 return -EOPNOTSUPP;
3420
3421 if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)
3422 config->tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
3423 else if (priv->active_offloads & ENETC_F_TX_TSTAMP)
3424 config->tx_type = HWTSTAMP_TX_ON;
3425 else
3426 config->tx_type = HWTSTAMP_TX_OFF;
3427
3428 config->rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
3429 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
3430
3431 return 0;
3432 }
3433 EXPORT_SYMBOL_GPL(enetc_hwtstamp_get);
3434
3435 int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
3436 {
3437 struct enetc_ndev_priv *priv = netdev_priv(ndev);
3438
3439 if (!priv->phylink)
3440 return -EOPNOTSUPP;
3441
3442 return phylink_mii_ioctl(priv->phylink, rq, cmd);
3443 }
3444 EXPORT_SYMBOL_GPL(enetc_ioctl);
3445
3446 static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
3447 int v_tx_rings)
3448 {
3449 struct enetc_int_vector *v;
3450 struct enetc_bdr *bdr;
3451 int j, err;
3452
3453 v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
3454 if (!v)
3455 return -ENOMEM;
3456
3457 priv->int_vector[i] = v;
3458 bdr = &v->rx_ring;
3459 bdr->index = i;
3460 bdr->ndev = priv->ndev;
3461 bdr->dev = priv->dev;
3462 bdr->bd_count = priv->rx_bd_count;
3463 bdr->buffer_offset = ENETC_RXB_PAD;
3464 priv->rx_ring[i] = bdr;
3465
3466 err = __xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0,
3467 ENETC_RXB_DMA_SIZE_XDP);
3468 if (err)
3469 goto free_vector;
3470
3471 err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq, MEM_TYPE_PAGE_SHARED,
3472 NULL);
3473 if (err) {
3474 xdp_rxq_info_unreg(&bdr->xdp.rxq);
3475 goto free_vector;
3476 }
3477
3478 /* init defaults for adaptive IC */
3479 if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
3480 v->rx_ictt = 0x1;
3481 v->rx_dim_en = true;
3482 }
3483
3484 INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
3485 netif_napi_add(priv->ndev, &v->napi, enetc_poll);
3486 v->count_tx_rings = v_tx_rings;
3487
3488 for (j = 0; j < v_tx_rings; j++) {
3489 int idx;
3490
3491 /* default tx ring mapping policy */
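/* i.e. interleaved: vector i owns TX rings i, i + bdr_int_num,
 * i + 2 * bdr_int_num, ... so the rings spread evenly across
 * all interrupt vectors.
 */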
3492 idx = priv->bdr_int_num * j + i;
3493 __set_bit(idx, &v->tx_rings_map);
3494 bdr = &v->tx_ring[j];
3495 bdr->index = idx;
3496 bdr->ndev = priv->ndev;
3497 bdr->dev = priv->dev;
3498 bdr->bd_count = priv->tx_bd_count;
3499 priv->tx_ring[idx] = bdr;
3500 }
3501
3502 return 0;
3503
3504 free_vector:
3505 priv->rx_ring[i] = NULL;
3506 priv->int_vector[i] = NULL;
3507 kfree(v);
3508
3509 return err;
3510 }
3511
3512 static void enetc_int_vector_destroy(struct enetc_ndev_priv *priv, int i)
3513 {
3514 struct enetc_int_vector *v = priv->int_vector[i];
3515 struct enetc_bdr *rx_ring = &v->rx_ring;
3516 int j, tx_ring_index;
3517
3518 xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
3519 xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
3520 netif_napi_del(&v->napi);
3521 cancel_work_sync(&v->rx_dim.work);
3522
3523 for (j = 0; j < v->count_tx_rings; j++) {
3524 tx_ring_index = priv->bdr_int_num * j + i;
3525 priv->tx_ring[tx_ring_index] = NULL;
3526 }
3527
3528 priv->rx_ring[i] = NULL;
3529 priv->int_vector[i] = NULL;
3530 kfree(v);
3531 }
3532
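/* Allocate one MSI-X vector per RX ring (beyond the base messaging
 * vectors) and spread the TX rings as evenly as possible across them,
 * e.g. 8 TX rings over 3 vectors gives a 3/3/2 split. The last
 * num_possible_cpus() TX rings double as the per-CPU XDP TX rings.
 */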
3533 int enetc_alloc_msix(struct enetc_ndev_priv *priv)
3534 {
3535 struct pci_dev *pdev = priv->si->pdev;
3536 int v_tx_rings, v_remainder;
3537 int num_stack_tx_queues;
3538 int first_xdp_tx_ring;
3539 int i, n, err, nvec;
3540
3541 nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
3542 /* allocate MSIX for both messaging and Rx/Tx interrupts */
3543 n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
3544
3545 if (n < 0)
3546 return n;
3547
3548 if (n != nvec)
3549 return -EPERM;
3550
3551 /* # of tx rings per int vector */
3552 v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
3553 v_remainder = priv->num_tx_rings % priv->bdr_int_num;
3554
3555 for (i = 0; i < priv->bdr_int_num; i++) {
3556 /* Distribute the remaining TX rings to the first v_remainder
3557 * interrupt vectors
3558 */
3559 int num_tx_rings = i < v_remainder ? v_tx_rings + 1 : v_tx_rings;
3560
3561 err = enetc_int_vector_init(priv, i, num_tx_rings);
3562 if (err)
3563 goto fail;
3564 }
3565
3566 num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
3567
3568 err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
3569 if (err)
3570 goto fail;
3571
3572 err = netif_set_real_num_rx_queues(priv->ndev, priv->num_rx_rings);
3573 if (err)
3574 goto fail;
3575
3576 priv->min_num_stack_tx_queues = num_possible_cpus();
3577 first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus();
3578 priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring];
3579
3580 return 0;
3581
3582 fail:
3583 while (i--)
3584 enetc_int_vector_destroy(priv, i);
3585
3586 pci_free_irq_vectors(pdev);
3587
3588 return err;
3589 }
3590 EXPORT_SYMBOL_GPL(enetc_alloc_msix);
3591
3592 void enetc_free_msix(struct enetc_ndev_priv *priv)
3593 {
3594 int i;
3595
3596 for (i = 0; i < priv->bdr_int_num; i++)
3597 enetc_int_vector_destroy(priv, i);
3598
3599 /* disable all MSIX for this device */
3600 pci_free_irq_vectors(priv->si->pdev);
3601 }
3602 EXPORT_SYMBOL_GPL(enetc_free_msix);
3603
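/* si was aligned with PTR_ALIGN() inside a larger allocation; walk
 * back si->pad bytes to recover the pointer originally returned by
 * kzalloc() so it can be freed.
 */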
3604 static void enetc_kfree_si(struct enetc_si *si)
3605 {
3606 char *p = (char *)si - si->pad;
3607
3608 kfree(p);
3609 }
3610
3611 static void enetc_detect_errata(struct enetc_si *si)
3612 {
3613 if (si->pdev->revision == ENETC_REV1)
3614 si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP;
3615 }
3616
3617 int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
3618 {
3619 struct enetc_si *si, *p;
3620 struct enetc_hw *hw;
3621 size_t alloc_size;
3622 int err, len;
3623
3624 pcie_flr(pdev);
3625 err = pci_enable_device_mem(pdev);
3626 if (err)
3627 return dev_err_probe(&pdev->dev, err, "device enable failed\n");
3628
3629 /* set up for high or low dma */
3630 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3631 if (err) {
3632 dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
3633 goto err_dma;
3634 }
3635
3636 err = pci_request_mem_regions(pdev, name);
3637 if (err) {
3638 dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
3639 goto err_pci_mem_reg;
3640 }
3641
3642 pci_set_master(pdev);
3643
3644 alloc_size = sizeof(struct enetc_si);
3645 if (sizeof_priv) {
3646 /* align priv to 32B */
3647 alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
3648 alloc_size += sizeof_priv;
3649 }
3650 /* force 32B alignment for enetc_si */
3651 alloc_size += ENETC_SI_ALIGN - 1;
3652
3653 p = kzalloc(alloc_size, GFP_KERNEL);
3654 if (!p) {
3655 err = -ENOMEM;
3656 goto err_alloc_si;
3657 }
3658
3659 si = PTR_ALIGN(p, ENETC_SI_ALIGN);
3660 si->pad = (char *)si - (char *)p;
3661
3662 pci_set_drvdata(pdev, si);
3663 si->pdev = pdev;
3664 hw = &si->hw;
3665
3666 len = pci_resource_len(pdev, ENETC_BAR_REGS);
3667 hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
3668 if (!hw->reg) {
3669 err = -ENXIO;
3670 dev_err(&pdev->dev, "ioremap() failed\n");
3671 goto err_ioremap;
3672 }
3673 if (len > ENETC_PORT_BASE)
3674 hw->port = hw->reg + ENETC_PORT_BASE;
3675 if (len > ENETC_GLOBAL_BASE)
3676 hw->global = hw->reg + ENETC_GLOBAL_BASE;
3677
3678 enetc_detect_errata(si);
3679
3680 return 0;
3681
3682 err_ioremap:
3683 enetc_kfree_si(si);
3684 err_alloc_si:
3685 pci_release_mem_regions(pdev);
3686 err_pci_mem_reg:
3687 err_dma:
3688 pci_disable_device(pdev);
3689
3690 return err;
3691 }
3692 EXPORT_SYMBOL_GPL(enetc_pci_probe);
3693
3694 void enetc_pci_remove(struct pci_dev *pdev)
3695 {
3696 struct enetc_si *si = pci_get_drvdata(pdev);
3697 struct enetc_hw *hw = &si->hw;
3698
3699 iounmap(hw->reg);
3700 enetc_kfree_si(si);
3701 pci_release_mem_regions(pdev);
3702 pci_disable_device(pdev);
3703 }
3704 EXPORT_SYMBOL_GPL(enetc_pci_remove);
3705
3706 static const struct enetc_drvdata enetc_pf_data = {
3707 .sysclk_freq = ENETC_CLK_400M,
3708 .pmac_offset = ENETC_PMAC_OFFSET,
3709 .max_frags = ENETC_MAX_SKB_FRAGS,
3710 .eth_ops = &enetc_pf_ethtool_ops,
3711 };
3712
3713 static const struct enetc_drvdata enetc4_pf_data = {
3714 .sysclk_freq = ENETC_CLK_333M,
3715 .tx_csum = true,
3716 .max_frags = ENETC4_MAX_SKB_FRAGS,
3717 .pmac_offset = ENETC4_PMAC_OFFSET,
3718 .eth_ops = &enetc4_pf_ethtool_ops,
3719 };
3720
3721 static const struct enetc_drvdata enetc4_ppm_data = {
3722 .sysclk_freq = ENETC_CLK_333M,
3723 .tx_csum = true,
3724 .max_frags = ENETC4_MAX_SKB_FRAGS,
3725 .eth_ops = &enetc4_ppm_ethtool_ops,
3726 };
3727
3728 static const struct enetc_drvdata enetc_vf_data = {
3729 .sysclk_freq = ENETC_CLK_400M,
3730 .max_frags = ENETC_MAX_SKB_FRAGS,
3731 .eth_ops = &enetc_vf_ethtool_ops,
3732 };
3733
3734 static const struct enetc_platform_info enetc_info[] = {
3735 { .revision = ENETC_REV_1_0,
3736 .dev_id = ENETC_DEV_ID_PF,
3737 .data = &enetc_pf_data,
3738 },
3739 { .revision = ENETC_REV_4_1,
3740 .dev_id = NXP_ENETC_PF_DEV_ID,
3741 .data = &enetc4_pf_data,
3742 },
3743 { .revision = ENETC_REV_1_0,
3744 .dev_id = ENETC_DEV_ID_VF,
3745 .data = &enetc_vf_data,
3746 },
3747 {
3748 .revision = ENETC_REV_4_3,
3749 .dev_id = NXP_ENETC_PPM_DEV_ID,
3750 .data = &enetc4_ppm_data,
3751 },
3752 { .revision = ENETC_REV_4_3,
3753 .dev_id = NXP_ENETC_PF_DEV_ID,
3754 .data = &enetc4_pf_data,
3755 },
3756 };
3757
3758 int enetc_get_driver_data(struct enetc_si *si)
3759 {
3760 u16 dev_id = si->pdev->device;
3761 int i;
3762
3763 for (i = 0; i < ARRAY_SIZE(enetc_info); i++) {
3764 if (si->revision == enetc_info[i].revision &&
3765 dev_id == enetc_info[i].dev_id) {
3766 si->drvdata = enetc_info[i].data;
3767
3768 return 0;
3769 }
3770 }
3771
3772 return -ERANGE;
3773 }
3774 EXPORT_SYMBOL_GPL(enetc_get_driver_data);
3775
3776 MODULE_DESCRIPTION("NXP ENETC Ethernet driver");
3777 MODULE_LICENSE("Dual BSD/GPL");
3778