1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3
4 #include <linux/ip.h>
5 #include <linux/ipv6.h>
6 #include <linux/if_vlan.h>
7 #include <net/ip6_checksum.h>
8 #include <net/netdev_queues.h>
9 #include <net/page_pool/helpers.h>
10
11 #include "ionic.h"
12 #include "ionic_lif.h"
13 #include "ionic_txrx.h"
14
15 static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
16 void *data, size_t len);
17
18 static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
19 const skb_frag_t *frag,
20 size_t offset, size_t len);
21
22 static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
23 struct ionic_tx_desc_info *desc_info);
24
25 static void ionic_tx_clean(struct ionic_queue *q,
26 struct ionic_tx_desc_info *desc_info,
27 struct ionic_txq_comp *comp,
28 bool in_napi);
29
30 static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell)
31 {
32 /* Ensure TX descriptor writes reach memory before NIC reads them.
33 * Prevents device from fetching stale descriptors.
34 */
35 dma_wmb();
36 ionic_q_post(q, ring_dbell);
37 }
38
39 static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell)
40 {
41 ionic_q_post(q, ring_dbell);
42 }
43
44 bool ionic_txq_poke_doorbell(struct ionic_queue *q)
45 {
46 struct netdev_queue *netdev_txq;
47 unsigned long now, then, dif;
48 struct net_device *netdev;
49
50 netdev = q->lif->netdev;
51 netdev_txq = netdev_get_tx_queue(netdev, q->index);
52
53 HARD_TX_LOCK(netdev, netdev_txq, smp_processor_id());
54
55 if (q->tail_idx == q->head_idx) {
56 HARD_TX_UNLOCK(netdev, netdev_txq);
57 return false;
58 }
59
60 now = READ_ONCE(jiffies);
61 then = q->dbell_jiffies;
62 dif = now - then;
63
64 if (dif > q->dbell_deadline) {
65 ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
66 q->dbval | q->head_idx);
67
68 q->dbell_jiffies = now;
69 }
70
71 HARD_TX_UNLOCK(netdev, netdev_txq);
72
73 return true;
74 }
75
76 bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
77 {
78 unsigned long now, then, dif;
79
80 /* no lock, called from rx napi or txrx napi, nothing else can fill */
81
82 if (q->tail_idx == q->head_idx)
83 return false;
84
85 now = READ_ONCE(jiffies);
86 then = q->dbell_jiffies;
87 dif = now - then;
88
89 if (dif > q->dbell_deadline) {
90 ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
91 q->dbval | q->head_idx);
92
93 q->dbell_jiffies = now;
94
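/* Each time the deadline expires we double it (capped at
 * IONIC_RX_MAX_DOORBELL_DEADLINE) so a ring that stays unconsumed
 * isn't re-rung too frequently; ionic_rx_fill() drops the deadline
 * back to the minimum when it posts fresh buffers.
 */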
95 dif = 2 * q->dbell_deadline;
96 if (dif > IONIC_RX_MAX_DOORBELL_DEADLINE)
97 dif = IONIC_RX_MAX_DOORBELL_DEADLINE;
98
99 q->dbell_deadline = dif;
100 }
101
102 return true;
103 }
104
105 static inline struct ionic_txq_sg_elem *ionic_tx_sg_elems(struct ionic_queue *q)
106 {
107 if (likely(q->sg_desc_size == sizeof(struct ionic_txq_sg_desc_v1)))
108 return q->txq_sgl_v1[q->head_idx].elems;
109 else
110 return q->txq_sgl[q->head_idx].elems;
111 }
112
113 static inline struct netdev_queue *q_to_ndq(struct net_device *netdev,
114 struct ionic_queue *q)
115 {
116 return netdev_get_tx_queue(netdev, q->index);
117 }
118
119 static void *ionic_rx_buf_va(struct ionic_buf_info *buf_info)
120 {
121 return page_address(buf_info->page) + buf_info->page_offset;
122 }
123
124 static dma_addr_t ionic_rx_buf_pa(struct ionic_buf_info *buf_info)
125 {
126 return page_pool_get_dma_addr(buf_info->page) + buf_info->page_offset;
127 }
128
129 static void __ionic_rx_put_buf(struct ionic_queue *q,
130 struct ionic_buf_info *buf_info,
131 bool recycle_direct)
132 {
133 if (!buf_info->page)
134 return;
135
136 page_pool_put_full_page(q->page_pool, buf_info->page, recycle_direct);
137 buf_info->page = NULL;
138 buf_info->len = 0;
139 buf_info->page_offset = 0;
140 }
141
142
143 static void ionic_rx_put_buf(struct ionic_queue *q,
144 struct ionic_buf_info *buf_info)
145 {
146 __ionic_rx_put_buf(q, buf_info, false);
147 }
148
149 static void ionic_rx_put_buf_direct(struct ionic_queue *q,
150 struct ionic_buf_info *buf_info)
151 {
152 __ionic_rx_put_buf(q, buf_info, true);
153 }
154
155 static void ionic_rx_add_skb_frag(struct ionic_queue *q,
156 struct sk_buff *skb,
157 struct ionic_buf_info *buf_info,
158 u32 headroom, u32 len,
159 bool synced)
160 {
161 if (!synced)
162 page_pool_dma_sync_for_cpu(q->page_pool,
163 buf_info->page,
164 buf_info->page_offset + headroom,
165 len);
166
167 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
168 buf_info->page, buf_info->page_offset + headroom,
169 len, buf_info->len);
170
171 /* napi_gro_frags() will release/recycle the
172 * page_pool buffers from the frags list
173 */
174 buf_info->page = NULL;
175 buf_info->len = 0;
176 buf_info->page_offset = 0;
177 }
178
179 static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
180 struct ionic_rx_desc_info *desc_info,
181 unsigned int headroom,
182 unsigned int len,
183 unsigned int num_sg_elems,
184 bool synced)
185 {
186 struct ionic_buf_info *buf_info;
187 struct sk_buff *skb;
188 unsigned int i;
189 u16 frag_len;
190
191 buf_info = &desc_info->bufs[0];
192 prefetchw(buf_info->page);
193
194 skb = napi_get_frags(&q_to_qcq(q)->napi);
195 if (unlikely(!skb)) {
196 net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
197 dev_name(q->dev), q->name);
198 q_to_rx_stats(q)->alloc_err++;
199 return NULL;
200 }
201 skb_mark_for_recycle(skb);
202
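/* When XDP ran first, the packet data starts after XDP_PACKET_HEADROOM
 * and the first buffer is limited to the XDP linear size; otherwise
 * the first frag may use up to a full page.
 */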
203 if (headroom)
204 frag_len = min_t(u16, len,
205 IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
206 else
207 frag_len = min_t(u16, len, IONIC_PAGE_SIZE);
208
209 if (unlikely(!buf_info->page))
210 goto err_bad_buf_page;
211 ionic_rx_add_skb_frag(q, skb, buf_info, headroom, frag_len, synced);
212 len -= frag_len;
213 buf_info++;
214
215 for (i = 0; i < num_sg_elems; i++, buf_info++) {
216 if (unlikely(!buf_info->page))
217 goto err_bad_buf_page;
218 frag_len = min_t(u16, len, buf_info->len);
219 ionic_rx_add_skb_frag(q, skb, buf_info, 0, frag_len, synced);
220 len -= frag_len;
221 }
222
223 return skb;
224
225 err_bad_buf_page:
226 dev_kfree_skb(skb);
227 return NULL;
228 }
229
230 static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev,
231 struct ionic_queue *q,
232 struct ionic_rx_desc_info *desc_info,
233 unsigned int headroom,
234 unsigned int len,
235 unsigned int num_sg_elems,
236 bool synced)
237 {
238 struct ionic_buf_info *buf_info;
239 struct device *dev = q->dev;
240 struct sk_buff *skb;
241 int i;
242
243 buf_info = &desc_info->bufs[0];
244
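/* For small packets it's cheaper to copy the data into a freshly
 * allocated linear skb and hand the page_pool buffers straight back
 * for reuse (see ionic_rx_put_buf_direct() below).
 */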
245 skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
246 if (unlikely(!skb)) {
247 net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
248 dev_name(dev), q->name);
249 q_to_rx_stats(q)->alloc_err++;
250 return NULL;
251 }
252 skb_mark_for_recycle(skb);
253
254 if (!synced)
255 page_pool_dma_sync_for_cpu(q->page_pool,
256 buf_info->page,
257 buf_info->page_offset + headroom,
258 len);
259
260 skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info) + headroom, len);
261
262 skb_put(skb, len);
263 skb->protocol = eth_type_trans(skb, netdev);
264
265 /* recycle the Rx buffer now that we're done with it */
266 ionic_rx_put_buf_direct(q, buf_info);
267 buf_info++;
268 for (i = 0; i < num_sg_elems; i++, buf_info++)
269 ionic_rx_put_buf_direct(q, buf_info);
270
271 return skb;
272 }
273
274 static void ionic_xdp_tx_desc_clean(struct ionic_queue *q,
275 struct ionic_tx_desc_info *desc_info,
276 bool in_napi)
277 {
278 struct xdp_frame_bulk bq;
279
280 if (!desc_info->nbufs)
281 return;
282
283 xdp_frame_bulk_init(&bq);
284 rcu_read_lock(); /* need for xdp_return_frame_bulk */
285
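/* XDP_TX frames sit in our own page_pool pages, so there is nothing
 * to unmap here; XDP_REDIRECT frames were DMA mapped in
 * ionic_xdp_post_frame() and must be unmapped before being returned.
 */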
286 if (desc_info->act == XDP_TX) {
287 if (likely(in_napi))
288 xdp_return_frame_rx_napi(desc_info->xdpf);
289 else
290 xdp_return_frame(desc_info->xdpf);
291 } else if (desc_info->act == XDP_REDIRECT) {
292 ionic_tx_desc_unmap_bufs(q, desc_info);
293 xdp_return_frame_bulk(desc_info->xdpf, &bq);
294 }
295
296 xdp_flush_frame_bulk(&bq);
297 rcu_read_unlock();
298
299 desc_info->nbufs = 0;
300 desc_info->xdpf = NULL;
301 desc_info->act = 0;
302 }
303
304 static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
305 enum xdp_action act, struct page *page, int off,
306 bool ring_doorbell)
307 {
308 struct ionic_tx_desc_info *desc_info;
309 struct ionic_buf_info *buf_info;
310 struct ionic_tx_stats *stats;
311 struct ionic_txq_desc *desc;
312 size_t len = frame->len;
313 dma_addr_t dma_addr;
314 u64 cmd;
315
316 desc_info = &q->tx_info[q->head_idx];
317 desc = &q->txq[q->head_idx];
318 buf_info = desc_info->bufs;
319 stats = q_to_tx_stats(q);
320
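/* XDP_TX buffers come from our own page_pool and are already DMA
 * mapped, so a sync is enough; redirected frames come from elsewhere
 * and need a fresh mapping.
 */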
321 if (act == XDP_TX) {
322 dma_addr = page_pool_get_dma_addr(page) +
323 off + XDP_PACKET_HEADROOM;
324 dma_sync_single_for_device(q->dev, dma_addr,
325 len, DMA_TO_DEVICE);
326 } else /* XDP_REDIRECT */ {
327 dma_addr = ionic_tx_map_single(q, frame->data, len);
328 if (dma_addr == DMA_MAPPING_ERROR)
329 return -EIO;
330 }
331
332 buf_info->dma_addr = dma_addr;
333 buf_info->len = len;
334 buf_info->page = page;
335 buf_info->page_offset = off;
336
337 desc_info->nbufs = 1;
338 desc_info->xdpf = frame;
339 desc_info->act = act;
340
341 if (xdp_frame_has_frags(frame)) {
342 struct ionic_txq_sg_elem *elem;
343 struct skb_shared_info *sinfo;
344 struct ionic_buf_info *bi;
345 skb_frag_t *frag;
346 int i;
347
348 bi = &buf_info[1];
349 sinfo = xdp_get_shared_info_from_frame(frame);
350 frag = sinfo->frags;
351 elem = ionic_tx_sg_elems(q);
352 for (i = 0; i < sinfo->nr_frags; i++, frag++, bi++) {
353 if (act == XDP_TX) {
354 struct page *pg = skb_frag_page(frag);
355
356 dma_addr = page_pool_get_dma_addr(pg) +
357 skb_frag_off(frag);
358 dma_sync_single_for_device(q->dev, dma_addr,
359 skb_frag_size(frag),
360 DMA_TO_DEVICE);
361 } else {
362 dma_addr = ionic_tx_map_frag(q, frag, 0,
363 skb_frag_size(frag));
364 if (dma_addr == DMA_MAPPING_ERROR) {
365 ionic_tx_desc_unmap_bufs(q, desc_info);
366 return -EIO;
367 }
368 }
369 bi->dma_addr = dma_addr;
370 bi->len = skb_frag_size(frag);
371 bi->page = skb_frag_page(frag);
372
373 elem->addr = cpu_to_le64(bi->dma_addr);
374 elem->len = cpu_to_le16(bi->len);
375 elem++;
376
377 desc_info->nbufs++;
378 }
379 }
380
381 cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
382 0, (desc_info->nbufs - 1), buf_info->dma_addr);
383 desc->cmd = cpu_to_le64(cmd);
384 desc->len = cpu_to_le16(len);
385 desc->csum_start = 0;
386 desc->csum_offset = 0;
387
388 stats->xdp_frames++;
389 stats->pkts++;
390 stats->bytes += len;
391
392 ionic_txq_post(q, ring_doorbell);
393
394 return 0;
395 }
396
397 int ionic_xdp_xmit(struct net_device *netdev, int n,
398 struct xdp_frame **xdp_frames, u32 flags)
399 {
400 struct ionic_lif *lif = netdev_priv(netdev);
401 struct ionic_queue *txq;
402 struct netdev_queue *nq;
403 int nxmit;
404 int space;
405 int cpu;
406 int qi;
407
408 if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state)))
409 return -ENETDOWN;
410
411 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
412 return -EINVAL;
413
414 /* AdminQ is assumed on cpu 0, while we attempt to affinitize the
415 * TxRx queue pairs 0..n-1 on cpus 1..n. We try to keep with that
416 * affinitization here, but of course irqbalance and friends might
417 * have juggled things anyway, so we have to check for the 0 case.
418 */
419 cpu = smp_processor_id();
420 qi = cpu ? (cpu - 1) % lif->nxqs : cpu;
421
422 txq = &lif->txqcqs[qi]->q;
423 nq = netdev_get_tx_queue(netdev, txq->index);
424 __netif_tx_lock(nq, cpu);
425 txq_trans_cond_update(nq);
426
427 if (netif_tx_queue_stopped(nq) ||
428 !netif_txq_maybe_stop(q_to_ndq(netdev, txq),
429 ionic_q_space_avail(txq),
430 1, 1)) {
431 __netif_tx_unlock(nq);
432 return -EIO;
433 }
434
435 space = min_t(int, n, ionic_q_space_avail(txq));
436 for (nxmit = 0; nxmit < space ; nxmit++) {
437 if (ionic_xdp_post_frame(txq, xdp_frames[nxmit],
438 XDP_REDIRECT,
439 virt_to_page(xdp_frames[nxmit]->data),
440 0, false)) {
441 nxmit--;
442 break;
443 }
444 }
445
446 if (flags & XDP_XMIT_FLUSH)
447 ionic_dbell_ring(lif->kern_dbpage, txq->hw_type,
448 txq->dbval | txq->head_idx);
449
450 netif_txq_maybe_stop(q_to_ndq(netdev, txq),
451 ionic_q_space_avail(txq),
452 4, 4);
453 __netif_tx_unlock(nq);
454
455 return nxmit;
456 }
457
458 static void ionic_xdp_rx_unlink_bufs(struct ionic_queue *q,
459 struct ionic_buf_info *buf_info,
460 int nbufs)
461 {
462 int i;
463
464 for (i = 0; i < nbufs; i++) {
465 buf_info->page = NULL;
466 buf_info++;
467 }
468 }
469
470 static bool ionic_run_xdp(struct ionic_rx_stats *stats,
471 struct net_device *netdev,
472 struct bpf_prog *xdp_prog,
473 struct ionic_queue *rxq,
474 struct ionic_buf_info *buf_info,
475 int len)
476 {
477 u32 xdp_action = XDP_ABORTED;
478 struct xdp_buff xdp_buf;
479 struct ionic_queue *txq;
480 struct netdev_queue *nq;
481 struct xdp_frame *xdpf;
482 int remain_len;
483 int nbufs = 1;
484 int frag_len;
485 int err = 0;
486
487 xdp_init_buff(&xdp_buf, IONIC_PAGE_SIZE, rxq->xdp_rxq_info);
488 frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
489 xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info),
490 XDP_PACKET_HEADROOM, frag_len, false);
491 page_pool_dma_sync_for_cpu(rxq->page_pool, buf_info->page,
492 buf_info->page_offset + XDP_PACKET_HEADROOM,
493 frag_len);
494 prefetchw(&xdp_buf.data_hard_start);
495
496 /* We limit MTU size to one buffer if !xdp_has_frags, so
497 * if the recv len is bigger than one buffer
498 * then we know we have frag info to gather
499 */
500 remain_len = len - frag_len;
501 if (remain_len) {
502 struct skb_shared_info *sinfo;
503 struct ionic_buf_info *bi;
504 skb_frag_t *frag;
505
506 bi = buf_info;
507 sinfo = xdp_get_shared_info_from_buff(&xdp_buf);
508 sinfo->nr_frags = 0;
509 sinfo->xdp_frags_size = 0;
510 xdp_buff_set_frags_flag(&xdp_buf);
511
512 do {
513 if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) {
514 err = -ENOSPC;
515 break;
516 }
517
518 frag = &sinfo->frags[sinfo->nr_frags];
519 sinfo->nr_frags++;
520 bi++;
521 frag_len = min_t(u16, remain_len, bi->len);
522 page_pool_dma_sync_for_cpu(rxq->page_pool, bi->page,
523 buf_info->page_offset,
524 frag_len);
525 skb_frag_fill_page_desc(frag, bi->page, 0, frag_len);
526 sinfo->xdp_frags_size += frag_len;
527 remain_len -= frag_len;
528
529 if (page_is_pfmemalloc(bi->page))
530 xdp_buff_set_frag_pfmemalloc(&xdp_buf);
531 } while (remain_len > 0);
532 nbufs += sinfo->nr_frags;
533 }
534
535 xdp_action = bpf_prog_run_xdp(xdp_prog, &xdp_buf);
536
537 switch (xdp_action) {
538 case XDP_PASS:
539 stats->xdp_pass++;
540 return false; /* false = we didn't consume the packet */
541
542 case XDP_DROP:
543 ionic_rx_put_buf_direct(rxq, buf_info);
544 stats->xdp_drop++;
545 break;
546
547 case XDP_TX:
548 xdpf = xdp_convert_buff_to_frame(&xdp_buf);
549 if (!xdpf) {
550 err = -ENOSPC;
551 break;
552 }
553
554 txq = rxq->partner;
555 nq = netdev_get_tx_queue(netdev, txq->index);
556 __netif_tx_lock(nq, smp_processor_id());
557 txq_trans_cond_update(nq);
558
559 if (netif_tx_queue_stopped(nq) ||
560 !netif_txq_maybe_stop(q_to_ndq(netdev, txq),
561 ionic_q_space_avail(txq),
562 1, 1)) {
563 __netif_tx_unlock(nq);
564 err = -EIO;
565 break;
566 }
567
568 err = ionic_xdp_post_frame(txq, xdpf, XDP_TX,
569 buf_info->page,
570 buf_info->page_offset,
571 true);
572 __netif_tx_unlock(nq);
573 if (unlikely(err)) {
574 netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
575 break;
576 }
577 ionic_xdp_rx_unlink_bufs(rxq, buf_info, nbufs);
578 stats->xdp_tx++;
579 break;
580
581 case XDP_REDIRECT:
582 err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog);
583 if (unlikely(err)) {
584 netdev_dbg(netdev, "xdp_do_redirect err %d\n", err);
585 break;
586 }
587 ionic_xdp_rx_unlink_bufs(rxq, buf_info, nbufs);
588 rxq->xdp_flush = true;
589 stats->xdp_redirect++;
590 break;
591
592 case XDP_ABORTED:
593 default:
594 err = -EIO;
595 break;
596 }
597
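/* All error paths land here: give the buffer back to the page_pool
 * and count the packet as an aborted XDP action.
 */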
598 if (err) {
599 ionic_rx_put_buf_direct(rxq, buf_info);
600 trace_xdp_exception(netdev, xdp_prog, xdp_action);
601 stats->xdp_aborted++;
602 }
603
604 return true;
605 }
606
607 static void ionic_rx_clean(struct ionic_queue *q,
608 struct ionic_rx_desc_info *desc_info,
609 struct ionic_rxq_comp *comp,
610 struct bpf_prog *xdp_prog)
611 {
612 struct net_device *netdev = q->lif->netdev;
613 struct ionic_qcq *qcq = q_to_qcq(q);
614 struct ionic_rx_stats *stats;
615 unsigned int headroom = 0;
616 struct sk_buff *skb;
617 bool synced = false;
618 bool use_copybreak;
619 u16 len;
620
621 stats = q_to_rx_stats(q);
622
623 if (unlikely(comp->status)) {
624 /* Most likely status==2 and the pkt received was bigger
625 * than the buffer available: comp->len will show the
626 * pkt size received that didn't fit the advertised desc.len
627 */
628 dev_dbg(q->dev, "q%d drop comp->status %d comp->len %d desc->len %d\n",
629 q->index, comp->status, comp->len, q->rxq[q->head_idx].len);
630
631 stats->dropped++;
632 return;
633 }
634
635 len = le16_to_cpu(comp->len);
636 stats->pkts++;
637 stats->bytes += len;
638
639 if (xdp_prog) {
640 if (ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len))
641 return;
642 synced = true;
643 headroom = XDP_PACKET_HEADROOM;
644 }
645
646 use_copybreak = len <= q->lif->rx_copybreak;
647 if (use_copybreak)
648 skb = ionic_rx_copybreak(netdev, q, desc_info,
649 headroom, len,
650 comp->num_sg_elems, synced);
651 else
652 skb = ionic_rx_build_skb(q, desc_info, headroom, len,
653 comp->num_sg_elems, synced);
654
655 if (unlikely(!skb)) {
656 stats->dropped++;
657 return;
658 }
659
660 skb_record_rx_queue(skb, q->index);
661
662 if (likely(netdev->features & NETIF_F_RXHASH)) {
663 switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
664 case IONIC_PKT_TYPE_IPV4:
665 case IONIC_PKT_TYPE_IPV6:
666 skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
667 PKT_HASH_TYPE_L3);
668 break;
669 case IONIC_PKT_TYPE_IPV4_TCP:
670 case IONIC_PKT_TYPE_IPV6_TCP:
671 case IONIC_PKT_TYPE_IPV4_UDP:
672 case IONIC_PKT_TYPE_IPV6_UDP:
673 skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
674 PKT_HASH_TYPE_L4);
675 break;
676 }
677 }
678
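/* The device reports a checksum computed over the packet, so report
 * CHECKSUM_COMPLETE and let the stack fold it as needed.
 */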
679 if (likely(netdev->features & NETIF_F_RXCSUM) &&
680 (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
681 skb->ip_summed = CHECKSUM_COMPLETE;
682 skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
683 stats->csum_complete++;
684 } else {
685 stats->csum_none++;
686 }
687
688 if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
689 (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
690 (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
691 stats->csum_error++;
692
693 if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
694 (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
695 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
696 le16_to_cpu(comp->vlan_tci));
697 stats->vlan_stripped++;
698 }
699
700 if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) {
701 __le64 *cq_desc_hwstamp;
702 u64 hwstamp;
703
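/* When timestamping is enabled the completion is oversized; the raw
 * hardware timestamp sits near the end of the descriptor, so walk
 * back from the end to find it.
 */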
704 cq_desc_hwstamp =
705 (void *)comp +
706 qcq->cq.desc_size -
707 sizeof(struct ionic_rxq_comp) -
708 IONIC_HWSTAMP_CQ_NEGOFFSET;
709
710 hwstamp = le64_to_cpu(*cq_desc_hwstamp);
711
712 if (hwstamp != IONIC_HWSTAMP_INVALID) {
713 skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
714 stats->hwstamp_valid++;
715 } else {
716 stats->hwstamp_invalid++;
717 }
718 }
719
720 if (use_copybreak)
721 napi_gro_receive(&qcq->napi, skb);
722 else
723 napi_gro_frags(&qcq->napi);
724 }
725
726 static bool __ionic_rx_service(struct ionic_cq *cq, struct bpf_prog *xdp_prog)
727 {
728 struct ionic_rx_desc_info *desc_info;
729 struct ionic_queue *q = cq->bound_q;
730 struct ionic_rxq_comp *comp;
731
732 comp = &((struct ionic_rxq_comp *)cq->base)[cq->tail_idx];
733
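/* The color bit flips each time the completion ring wraps; if it
 * doesn't match our expected color, the device hasn't written this
 * entry yet.
 */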
734 if (!color_match(comp->pkt_type_color, cq->done_color))
735 return false;
736
737 /* check for empty queue */
738 if (q->tail_idx == q->head_idx)
739 return false;
740
741 if (q->tail_idx != le16_to_cpu(comp->comp_index))
742 return false;
743
744 desc_info = &q->rx_info[q->tail_idx];
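/* num_descs is a power of two, so the mask wraps the index cheaply,
 * e.g. with 1024 descriptors (1023 + 1) & 1023 == 0.
 */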
745 q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
746
747 /* clean the related q entry, only one per qc completion */
748 ionic_rx_clean(q, desc_info, comp, xdp_prog);
749
750 return true;
751 }
752
753 bool ionic_rx_service(struct ionic_cq *cq)
754 {
755 return __ionic_rx_service(cq, NULL);
756 }
757
758 static inline void ionic_write_cmb_desc(struct ionic_queue *q,
759 void *desc)
760 {
761 /* Since Rx and Tx descriptors are the same size, we can
762 * save an instruction or two and skip the qtype check.
763 */
764 if (unlikely(q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS))
765 memcpy_toio(&q->cmb_txq[q->head_idx], desc, sizeof(q->cmb_txq[0]));
766 }
767
768 void ionic_rx_fill(struct ionic_queue *q, struct bpf_prog *xdp_prog)
769 {
770 struct net_device *netdev = q->lif->netdev;
771 struct ionic_rx_desc_info *desc_info;
772 struct ionic_rxq_sg_elem *sg_elem;
773 struct ionic_buf_info *buf_info;
774 unsigned int fill_threshold;
775 struct ionic_rxq_desc *desc;
776 unsigned int first_frag_len;
777 unsigned int first_buf_len;
778 unsigned int headroom = 0;
779 unsigned int remain_len;
780 unsigned int frag_len;
781 unsigned int nfrags;
782 unsigned int n_fill;
783 unsigned int len;
784 unsigned int i;
785 unsigned int j;
786
787 n_fill = ionic_q_space_avail(q);
788
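/* Don't bother refilling until a reasonable chunk of the ring is
 * free; this batches the descriptor writes and the doorbell ring.
 */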
789 fill_threshold = min_t(unsigned int, IONIC_RX_FILL_THRESHOLD,
790 q->num_descs / IONIC_RX_FILL_DIV);
791 if (n_fill < fill_threshold)
792 return;
793
794 len = netdev->mtu + VLAN_ETH_HLEN;
795
796 if (xdp_prog) {
797 /* Always alloc the full size buffer, but only need
798 * the actual frag_len in the descriptor.
799 * XDP uses space in the first buffer, so account for
800 * head room, tail room, and ip header in the first frag size.
801 */
802 headroom = XDP_PACKET_HEADROOM;
803 first_buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN + headroom;
804 first_frag_len = min_t(u16, len + headroom, first_buf_len);
805 } else {
806 /* Use MTU size if smaller than max buffer size */
807 first_frag_len = min_t(u16, len, IONIC_PAGE_SIZE);
808 first_buf_len = first_frag_len;
809 }
810
811 for (i = n_fill; i; i--) {
812 /* fill main descriptor - buf[0] */
813 nfrags = 0;
814 remain_len = len;
815 desc = &q->rxq[q->head_idx];
816 desc_info = &q->rx_info[q->head_idx];
817 buf_info = &desc_info->bufs[0];
818
819 buf_info->len = first_buf_len;
820 frag_len = first_frag_len - headroom;
821
822 /* get a new buffer if we can't reuse one */
823 if (!buf_info->page)
824 buf_info->page = page_pool_alloc(q->page_pool,
825 &buf_info->page_offset,
826 &buf_info->len,
827 GFP_ATOMIC);
828 if (unlikely(!buf_info->page)) {
829 buf_info->len = 0;
830 return;
831 }
832
833 desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info) + headroom);
834 desc->len = cpu_to_le16(frag_len);
835 remain_len -= frag_len;
836 buf_info++;
837 nfrags++;
838
839 /* fill sg descriptors - buf[1..n] */
840 sg_elem = q->rxq_sgl[q->head_idx].elems;
841 for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++, sg_elem++) {
842 frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE);
843
844 /* Recycle any leftover buffers that are too small to reuse */
845 if (unlikely(buf_info->page && buf_info->len < frag_len))
846 ionic_rx_put_buf_direct(q, buf_info);
847
848 /* Get new buffer if needed */
849 if (!buf_info->page) {
850 buf_info->len = frag_len;
851 buf_info->page = page_pool_alloc(q->page_pool,
852 &buf_info->page_offset,
853 &buf_info->len,
854 GFP_ATOMIC);
855 if (unlikely(!buf_info->page)) {
856 buf_info->len = 0;
857 return;
858 }
859 }
860
861 sg_elem->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info));
862 sg_elem->len = cpu_to_le16(frag_len);
863 remain_len -= frag_len;
864 buf_info++;
865 nfrags++;
866 }
867
868 /* clear end sg element as a sentinel */
869 if (j < q->max_sg_elems)
870 memset(sg_elem, 0, sizeof(*sg_elem));
871
872 desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
873 IONIC_RXQ_DESC_OPCODE_SIMPLE;
874 desc_info->nbufs = nfrags;
875
876 ionic_write_cmb_desc(q, desc);
877
878 ionic_rxq_post(q, false);
879 }
880
881 ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
882 q->dbval | q->head_idx);
883
884 q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
885 q->dbell_jiffies = jiffies;
886 }
887
888 void ionic_rx_empty(struct ionic_queue *q)
889 {
890 struct ionic_rx_desc_info *desc_info;
891 unsigned int i, j;
892
893 for (i = 0; i < q->num_descs; i++) {
894 desc_info = &q->rx_info[i];
895 for (j = 0; j < ARRAY_SIZE(desc_info->bufs); j++)
896 ionic_rx_put_buf(q, &desc_info->bufs[j]);
897 desc_info->nbufs = 0;
898 }
899
900 q->head_idx = 0;
901 q->tail_idx = 0;
902 }
903
904 static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
905 {
906 struct dim_sample dim_sample;
907 struct ionic_lif *lif;
908 unsigned int qi;
909 u64 pkts, bytes;
910
911 if (!qcq->intr.dim_coal_hw)
912 return;
913
914 lif = qcq->q.lif;
915 qi = qcq->cq.bound_q->index;
916
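/* Feed the interrupt's packet/byte counters to net_dim(); which
 * counters we sample depends on whether this interrupt services
 * TX only, RX only, or a combined TX/RX pair.
 */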
917 switch (napi_mode) {
918 case IONIC_LIF_F_TX_DIM_INTR:
919 pkts = lif->txqstats[qi].pkts;
920 bytes = lif->txqstats[qi].bytes;
921 break;
922 case IONIC_LIF_F_RX_DIM_INTR:
923 pkts = lif->rxqstats[qi].pkts;
924 bytes = lif->rxqstats[qi].bytes;
925 break;
926 default:
927 pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
928 bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
929 break;
930 }
931
932 dim_update_sample(qcq->cq.bound_intr->rearm_count,
933 pkts, bytes, &dim_sample);
934
935 net_dim(&qcq->dim, &dim_sample);
936 }
937
938 int ionic_tx_napi(struct napi_struct *napi, int budget)
939 {
940 struct ionic_qcq *qcq = napi_to_qcq(napi);
941 struct ionic_cq *cq = napi_to_cq(napi);
942 u32 work_done = 0;
943 u32 flags = 0;
944
945 work_done = ionic_tx_cq_service(cq, budget, !!budget);
946
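/* A zero budget indicates a netpoll-style context: only TX
 * completions may be processed (done above with in_napi=false), so
 * skip the interrupt credit and re-arm handling below.
 */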
947 if (unlikely(!budget))
948 return budget;
949
950 if (work_done < budget && napi_complete_done(napi, work_done)) {
951 ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
952 flags |= IONIC_INTR_CRED_UNMASK;
953 cq->bound_intr->rearm_count++;
954 }
955
956 if (work_done || flags) {
957 flags |= IONIC_INTR_CRED_RESET_COALESCE;
958 ionic_intr_credits(cq->idev->intr_ctrl,
959 cq->bound_intr->index,
960 work_done, flags);
961 }
962
963 if (!work_done && cq->bound_q->lif->doorbell_wa)
964 ionic_txq_poke_doorbell(&qcq->q);
965
966 return work_done;
967 }
968
969 static void ionic_xdp_do_flush(struct ionic_cq *cq)
970 {
971 if (cq->bound_q->xdp_flush) {
972 xdp_do_flush();
973 cq->bound_q->xdp_flush = false;
974 }
975 }
976
977 static unsigned int ionic_rx_cq_service(struct ionic_cq *cq,
978 unsigned int work_to_do)
979 {
980 struct ionic_queue *q = cq->bound_q;
981 unsigned int work_done = 0;
982 struct bpf_prog *xdp_prog;
983
984 if (work_to_do == 0)
985 return 0;
986
987 xdp_prog = READ_ONCE(q->xdp_prog);
988 while (__ionic_rx_service(cq, xdp_prog)) {
989 if (cq->tail_idx == cq->num_descs - 1)
990 cq->done_color = !cq->done_color;
991
992 cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
993
994 if (++work_done >= work_to_do)
995 break;
996 }
997 ionic_rx_fill(q, xdp_prog);
998 ionic_xdp_do_flush(cq);
999
1000 return work_done;
1001 }
1002
1003 int ionic_rx_napi(struct napi_struct *napi, int budget)
1004 {
1005 struct ionic_qcq *qcq = napi_to_qcq(napi);
1006 struct ionic_cq *cq = napi_to_cq(napi);
1007 u32 work_done = 0;
1008 u32 flags = 0;
1009
1010 if (unlikely(!budget))
1011 return budget;
1012
1013 work_done = ionic_rx_cq_service(cq, budget);
1014
1015 if (work_done < budget && napi_complete_done(napi, work_done)) {
1016 ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
1017 flags |= IONIC_INTR_CRED_UNMASK;
1018 cq->bound_intr->rearm_count++;
1019 }
1020
1021 if (work_done || flags) {
1022 flags |= IONIC_INTR_CRED_RESET_COALESCE;
1023 ionic_intr_credits(cq->idev->intr_ctrl,
1024 cq->bound_intr->index,
1025 work_done, flags);
1026 }
1027
1028 if (!work_done && cq->bound_q->lif->doorbell_wa)
1029 ionic_rxq_poke_doorbell(&qcq->q);
1030
1031 return work_done;
1032 }
1033
1034 int ionic_txrx_napi(struct napi_struct *napi, int budget)
1035 {
1036 struct ionic_qcq *rxqcq = napi_to_qcq(napi);
1037 struct ionic_cq *rxcq = napi_to_cq(napi);
1038 unsigned int qi = rxcq->bound_q->index;
1039 struct ionic_qcq *txqcq;
1040 struct ionic_lif *lif;
1041 struct ionic_cq *txcq;
1042 u32 rx_work_done = 0;
1043 u32 tx_work_done = 0;
1044 u32 flags = 0;
1045
1046 lif = rxcq->bound_q->lif;
1047 txqcq = lif->txqcqs[qi];
1048 txcq = &lif->txqcqs[qi]->cq;
1049
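/* TX completions are serviced with their own fixed budget; the NAPI
 * budget is reserved for the RX work below.
 */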
1050 tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT, !!budget);
1051
1052 if (unlikely(!budget))
1053 return budget;
1054
1055 rx_work_done = ionic_rx_cq_service(rxcq, budget);
1056
1057 if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
1058 ionic_dim_update(rxqcq, 0);
1059 flags |= IONIC_INTR_CRED_UNMASK;
1060 rxcq->bound_intr->rearm_count++;
1061 }
1062
1063 if (rx_work_done || flags) {
1064 flags |= IONIC_INTR_CRED_RESET_COALESCE;
1065 ionic_intr_credits(rxcq->idev->intr_ctrl, rxcq->bound_intr->index,
1066 tx_work_done + rx_work_done, flags);
1067 }
1068
1069 if (lif->doorbell_wa) {
1070 if (!rx_work_done)
1071 ionic_rxq_poke_doorbell(&rxqcq->q);
1072 if (!tx_work_done)
1073 ionic_txq_poke_doorbell(&txqcq->q);
1074 }
1075
1076 return rx_work_done;
1077 }
1078
1079 static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
1080 void *data, size_t len)
1081 {
1082 struct device *dev = q->dev;
1083 dma_addr_t dma_addr;
1084
1085 dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
1086 if (unlikely(dma_mapping_error(dev, dma_addr))) {
1087 net_warn_ratelimited("%s: DMA single map failed on %s!\n",
1088 dev_name(dev), q->name);
1089 q_to_tx_stats(q)->dma_map_err++;
1090 return DMA_MAPPING_ERROR;
1091 }
1092 return dma_addr;
1093 }
1094
1095 static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
1096 const skb_frag_t *frag,
1097 size_t offset, size_t len)
1098 {
1099 struct device *dev = q->dev;
1100 dma_addr_t dma_addr;
1101
1102 dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
1103 if (unlikely(dma_mapping_error(dev, dma_addr))) {
1104 net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
1105 dev_name(dev), q->name);
1106 q_to_tx_stats(q)->dma_map_err++;
1107 return DMA_MAPPING_ERROR;
1108 }
1109 return dma_addr;
1110 }
1111
1112 static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
1113 struct ionic_tx_desc_info *desc_info)
1114 {
1115 struct ionic_buf_info *buf_info = desc_info->bufs;
1116 struct device *dev = q->dev;
1117 dma_addr_t dma_addr;
1118 unsigned int nfrags;
1119 skb_frag_t *frag;
1120 int frag_idx;
1121
1122 dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
1123 if (dma_addr == DMA_MAPPING_ERROR)
1124 return -EIO;
1125 buf_info->dma_addr = dma_addr;
1126 buf_info->len = skb_headlen(skb);
1127 buf_info++;
1128
1129 frag = skb_shinfo(skb)->frags;
1130 nfrags = skb_shinfo(skb)->nr_frags;
1131 for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
1132 dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
1133 if (dma_addr == DMA_MAPPING_ERROR)
1134 goto dma_fail;
1135 buf_info->dma_addr = dma_addr;
1136 buf_info->len = skb_frag_size(frag);
1137 buf_info++;
1138 }
1139
1140 desc_info->nbufs = 1 + nfrags;
1141
1142 return 0;
1143
1144 dma_fail:
1145 /* unwind the frag mappings and the head mapping */
1146 while (frag_idx > 0) {
1147 frag_idx--;
1148 buf_info--;
1149 dma_unmap_page(dev, buf_info->dma_addr,
1150 buf_info->len, DMA_TO_DEVICE);
1151 }
1152 dma_unmap_single(dev, desc_info->bufs[0].dma_addr,
1153 desc_info->bufs[0].len, DMA_TO_DEVICE);
1154 return -EIO;
1155 }
1156
1157 static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
1158 struct ionic_tx_desc_info *desc_info)
1159 {
1160 struct ionic_buf_info *buf_info = desc_info->bufs;
1161 struct device *dev = q->dev;
1162 unsigned int i;
1163
1164 if (!desc_info->nbufs)
1165 return;
1166
1167 dma_unmap_single(dev, buf_info->dma_addr,
1168 buf_info->len, DMA_TO_DEVICE);
1169 buf_info++;
1170 for (i = 1; i < desc_info->nbufs; i++, buf_info++)
1171 dma_unmap_page(dev, buf_info->dma_addr,
1172 buf_info->len, DMA_TO_DEVICE);
1173
1174 desc_info->nbufs = 0;
1175 }
1176
1177 static void ionic_tx_clean(struct ionic_queue *q,
1178 struct ionic_tx_desc_info *desc_info,
1179 struct ionic_txq_comp *comp,
1180 bool in_napi)
1181 {
1182 struct ionic_tx_stats *stats = q_to_tx_stats(q);
1183 struct ionic_qcq *qcq = q_to_qcq(q);
1184 struct sk_buff *skb;
1185
1186 if (desc_info->xdpf) {
1187 ionic_xdp_tx_desc_clean(q->partner, desc_info, in_napi);
1188 stats->clean++;
1189
1190 if (unlikely(__netif_subqueue_stopped(q->lif->netdev, q->index)))
1191 netif_wake_subqueue(q->lif->netdev, q->index);
1192
1193 return;
1194 }
1195
1196 ionic_tx_desc_unmap_bufs(q, desc_info);
1197
1198 skb = desc_info->skb;
1199 if (!skb)
1200 return;
1201
1202 if (unlikely(ionic_txq_hwstamp_enabled(q))) {
1203 if (comp) {
1204 struct skb_shared_hwtstamps hwts = {};
1205 __le64 *cq_desc_hwstamp;
1206 u64 hwstamp;
1207
1208 cq_desc_hwstamp =
1209 (void *)comp +
1210 qcq->cq.desc_size -
1211 sizeof(struct ionic_txq_comp) -
1212 IONIC_HWSTAMP_CQ_NEGOFFSET;
1213
1214 hwstamp = le64_to_cpu(*cq_desc_hwstamp);
1215
1216 if (hwstamp != IONIC_HWSTAMP_INVALID) {
1217 hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
1218
1219 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1220 skb_tstamp_tx(skb, &hwts);
1221
1222 stats->hwstamp_valid++;
1223 } else {
1224 stats->hwstamp_invalid++;
1225 }
1226 }
1227 }
1228
1229 desc_info->bytes = skb->len;
1230 stats->clean++;
1231
1232 napi_consume_skb(skb, likely(in_napi) ? 1 : 0);
1233 }
1234
1235 static bool ionic_tx_service(struct ionic_cq *cq,
1236 unsigned int *total_pkts,
1237 unsigned int *total_bytes,
1238 bool in_napi)
1239 {
1240 struct ionic_tx_desc_info *desc_info;
1241 struct ionic_queue *q = cq->bound_q;
1242 struct ionic_txq_comp *comp;
1243 unsigned int bytes = 0;
1244 unsigned int pkts = 0;
1245 u16 index;
1246
1247 comp = &((struct ionic_txq_comp *)cq->base)[cq->tail_idx];
1248
1249 if (!color_match(comp->color, cq->done_color))
1250 return false;
1251
1252 /* clean the related q entries, there could be
1253 * several q entries completed for each cq completion
1254 */
1255 do {
1256 desc_info = &q->tx_info[q->tail_idx];
1257 desc_info->bytes = 0;
1258 index = q->tail_idx;
1259 q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
1260 ionic_tx_clean(q, desc_info, comp, in_napi);
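/* Only the first descriptor of a packet carries the skb (the
 * remaining descriptors of a TSO burst do not), so packets are
 * counted once per skb here.
 */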
1261 if (desc_info->skb) {
1262 pkts++;
1263 bytes += desc_info->bytes;
1264 desc_info->skb = NULL;
1265 }
1266 } while (index != le16_to_cpu(comp->comp_index));
1267
1268 (*total_pkts) += pkts;
1269 (*total_bytes) += bytes;
1270
1271 return true;
1272 }
1273
1274 unsigned int ionic_tx_cq_service(struct ionic_cq *cq,
1275 unsigned int work_to_do,
1276 bool in_napi)
1277 {
1278 unsigned int work_done = 0;
1279 unsigned int bytes = 0;
1280 unsigned int pkts = 0;
1281
1282 if (work_to_do == 0)
1283 return 0;
1284
1285 while (ionic_tx_service(cq, &pkts, &bytes, in_napi)) {
1286 if (cq->tail_idx == cq->num_descs - 1)
1287 cq->done_color = !cq->done_color;
1288 cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
1289
1290 if (++work_done >= work_to_do)
1291 break;
1292 }
1293
1294 if (work_done) {
1295 struct ionic_queue *q = cq->bound_q;
1296
1297 if (likely(!ionic_txq_hwstamp_enabled(q)))
1298 netif_txq_completed_wake(q_to_ndq(q->lif->netdev, q),
1299 pkts, bytes,
1300 ionic_q_space_avail(q),
1301 IONIC_TSO_DESCS_NEEDED);
1302 }
1303
1304 return work_done;
1305 }
1306
1307 void ionic_tx_flush(struct ionic_cq *cq)
1308 {
1309 u32 work_done;
1310
1311 work_done = ionic_tx_cq_service(cq, cq->num_descs, false);
1312 if (work_done)
1313 ionic_intr_credits(cq->idev->intr_ctrl, cq->bound_intr->index,
1314 work_done, IONIC_INTR_CRED_RESET_COALESCE);
1315 }
1316
1317 void ionic_tx_empty(struct ionic_queue *q)
1318 {
1319 struct ionic_tx_desc_info *desc_info;
1320 int bytes = 0;
1321 int pkts = 0;
1322
1323 /* walk the not completed tx entries, if any */
1324 while (q->head_idx != q->tail_idx) {
1325 desc_info = &q->tx_info[q->tail_idx];
1326 desc_info->bytes = 0;
1327 q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
1328 ionic_tx_clean(q, desc_info, NULL, false);
1329 if (desc_info->skb) {
1330 pkts++;
1331 bytes += desc_info->bytes;
1332 desc_info->skb = NULL;
1333 }
1334 }
1335
1336 if (likely(!ionic_txq_hwstamp_enabled(q))) {
1337 struct netdev_queue *ndq = q_to_ndq(q->lif->netdev, q);
1338
1339 netdev_tx_completed_queue(ndq, pkts, bytes);
1340 netdev_tx_reset_queue(ndq);
1341 }
1342 }
1343
1344 static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
1345 {
1346 int err;
1347
1348 err = skb_cow_head(skb, 0);
1349 if (unlikely(err))
1350 return err;
1351
1352 if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
1353 inner_ip_hdr(skb)->check = 0;
1354 inner_tcp_hdr(skb)->check =
1355 ~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
1356 inner_ip_hdr(skb)->daddr,
1357 0, IPPROTO_TCP, 0);
1358 } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
1359 inner_tcp_hdr(skb)->check =
1360 ~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
1361 &inner_ipv6_hdr(skb)->daddr,
1362 0, IPPROTO_TCP, 0);
1363 }
1364
1365 return 0;
1366 }
1367
1368 static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
1369 {
1370 int err;
1371
1372 err = skb_cow_head(skb, 0);
1373 if (unlikely(err))
1374 return err;
1375
1376 if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
1377 ip_hdr(skb)->check = 0;
1378 tcp_hdr(skb)->check =
1379 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1380 ip_hdr(skb)->daddr,
1381 0, IPPROTO_TCP, 0);
1382 } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
1383 tcp_v6_gso_csum_prep(skb);
1384 }
1385
1386 return 0;
1387 }
1388
1389 static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q,
1390 struct ionic_txq_desc *desc,
1391 struct sk_buff *skb,
1392 dma_addr_t addr, u8 nsge, u16 len,
1393 unsigned int hdrlen, unsigned int mss,
1394 bool outer_csum,
1395 u16 vlan_tci, bool has_vlan,
1396 bool start, bool done)
1397 {
1398 u8 flags = 0;
1399 u64 cmd;
1400
1401 flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
1402 flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
1403 flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
1404 flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;
1405
1406 cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
1407 desc->cmd = cpu_to_le64(cmd);
1408 desc->len = cpu_to_le16(len);
1409 desc->vlan_tci = cpu_to_le16(vlan_tci);
1410 desc->hdr_len = cpu_to_le16(hdrlen);
1411 desc->mss = cpu_to_le16(mss);
1412
1413 ionic_write_cmb_desc(q, desc);
1414
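/* Only the first descriptor of the TSO burst carries the timestamp
 * and the BQL byte accounting; the doorbell is held off until the
 * final descriptor is posted.
 */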
1415 if (start) {
1416 skb_tx_timestamp(skb);
1417 if (likely(!ionic_txq_hwstamp_enabled(q)))
1418 netdev_tx_sent_queue(q_to_ndq(netdev, q), skb->len);
1419 ionic_txq_post(q, false);
1420 } else {
1421 ionic_txq_post(q, done);
1422 }
1423 }
1424
1425 static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
1426 struct sk_buff *skb)
1427 {
1428 struct ionic_tx_stats *stats = q_to_tx_stats(q);
1429 struct ionic_tx_desc_info *desc_info;
1430 struct ionic_buf_info *buf_info;
1431 struct ionic_txq_sg_elem *elem;
1432 struct ionic_txq_desc *desc;
1433 unsigned int chunk_len;
1434 unsigned int frag_rem;
1435 unsigned int tso_rem;
1436 unsigned int seg_rem;
1437 dma_addr_t desc_addr;
1438 dma_addr_t frag_addr;
1439 unsigned int hdrlen;
1440 unsigned int len;
1441 unsigned int mss;
1442 bool start, done;
1443 bool outer_csum;
1444 bool has_vlan;
1445 u16 desc_len;
1446 u8 desc_nsge;
1447 u16 vlan_tci;
1448 bool encap;
1449 int err;
1450
1451 has_vlan = !!skb_vlan_tag_present(skb);
1452 vlan_tci = skb_vlan_tag_get(skb);
1453 encap = skb->encapsulation;
1454
1455 /* Preload inner-most TCP csum field with IP pseudo hdr
1456 * calculated with IP length set to zero. HW will later
1457 * add in length to each TCP segment resulting from the TSO.
1458 */
1459
1460 if (encap)
1461 err = ionic_tx_tcp_inner_pseudo_csum(skb);
1462 else
1463 err = ionic_tx_tcp_pseudo_csum(skb);
1464 if (unlikely(err))
1465 return err;
1466
1467 desc_info = &q->tx_info[q->head_idx];
1468 if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
1469 return -EIO;
1470
1471 len = skb->len;
1472 mss = skb_shinfo(skb)->gso_size;
1473 outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
1474 SKB_GSO_GRE_CSUM |
1475 SKB_GSO_IPXIP4 |
1476 SKB_GSO_IPXIP6 |
1477 SKB_GSO_UDP_TUNNEL |
1478 SKB_GSO_UDP_TUNNEL_CSUM));
1479 if (encap)
1480 hdrlen = skb_inner_tcp_all_headers(skb);
1481 else
1482 hdrlen = skb_tcp_all_headers(skb);
1483
1484 desc_info->skb = skb;
1485 buf_info = desc_info->bufs;
1486 tso_rem = len;
1487 seg_rem = min(tso_rem, hdrlen + mss);
1488
1489 frag_addr = 0;
1490 frag_rem = 0;
1491
1492 start = true;
1493
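/* Carve the mapped buffers into segments of at most hdrlen + mss
 * (first) or mss (rest); each segment gets one TSO descriptor plus
 * however many SG elements it needs.
 */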
1494 while (tso_rem > 0) {
1495 desc = NULL;
1496 elem = NULL;
1497 desc_addr = 0;
1498 desc_len = 0;
1499 desc_nsge = 0;
1500 /* use fragments until we have enough to post a single descriptor */
1501 while (seg_rem > 0) {
1502 /* if the fragment is exhausted then move to the next one */
1503 if (frag_rem == 0) {
1504 /* grab the next fragment */
1505 frag_addr = buf_info->dma_addr;
1506 frag_rem = buf_info->len;
1507 buf_info++;
1508 }
1509 chunk_len = min(frag_rem, seg_rem);
1510 if (!desc) {
1511 /* fill main descriptor */
1512 desc = &q->txq[q->head_idx];
1513 elem = ionic_tx_sg_elems(q);
1514 desc_addr = frag_addr;
1515 desc_len = chunk_len;
1516 } else {
1517 /* fill sg descriptor */
1518 elem->addr = cpu_to_le64(frag_addr);
1519 elem->len = cpu_to_le16(chunk_len);
1520 elem++;
1521 desc_nsge++;
1522 }
1523 frag_addr += chunk_len;
1524 frag_rem -= chunk_len;
1525 tso_rem -= chunk_len;
1526 seg_rem -= chunk_len;
1527 }
1528 seg_rem = min(tso_rem, mss);
1529 done = (tso_rem == 0);
1530 /* post descriptor */
1531 ionic_tx_tso_post(netdev, q, desc, skb, desc_addr, desc_nsge,
1532 desc_len, hdrlen, mss, outer_csum, vlan_tci,
1533 has_vlan, start, done);
1534 start = false;
1535 /* Buffer information is stored with the first tso descriptor */
1536 desc_info = &q->tx_info[q->head_idx];
1537 desc_info->nbufs = 0;
1538 }
1539
1540 stats->pkts += DIV_ROUND_UP(len - hdrlen, mss);
1541 stats->bytes += len;
1542 stats->tso++;
1543 stats->tso_bytes = len;
1544
1545 return 0;
1546 }
1547
1548 static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
1549 struct ionic_tx_desc_info *desc_info)
1550 {
1551 struct ionic_txq_desc *desc = &q->txq[q->head_idx];
1552 struct ionic_buf_info *buf_info = desc_info->bufs;
1553 struct ionic_tx_stats *stats = q_to_tx_stats(q);
1554 bool has_vlan;
1555 u8 flags = 0;
1556 bool encap;
1557 u64 cmd;
1558
1559 has_vlan = !!skb_vlan_tag_present(skb);
1560 encap = skb->encapsulation;
1561
1562 flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
1563 flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
1564
1565 cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
1566 flags, skb_shinfo(skb)->nr_frags,
1567 buf_info->dma_addr);
1568 desc->cmd = cpu_to_le64(cmd);
1569 desc->len = cpu_to_le16(buf_info->len);
1570 if (has_vlan) {
1571 desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
1572 stats->vlan_inserted++;
1573 } else {
1574 desc->vlan_tci = 0;
1575 }
1576 desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
1577 desc->csum_offset = cpu_to_le16(skb->csum_offset);
1578
1579 ionic_write_cmb_desc(q, desc);
1580
1581 if (skb_csum_is_sctp(skb))
1582 stats->crc32_csum++;
1583 else
1584 stats->csum++;
1585 }
1586
1587 static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
1588 struct ionic_tx_desc_info *desc_info)
1589 {
1590 struct ionic_txq_desc *desc = &q->txq[q->head_idx];
1591 struct ionic_buf_info *buf_info = desc_info->bufs;
1592 struct ionic_tx_stats *stats = q_to_tx_stats(q);
1593 bool has_vlan;
1594 u8 flags = 0;
1595 bool encap;
1596 u64 cmd;
1597
1598 has_vlan = !!skb_vlan_tag_present(skb);
1599 encap = skb->encapsulation;
1600
1601 flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
1602 flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
1603
1604 cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
1605 flags, skb_shinfo(skb)->nr_frags,
1606 buf_info->dma_addr);
1607 desc->cmd = cpu_to_le64(cmd);
1608 desc->len = cpu_to_le16(buf_info->len);
1609 if (has_vlan) {
1610 desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
1611 stats->vlan_inserted++;
1612 } else {
1613 desc->vlan_tci = 0;
1614 }
1615 desc->csum_start = 0;
1616 desc->csum_offset = 0;
1617
1618 ionic_write_cmb_desc(q, desc);
1619
1620 stats->csum_none++;
1621 }
1622
1623 static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
1624 struct ionic_tx_desc_info *desc_info)
1625 {
1626 struct ionic_buf_info *buf_info = &desc_info->bufs[1];
1627 struct ionic_tx_stats *stats = q_to_tx_stats(q);
1628 struct ionic_txq_sg_elem *elem;
1629 unsigned int i;
1630
1631 elem = ionic_tx_sg_elems(q);
1632 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {
1633 elem->addr = cpu_to_le64(buf_info->dma_addr);
1634 elem->len = cpu_to_le16(buf_info->len);
1635 }
1636
1637 stats->frags += skb_shinfo(skb)->nr_frags;
1638 }
1639
1640 static int ionic_tx(struct net_device *netdev, struct ionic_queue *q,
1641 struct sk_buff *skb)
1642 {
1643 struct ionic_tx_desc_info *desc_info = &q->tx_info[q->head_idx];
1644 struct ionic_tx_stats *stats = q_to_tx_stats(q);
1645 bool ring_dbell = true;
1646
1647 if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
1648 return -EIO;
1649
1650 desc_info->skb = skb;
1651
1652 /* set up the initial descriptor */
1653 if (skb->ip_summed == CHECKSUM_PARTIAL)
1654 ionic_tx_calc_csum(q, skb, desc_info);
1655 else
1656 ionic_tx_calc_no_csum(q, skb, desc_info);
1657
1658 /* add frags */
1659 ionic_tx_skb_frags(q, skb, desc_info);
1660
1661 skb_tx_timestamp(skb);
1662 stats->pkts++;
1663 stats->bytes += skb->len;
1664
1665 if (likely(!ionic_txq_hwstamp_enabled(q))) {
1666 struct netdev_queue *ndq = q_to_ndq(netdev, q);
1667
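/* Stop the queue if a worst-case packet (linear data plus
 * MAX_SKB_FRAGS frags) might not fit; ionic_tx_cq_service() will
 * wake it once completions free up space. __netdev_tx_sent_queue()
 * tells us whether to ring the doorbell now or leave it for a later
 * xmit_more packet.
 */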
1668 if (unlikely(!ionic_q_has_space(q, MAX_SKB_FRAGS + 1)))
1669 netif_tx_stop_queue(ndq);
1670 ring_dbell = __netdev_tx_sent_queue(ndq, skb->len,
1671 netdev_xmit_more());
1672 }
1673 ionic_txq_post(q, ring_dbell);
1674
1675 return 0;
1676 }
1677
1678 static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
1679 {
1680 int nr_frags = skb_shinfo(skb)->nr_frags;
1681 bool too_many_frags = false;
1682 skb_frag_t *frag;
1683 int desc_bufs;
1684 int chunk_len;
1685 int frag_rem;
1686 int tso_rem;
1687 int seg_rem;
1688 bool encap;
1689 int hdrlen;
1690 int ndescs;
1691 int err;
1692
1693 /* Each desc is mss long max, so a descriptor for each gso_seg */
1694 if (skb_is_gso(skb)) {
1695 ndescs = skb_shinfo(skb)->gso_segs;
1696 if (!nr_frags)
1697 return ndescs;
1698 } else {
1699 ndescs = 1;
1700 if (!nr_frags)
1701 return ndescs;
1702
1703 if (unlikely(nr_frags > q->max_sg_elems)) {
1704 too_many_frags = true;
1705 goto linearize;
1706 }
1707
1708 return ndescs;
1709 }
1710
1711 /* We need to scan the skb to be sure that none of the MTU sized
1712 * packets in the TSO will require more sgs per descriptor than we
1713 * can support. We loop through the frags, add up the lengths for
1714 * a packet, and count the number of sgs used per packet.
1715 */
1716 tso_rem = skb->len;
1717 frag = skb_shinfo(skb)->frags;
1718 encap = skb->encapsulation;
1719
1720 /* start with just hdr in first part of first descriptor */
1721 if (encap)
1722 hdrlen = skb_inner_tcp_all_headers(skb);
1723 else
1724 hdrlen = skb_tcp_all_headers(skb);
1725 seg_rem = min_t(int, tso_rem, hdrlen + skb_shinfo(skb)->gso_size);
1726 frag_rem = hdrlen;
1727
1728 while (tso_rem > 0) {
1729 desc_bufs = 0;
1730 while (seg_rem > 0) {
1731 desc_bufs++;
1732
1733 /* We add the +1 because we can take buffers for one
1734 * more than we have SGs: one for the initial desc data
1735 * in addition to the SG segments that might follow.
1736 */
1737 if (desc_bufs > q->max_sg_elems + 1) {
1738 too_many_frags = true;
1739 goto linearize;
1740 }
1741
1742 if (frag_rem == 0) {
1743 frag_rem = skb_frag_size(frag);
1744 frag++;
1745 }
1746 chunk_len = min(frag_rem, seg_rem);
1747 frag_rem -= chunk_len;
1748 tso_rem -= chunk_len;
1749 seg_rem -= chunk_len;
1750 }
1751
1752 seg_rem = min_t(int, tso_rem, skb_shinfo(skb)->gso_size);
1753 }
1754
1755 linearize:
1756 if (too_many_frags) {
1757 err = skb_linearize(skb);
1758 if (unlikely(err))
1759 return err;
1760 q_to_tx_stats(q)->linearize++;
1761 }
1762
1763 return ndescs;
1764 }
1765
1766 static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
1767 struct net_device *netdev)
1768 {
1769 struct ionic_lif *lif = netdev_priv(netdev);
1770 struct ionic_queue *q;
1771 int err, ndescs;
1772
1773 /* Does not stop/start txq, because we post to a separate tx queue
1774 * for timestamping, and if a packet can't be posted immediately to
1775 * the timestamping queue, it is dropped.
1776 */
1777
1778 q = &lif->hwstamp_txq->q;
1779 ndescs = ionic_tx_descs_needed(q, skb);
1780 if (unlikely(ndescs < 0))
1781 goto err_out_drop;
1782
1783 if (unlikely(!ionic_q_has_space(q, ndescs)))
1784 goto err_out_drop;
1785
1786 skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
1787 if (skb_is_gso(skb))
1788 err = ionic_tx_tso(netdev, q, skb);
1789 else
1790 err = ionic_tx(netdev, q, skb);
1791
1792 if (unlikely(err))
1793 goto err_out_drop;
1794
1795 return NETDEV_TX_OK;
1796
1797 err_out_drop:
1798 q->drop++;
1799 dev_kfree_skb(skb);
1800 return NETDEV_TX_OK;
1801 }
1802
1803 netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1804 {
1805 u16 queue_index = skb_get_queue_mapping(skb);
1806 struct ionic_lif *lif = netdev_priv(netdev);
1807 struct ionic_queue *q;
1808 int ndescs;
1809 int err;
1810
1811 if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
1812 dev_kfree_skb(skb);
1813 return NETDEV_TX_OK;
1814 }
1815
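/* Packets asking for a hardware TX timestamp are steered to the
 * dedicated timestamping queue when one has been set up.
 */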
1816 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
1817 if (lif->hwstamp_txq && lif->phc->ts_config_tx_mode)
1818 return ionic_start_hwstamp_xmit(skb, netdev);
1819
1820 if (unlikely(queue_index >= lif->nxqs))
1821 queue_index = 0;
1822 q = &lif->txqcqs[queue_index]->q;
1823
1824 ndescs = ionic_tx_descs_needed(q, skb);
1825 if (ndescs < 0)
1826 goto err_out_drop;
1827
1828 if (!netif_txq_maybe_stop(q_to_ndq(netdev, q),
1829 ionic_q_space_avail(q),
1830 ndescs, ndescs))
1831 return NETDEV_TX_BUSY;
1832
1833 if (skb_is_gso(skb))
1834 err = ionic_tx_tso(netdev, q, skb);
1835 else
1836 err = ionic_tx(netdev, q, skb);
1837
1838 if (unlikely(err))
1839 goto err_out_drop;
1840
1841 return NETDEV_TX_OK;
1842
1843 err_out_drop:
1844 q->drop++;
1845 dev_kfree_skb(skb);
1846 return NETDEV_TX_OK;
1847 }
1848