1 /*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35 #include <linux/skbuff.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/if_vlan.h>
39 #include <linux/ip.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/jiffies.h>
42 #include <linux/prefetch.h>
43 #include <linux/export.h>
44 #include <net/xfrm.h>
45 #include <net/ipv6.h>
46 #include <net/tcp.h>
47 #include <net/busy_poll.h>
48 #ifdef CONFIG_CHELSIO_T4_FCOE
49 #include <scsi/fc/fc_fcoe.h>
50 #endif /* CONFIG_CHELSIO_T4_FCOE */
51 #include "cxgb4.h"
52 #include "t4_regs.h"
53 #include "t4_values.h"
54 #include "t4_msg.h"
55 #include "t4fw_api.h"
56 #include "cxgb4_ptp.h"
57 #include "cxgb4_uld.h"
58 #include "cxgb4_tc_mqprio.h"
59 #include "sched.h"
60
61 /*
62 * Rx buffer size. We use largish buffers if possible but settle for single
63 * pages under memory shortage.
64 */
65 #if PAGE_SHIFT >= 16
66 # define FL_PG_ORDER 0
67 #else
68 # define FL_PG_ORDER (16 - PAGE_SHIFT)
69 #endif
70
71 /* RX_PULL_LEN should be <= RX_COPY_THRES */
72 #define RX_COPY_THRES 256
73 #define RX_PULL_LEN 128
74
75 /*
76 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
77 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
78 */
79 #define RX_PKT_SKB_LEN 512
80
81 /*
82 * Max number of Tx descriptors we clean up at a time. Should be modest as
83 * freeing skbs isn't cheap and it happens while holding locks. As long as we
84 * free packets faster than they arrive, we'll eventually catch up and keep
85 * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. It should
86 * also match the CIDX Flush Threshold.
87 */
88 #define MAX_TX_RECLAIM 32
89
90 /*
91 * Max number of Rx buffers we replenish at a time. Again keep this modest,
92 * allocating buffers isn't cheap either.
93 */
94 #define MAX_RX_REFILL 16U
95
96 /*
97 * Period of the Rx queue check timer. This timer is infrequent as it has
98 * something to do only when the system experiences severe memory shortage.
99 */
100 #define RX_QCHECK_PERIOD (HZ / 2)
101
102 /*
103 * Period of the Tx queue check timer.
104 */
105 #define TX_QCHECK_PERIOD (HZ / 2)
106
107 /*
108 * Max number of Tx descriptors to be reclaimed by the Tx timer.
109 */
110 #define MAX_TIMER_TX_RECLAIM 100
111
112 /*
113 * Timer index used when backing off due to memory shortage.
114 */
115 #define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
116
117 /*
118 * Suspension threshold for non-Ethernet Tx queues. We require enough room
119 * for a full sized WR.
120 */
121 #define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
122
123 /*
124 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
125 * into a WR.
126 */
127 #define MAX_IMM_TX_PKT_LEN 256
128
129 /*
130 * Max size of a WR sent through a control Tx queue.
131 */
132 #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
133
134 struct rx_sw_desc { /* SW state per Rx descriptor */
135 struct page *page;
136 dma_addr_t dma_addr;
137 };
138
139 /*
140 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
141 * buffer). We currently only support two sizes for 1500- and 9000-byte MTUs.
142 * We could easily support more but there doesn't seem to be much need for
143 * that ...
144 */
145 #define FL_MTU_SMALL 1500
146 #define FL_MTU_LARGE 9000
147
148 static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
149 unsigned int mtu)
150 {
151 struct sge *s = &adapter->sge;
152
153 return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
154 }
155
156 #define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
157 #define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
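/*
 * For illustration only: assuming a typical 2-byte packet shift
 * (s->pktshift == 2) and 64-byte Free List alignment (s->fl_align == 64),
 * the actual values coming from the SGE configuration,
 * FL_MTU_SMALL_BUFSIZE works out to ALIGN(2 + 14 + 4 + 1500, 64) = 1536
 * bytes and FL_MTU_LARGE_BUFSIZE to ALIGN(2 + 14 + 4 + 9000, 64) = 9024.
 */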
158
159 /*
160 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses
161 * these to specify the buffer size as an index into the SGE Free List Buffer
162 * Size register array. We also use bit 4, when the buffer has been unmapped
163 * for DMA, but this is of course never sent to the hardware and is only used
164 * to prevent double unmappings. All of the above requires that the Free List
165 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
166 * 32-byte or a power of 2 greater in alignment. Since the SGE's minimal
167 * Free List Buffer alignment is 32 bytes, this works out for us ...
168 */
169 enum {
170 RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */
171 RX_BUF_SIZE = 0x0f, /* bottom four bits are for buf sizes */
172 RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */
173
174 /*
175 * XXX We shouldn't depend on being able to use these indices.
176 * XXX Especially when some other Master PF has initialized the
177 * XXX adapter or we use the Firmware Configuration File. We
178 * XXX should really search through the Host Buffer Size register
179 * XXX array for the appropriately sized buffer indices.
180 */
181 RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */
182 RX_LARGE_PG_BUF = 0x1, /* large (FL_PG_ORDER) page buffer */
183
184 RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */
185 RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */
186 };
187
188 static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
189 #define MIN_NAPI_WORK 1
190
191 static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
192 {
193 return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
194 }
195
196 static inline bool is_buf_mapped(const struct rx_sw_desc *d)
197 {
198 return !(d->dma_addr & RX_UNMAPPED_BUF);
199 }
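/*
 * For illustration only: a large (FL_PG_ORDER) page buffer DMA-mapped at bus
 * address 0x12345000 would be stored in rx_sw_desc.dma_addr as
 * 0x12345000 | RX_LARGE_PG_BUF == 0x12345001. get_buf_addr() masks off
 * RX_BUF_FLAGS to recover the bus address, get_buf_size() (below) looks at
 * the RX_BUF_SIZE bits, and is_buf_mapped() checks RX_UNMAPPED_BUF. This
 * works only because the buffers are at least 32-byte aligned, as noted
 * above.
 */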
200
201 /**
202 * txq_avail - return the number of available slots in a Tx queue
203 * @q: the Tx queue
204 *
205 * Returns the number of descriptors in a Tx queue available to write new
206 * packets.
207 */
208 static inline unsigned int txq_avail(const struct sge_txq *q)
209 {
210 return q->size - 1 - q->in_use;
211 }
212
213 /**
214 * fl_cap - return the capacity of a free-buffer list
215 * @fl: the FL
216 *
217 * Returns the capacity of a free-buffer list. The capacity is less than
218 * the size because one descriptor needs to be left unpopulated, otherwise
219 * HW will think the FL is empty.
220 */
221 static inline unsigned int fl_cap(const struct sge_fl *fl)
222 {
223 return fl->size - 8; /* 1 descriptor = 8 buffers */
224 }
225
226 /**
227 * fl_starving - return whether a Free List is starving.
228 * @adapter: pointer to the adapter
229 * @fl: the Free List
230 *
231 * Tests specified Free List to see whether the number of buffers
232 * available to the hardware has fallen below our "starvation"
233 * threshold.
234 */
235 static inline bool fl_starving(const struct adapter *adapter,
236 const struct sge_fl *fl)
237 {
238 const struct sge *s = &adapter->sge;
239
240 return fl->avail - fl->pend_cred <= s->fl_starve_thres;
241 }
242
243 int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
244 dma_addr_t *addr)
245 {
246 const skb_frag_t *fp, *end;
247 const struct skb_shared_info *si;
248
249 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
250 if (dma_mapping_error(dev, *addr))
251 goto out_err;
252
253 si = skb_shinfo(skb);
254 end = &si->frags[si->nr_frags];
255
256 for (fp = si->frags; fp < end; fp++) {
257 *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
258 DMA_TO_DEVICE);
259 if (dma_mapping_error(dev, *addr))
260 goto unwind;
261 }
262 return 0;
263
264 unwind:
265 while (fp-- > si->frags)
266 dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
267
268 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
269 out_err:
270 return -ENOMEM;
271 }
272 EXPORT_SYMBOL(cxgb4_map_skb);
273
274 static void unmap_skb(struct device *dev, const struct sk_buff *skb,
275 const dma_addr_t *addr)
276 {
277 const skb_frag_t *fp, *end;
278 const struct skb_shared_info *si;
279
280 dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
281
282 si = skb_shinfo(skb);
283 end = &si->frags[si->nr_frags];
284 for (fp = si->frags; fp < end; fp++)
285 dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
286 }
287
288 #ifdef CONFIG_NEED_DMA_MAP_STATE
289 /**
290 * deferred_unmap_destructor - unmap a packet when it is freed
291 * @skb: the packet
292 *
293 * This is the packet destructor used for Tx packets that need to remain
294 * mapped until they are freed rather than until their Tx descriptors are
295 * freed.
296 */
297 static void deferred_unmap_destructor(struct sk_buff *skb)
298 {
299 unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
300 }
301 #endif
302
303 /**
304 * free_tx_desc - reclaims Tx descriptors and their buffers
305 * @adap: the adapter
306 * @q: the Tx queue to reclaim descriptors from
307 * @n: the number of descriptors to reclaim
308 * @unmap: whether the buffers should be unmapped for DMA
309 *
310 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
311 * Tx buffers. Called with the Tx queue lock held.
312 */
313 void free_tx_desc(struct adapter *adap, struct sge_txq *q,
314 unsigned int n, bool unmap)
315 {
316 unsigned int cidx = q->cidx;
317 struct tx_sw_desc *d;
318
319 d = &q->sdesc[cidx];
320 while (n--) {
321 if (d->skb) { /* an SGL is present */
322 if (unmap && d->addr[0]) {
323 unmap_skb(adap->pdev_dev, d->skb, d->addr);
324 memset(d->addr, 0, sizeof(d->addr));
325 }
326 dev_consume_skb_any(d->skb);
327 d->skb = NULL;
328 }
329 ++d;
330 if (++cidx == q->size) {
331 cidx = 0;
332 d = q->sdesc;
333 }
334 }
335 q->cidx = cidx;
336 }
337
338 /*
339 * Return the number of reclaimable descriptors in a Tx queue.
340 */
341 static inline int reclaimable(const struct sge_txq *q)
342 {
343 int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
344 hw_cidx -= q->cidx;
345 return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
346 }
347
348 /**
349 * reclaim_completed_tx - reclaims completed TX Descriptors
350 * @adap: the adapter
351 * @q: the Tx queue to reclaim completed descriptors from
352 * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
353 * @unmap: whether the buffers should be unmapped for DMA
354 *
355 * Reclaims Tx Descriptors that the SGE has indicated it has processed,
356 * and frees the associated buffers if possible. If @maxreclaim == -1, then
357 * we'll use a default maximum. Called with the TX Queue locked.
358 */
359 static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
360 int maxreclaim, bool unmap)
361 {
362 int reclaim = reclaimable(q);
363
364 if (reclaim) {
365 /*
366 * Limit the amount of clean up work we do at a time to keep
367 * the Tx lock hold time O(1).
368 */
369 if (maxreclaim < 0)
370 maxreclaim = MAX_TX_RECLAIM;
371 if (reclaim > maxreclaim)
372 reclaim = maxreclaim;
373
374 free_tx_desc(adap, q, reclaim, unmap);
375 q->in_use -= reclaim;
376 }
377
378 return reclaim;
379 }
380
381 /**
382 * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
383 * @adap: the adapter
384 * @q: the Tx queue to reclaim completed descriptors from
385 * @unmap: whether the buffers should be unmapped for DMA
386 *
387 * Reclaims Tx descriptors that the SGE has indicated it has processed,
388 * and frees the associated buffers if possible. Called with the Tx
389 * queue locked.
390 */
391 void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
392 bool unmap)
393 {
394 (void)reclaim_completed_tx(adap, q, -1, unmap);
395 }
396 EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);
397
398 static inline int get_buf_size(struct adapter *adapter,
399 const struct rx_sw_desc *d)
400 {
401 struct sge *s = &adapter->sge;
402 unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
403 int buf_size;
404
405 switch (rx_buf_size_idx) {
406 case RX_SMALL_PG_BUF:
407 buf_size = PAGE_SIZE;
408 break;
409
410 case RX_LARGE_PG_BUF:
411 buf_size = PAGE_SIZE << s->fl_pg_order;
412 break;
413
414 case RX_SMALL_MTU_BUF:
415 buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
416 break;
417
418 case RX_LARGE_MTU_BUF:
419 buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
420 break;
421
422 default:
423 BUG();
424 }
425
426 return buf_size;
427 }
428
429 /**
430 * free_rx_bufs - free the Rx buffers on an SGE free list
431 * @adap: the adapter
432 * @q: the SGE free list to free buffers from
433 * @n: how many buffers to free
434 *
435 * Release the next @n buffers on an SGE free-buffer Rx queue. The
436 * buffers must be made inaccessible to HW before calling this function.
437 */
438 static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
439 {
440 while (n--) {
441 struct rx_sw_desc *d = &q->sdesc[q->cidx];
442
443 if (is_buf_mapped(d))
444 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
445 get_buf_size(adap, d),
446 DMA_FROM_DEVICE);
447 put_page(d->page);
448 d->page = NULL;
449 if (++q->cidx == q->size)
450 q->cidx = 0;
451 q->avail--;
452 }
453 }
454
455 /**
456 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
457 * @adap: the adapter
458 * @q: the SGE free list
459 *
460 * Unmap the current buffer on an SGE free-buffer Rx queue. The
461 * buffer must be made inaccessible to HW before calling this function.
462 *
463 * This is similar to @free_rx_bufs above but does not free the buffer.
464 * Do note that the FL still loses any further access to the buffer.
465 */
466 static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
467 {
468 struct rx_sw_desc *d = &q->sdesc[q->cidx];
469
470 if (is_buf_mapped(d))
471 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
472 get_buf_size(adap, d), DMA_FROM_DEVICE);
473 d->page = NULL;
474 if (++q->cidx == q->size)
475 q->cidx = 0;
476 q->avail--;
477 }
478
479 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
480 {
481 if (q->pend_cred >= 8) {
482 u32 val = adap->params.arch.sge_fl_db;
483
484 if (is_t4(adap->params.chip))
485 val |= PIDX_V(q->pend_cred / 8);
486 else
487 val |= PIDX_T5_V(q->pend_cred / 8);
488
489 /* Make sure all memory writes to the Free List queue are
490 * committed before we tell the hardware about them.
491 */
492 wmb();
493
494 /* If we don't have access to the new User Doorbell (T5+), use
495 * the old doorbell mechanism; otherwise use the new BAR2
496 * mechanism.
497 */
498 if (unlikely(q->bar2_addr == NULL)) {
499 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
500 val | QID_V(q->cntxt_id));
501 } else {
502 writel(val | QID_V(q->bar2_qid),
503 q->bar2_addr + SGE_UDB_KDOORBELL);
504
505 /* This Write memory Barrier will force the write to
506 * the User Doorbell area to be flushed.
507 */
508 wmb();
509 }
510 q->pend_cred &= 7;
511 }
512 }
513
514 static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
515 dma_addr_t mapping)
516 {
517 sd->page = pg;
518 sd->dma_addr = mapping; /* includes size low bits */
519 }
520
521 /**
522 * refill_fl - refill an SGE Rx buffer ring
523 * @adap: the adapter
524 * @q: the ring to refill
525 * @n: the number of new buffers to allocate
526 * @gfp: the gfp flags for the allocations
527 *
528 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
529 * allocated with the supplied gfp flags. The caller must assure that
530 * @n does not exceed the queue's capacity. If afterwards the queue is
531 * found critically low, mark it as starving in the bitmap of starving FLs.
532 *
533 * Returns the number of buffers allocated.
534 */
535 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
536 gfp_t gfp)
537 {
538 struct sge *s = &adap->sge;
539 struct page *pg;
540 dma_addr_t mapping;
541 unsigned int cred = q->avail;
542 __be64 *d = &q->desc[q->pidx];
543 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
544 int node;
545
546 #ifdef CONFIG_DEBUG_FS
547 if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
548 goto out;
549 #endif
550
551 gfp |= __GFP_NOWARN;
552 node = dev_to_node(adap->pdev_dev);
553
554 if (s->fl_pg_order == 0)
555 goto alloc_small_pages;
556
557 /*
558 * Prefer large buffers
559 */
560 while (n) {
561 pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
562 if (unlikely(!pg)) {
563 q->large_alloc_failed++;
564 break; /* fall back to single pages */
565 }
566
567 mapping = dma_map_page(adap->pdev_dev, pg, 0,
568 PAGE_SIZE << s->fl_pg_order,
569 DMA_FROM_DEVICE);
570 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
571 __free_pages(pg, s->fl_pg_order);
572 q->mapping_err++;
573 goto out; /* do not try small pages for this error */
574 }
575 mapping |= RX_LARGE_PG_BUF;
576 *d++ = cpu_to_be64(mapping);
577
578 set_rx_sw_desc(sd, pg, mapping);
579 sd++;
580
581 q->avail++;
582 if (++q->pidx == q->size) {
583 q->pidx = 0;
584 sd = q->sdesc;
585 d = q->desc;
586 }
587 n--;
588 }
589
590 alloc_small_pages:
591 while (n--) {
592 pg = alloc_pages_node(node, gfp, 0);
593 if (unlikely(!pg)) {
594 q->alloc_failed++;
595 break;
596 }
597
598 mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
599 DMA_FROM_DEVICE);
600 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
601 put_page(pg);
602 q->mapping_err++;
603 goto out;
604 }
605 *d++ = cpu_to_be64(mapping);
606
607 set_rx_sw_desc(sd, pg, mapping);
608 sd++;
609
610 q->avail++;
611 if (++q->pidx == q->size) {
612 q->pidx = 0;
613 sd = q->sdesc;
614 d = q->desc;
615 }
616 }
617
618 out: cred = q->avail - cred;
619 q->pend_cred += cred;
620 ring_fl_db(adap, q);
621
622 if (unlikely(fl_starving(adap, q))) {
623 smp_wmb();
624 q->low++;
625 set_bit(q->cntxt_id - adap->sge.egr_start,
626 adap->sge.starving_fl);
627 }
628
629 return cred;
630 }
631
632 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
633 {
634 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
635 GFP_ATOMIC);
636 }
637
638 /**
639 * alloc_ring - allocate resources for an SGE descriptor ring
640 * @dev: the PCI device's core device
641 * @nelem: the number of descriptors
642 * @elem_size: the size of each descriptor
643 * @sw_size: the size of the SW state associated with each ring element
644 * @phys: the physical address of the allocated ring
645 * @metadata: address of the array holding the SW state for the ring
646 * @stat_size: extra space in HW ring for status information
647 * @node: preferred node for memory allocations
648 *
649 * Allocates resources for an SGE descriptor ring, such as Tx queues,
650 * free buffer lists, or response queues. Each SGE ring requires
651 * space for its HW descriptors plus, optionally, space for the SW state
652 * associated with each HW entry (the metadata). The function returns
653 * three values: the virtual address for the HW ring (the return value
654 * of the function), the bus address of the HW ring, and the address
655 * of the SW ring.
656 */
657 static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
658 size_t sw_size, dma_addr_t *phys, void *metadata,
659 size_t stat_size, int node)
660 {
661 size_t len = nelem * elem_size + stat_size;
662 void *s = NULL;
663 void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
664
665 if (!p)
666 return NULL;
667 if (sw_size) {
668 s = kcalloc_node(sw_size, nelem, GFP_KERNEL, node);
669
670 if (!s) {
671 dma_free_coherent(dev, len, p, *phys);
672 return NULL;
673 }
674 }
675 if (metadata)
676 *(void **)metadata = s;
677 return p;
678 }
679
680 /**
681 * sgl_len - calculates the size of an SGL of the given capacity
682 * @n: the number of SGL entries
683 *
684 * Calculates the number of flits needed for a scatter/gather list that
685 * can hold the given number of entries.
686 */
687 static inline unsigned int sgl_len(unsigned int n)
688 {
689 /* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
690 * addresses. The DSGL Work Request starts off with a 32-bit DSGL
691 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
692 * repeated sequences of { Length[i], Length[i+1], Address[i],
693 * Address[i+1] } (this ensures that all addresses are on 64-bit
694 * boundaries). If N is even, then Length[N+1] should be set to 0 and
695 * Address[N+1] is omitted.
696 *
697 * The following calculation incorporates all of the above. It's
698 * somewhat hard to follow but, briefly: the "+2" accounts for the
699 * first two flits which include the DSGL header, Length0 and
700 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
701 * flits for every pair of the remaining N) +1 if (n-1) is odd; and
702 * finally the "+((n-1)&1)" adds the one remaining flit needed if
703 * (n-1) is odd ...
704 */
705 n--;
706 return (3 * n) / 2 + (n & 1) + 2;
707 }
708
709 /**
710 * flits_to_desc - returns the num of Tx descriptors for the given flits
711 * @n: the number of flits
712 *
713 * Returns the number of Tx descriptors needed for the supplied number
714 * of flits.
715 */
716 static inline unsigned int flits_to_desc(unsigned int n)
717 {
718 BUG_ON(n > SGE_MAX_WR_LEN / 8);
719 return DIV_ROUND_UP(n, 8);
720 }
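/*
 * Worked example (illustrative only): a non-TSO packet with a linear header
 * plus 3 page fragments needs an SGL with 4 entries, i.e.
 * sgl_len(4) = 2 + (3*3)/2 + (3&1) = 7 flits. Adding the 2 flits of
 * fw_eth_tx_pkt_wr and 2 flits of cpl_tx_pkt_core gives 11 flits, so
 * flits_to_desc(11) = 2 Tx descriptors (a 64-byte descriptor holds 8 flits).
 */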
721
722 /**
723 * is_eth_imm - can an Ethernet packet be sent as immediate data?
724 * @skb: the packet
725 * @chip_ver: chip version
726 *
727 * Returns whether an Ethernet packet is small enough to fit as
728 * immediate data: the needed WR/CPL header length if so, 0 otherwise.
729 */
730 static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
731 {
732 int hdrlen = 0;
733
734 if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
735 chip_ver > CHELSIO_T5) {
736 hdrlen = sizeof(struct cpl_tx_tnl_lso);
737 hdrlen += sizeof(struct cpl_tx_pkt_core);
738 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
739 return 0;
740 } else {
741 hdrlen = skb_shinfo(skb)->gso_size ?
742 sizeof(struct cpl_tx_pkt_lso_core) : 0;
743 hdrlen += sizeof(struct cpl_tx_pkt);
744 }
745 if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
746 return hdrlen;
747 return 0;
748 }
749
750 /**
751 * calc_tx_flits - calculate the number of flits for a packet Tx WR
752 * @skb: the packet
753 * @chip_ver: chip version
754 *
755 * Returns the number of flits needed for a Tx WR for the given Ethernet
756 * packet, including the needed WR and CPL headers.
757 */
758 static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
759 unsigned int chip_ver)
760 {
761 unsigned int flits;
762 int hdrlen = is_eth_imm(skb, chip_ver);
763
764 /* If the skb is small enough, we can pump it out as a work request
765 * with only immediate data. In that case we just have to have the
766 * TX Packet header plus the skb data in the Work Request.
767 */
768
769 if (hdrlen)
770 return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
771
772 /* Otherwise, we're going to have to construct a Scatter gather list
773 * of the skb body and fragments. We also include the flits necessary
774 * for the TX Packet Work Request and CPL. We always have a firmware
775 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
776 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
777 * message or, if we're doing a Large Send Offload, an LSO CPL message
778 * with an embedded TX Packet Write CPL message.
779 */
780 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
781 if (skb_shinfo(skb)->gso_size) {
782 if (skb->encapsulation && chip_ver > CHELSIO_T5) {
783 hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
784 sizeof(struct cpl_tx_tnl_lso);
785 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
786 u32 pkt_hdrlen;
787
788 pkt_hdrlen = eth_get_headlen(skb->dev, skb->data,
789 skb_headlen(skb));
790 hdrlen = sizeof(struct fw_eth_tx_eo_wr) +
791 round_up(pkt_hdrlen, 16);
792 } else {
793 hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
794 sizeof(struct cpl_tx_pkt_lso_core);
795 }
796
797 hdrlen += sizeof(struct cpl_tx_pkt_core);
798 flits += (hdrlen / sizeof(__be64));
799 } else {
800 flits += (sizeof(struct fw_eth_tx_pkt_wr) +
801 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
802 }
803 return flits;
804 }
805
806 /**
807 * cxgb4_write_sgl - populate a scatter/gather list for a packet
808 * @skb: the packet
809 * @q: the Tx queue we are writing into
810 * @sgl: starting location for writing the SGL
811 * @end: points right after the end of the SGL
812 * @start: start offset into skb main-body data to include in the SGL
813 * @addr: the list of bus addresses for the SGL elements
814 *
815 * Generates a gather list for the buffers that make up a packet.
816 * The caller must provide adequate space for the SGL that will be written.
817 * The SGL includes all of the packet's page fragments and the data in its
818 * main body except for the first @start bytes. @sgl must be 16-byte
819 * aligned and within a Tx descriptor with available space. @end points
820 * right after the end of the SGL but does not account for any potential
821 * wrap around, i.e., @end > @sgl.
822 */
823 void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
824 struct ulptx_sgl *sgl, u64 *end, unsigned int start,
825 const dma_addr_t *addr)
826 {
827 unsigned int i, len;
828 struct ulptx_sge_pair *to;
829 const struct skb_shared_info *si = skb_shinfo(skb);
830 unsigned int nfrags = si->nr_frags;
831 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
832
833 len = skb_headlen(skb) - start;
834 if (likely(len)) {
835 sgl->len0 = htonl(len);
836 sgl->addr0 = cpu_to_be64(addr[0] + start);
837 nfrags++;
838 } else {
839 sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
840 sgl->addr0 = cpu_to_be64(addr[1]);
841 }
842
843 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
844 ULPTX_NSGE_V(nfrags));
845 if (likely(--nfrags == 0))
846 return;
847 /*
848 * Most of the complexity below deals with the possibility we hit the
849 * end of the queue in the middle of writing the SGL. For this case
850 * only we create the SGL in a temporary buffer and then copy it.
851 */
852 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
853
854 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
855 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
856 to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
857 to->addr[0] = cpu_to_be64(addr[i]);
858 to->addr[1] = cpu_to_be64(addr[++i]);
859 }
860 if (nfrags) {
861 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
862 to->len[1] = cpu_to_be32(0);
863 to->addr[0] = cpu_to_be64(addr[i + 1]);
864 }
865 if (unlikely((u8 *)end > (u8 *)q->stat)) {
866 unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
867
868 if (likely(part0))
869 memcpy(sgl->sge, buf, part0);
870 part1 = (u8 *)end - (u8 *)q->stat;
871 memcpy(q->desc, (u8 *)buf + part0, part1);
872 end = (void *)q->desc + part1;
873 }
874 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
875 *end = 0;
876 }
877 EXPORT_SYMBOL(cxgb4_write_sgl);
878
879 /* cxgb4_write_partial_sgl - populate SGL for partial packet
880 * @skb: the packet
881 * @q: the Tx queue we are writing into
882 * @sgl: starting location for writing the SGL
883 * @end: points right after the end of the SGL
884 * @addr: the list of bus addresses for the SGL elements
885 * @start: start offset in the SKB where partial data starts
886 * @len: length of data from @start to send out
887 *
888 * This API will handle sending out partial data of a skb if required.
889 * Unlike cxgb4_write_sgl, @start can be any offset into the skb data,
890 * and @len decides how much data after the @start offset to send out.
891 */
892 void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
893 struct ulptx_sgl *sgl, u64 *end,
894 const dma_addr_t *addr, u32 start, u32 len)
895 {
896 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1] = {0}, *to;
897 u32 frag_size, skb_linear_data_len = skb_headlen(skb);
898 struct skb_shared_info *si = skb_shinfo(skb);
899 u8 i = 0, frag_idx = 0, nfrags = 0;
900 skb_frag_t *frag;
901
902 /* Fill the first SGL either from linear data or from partial
903 * frag based on @start.
904 */
905 if (unlikely(start < skb_linear_data_len)) {
906 frag_size = min(len, skb_linear_data_len - start);
907 sgl->len0 = htonl(frag_size);
908 sgl->addr0 = cpu_to_be64(addr[0] + start);
909 len -= frag_size;
910 nfrags++;
911 } else {
912 start -= skb_linear_data_len;
913 frag = &si->frags[frag_idx];
914 frag_size = skb_frag_size(frag);
915 /* find the first frag */
916 while (start >= frag_size) {
917 start -= frag_size;
918 frag_idx++;
919 frag = &si->frags[frag_idx];
920 frag_size = skb_frag_size(frag);
921 }
922
923 frag_size = min(len, skb_frag_size(frag) - start);
924 sgl->len0 = cpu_to_be32(frag_size);
925 sgl->addr0 = cpu_to_be64(addr[frag_idx + 1] + start);
926 len -= frag_size;
927 nfrags++;
928 frag_idx++;
929 }
930
931 /* If the entire partial data fit in one SGL, then send it out
932 * now.
933 */
934 if (!len)
935 goto done;
936
937 /* Most of the complexity below deals with the possibility we hit the
938 * end of the queue in the middle of writing the SGL. For this case
939 * only we create the SGL in a temporary buffer and then copy it.
940 */
941 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
942
943 /* If the skb couldn't fit in first SGL completely, fill the
944 * rest of the frags in subsequent SGLs. Note that each SGL
945 * pair can store 2 frags.
946 */
947 while (len) {
948 frag_size = min(len, skb_frag_size(&si->frags[frag_idx]));
949 to->len[i & 1] = cpu_to_be32(frag_size);
950 to->addr[i & 1] = cpu_to_be64(addr[frag_idx + 1]);
951 if (i && (i & 1))
952 to++;
953 nfrags++;
954 frag_idx++;
955 i++;
956 len -= frag_size;
957 }
958
959 /* If we ended in an odd boundary, then set the second SGL's
960 * length in the pair to 0.
961 */
962 if (i & 1)
963 to->len[1] = cpu_to_be32(0);
964
965 /* Copy from temporary buffer to Tx ring, in case we hit the
966 * end of the queue in the middle of writing the SGL.
967 */
968 if (unlikely((u8 *)end > (u8 *)q->stat)) {
969 u32 part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
970
971 if (likely(part0))
972 memcpy(sgl->sge, buf, part0);
973 part1 = (u8 *)end - (u8 *)q->stat;
974 memcpy(q->desc, (u8 *)buf + part0, part1);
975 end = (void *)q->desc + part1;
976 }
977
978 /* 0-pad to multiple of 16 */
979 if ((uintptr_t)end & 8)
980 *end = 0;
981 done:
982 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
983 ULPTX_NSGE_V(nfrags));
984 }
985 EXPORT_SYMBOL(cxgb4_write_partial_sgl);
986
987 /* This function copies a 64-byte coalesced work request to
988 * memory-mapped BAR2 space. For a coalesced WR, the SGE fetches
989 * data from the FIFO instead of from the host.
990 */
991 static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
992 {
993 int count = 8;
994
995 while (count) {
996 writeq(*src, dst);
997 src++;
998 dst++;
999 count--;
1000 }
1001 }
1002
1003 /**
1004 * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
1005 * @adap: the adapter
1006 * @q: the Tx queue
1007 * @n: number of new descriptors to give to HW
1008 *
1009 * Ring the doorbell for a Tx queue.
1010 */
1011 inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
1012 {
1013 /* Make sure that all writes to the TX Descriptors are committed
1014 * before we tell the hardware about them.
1015 */
1016 wmb();
1017
1018 /* If we don't have access to the new User Doorbell (T5+), use the old
1019 * doorbell mechanism; otherwise use the new BAR2 mechanism.
1020 */
1021 if (unlikely(q->bar2_addr == NULL)) {
1022 u32 val = PIDX_V(n);
1023 unsigned long flags;
1024
1025 /* For T4 we need to participate in the Doorbell Recovery
1026 * mechanism.
1027 */
1028 spin_lock_irqsave(&q->db_lock, flags);
1029 if (!q->db_disabled)
1030 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
1031 QID_V(q->cntxt_id) | val);
1032 else
1033 q->db_pidx_inc += n;
1034 q->db_pidx = q->pidx;
1035 spin_unlock_irqrestore(&q->db_lock, flags);
1036 } else {
1037 u32 val = PIDX_T5_V(n);
1038
1039 /* T4 and later chips share the same PIDX field offset within
1040 * the doorbell, but T5 and later shrank the field in order to
1041 * gain a bit for Doorbell Priority. The field was absurdly
1042 * large in the first place (14 bits) so we just use the T5
1043 * and later limits and warn if a Queue ID is too large.
1044 */
1045 WARN_ON(val & DBPRIO_F);
1046
1047 /* If we're only writing a single TX Descriptor and we can use
1048 * Inferred QID registers, we can use the Write Combining
1049 * Gather Buffer; otherwise we use the simple doorbell.
1050 */
1051 if (n == 1 && q->bar2_qid == 0) {
1052 int index = (q->pidx
1053 ? (q->pidx - 1)
1054 : (q->size - 1));
1055 u64 *wr = (u64 *)&q->desc[index];
1056
1057 cxgb_pio_copy((u64 __iomem *)
1058 (q->bar2_addr + SGE_UDB_WCDOORBELL),
1059 wr);
1060 } else {
1061 writel(val | QID_V(q->bar2_qid),
1062 q->bar2_addr + SGE_UDB_KDOORBELL);
1063 }
1064
1065 /* This Write Memory Barrier will force the write to the User
1066 * Doorbell area to be flushed. This is needed to prevent
1067 * writes on different CPUs for the same queue from hitting
1068 * the adapter out of order. This is required when some Work
1069 * Requests take the Write Combine Gather Buffer path (user
1070 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
1071 * take the traditional path where we simply increment the
1072 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
1073 * hardware DMA read the actual Work Request.
1074 */
1075 wmb();
1076 }
1077 }
1078 EXPORT_SYMBOL(cxgb4_ring_tx_db);
1079
1080 /**
1081 * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
1082 * @skb: the packet
1083 * @q: the Tx queue where the packet will be inlined
1084 * @pos: starting position in the Tx queue where to inline the packet
1085 *
1086 * Inline a packet's contents directly into Tx descriptors, starting at
1087 * the given position within the Tx DMA ring.
1088 * Most of the complexity of this operation is dealing with wrap arounds
1089 * in the middle of the packet we want to inline.
1090 */
1091 void cxgb4_inline_tx_skb(const struct sk_buff *skb,
1092 const struct sge_txq *q, void *pos)
1093 {
1094 int left = (void *)q->stat - pos;
1095 u64 *p;
1096
1097 if (likely(skb->len <= left)) {
1098 if (likely(!skb->data_len))
1099 skb_copy_from_linear_data(skb, pos, skb->len);
1100 else
1101 skb_copy_bits(skb, 0, pos, skb->len);
1102 pos += skb->len;
1103 } else {
1104 skb_copy_bits(skb, 0, pos, left);
1105 skb_copy_bits(skb, left, q->desc, skb->len - left);
1106 pos = (void *)q->desc + (skb->len - left);
1107 }
1108
1109 /* 0-pad to multiple of 16 */
1110 p = PTR_ALIGN(pos, 8);
1111 if ((uintptr_t)p & 8)
1112 *p = 0;
1113 }
1114 EXPORT_SYMBOL(cxgb4_inline_tx_skb);
1115
1116 static void *inline_tx_skb_header(const struct sk_buff *skb,
1117 const struct sge_txq *q, void *pos,
1118 int length)
1119 {
1120 u64 *p;
1121 int left = (void *)q->stat - pos;
1122
1123 if (likely(length <= left)) {
1124 memcpy(pos, skb->data, length);
1125 pos += length;
1126 } else {
1127 memcpy(pos, skb->data, left);
1128 memcpy(q->desc, skb->data + left, length - left);
1129 pos = (void *)q->desc + (length - left);
1130 }
1131 /* 0-pad to multiple of 16 */
1132 p = PTR_ALIGN(pos, 8);
1133 if ((uintptr_t)p & 8) {
1134 *p = 0;
1135 return p + 1;
1136 }
1137 return p;
1138 }
1139
1140 /*
1141 * Figure out what HW csum a packet wants and return the appropriate control
1142 * bits.
1143 */
1144 static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
1145 {
1146 int csum_type;
1147 bool inner_hdr_csum = false;
1148 u16 proto, ver;
1149
1150 if (skb->encapsulation &&
1151 (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5))
1152 inner_hdr_csum = true;
1153
1154 if (inner_hdr_csum) {
1155 ver = inner_ip_hdr(skb)->version;
1156 proto = (ver == 4) ? inner_ip_hdr(skb)->protocol :
1157 inner_ipv6_hdr(skb)->nexthdr;
1158 } else {
1159 ver = ip_hdr(skb)->version;
1160 proto = (ver == 4) ? ip_hdr(skb)->protocol :
1161 ipv6_hdr(skb)->nexthdr;
1162 }
1163
1164 if (ver == 4) {
1165 if (proto == IPPROTO_TCP)
1166 csum_type = TX_CSUM_TCPIP;
1167 else if (proto == IPPROTO_UDP)
1168 csum_type = TX_CSUM_UDPIP;
1169 else {
1170 nocsum: /*
1171 * unknown protocol, disable HW csum
1172 * and hope a bad packet is detected
1173 */
1174 return TXPKT_L4CSUM_DIS_F;
1175 }
1176 } else {
1177 /*
1178 * this doesn't work with extension headers
1179 */
1180 if (proto == IPPROTO_TCP)
1181 csum_type = TX_CSUM_TCPIP6;
1182 else if (proto == IPPROTO_UDP)
1183 csum_type = TX_CSUM_UDPIP6;
1184 else
1185 goto nocsum;
1186 }
1187
1188 if (likely(csum_type >= TX_CSUM_TCPIP)) {
1189 int eth_hdr_len, l4_len;
1190 u64 hdr_len;
1191
1192 if (inner_hdr_csum) {
1193 /* This allows checksum offload for all encapsulated
1194 * packets like GRE etc..
1195 */
1196 l4_len = skb_inner_network_header_len(skb);
1197 eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN;
1198 } else {
1199 l4_len = skb_network_header_len(skb);
1200 eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
1201 }
1202 hdr_len = TXPKT_IPHDR_LEN_V(l4_len);
1203
1204 if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
1205 hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1206 else
1207 hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1208 return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
1209 } else {
1210 int start = skb_transport_offset(skb);
1211
1212 return TXPKT_CSUM_TYPE_V(csum_type) |
1213 TXPKT_CSUM_START_V(start) |
1214 TXPKT_CSUM_LOC_V(start + skb->csum_offset);
1215 }
1216 }
1217
1218 static void eth_txq_stop(struct sge_eth_txq *q)
1219 {
1220 netif_tx_stop_queue(q->txq);
1221 q->q.stops++;
1222 }
1223
1224 static inline void txq_advance(struct sge_txq *q, unsigned int n)
1225 {
1226 q->in_use += n;
1227 q->pidx += n;
1228 if (q->pidx >= q->size)
1229 q->pidx -= q->size;
1230 }
1231
1232 #ifdef CONFIG_CHELSIO_T4_FCOE
1233 static inline int
1234 cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
1235 const struct port_info *pi, u64 *cntrl)
1236 {
1237 const struct cxgb_fcoe *fcoe = &pi->fcoe;
1238
1239 if (!(fcoe->flags & CXGB_FCOE_ENABLED))
1240 return 0;
1241
1242 if (skb->protocol != htons(ETH_P_FCOE))
1243 return 0;
1244
1245 skb_reset_mac_header(skb);
1246 skb->mac_len = sizeof(struct ethhdr);
1247
1248 skb_set_network_header(skb, skb->mac_len);
1249 skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
1250
1251 if (!cxgb_fcoe_sof_eof_supported(adap, skb))
1252 return -ENOTSUPP;
1253
1254 /* FC CRC offload */
1255 *cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
1256 TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
1257 TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
1258 TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
1259 TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
1260 return 0;
1261 }
1262 #endif /* CONFIG_CHELSIO_T4_FCOE */
1263
1264 /* Returns the tunnel type if the hardware supports offloading it.
1265 * It is called only for T5 and later chips.
1266 */
1267 enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
1268 {
1269 u8 l4_hdr = 0;
1270 enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
1271 struct port_info *pi = netdev_priv(skb->dev);
1272 struct adapter *adapter = pi->adapter;
1273
1274 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
1275 skb->inner_protocol != htons(ETH_P_TEB))
1276 return tnl_type;
1277
1278 switch (vlan_get_protocol(skb)) {
1279 case htons(ETH_P_IP):
1280 l4_hdr = ip_hdr(skb)->protocol;
1281 break;
1282 case htons(ETH_P_IPV6):
1283 l4_hdr = ipv6_hdr(skb)->nexthdr;
1284 break;
1285 default:
1286 return tnl_type;
1287 }
1288
1289 switch (l4_hdr) {
1290 case IPPROTO_UDP:
1291 if (adapter->vxlan_port == udp_hdr(skb)->dest)
1292 tnl_type = TX_TNL_TYPE_VXLAN;
1293 else if (adapter->geneve_port == udp_hdr(skb)->dest)
1294 tnl_type = TX_TNL_TYPE_GENEVE;
1295 break;
1296 default:
1297 return tnl_type;
1298 }
1299
1300 return tnl_type;
1301 }
1302
1303 static inline void t6_fill_tnl_lso(struct sk_buff *skb,
1304 struct cpl_tx_tnl_lso *tnl_lso,
1305 enum cpl_tx_tnl_lso_type tnl_type)
1306 {
1307 u32 val;
1308 int in_eth_xtra_len;
1309 int l3hdr_len = skb_network_header_len(skb);
1310 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1311 const struct skb_shared_info *ssi = skb_shinfo(skb);
1312 bool v6 = (ip_hdr(skb)->version == 6);
1313
1314 val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
1315 CPL_TX_TNL_LSO_FIRST_F |
1316 CPL_TX_TNL_LSO_LAST_F |
1317 (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
1318 CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
1319 CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
1320 (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
1321 CPL_TX_TNL_LSO_IPLENSETOUT_F |
1322 (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
1323 tnl_lso->op_to_IpIdSplitOut = htonl(val);
1324
1325 tnl_lso->IpIdOffsetOut = 0;
1326
1327 /* Get the tunnel header length */
1328 val = skb_inner_mac_header(skb) - skb_mac_header(skb);
1329 in_eth_xtra_len = skb_inner_network_header(skb) -
1330 skb_inner_mac_header(skb) - ETH_HLEN;
1331
1332 switch (tnl_type) {
1333 case TX_TNL_TYPE_VXLAN:
1334 case TX_TNL_TYPE_GENEVE:
1335 tnl_lso->UdpLenSetOut_to_TnlHdrLen =
1336 htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
1337 CPL_TX_TNL_LSO_UDPLENSETOUT_F);
1338 break;
1339 default:
1340 tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
1341 break;
1342 }
1343
1344 tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
1345 htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
1346 CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));
1347
1348 tnl_lso->r1 = 0;
1349
1350 val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
1351 CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
1352 CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
1353 CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
1354 tnl_lso->Flow_to_TcpHdrLen = htonl(val);
1355
1356 tnl_lso->IpIdOffset = htons(0);
1357
1358 tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
1359 tnl_lso->TCPSeqOffset = htonl(0);
1360 tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
1361 }
1362
1363 static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb,
1364 struct cpl_tx_pkt_lso_core *lso)
1365 {
1366 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1367 int l3hdr_len = skb_network_header_len(skb);
1368 const struct skb_shared_info *ssi;
1369 bool ipv6 = false;
1370
1371 ssi = skb_shinfo(skb);
1372 if (ssi->gso_type & SKB_GSO_TCPV6)
1373 ipv6 = true;
1374
1375 lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
1376 LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
1377 LSO_IPV6_V(ipv6) |
1378 LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
1379 LSO_IPHDR_LEN_V(l3hdr_len / 4) |
1380 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1381 lso->ipid_ofst = htons(0);
1382 lso->mss = htons(ssi->gso_size);
1383 lso->seqno_offset = htonl(0);
1384 if (is_t4(adap->params.chip))
1385 lso->len = htonl(skb->len);
1386 else
1387 lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
1388
1389 return (void *)(lso + 1);
1390 }
1391
1392 /**
1393 * t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
1394 * @adap: the adapter
1395 * @eq: the Ethernet TX Queue
1396 * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
1397 *
1398 * We're typically called here to update the state of an Ethernet TX
1399 * Queue with respect to the hardware's progress in consuming the TX
1400 * Work Requests that we've put on that Egress Queue. This happens
1401 * when we get Egress Queue Update messages and also prophylactically
1402 * in regular timer-based Ethernet TX Queue maintenance.
1403 */
1404 int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
1405 int maxreclaim)
1406 {
1407 unsigned int reclaimed, hw_cidx;
1408 struct sge_txq *q = &eq->q;
1409 int hw_in_use;
1410
1411 if (!q->in_use || !__netif_tx_trylock(eq->txq))
1412 return 0;
1413
1414 /* Reclaim pending completed TX Descriptors. */
1415 reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
1416
1417 hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
1418 hw_in_use = q->pidx - hw_cidx;
1419 if (hw_in_use < 0)
1420 hw_in_use += q->size;
1421
1422 /* If the TX Queue is currently stopped and there's now more than half
1423 * the queue available, restart it. Otherwise bail out since the rest
1424 * of what we want to do here deals with the possibility of shipping any
1425 * currently buffered Coalesced TX Work Request.
1426 */
1427 if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
1428 netif_tx_wake_queue(eq->txq);
1429 eq->q.restarts++;
1430 }
1431
1432 __netif_tx_unlock(eq->txq);
1433 return reclaimed;
1434 }
1435
1436 static inline int cxgb4_validate_skb(struct sk_buff *skb,
1437 struct net_device *dev,
1438 u32 min_pkt_len)
1439 {
1440 u32 max_pkt_len;
1441
1442 /* The chip min packet length is 10 octets but some firmware
1443 * commands have a minimum packet length requirement. So, play
1444 * safe and reject anything shorter than @min_pkt_len.
1445 */
1446 if (unlikely(skb->len < min_pkt_len))
1447 return -EINVAL;
1448
1449 /* Discard the packet if the length is greater than mtu */
1450 max_pkt_len = ETH_HLEN + dev->mtu;
1451
1452 if (skb_vlan_tagged(skb))
1453 max_pkt_len += VLAN_HLEN;
1454
1455 if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
1456 return -EINVAL;
1457
1458 return 0;
1459 }
1460
1461 static void *write_eo_udp_wr(struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
1462 u32 hdr_len)
1463 {
1464 wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
1465 wr->u.udpseg.ethlen = skb_network_offset(skb);
1466 wr->u.udpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
1467 wr->u.udpseg.udplen = sizeof(struct udphdr);
1468 wr->u.udpseg.rtplen = 0;
1469 wr->u.udpseg.r4 = 0;
1470 if (skb_shinfo(skb)->gso_size)
1471 wr->u.udpseg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1472 else
1473 wr->u.udpseg.mss = cpu_to_be16(skb->len - hdr_len);
1474 wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
1475 wr->u.udpseg.plen = cpu_to_be32(skb->len - hdr_len);
1476
1477 return (void *)(wr + 1);
1478 }
1479
1480 /**
1481 * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
1482 * @skb: the packet
1483 * @dev: the egress net device
1484 *
1485 * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
1486 */
1487 static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1488 {
1489 enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
1490 bool ptp_enabled = is_ptp_enabled(skb, dev);
1491 unsigned int last_desc, flits, ndesc;
1492 u32 wr_mid, ctrl0, op, sgl_off = 0;
1493 const struct skb_shared_info *ssi;
1494 int len, qidx, credits, ret, left;
1495 struct tx_sw_desc *sgl_sdesc;
1496 struct fw_eth_tx_eo_wr *eowr;
1497 struct fw_eth_tx_pkt_wr *wr;
1498 struct cpl_tx_pkt_core *cpl;
1499 const struct port_info *pi;
1500 bool immediate = false;
1501 u64 cntrl, *end, *sgl;
1502 struct sge_eth_txq *q;
1503 unsigned int chip_ver;
1504 struct adapter *adap;
1505
1506 ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
1507 if (ret)
1508 goto out_free;
1509
1510 pi = netdev_priv(dev);
1511 adap = pi->adapter;
1512 ssi = skb_shinfo(skb);
1513 #if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
1514 if (xfrm_offload(skb) && !ssi->gso_size)
1515 return adap->uld[CXGB4_ULD_IPSEC].tx_handler(skb, dev);
1516 #endif /* CHELSIO_IPSEC_INLINE */
1517
1518 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
1519 if (tls_is_skb_tx_device_offloaded(skb) &&
1520 (skb->len - skb_tcp_all_headers(skb)))
1521 return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
1522 #endif /* CHELSIO_TLS_DEVICE */
1523
1524 qidx = skb_get_queue_mapping(skb);
1525 if (ptp_enabled) {
1526 if (!(adap->ptp_tx_skb)) {
1527 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1528 adap->ptp_tx_skb = skb_get(skb);
1529 } else {
1530 goto out_free;
1531 }
1532 q = &adap->sge.ptptxq;
1533 } else {
1534 q = &adap->sge.ethtxq[qidx + pi->first_qset];
1535 }
1536
1537 reclaim_completed_tx(adap, &q->q, -1, true);
1538 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
1539
1540 #ifdef CONFIG_CHELSIO_T4_FCOE
1541 ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
1542 if (unlikely(ret == -EOPNOTSUPP))
1543 goto out_free;
1544 #endif /* CONFIG_CHELSIO_T4_FCOE */
1545
1546 chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
1547 flits = calc_tx_flits(skb, chip_ver);
1548 ndesc = flits_to_desc(flits);
1549 credits = txq_avail(&q->q) - ndesc;
1550
1551 if (unlikely(credits < 0)) {
1552 eth_txq_stop(q);
1553 dev_err(adap->pdev_dev,
1554 "%s: Tx ring %u full while queue awake!\n",
1555 dev->name, qidx);
1556 return NETDEV_TX_BUSY;
1557 }
1558
1559 if (is_eth_imm(skb, chip_ver))
1560 immediate = true;
1561
1562 if (skb->encapsulation && chip_ver > CHELSIO_T5)
1563 tnl_type = cxgb_encap_offload_supported(skb);
1564
1565 last_desc = q->q.pidx + ndesc - 1;
1566 if (last_desc >= q->q.size)
1567 last_desc -= q->q.size;
1568 sgl_sdesc = &q->q.sdesc[last_desc];
1569
1570 if (!immediate &&
1571 unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
1572 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
1573 q->mapping_err++;
1574 goto out_free;
1575 }
1576
1577 wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1578 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1579 /* After we're done injecting the Work Request for this
1580 * packet, we'll be below our "stop threshold" so stop the TX
1581 * Queue now and schedule a request for an SGE Egress Queue
1582 * Update message. The queue will get started later on when
1583 * the firmware processes this Work Request and sends us an
1584 * Egress Queue Status Update message indicating that space
1585 * has opened up.
1586 */
1587 eth_txq_stop(q);
1588 if (chip_ver > CHELSIO_T5)
1589 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1590 }
1591
1592 wr = (void *)&q->q.desc[q->q.pidx];
1593 eowr = (void *)&q->q.desc[q->q.pidx];
1594 wr->equiq_to_len16 = htonl(wr_mid);
1595 wr->r3 = cpu_to_be64(0);
1596 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
1597 end = (u64 *)eowr + flits;
1598 else
1599 end = (u64 *)wr + flits;
1600
1601 len = immediate ? skb->len : 0;
1602 len += sizeof(*cpl);
1603 if (ssi->gso_size && !(ssi->gso_type & SKB_GSO_UDP_L4)) {
1604 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
1605 struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
1606
1607 if (tnl_type)
1608 len += sizeof(*tnl_lso);
1609 else
1610 len += sizeof(*lso);
1611
1612 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1613 FW_WR_IMMDLEN_V(len));
1614 if (tnl_type) {
1615 struct iphdr *iph = ip_hdr(skb);
1616
1617 t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
1618 cpl = (void *)(tnl_lso + 1);
1619 /* Driver is expected to compute partial checksum that
1620 * does not include the IP Total Length.
1621 */
1622 if (iph->version == 4) {
1623 iph->check = 0;
1624 iph->tot_len = 0;
1625 iph->check = ~ip_fast_csum((u8 *)iph, iph->ihl);
1626 }
1627 if (skb->ip_summed == CHECKSUM_PARTIAL)
1628 cntrl = hwcsum(adap->params.chip, skb);
1629 } else {
1630 cpl = write_tso_wr(adap, skb, lso);
1631 cntrl = hwcsum(adap->params.chip, skb);
1632 }
1633 sgl = (u64 *)(cpl + 1); /* sgl start here */
		q->tso++;
		q->tx_cso += ssi->gso_segs;
	} else if (ssi->gso_size) {
		u64 *start;
		u32 hdrlen;

		hdrlen = eth_get_headlen(dev, skb->data, skb_headlen(skb));
		len += hdrlen;
		wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
					     FW_ETH_TX_EO_WR_IMMDLEN_V(len));
		cpl = write_eo_udp_wr(skb, eowr, hdrlen);
		cntrl = hwcsum(adap->params.chip, skb);

		start = (u64 *)(cpl + 1);
		sgl = (u64 *)inline_tx_skb_header(skb, &q->q, (void *)start,
						  hdrlen);
		if (unlikely(start > sgl)) {
			left = (u8 *)end - (u8 *)q->q.stat;
			end = (void *)q->q.desc + left;
		}
		sgl_off = hdrlen;
		q->uso++;
		q->tx_cso += ssi->gso_segs;
	} else {
		if (ptp_enabled)
			op = FW_PTP_TX_PKT_WR;
		else
			op = FW_ETH_TX_PKT_WR;
		wr->op_immdlen = htonl(FW_WR_OP_V(op) |
				       FW_WR_IMMDLEN_V(len));
		cpl = (void *)(wr + 1);
		sgl = (u64 *)(cpl + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(adap->params.chip, skb) |
				TXPKT_IPCSUM_DIS_F;
			q->tx_cso++;
		}
	}

	if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
		/* If current position is already at the end of the
		 * txq, reset the current to point to start of the queue
		 * and update the end ptr as well.
		 */
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
		sgl = (void *)q->q.desc;
	}

	if (skb_vlan_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
#ifdef CONFIG_CHELSIO_T4_FCOE
		if (skb->protocol == htons(ETH_P_FCOE))
			cntrl |= TXPKT_VLAN_V(
				 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
#endif /* CONFIG_CHELSIO_T4_FCOE */
	}

	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
		TXPKT_PF_V(adap->pf);
	if (ptp_enabled)
		ctrl0 |= TXPKT_TSTAMP_F;
#ifdef CONFIG_CHELSIO_T4_DCB
	if (is_t4(adap->params.chip))
		ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
	else
		ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
#endif
	cpl->ctrl0 = htonl(ctrl0);
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	skb_tx_timestamp(skb);

	if (immediate) {
		cxgb4_inline_tx_skb(skb, &q->q, sgl);
		dev_consume_skb_any(skb);
	} else {
		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, sgl_off,
				sgl_sdesc->addr);
		skb_orphan(skb);
		sgl_sdesc->skb = skb;
	}

	txq_advance(&q->q, ndesc);

	cxgb4_ring_tx_db(adap, &q->q, ndesc);
	return NETDEV_TX_OK;

out_free:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/* Constants ... */
enum {
	/* Egress Queue sizes, producer and consumer indices are all in units
	 * of Egress Context Units bytes.  Note that as far as the hardware is
	 * concerned, the free list is an Egress Queue (the host produces free
	 * buffers which the hardware consumes) and free list entries are
	 * 64-bit PCI DMA addresses.
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),

	T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			       sizeof(struct cpl_tx_pkt_lso_core) +
			       sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
};
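
/* Worked example of the units above (illustrative only; the real value
 * comes from SGE_EQ_IDXSIZE in the hardware headers): assuming an Egress
 * Context Unit of 64 bytes, one EQ_UNIT spans 64 / sizeof(__be64) = 8
 * 64-bit flits, so FL_PER_EQ_UNIT and TXD_PER_EQ_UNIT both evaluate to 8.
 * The WARN_ON() in cxgb4_vf_eth_xmit() below relies on
 * T4VF_ETHTXQ_MAX_HDR fitting within one such unit.
 */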

/**
 * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
 * @skb: the packet
 *
 * Returns whether an Ethernet packet is small enough to fit completely as
 * immediate data.
 */
static inline int t4vf_is_eth_imm(const struct sk_buff *skb)
{
	/* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
	 * which does not accommodate immediate data.  We could dike out all
	 * of the support code for immediate data but that would tie our hands
	 * too much if we ever want to enhance the firmware.  It would also
	 * create more differences between the PF and VF Drivers.
	 */
	return false;
}

/**
 * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
 * @skb: the packet
 *
 * Returns the number of flits needed for a TX Work Request for the
 * given Ethernet packet, including the needed WR and CPL headers.
 */
static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
{
	unsigned int flits;

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */
	if (t4vf_is_eth_imm(skb))
		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
				    sizeof(__be64));

	/* Otherwise, we're going to have to construct a Scatter/Gather List
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
	if (skb_shinfo(skb)->gso_size)
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	else
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	return flits;
}
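
/* Worked example of the flit math above (illustrative; sgl_len() is the
 * helper defined earlier in this file): for a non-GSO skb with a linear
 * head plus two page fragments, the Scatter/Gather List needs
 * sgl_len(3) = 5 flits, to which the WR + CPL header flits
 * ((sizeof(struct fw_eth_tx_pkt_vm_wr) +
 *   sizeof(struct cpl_tx_pkt_core)) / 8) are added.
 */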

/**
 * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
 * @skb: the packet
 * @dev: the egress net device
 *
 * Add a packet to an SGE Ethernet TX queue.  Runs with softirqs disabled.
 */
static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	unsigned int last_desc, flits, ndesc;
	const struct skb_shared_info *ssi;
	struct fw_eth_tx_pkt_vm_wr *wr;
	struct tx_sw_desc *sgl_sdesc;
	struct cpl_tx_pkt_core *cpl;
	const struct port_info *pi;
	struct sge_eth_txq *txq;
	struct adapter *adapter;
	int qidx, credits, ret;
	size_t fw_hdr_copy_len;
	unsigned int chip_ver;
	u64 cntrl, *end;
	u32 wr_mid;

	/* The chip minimum packet length is 10 octets but the firmware
	 * command that we are using requires that we copy the Ethernet header
	 * (including the VLAN tag) into the header so we reject anything
	 * smaller than that ...
	 */
	BUILD_BUG_ON(sizeof(wr->firmware) !=
		     (sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
		      sizeof(wr->ethtype) + sizeof(wr->vlantci)));
	fw_hdr_copy_len = sizeof(wr->firmware);
	ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len);
	if (ret)
		goto out_free;

	/* Figure out which TX Queue we're going to use. */
	pi = netdev_priv(dev);
	adapter = pi->adapter;
	qidx = skb_get_queue_mapping(skb);
	WARN_ON(qidx >= pi->nqsets);
	txq = &adapter->sge.ethtxq[pi->first_qset + qidx];

	/* Take this opportunity to reclaim any TX Descriptors whose DMA
	 * transfers have completed.
	 */
	reclaim_completed_tx(adapter, &txq->q, -1, true);

	/* Calculate the number of flits and TX Descriptors we're going to
	 * need along with how many TX Descriptors will be left over after
	 * we inject our Work Request.
	 */
	flits = t4vf_calc_tx_flits(skb);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&txq->q) - ndesc;

	if (unlikely(credits < 0)) {
		/* Not enough room for this packet's Work Request.  Stop the
		 * TX Queue and return a "busy" condition.  The queue will get
		 * started later on when the firmware informs us that space
		 * has opened up.
		 */
		eth_txq_stop(txq);
		dev_err(adapter->pdev_dev,
			"%s: TX ring %u full while queue awake!\n",
			dev->name, qidx);
		return NETDEV_TX_BUSY;
	}

	last_desc = txq->q.pidx + ndesc - 1;
	if (last_desc >= txq->q.size)
		last_desc -= txq->q.size;
	sgl_sdesc = &txq->q.sdesc[last_desc];

	if (!t4vf_is_eth_imm(skb) &&
	    unlikely(cxgb4_map_skb(adapter->pdev_dev, skb,
				   sgl_sdesc->addr) < 0)) {
		/* We need to map the skb into PCI DMA space (because it can't
		 * be in-lined directly into the Work Request) and the mapping
		 * operation failed.  Record the error and drop the packet.
		 */
		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
		txq->mapping_err++;
		goto out_free;
	}

	chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		/* After we're done injecting the Work Request for this
		 * packet, we'll be below our "stop threshold" so stop the TX
		 * Queue now and schedule a request for an SGE Egress Queue
		 * Update message.  The queue will get started later on when
		 * the firmware processes this Work Request and sends us an
		 * Egress Queue Status Update message indicating that space
		 * has opened up.
		 */
		eth_txq_stop(txq);
		if (chip_ver > CHELSIO_T5)
			wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}

	/* Start filling in our Work Request.  Note that we do _not_ handle
	 * the WR Header wrapping around the TX Descriptor Ring.  If our
	 * maximum header size ever exceeds one TX Descriptor, we'll need to
	 * do something else here.
	 */
	WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
	wr = (void *)&txq->q.desc[txq->q.pidx];
	wr->equiq_to_len16 = cpu_to_be32(wr_mid);
	wr->r3[0] = cpu_to_be32(0);
	wr->r3[1] = cpu_to_be32(0);
	skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len);
	end = (u64 *)wr + flits;

	/* If this is a Large Send Offload packet we'll put in an LSO CPL
	 * message with an encapsulated TX Packet CPL message.  Otherwise we
	 * just use a TX Packet CPL message.
	 */
	ssi = skb_shinfo(skb);
	if (ssi->gso_size) {
		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
		int l3hdr_len = skb_network_header_len(skb);
		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;

		wr->op_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
				    FW_WR_IMMDLEN_V(sizeof(*lso) +
						    sizeof(*cpl)));
		/* Fill in the LSO CPL message. */
		lso->lso_ctrl =
			cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
				    LSO_FIRST_SLICE_F |
				    LSO_LAST_SLICE_F |
				    LSO_IPV6_V(v6) |
				    LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
				    LSO_IPHDR_LEN_V(l3hdr_len / 4) |
				    LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
		lso->ipid_ofst = cpu_to_be16(0);
		lso->mss = cpu_to_be16(ssi->gso_size);
		lso->seqno_offset = cpu_to_be32(0);
		if (is_t4(adapter->params.chip))
			lso->len = cpu_to_be32(skb->len);
		else
			lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));

		/* Set up TX Packet CPL pointer, control word and perform
		 * accounting.
		 */
		cpl = (void *)(lso + 1);

		if (chip_ver <= CHELSIO_T5)
			cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
		else
			cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);

		cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
					   TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
			 TXPKT_IPHDR_LEN_V(l3hdr_len);
		txq->tso++;
		txq->tx_cso += ssi->gso_segs;
	} else {
		int len;

		len = (t4vf_is_eth_imm(skb)
		       ? skb->len + sizeof(*cpl)
		       : sizeof(*cpl));
		wr->op_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
				    FW_WR_IMMDLEN_V(len));

		/* Set up TX Packet CPL pointer, control word and perform
		 * accounting.
		 */
		cpl = (void *)(wr + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(adapter->params.chip, skb) |
				TXPKT_IPCSUM_DIS_F;
			txq->tx_cso++;
		} else {
			cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
		}
	}

	/* If there's a VLAN tag present, add that to the list of things to
	 * do in this Work Request.
	 */
	if (skb_vlan_tag_present(skb)) {
		txq->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
	}

	/* Fill in the TX Packet CPL message header. */
	cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
				 TXPKT_INTF_V(pi->port_id) |
				 TXPKT_PF_V(0));
	cpl->pack = cpu_to_be16(0);
	cpl->len = cpu_to_be16(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	/* Fill in the body of the TX Packet CPL message with either in-lined
	 * data or a Scatter/Gather List.
	 */
	if (t4vf_is_eth_imm(skb)) {
		/* In-line the packet's data and free the skb since we don't
		 * need it any longer.
		 */
		cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
		dev_consume_skb_any(skb);
	} else {
		/* Write the skb's Scatter/Gather list into the TX Packet CPL
		 * message and retain a pointer to the skb so we can free it
		 * later when its DMA completes.  (We store the skb pointer
		 * in the Software Descriptor corresponding to the last TX
		 * Descriptor used by the Work Request.)
		 *
		 * The retained skb will be freed when the corresponding TX
		 * Descriptors are reclaimed after their DMAs complete.
		 * However, this could take quite a while since, in general,
		 * the hardware is set up to be lazy about sending DMA
		 * completion notifications to us and we mostly perform TX
		 * reclaims in the transmit routine.
		 *
		 * This is good for performance but means that we rely on new
		 * TX packets arriving to run the destructors of completed
		 * packets, which open up space in their sockets' send queues.
		 * Sometimes we do not get such new packets causing TX to
		 * stall.  A single UDP transmitter is a good example of this
		 * situation.  We have a clean up timer that periodically
		 * reclaims completed packets but it doesn't run often enough
		 * (nor do we want it to) to prevent lengthy stalls.  A
		 * solution to this problem is to run the destructor early,
		 * after the packet is queued but before it's DMAd.  A con is
		 * that we lie to socket memory accounting, but the amount of
		 * extra memory is reasonable (limited by the number of TX
		 * descriptors), the packets do actually get freed quickly by
		 * new packets almost always, and for protocols like TCP that
		 * wait for acks to really free up the data the extra memory
		 * is even less.  On the positive side we run the destructors
		 * on the sending CPU rather than on a potentially different
		 * completing CPU, usually a good thing.
		 *
		 * Run the destructor before telling the DMA engine about the
		 * packet to make sure it doesn't complete and get freed
		 * prematurely.
		 */
		struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
		struct sge_txq *tq = &txq->q;

		/* If the Work Request header was an exact multiple of our TX
		 * Descriptor length, then it's possible that the starting SGL
		 * pointer lines up exactly with the end of our TX Descriptor
		 * ring.  If that's the case, wrap around to the beginning
		 * here ...
		 */
		if (unlikely((void *)sgl == (void *)tq->stat)) {
			sgl = (void *)tq->desc;
			end = (void *)((void *)tq->desc +
				       ((void *)end - (void *)tq->stat));
		}

		cxgb4_write_sgl(skb, tq, sgl, end, 0, sgl_sdesc->addr);
		skb_orphan(skb);
		sgl_sdesc->skb = skb;
	}

	/* Advance our internal TX Queue state, tell the hardware about
	 * the new TX descriptors and return success.
	 */
	txq_advance(&txq->q, ndesc);

	cxgb4_ring_tx_db(adapter, &txq->q, ndesc);
	return NETDEV_TX_OK;

out_free:
	/* An error of some sort happened.  Free the TX skb and tell the
	 * OS that we've "dealt" with the packet ...
	 */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 * @q: the SGE control Tx queue
 *
 * This is a variant of cxgb4_reclaim_completed_tx() that is used
 * for Tx queues that send only immediate data (presently just
 * the control queues) and thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
	int reclaim = hw_cidx - q->cidx;

	if (reclaim < 0)
		reclaim += q->size;

	q->in_use -= reclaim;
	q->cidx = hw_cidx;
}
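
/* Example of the wrap-around arithmetic above (hypothetical values): with
 * q->size = 1024, q->cidx = 1000 and a hardware cidx of 8, the raw
 * difference is -992; adding q->size yields 32 reclaimed descriptors.
 */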

static inline void eosw_txq_advance_index(u32 *idx, u32 n, u32 max)
{
	u32 val = *idx + n;

	if (val >= max)
		val -= max;

	*idx = val;
}

void cxgb4_eosw_txq_free_desc(struct adapter *adap,
			      struct sge_eosw_txq *eosw_txq, u32 ndesc)
{
	struct tx_sw_desc *d;

	d = &eosw_txq->desc[eosw_txq->last_cidx];
	while (ndesc--) {
		if (d->skb) {
			if (d->addr[0]) {
				unmap_skb(adap->pdev_dev, d->skb, d->addr);
				memset(d->addr, 0, sizeof(d->addr));
			}
			dev_consume_skb_any(d->skb);
			d->skb = NULL;
		}
		eosw_txq_advance_index(&eosw_txq->last_cidx, 1,
				       eosw_txq->ndesc);
		d = &eosw_txq->desc[eosw_txq->last_cidx];
	}
}

static inline void eosw_txq_advance(struct sge_eosw_txq *eosw_txq, u32 n)
{
	eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc);
	eosw_txq->inuse += n;
}

static inline int eosw_txq_enqueue(struct sge_eosw_txq *eosw_txq,
				   struct sk_buff *skb)
{
	if (eosw_txq->inuse == eosw_txq->ndesc)
		return -ENOMEM;

	eosw_txq->desc[eosw_txq->pidx].skb = skb;
	return 0;
}

static inline struct sk_buff *eosw_txq_peek(struct sge_eosw_txq *eosw_txq)
{
	return eosw_txq->desc[eosw_txq->last_pidx].skb;
}

static inline u8 ethofld_calc_tx_flits(struct adapter *adap,
				       struct sk_buff *skb, u32 hdr_len)
{
	u8 flits, nsgl = 0;
	u32 wrlen;

	wrlen = sizeof(struct fw_eth_tx_eo_wr) + sizeof(struct cpl_tx_pkt_core);
	if (skb_shinfo(skb)->gso_size &&
	    !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
		wrlen += sizeof(struct cpl_tx_pkt_lso_core);

	wrlen += roundup(hdr_len, 16);

	/* Packet headers + WR + CPLs */
	flits = DIV_ROUND_UP(wrlen, 8);

	if (skb_shinfo(skb)->nr_frags > 0) {
		if (skb_headlen(skb) - hdr_len)
			nsgl = sgl_len(skb_shinfo(skb)->nr_frags + 1);
		else
			nsgl = sgl_len(skb_shinfo(skb)->nr_frags);
	} else if (skb->len - hdr_len) {
		nsgl = sgl_len(1);
	}

	return flits + nsgl;
}
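
/* Worked example (illustrative sizes): for a TCP skb with hdr_len = 54,
 * no LSO and two page fragments, the header region contributes
 * DIV_ROUND_UP(WR + CPL + roundup(54, 16), 8) flits, and the payload
 * needs sgl_len(3) = 5 more flits if the linear head extends past the
 * headers, or sgl_len(2) = 4 flits if it does not.
 */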

static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq,
			 struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
			 u32 hdr_len, u32 wrlen)
{
	const struct skb_shared_info *ssi = skb_shinfo(skb);
	struct cpl_tx_pkt_core *cpl;
	u32 immd_len, wrlen16;
	bool compl = false;
	u8 ver, proto;

	ver = ip_hdr(skb)->version;
	proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr : ip_hdr(skb)->protocol;

	wrlen16 = DIV_ROUND_UP(wrlen, 16);
	immd_len = sizeof(struct cpl_tx_pkt_core);
	if (skb_shinfo(skb)->gso_size &&
	    !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
		immd_len += sizeof(struct cpl_tx_pkt_lso_core);
	immd_len += hdr_len;

	if (!eosw_txq->ncompl ||
	    (eosw_txq->last_compl + wrlen16) >=
	    (adap->params.ofldq_wr_cred / 2)) {
		compl = true;
		eosw_txq->ncompl++;
		eosw_txq->last_compl = 0;
	}

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
				     FW_ETH_TX_EO_WR_IMMDLEN_V(immd_len) |
				     FW_WR_COMPL_V(compl));
	wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) |
					 FW_WR_FLOWID_V(eosw_txq->hwtid));
	wr->r3 = 0;
	if (proto == IPPROTO_UDP) {
		cpl = write_eo_udp_wr(skb, wr, hdr_len);
	} else {
		wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
		wr->u.tcpseg.ethlen = skb_network_offset(skb);
		wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
		wr->u.tcpseg.tcplen = tcp_hdrlen(skb);
		wr->u.tcpseg.tsclk_tsoff = 0;
		wr->u.tcpseg.r4 = 0;
		wr->u.tcpseg.r5 = 0;
		wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len);

		if (ssi->gso_size) {
			struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);

			wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size);
			cpl = write_tso_wr(adap, skb, lso);
		} else {
			wr->u.tcpseg.mss = cpu_to_be16(0xffff);
			cpl = (void *)(wr + 1);
		}
	}

	eosw_txq->cred -= wrlen16;
	eosw_txq->last_compl += wrlen16;
	return cpl;
}
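
/* Completion pacing sketch (illustrative numbers): with
 * adap->params.ofldq_wr_cred = 512, write_eo_wr() sets FW_WR_COMPL when no
 * completion is pending and then again whenever roughly 256 16-byte
 * credit units have been consumed since the last request, so the firmware
 * returns credits about every half credit window.
 */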

static int ethofld_hard_xmit(struct net_device *dev,
			     struct sge_eosw_txq *eosw_txq)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u32 wrlen, wrlen16, hdr_len, data_len;
	enum sge_eosw_state next_state;
	u64 cntrl, *start, *end, *sgl;
	struct sge_eohw_txq *eohw_txq;
	struct cpl_tx_pkt_core *cpl;
	struct fw_eth_tx_eo_wr *wr;
	bool skip_eotx_wr = false;
	struct tx_sw_desc *d;
	struct sk_buff *skb;
	int left, ret = 0;
	u8 flits, ndesc;

	eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid];
	spin_lock(&eohw_txq->lock);
	reclaim_completed_tx_imm(&eohw_txq->q);

	d = &eosw_txq->desc[eosw_txq->last_pidx];
	skb = d->skb;

	wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx];
	if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE &&
		     eosw_txq->last_pidx == eosw_txq->flowc_idx)) {
		hdr_len = skb->len;
		data_len = 0;
		flits = DIV_ROUND_UP(hdr_len, 8);
		if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND)
			next_state = CXGB4_EO_STATE_FLOWC_OPEN_REPLY;
		else
			next_state = CXGB4_EO_STATE_FLOWC_CLOSE_REPLY;
		skip_eotx_wr = true;
	} else {
		hdr_len = eth_get_headlen(dev, skb->data, skb_headlen(skb));
		data_len = skb->len - hdr_len;
		flits = ethofld_calc_tx_flits(adap, skb, hdr_len);
	}
	ndesc = flits_to_desc(flits);
	wrlen = flits * 8;
	wrlen16 = DIV_ROUND_UP(wrlen, 16);

	left = txq_avail(&eohw_txq->q) - ndesc;

	/* If there are no descriptors left in the hardware queue or no
	 * CPL credits left in the software queue, wait for them to come
	 * back and retry again.  Note that we always request a credit
	 * update via interrupt for every half of the credits consumed,
	 * so the interrupt will eventually restore the credits and
	 * invoke the Tx path again.
	 */
	if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	if (unlikely(skip_eotx_wr)) {
		start = (u64 *)wr;
		eosw_txq->state = next_state;
		eosw_txq->cred -= wrlen16;
		eosw_txq->ncompl++;
		eosw_txq->last_compl = 0;
		goto write_wr_headers;
	}

	cpl = write_eo_wr(adap, eosw_txq, skb, wr, hdr_len, wrlen);
	cntrl = hwcsum(adap->params.chip, skb);
	if (skb_vlan_tag_present(skb))
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));

	cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
				 TXPKT_INTF_V(pi->tx_chan) |
				 TXPKT_PF_V(adap->pf));
	cpl->pack = 0;
	cpl->len = cpu_to_be16(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	start = (u64 *)(cpl + 1);

write_wr_headers:
	sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start,
					  hdr_len);
	if (data_len) {
		ret = cxgb4_map_skb(adap->pdev_dev, skb, d->addr);
		if (unlikely(ret)) {
			memset(d->addr, 0, sizeof(d->addr));
			eohw_txq->mapping_err++;
			goto out_unlock;
		}

		end = (u64 *)wr + flits;
		if (unlikely(start > sgl)) {
			left = (u8 *)end - (u8 *)eohw_txq->q.stat;
			end = (void *)eohw_txq->q.desc + left;
		}

		if (unlikely((u8 *)sgl >= (u8 *)eohw_txq->q.stat)) {
			/* If current position is already at the end of the
			 * txq, reset the current to point to start of the
			 * queue and update the end ptr as well.
			 */
			left = (u8 *)end - (u8 *)eohw_txq->q.stat;

			end = (void *)eohw_txq->q.desc + left;
			sgl = (void *)eohw_txq->q.desc;
		}

		cxgb4_write_sgl(skb, &eohw_txq->q, (void *)sgl, end, hdr_len,
				d->addr);
	}

	if (skb_shinfo(skb)->gso_size) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
			eohw_txq->uso++;
		else
			eohw_txq->tso++;
		eohw_txq->tx_cso += skb_shinfo(skb)->gso_segs;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		eohw_txq->tx_cso++;
	}

	if (skb_vlan_tag_present(skb))
		eohw_txq->vlan_ins++;

	txq_advance(&eohw_txq->q, ndesc);
	skb_tx_timestamp(skb);
	cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
	eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc);

out_unlock:
	spin_unlock(&eohw_txq->lock);
	return ret;
}

static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
{
	struct sk_buff *skb;
	int pktcount, ret;

	switch (eosw_txq->state) {
	case CXGB4_EO_STATE_ACTIVE:
	case CXGB4_EO_STATE_FLOWC_OPEN_SEND:
	case CXGB4_EO_STATE_FLOWC_CLOSE_SEND:
		pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
		if (pktcount < 0)
			pktcount += eosw_txq->ndesc;
		break;
	case CXGB4_EO_STATE_FLOWC_OPEN_REPLY:
	case CXGB4_EO_STATE_FLOWC_CLOSE_REPLY:
	case CXGB4_EO_STATE_CLOSED:
	default:
		return;
	}

	while (pktcount--) {
		skb = eosw_txq_peek(eosw_txq);
		if (!skb) {
			eosw_txq_advance_index(&eosw_txq->last_pidx, 1,
					       eosw_txq->ndesc);
			continue;
		}

		ret = ethofld_hard_xmit(dev, eosw_txq);
		if (ret)
			break;
	}
}

static netdev_tx_t cxgb4_ethofld_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_eosw_txq *eosw_txq;
	u32 qid;
	int ret;

	ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
	if (ret)
		goto out_free;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	qid = skb_get_queue_mapping(skb) - pi->nqsets;
	eosw_txq = &tc_port_mqprio->eosw_txq[qid];
	spin_lock_bh(&eosw_txq->lock);
	if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
		goto out_unlock;

	ret = eosw_txq_enqueue(eosw_txq, skb);
	if (ret)
		goto out_unlock;

	/* SKB is queued for processing until credits are available.
	 * So, call the destructor now and we'll free the skb later
	 * after it has been successfully transmitted.
	 */
	skb_orphan(skb);

	eosw_txq_advance(eosw_txq, 1);
	ethofld_xmit(dev, eosw_txq);
	spin_unlock_bh(&eosw_txq->lock);
	return NETDEV_TX_OK;

out_unlock:
	spin_unlock_bh(&eosw_txq->lock);
out_free:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	u16 qid = skb_get_queue_mapping(skb);

	if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
		return cxgb4_vf_eth_xmit(skb, dev);

	if (unlikely(qid >= pi->nqsets))
		return cxgb4_ethofld_xmit(skb, dev);

	if (is_ptp_enabled(skb, dev)) {
		struct adapter *adap = netdev2adap(dev);
		netdev_tx_t ret;

		spin_lock(&adap->ptp_lock);
		ret = cxgb4_eth_xmit(skb, dev);
		spin_unlock(&adap->ptp_lock);
		return ret;
	}

	return cxgb4_eth_xmit(skb, dev);
}

static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq)
{
	int pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
	int pidx = eosw_txq->pidx;
	struct sk_buff *skb;

	if (!pktcount)
		return;

	if (pktcount < 0)
		pktcount += eosw_txq->ndesc;

	while (pktcount--) {
		pidx--;
		if (pidx < 0)
			pidx += eosw_txq->ndesc;

		skb = eosw_txq->desc[pidx].skb;
		if (skb) {
			dev_consume_skb_any(skb);
			eosw_txq->desc[pidx].skb = NULL;
			eosw_txq->inuse--;
		}
	}

	eosw_txq->pidx = eosw_txq->last_pidx + 1;
}

/**
 * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
 * @dev: netdevice
 * @eotid: ETHOFLD tid to bind/unbind
 * @tc: traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
 *
 * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class.
 * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from
 * a traffic class.
 */
int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	enum sge_eosw_state next_state;
	struct sge_eosw_txq *eosw_txq;
	u32 len, len16, nparams = 6;
	struct fw_flowc_wr *flowc;
	struct eotid_entry *entry;
	struct sge_ofld_rxq *rxq;
	struct sk_buff *skb;
	int ret = 0;

	len = struct_size(flowc, mnemval, nparams);
	len16 = DIV_ROUND_UP(len, 16);

	entry = cxgb4_lookup_eotid(&adap->tids, eotid);
	if (!entry)
		return -ENOMEM;

	eosw_txq = (struct sge_eosw_txq *)entry->data;
	if (!eosw_txq)
		return -ENOMEM;

	if (!(adap->flags & CXGB4_FW_OK)) {
		/* Don't stall caller when access to FW is lost */
		complete(&eosw_txq->completion);
		return -EIO;
	}

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	spin_lock_bh(&eosw_txq->lock);
	if (tc != FW_SCHED_CLS_NONE) {
		if (eosw_txq->state != CXGB4_EO_STATE_CLOSED)
			goto out_free_skb;

		next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND;
	} else {
		if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
			goto out_free_skb;

		next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND;
	}

	flowc = __skb_put(skb, len);
	memset(flowc, 0, len);

	rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid];
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(len16) |
					  FW_WR_FLOWID_V(eosw_txq->hwtid));
	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams) |
					   FW_WR_COMPL_V(1));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(pi->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(pi->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
	flowc->mnemval[4].val = cpu_to_be32(tc);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_EOSTATE;
	flowc->mnemval[5].val = cpu_to_be32(tc == FW_SCHED_CLS_NONE ?
					    FW_FLOWC_MNEM_EOSTATE_CLOSING :
					    FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);

	/* Free up any pending skbs to ensure there's room for
	 * termination FLOWC.
	 */
	if (tc == FW_SCHED_CLS_NONE)
		eosw_txq_flush_pending_skbs(eosw_txq);

	ret = eosw_txq_enqueue(eosw_txq, skb);
	if (ret)
		goto out_free_skb;

	eosw_txq->state = next_state;
	eosw_txq->flowc_idx = eosw_txq->pidx;
	eosw_txq_advance(eosw_txq, 1);
	ethofld_xmit(dev, eosw_txq);

	spin_unlock_bh(&eosw_txq->lock);
	return 0;

out_free_skb:
	dev_consume_skb_any(skb);
	spin_unlock_bh(&eosw_txq->lock);
	return ret;
}

/**
 * is_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as a WR with immediate data.
 */
static inline int is_imm(const struct sk_buff *skb)
{
	return skb->len <= MAX_CTRL_WR_LEN;
}

/**
 * ctrlq_check_stop - check if a control queue is full and should stop
 * @q: the queue
 * @wr: most recent WR written to the queue
 *
 * Check if a control queue has become full and should be stopped.
 * We clean up control queue descriptors very lazily, only when we are
 * out of descriptors.  If the queue is still full after reclaiming any
 * completed descriptors we suspend it and have the last WR wake it up.
 */
static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
{
	reclaim_completed_tx_imm(&q->q);
	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
		wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
		q->q.stops++;
		q->full = 1;
	}
}

#define CXGB4_SELFTEST_LB_STR "CHELSIO_SELFTEST"

int cxgb4_selftest_lb_pkt(struct net_device *netdev)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	struct cxgb4_ethtool_lb_test *lb;
	int ret, i = 0, pkt_len, credits;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	u32 ctrl0, ndesc, flits;
	struct sge_eth_txq *q;
	u8 *sgl;

	pkt_len = ETH_HLEN + sizeof(CXGB4_SELFTEST_LB_STR);

	flits = DIV_ROUND_UP(pkt_len + sizeof(*cpl) + sizeof(*wr),
			     sizeof(__be64));
	ndesc = flits_to_desc(flits);

	lb = &pi->ethtool_lb;
	lb->loopback = 1;

	q = &adap->sge.ethtxq[pi->first_qset];
	__netif_tx_lock_bh(q->txq);

	reclaim_completed_tx(adap, &q->q, -1, true);
	credits = txq_avail(&q->q) - ndesc;
	if (unlikely(credits < 0)) {
		__netif_tx_unlock_bh(q->txq);
		return -ENOMEM;
	}

	wr = (void *)&q->q.desc[q->q.pidx];
	memset(wr, 0, sizeof(struct tx_desc));

	wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
			       FW_WR_IMMDLEN_V(pkt_len +
					       sizeof(*cpl)));
	wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)));
	wr->r3 = cpu_to_be64(0);

	cpl = (void *)(wr + 1);
	sgl = (u8 *)(cpl + 1);

	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_PF_V(adap->pf) |
		TXPKT_INTF_V(pi->tx_chan + 4);

	cpl->ctrl0 = htonl(ctrl0);
	cpl->pack = htons(0);
	cpl->len = htons(pkt_len);
	cpl->ctrl1 = cpu_to_be64(TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F);

	eth_broadcast_addr(sgl);
	i += ETH_ALEN;
	ether_addr_copy(&sgl[i], netdev->dev_addr);
	i += ETH_ALEN;

	snprintf(&sgl[i], sizeof(CXGB4_SELFTEST_LB_STR), "%s",
		 CXGB4_SELFTEST_LB_STR);

	init_completion(&lb->completion);
	txq_advance(&q->q, ndesc);
	cxgb4_ring_tx_db(adap, &q->q, ndesc);
	__netif_tx_unlock_bh(q->txq);

	/* wait for the pkt to return */
	ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
	if (!ret)
		ret = -ETIMEDOUT;
	else
		ret = lb->result;

	lb->loopback = 0;

	return ret;
}

/**
 * ctrl_xmit - send a packet through an SGE control Tx queue
 * @q: the control queue
 * @skb: the packet
 *
 * Send a packet through an SGE control Tx queue.  Packets sent through
 * a control queue must fit entirely as immediate data.
 */
static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
{
	unsigned int ndesc;
	struct fw_wr_hdr *wr;

	if (unlikely(!is_imm(skb))) {
		WARN_ON(1);
		dev_kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
	spin_lock(&q->sendq.lock);

	if (unlikely(q->full)) {
		skb->priority = ndesc;	/* save for restart */
		__skb_queue_tail(&q->sendq, skb);
		spin_unlock(&q->sendq.lock);
		return NET_XMIT_CN;
	}

	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
	cxgb4_inline_tx_skb(skb, &q->q, wr);

	txq_advance(&q->q, ndesc);
	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
		ctrlq_check_stop(q, wr);

	cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
	spin_unlock(&q->sendq.lock);

	kfree_skb(skb);
	return NET_XMIT_SUCCESS;
}
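
/* Descriptor accounting sketch: control WRs are pure immediate data, so
 * ndesc above is just the WR length divided by the TX descriptor size
 * (struct tx_desc is 8 flits, i.e. 64 bytes).  A hypothetical 96-byte
 * FLOWC WR would therefore consume DIV_ROUND_UP(96, 64) = 2 descriptors.
 */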

/**
 * restart_ctrlq - restart a suspended control queue
 * @t: pointer to the tasklet associated with this handler
 *
 * Resumes transmission on a suspended Tx control queue.
 */
static void restart_ctrlq(struct tasklet_struct *t)
{
	struct sk_buff *skb;
	unsigned int written = 0;
	struct sge_ctrl_txq *q = from_tasklet(q, t, qresume_tsk);

	spin_lock(&q->sendq.lock);
	reclaim_completed_tx_imm(&q->q);
	BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);	/* q should be empty */

	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
		struct fw_wr_hdr *wr;
		unsigned int ndesc = skb->priority;	/* previously saved */

		written += ndesc;
		/* Write descriptors and free skbs outside the lock to limit
		 * wait times.  q->full is still set so new skbs will be queued.
		 */
		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
		txq_advance(&q->q, ndesc);
		spin_unlock(&q->sendq.lock);

		cxgb4_inline_tx_skb(skb, &q->q, wr);
		kfree_skb(skb);

		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
			unsigned long old = q->q.stops;

			ctrlq_check_stop(q, wr);
			if (q->q.stops != old) {	/* suspended anew */
				spin_lock(&q->sendq.lock);
				goto ringdb;
			}
		}
		if (written > 16) {
			cxgb4_ring_tx_db(q->adap, &q->q, written);
			written = 0;
		}
		spin_lock(&q->sendq.lock);
	}
	q->full = 0;
ringdb:
	if (written)
		cxgb4_ring_tx_db(q->adap, &q->q, written);
	spin_unlock(&q->sendq.lock);
}

/**
 * t4_mgmt_tx - send a management message
 * @adap: the adapter
 * @skb: the packet containing the management message
 *
 * Send a management message through control queue 0.
 */
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
	local_bh_enable();
	return ret;
}

/**
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data.
 * FW_OFLD_TX_DATA_WR limits the payload to 255 bytes due to its 8-bit
 * length field, while FW_ULPTX_WR commands have a 256-byte immediate-only
 * payload limit.
 */
static inline int is_ofld_imm(const struct sk_buff *skb)
{
	struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
	unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));

	if (unlikely(opcode == FW_ULPTX_WR))
		return skb->len <= MAX_IMM_ULPTX_WR_LEN;
	else if (opcode == FW_CRYPTO_LOOKASIDE_WR)
		return skb->len <= SGE_MAX_WR_LEN;
	else
		return skb->len <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
}

/**
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);

	flits = skb_transport_offset(skb) / 8U;	/* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}
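
/* Worked example (hypothetical skb layout): an offload packet whose WR and
 * headers span skb_transport_offset(skb) = 40 bytes contributes 5 header
 * flits; with one page fragment plus a linear tail past the transport
 * header, cnt = 2, so the total is 5 + sgl_len(2) = 9 flits.
 */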

/**
 * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
 * @q: the queue to stop
 *
 * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
 * inability to map packets.  A periodic timer attempts to restart
 * queues so marked.
 */
static void txq_stop_maperr(struct sge_uld_txq *q)
{
	q->mapping_err++;
	q->q.stops++;
	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
		q->adap->sge.txq_maperr);
}

/**
 * ofldtxq_stop - stop an offload Tx queue that has become full
 * @q: the queue to stop
 * @wr: the Work Request causing the queue to become full
 *
 * Stops an offload Tx queue that has become full and modifies the packet
 * being written to request a wakeup.
 */
static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
{
	wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
	q->q.stops++;
	q->full = 1;
}

/**
 * service_ofldq - service/restart a suspended offload queue
 * @q: the offload queue
 *
 * Services an offload Tx queue by moving packets from its Pending Send
 * Queue to the Hardware TX ring.  The function starts and ends with the
 * Send Queue locked, but drops the lock while putting the skb at the
 * head of the Send Queue onto the Hardware TX Ring.  Dropping the lock
 * allows more skbs to be added to the Send Queue by other threads.
 * The packet being processed at the head of the Pending Send Queue is
 * left on the queue in case we experience DMA Mapping errors, etc.
 * and need to give up and restart later.
 *
 * service_ofldq() can be thought of as a task which opportunistically
 * uses other threads' execution contexts.  We use the Offload Queue
 * boolean "service_ofldq_running" to make sure that only one instance
 * is ever running at a time ...
 */
static void service_ofldq(struct sge_uld_txq *q)
	__must_hold(&q->sendq.lock)
{
	u64 *pos, *before, *end;
	int credits;
	struct sk_buff *skb;
	struct sge_txq *txq;
	unsigned int left;
	unsigned int written = 0;
	unsigned int flits, ndesc;

	/* If another thread is currently in service_ofldq() processing the
	 * Pending Send Queue then there's nothing to do.  Otherwise, flag
	 * that we're doing the work and continue.  Examining/modifying
	 * the Offload Queue boolean "service_ofldq_running" must be done
	 * while holding the Pending Send Queue Lock.
	 */
	if (q->service_ofldq_running)
		return;
	q->service_ofldq_running = true;

	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
		/* We drop the lock while we're working with the skb at the
		 * head of the Pending Send Queue.  This allows more skbs to
		 * be added to the Pending Send Queue while we're working on
		 * this one.  We don't need to lock to guard the TX Ring
		 * updates because only one thread of execution is ever
		 * allowed into service_ofldq() at a time.
		 */
		spin_unlock(&q->sendq.lock);

		cxgb4_reclaim_completed_tx(q->adap, &q->q, false);

		flits = skb->priority;		/* previously saved */
		ndesc = flits_to_desc(flits);
		credits = txq_avail(&q->q) - ndesc;
		BUG_ON(credits < 0);
		if (unlikely(credits < TXQ_STOP_THRES))
			ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data);

		pos = (u64 *)&q->q.desc[q->q.pidx];
		if (is_ofld_imm(skb))
			cxgb4_inline_tx_skb(skb, &q->q, pos);
		else if (cxgb4_map_skb(q->adap->pdev_dev, skb,
				       (dma_addr_t *)skb->head)) {
			txq_stop_maperr(q);
			spin_lock(&q->sendq.lock);
			break;
		} else {
			int last_desc, hdr_len = skb_transport_offset(skb);

			/* The WR headers may not fit within one descriptor.
			 * So we need to deal with wrap-around here.
			 */
			before = (u64 *)pos;
			end = (u64 *)pos + flits;
			txq = &q->q;
			pos = (void *)inline_tx_skb_header(skb, &q->q,
							   (void *)pos,
							   hdr_len);
			if (before > (u64 *)pos) {
				left = (u8 *)end - (u8 *)txq->stat;
				end = (void *)txq->desc + left;
			}

			/* If current position is already at the end of the
			 * ofld queue, reset the current to point to
			 * start of the queue and update the end ptr as well.
			 */
			if (pos == (u64 *)txq->stat) {
				left = (u8 *)end - (u8 *)txq->stat;
				end = (void *)txq->desc + left;
				pos = (void *)txq->desc;
			}

			cxgb4_write_sgl(skb, &q->q, (void *)pos,
					end, hdr_len,
					(dma_addr_t *)skb->head);
#ifdef CONFIG_NEED_DMA_MAP_STATE
			skb->dev = q->adap->port[0];
			skb->destructor = deferred_unmap_destructor;
#endif
			last_desc = q->q.pidx + ndesc - 1;
			if (last_desc >= q->q.size)
				last_desc -= q->q.size;
			q->q.sdesc[last_desc].skb = skb;
		}

		txq_advance(&q->q, ndesc);
		written += ndesc;
		if (unlikely(written > 32)) {
			cxgb4_ring_tx_db(q->adap, &q->q, written);
			written = 0;
		}

		/* Reacquire the Pending Send Queue Lock so we can unlink the
		 * skb we've just successfully transferred to the TX Ring and
		 * loop for the next skb which may be at the head of the
		 * Pending Send Queue.
		 */
		spin_lock(&q->sendq.lock);
		__skb_unlink(skb, &q->sendq);
		if (is_ofld_imm(skb))
			kfree_skb(skb);
	}
	if (likely(written))
		cxgb4_ring_tx_db(q->adap, &q->q, written);

	/* Indicate that no thread is processing the Pending Send Queue
	 * currently.
	 */
	q->service_ofldq_running = false;
}

/**
 * ofld_xmit - send a packet through an offload queue
 * @q: the Tx offload queue
 * @skb: the packet
 *
 * Send an offload packet through an SGE offload queue.
 */
static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
{
	skb->priority = calc_tx_flits_ofld(skb);	/* save for restart */
	spin_lock(&q->sendq.lock);

	/* Queue the new skb onto the Offload Queue's Pending Send Queue.  If
	 * that results in this new skb being the only one on the queue, start
	 * servicing it.  If there are other skbs already on the list, then
	 * either the queue is currently being processed or it's been stopped
	 * for some reason and it'll be restarted at a later time.  Restart
	 * paths are triggered by events like experiencing a DMA Mapping Error
	 * or filling the Hardware TX Ring.
	 */
	__skb_queue_tail(&q->sendq, skb);
	if (q->sendq.qlen == 1)
		service_ofldq(q);

	spin_unlock(&q->sendq.lock);
	return NET_XMIT_SUCCESS;
}

/**
 * restart_ofldq - restart a suspended offload queue
 * @t: pointer to the tasklet associated with this handler
 *
 * Resumes transmission on a suspended Tx offload queue.
 */
static void restart_ofldq(struct tasklet_struct *t)
{
	struct sge_uld_txq *q = from_tasklet(q, t, qresume_tsk);

	spin_lock(&q->sendq.lock);
	q->full = 0;	/* the queue actually is completely empty now */
	service_ofldq(q);
	spin_unlock(&q->sendq.lock);
}

/**
 * skb_txq - return the Tx queue an offload packet should use
 * @skb: the packet
 *
 * Returns the Tx queue an offload packet should use as indicated by bits
 * 1-15 in the packet's queue_mapping.
 */
static inline unsigned int skb_txq(const struct sk_buff *skb)
{
	return skb->queue_mapping >> 1;
}

/**
 * is_ctrl_pkt - return whether an offload packet is a control packet
 * @skb: the packet
 *
 * Returns whether an offload packet should use an OFLD or a CTRL
 * Tx queue as indicated by bit 0 in the packet's queue_mapping.
 */
static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
{
	return skb->queue_mapping & 1;
}
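
/* Encoding sketch: callers build queue_mapping as (queue << 1) | is_ctrl
 * (see set_wr_txq() in cxgb4.h), so for example a queue_mapping of 5
 * (0b101) has bit 0 set and selects control queue index 5 >> 1 = 2,
 * i.e. ctrlq[2].
 */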

static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
			   unsigned int tx_uld_type)
{
	struct sge_uld_txq_info *txq_info;
	struct sge_uld_txq *txq;
	unsigned int idx = skb_txq(skb);

	if (unlikely(is_ctrl_pkt(skb))) {
		/* Single ctrl queue is a requirement for LE workaround path */
		if (adap->tids.nsftids)
			idx = 0;
		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
	}

	txq_info = adap->sge.uld_txq_info[tx_uld_type];
	if (unlikely(!txq_info)) {
		WARN_ON(true);
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	txq = &txq_info->uldtxq[idx];
	return ofld_xmit(txq, skb);
}

/**
 * t4_ofld_send - send an offload packet
 * @adap: the adapter
 * @skb: the packet
 *
 * Sends an offload packet.  We use the packet queue_mapping to select the
 * appropriate Tx queue as follows: bit 0 indicates whether the packet
 * should be sent as regular or control, bits 1-15 select the queue.
 */
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = uld_send(adap, skb, CXGB4_TX_OFLD);
	local_bh_enable();
	return ret;
}

/**
 * cxgb4_ofld_send - send an offload packet
 * @dev: the net device
 * @skb: the packet
 *
 * Sends an offload packet.  This is an exported version of t4_ofld_send(),
 * intended for ULDs.
 */
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
{
	return t4_ofld_send(netdev2adap(dev), skb);
}
EXPORT_SYMBOL(cxgb4_ofld_send);

static void *inline_tx_header(const void *src,
			      const struct sge_txq *q,
			      void *pos, int length)
{
	int left = (void *)q->stat - pos;
	u64 *p;

	if (likely(length <= left)) {
		memcpy(pos, src, length);
		pos += length;
	} else {
		memcpy(pos, src, left);
		memcpy(q->desc, src + left, length - left);
		pos = (void *)q->desc + (length - left);
	}
	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8) {
		*p = 0;
		return p + 1;
	}
	return p;
}
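
/* Padding sketch: inline_tx_header() returns a 16-byte-aligned write
 * pointer.  For a hypothetical 22-byte header copied at a 16-byte-aligned
 * position, PTR_ALIGN() first rounds the end up to 24 bytes and, since
 * that is not a multiple of 16, one extra zero flit is written, leaving
 * the returned pointer 32 bytes past the start.
 */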

/**
 * ofld_xmit_direct - copy a WR into offload queue
 * @q: the Tx offload queue
 * @src: location of WR
 * @len: WR length
 *
 * Copy an immediate WR into an uncontended SGE offload queue.
 */
static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src,
			    unsigned int len)
{
	unsigned int ndesc;
	int credits;
	u64 *pos;

	/* Use the lower limit as the cut-off */
	if (len > MAX_IMM_OFLD_TX_DATA_WR_LEN) {
		WARN_ON(1);
		return NET_XMIT_DROP;
	}

	/* Don't return NET_XMIT_CN here as the current implementation
	 * doesn't queue the request using an skb when the following
	 * conditions are not met.
	 */
	if (!spin_trylock(&q->sendq.lock))
		return NET_XMIT_DROP;

	if (q->full || !skb_queue_empty(&q->sendq) ||
	    q->service_ofldq_running) {
		spin_unlock(&q->sendq.lock);
		return NET_XMIT_DROP;
	}
	ndesc = flits_to_desc(DIV_ROUND_UP(len, 8));
	credits = txq_avail(&q->q) - ndesc;
	pos = (u64 *)&q->q.desc[q->q.pidx];

	/* ofldtxq_stop modifies WR header in-situ */
	inline_tx_header(src, &q->q, pos, len);
	if (unlikely(credits < TXQ_STOP_THRES))
		ofldtxq_stop(q, (struct fw_wr_hdr *)pos);
	txq_advance(&q->q, ndesc);
	cxgb4_ring_tx_db(q->adap, &q->q, ndesc);

	spin_unlock(&q->sendq.lock);
	return NET_XMIT_SUCCESS;
}

int cxgb4_immdata_send(struct net_device *dev, unsigned int idx,
		       const void *src, unsigned int len)
{
	struct sge_uld_txq_info *txq_info;
	struct sge_uld_txq *txq;
	struct adapter *adap;
	int ret;

	adap = netdev2adap(dev);

	local_bh_disable();
	txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
	if (unlikely(!txq_info)) {
		WARN_ON(true);
		local_bh_enable();
		return NET_XMIT_DROP;
	}
	txq = &txq_info->uldtxq[idx];

	ret = ofld_xmit_direct(txq, src, len);
	local_bh_enable();
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_immdata_send);

/**
 * t4_crypto_send - send crypto packet
 * @adap: the adapter
 * @skb: the packet
 *
 * Sends crypto packet.  We use the packet queue_mapping to select the
 * appropriate Tx queue as follows: bit 0 indicates whether the packet
 * should be sent as regular or control, bits 1-15 select the queue.
 */
static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = uld_send(adap, skb, CXGB4_TX_CRYPTO);
	local_bh_enable();
	return ret;
}

/**
 * cxgb4_crypto_send - send crypto packet
 * @dev: the net device
 * @skb: the packet
 *
 * Sends crypto packet.  This is an exported version of t4_crypto_send(),
 * intended for ULDs.
 */
int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb)
{
	return t4_crypto_send(netdev2adap(dev), skb);
}
EXPORT_SYMBOL(cxgb4_crypto_send);

static inline void copy_frags(struct sk_buff *skb,
			      const struct pkt_gl *gl, unsigned int offset)
{
	int i;

	/* usually there's just one frag */
	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
			     gl->frags[0].offset + offset,
			     gl->frags[0].size - offset);
	skb_shinfo(skb)->nr_frags = gl->nfrags;
	for (i = 1; i < gl->nfrags; i++)
		__skb_fill_page_desc(skb, i, gl->frags[i].page,
				     gl->frags[i].offset,
				     gl->frags[i].size);

	/* get a reference to the last page, we don't own it */
	get_page(gl->frags[gl->nfrags - 1].page);
}

/**
 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
 * @gl: the gather list
 * @skb_len: size of sk_buff main body if it carries fragments
 * @pull_len: amount of data to move to the sk_buff's main body
 *
 * Builds an sk_buff from the given packet gather list.  Returns the
 * sk_buff or %NULL if sk_buff allocation failed.
 */
struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
				   unsigned int skb_len, unsigned int pull_len)
{
	struct sk_buff *skb;

	/*
	 * Below we rely on RX_COPY_THRES being less than the smallest Rx
	 * buffer size, which is expected since buffers are at least
	 * PAGE_SIZEd.  In this case packets up to RX_COPY_THRES have only one
	 * fragment.
	 */
	if (gl->tot_len <= RX_COPY_THRES) {
		skb = dev_alloc_skb(gl->tot_len);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, gl->tot_len);
		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
	} else {
		skb = dev_alloc_skb(skb_len);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, pull_len);
		skb_copy_to_linear_data(skb, gl->va, pull_len);

		copy_frags(skb, gl, pull_len);
		skb->len = gl->tot_len;
		skb->data_len = skb->len - pull_len;
		skb->truesize += skb->data_len;
	}
out:	return skb;
}
EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
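
/* Sizing note, using the thresholds defined at the top of this file: a
 * 200-byte packet (<= RX_COPY_THRES) is copied whole into a linear skb,
 * while a 1500-byte packet gets an skb_len-sized linear area (callers
 * typically pass RX_PKT_SKB_LEN or RX_PULL_LEN) holding the first
 * pull_len bytes, with the remainder attached as page fragments.
 */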

/**
 * t4_pktgl_free - free a packet gather list
 * @gl: the gather list
 *
 * Releases the pages of a packet gather list.  We do not own the last
 * page on the list and do not free it.
 */
static void t4_pktgl_free(const struct pkt_gl *gl)
{
	int n;
	const struct page_frag *p;

	for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
		put_page(p->page);
}

/*
 * Process an MPS trace packet.  Give it an unused protocol number so it won't
 * be delivered to anyone and send it to the stack for capture.
 */
static noinline int handle_trace_pkt(struct adapter *adap,
				     const struct pkt_gl *gl)
{
	struct sk_buff *skb;

	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
	if (unlikely(!skb)) {
		t4_pktgl_free(gl);
		return 0;
	}

	if (is_t4(adap->params.chip))
		__skb_pull(skb, sizeof(struct cpl_trace_pkt));
	else
		__skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));

	skb_reset_mac_header(skb);
	skb->protocol = htons(0xffff);
	skb->dev = adap->port[0];
	netif_receive_skb(skb);
	return 0;
}
3407
3408 /**
3409 * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
3410 * @adap: the adapter
3411 * @hwtstamps: time stamp structure to update
3412 * @sgetstamp: 60bit iqe timestamp
3413 *
 * Every ingress queue entry carries a 60-bit timestamp in Core Clock ticks;
 * convert it to ktime_t and store it in @hwtstamps.
3416 **/
static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
3418 struct skb_shared_hwtstamps *hwtstamps,
3419 u64 sgetstamp)
3420 {
3421 u64 ns;
3422 u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2);
3423
3424 ns = div_u64(tmp, adap->params.vpd.cclk);
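	/*
	 * cclk holds the core clock frequency in kHz, so each SGE timestamp
	 * tick is 1,000,000 / cclk ns -- e.g. 4 ns for a (hypothetical)
	 * 250 MHz core clock.  The cclk / 2 addend above rounds the division
	 * to the nearest nanosecond.
	 */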
3425
3426 memset(hwtstamps, 0, sizeof(*hwtstamps));
3427 hwtstamps->hwtstamp = ns_to_ktime(ns);
3428 }
3429
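/* Deliver a checksum-verified TCP (or tunnelled) packet to GRO.  The packet's
 * fragments are attached to an skb obtained from napi_get_frags() and handed
 * to napi_gro_frags(); if no skb can be allocated the buffers are released
 * and the packet is counted as an Rx drop.
 */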
static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
3431 const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len)
3432 {
3433 struct adapter *adapter = rxq->rspq.adap;
3434 struct sge *s = &adapter->sge;
3435 struct port_info *pi;
3436 int ret;
3437 struct sk_buff *skb;
3438
3439 skb = napi_get_frags(&rxq->rspq.napi);
3440 if (unlikely(!skb)) {
3441 t4_pktgl_free(gl);
3442 rxq->stats.rx_drops++;
3443 return;
3444 }
3445
3446 copy_frags(skb, gl, s->pktshift);
3447 if (tnl_hdr_len)
3448 skb->csum_level = 1;
3449 skb->len = gl->tot_len - s->pktshift;
3450 skb->data_len = skb->len;
3451 skb->truesize += skb->data_len;
3452 skb->ip_summed = CHECKSUM_UNNECESSARY;
3453 skb_record_rx_queue(skb, rxq->rspq.idx);
3454 pi = netdev_priv(skb->dev);
3455 if (pi->rxtstamp)
3456 cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb),
3457 gl->sgetstamp);
3458 if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
3459 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
3460 PKT_HASH_TYPE_L3);
3461
3462 if (unlikely(pkt->vlan_ex)) {
3463 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
3464 rxq->stats.vlan_ex++;
3465 }
3466 ret = napi_gro_frags(&rxq->rspq.napi);
3467 if (ret == GRO_HELD)
3468 rxq->stats.lro_pkts++;
3469 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
3470 rxq->stats.lro_merged++;
3471 rxq->stats.pkts++;
3472 rxq->stats.rx_cso++;
3473 }
3474
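/* Outcomes of the PTP Rx timestamp handling below: the packet is a normal
 * (non-PTP) packet, a PTP event packet whose hardware timestamp was extracted
 * successfully, or a PTP packet that had to be dropped.
 */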
3475 enum {
3476 RX_NON_PTP_PKT = 0,
3477 RX_PTP_PKT_SUC = 1,
3478 RX_PTP_PKT_ERR = 2
3479 };
3480
3481 /**
3482 * t4_systim_to_hwstamp - read hardware time stamp
3483 * @adapter: the adapter
3484 * @skb: the packet
3485 *
3486 * Read Time Stamp from MPS packet and insert in skb which
3487 * is forwarded to PTP application
3488 */
static noinline int t4_systim_to_hwstamp(struct adapter *adapter,
3490 struct sk_buff *skb)
3491 {
3492 struct skb_shared_hwtstamps *hwtstamps;
3493 struct cpl_rx_mps_pkt *cpl = NULL;
3494 unsigned char *data;
3495 int offset;
3496
3497 cpl = (struct cpl_rx_mps_pkt *)skb->data;
3498 if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) &
3499 X_CPL_RX_MPS_PKT_TYPE_PTP))
3500 return RX_PTP_PKT_ERR;
3501
3502 data = skb->data + sizeof(*cpl);
3503 skb_pull(skb, 2 * sizeof(u64) + sizeof(struct cpl_rx_mps_pkt));
3504 offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN;
3505 if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short))
3506 return RX_PTP_PKT_ERR;
3507
3508 hwtstamps = skb_hwtstamps(skb);
3509 memset(hwtstamps, 0, sizeof(*hwtstamps));
3510 hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data));
3511
3512 return RX_PTP_PKT_SUC;
3513 }
3514
3515 /**
3516 * t4_rx_hststamp - Recv PTP Event Message
3517 * @adapter: the adapter
3518 * @rsp: the response queue descriptor holding the RX_PKT message
3519 * @rxq: the response queue holding the RX_PKT message
3520 * @skb: the packet
3521 *
3522 * PTP enabled and MPS packet, read HW timestamp
3523 */
static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp,
3525 struct sge_eth_rxq *rxq, struct sk_buff *skb)
3526 {
3527 int ret;
3528
3529 if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) &&
3530 !is_t4(adapter->params.chip))) {
3531 ret = t4_systim_to_hwstamp(adapter, skb);
3532 if (ret == RX_PTP_PKT_ERR) {
3533 kfree_skb(skb);
3534 rxq->stats.rx_drops++;
3535 }
3536 return ret;
3537 }
3538 return RX_NON_PTP_PKT;
3539 }
3540
3541 /**
3542 * t4_tx_hststamp - Loopback PTP Transmit Event Message
3543 * @adapter: the adapter
3544 * @skb: the packet
3545 * @dev: the ingress net device
3546 *
3547 * Read hardware timestamp for the loopback PTP Tx event message
3548 */
static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb,
3550 struct net_device *dev)
3551 {
3552 struct port_info *pi = netdev_priv(dev);
3553
3554 if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) {
3555 cxgb4_ptp_read_hwstamp(adapter, pi);
3556 kfree_skb(skb);
3557 return 0;
3558 }
3559 return 1;
3560 }
3561
3562 /**
3563 * t4_tx_completion_handler - handle CPL_SGE_EGR_UPDATE messages
3564 * @rspq: Ethernet RX Response Queue associated with Ethernet TX Queue
3565 * @rsp: Response Entry pointer into Response Queue
3566 * @gl: Gather List pointer
3567 *
3568 * For adapters which support the SGE Doorbell Queue Timer facility,
3569 * we configure the Ethernet TX Queues to send CIDX Updates to the
3570 * Associated Ethernet RX Response Queue with CPL_SGE_EGR_UPDATE
3571 * messages. This adds a small load to PCIe Link RX bandwidth and,
3572 * potentially, higher CPU Interrupt load, but allows us to respond
3573 * much more quickly to the CIDX Updates. This is important for
3574 * Upper Layer Software which isn't willing to have a large amount
3575 * of TX Data outstanding before receiving DMA Completions.
3576 */
static void t4_tx_completion_handler(struct sge_rspq *rspq,
3578 const __be64 *rsp,
3579 const struct pkt_gl *gl)
3580 {
3581 u8 opcode = ((const struct rss_header *)rsp)->opcode;
3582 struct port_info *pi = netdev_priv(rspq->netdev);
3583 struct adapter *adapter = rspq->adap;
3584 struct sge *s = &adapter->sge;
3585 struct sge_eth_txq *txq;
3586
3587 /* skip RSS header */
3588 rsp++;
3589
3590 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
3591 */
3592 if (unlikely(opcode == CPL_FW4_MSG &&
3593 ((const struct cpl_fw4_msg *)rsp)->type ==
3594 FW_TYPE_RSSCPL)) {
3595 rsp++;
3596 opcode = ((const struct rss_header *)rsp)->opcode;
3597 rsp++;
3598 }
3599
3600 if (unlikely(opcode != CPL_SGE_EGR_UPDATE)) {
3601 pr_info("%s: unexpected FW4/CPL %#x on Rx queue\n",
3602 __func__, opcode);
3603 return;
3604 }
3605
3606 txq = &s->ethtxq[pi->first_qset + rspq->idx];
3607
3608 /* We've got the Hardware Consumer Index Update in the Egress Update
3609 * message. These Egress Update messages will be our sole CIDX Updates
3610 * we get since we don't want to chew up PCIe bandwidth for both Ingress
 * Messages and Status Page writes.  However, the code which manages
3612 * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
3613 * stored in the Status Page at the end of the TX Queue. It's easiest
3614 * to simply copy the CIDX Update value from the Egress Update message
3615 * to the Status Page. Also note that no Endian issues need to be
3616 * considered here since both are Big Endian and we're just copying
3617 * bytes consistently ...
3618 */
3619 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
3620 struct cpl_sge_egr_update *egr;
3621
3622 egr = (struct cpl_sge_egr_update *)rsp;
3623 WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
3624 }
3625
3626 t4_sge_eth_txq_egress_update(adapter, txq, -1);
3627 }
3628
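/* Check an ethtool self-test loopback packet: the source MAC must match the
 * port's address and the bytes following it must carry the self-test pattern.
 * Returns 0 (and completes the waiting self-test, recording -EIO if the
 * pattern doesn't match) when the packet is ours, -1 otherwise.
 */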
static int cxgb4_validate_lb_pkt(struct port_info *pi, const struct pkt_gl *si)
3630 {
3631 struct adapter *adap = pi->adapter;
3632 struct cxgb4_ethtool_lb_test *lb;
3633 struct sge *s = &adap->sge;
3634 struct net_device *netdev;
3635 u8 *data;
3636 int i;
3637
3638 netdev = adap->port[pi->port_id];
3639 lb = &pi->ethtool_lb;
3640 data = si->va + s->pktshift;
3641
3642 i = ETH_ALEN;
3643 if (!ether_addr_equal(data + i, netdev->dev_addr))
3644 return -1;
3645
3646 i += ETH_ALEN;
3647 if (strcmp(&data[i], CXGB4_SELFTEST_LB_STR))
3648 lb->result = -EIO;
3649
3650 complete(&lb->completion);
3651 return 0;
3652 }
3653
3654 /**
3655 * t4_ethrx_handler - process an ingress ethernet packet
3656 * @q: the response queue that received the packet
3657 * @rsp: the response queue descriptor holding the RX_PKT message
3658 * @si: the gather list of packet fragments
3659 *
3660 * Process an ingress ethernet packet and deliver it to the stack.
3661 */
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
3663 const struct pkt_gl *si)
3664 {
3665 bool csum_ok;
3666 struct sk_buff *skb;
3667 const struct cpl_rx_pkt *pkt;
3668 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
3669 struct adapter *adapter = q->adap;
3670 struct sge *s = &q->adap->sge;
3671 int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
3672 CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
3673 u16 err_vec, tnl_hdr_len = 0;
3674 struct port_info *pi;
3675 int ret = 0;
3676
3677 pi = netdev_priv(q->netdev);
3678 /* If we're looking at TX Queue CIDX Update, handle that separately
3679 * and return.
3680 */
3681 if (unlikely((*(u8 *)rsp == CPL_FW4_MSG) ||
3682 (*(u8 *)rsp == CPL_SGE_EGR_UPDATE))) {
3683 t4_tx_completion_handler(q, rsp, si);
3684 return 0;
3685 }
3686
3687 if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
3688 return handle_trace_pkt(q->adap, si);
3689
3690 pkt = (const struct cpl_rx_pkt *)rsp;
3691 /* Compressed error vector is enabled for T6 only */
3692 if (q->adap->params.tp.rx_pkt_encap) {
3693 err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec));
3694 tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec));
3695 } else {
3696 err_vec = be16_to_cpu(pkt->err_vec);
3697 }
3698
3699 csum_ok = pkt->csum_calc && !err_vec &&
3700 (q->netdev->features & NETIF_F_RXCSUM);
3701
3702 if (err_vec)
3703 rxq->stats.bad_rx_pkts++;
3704
3705 if (unlikely(pi->ethtool_lb.loopback && pkt->iff >= NCHAN)) {
3706 ret = cxgb4_validate_lb_pkt(pi, si);
3707 if (!ret)
3708 return 0;
3709 }
3710
3711 if (((pkt->l2info & htonl(RXF_TCP_F)) ||
3712 tnl_hdr_len) &&
3713 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
3714 do_gro(rxq, si, pkt, tnl_hdr_len);
3715 return 0;
3716 }
3717
3718 skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
3719 if (unlikely(!skb)) {
3720 t4_pktgl_free(si);
3721 rxq->stats.rx_drops++;
3722 return 0;
3723 }
3724
3725 /* Handle PTP Event Rx packet */
3726 if (unlikely(pi->ptp_enable)) {
3727 ret = t4_rx_hststamp(adapter, rsp, rxq, skb);
3728 if (ret == RX_PTP_PKT_ERR)
3729 return 0;
3730 }
3731 if (likely(!ret))
3732 __skb_pull(skb, s->pktshift); /* remove ethernet header pad */
3733
3734 /* Handle the PTP Event Tx Loopback packet */
3735 if (unlikely(pi->ptp_enable && !ret &&
3736 (pkt->l2info & htonl(RXF_UDP_F)) &&
3737 cxgb4_ptp_is_ptp_rx(skb))) {
3738 if (!t4_tx_hststamp(adapter, skb, q->netdev))
3739 return 0;
3740 }
3741
3742 skb->protocol = eth_type_trans(skb, q->netdev);
3743 skb_record_rx_queue(skb, q->idx);
3744 if (skb->dev->features & NETIF_F_RXHASH)
3745 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
3746 PKT_HASH_TYPE_L3);
3747
3748 rxq->stats.pkts++;
3749
3750 if (pi->rxtstamp)
3751 cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
3752 si->sgetstamp);
3753 if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
3754 if (!pkt->ip_frag) {
3755 skb->ip_summed = CHECKSUM_UNNECESSARY;
3756 rxq->stats.rx_cso++;
3757 } else if (pkt->l2info & htonl(RXF_IP_F)) {
3758 __sum16 c = (__force __sum16)pkt->csum;
3759 skb->csum = csum_unfold(c);
3760
3761 if (tnl_hdr_len) {
3762 skb->ip_summed = CHECKSUM_UNNECESSARY;
3763 skb->csum_level = 1;
3764 } else {
3765 skb->ip_summed = CHECKSUM_COMPLETE;
3766 }
3767 rxq->stats.rx_cso++;
3768 }
3769 } else {
3770 skb_checksum_none_assert(skb);
3771 #ifdef CONFIG_CHELSIO_T4_FCOE
3772 #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
3773 RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
3774
3775 if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
3776 if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
3777 (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
3778 if (q->adap->params.tp.rx_pkt_encap)
3779 csum_ok = err_vec &
3780 T6_COMPR_RXERR_SUM_F;
3781 else
3782 csum_ok = err_vec & RXERR_CSUM_F;
3783 if (!csum_ok)
3784 skb->ip_summed = CHECKSUM_UNNECESSARY;
3785 }
3786 }
3787
3788 #undef CPL_RX_PKT_FLAGS
3789 #endif /* CONFIG_CHELSIO_T4_FCOE */
3790 }
3791
3792 if (unlikely(pkt->vlan_ex)) {
3793 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
3794 rxq->stats.vlan_ex++;
3795 }
3796 skb_mark_napi_id(skb, &q->napi);
3797 netif_receive_skb(skb);
3798 return 0;
3799 }
3800
3801 /**
3802 * restore_rx_bufs - put back a packet's Rx buffers
3803 * @si: the packet gather list
3804 * @q: the SGE free list
3805 * @frags: number of FL buffers to restore
3806 *
3807 * Puts back on an FL the Rx buffers associated with @si. The buffers
3808 * have already been unmapped and are left unmapped, we mark them so to
3809 * prevent further unmapping attempts.
3810 *
3811 * This function undoes a series of @unmap_rx_buf calls when we find out
 * that the current packet can't be processed right away after all and we
3813 * need to come back to it later. This is a very rare event and there's
3814 * no effort to make this particularly efficient.
3815 */
static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
3817 int frags)
3818 {
3819 struct rx_sw_desc *d;
3820
3821 while (frags--) {
3822 if (q->cidx == 0)
3823 q->cidx = q->size - 1;
3824 else
3825 q->cidx--;
3826 d = &q->sdesc[q->cidx];
3827 d->page = si->frags[frags].page;
3828 d->dma_addr |= RX_UNMAPPED_BUF;
3829 q->avail++;
3830 }
3831 }
3832
3833 /**
3834 * is_new_response - check if a response is newly written
3835 * @r: the response descriptor
3836 * @q: the response queue
3837 *
3838 * Returns true if a response descriptor contains a yet unprocessed
3839 * response.
3840 */
static inline bool is_new_response(const struct rsp_ctrl *r,
3842 const struct sge_rspq *q)
3843 {
3844 return (r->type_gen >> RSPD_GEN_S) == q->gen;
3845 }
3846
3847 /**
3848 * rspq_next - advance to the next entry in a response queue
3849 * @q: the queue
3850 *
3851 * Updates the state of a response queue to advance it to the next entry.
3852 */
static inline void rspq_next(struct sge_rspq *q)
3854 {
3855 q->cur_desc = (void *)q->cur_desc + q->iqe_len;
3856 if (unlikely(++q->cidx == q->size)) {
3857 q->cidx = 0;
3858 q->gen ^= 1;
3859 q->cur_desc = q->desc;
3860 }
3861 }
3862
3863 /**
3864 * process_responses - process responses from an SGE response queue
3865 * @q: the ingress queue to process
3866 * @budget: how many responses can be processed in this round
3867 *
3868 * Process responses from an SGE response queue up to the supplied budget.
3869 * Responses include received packets as well as control messages from FW
3870 * or HW.
3871 *
3872 * Additionally choose the interrupt holdoff time for the next interrupt
3873 * on this queue. If the system is under memory shortage use a fairly
3874 * long delay to help recovery.
3875 */
static int process_responses(struct sge_rspq *q, int budget)
3877 {
3878 int ret, rsp_type;
3879 int budget_left = budget;
3880 const struct rsp_ctrl *rc;
3881 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
3882 struct adapter *adapter = q->adap;
3883 struct sge *s = &adapter->sge;
3884
3885 while (likely(budget_left)) {
3886 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
3887 if (!is_new_response(rc, q)) {
3888 if (q->flush_handler)
3889 q->flush_handler(q);
3890 break;
3891 }
3892
3893 dma_rmb();
3894 rsp_type = RSPD_TYPE_G(rc->type_gen);
3895 if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
3896 struct page_frag *fp;
3897 struct pkt_gl si;
3898 const struct rx_sw_desc *rsd;
3899 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
3900
3901 if (len & RSPD_NEWBUF_F) {
3902 if (likely(q->offset > 0)) {
3903 free_rx_bufs(q->adap, &rxq->fl, 1);
3904 q->offset = 0;
3905 }
3906 len = RSPD_LEN_G(len);
3907 }
3908 si.tot_len = len;
3909
3910 /* gather packet fragments */
3911 for (frags = 0, fp = si.frags; ; frags++, fp++) {
3912 rsd = &rxq->fl.sdesc[rxq->fl.cidx];
3913 bufsz = get_buf_size(adapter, rsd);
3914 fp->page = rsd->page;
3915 fp->offset = q->offset;
3916 fp->size = min(bufsz, len);
3917 len -= fp->size;
3918 if (!len)
3919 break;
3920 unmap_rx_buf(q->adap, &rxq->fl);
3921 }
3922
3923 si.sgetstamp = SGE_TIMESTAMP_G(
3924 be64_to_cpu(rc->last_flit));
3925 /*
3926 * Last buffer remains mapped so explicitly make it
3927 * coherent for CPU access.
3928 */
3929 dma_sync_single_for_cpu(q->adap->pdev_dev,
3930 get_buf_addr(rsd),
3931 fp->size, DMA_FROM_DEVICE);
3932
3933 si.va = page_address(si.frags[0].page) +
3934 si.frags[0].offset;
3935 prefetch(si.va);
3936
3937 si.nfrags = frags + 1;
3938 ret = q->handler(q, q->cur_desc, &si);
3939 if (likely(ret == 0))
3940 q->offset += ALIGN(fp->size, s->fl_align);
3941 else
3942 restore_rx_bufs(&si, &rxq->fl, frags);
3943 } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
3944 ret = q->handler(q, q->cur_desc, NULL);
3945 } else {
3946 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
3947 }
3948
3949 if (unlikely(ret)) {
3950 /* couldn't process descriptor, back off for recovery */
3951 q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
3952 break;
3953 }
3954
3955 rspq_next(q);
3956 budget_left--;
3957 }
3958
3959 if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
3960 __refill_fl(q->adap, &rxq->fl);
3961 return budget - budget_left;
3962 }
3963
3964 /**
3965 * napi_rx_handler - the NAPI handler for Rx processing
3966 * @napi: the napi instance
3967 * @budget: how many packets we can process in this round
3968 *
3969 * Handler for new data events when using NAPI. This does not need any
3970 * locking or protection from interrupts as data interrupts are off at
3971 * this point and other adapter interrupts do not interfere (the latter
3972 * in not a concern at all with MSI-X as non-data interrupts then have
 * is not a concern at all with MSI-X as non-data interrupts then have
3974 */
static int napi_rx_handler(struct napi_struct *napi, int budget)
3976 {
3977 unsigned int params;
3978 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
3979 int work_done;
3980 u32 val;
3981
3982 work_done = process_responses(q, budget);
3983 if (likely(work_done < budget)) {
3984 int timer_index;
3985
3986 napi_complete_done(napi, work_done);
3987 timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
3988
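		/* Adaptive Rx coalescing: if this poll consumed more packets
		 * than the current holdoff timer's quota, step up to the next
		 * (longer) timer index, otherwise step down to a shorter one,
		 * clamped to the valid range of SGE timer registers.
		 */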
3989 if (q->adaptive_rx) {
3990 if (work_done > max(timer_pkt_quota[timer_index],
3991 MIN_NAPI_WORK))
3992 timer_index = (timer_index + 1);
3993 else
3994 timer_index = timer_index - 1;
3995
3996 timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
3997 q->next_intr_params =
3998 QINTR_TIMER_IDX_V(timer_index) |
3999 QINTR_CNT_EN_V(0);
4000 params = q->next_intr_params;
4001 } else {
4002 params = q->next_intr_params;
4003 q->next_intr_params = q->intr_params;
4004 }
4005 } else
4006 params = QINTR_TIMER_IDX_V(7);
4007
4008 val = CIDXINC_V(work_done) | SEINTARM_V(params);
4009
4010 /* If we don't have access to the new User GTS (T5+), use the old
4011 * doorbell mechanism; otherwise use the new BAR2 mechanism.
4012 */
4013 if (unlikely(q->bar2_addr == NULL)) {
4014 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
4015 val | INGRESSQID_V((u32)q->cntxt_id));
4016 } else {
4017 writel(val | INGRESSQID_V(q->bar2_qid),
4018 q->bar2_addr + SGE_UDB_GTS);
4019 wmb();
4020 }
4021 return work_done;
4022 }
4023
void cxgb4_ethofld_restart(struct tasklet_struct *t)
4025 {
4026 struct sge_eosw_txq *eosw_txq = from_tasklet(eosw_txq, t,
4027 qresume_tsk);
4028 int pktcount;
4029
4030 spin_lock(&eosw_txq->lock);
4031 pktcount = eosw_txq->cidx - eosw_txq->last_cidx;
4032 if (pktcount < 0)
4033 pktcount += eosw_txq->ndesc;
4034
4035 if (pktcount) {
4036 cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev),
4037 eosw_txq, pktcount);
4038 eosw_txq->inuse -= pktcount;
4039 }
4040
4041 /* There may be some packets waiting for completions. So,
4042 * attempt to send these packets now.
4043 */
4044 ethofld_xmit(eosw_txq->netdev, eosw_txq);
4045 spin_unlock(&eosw_txq->lock);
4046 }
4047
4048 /* cxgb4_ethofld_rx_handler - Process ETHOFLD Tx completions
4049 * @q: the response queue that received the packet
4050 * @rsp: the response queue descriptor holding the CPL message
4051 * @si: the gather list of packet fragments
4052 *
 * Process an ETHOFLD Tx completion. Increment the cidx here, but
4054 * free up the descriptors in a tasklet later.
4055 */
int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
4057 const struct pkt_gl *si)
4058 {
4059 u8 opcode = ((const struct rss_header *)rsp)->opcode;
4060
4061 /* skip RSS header */
4062 rsp++;
4063
4064 if (opcode == CPL_FW4_ACK) {
4065 const struct cpl_fw4_ack *cpl;
4066 struct sge_eosw_txq *eosw_txq;
4067 struct eotid_entry *entry;
4068 struct sk_buff *skb;
4069 u32 hdr_len, eotid;
4070 u8 flits, wrlen16;
4071 int credits;
4072
4073 cpl = (const struct cpl_fw4_ack *)rsp;
4074 eotid = CPL_FW4_ACK_FLOWID_G(ntohl(OPCODE_TID(cpl))) -
4075 q->adap->tids.eotid_base;
4076 entry = cxgb4_lookup_eotid(&q->adap->tids, eotid);
4077 if (!entry)
4078 goto out_done;
4079
4080 eosw_txq = (struct sge_eosw_txq *)entry->data;
4081 if (!eosw_txq)
4082 goto out_done;
4083
4084 spin_lock(&eosw_txq->lock);
4085 credits = cpl->credits;
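		/* Credits are ACKed in units of 16 bytes of work request.
		 * Walk the completed skbs, recompute how many 16-byte chunks
		 * each one consumed, and retire them until the ACKed credits
		 * are used up.
		 */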
4086 while (credits > 0) {
4087 skb = eosw_txq->desc[eosw_txq->cidx].skb;
4088 if (!skb)
4089 break;
4090
4091 if (unlikely((eosw_txq->state ==
4092 CXGB4_EO_STATE_FLOWC_OPEN_REPLY ||
4093 eosw_txq->state ==
4094 CXGB4_EO_STATE_FLOWC_CLOSE_REPLY) &&
4095 eosw_txq->cidx == eosw_txq->flowc_idx)) {
4096 flits = DIV_ROUND_UP(skb->len, 8);
4097 if (eosw_txq->state ==
4098 CXGB4_EO_STATE_FLOWC_OPEN_REPLY)
4099 eosw_txq->state = CXGB4_EO_STATE_ACTIVE;
4100 else
4101 eosw_txq->state = CXGB4_EO_STATE_CLOSED;
4102 complete(&eosw_txq->completion);
4103 } else {
4104 hdr_len = eth_get_headlen(eosw_txq->netdev,
4105 skb->data,
4106 skb_headlen(skb));
4107 flits = ethofld_calc_tx_flits(q->adap, skb,
4108 hdr_len);
4109 }
4110 eosw_txq_advance_index(&eosw_txq->cidx, 1,
4111 eosw_txq->ndesc);
4112 wrlen16 = DIV_ROUND_UP(flits * 8, 16);
4113 credits -= wrlen16;
4114 }
4115
4116 eosw_txq->cred += cpl->credits;
4117 eosw_txq->ncompl--;
4118
4119 spin_unlock(&eosw_txq->lock);
4120
4121 /* Schedule a tasklet to reclaim SKBs and restart ETHOFLD Tx,
4122 * if there were packets waiting for completion.
4123 */
4124 tasklet_schedule(&eosw_txq->qresume_tsk);
4125 }
4126
4127 out_done:
4128 return 0;
4129 }
4130
4131 /*
4132 * The MSI-X interrupt handler for an SGE response queue.
4133 */
irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
4135 {
4136 struct sge_rspq *q = cookie;
4137
4138 napi_schedule(&q->napi);
4139 return IRQ_HANDLED;
4140 }
4141
4142 /*
4143 * Process the indirect interrupt entries in the interrupt queue and kick off
4144 * NAPI for each queue that has generated an entry.
4145 */
static unsigned int process_intrq(struct adapter *adap)
4147 {
4148 unsigned int credits;
4149 const struct rsp_ctrl *rc;
4150 struct sge_rspq *q = &adap->sge.intrq;
4151 u32 val;
4152
4153 spin_lock(&adap->sge.intrq_lock);
4154 for (credits = 0; ; credits++) {
4155 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
4156 if (!is_new_response(rc, q))
4157 break;
4158
4159 dma_rmb();
4160 if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
4161 unsigned int qid = ntohl(rc->pldbuflen_qid);
4162
4163 qid -= adap->sge.ingr_start;
4164 napi_schedule(&adap->sge.ingr_map[qid]->napi);
4165 }
4166
4167 rspq_next(q);
4168 }
4169
4170 val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
4171
4172 /* If we don't have access to the new User GTS (T5+), use the old
4173 * doorbell mechanism; otherwise use the new BAR2 mechanism.
4174 */
4175 if (unlikely(q->bar2_addr == NULL)) {
4176 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
4177 val | INGRESSQID_V(q->cntxt_id));
4178 } else {
4179 writel(val | INGRESSQID_V(q->bar2_qid),
4180 q->bar2_addr + SGE_UDB_GTS);
4181 wmb();
4182 }
4183 spin_unlock(&adap->sge.intrq_lock);
4184 return credits;
4185 }
4186
4187 /*
4188 * The MSI interrupt handler, which handles data events from SGE response queues
4189 * as well as error and other async events as they all use the same MSI vector.
4190 */
static irqreturn_t t4_intr_msi(int irq, void *cookie)
4192 {
4193 struct adapter *adap = cookie;
4194
4195 if (adap->flags & CXGB4_MASTER_PF)
4196 t4_slow_intr_handler(adap);
4197 process_intrq(adap);
4198 return IRQ_HANDLED;
4199 }
4200
4201 /*
4202 * Interrupt handler for legacy INTx interrupts.
4203 * Handles data events from SGE response queues as well as error and other
4204 * async events as they all use the same interrupt line.
4205 */
static irqreturn_t t4_intr_intx(int irq, void *cookie)
4207 {
4208 struct adapter *adap = cookie;
4209
4210 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
4211 if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) |
4212 process_intrq(adap))
4213 return IRQ_HANDLED;
4214 return IRQ_NONE; /* probably shared interrupt */
4215 }
4216
4217 /**
4218 * t4_intr_handler - select the top-level interrupt handler
4219 * @adap: the adapter
4220 *
4221 * Selects the top-level interrupt handler based on the type of interrupts
4222 * (MSI-X, MSI, or INTx).
4223 */
irq_handler_t t4_intr_handler(struct adapter *adap)
4225 {
4226 if (adap->flags & CXGB4_USING_MSIX)
4227 return t4_sge_intr_msix;
4228 if (adap->flags & CXGB4_USING_MSI)
4229 return t4_intr_msi;
4230 return t4_intr_intx;
4231 }
4232
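/* Periodic Rx timer: kick NAPI for any free lists that were marked as
 * starving so they get another chance to be replenished, and (on the
 * Master PF) run the SGE ingress DMA stall monitor.
 */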
static void sge_rx_timer_cb(struct timer_list *t)
4234 {
4235 unsigned long m;
4236 unsigned int i;
4237 struct adapter *adap = timer_container_of(adap, t, sge.rx_timer);
4238 struct sge *s = &adap->sge;
4239
4240 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
4241 for (m = s->starving_fl[i]; m; m &= m - 1) {
4242 struct sge_eth_rxq *rxq;
4243 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
4244 struct sge_fl *fl = s->egr_map[id];
4245
4246 clear_bit(id, s->starving_fl);
4247 smp_mb__after_atomic();
4248
4249 if (fl_starving(adap, fl)) {
4250 rxq = container_of(fl, struct sge_eth_rxq, fl);
4251 if (napi_schedule(&rxq->rspq.napi))
4252 fl->starving++;
4253 else
4254 set_bit(id, s->starving_fl);
4255 }
4256 }
4257 /* The remainder of the SGE RX Timer Callback routine is dedicated to
4258 * global Master PF activities like checking for chip ingress stalls,
4259 * etc.
4260 */
4261 if (!(adap->flags & CXGB4_MASTER_PF))
4262 goto done;
4263
4264 t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
4265
4266 done:
4267 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
4268 }
4269
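/* Periodic Tx timer: restart any offload Tx queues that stopped on a DMA
 * mapping error, reclaim completed PTP Tx descriptors, and spend up to
 * MAX_TIMER_TX_RECLAIM descriptors of budget reclaiming completed Ethernet
 * Tx work, resuming from where the previous run left off.
 */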
static void sge_tx_timer_cb(struct timer_list *t)
4271 {
4272 struct adapter *adap = timer_container_of(adap, t, sge.tx_timer);
4273 struct sge *s = &adap->sge;
4274 unsigned long m, period;
4275 unsigned int i, budget;
4276
4277 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
4278 for (m = s->txq_maperr[i]; m; m &= m - 1) {
4279 unsigned long id = __ffs(m) + i * BITS_PER_LONG;
4280 struct sge_uld_txq *txq = s->egr_map[id];
4281
4282 clear_bit(id, s->txq_maperr);
4283 tasklet_schedule(&txq->qresume_tsk);
4284 }
4285
4286 if (!is_t4(adap->params.chip)) {
4287 struct sge_eth_txq *q = &s->ptptxq;
4288 int avail;
4289
4290 spin_lock(&adap->ptp_lock);
4291 avail = reclaimable(&q->q);
4292
4293 if (avail) {
4294 free_tx_desc(adap, &q->q, avail, false);
4295 q->q.in_use -= avail;
4296 }
4297 spin_unlock(&adap->ptp_lock);
4298 }
4299
4300 budget = MAX_TIMER_TX_RECLAIM;
4301 i = s->ethtxq_rover;
4302 do {
4303 budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i],
4304 budget);
4305 if (!budget)
4306 break;
4307
4308 if (++i >= s->ethqsets)
4309 i = 0;
4310 } while (i != s->ethtxq_rover);
4311 s->ethtxq_rover = i;
4312
4313 if (budget == 0) {
4314 /* If we found too many reclaimable packets schedule a timer
4315 * in the near future to continue where we left off.
4316 */
4317 period = 2;
4318 } else {
4319 /* We reclaimed all reclaimable TX Descriptors, so reschedule
4320 * at the normal period.
4321 */
4322 period = TX_QCHECK_PERIOD;
4323 }
4324
4325 mod_timer(&s->tx_timer, jiffies + period);
4326 }
4327
4328 /**
4329 * bar2_address - return the BAR2 address for an SGE Queue's Registers
4330 * @adapter: the adapter
4331 * @qid: the SGE Queue ID
4332 * @qtype: the SGE Queue Type (Egress or Ingress)
4333 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
4334 *
4335 * Returns the BAR2 address for the SGE Queue Registers associated with
4336 * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
4337 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
4338 * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
4339 * Registers are supported (e.g. the Write Combining Doorbell Buffer).
4340 */
static void __iomem *bar2_address(struct adapter *adapter,
4342 unsigned int qid,
4343 enum t4_bar2_qtype qtype,
4344 unsigned int *pbar2_qid)
4345 {
4346 u64 bar2_qoffset;
4347 int ret;
4348
4349 ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0,
4350 &bar2_qoffset, pbar2_qid);
4351 if (ret)
4352 return NULL;
4353
4354 return adapter->bar2 + bar2_qoffset;
4355 }
4356
4357 /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
4358 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
4359 */
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
4361 struct net_device *dev, int intr_idx,
4362 struct sge_fl *fl, rspq_handler_t hnd,
4363 rspq_flush_handler_t flush_hnd, int cong)
4364 {
4365 int ret, flsz = 0;
4366 struct fw_iq_cmd c;
4367 struct sge *s = &adap->sge;
4368 struct port_info *pi = netdev_priv(dev);
4369 int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING);
4370
4371 /* Size needs to be multiple of 16, including status entry. */
4372 iq->size = roundup(iq->size, 16);
4373
4374 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
4375 &iq->phys_addr, NULL, 0,
4376 dev_to_node(adap->pdev_dev));
4377 if (!iq->desc)
4378 return -ENOMEM;
4379
4380 memset(&c, 0, sizeof(c));
4381 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
4382 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
4383 FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
4384 c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
4385 FW_LEN16(c));
4386 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
4387 FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
4388 FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
4389 FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
4390 FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
4391 -intr_idx - 1));
4392 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
4393 FW_IQ_CMD_IQGTSMODE_F |
4394 FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
4395 FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
4396 c.iqsize = htons(iq->size);
4397 c.iqaddr = cpu_to_be64(iq->phys_addr);
4398 if (cong >= 0)
4399 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F |
4400 FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC
4401 : FW_IQ_IQTYPE_OFLD));
4402
4403 if (fl) {
4404 unsigned int chip_ver =
4405 CHELSIO_CHIP_VERSION(adap->params.chip);
4406
4407 /* Allocate the ring for the hardware free list (with space
4408 * for its status page) along with the associated software
4409 * descriptor ring. The free list size needs to be a multiple
4410 * of the Egress Queue Unit and at least 2 Egress Units larger
		 * than the SGE's Egress Congestion Threshold
4412 * (fl_starve_thres - 1).
4413 */
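		/* For example, with a hypothetical fl_starve_thres of 64 the
		 * floor enforced below is 64 - 1 + 16 = 79 entries, which the
		 * subsequent roundup to a multiple of 8 raises to 80.
		 */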
4414 if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
4415 fl->size = s->fl_starve_thres - 1 + 2 * 8;
4416 fl->size = roundup(fl->size, 8);
4417 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
4418 sizeof(struct rx_sw_desc), &fl->addr,
4419 &fl->sdesc, s->stat_len,
4420 dev_to_node(adap->pdev_dev));
4421 if (!fl->desc)
4422 goto fl_nomem;
4423
4424 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
4425 c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
4426 FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
4427 FW_IQ_CMD_FL0DATARO_V(relaxed) |
4428 FW_IQ_CMD_FL0PADEN_F);
4429 if (cong >= 0)
4430 c.iqns_to_fl0congen |=
4431 htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
4432 FW_IQ_CMD_FL0CONGCIF_F |
4433 FW_IQ_CMD_FL0CONGEN_F);
4434 /* In T6, for egress queue type FL there is internal overhead
4435 * of 16B for header going into FLM module. Hence the maximum
4436 * allowed burst size is 448 bytes. For T4/T5, the hardware
4437 * doesn't coalesce fetch requests if more than 64 bytes of
4438 * Free List pointers are provided, so we use a 128-byte Fetch
4439 * Burst Minimum there (T6 implements coalescing so we can use
4440 * the smaller 64-byte value there).
4441 */
4442 c.fl0dcaen_to_fl0cidxfthresh =
4443 htons(FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5 ?
4444 FETCHBURSTMIN_128B_X :
4445 FETCHBURSTMIN_64B_T6_X) |
4446 FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ?
4447 FETCHBURSTMAX_512B_X :
4448 FETCHBURSTMAX_256B_X));
4449 c.fl0size = htons(flsz);
4450 c.fl0addr = cpu_to_be64(fl->addr);
4451 }
4452
4453 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4454 if (ret)
4455 goto err;
4456
4457 netif_napi_add(dev, &iq->napi, napi_rx_handler);
4458 iq->cur_desc = iq->desc;
4459 iq->cidx = 0;
4460 iq->gen = 1;
4461 iq->next_intr_params = iq->intr_params;
4462 iq->cntxt_id = ntohs(c.iqid);
4463 iq->abs_id = ntohs(c.physiqid);
4464 iq->bar2_addr = bar2_address(adap,
4465 iq->cntxt_id,
4466 T4_BAR2_QTYPE_INGRESS,
4467 &iq->bar2_qid);
4468 iq->size--; /* subtract status entry */
4469 iq->netdev = dev;
4470 iq->handler = hnd;
4471 iq->flush_handler = flush_hnd;
4472
4473 memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr));
4474 skb_queue_head_init(&iq->lro_mgr.lroq);
4475
4476 /* set offset to -1 to distinguish ingress queues without FL */
4477 iq->offset = fl ? 0 : -1;
4478
4479 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
4480
4481 if (fl) {
4482 fl->cntxt_id = ntohs(c.fl0id);
4483 fl->avail = fl->pend_cred = 0;
4484 fl->pidx = fl->cidx = 0;
4485 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
4486 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
4487
4488 /* Note, we must initialize the BAR2 Free List User Doorbell
4489 * information before refilling the Free List!
4490 */
4491 fl->bar2_addr = bar2_address(adap,
4492 fl->cntxt_id,
4493 T4_BAR2_QTYPE_EGRESS,
4494 &fl->bar2_qid);
4495 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
4496 }
4497
4498 /* For T5 and later we attempt to set up the Congestion Manager values
4499 * of the new RX Ethernet Queue. This should really be handled by
4500 * firmware because it's more complex than any host driver wants to
4501 * get involved with and it's different per chip and this is almost
4502 * certainly wrong. Firmware would be wrong as well, but it would be
4503 * a lot easier to fix in one place ... For now we do something very
4504 * simple (and hopefully less wrong).
4505 */
4506 if (!is_t4(adap->params.chip) && cong >= 0) {
4507 u32 param, val, ch_map = 0;
4508 int i;
4509 u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;
4510
4511 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
4512 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
4513 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id));
4514 if (cong == 0) {
4515 val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X);
4516 } else {
4517 val =
4518 CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
4519 for (i = 0; i < 4; i++) {
4520 if (cong & (1 << i))
4521 ch_map |= 1 << (i << cng_ch_bits_log);
4522 }
4523 val |= CONMCTXT_CNGCHMAP_V(ch_map);
4524 }
4525 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
				    &param, &val);
4527 if (ret)
4528 dev_warn(adap->pdev_dev, "Failed to set Congestion"
4529 " Manager Context for Ingress Queue %d: %d\n",
4530 iq->cntxt_id, -ret);
4531 }
4532
4533 return 0;
4534
4535 fl_nomem:
4536 ret = -ENOMEM;
4537 err:
4538 if (iq->desc) {
4539 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
4540 iq->desc, iq->phys_addr);
4541 iq->desc = NULL;
4542 }
4543 if (fl && fl->desc) {
4544 kfree(fl->sdesc);
4545 fl->sdesc = NULL;
4546 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
4547 fl->desc, fl->addr);
4548 fl->desc = NULL;
4549 }
4550 return ret;
4551 }
4552
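/* Common initialization of a newly allocated Tx queue: record its hardware
 * Egress Queue ID, look up its BAR2 doorbell address, reset the software
 * producer/consumer state, locate the status page at the end of the ring and
 * hook the queue into the reverse egress-queue map.
 */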
static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
4554 {
4555 q->cntxt_id = id;
4556 q->bar2_addr = bar2_address(adap,
4557 q->cntxt_id,
4558 T4_BAR2_QTYPE_EGRESS,
4559 &q->bar2_qid);
4560 q->in_use = 0;
4561 q->cidx = q->pidx = 0;
4562 q->stops = q->restarts = 0;
4563 q->stat = (void *)&q->desc[q->size];
4564 spin_lock_init(&q->db_lock);
4565 adap->sge.egr_map[id - adap->sge.egr_start] = q;
4566 }
4567
4568 /**
4569 * t4_sge_alloc_eth_txq - allocate an Ethernet TX Queue
4570 * @adap: the adapter
4571 * @txq: the SGE Ethernet TX Queue to initialize
4572 * @dev: the Linux Network Device
4573 * @netdevq: the corresponding Linux TX Queue
4574 * @iqid: the Ingress Queue to which to deliver CIDX Update messages
4575 * @dbqt: whether this TX Queue will use the SGE Doorbell Queue Timers
4576 */
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
4578 struct net_device *dev, struct netdev_queue *netdevq,
4579 unsigned int iqid, u8 dbqt)
4580 {
4581 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
4582 struct port_info *pi = netdev_priv(dev);
4583 struct sge *s = &adap->sge;
4584 struct fw_eq_eth_cmd c;
4585 int ret, nentries;
4586
4587 /* Add status entries */
4588 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
4589
4590 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
4591 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
4592 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
4593 netdev_queue_numa_node_read(netdevq));
4594 if (!txq->q.desc)
4595 return -ENOMEM;
4596
4597 memset(&c, 0, sizeof(c));
4598 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
4599 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
4600 FW_EQ_ETH_CMD_PFN_V(adap->pf) |
4601 FW_EQ_ETH_CMD_VFN_V(0));
4602 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
4603 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
4604
4605 /* For TX Ethernet Queues using the SGE Doorbell Queue Timer
4606 * mechanism, we use Ingress Queue messages for Hardware Consumer
4607 * Index Updates on the TX Queue. Otherwise we have the Hardware
4608 * write the CIDX Updates into the Status Page at the end of the
4609 * TX Queue.
4610 */
4611 c.autoequiqe_to_viid = htonl(((chip_ver <= CHELSIO_T5) ?
4612 FW_EQ_ETH_CMD_AUTOEQUIQE_F :
4613 FW_EQ_ETH_CMD_AUTOEQUEQE_F) |
4614 FW_EQ_ETH_CMD_VIID_V(pi->viid));
4615
4616 c.fetchszm_to_iqid =
4617 htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V((chip_ver <= CHELSIO_T5) ?
4618 HOSTFCMODE_INGRESS_QUEUE_X :
4619 HOSTFCMODE_STATUS_PAGE_X) |
4620 FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
4621 FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
4622
4623 /* Note that the CIDX Flush Threshold should match MAX_TX_RECLAIM. */
4624 c.dcaen_to_eqsize =
4625 htonl(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
4626 ? FETCHBURSTMIN_64B_X
4627 : FETCHBURSTMIN_64B_T6_X) |
4628 FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
4629 FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
4630 FW_EQ_ETH_CMD_CIDXFTHRESHO_V(chip_ver == CHELSIO_T5) |
4631 FW_EQ_ETH_CMD_EQSIZE_V(nentries));
4632
4633 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
4634
4635 /* If we're using the SGE Doorbell Queue Timer mechanism, pass in the
	 * currently configured Timer Index.  This can be changed later via an
4637 * ethtool -C tx-usecs {Timer Val} command. Note that the SGE
4638 * Doorbell Queue mode is currently automatically enabled in the
4639 * Firmware by setting either AUTOEQUEQE or AUTOEQUIQE ...
4640 */
4641 if (dbqt)
4642 c.timeren_timerix =
4643 cpu_to_be32(FW_EQ_ETH_CMD_TIMEREN_F |
4644 FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix));
4645
4646 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4647 if (ret) {
4648 kfree(txq->q.sdesc);
4649 txq->q.sdesc = NULL;
4650 dma_free_coherent(adap->pdev_dev,
4651 nentries * sizeof(struct tx_desc),
4652 txq->q.desc, txq->q.phys_addr);
4653 txq->q.desc = NULL;
4654 return ret;
4655 }
4656
4657 txq->q.q_type = CXGB4_TXQ_ETH;
4658 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
4659 txq->txq = netdevq;
4660 txq->tso = 0;
4661 txq->uso = 0;
4662 txq->tx_cso = 0;
4663 txq->vlan_ins = 0;
4664 txq->mapping_err = 0;
4665 txq->dbqt = dbqt;
4666
4667 return 0;
4668 }
4669
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
4671 struct net_device *dev, unsigned int iqid,
4672 unsigned int cmplqid)
4673 {
4674 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
4675 struct port_info *pi = netdev_priv(dev);
4676 struct sge *s = &adap->sge;
4677 struct fw_eq_ctrl_cmd c;
4678 int ret, nentries;
4679
4680 /* Add status entries */
4681 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
4682
4683 txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
4684 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
4685 NULL, 0, dev_to_node(adap->pdev_dev));
4686 if (!txq->q.desc)
4687 return -ENOMEM;
4688
4689 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
4690 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
4691 FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
4692 FW_EQ_CTRL_CMD_VFN_V(0));
4693 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
4694 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
4695 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
4696 c.physeqid_pkd = htonl(0);
4697 c.fetchszm_to_iqid =
4698 htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
4699 FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
4700 FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
4701 c.dcaen_to_eqsize =
4702 htonl(FW_EQ_CTRL_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
4703 ? FETCHBURSTMIN_64B_X
4704 : FETCHBURSTMIN_64B_T6_X) |
4705 FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
4706 FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
4707 FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
4708 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
4709
4710 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4711 if (ret) {
4712 dma_free_coherent(adap->pdev_dev,
4713 nentries * sizeof(struct tx_desc),
4714 txq->q.desc, txq->q.phys_addr);
4715 txq->q.desc = NULL;
4716 return ret;
4717 }
4718
4719 txq->q.q_type = CXGB4_TXQ_CTRL;
4720 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
4721 txq->adap = adap;
4722 skb_queue_head_init(&txq->sendq);
4723 tasklet_setup(&txq->qresume_tsk, restart_ctrlq);
4724 txq->full = 0;
4725 return 0;
4726 }
4727
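/* Rebind an existing control Tx queue (@eqid) to a new completion ingress
 * queue (@cmplqid) via the DMAQ_EQ_CMPLIQID_CTRL firmware parameter.
 */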
int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
4729 unsigned int cmplqid)
4730 {
4731 u32 param, val;
4732
4733 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
4734 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
4735 FW_PARAMS_PARAM_YZ_V(eqid));
4736 val = cmplqid;
	return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
4738 }
4739
static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q,
4741 struct net_device *dev, u32 cmd, u32 iqid)
4742 {
4743 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
4744 struct port_info *pi = netdev_priv(dev);
4745 struct sge *s = &adap->sge;
4746 struct fw_eq_ofld_cmd c;
4747 u32 fb_min, nentries;
4748 int ret;
4749
4750 /* Add status entries */
4751 nentries = q->size + s->stat_len / sizeof(struct tx_desc);
4752 q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc),
4753 sizeof(struct tx_sw_desc), &q->phys_addr,
4754 &q->sdesc, s->stat_len, NUMA_NO_NODE);
4755 if (!q->desc)
4756 return -ENOMEM;
4757
4758 if (chip_ver <= CHELSIO_T5)
4759 fb_min = FETCHBURSTMIN_64B_X;
4760 else
4761 fb_min = FETCHBURSTMIN_64B_T6_X;
4762
4763 memset(&c, 0, sizeof(c));
4764 c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
4765 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
4766 FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
4767 FW_EQ_OFLD_CMD_VFN_V(0));
4768 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
4769 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
4770 c.fetchszm_to_iqid =
4771 htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
4772 FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
4773 FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
4774 c.dcaen_to_eqsize =
4775 htonl(FW_EQ_OFLD_CMD_FBMIN_V(fb_min) |
4776 FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
4777 FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
4778 FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
4779 c.eqaddr = cpu_to_be64(q->phys_addr);
4780
4781 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4782 if (ret) {
4783 kfree(q->sdesc);
4784 q->sdesc = NULL;
4785 dma_free_coherent(adap->pdev_dev,
4786 nentries * sizeof(struct tx_desc),
4787 q->desc, q->phys_addr);
4788 q->desc = NULL;
4789 return ret;
4790 }
4791
4792 init_txq(adap, q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
4793 return 0;
4794 }
4795
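/* Allocate a ULD Tx queue.  Crypto ULD queues are created as control-type
 * Egress Queues, all other ULDs get ordinary offload Egress Queues; the
 * software state set up afterwards is the same for both.
 */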
int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
4797 struct net_device *dev, unsigned int iqid,
4798 unsigned int uld_type)
4799 {
4800 u32 cmd = FW_EQ_OFLD_CMD;
4801 int ret;
4802
4803 if (unlikely(uld_type == CXGB4_TX_CRYPTO))
4804 cmd = FW_EQ_CTRL_CMD;
4805
4806 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid);
4807 if (ret)
4808 return ret;
4809
4810 txq->q.q_type = CXGB4_TXQ_ULD;
4811 txq->adap = adap;
4812 skb_queue_head_init(&txq->sendq);
4813 tasklet_setup(&txq->qresume_tsk, restart_ofldq);
4814 txq->full = 0;
4815 txq->mapping_err = 0;
4816 return 0;
4817 }
4818
int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
4820 struct net_device *dev, u32 iqid)
4821 {
4822 int ret;
4823
4824 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid);
4825 if (ret)
4826 return ret;
4827
4828 txq->q.q_type = CXGB4_TXQ_ULD;
4829 spin_lock_init(&txq->lock);
4830 txq->adap = adap;
4831 txq->tso = 0;
4832 txq->uso = 0;
4833 txq->tx_cso = 0;
4834 txq->vlan_ins = 0;
4835 txq->mapping_err = 0;
4836 return 0;
4837 }
4838
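/* Release a Tx queue's descriptor ring (including its trailing status page)
 * and clear the associated software state.  Freeing any software descriptor
 * array is the caller's responsibility.
 */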
void free_txq(struct adapter *adap, struct sge_txq *q)
4840 {
4841 struct sge *s = &adap->sge;
4842
4843 dma_free_coherent(adap->pdev_dev,
4844 q->size * sizeof(struct tx_desc) + s->stat_len,
4845 q->desc, q->phys_addr);
4846 q->cntxt_id = 0;
4847 q->sdesc = NULL;
4848 q->desc = NULL;
4849 }
4850
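/* Free an ingress (response) queue and, if present, its associated free list:
 * ask the firmware to release the hardware queues, free any remaining Rx
 * buffers and the DMA rings, and tear down the NAPI instance.
 */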
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
4852 struct sge_fl *fl)
4853 {
4854 struct sge *s = &adap->sge;
4855 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
4856
4857 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
4858 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
4859 rq->cntxt_id, fl_id, 0xffff);
4860 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
4861 rq->desc, rq->phys_addr);
4862 netif_napi_del(&rq->napi);
4863 rq->netdev = NULL;
4864 rq->cntxt_id = rq->abs_id = 0;
4865 rq->desc = NULL;
4866
4867 if (fl) {
4868 free_rx_bufs(adap, fl, fl->avail);
4869 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
4870 fl->desc, fl->addr);
4871 kfree(fl->sdesc);
4872 fl->sdesc = NULL;
4873 fl->cntxt_id = 0;
4874 fl->desc = NULL;
4875 }
4876 }
4877
void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq)
4879 {
4880 if (txq->q.desc) {
4881 t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
4882 txq->q.cntxt_id);
4883 free_tx_desc(adap, &txq->q, txq->q.in_use, false);
4884 kfree(txq->q.sdesc);
4885 free_txq(adap, &txq->q);
4886 }
4887 }
4888
4889 /**
4890 * t4_free_sge_resources - free SGE resources
4891 * @adap: the adapter
4892 *
4893 * Frees resources used by the SGE queue sets.
4894 */
void t4_free_sge_resources(struct adapter *adap)
4896 {
4897 int i;
4898 struct sge_eth_rxq *eq;
4899 struct sge_eth_txq *etq;
4900
4901 /* stop all Rx queues in order to start them draining */
4902 for (i = 0; i < adap->sge.ethqsets; i++) {
4903 eq = &adap->sge.ethrxq[i];
4904 if (eq->rspq.desc)
4905 t4_iq_stop(adap, adap->mbox, adap->pf, 0,
4906 FW_IQ_TYPE_FL_INT_CAP,
4907 eq->rspq.cntxt_id,
4908 eq->fl.size ? eq->fl.cntxt_id : 0xffff,
4909 0xffff);
4910 }
4911
4912 /* clean up Ethernet Tx/Rx queues */
4913 for (i = 0; i < adap->sge.ethqsets; i++) {
4914 eq = &adap->sge.ethrxq[i];
4915 if (eq->rspq.desc)
4916 free_rspq_fl(adap, &eq->rspq,
4917 eq->fl.size ? &eq->fl : NULL);
4918 if (eq->msix) {
4919 cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx);
4920 eq->msix = NULL;
4921 }
4922
4923 etq = &adap->sge.ethtxq[i];
4924 if (etq->q.desc) {
4925 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
4926 etq->q.cntxt_id);
4927 __netif_tx_lock_bh(etq->txq);
4928 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
4929 __netif_tx_unlock_bh(etq->txq);
4930 kfree(etq->q.sdesc);
4931 free_txq(adap, &etq->q);
4932 }
4933 }
4934
4935 /* clean up control Tx queues */
4936 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
4937 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
4938
4939 if (cq->q.desc) {
4940 tasklet_kill(&cq->qresume_tsk);
4941 t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
4942 cq->q.cntxt_id);
4943 __skb_queue_purge(&cq->sendq);
4944 free_txq(adap, &cq->q);
4945 }
4946 }
4947
4948 if (adap->sge.fw_evtq.desc) {
4949 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
4950 if (adap->sge.fwevtq_msix_idx >= 0)
4951 cxgb4_free_msix_idx_in_bmap(adap,
4952 adap->sge.fwevtq_msix_idx);
4953 }
4954
4955 if (adap->sge.nd_msix_idx >= 0)
4956 cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx);
4957
4958 if (adap->sge.intrq.desc)
4959 free_rspq_fl(adap, &adap->sge.intrq, NULL);
4960
4961 if (!is_t4(adap->params.chip)) {
4962 etq = &adap->sge.ptptxq;
4963 if (etq->q.desc) {
4964 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
4965 etq->q.cntxt_id);
4966 spin_lock_bh(&adap->ptp_lock);
4967 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
4968 spin_unlock_bh(&adap->ptp_lock);
4969 kfree(etq->q.sdesc);
4970 free_txq(adap, &etq->q);
4971 }
4972 }
4973
4974 /* clear the reverse egress queue map */
4975 memset(adap->sge.egr_map, 0,
4976 adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
4977 }
4978
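/* Start the SGE housekeeping timers that replenish starving free lists and
 * reclaim completed Tx descriptors.
 */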
void t4_sge_start(struct adapter *adap)
4980 {
4981 adap->sge.ethtxq_rover = 0;
4982 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
4983 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
4984 }
4985
4986 /**
4987 * t4_sge_stop - disable SGE operation
4988 * @adap: the adapter
4989 *
4990 * Stop tasklets and timers associated with the DMA engine. Note that
4991 * this is effective only if measures have been taken to disable any HW
4992 * events that may restart them.
4993 */
void t4_sge_stop(struct adapter *adap)
4995 {
4996 int i;
4997 struct sge *s = &adap->sge;
4998
4999 if (s->rx_timer.function)
5000 timer_delete_sync(&s->rx_timer);
5001 if (s->tx_timer.function)
5002 timer_delete_sync(&s->tx_timer);
5003
5004 if (is_offload(adap)) {
5005 struct sge_uld_txq_info *txq_info;
5006
5007 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
5008 if (txq_info) {
5009 struct sge_uld_txq *txq = txq_info->uldtxq;
5010
5011 for_each_ofldtxq(&adap->sge, i) {
5012 if (txq->q.desc)
5013 tasklet_kill(&txq->qresume_tsk);
5014 }
5015 }
5016 }
5017
5018 if (is_pci_uld(adap)) {
5019 struct sge_uld_txq_info *txq_info;
5020
5021 txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
5022 if (txq_info) {
5023 struct sge_uld_txq *txq = txq_info->uldtxq;
5024
5025 for_each_ofldtxq(&adap->sge, i) {
5026 if (txq->q.desc)
5027 tasklet_kill(&txq->qresume_tsk);
5028 }
5029 }
5030 }
5031
5032 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
5033 struct sge_ctrl_txq *cq = &s->ctrlq[i];
5034
5035 if (cq->q.desc)
5036 tasklet_kill(&cq->qresume_tsk);
5037 }
5038 }
5039
5040 /**
5041 * t4_sge_init_soft - grab core SGE values needed by SGE code
5042 * @adap: the adapter
5043 *
 *	Grab the SGE operating parameters that we need to do our job and
 *	make sure we can live with them.
5046 */
5047
static int t4_sge_init_soft(struct adapter *adap)
5049 {
5050 struct sge *s = &adap->sge;
5051 u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
5052 u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
5053 u32 ingress_rx_threshold;
5054
5055 /*
5056 * Verify that CPL messages are going to the Ingress Queue for
5057 * process_responses() and that only packet data is going to the
5058 * Free Lists.
5059 */
5060 if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
5061 RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
5062 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
5063 return -EINVAL;
5064 }
5065
5066 /*
5067 * Validate the Host Buffer Register Array indices that we want to
5068 * use ...
5069 *
5070 * XXX Note that we should really read through the Host Buffer Size
5071 * XXX register array and find the indices of the Buffer Sizes which
5072 * XXX meet our needs!
5073 */
5074 #define READ_FL_BUF(x) \
5075 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
5076
5077 fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
5078 fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
5079 fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
5080 fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
5081
5082 /* We only bother using the Large Page logic if the Large Page Buffer
5083 * is larger than our Page Size Buffer.
5084 */
5085 if (fl_large_pg <= fl_small_pg)
5086 fl_large_pg = 0;
5087
5088 #undef READ_FL_BUF
5089
5090 /* The Page Size Buffer must be exactly equal to our Page Size and the
5091 * Large Page Size Buffer should be 0 (per above) or a power of 2.
5092 */
5093 if (fl_small_pg != PAGE_SIZE ||
5094 (fl_large_pg & (fl_large_pg-1)) != 0) {
5095 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
5096 fl_small_pg, fl_large_pg);
5097 return -EINVAL;
5098 }
5099 if (fl_large_pg)
5100 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
5101
5102 if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
5103 fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
5104 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
5105 fl_small_mtu, fl_large_mtu);
5106 return -EINVAL;
5107 }
5108
5109 /*
5110 * Retrieve our RX interrupt holdoff timer values and counter
5111 * threshold values from the SGE parameters.
5112 */
5113 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
5114 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
5115 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
5116 s->timer_val[0] = core_ticks_to_us(adap,
5117 TIMERVALUE0_G(timer_value_0_and_1));
5118 s->timer_val[1] = core_ticks_to_us(adap,
5119 TIMERVALUE1_G(timer_value_0_and_1));
5120 s->timer_val[2] = core_ticks_to_us(adap,
5121 TIMERVALUE2_G(timer_value_2_and_3));
5122 s->timer_val[3] = core_ticks_to_us(adap,
5123 TIMERVALUE3_G(timer_value_2_and_3));
5124 s->timer_val[4] = core_ticks_to_us(adap,
5125 TIMERVALUE4_G(timer_value_4_and_5));
5126 s->timer_val[5] = core_ticks_to_us(adap,
5127 TIMERVALUE5_G(timer_value_4_and_5));
5128
5129 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
5130 s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
5131 s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
5132 s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
5133 s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
5134
5135 return 0;
5136 }
5137
5138 /**
5139 * t4_sge_init - initialize SGE
5140 * @adap: the adapter
5141 *
5142 * Perform low-level SGE code initialization needed every time after a
5143 * chip reset.
5144 */
int t4_sge_init(struct adapter *adap)
5146 {
5147 struct sge *s = &adap->sge;
5148 u32 sge_control, sge_conm_ctrl;
5149 int ret, egress_threshold;
5150
5151 /*
5152 * Ingress Padding Boundary and Egress Status Page Size are set up by
5153 * t4_fixup_host_params().
5154 */
5155 sge_control = t4_read_reg(adap, SGE_CONTROL_A);
5156 s->pktshift = PKTSHIFT_G(sge_control);
5157 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
5158
5159 s->fl_align = t4_fl_pkt_align(adap);
5160 ret = t4_sge_init_soft(adap);
5161 if (ret < 0)
5162 return ret;
5163
5164 /*
5165 * A FL with <= fl_starve_thres buffers is starving and a periodic
5166 * timer will attempt to refill it. This needs to be larger than the
5167 * SGE's Egress Congestion Threshold. If it isn't, then we can get
5168 * stuck waiting for new packets while the SGE is waiting for us to
5169 * give it more Free List entries. (Note that the SGE's Egress
5170 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
5171 * there was only a single field to control this. For T5 there's the
5172 * original field which now only applies to Unpacked Mode Free List
5173 * buffers and a new field which only applies to Packed Mode Free List
5174 * buffers.
5175 */
5176 sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
5177 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
5178 case CHELSIO_T4:
5179 egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
5180 break;
5181 case CHELSIO_T5:
5182 egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
5183 break;
5184 case CHELSIO_T6:
5185 egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
5186 break;
5187 default:
5188 dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
5189 CHELSIO_CHIP_VERSION(adap->params.chip));
5190 return -EINVAL;
5191 }
5192 s->fl_starve_thres = 2*egress_threshold + 1;
5193
5194 t4_idma_monitor_init(adap, &s->idma_monitor);
5195
	/* Set up timers used for recurring callbacks to process RX and TX
5197 * administrative tasks.
5198 */
5199 timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
5200 timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);
5201
5202 spin_lock_init(&s->intrq_lock);
5203
5204 return 0;
5205 }
5206