xref: /linux/drivers/net/ethernet/netronome/nfp/nfdk/rings.c (revision e7d759f31ca295d589f7420719c311870bb3166f)
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */

#include <linux/seq_file.h>

#include "../nfp_net.h"
#include "../nfp_net_dp.h"
#include "nfdk.h"

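/**
 * nfp_nfdk_tx_ring_reset() - Free any untransmitted buffers and reset pointers
 * @dp:		NFP Net data path struct
 * @tx_ring:	TX ring structure
 *
 * Assumes that the device is stopped; must be idempotent.
 */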
static void
nfp_nfdk_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	struct device *dev = dp->dev;
	struct netdev_queue *nd_q;

	while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
		const skb_frag_t *frag, *fend;
		unsigned int size, n_descs = 1;
		struct nfp_nfdk_tx_buf *txbuf;
		int nr_frags, rd_idx;
		struct sk_buff *skb;

		rd_idx = D_IDX(tx_ring, tx_ring->rd_p);
		txbuf = &tx_ring->ktxbufs[rd_idx];

		skb = txbuf->skb;
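		/* Slots without an skb hold padding descriptors written to
		 * close out a descriptor block; skip ahead to the next
		 * block boundary.
		 */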
		if (!skb) {
			n_descs = D_BLOCK_CPL(tx_ring->rd_p);
			goto next;
		}

		nr_frags = skb_shinfo(skb)->nr_frags;
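		/* The first buffer entry holds the skb pointer; the entries
		 * that follow carry the DMA addresses of the packet head and
		 * of each fragment.
		 */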
		txbuf++;

		/* Unmap head */
		size = skb_headlen(skb);
		dma_unmap_single(dev, txbuf->dma_addr, size, DMA_TO_DEVICE);
		n_descs += nfp_nfdk_headlen_to_segs(size);
		txbuf++;

		frag = skb_shinfo(skb)->frags;
		fend = frag + nr_frags;
		for (; frag < fend; frag++) {
			size = skb_frag_size(frag);
			dma_unmap_page(dev, txbuf->dma_addr, size,
				       DMA_TO_DEVICE);
			n_descs += DIV_ROUND_UP(size,
						NFDK_TX_MAX_DATA_PER_DESC);
			txbuf++;
		}

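		/* A GSO packet consumes one additional descriptor for the
		 * TSO metadata.
		 */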
		if (skb_is_gso(skb))
			n_descs++;

		dev_kfree_skb_any(skb);
next:
		tx_ring->rd_p += n_descs;
	}

	memset(tx_ring->txds, 0, tx_ring->size);
	tx_ring->data_pending = 0;
	tx_ring->wr_p = 0;
	tx_ring->rd_p = 0;
	tx_ring->qcp_rd_p = 0;
	tx_ring->wr_ptr_add = 0;

	if (tx_ring->is_xdp || !dp->netdev)
		return;

	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
	netdev_tx_reset_queue(nd_q);
}

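/**
 * nfp_nfdk_tx_ring_free() - Free resources allocated to a TX ring
 * @tx_ring:	TX ring to free
 */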
static void nfp_nfdk_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;

	kvfree(tx_ring->ktxbufs);

	if (tx_ring->ktxds)
		dma_free_coherent(dp->dev, tx_ring->size,
				  tx_ring->ktxds, tx_ring->dma);

	tx_ring->cnt = 0;
	tx_ring->txbufs = NULL;
	tx_ring->txds = NULL;
	tx_ring->dma = 0;
	tx_ring->size = 0;
}

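/**
 * nfp_nfdk_tx_ring_alloc() - Allocate resource for a TX ring
 * @dp:		NFP Net data path struct
 * @tx_ring:	TX ring to allocate
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */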
static int
nfp_nfdk_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;

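	/* Even a simple packet occupies NFDK_TX_DESC_PER_SIMPLE_PKT
	 * descriptors, so scale the requested descriptor count accordingly.
	 */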
	tx_ring->cnt = dp->txd_cnt * NFDK_TX_DESC_PER_SIMPLE_PKT;
	tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->ktxds));
	tx_ring->ktxds = dma_alloc_coherent(dp->dev, tx_ring->size,
					    &tx_ring->dma,
					    GFP_KERNEL | __GFP_NOWARN);
	if (!tx_ring->ktxds) {
		netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
			    tx_ring->cnt);
		goto err_alloc;
	}

	tx_ring->ktxbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->ktxbufs),
				    GFP_KERNEL);
	if (!tx_ring->ktxbufs)
		goto err_alloc;

	if (!tx_ring->is_xdp && dp->netdev)
		netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
				    tx_ring->idx);

	return 0;

err_alloc:
	nfp_nfdk_tx_ring_free(tx_ring);
	return -ENOMEM;
}

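/* All NFDK TX buffer state lives in the ktxbufs array managed by
 * nfp_nfdk_tx_ring_alloc() and nfp_nfdk_tx_ring_free(), leaving nothing for
 * these two dp_ops hooks to do.
 */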
static void
nfp_nfdk_tx_ring_bufs_free(struct nfp_net_dp *dp,
			   struct nfp_net_tx_ring *tx_ring)
{
}

static int
nfp_nfdk_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
			    struct nfp_net_tx_ring *tx_ring)
{
	return 0;
}

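/* Debugfs helper: dump each TX descriptor and buffer word, marking the slots
 * at the host (H_RD/H_WR) and device (D_RD/D_WR) read/write pointers.
 */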
static void
nfp_nfdk_print_tx_descs(struct seq_file *file,
			struct nfp_net_r_vector *r_vec,
			struct nfp_net_tx_ring *tx_ring,
			u32 d_rd_p, u32 d_wr_p)
{
	struct nfp_nfdk_tx_desc *txd;
	u32 txd_cnt = tx_ring->cnt;
	int i;

	for (i = 0; i < txd_cnt; i++) {
		txd = &tx_ring->ktxds[i];

		seq_printf(file, "%04d: 0x%08x 0x%08x 0x%016llx", i,
			   txd->vals[0], txd->vals[1], tx_ring->ktxbufs[i].raw);

		if (i == tx_ring->rd_p % txd_cnt)
			seq_puts(file, " H_RD");
		if (i == tx_ring->wr_p % txd_cnt)
			seq_puts(file, " H_WR");
		if (i == d_rd_p % txd_cnt)
			seq_puts(file, " D_RD");
		if (i == d_wr_p % txd_cnt)
			seq_puts(file, " D_WR");

		seq_putc(file, '\n');
	}
}

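/* Control word features the NFDK datapath can support, advertised to the
 * core via the cap_mask field of nfp_dp_ops.
 */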
#define NFP_NFDK_CFG_CTRL_SUPPORTED					\
	(NFP_NET_CFG_CTRL_ENABLE | NFP_NET_CFG_CTRL_PROMISC |		\
	 NFP_NET_CFG_CTRL_L2BC | NFP_NET_CFG_CTRL_L2MC |		\
	 NFP_NET_CFG_CTRL_RXCSUM | NFP_NET_CFG_CTRL_TXCSUM |		\
	 NFP_NET_CFG_CTRL_RXVLAN |					\
	 NFP_NET_CFG_CTRL_RXVLAN_V2 | NFP_NET_CFG_CTRL_RXQINQ |		\
	 NFP_NET_CFG_CTRL_TXVLAN_V2 |					\
	 NFP_NET_CFG_CTRL_GATHER | NFP_NET_CFG_CTRL_LSO |		\
	 NFP_NET_CFG_CTRL_CTAG_FILTER | NFP_NET_CFG_CTRL_CMSG_DATA |	\
	 NFP_NET_CFG_CTRL_RINGCFG | NFP_NET_CFG_CTRL_IRQMOD |		\
	 NFP_NET_CFG_CTRL_TXRWB | NFP_NET_CFG_CTRL_VEPA |		\
	 NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE |		\
	 NFP_NET_CFG_CTRL_BPF | NFP_NET_CFG_CTRL_LSO2 |			\
	 NFP_NET_CFG_CTRL_RSS2 | NFP_NET_CFG_CTRL_CSUM_COMPLETE |	\
	 NFP_NET_CFG_CTRL_LIVE_ADDR)

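/* dp_ops implementation for the NFDK descriptor format */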
const struct nfp_dp_ops nfp_nfdk_ops = {
	.version		= NFP_NFD_VER_NFDK,
	.tx_min_desc_per_pkt	= NFDK_TX_DESC_PER_SIMPLE_PKT,
	.cap_mask		= NFP_NFDK_CFG_CTRL_SUPPORTED,
	.dma_mask		= DMA_BIT_MASK(48),
	.poll			= nfp_nfdk_poll,
	.ctrl_poll		= nfp_nfdk_ctrl_poll,
	.xmit			= nfp_nfdk_tx,
	.ctrl_tx_one		= nfp_nfdk_ctrl_tx_one,
	.rx_ring_fill_freelist	= nfp_nfdk_rx_ring_fill_freelist,
	.tx_ring_alloc		= nfp_nfdk_tx_ring_alloc,
	.tx_ring_reset		= nfp_nfdk_tx_ring_reset,
	.tx_ring_free		= nfp_nfdk_tx_ring_free,
	.tx_ring_bufs_alloc	= nfp_nfdk_tx_ring_bufs_alloc,
	.tx_ring_bufs_free	= nfp_nfdk_tx_ring_bufs_free,
	.print_tx_descs		= nfp_nfdk_print_tx_descs
};
199