/*
 * Copyright (C) 2016 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the complete
 * terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * bpf/offload.c
 * Netronome network device driver: TC offload functions for PF and VF
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

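/* Timer callback which periodically snapshots the HW filter hit counters
 * into priv->rx_filter and re-arms itself for as long as the BPF control
 * bit stays set.  nfp_net_bpf_stats_update() computes deltas against
 * these cached values.
 */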
void nfp_net_filter_stats_timer(struct timer_list *t)
{
	struct nfp_net_bpf_priv *priv = from_timer(priv, t,
						   rx_filter_stats_timer);
	struct nfp_net *nn = priv->nn;
	struct nfp_stat_pair latest;

	spin_lock_bh(&priv->rx_filter_lock);

	if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		mod_timer(&priv->rx_filter_stats_timer,
			  jiffies + NFP_NET_STAT_POLL_IVL);

	spin_unlock_bh(&priv->rx_filter_lock);

	latest.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
	latest.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);

	if (latest.pkts != priv->rx_filter.pkts)
		priv->rx_filter_change = jiffies;

	priv->rx_filter = latest;
}

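/* Re-baseline the cached counters; called when a new program is loaded so
 * that subsequent stats deltas start from the fresh HW values.
 */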
static void nfp_net_bpf_stats_reset(struct nfp_net *nn)
{
	struct nfp_net_bpf_priv *priv = nn->app_priv;

	priv->rx_filter.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
	priv->rx_filter.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
	priv->rx_filter_prev = priv->rx_filter;
	priv->rx_filter_change = jiffies;
}

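/* Report packet/byte deltas since the previous query to the TC layer.
 * The FW byte counters appear to include the Ethernet header, which the
 * stack's software counters do not, hence the ETH_HLEN adjustment.
 */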
static int
nfp_net_bpf_stats_update(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
	struct nfp_net_bpf_priv *priv = nn->app_priv;
	u64 bytes, pkts;

	pkts = priv->rx_filter.pkts - priv->rx_filter_prev.pkts;
	bytes = priv->rx_filter.bytes - priv->rx_filter_prev.bytes;
	bytes -= pkts * ETH_HLEN;

	priv->rx_filter_prev = priv->rx_filter;

	tcf_exts_stats_update(cls_bpf->exts,
			      bytes, pkts, priv->rx_filter_change);

	return 0;
}

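/* Work out which NN_ACT_* program type the request maps to: XDP when no
 * exts are attached, TC direct-action, or legacy TC with a single drop or
 * redirect-to-same-port action.  Returns -EOPNOTSUPP for anything else.
 */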
static int
nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (!cls_bpf->exts)
		return NN_ACT_XDP;

	/* TC direct action */
	if (cls_bpf->exts_integrated) {
		if (!tcf_exts_has_actions(cls_bpf->exts))
			return NN_ACT_DIRECT;

		return -EOPNOTSUPP;
	}

	/* TC legacy mode */
	if (!tcf_exts_has_one_action(cls_bpf->exts))
		return -EOPNOTSUPP;

	tcf_exts_to_list(cls_bpf->exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a))
			return NN_ACT_TC_DROP;

		if (is_tcf_mirred_egress_redirect(a) &&
		    tcf_mirred_ifindex(a) == nn->dp.netdev->ifindex)
			return NN_ACT_TC_REDIR;
	}

	return -EOPNOTSUPP;
}

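/* Validate the request against FW limits (inline MTU, stack size),
 * allocate a DMA coherent buffer for the translated image and JIT the
 * program into it.  On success the caller owns *code / *dma_addr and is
 * responsible for freeing them.
 */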
static int
nfp_net_bpf_offload_prepare(struct nfp_net *nn,
			    struct tc_cls_bpf_offload *cls_bpf,
			    struct nfp_bpf_result *res,
			    void **code, dma_addr_t *dma_addr, u16 max_instr)
{
	unsigned int code_sz = max_instr * sizeof(u64);
	enum nfp_bpf_action_type act;
	unsigned int stack_size;
	u16 start_off, done_off;
	unsigned int max_mtu;
	int ret;

	if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
		return -EOPNOTSUPP;

	ret = nfp_net_bpf_get_act(nn, cls_bpf);
	if (ret < 0)
		return ret;
	act = ret;

	/* The FW reports the inline MTU limit in 64 byte chunks; the 32 bytes
	 * subtracted here are presumably reserved for prepended metadata.
	 */
	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	if (max_mtu < nn->dp.netdev->mtu) {
		nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
		return -EOPNOTSUPP;
	}

	start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
	done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

	stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
	if (cls_bpf->prog->aux->stack_depth > stack_size) {
		nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
			cls_bpf->prog->aux->stack_depth, stack_size);
		return -EOPNOTSUPP;
	}

	*code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL);
	if (!*code)
		return -ENOMEM;

	ret = nfp_bpf_jit(cls_bpf->prog, *code, act, start_off, done_off,
			  max_instr, res);
	if (ret)
		goto out;

	return 0;

out:
	dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr);
	return ret;
}

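/* Write the program address/size to the BAR, ask the FW to load it and
 * enable the BPF control bit, then start the stats polling timer.  The
 * DMA buffer is freed here, so the image is presumably copied to device
 * memory during the UPDATE_BPF reconfig.
 */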
static void
nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
			   void *code, dma_addr_t dma_addr,
			   unsigned int code_sz, unsigned int n_instr,
			   bool dense_mode)
{
	struct nfp_net_bpf_priv *priv = nn->app_priv;
	u64 bpf_addr = dma_addr;
	int err;

	nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);

	if (dense_mode)
		bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;

	nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr);
	nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, bpf_addr);

	/* Load up the JITed code */
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
	if (err)
		nn_err(nn, "FW command error while loading BPF: %d\n", err);

	/* Enable passing packets through BPF function */
	nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		nn_err(nn, "FW command error while enabling BPF: %d\n", err);

	dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr);

	nfp_net_bpf_stats_reset(nn);
	mod_timer(&priv->rx_filter_stats_timer,
		  jiffies + NFP_NET_STAT_POLL_IVL);
}

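/* Clear the BPF control bit, stop the stats timer and signal the change
 * to the FW.  A no-op if no program is currently offloaded.
 */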
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
	struct nfp_net_bpf_priv *priv = nn->app_priv;

	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
		return 0;

	spin_lock_bh(&priv->rx_filter_lock);
	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
	spin_unlock_bh(&priv->rx_filter_lock);
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

	del_timer_sync(&priv->rx_filter_stats_timer);
	nn->dp.bpf_offload_skip_sw = 0;

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

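/* Entry point for cls_bpf offload commands.  Requests with no exts
 * attached are treated as XDP programs (see nfp_net_bpf_get_act()).
 * REPLACE is implemented as stop + reload, so it is refused when SKIP_SW
 * is set and frames could leak past the filter unprocessed.
 */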
int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
	struct nfp_bpf_result res;
	dma_addr_t dma_addr;
	u16 max_instr;
	void *code;
	int err;

	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);

	switch (cls_bpf->command) {
	case TC_CLSBPF_REPLACE:
		/* There is nothing stopping us from implementing seamless
		 * replace but the simple method of loading I adopted in
		 * the firmware does not handle atomic replace (i.e. we have to
		 * stop the BPF offload and re-enable it).  Letting a few
		 * frames through without BPF applied in hardware should be
		 * fine as long as software fallback is available, though.
		 */
		if (nn->dp.bpf_offload_skip_sw)
			return -EBUSY;

		err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
						  &dma_addr, max_instr);
		if (err)
			return err;

		nfp_net_bpf_stop(nn);
		nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
					   dma_addr, max_instr * sizeof(u64),
					   res.n_instr, res.dense_mode);
		return 0;

	case TC_CLSBPF_ADD:
		if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
			return -EBUSY;

		err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
						  &dma_addr, max_instr);
		if (err)
			return err;

		nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
					   dma_addr, max_instr * sizeof(u64),
					   res.n_instr, res.dense_mode);
		return 0;

	case TC_CLSBPF_DESTROY:
		return nfp_net_bpf_stop(nn);

	case TC_CLSBPF_STATS:
		return nfp_net_bpf_stats_update(nn, cls_bpf);

	default:
		return -EOPNOTSUPP;
	}
}