// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/timekeeping.h>

#include "../ccm.h"
#include "../nfp_app.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"

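/* Allocate a control message buffer of @size bytes and reserve the
 * full length up front, so callers can write the request directly
 * into skb->data. Returns NULL on allocation failure.
 */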
static struct sk_buff *
nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
{
        struct sk_buff *skb;

        skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
        if (!skb)
                return NULL;
        skb_put(skb, size);

        return skb;
}

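/* A map op request carrying @n entries consists of the fixed request
 * header followed by @n key/value pairs, using the cmsg key and value
 * sizes stored in @bpf. Reply sizing below follows the same scheme.
 */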
static unsigned int
nfp_bpf_cmsg_map_req_size(struct nfp_app_bpf *bpf, unsigned int n)
{
        unsigned int size;

        size = sizeof(struct cmsg_req_map_op);
        size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;

        return size;
}

static struct sk_buff *
nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
{
        return nfp_bpf_cmsg_alloc(bpf, nfp_bpf_cmsg_map_req_size(bpf, n));
}

static unsigned int
nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n)
{
        unsigned int size;

        size = sizeof(struct cmsg_reply_map_op);
        size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;

        return size;
}

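/* Translate a FW return code from a reply header into a negative
 * errno; codes outside the table are logged and mapped to -EIO.
 */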
static int
nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
                         struct cmsg_reply_map_simple *reply)
{
        static const int res_table[] = {
                [CMSG_RC_SUCCESS]       = 0,
                [CMSG_RC_ERR_MAP_FD]    = -EBADFD,
                [CMSG_RC_ERR_MAP_NOENT] = -ENOENT,
                [CMSG_RC_ERR_MAP_ERR]   = -EINVAL,
                [CMSG_RC_ERR_MAP_PARSE] = -EIO,
                [CMSG_RC_ERR_MAP_EXIST] = -EEXIST,
                [CMSG_RC_ERR_MAP_NOMEM] = -ENOMEM,
                [CMSG_RC_ERR_MAP_E2BIG] = -E2BIG,
        };
        u32 rc;

        rc = be32_to_cpu(reply->rc);
        if (rc >= ARRAY_SIZE(res_table)) {
                cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
                return -EIO;
        }

        return res_table[rc];
}

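/* Request a map table allocation from the FW. On success returns the
 * table ID (tid) picked by the FW, on failure a negative errno - hence
 * the return type wider than u32.
 */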
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
{
        struct cmsg_reply_map_alloc_tbl *reply;
        struct cmsg_req_map_alloc_tbl *req;
        struct sk_buff *skb;
        u32 tid;
        int err;

        skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
        if (!skb)
                return -ENOMEM;

        req = (void *)skb->data;
        req->key_size = cpu_to_be32(map->key_size);
        req->value_size = cpu_to_be32(map->value_size);
        req->max_entries = cpu_to_be32(map->max_entries);
        req->map_type = cpu_to_be32(map->map_type);
        req->map_flags = 0;

        skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_ALLOC,
                                  sizeof(*reply));
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        reply = (void *)skb->data;
        err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
        if (err)
                goto err_free;

        tid = be32_to_cpu(reply->tid);
        dev_consume_skb_any(skb);

        return tid;
err_free:
        dev_kfree_skb_any(skb);
        return err;
}

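/* Request freeing of a map table. Failures cannot be reported back to
 * the stack at this point, so they are only logged as a leaked map.
 */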
void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
{
        struct cmsg_reply_map_free_tbl *reply;
        struct cmsg_req_map_free_tbl *req;
        struct sk_buff *skb;
        int err;

        skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
        if (!skb) {
                cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
                return;
        }

        req = (void *)skb->data;
        req->tid = cpu_to_be32(nfp_map->tid);

        skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_FREE,
                                  sizeof(*reply));
        if (IS_ERR(skb)) {
                cmsg_warn(bpf, "leaking map - I/O error\n");
                return;
        }

        reply = (void *)skb->data;
        err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
        if (err)
                cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);

        dev_consume_skb_any(skb);
}

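/* Map op payloads lay entries out as consecutive key/value pairs:
 * key 0, value 0, key 1, value 1, ... The helpers below return the
 * address of entry @n's key or value in a request or reply body.
 */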
static void *
nfp_bpf_ctrl_req_key(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
                     unsigned int n)
{
        return &req->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
}

static void *
nfp_bpf_ctrl_req_val(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
                     unsigned int n)
{
        return &req->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
}

static void *
nfp_bpf_ctrl_reply_key(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
                       unsigned int n)
{
        return &reply->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
}

static void *
nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
                       unsigned int n)
{
        return &reply->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
}

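/* Classification of ops for the map op cache: update/delete invalidate
 * cached data, lookup/getnext may be answered from the cache, and
 * getfirst/getnext replies are candidates to (re)fill it.
 */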
static bool nfp_bpf_ctrl_op_cache_invalidate(enum nfp_ccm_type op)
{
        return op == NFP_CCM_TYPE_BPF_MAP_UPDATE ||
               op == NFP_CCM_TYPE_BPF_MAP_DELETE;
}

static bool nfp_bpf_ctrl_op_cache_capable(enum nfp_ccm_type op)
{
        return op == NFP_CCM_TYPE_BPF_MAP_LOOKUP ||
               op == NFP_CCM_TYPE_BPF_MAP_GETNEXT;
}

static bool nfp_bpf_ctrl_op_cache_fill(enum nfp_ccm_type op)
{
        return op == NFP_CCM_TYPE_BPF_MAP_GETFIRST ||
               op == NFP_CCM_TYPE_BPF_MAP_GETNEXT;
}

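/* Consult the op cache before talking to the FW. Returns how many
 * entries the caller should request: 0 if @op was fully served from
 * the cached reply, 1 for an uncacheable or blocked request, or the
 * configured cache fill count. *@cache_gen is recorded under the lock
 * so nfp_bpf_ctrl_op_cache_put() can detect racing invalidations.
 */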
static unsigned int
nfp_bpf_ctrl_op_cache_get(struct nfp_bpf_map *nfp_map, enum nfp_ccm_type op,
                          const u8 *key, u8 *out_key, u8 *out_value,
                          u32 *cache_gen)
{
        struct bpf_map *map = &nfp_map->offmap->map;
        struct nfp_app_bpf *bpf = nfp_map->bpf;
        unsigned int i, count, n_entries;
        struct cmsg_reply_map_op *reply;

        n_entries = nfp_bpf_ctrl_op_cache_fill(op) ? bpf->cmsg_cache_cnt : 1;

        spin_lock(&nfp_map->cache_lock);
        *cache_gen = nfp_map->cache_gen;
        if (nfp_map->cache_blockers)
                n_entries = 1;

        if (nfp_bpf_ctrl_op_cache_invalidate(op))
                goto exit_block;
        if (!nfp_bpf_ctrl_op_cache_capable(op))
                goto exit_unlock;

        if (!nfp_map->cache)
                goto exit_unlock;
        if (nfp_map->cache_to < ktime_get_ns())
                goto exit_invalidate;

        reply = (void *)nfp_map->cache->data;
        count = be32_to_cpu(reply->count);

        for (i = 0; i < count; i++) {
                void *cached_key;

                cached_key = nfp_bpf_ctrl_reply_key(bpf, reply, i);
                if (memcmp(cached_key, key, map->key_size))
                        continue;

                if (op == NFP_CCM_TYPE_BPF_MAP_LOOKUP)
                        memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, i),
                               map->value_size);
                if (op == NFP_CCM_TYPE_BPF_MAP_GETNEXT) {
                        if (i + 1 == count)
                                break;

                        memcpy(out_key,
                               nfp_bpf_ctrl_reply_key(bpf, reply, i + 1),
                               map->key_size);
                }

                n_entries = 0;
                goto exit_unlock;
        }
        goto exit_unlock;

exit_block:
        nfp_map->cache_blockers++;
exit_invalidate:
        dev_consume_skb_any(nfp_map->cache);
        nfp_map->cache = NULL;
exit_unlock:
        spin_unlock(&nfp_map->cache_lock);
        return n_entries;
}

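/* Undo the state taken in nfp_bpf_ctrl_op_cache_get(). For
 * invalidating ops the blocker count is decremented and the generation
 * bumped; a fill reply becomes the new cache, with a fresh timeout,
 * unless an invalidation raced in between. The displaced skb (old
 * cache, or the reply when not cached) is consumed.
 */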
static void
nfp_bpf_ctrl_op_cache_put(struct nfp_bpf_map *nfp_map, enum nfp_ccm_type op,
                          struct sk_buff *skb, u32 cache_gen)
{
        bool blocker, filler;

        blocker = nfp_bpf_ctrl_op_cache_invalidate(op);
        filler = nfp_bpf_ctrl_op_cache_fill(op);
        if (blocker || filler) {
                u64 to = 0;

                if (filler)
                        to = ktime_get_ns() + NFP_BPF_MAP_CACHE_TIME_NS;

                spin_lock(&nfp_map->cache_lock);
                if (blocker) {
                        nfp_map->cache_blockers--;
                        nfp_map->cache_gen++;
                }
                if (filler && !nfp_map->cache_blockers &&
                    nfp_map->cache_gen == cache_gen) {
                        nfp_map->cache_to = to;
                        swap(nfp_map->cache, skb);
                }
                spin_unlock(&nfp_map->cache_lock);
        }

        dev_consume_skb_any(skb);
}

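/* Common path for all map entry ops. Tries the op cache first, then
 * builds a request with the key/value inputs, exchanges it with the
 * FW, validates the reply length against the reported entry count,
 * and copies the outputs back to the caller.
 */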
static int
nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op,
                      u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{
        struct nfp_bpf_map *nfp_map = offmap->dev_priv;
        unsigned int n_entries, reply_entries, count;
        struct nfp_app_bpf *bpf = nfp_map->bpf;
        struct bpf_map *map = &offmap->map;
        struct cmsg_reply_map_op *reply;
        struct cmsg_req_map_op *req;
        struct sk_buff *skb;
        u32 cache_gen;
        int err;

        /* FW messages have no space for more than 32 bits of flags */
        if (flags >> 32)
                return -EOPNOTSUPP;

        /* Handle op cache */
        n_entries = nfp_bpf_ctrl_op_cache_get(nfp_map, op, key, out_key,
                                              out_value, &cache_gen);
        if (!n_entries)
                return 0;

        skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
        if (!skb) {
                err = -ENOMEM;
                goto err_cache_put;
        }

        req = (void *)skb->data;
        req->tid = cpu_to_be32(nfp_map->tid);
        req->count = cpu_to_be32(n_entries);
        req->flags = cpu_to_be32(flags);

        /* Copy inputs */
        if (key)
                memcpy(nfp_bpf_ctrl_req_key(bpf, req, 0), key, map->key_size);
        if (value)
                memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
                       map->value_size);

        skb = nfp_ccm_communicate(&bpf->ccm, skb, op, 0);
        if (IS_ERR(skb)) {
                err = PTR_ERR(skb);
                goto err_cache_put;
        }

        if (skb->len < sizeof(*reply)) {
                cmsg_warn(bpf, "cmsg drop - type 0x%02x too short %d!\n",
                          op, skb->len);
                err = -EIO;
                goto err_free;
        }

        reply = (void *)skb->data;
        count = be32_to_cpu(reply->count);
        err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
        /* FW responds with message sized to hold the good entries,
         * plus one extra entry if there was an error.
         */
        reply_entries = count + !!err;
        if (n_entries > 1 && count)
                err = 0;
        if (err)
                goto err_free;

        if (skb->len != nfp_bpf_cmsg_map_reply_size(bpf, reply_entries)) {
                cmsg_warn(bpf, "cmsg drop - type 0x%02x too short %d for %d entries!\n",
                          op, skb->len, reply_entries);
                err = -EIO;
                goto err_free;
        }

        /* Copy outputs */
        if (out_key)
                memcpy(out_key, nfp_bpf_ctrl_reply_key(bpf, reply, 0),
                       map->key_size);
        if (out_value)
                memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, 0),
                       map->value_size);

        nfp_bpf_ctrl_op_cache_put(nfp_map, op, skb, cache_gen);

        return 0;
err_free:
        dev_kfree_skb_any(skb);
err_cache_put:
        nfp_bpf_ctrl_op_cache_put(nfp_map, op, NULL, cache_gen);
        return err;
}

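/* Exported entry points used by the map offload code - thin wrappers
 * which pick the CCM message type for each operation.
 */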
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
                              void *key, void *value, u64 flags)
{
        return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_UPDATE,
                                     key, value, flags, NULL, NULL);
}

int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
{
        return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_DELETE,
                                     key, NULL, 0, NULL, NULL);
}

int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
                              void *key, void *value)
{
        return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_LOOKUP,
                                     key, NULL, 0, NULL, value);
}

int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
                                void *next_key)
{
        return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETFIRST,
                                     NULL, NULL, 0, next_key, NULL);
}

int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
                               void *key, void *next_key)
{
        return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETNEXT,
                                     key, NULL, 0, next_key, NULL);
}

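/* Control channel MTU bounds: the minimum MTU must fit a single-entry
 * request and reply, while the preferred MTU below also accommodates a
 * full NFP_BPF_MAP_CACHE_CNT-entry batch.
 */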
unsigned int nfp_bpf_ctrl_cmsg_min_mtu(struct nfp_app_bpf *bpf)
{
        return max(nfp_bpf_cmsg_map_req_size(bpf, 1),
                   nfp_bpf_cmsg_map_reply_size(bpf, 1));
}

unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf)
{
        return max3(NFP_NET_DEFAULT_MTU,
                    nfp_bpf_cmsg_map_req_size(bpf, NFP_BPF_MAP_CACHE_CNT),
                    nfp_bpf_cmsg_map_reply_size(bpf, NFP_BPF_MAP_CACHE_CNT));
}

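/* Number of entries to ask for per cache fill: as many as fit in both
 * request and reply at the current control channel MTU, capped at
 * NFP_BPF_MAP_CACHE_CNT.
 */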
unsigned int nfp_bpf_ctrl_cmsg_cache_cnt(struct nfp_app_bpf *bpf)
{
        unsigned int mtu, req_max, reply_max, entry_sz;

        mtu = bpf->app->ctrl->dp.mtu;
        entry_sz = bpf->cmsg_key_sz + bpf->cmsg_val_sz;
        req_max = (mtu - sizeof(struct cmsg_req_map_op)) / entry_sz;
        reply_max = (mtu - sizeof(struct cmsg_reply_map_op)) / entry_sz;

        return min3(req_max, reply_max, NFP_BPF_MAP_CACHE_CNT);
}

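/* Receive handler for BPF control messages arriving as skbs. BPF
 * event notifications are routed to nfp_bpf_event_output(), everything
 * else goes to the CCM layer to be matched with a pending request.
 */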
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_app_bpf *bpf = app->priv;

        if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
                cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
                dev_kfree_skb_any(skb);
                return;
        }

        if (nfp_ccm_get_type(skb) == NFP_CCM_TYPE_BPF_BPF_EVENT) {
                if (!nfp_bpf_event_output(bpf, skb->data, skb->len))
                        dev_consume_skb_any(skb);
                else
                        dev_kfree_skb_any(skb);
                return;
        }

        nfp_ccm_rx(&bpf->ccm, skb);
}

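/* Receive handler for control messages delivered as a raw buffer
 * rather than an skb. Only BPF event notifications are handled here;
 * other message types are dropped with a warning.
 */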
void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
{
        const struct nfp_ccm_hdr *hdr = data;
        struct nfp_app_bpf *bpf = app->priv;

        if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) {
                cmsg_warn(bpf, "cmsg drop - too short %d!\n", len);
                return;
        }

        if (hdr->type == NFP_CCM_TYPE_BPF_BPF_EVENT)
                nfp_bpf_event_output(bpf, data, len);
        else
                cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n",
                          hdr->type);
}