1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * net/sched/cls_flower.c Flower classifier
4 *
5 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
6 */
7
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/rhashtable.h>
12 #include <linux/workqueue.h>
13 #include <linux/refcount.h>
14 #include <linux/bitfield.h>
15
16 #include <linux/if_ether.h>
17 #include <linux/in6.h>
18 #include <linux/ip.h>
19 #include <linux/mpls.h>
20 #include <linux/ppp_defs.h>
21
22 #include <net/sch_generic.h>
23 #include <net/pkt_cls.h>
24 #include <net/pkt_sched.h>
25 #include <net/ip.h>
26 #include <net/flow_dissector.h>
27 #include <net/geneve.h>
28 #include <net/vxlan.h>
29 #include <net/erspan.h>
30 #include <net/gtp.h>
31 #include <net/pfcp.h>
32 #include <net/tc_wrapper.h>
33
34 #include <net/dst.h>
35 #include <net/dst_metadata.h>
36
37 #include <uapi/linux/netfilter/nf_conntrack_common.h>
38
39 #define TCA_FLOWER_KEY_CT_FLAGS_MAX \
40 ((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
41 #define TCA_FLOWER_KEY_CT_FLAGS_MASK \
42 (TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)
43
44 #define TCA_FLOWER_KEY_FLAGS_POLICY_MASK \
45 (TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT | \
46 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST)
47
48 #define TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK \
49 (TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM | \
50 TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT | \
51 TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM | \
52 TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT)
53
54 struct fl_flow_key {
55 struct flow_dissector_key_meta meta;
56 struct flow_dissector_key_control control;
57 struct flow_dissector_key_control enc_control;
58 struct flow_dissector_key_basic basic;
59 struct flow_dissector_key_eth_addrs eth;
60 struct flow_dissector_key_vlan vlan;
61 struct flow_dissector_key_vlan cvlan;
62 union {
63 struct flow_dissector_key_ipv4_addrs ipv4;
64 struct flow_dissector_key_ipv6_addrs ipv6;
65 };
66 struct flow_dissector_key_ports tp;
67 struct flow_dissector_key_icmp icmp;
68 struct flow_dissector_key_arp arp;
69 struct flow_dissector_key_keyid enc_key_id;
70 union {
71 struct flow_dissector_key_ipv4_addrs enc_ipv4;
72 struct flow_dissector_key_ipv6_addrs enc_ipv6;
73 };
74 struct flow_dissector_key_ports enc_tp;
75 struct flow_dissector_key_mpls mpls;
76 struct flow_dissector_key_tcp tcp;
77 struct flow_dissector_key_ip ip;
78 struct flow_dissector_key_ip enc_ip;
79 struct flow_dissector_key_enc_opts enc_opts;
80 struct flow_dissector_key_ports_range tp_range;
81 struct flow_dissector_key_ct ct;
82 struct flow_dissector_key_hash hash;
83 struct flow_dissector_key_num_of_vlans num_of_vlans;
84 struct flow_dissector_key_pppoe pppoe;
85 struct flow_dissector_key_l2tpv3 l2tpv3;
86 struct flow_dissector_key_ipsec ipsec;
87 struct flow_dissector_key_cfm cfm;
88 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
89
90 struct fl_flow_mask_range {
91 unsigned short int start;
92 unsigned short int end;
93 };
94
95 struct fl_flow_mask {
96 struct fl_flow_key key;
97 struct fl_flow_mask_range range;
98 u32 flags;
99 struct rhash_head ht_node;
100 struct rhashtable ht;
101 struct rhashtable_params filter_ht_params;
102 struct flow_dissector dissector;
103 struct list_head filters;
104 struct rcu_work rwork;
105 struct list_head list;
106 refcount_t refcnt;
107 };
108
109 struct fl_flow_tmplt {
110 struct fl_flow_key dummy_key;
111 struct fl_flow_key mask;
112 struct flow_dissector dissector;
113 struct tcf_chain *chain;
114 };
115
116 struct cls_fl_head {
117 struct rhashtable ht;
118 spinlock_t masks_lock; /* Protect masks list */
119 struct list_head masks;
120 struct list_head hw_filters;
121 struct rcu_work rwork;
122 struct idr handle_idr;
123 };
124
125 struct cls_fl_filter {
126 struct fl_flow_mask *mask;
127 struct rhash_head ht_node;
128 struct fl_flow_key mkey;
129 struct tcf_exts exts;
130 struct tcf_result res;
131 struct fl_flow_key key;
132 struct list_head list;
133 struct list_head hw_list;
134 u32 handle;
135 u32 flags;
136 u32 in_hw_count;
137 u8 needs_tc_skb_ext:1;
138 struct rcu_work rwork;
139 struct net_device *hw_dev;
140 /* Flower classifier is unlocked, which means that its reference counter
141 * can be changed concurrently without any kind of external
142 * synchronization. Use atomic reference counter to be concurrency-safe.
143 */
144 refcount_t refcnt;
145 bool deleted;
146 };
147
148 static const struct rhashtable_params mask_ht_params = {
149 .key_offset = offsetof(struct fl_flow_mask, key),
150 .key_len = sizeof(struct fl_flow_key),
151 .head_offset = offsetof(struct fl_flow_mask, ht_node),
152 .automatic_shrinking = true,
153 };
154
155 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
156 {
157 return mask->range.end - mask->range.start;
158 }
159
160 static void fl_mask_update_range(struct fl_flow_mask *mask)
161 {
162 const u8 *bytes = (const u8 *) &mask->key;
163 size_t size = sizeof(mask->key);
164 size_t i, first = 0, last;
165
166 for (i = 0; i < size; i++) {
167 if (bytes[i]) {
168 first = i;
169 break;
170 }
171 }
172 last = first;
173 for (i = size - 1; i != first; i--) {
174 if (bytes[i]) {
175 last = i;
176 break;
177 }
178 }
179 mask->range.start = rounddown(first, sizeof(long));
180 mask->range.end = roundup(last + 1, sizeof(long));
181 }
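/* Example (illustrative): fl_mask_update_range() trims the key to the
 * smallest long-aligned window covering all non-zero mask bytes. On a
 * 64-bit kernel, if the first non-zero mask byte sits at offset 10 and the
 * last at offset 21, the result is start = rounddown(10, 8) = 8 and
 * end = roundup(22, 8) = 24, i.e. two long-sized words. Only this window
 * is masked, hashed and compared for the filters under this mask.
 */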
182
183 static void *fl_key_get_start(struct fl_flow_key *key,
184 const struct fl_flow_mask *mask)
185 {
186 return (u8 *) key + mask->range.start;
187 }
188
189 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
190 struct fl_flow_mask *mask)
191 {
192 const long *lkey = fl_key_get_start(key, mask);
193 const long *lmask = fl_key_get_start(&mask->key, mask);
194 long *lmkey = fl_key_get_start(mkey, mask);
195 int i;
196
197 for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
198 *lmkey++ = *lkey++ & *lmask++;
199 }
200
201 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
202 struct fl_flow_mask *mask)
203 {
204 const long *lmask = fl_key_get_start(&mask->key, mask);
205 const long *ltmplt;
206 int i;
207
208 if (!tmplt)
209 return true;
210 ltmplt = fl_key_get_start(&tmplt->mask, mask);
211 for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
212 if (~*ltmplt++ & *lmask++)
213 return false;
214 }
215 return true;
216 }
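/* Example (illustrative): a mask "fits" a template when it does not enable
 * any bit the template leaves unmasked. Per long-sized word, ~*ltmplt &
 * *lmask is non-zero exactly when the mask sets a bit that is clear in the
 * template mask, in which case the new filter is rejected.
 */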
217
218 static void fl_clear_masked_range(struct fl_flow_key *key,
219 struct fl_flow_mask *mask)
220 {
221 memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
222 }
223
224 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
225 struct fl_flow_key *key,
226 struct fl_flow_key *mkey)
227 {
228 u16 min_mask, max_mask, min_val, max_val;
229
230 min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
231 max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
232 min_val = ntohs(filter->key.tp_range.tp_min.dst);
233 max_val = ntohs(filter->key.tp_range.tp_max.dst);
234
235 if (min_mask && max_mask) {
236 if (ntohs(key->tp_range.tp.dst) < min_val ||
237 ntohs(key->tp_range.tp.dst) > max_val)
238 return false;
239
240 /* skb does not have min and max values */
241 mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
242 mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
243 }
244 return true;
245 }
246
247 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
248 struct fl_flow_key *key,
249 struct fl_flow_key *mkey)
250 {
251 u16 min_mask, max_mask, min_val, max_val;
252
253 min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
254 max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
255 min_val = ntohs(filter->key.tp_range.tp_min.src);
256 max_val = ntohs(filter->key.tp_range.tp_max.src);
257
258 if (min_mask && max_mask) {
259 if (ntohs(key->tp_range.tp.src) < min_val ||
260 ntohs(key->tp_range.tp.src) > max_val)
261 return false;
262
263 /* skb does not have min and max values */
264 mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
265 mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
266 }
267 return true;
268 }
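/* Example (illustrative): for a filter matching destination ports
 * 1000-2000, the comparisons above check that the packet's port lies in
 * [1000, 2000]. Since the skb itself carries no min/max values, the
 * filter's own masked min/max are copied into the lookup key so that the
 * subsequent hash lookup in __fl_lookup() can still match exactly.
 */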
269
270 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
271 struct fl_flow_key *mkey)
272 {
273 return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
274 mask->filter_ht_params);
275 }
276
277 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
278 struct fl_flow_key *mkey,
279 struct fl_flow_key *key)
280 {
281 struct cls_fl_filter *filter, *f;
282
283 list_for_each_entry_rcu(filter, &mask->filters, list) {
284 if (!fl_range_port_dst_cmp(filter, key, mkey))
285 continue;
286
287 if (!fl_range_port_src_cmp(filter, key, mkey))
288 continue;
289
290 f = __fl_lookup(mask, mkey);
291 if (f)
292 return f;
293 }
294 return NULL;
295 }
296
297 static noinline_for_stack
298 struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
299 {
300 struct fl_flow_key mkey;
301
302 fl_set_masked_key(&mkey, key, mask);
303 if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
304 return fl_lookup_range(mask, &mkey, key);
305
306 return __fl_lookup(mask, &mkey);
307 }
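/* Lookup is two-stage: the packet key is first reduced to the mask's
 * relevant window (fl_set_masked_key), then looked up in the per-mask hash
 * table. Masks carrying port ranges cannot be matched purely by hashing,
 * so fl_lookup_range() walks that mask's filter list first.
 */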
308
309 static u16 fl_ct_info_to_flower_map[] = {
310 [IP_CT_ESTABLISHED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
311 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
312 [IP_CT_RELATED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
313 TCA_FLOWER_KEY_CT_FLAGS_RELATED,
314 [IP_CT_ESTABLISHED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
315 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
316 TCA_FLOWER_KEY_CT_FLAGS_REPLY,
317 [IP_CT_RELATED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
318 TCA_FLOWER_KEY_CT_FLAGS_RELATED |
319 TCA_FLOWER_KEY_CT_FLAGS_REPLY,
320 [IP_CT_NEW] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
321 TCA_FLOWER_KEY_CT_FLAGS_NEW,
322 };
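/* Example (illustrative, assuming the iproute2 flower syntax): the mapping
 * above is what lets a rule such as
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *       ct_state +trk+est action pass
 *
 * match packets whose conntrack state is IP_CT_ESTABLISHED or
 * IP_CT_ESTABLISHED_REPLY, both of which include TRACKED | ESTABLISHED.
 */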
323
324 TC_INDIRECT_SCOPE int fl_classify(struct sk_buff *skb,
325 const struct tcf_proto *tp,
326 struct tcf_result *res)
327 {
328 struct cls_fl_head *head = rcu_dereference_bh(tp->root);
329 bool post_ct = tc_skb_cb(skb)->post_ct;
330 u16 zone = tc_skb_cb(skb)->zone;
331 struct fl_flow_key skb_key;
332 struct fl_flow_mask *mask;
333 struct cls_fl_filter *f;
334
335 list_for_each_entry_rcu(mask, &head->masks, list) {
336 flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
337 fl_clear_masked_range(&skb_key, mask);
338
339 skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
340 /* skb_flow_dissect() does not set n_proto in case of an unknown
341 * protocol, so set it here instead.
342 */
343 skb_key.basic.n_proto = skb_protocol(skb, false);
344 skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
345 skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
346 fl_ct_info_to_flower_map,
347 ARRAY_SIZE(fl_ct_info_to_flower_map),
348 post_ct, zone);
349 skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
350 skb_flow_dissect(skb, &mask->dissector, &skb_key,
351 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);
352
353 f = fl_mask_lookup(mask, &skb_key);
354 if (f && !tc_skip_sw(f->flags)) {
355 *res = f->res;
356 return tcf_exts_exec(skb, &f->exts, res);
357 }
358 }
359 return -1;
360 }
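/* Example (illustrative, assuming standard iproute2 usage): a filter such as
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *       ip_proto tcp dst_port 80 action drop
 *
 * installs one cls_fl_filter under a mask covering (roughly) n_proto,
 * ip_proto and tp.dst; fl_classify() then dissects each skb, masks the
 * resulting key against every mask in head->masks and, on a hit, executes
 * the matching filter's actions.
 */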
361
362 static int fl_init(struct tcf_proto *tp)
363 {
364 struct cls_fl_head *head;
365
366 head = kzalloc(sizeof(*head), GFP_KERNEL);
367 if (!head)
368 return -ENOBUFS;
369
370 spin_lock_init(&head->masks_lock);
371 INIT_LIST_HEAD_RCU(&head->masks);
372 INIT_LIST_HEAD(&head->hw_filters);
373 rcu_assign_pointer(tp->root, head);
374 idr_init(&head->handle_idr);
375
376 return rhashtable_init(&head->ht, &mask_ht_params);
377 }
378
379 static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
380 {
381 /* temporary masks don't have their filters list and ht initialized */
382 if (mask_init_done) {
383 WARN_ON(!list_empty(&mask->filters));
384 rhashtable_destroy(&mask->ht);
385 }
386 kfree(mask);
387 }
388
389 static void fl_mask_free_work(struct work_struct *work)
390 {
391 struct fl_flow_mask *mask = container_of(to_rcu_work(work),
392 struct fl_flow_mask, rwork);
393
394 fl_mask_free(mask, true);
395 }
396
397 static void fl_uninit_mask_free_work(struct work_struct *work)
398 {
399 struct fl_flow_mask *mask = container_of(to_rcu_work(work),
400 struct fl_flow_mask, rwork);
401
402 fl_mask_free(mask, false);
403 }
404
405 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
406 {
407 if (!refcount_dec_and_test(&mask->refcnt))
408 return false;
409
410 rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
411
412 spin_lock(&head->masks_lock);
413 list_del_rcu(&mask->list);
414 spin_unlock(&head->masks_lock);
415
416 tcf_queue_work(&mask->rwork, fl_mask_free_work);
417
418 return true;
419 }
420
421 static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
422 {
423 /* Flower classifier only changes root pointer during init and destroy.
424 * Users must obtain reference to tcf_proto instance before calling its
425 * API, so tp->root pointer is protected from concurrent call to
426 * fl_destroy() by reference counting.
427 */
428 return rcu_dereference_raw(tp->root);
429 }
430
431 static void __fl_destroy_filter(struct cls_fl_filter *f)
432 {
433 if (f->needs_tc_skb_ext)
434 tc_skb_ext_tc_disable();
435 tcf_exts_destroy(&f->exts);
436 tcf_exts_put_net(&f->exts);
437 kfree(f);
438 }
439
440 static void fl_destroy_filter_work(struct work_struct *work)
441 {
442 struct cls_fl_filter *f = container_of(to_rcu_work(work),
443 struct cls_fl_filter, rwork);
444
445 __fl_destroy_filter(f);
446 }
447
448 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
449 bool rtnl_held, struct netlink_ext_ack *extack)
450 {
451 struct tcf_block *block = tp->chain->block;
452 struct flow_cls_offload cls_flower = {};
453
454 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
455 cls_flower.command = FLOW_CLS_DESTROY;
456 cls_flower.cookie = (unsigned long) f;
457
458 tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
459 &f->flags, &f->in_hw_count, rtnl_held);
460
461 }
462
463 static int fl_hw_replace_filter(struct tcf_proto *tp,
464 struct cls_fl_filter *f, bool rtnl_held,
465 struct netlink_ext_ack *extack)
466 {
467 struct tcf_block *block = tp->chain->block;
468 struct flow_cls_offload cls_flower = {};
469 bool skip_sw = tc_skip_sw(f->flags);
470 int err = 0;
471
472 cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
473 if (!cls_flower.rule)
474 return -ENOMEM;
475
476 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
477 cls_flower.command = FLOW_CLS_REPLACE;
478 cls_flower.cookie = (unsigned long) f;
479 cls_flower.rule->match.dissector = &f->mask->dissector;
480 cls_flower.rule->match.mask = &f->mask->key;
481 cls_flower.rule->match.key = &f->mkey;
482 cls_flower.classid = f->res.classid;
483
484 err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
485 cls_flower.common.extack);
486 if (err) {
487 kfree(cls_flower.rule);
488
489 return skip_sw ? err : 0;
490 }
491
492 err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
493 skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
494 tc_cleanup_offload_action(&cls_flower.rule->action);
495 kfree(cls_flower.rule);
496
497 if (err) {
498 fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
499 return err;
500 }
501
502 if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
503 return -EINVAL;
504
505 return 0;
506 }
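/* Note on offload flags (illustrative): with "skip_sw" the filter is only
 * useful if at least one device accepted it, hence the -EINVAL above when
 * TCA_CLS_FLAGS_IN_HW never got set; with "skip_hw" the callers avoid this
 * path entirely (they check tc_skip_hw(f->flags) before offloading).
 */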
507
508 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
509 bool rtnl_held)
510 {
511 struct tcf_block *block = tp->chain->block;
512 struct flow_cls_offload cls_flower = {};
513
514 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
515 cls_flower.command = FLOW_CLS_STATS;
516 cls_flower.cookie = (unsigned long) f;
517 cls_flower.classid = f->res.classid;
518
519 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
520 rtnl_held);
521
522 tcf_exts_hw_stats_update(&f->exts, &cls_flower.stats, cls_flower.use_act_stats);
523 }
524
525 static void __fl_put(struct cls_fl_filter *f)
526 {
527 if (!refcount_dec_and_test(&f->refcnt))
528 return;
529
530 if (tcf_exts_get_net(&f->exts))
531 tcf_queue_work(&f->rwork, fl_destroy_filter_work);
532 else
533 __fl_destroy_filter(f);
534 }
535
536 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
537 {
538 struct cls_fl_filter *f;
539
540 rcu_read_lock();
541 f = idr_find(&head->handle_idr, handle);
542 if (f && !refcount_inc_not_zero(&f->refcnt))
543 f = NULL;
544 rcu_read_unlock();
545
546 return f;
547 }
548
549 static struct tcf_exts *fl_get_exts(const struct tcf_proto *tp, u32 handle)
550 {
551 struct cls_fl_head *head = rcu_dereference_bh(tp->root);
552 struct cls_fl_filter *f;
553
554 f = idr_find(&head->handle_idr, handle);
555 return f ? &f->exts : NULL;
556 }
557
558 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
559 bool *last, bool rtnl_held,
560 struct netlink_ext_ack *extack)
561 {
562 struct cls_fl_head *head = fl_head_dereference(tp);
563
564 *last = false;
565
566 spin_lock(&tp->lock);
567 if (f->deleted) {
568 spin_unlock(&tp->lock);
569 return -ENOENT;
570 }
571
572 f->deleted = true;
573 rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
574 f->mask->filter_ht_params);
575 idr_remove(&head->handle_idr, f->handle);
576 list_del_rcu(&f->list);
577 spin_unlock(&tp->lock);
578
579 *last = fl_mask_put(head, f->mask);
580 if (!tc_skip_hw(f->flags))
581 fl_hw_destroy_filter(tp, f, rtnl_held, extack);
582 tcf_unbind_filter(tp, &f->res);
583 __fl_put(f);
584
585 return 0;
586 }
587
588 static void fl_destroy_sleepable(struct work_struct *work)
589 {
590 struct cls_fl_head *head = container_of(to_rcu_work(work),
591 struct cls_fl_head,
592 rwork);
593
594 rhashtable_destroy(&head->ht);
595 kfree(head);
596 module_put(THIS_MODULE);
597 }
598
599 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
600 struct netlink_ext_ack *extack)
601 {
602 struct cls_fl_head *head = fl_head_dereference(tp);
603 struct fl_flow_mask *mask, *next_mask;
604 struct cls_fl_filter *f, *next;
605 bool last;
606
607 list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
608 list_for_each_entry_safe(f, next, &mask->filters, list) {
609 __fl_delete(tp, f, &last, rtnl_held, extack);
610 if (last)
611 break;
612 }
613 }
614 idr_destroy(&head->handle_idr);
615
616 __module_get(THIS_MODULE);
617 tcf_queue_work(&head->rwork, fl_destroy_sleepable);
618 }
619
620 static void fl_put(struct tcf_proto *tp, void *arg)
621 {
622 struct cls_fl_filter *f = arg;
623
624 __fl_put(f);
625 }
626
627 static void *fl_get(struct tcf_proto *tp, u32 handle)
628 {
629 struct cls_fl_head *head = fl_head_dereference(tp);
630
631 return __fl_get(head, handle);
632 }
633
634 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
635 [TCA_FLOWER_UNSPEC] = { .strict_start_type =
636 TCA_FLOWER_L2_MISS },
637 [TCA_FLOWER_CLASSID] = { .type = NLA_U32 },
638 [TCA_FLOWER_INDEV] = { .type = NLA_STRING,
639 .len = IFNAMSIZ },
640 [TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN },
641 [TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN },
642 [TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN },
643 [TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN },
644 [TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 },
645 [TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 },
646 [TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 },
647 [TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 },
648 [TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 },
649 [TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 },
650 [TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
651 [TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
652 [TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) },
653 [TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
654 [TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 },
655 [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 },
656 [TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 },
657 [TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 },
658 [TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 },
659 [TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 },
660 [TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 },
661 [TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
662 [TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
663 [TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
664 [TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
665 [TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
666 [TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
667 [TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
668 [TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
669 [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
670 [TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 },
671 [TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 },
672 [TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 },
673 [TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 },
674 [TCA_FLOWER_KEY_SCTP_SRC_MASK] = { .type = NLA_U16 },
675 [TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 },
676 [TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 },
677 [TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 },
678 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 },
679 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
680 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 },
681 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
682 [TCA_FLOWER_KEY_FLAGS] = NLA_POLICY_MASK(NLA_BE32,
683 TCA_FLOWER_KEY_FLAGS_POLICY_MASK),
684 [TCA_FLOWER_KEY_FLAGS_MASK] = NLA_POLICY_MASK(NLA_BE32,
685 TCA_FLOWER_KEY_FLAGS_POLICY_MASK),
686 [TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 },
687 [TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
688 [TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 },
689 [TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
690 [TCA_FLOWER_KEY_ICMPV6_TYPE] = { .type = NLA_U8 },
691 [TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
692 [TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 },
693 [TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
694 [TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 },
695 [TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 },
696 [TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 },
697 [TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 },
698 [TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 },
699 [TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 },
700 [TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN },
701 [TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN },
702 [TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN },
703 [TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN },
704 [TCA_FLOWER_KEY_MPLS_TTL] = { .type = NLA_U8 },
705 [TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 },
706 [TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 },
707 [TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 },
708 [TCA_FLOWER_KEY_MPLS_OPTS] = { .type = NLA_NESTED },
709 [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 },
710 [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 },
711 [TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 },
712 [TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 },
713 [TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 },
714 [TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 },
715 [TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 },
716 [TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 },
717 [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 },
718 [TCA_FLOWER_KEY_ENC_IP_TOS] = { .type = NLA_U8 },
719 [TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
720 [TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 },
721 [TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
722 [TCA_FLOWER_KEY_ENC_OPTS] = { .type = NLA_NESTED },
723 [TCA_FLOWER_KEY_ENC_OPTS_MASK] = { .type = NLA_NESTED },
724 [TCA_FLOWER_KEY_CT_STATE] =
725 NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
726 [TCA_FLOWER_KEY_CT_STATE_MASK] =
727 NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
728 [TCA_FLOWER_KEY_CT_ZONE] = { .type = NLA_U16 },
729 [TCA_FLOWER_KEY_CT_ZONE_MASK] = { .type = NLA_U16 },
730 [TCA_FLOWER_KEY_CT_MARK] = { .type = NLA_U32 },
731 [TCA_FLOWER_KEY_CT_MARK_MASK] = { .type = NLA_U32 },
732 [TCA_FLOWER_KEY_CT_LABELS] = { .type = NLA_BINARY,
733 .len = 128 / BITS_PER_BYTE },
734 [TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY,
735 .len = 128 / BITS_PER_BYTE },
736 [TCA_FLOWER_FLAGS] = { .type = NLA_U32 },
737 [TCA_FLOWER_KEY_HASH] = { .type = NLA_U32 },
738 [TCA_FLOWER_KEY_HASH_MASK] = { .type = NLA_U32 },
739 [TCA_FLOWER_KEY_NUM_OF_VLANS] = { .type = NLA_U8 },
740 [TCA_FLOWER_KEY_PPPOE_SID] = { .type = NLA_U16 },
741 [TCA_FLOWER_KEY_PPP_PROTO] = { .type = NLA_U16 },
742 [TCA_FLOWER_KEY_L2TPV3_SID] = { .type = NLA_U32 },
743 [TCA_FLOWER_KEY_SPI] = { .type = NLA_U32 },
744 [TCA_FLOWER_KEY_SPI_MASK] = { .type = NLA_U32 },
745 [TCA_FLOWER_L2_MISS] = NLA_POLICY_MAX(NLA_U8, 1),
746 [TCA_FLOWER_KEY_CFM] = { .type = NLA_NESTED },
747 [TCA_FLOWER_KEY_ENC_FLAGS] = NLA_POLICY_MASK(NLA_BE32,
748 TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK),
749 [TCA_FLOWER_KEY_ENC_FLAGS_MASK] = NLA_POLICY_MASK(NLA_BE32,
750 TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK),
751 };
752
753 static const struct nla_policy
754 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
755 [TCA_FLOWER_KEY_ENC_OPTS_UNSPEC] = {
756 .strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
757 [TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
758 [TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED },
759 [TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED },
760 [TCA_FLOWER_KEY_ENC_OPTS_GTP] = { .type = NLA_NESTED },
761 [TCA_FLOWER_KEY_ENC_OPTS_PFCP] = { .type = NLA_NESTED },
762 };
763
764 static const struct nla_policy
765 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
766 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
767 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
768 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
769 .len = 128 },
770 };
771
772 static const struct nla_policy
773 vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
774 [TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 },
775 };
776
777 static const struct nla_policy
778 erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
779 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 },
780 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 },
781 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 },
782 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 },
783 };
784
785 static const struct nla_policy
786 gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = {
787 [TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] = { .type = NLA_U8 },
788 [TCA_FLOWER_KEY_ENC_OPT_GTP_QFI] = { .type = NLA_U8 },
789 };
790
791 static const struct nla_policy
792 pfcp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX + 1] = {
793 [TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE] = { .type = NLA_U8 },
794 [TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID] = { .type = NLA_U64 },
795 };
796
797 static const struct nla_policy
798 mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
799 [TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH] = { .type = NLA_U8 },
800 [TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL] = { .type = NLA_U8 },
801 [TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS] = { .type = NLA_U8 },
802 [TCA_FLOWER_KEY_MPLS_OPT_LSE_TC] = { .type = NLA_U8 },
803 [TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL] = { .type = NLA_U32 },
804 };
805
806 static const struct nla_policy
807 cfm_opt_policy[TCA_FLOWER_KEY_CFM_OPT_MAX + 1] = {
808 [TCA_FLOWER_KEY_CFM_MD_LEVEL] = NLA_POLICY_MAX(NLA_U8,
809 FLOW_DIS_CFM_MDL_MAX),
810 [TCA_FLOWER_KEY_CFM_OPCODE] = { .type = NLA_U8 },
811 };
812
813 static void fl_set_key_val(struct nlattr **tb,
814 void *val, int val_type,
815 void *mask, int mask_type, int len)
816 {
817 if (!tb[val_type])
818 return;
819 nla_memcpy(val, tb[val_type], len);
820 if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
821 memset(mask, 0xff, len);
822 else
823 nla_memcpy(mask, tb[mask_type], len);
824 }
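/* Example (illustrative): if userspace supplies TCA_FLOWER_KEY_IP_PROTO but
 * no explicit mask attribute, the mask is filled with 0xff above, so the
 * attribute is treated as an exact-match key. Attributes that are absent
 * leave both key and mask zeroed and therefore act as wildcards.
 */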
825
826 static int fl_set_key_spi(struct nlattr **tb, struct fl_flow_key *key,
827 struct fl_flow_key *mask,
828 struct netlink_ext_ack *extack)
829 {
830 if (key->basic.ip_proto != IPPROTO_ESP &&
831 key->basic.ip_proto != IPPROTO_AH) {
832 NL_SET_ERR_MSG(extack,
833 "Protocol must be either ESP or AH");
834 return -EINVAL;
835 }
836
837 fl_set_key_val(tb, &key->ipsec.spi,
838 TCA_FLOWER_KEY_SPI,
839 &mask->ipsec.spi, TCA_FLOWER_KEY_SPI_MASK,
840 sizeof(key->ipsec.spi));
841 return 0;
842 }
843
844 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
845 struct fl_flow_key *mask,
846 struct netlink_ext_ack *extack)
847 {
848 fl_set_key_val(tb, &key->tp_range.tp_min.dst,
849 TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
850 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
851 fl_set_key_val(tb, &key->tp_range.tp_max.dst,
852 TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
853 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
854 fl_set_key_val(tb, &key->tp_range.tp_min.src,
855 TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
856 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
857 fl_set_key_val(tb, &key->tp_range.tp_max.src,
858 TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
859 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
860
861 if (mask->tp_range.tp_min.dst != mask->tp_range.tp_max.dst) {
862 NL_SET_ERR_MSG(extack,
863 "Both min and max destination ports must be specified");
864 return -EINVAL;
865 }
866 if (mask->tp_range.tp_min.src != mask->tp_range.tp_max.src) {
867 NL_SET_ERR_MSG(extack,
868 "Both min and max source ports must be specified");
869 return -EINVAL;
870 }
871 if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
872 ntohs(key->tp_range.tp_max.dst) <=
873 ntohs(key->tp_range.tp_min.dst)) {
874 NL_SET_ERR_MSG_ATTR(extack,
875 tb[TCA_FLOWER_KEY_PORT_DST_MIN],
876 "Invalid destination port range (min must be strictly smaller than max)");
877 return -EINVAL;
878 }
879 if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
880 ntohs(key->tp_range.tp_max.src) <=
881 ntohs(key->tp_range.tp_min.src)) {
882 NL_SET_ERR_MSG_ATTR(extack,
883 tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
884 "Invalid source port range (min must be strictly smaller than max)");
885 return -EINVAL;
886 }
887
888 return 0;
889 }
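/* Example (illustrative, assuming the iproute2 port-range syntax): a rule
 * such as
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *       ip_proto tcp dst_port 1000-2000 action drop
 *
 * sets tp_range.tp_min.dst = 1000 and tp_range.tp_max.dst = 2000; the
 * checks above reject half-specified ranges and ranges where min is not
 * strictly below max.
 */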
890
891 static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
892 struct flow_dissector_key_mpls *key_val,
893 struct flow_dissector_key_mpls *key_mask,
894 struct netlink_ext_ack *extack)
895 {
896 struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
897 struct flow_dissector_mpls_lse *lse_mask;
898 struct flow_dissector_mpls_lse *lse_val;
899 u8 lse_index;
900 u8 depth;
901 int err;
902
903 err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
904 mpls_stack_entry_policy, extack);
905 if (err < 0)
906 return err;
907
908 if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
909 NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
910 return -EINVAL;
911 }
912
913 depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);
914
915 /* LSE depth starts at 1, for consistency with terminology used by
916 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
917 */
918 if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
919 NL_SET_ERR_MSG_ATTR(extack,
920 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
921 "Invalid MPLS depth");
922 return -EINVAL;
923 }
924 lse_index = depth - 1;
925
926 dissector_set_mpls_lse(key_val, lse_index);
927 dissector_set_mpls_lse(key_mask, lse_index);
928
929 lse_val = &key_val->ls[lse_index];
930 lse_mask = &key_mask->ls[lse_index];
931
932 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
933 lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
934 lse_mask->mpls_ttl = MPLS_TTL_MASK;
935 }
936 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
937 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);
938
939 if (bos & ~MPLS_BOS_MASK) {
940 NL_SET_ERR_MSG_ATTR(extack,
941 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
942 "Bottom Of Stack (BOS) must be 0 or 1");
943 return -EINVAL;
944 }
945 lse_val->mpls_bos = bos;
946 lse_mask->mpls_bos = MPLS_BOS_MASK;
947 }
948 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
949 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);
950
951 if (tc & ~MPLS_TC_MASK) {
952 NL_SET_ERR_MSG_ATTR(extack,
953 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
954 "Traffic Class (TC) must be between 0 and 7");
955 return -EINVAL;
956 }
957 lse_val->mpls_tc = tc;
958 lse_mask->mpls_tc = MPLS_TC_MASK;
959 }
960 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
961 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);
962
963 if (label & ~MPLS_LABEL_MASK) {
964 NL_SET_ERR_MSG_ATTR(extack,
965 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
966 "Label must be between 0 and 1048575");
967 return -EINVAL;
968 }
969 lse_val->mpls_label = label;
970 lse_mask->mpls_label = MPLS_LABEL_MASK;
971 }
972
973 return 0;
974 }
975
976 static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
977 struct flow_dissector_key_mpls *key_val,
978 struct flow_dissector_key_mpls *key_mask,
979 struct netlink_ext_ack *extack)
980 {
981 struct nlattr *nla_lse;
982 int rem;
983 int err;
984
985 if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
986 NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
987 "NLA_F_NESTED is missing");
988 return -EINVAL;
989 }
990
991 nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
992 if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
993 NL_SET_ERR_MSG_ATTR(extack, nla_lse,
994 "Invalid MPLS option type");
995 return -EINVAL;
996 }
997
998 err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
999 if (err < 0)
1000 return err;
1001 }
1002 if (rem) {
1003 NL_SET_ERR_MSG(extack,
1004 "Bytes leftover after parsing MPLS options");
1005 return -EINVAL;
1006 }
1007
1008 return 0;
1009 }
1010
1011 static int fl_set_key_mpls(struct nlattr **tb,
1012 struct flow_dissector_key_mpls *key_val,
1013 struct flow_dissector_key_mpls *key_mask,
1014 struct netlink_ext_ack *extack)
1015 {
1016 struct flow_dissector_mpls_lse *lse_mask;
1017 struct flow_dissector_mpls_lse *lse_val;
1018
1019 if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
1020 if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
1021 tb[TCA_FLOWER_KEY_MPLS_BOS] ||
1022 tb[TCA_FLOWER_KEY_MPLS_TC] ||
1023 tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
1024 NL_SET_ERR_MSG_ATTR(extack,
1025 tb[TCA_FLOWER_KEY_MPLS_OPTS],
1026 "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
1027 return -EBADMSG;
1028 }
1029
1030 return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
1031 key_val, key_mask, extack);
1032 }
1033
1034 lse_val = &key_val->ls[0];
1035 lse_mask = &key_mask->ls[0];
1036
1037 if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
1038 lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
1039 lse_mask->mpls_ttl = MPLS_TTL_MASK;
1040 dissector_set_mpls_lse(key_val, 0);
1041 dissector_set_mpls_lse(key_mask, 0);
1042 }
1043 if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
1044 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
1045
1046 if (bos & ~MPLS_BOS_MASK) {
1047 NL_SET_ERR_MSG_ATTR(extack,
1048 tb[TCA_FLOWER_KEY_MPLS_BOS],
1049 "Bottom Of Stack (BOS) must be 0 or 1");
1050 return -EINVAL;
1051 }
1052 lse_val->mpls_bos = bos;
1053 lse_mask->mpls_bos = MPLS_BOS_MASK;
1054 dissector_set_mpls_lse(key_val, 0);
1055 dissector_set_mpls_lse(key_mask, 0);
1056 }
1057 if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
1058 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
1059
1060 if (tc & ~MPLS_TC_MASK) {
1061 NL_SET_ERR_MSG_ATTR(extack,
1062 tb[TCA_FLOWER_KEY_MPLS_TC],
1063 "Traffic Class (TC) must be between 0 and 7");
1064 return -EINVAL;
1065 }
1066 lse_val->mpls_tc = tc;
1067 lse_mask->mpls_tc = MPLS_TC_MASK;
1068 dissector_set_mpls_lse(key_val, 0);
1069 dissector_set_mpls_lse(key_mask, 0);
1070 }
1071 if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
1072 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
1073
1074 if (label & ~MPLS_LABEL_MASK) {
1075 NL_SET_ERR_MSG_ATTR(extack,
1076 tb[TCA_FLOWER_KEY_MPLS_LABEL],
1077 "Label must be between 0 and 1048575");
1078 return -EINVAL;
1079 }
1080 lse_val->mpls_label = label;
1081 lse_mask->mpls_label = MPLS_LABEL_MASK;
1082 dissector_set_mpls_lse(key_val, 0);
1083 dissector_set_mpls_lse(key_mask, 0);
1084 }
1085 return 0;
1086 }
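/* Example (illustrative, assuming the iproute2 mpls syntax): matching a
 * single label stack entry can use either the legacy flat attributes
 * (e.g. "mpls_label 100 mpls_ttl 64") or the nested per-LSE form
 * ("mpls lse depth 1 label 100 ttl 64"); mixing the two in one rule is
 * rejected above with -EBADMSG.
 */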
1087
1088 static void fl_set_key_vlan(struct nlattr **tb,
1089 __be16 ethertype,
1090 int vlan_id_key, int vlan_prio_key,
1091 int vlan_next_eth_type_key,
1092 struct flow_dissector_key_vlan *key_val,
1093 struct flow_dissector_key_vlan *key_mask)
1094 {
1095 #define VLAN_PRIORITY_MASK 0x7
1096
1097 if (tb[vlan_id_key]) {
1098 key_val->vlan_id =
1099 nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
1100 key_mask->vlan_id = VLAN_VID_MASK;
1101 }
1102 if (tb[vlan_prio_key]) {
1103 key_val->vlan_priority =
1104 nla_get_u8(tb[vlan_prio_key]) &
1105 VLAN_PRIORITY_MASK;
1106 key_mask->vlan_priority = VLAN_PRIORITY_MASK;
1107 }
1108 if (ethertype) {
1109 key_val->vlan_tpid = ethertype;
1110 key_mask->vlan_tpid = cpu_to_be16(~0);
1111 }
1112 if (tb[vlan_next_eth_type_key]) {
1113 key_val->vlan_eth_type =
1114 nla_get_be16(tb[vlan_next_eth_type_key]);
1115 key_mask->vlan_eth_type = cpu_to_be16(~0);
1116 }
1117 }
1118
1119 static void fl_set_key_pppoe(struct nlattr **tb,
1120 struct flow_dissector_key_pppoe *key_val,
1121 struct flow_dissector_key_pppoe *key_mask,
1122 struct fl_flow_key *key,
1123 struct fl_flow_key *mask)
1124 {
1125 /* key_val::type must be set to ETH_P_PPP_SES
1126 * because ETH_P_PPP_SES was stored in basic.n_proto,
1127 * which might get overwritten by ppp_proto
1128 * or might be set to 0; the role of key_val::type
1129 * is similar to vlan_key::tpid.
1130 */
1131 key_val->type = htons(ETH_P_PPP_SES);
1132 key_mask->type = cpu_to_be16(~0);
1133
1134 if (tb[TCA_FLOWER_KEY_PPPOE_SID]) {
1135 key_val->session_id =
1136 nla_get_be16(tb[TCA_FLOWER_KEY_PPPOE_SID]);
1137 key_mask->session_id = cpu_to_be16(~0);
1138 }
1139 if (tb[TCA_FLOWER_KEY_PPP_PROTO]) {
1140 key_val->ppp_proto =
1141 nla_get_be16(tb[TCA_FLOWER_KEY_PPP_PROTO]);
1142 key_mask->ppp_proto = cpu_to_be16(~0);
1143
1144 if (key_val->ppp_proto == htons(PPP_IP)) {
1145 key->basic.n_proto = htons(ETH_P_IP);
1146 mask->basic.n_proto = cpu_to_be16(~0);
1147 } else if (key_val->ppp_proto == htons(PPP_IPV6)) {
1148 key->basic.n_proto = htons(ETH_P_IPV6);
1149 mask->basic.n_proto = cpu_to_be16(~0);
1150 } else if (key_val->ppp_proto == htons(PPP_MPLS_UC)) {
1151 key->basic.n_proto = htons(ETH_P_MPLS_UC);
1152 mask->basic.n_proto = cpu_to_be16(~0);
1153 } else if (key_val->ppp_proto == htons(PPP_MPLS_MC)) {
1154 key->basic.n_proto = htons(ETH_P_MPLS_MC);
1155 mask->basic.n_proto = cpu_to_be16(~0);
1156 }
1157 } else {
1158 key->basic.n_proto = 0;
1159 mask->basic.n_proto = cpu_to_be16(0);
1160 }
1161 }
1162
1163 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
1164 u32 *dissector_key, u32 *dissector_mask,
1165 u32 flower_flag_bit, u32 dissector_flag_bit)
1166 {
1167 if (flower_mask & flower_flag_bit) {
1168 *dissector_mask |= dissector_flag_bit;
1169 if (flower_key & flower_flag_bit)
1170 *dissector_key |= dissector_flag_bit;
1171 }
1172 }
1173
1174 static int fl_set_key_flags(struct nlattr *tca_opts, struct nlattr **tb,
1175 bool encap, u32 *flags_key, u32 *flags_mask,
1176 struct netlink_ext_ack *extack)
1177 {
1178 int fl_key, fl_mask;
1179 u32 key, mask;
1180
1181 if (encap) {
1182 fl_key = TCA_FLOWER_KEY_ENC_FLAGS;
1183 fl_mask = TCA_FLOWER_KEY_ENC_FLAGS_MASK;
1184 } else {
1185 fl_key = TCA_FLOWER_KEY_FLAGS;
1186 fl_mask = TCA_FLOWER_KEY_FLAGS_MASK;
1187 }
1188
1189 /* mask is mandatory for flags */
1190 if (NL_REQ_ATTR_CHECK(extack, tca_opts, tb, fl_mask)) {
1191 NL_SET_ERR_MSG(extack, "Missing flags mask");
1192 return -EINVAL;
1193 }
1194
1195 key = be32_to_cpu(nla_get_be32(tb[fl_key]));
1196 mask = be32_to_cpu(nla_get_be32(tb[fl_mask]));
1197
1198 *flags_key = 0;
1199 *flags_mask = 0;
1200
1201 fl_set_key_flag(key, mask, flags_key, flags_mask,
1202 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
1203 fl_set_key_flag(key, mask, flags_key, flags_mask,
1204 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
1205 FLOW_DIS_FIRST_FRAG);
1206
1207 fl_set_key_flag(key, mask, flags_key, flags_mask,
1208 TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM,
1209 FLOW_DIS_F_TUNNEL_CSUM);
1210
1211 fl_set_key_flag(key, mask, flags_key, flags_mask,
1212 TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT,
1213 FLOW_DIS_F_TUNNEL_DONT_FRAGMENT);
1214
1215 fl_set_key_flag(key, mask, flags_key, flags_mask,
1216 TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM, FLOW_DIS_F_TUNNEL_OAM);
1217
1218 fl_set_key_flag(key, mask, flags_key, flags_mask,
1219 TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT,
1220 FLOW_DIS_F_TUNNEL_CRIT_OPT);
1221
1222 return 0;
1223 }
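/* Example (illustrative, assuming the iproute2 "ip_flags" syntax): matching
 * "ip_flags frag" sets TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT in both key and
 * mask, which is translated to FLOW_DIS_IS_FRAGMENT above. Note that the
 * mask attribute is mandatory for flags, unlike most other keys.
 */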
1224
1225 static void fl_set_key_ip(struct nlattr **tb, bool encap,
1226 struct flow_dissector_key_ip *key,
1227 struct flow_dissector_key_ip *mask)
1228 {
1229 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
1230 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
1231 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
1232 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
1233
1234 fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
1235 fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
1236 }
1237
1238 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
1239 int depth, int option_len,
1240 struct netlink_ext_ack *extack)
1241 {
1242 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
1243 struct nlattr *class = NULL, *type = NULL, *data = NULL;
1244 struct geneve_opt *opt;
1245 int err, data_len = 0;
1246
1247 if (option_len > sizeof(struct geneve_opt))
1248 data_len = option_len - sizeof(struct geneve_opt);
1249
1250 if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
1251 return -ERANGE;
1252
1253 opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
1254 memset(opt, 0xff, option_len);
1255 opt->length = data_len / 4;
1256 opt->r1 = 0;
1257 opt->r2 = 0;
1258 opt->r3 = 0;
1259
1260 /* If no mask has been provided we assume an exact match. */
1261 if (!depth)
1262 return sizeof(struct geneve_opt) + data_len;
1263
1264 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
1265 NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
1266 return -EINVAL;
1267 }
1268
1269 err = nla_parse_nested_deprecated(tb,
1270 TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
1271 nla, geneve_opt_policy, extack);
1272 if (err < 0)
1273 return err;
1274
1275 /* We are not allowed to omit any of CLASS, TYPE or DATA
1276 * fields from the key.
1277 */
1278 if (!option_len &&
1279 (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
1280 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
1281 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
1282 NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
1283 return -EINVAL;
1284 }
1285
1286 /* Omitting any of CLASS, TYPE or DATA fields is allowed
1287 * for the mask.
1288 */
1289 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
1290 int new_len = key->enc_opts.len;
1291
1292 data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
1293 data_len = nla_len(data);
1294 if (data_len < 4) {
1295 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
1296 return -ERANGE;
1297 }
1298 if (data_len % 4) {
1299 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
1300 return -ERANGE;
1301 }
1302
1303 new_len += sizeof(struct geneve_opt) + data_len;
1304 BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
1305 if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
1306 NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
1307 return -ERANGE;
1308 }
1309 opt->length = data_len / 4;
1310 memcpy(opt->opt_data, nla_data(data), data_len);
1311 }
1312
1313 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
1314 class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
1315 opt->opt_class = nla_get_be16(class);
1316 }
1317
1318 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
1319 type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
1320 opt->type = nla_get_u8(type);
1321 }
1322
1323 return sizeof(struct geneve_opt) + data_len;
1324 }
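/* Example (illustrative, assuming the iproute2 geneve_opts syntax): an
 * option given as "geneve_opts 0102:80:00880022" is encoded as a
 * struct geneve_opt (class 0x0102, type 0x80) followed by 4 bytes of data;
 * when no mask nest is supplied (the mask-side call sees depth == 0), the
 * all-ones memset above turns the option into an exact match.
 */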
1325
1326 static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1327 int depth, int option_len,
1328 struct netlink_ext_ack *extack)
1329 {
1330 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
1331 struct vxlan_metadata *md;
1332 int err;
1333
1334 md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1335 memset(md, 0xff, sizeof(*md));
1336
1337 if (!depth)
1338 return sizeof(*md);
1339
1340 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
1341 NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
1342 return -EINVAL;
1343 }
1344
1345 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
1346 vxlan_opt_policy, extack);
1347 if (err < 0)
1348 return err;
1349
1350 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1351 NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
1352 return -EINVAL;
1353 }
1354
1355 if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1356 md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
1357 md->gbp &= VXLAN_GBP_MASK;
1358 }
1359
1360 return sizeof(*md);
1361 }
1362
1363 static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1364 int depth, int option_len,
1365 struct netlink_ext_ack *extack)
1366 {
1367 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
1368 struct erspan_metadata *md;
1369 int err;
1370
1371 md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1372 memset(md, 0xff, sizeof(*md));
1373 md->version = 1;
1374
1375 if (!depth)
1376 return sizeof(*md);
1377
1378 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
1379 NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
1380 return -EINVAL;
1381 }
1382
1383 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
1384 erspan_opt_policy, extack);
1385 if (err < 0)
1386 return err;
1387
1388 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
1389 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
1390 return -EINVAL;
1391 }
1392
1393 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
1394 md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);
1395
1396 if (md->version == 1) {
1397 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1398 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
1399 return -EINVAL;
1400 }
1401 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1402 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
1403 memset(&md->u, 0x00, sizeof(md->u));
1404 md->u.index = nla_get_be32(nla);
1405 }
1406 } else if (md->version == 2) {
1407 if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
1408 !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
1409 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
1410 return -EINVAL;
1411 }
1412 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
1413 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
1414 md->u.md2.dir = nla_get_u8(nla);
1415 }
1416 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
1417 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
1418 set_hwid(&md->u.md2, nla_get_u8(nla));
1419 }
1420 } else {
1421 NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
1422 return -EINVAL;
1423 }
1424
1425 return sizeof(*md);
1426 }
1427
1428 static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key,
1429 int depth, int option_len,
1430 struct netlink_ext_ack *extack)
1431 {
1432 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1];
1433 struct gtp_pdu_session_info *sinfo;
1434 u8 len = key->enc_opts.len;
1435 int err;
1436
1437 sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[len];
1438 memset(sinfo, 0xff, option_len);
1439
1440 if (!depth)
1441 return sizeof(*sinfo);
1442
1443 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) {
1444 NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask");
1445 return -EINVAL;
1446 }
1447
1448 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla,
1449 gtp_opt_policy, extack);
1450 if (err < 0)
1451 return err;
1452
1453 if (!option_len &&
1454 (!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] ||
1455 !tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) {
1456 NL_SET_ERR_MSG_MOD(extack,
1457 "Missing tunnel key gtp option pdu type or qfi");
1458 return -EINVAL;
1459 }
1460
1461 if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE])
1462 sinfo->pdu_type =
1463 nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]);
1464
1465 if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])
1466 sinfo->qfi = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]);
1467
1468 return sizeof(*sinfo);
1469 }
1470
1471 static int fl_set_pfcp_opt(const struct nlattr *nla, struct fl_flow_key *key,
1472 int depth, int option_len,
1473 struct netlink_ext_ack *extack)
1474 {
1475 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX + 1];
1476 struct pfcp_metadata *md;
1477 int err;
1478
1479 md = (struct pfcp_metadata *)&key->enc_opts.data[key->enc_opts.len];
1480 memset(md, 0xff, sizeof(*md));
1481
1482 if (!depth)
1483 return sizeof(*md);
1484
1485 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_PFCP) {
1486 NL_SET_ERR_MSG_MOD(extack, "Non-pfcp option type for mask");
1487 return -EINVAL;
1488 }
1489
1490 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX, nla,
1491 pfcp_opt_policy, extack);
1492 if (err < 0)
1493 return err;
1494
1495 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]) {
1496 NL_SET_ERR_MSG_MOD(extack, "Missing tunnel key pfcp option type");
1497 return -EINVAL;
1498 }
1499
1500 if (tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE])
1501 md->type = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]);
1502
1503 if (tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID])
1504 md->seid = nla_get_be64(tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID]);
1505
1506 return sizeof(*md);
1507 }
1508
1509 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
1510 struct fl_flow_key *mask,
1511 struct netlink_ext_ack *extack)
1512 {
1513 const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
1514 int err, option_len, key_depth, msk_depth = 0;
1515
1516 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
1517 TCA_FLOWER_KEY_ENC_OPTS_MAX,
1518 enc_opts_policy, extack);
1519 if (err)
1520 return err;
1521
1522 nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
1523
1524 if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
1525 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
1526 TCA_FLOWER_KEY_ENC_OPTS_MAX,
1527 enc_opts_policy, extack);
1528 if (err)
1529 return err;
1530
1531 nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1532 msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1533 if (!nla_ok(nla_opt_msk, msk_depth)) {
1534 NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
1535 return -EINVAL;
1536 }
1537 }
1538
1539 nla_for_each_attr(nla_opt_key, nla_enc_key,
1540 nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
1541 switch (nla_type(nla_opt_key)) {
1542 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
1543 if (key->enc_opts.dst_opt_type &&
1544 key->enc_opts.dst_opt_type !=
1545 IP_TUNNEL_GENEVE_OPT_BIT) {
1546 NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
1547 return -EINVAL;
1548 }
1549 option_len = 0;
1550 key->enc_opts.dst_opt_type = IP_TUNNEL_GENEVE_OPT_BIT;
1551 option_len = fl_set_geneve_opt(nla_opt_key, key,
1552 key_depth, option_len,
1553 extack);
1554 if (option_len < 0)
1555 return option_len;
1556
1557 key->enc_opts.len += option_len;
1558 /* At the same time we need to parse through the mask
1559 * in order to verify exact and mask attribute lengths.
1560 */
1561 mask->enc_opts.dst_opt_type = IP_TUNNEL_GENEVE_OPT_BIT;
1562 option_len = fl_set_geneve_opt(nla_opt_msk, mask,
1563 msk_depth, option_len,
1564 extack);
1565 if (option_len < 0)
1566 return option_len;
1567
1568 mask->enc_opts.len += option_len;
1569 if (key->enc_opts.len != mask->enc_opts.len) {
1570 NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1571 return -EINVAL;
1572 }
1573 break;
1574 case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
1575 if (key->enc_opts.dst_opt_type) {
1576 NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
1577 return -EINVAL;
1578 }
1579 option_len = 0;
1580 key->enc_opts.dst_opt_type = IP_TUNNEL_VXLAN_OPT_BIT;
1581 option_len = fl_set_vxlan_opt(nla_opt_key, key,
1582 key_depth, option_len,
1583 extack);
1584 if (option_len < 0)
1585 return option_len;
1586
1587 key->enc_opts.len += option_len;
1588 /* At the same time we need to parse through the mask
1589 * in order to verify exact and mask attribute lengths.
1590 */
1591 mask->enc_opts.dst_opt_type = IP_TUNNEL_VXLAN_OPT_BIT;
1592 option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
1593 msk_depth, option_len,
1594 extack);
1595 if (option_len < 0)
1596 return option_len;
1597
1598 mask->enc_opts.len += option_len;
1599 if (key->enc_opts.len != mask->enc_opts.len) {
1600 NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1601 return -EINVAL;
1602 }
1603 break;
1604 case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
1605 if (key->enc_opts.dst_opt_type) {
1606 NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
1607 return -EINVAL;
1608 }
1609 option_len = 0;
1610 key->enc_opts.dst_opt_type = IP_TUNNEL_ERSPAN_OPT_BIT;
1611 option_len = fl_set_erspan_opt(nla_opt_key, key,
1612 key_depth, option_len,
1613 extack);
1614 if (option_len < 0)
1615 return option_len;
1616
1617 key->enc_opts.len += option_len;
1618 /* At the same time we need to parse through the mask
1619 * in order to verify exact and mask attribute lengths.
1620 */
1621 mask->enc_opts.dst_opt_type = IP_TUNNEL_ERSPAN_OPT_BIT;
1622 option_len = fl_set_erspan_opt(nla_opt_msk, mask,
1623 msk_depth, option_len,
1624 extack);
1625 if (option_len < 0)
1626 return option_len;
1627
1628 mask->enc_opts.len += option_len;
1629 if (key->enc_opts.len != mask->enc_opts.len) {
1630 NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1631 return -EINVAL;
1632 }
1633 break;
1634 case TCA_FLOWER_KEY_ENC_OPTS_GTP:
1635 if (key->enc_opts.dst_opt_type) {
1636 NL_SET_ERR_MSG_MOD(extack,
1637 "Duplicate type for gtp options");
1638 return -EINVAL;
1639 }
1640 option_len = 0;
1641 key->enc_opts.dst_opt_type = IP_TUNNEL_GTP_OPT_BIT;
1642 option_len = fl_set_gtp_opt(nla_opt_key, key,
1643 key_depth, option_len,
1644 extack);
1645 if (option_len < 0)
1646 return option_len;
1647
1648 key->enc_opts.len += option_len;
1649 /* At the same time we need to parse through the mask
1650 * in order to verify exact and mask attribute lengths.
1651 */
1652 mask->enc_opts.dst_opt_type = IP_TUNNEL_GTP_OPT_BIT;
1653 option_len = fl_set_gtp_opt(nla_opt_msk, mask,
1654 msk_depth, option_len,
1655 extack);
1656 if (option_len < 0)
1657 return option_len;
1658
1659 mask->enc_opts.len += option_len;
1660 if (key->enc_opts.len != mask->enc_opts.len) {
1661 NL_SET_ERR_MSG_MOD(extack,
1662 "Key and mask miss aligned");
1663 return -EINVAL;
1664 }
1665 break;
1666 case TCA_FLOWER_KEY_ENC_OPTS_PFCP:
1667 if (key->enc_opts.dst_opt_type) {
1668 NL_SET_ERR_MSG_MOD(extack, "Duplicate type for pfcp options");
1669 return -EINVAL;
1670 }
1671 option_len = 0;
1672 key->enc_opts.dst_opt_type = IP_TUNNEL_PFCP_OPT_BIT;
1673 option_len = fl_set_pfcp_opt(nla_opt_key, key,
1674 key_depth, option_len,
1675 extack);
1676 if (option_len < 0)
1677 return option_len;
1678
1679 key->enc_opts.len += option_len;
1680 /* At the same time we need to parse through the mask
1681 * in order to verify exact and mask attribute lengths.
1682 */
1683 mask->enc_opts.dst_opt_type = IP_TUNNEL_PFCP_OPT_BIT;
1684 option_len = fl_set_pfcp_opt(nla_opt_msk, mask,
1685 msk_depth, option_len,
1686 extack);
1687 if (option_len < 0)
1688 return option_len;
1689
1690 mask->enc_opts.len += option_len;
1691 if (key->enc_opts.len != mask->enc_opts.len) {
1692 NL_SET_ERR_MSG_MOD(extack, "Key and mask miss aligned");
1693 return -EINVAL;
1694 }
1695 break;
1696 default:
1697 NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1698 return -EINVAL;
1699 }
1700
1701 if (!msk_depth)
1702 continue;
1703
1704 if (!nla_ok(nla_opt_msk, msk_depth)) {
1705 NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
1706 return -EINVAL;
1707 }
1708 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1709 }
1710
1711 return 0;
1712 }
1713
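/* Reject conntrack state combinations that can never occur: any flag without
 * +trk, +new together with +est or +rpl, and +inv combined with anything
 * other than +trk. The caller passes the masked key bits, so only bits the
 * filter actually matches on are validated.
 */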
1714 static int fl_validate_ct_state(u16 state, struct nlattr *tb,
1715 struct netlink_ext_ack *extack)
1716 {
1717 if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
1718 NL_SET_ERR_MSG_ATTR(extack, tb,
1719 "no trk, so no other flag can be set");
1720 return -EINVAL;
1721 }
1722
1723 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1724 state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
1725 NL_SET_ERR_MSG_ATTR(extack, tb,
1726 "new and est are mutually exclusive");
1727 return -EINVAL;
1728 }
1729
1730 if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID &&
1731 state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
1732 TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
1733 NL_SET_ERR_MSG_ATTR(extack, tb,
1734 "when inv is set, only trk may be set");
1735 return -EINVAL;
1736 }
1737
1738 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1739 state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
1740 NL_SET_ERR_MSG_ATTR(extack, tb,
1741 "new and rpl are mutually exclusive");
1742 return -EINVAL;
1743 }
1744
1745 return 0;
1746 }
1747
1748 static int fl_set_key_ct(struct nlattr **tb,
1749 struct flow_dissector_key_ct *key,
1750 struct flow_dissector_key_ct *mask,
1751 struct netlink_ext_ack *extack)
1752 {
1753 if (tb[TCA_FLOWER_KEY_CT_STATE]) {
1754 int err;
1755
1756 if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1757 NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1758 return -EOPNOTSUPP;
1759 }
1760 fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1761 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1762 sizeof(key->ct_state));
1763
1764 err = fl_validate_ct_state(key->ct_state & mask->ct_state,
1765 tb[TCA_FLOWER_KEY_CT_STATE_MASK],
1766 extack);
1767 if (err)
1768 return err;
1769
1770 }
1771 if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1772 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1773 NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
1774 return -EOPNOTSUPP;
1775 }
1776 fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1777 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1778 sizeof(key->ct_zone));
1779 }
1780 if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1781 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1782 NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1783 return -EOPNOTSUPP;
1784 }
1785 fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1786 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1787 sizeof(key->ct_mark));
1788 }
1789 if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1790 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1791 NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1792 return -EOPNOTSUPP;
1793 }
1794 fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1795 mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1796 sizeof(key->ct_labels));
1797 }
1798
1799 return 0;
1800 }
1801
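/* Decide whether a VLAN header is expected at this nesting level. Returns
 * true if the ethertype attribute holds a VLAN TPID or if the filter already
 * matches on more than @vthresh VLAN tags (only the num_of_vlans check
 * applies when the attribute is absent); otherwise the ethertype is recorded
 * as an exact match on basic.n_proto and false is returned.
 */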
1802 static bool is_vlan_key(struct nlattr *tb, __be16 *ethertype,
1803 struct fl_flow_key *key, struct fl_flow_key *mask,
1804 int vthresh)
1805 {
1806 const bool good_num_of_vlans = key->num_of_vlans.num_of_vlans > vthresh;
1807
1808 if (!tb) {
1809 *ethertype = 0;
1810 return good_num_of_vlans;
1811 }
1812
1813 *ethertype = nla_get_be16(tb);
1814 if (good_num_of_vlans || eth_type_vlan(*ethertype))
1815 return true;
1816
1817 key->basic.n_proto = *ethertype;
1818 mask->basic.n_proto = cpu_to_be16(~0);
1819 return false;
1820 }
1821
1822 static void fl_set_key_cfm_md_level(struct nlattr **tb,
1823 struct fl_flow_key *key,
1824 struct fl_flow_key *mask,
1825 struct netlink_ext_ack *extack)
1826 {
1827 u8 level;
1828
1829 if (!tb[TCA_FLOWER_KEY_CFM_MD_LEVEL])
1830 return;
1831
1832 level = nla_get_u8(tb[TCA_FLOWER_KEY_CFM_MD_LEVEL]);
1833 key->cfm.mdl_ver = FIELD_PREP(FLOW_DIS_CFM_MDL_MASK, level);
1834 mask->cfm.mdl_ver = FLOW_DIS_CFM_MDL_MASK;
1835 }
1836
1837 static void fl_set_key_cfm_opcode(struct nlattr **tb,
1838 struct fl_flow_key *key,
1839 struct fl_flow_key *mask,
1840 struct netlink_ext_ack *extack)
1841 {
1842 fl_set_key_val(tb, &key->cfm.opcode, TCA_FLOWER_KEY_CFM_OPCODE,
1843 &mask->cfm.opcode, TCA_FLOWER_UNSPEC,
1844 sizeof(key->cfm.opcode));
1845 }
1846
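/* Parse the nested TCA_FLOWER_KEY_CFM attribute (802.1ag CFM): the MD level
 * is packed into cfm.mdl_ver under FLOW_DIS_CFM_MDL_MASK, and the opcode is
 * taken as an exact match since it has no separate mask attribute.
 */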
1847 static int fl_set_key_cfm(struct nlattr **tb,
1848 struct fl_flow_key *key,
1849 struct fl_flow_key *mask,
1850 struct netlink_ext_ack *extack)
1851 {
1852 struct nlattr *nla_cfm_opt[TCA_FLOWER_KEY_CFM_OPT_MAX + 1];
1853 int err;
1854
1855 if (!tb[TCA_FLOWER_KEY_CFM])
1856 return 0;
1857
1858 err = nla_parse_nested(nla_cfm_opt, TCA_FLOWER_KEY_CFM_OPT_MAX,
1859 tb[TCA_FLOWER_KEY_CFM], cfm_opt_policy, extack);
1860 if (err < 0)
1861 return err;
1862
1863 fl_set_key_cfm_opcode(nla_cfm_opt, key, mask, extack);
1864 fl_set_key_cfm_md_level(nla_cfm_opt, key, mask, extack);
1865
1866 return 0;
1867 }
1868
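/* Translate the TCA_FLOWER_* netlink attributes into a flow key and mask.
 * Fixed-size fields are copied with fl_set_key_val(); protocol-dependent
 * fields are parsed only once the relevant n_proto/ip_proto values have been
 * established. Returns 0 on success or a negative errno with extack set.
 */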
1869 static int fl_set_key(struct net *net, struct nlattr *tca_opts,
1870 struct nlattr **tb, struct fl_flow_key *key,
1871 struct fl_flow_key *mask, struct netlink_ext_ack *extack)
1872 {
1873 __be16 ethertype;
1874 int ret = 0;
1875
1876 if (tb[TCA_FLOWER_INDEV]) {
1877 int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1878 if (err < 0)
1879 return err;
1880 key->meta.ingress_ifindex = err;
1881 mask->meta.ingress_ifindex = 0xffffffff;
1882 }
1883
1884 fl_set_key_val(tb, &key->meta.l2_miss, TCA_FLOWER_L2_MISS,
1885 &mask->meta.l2_miss, TCA_FLOWER_UNSPEC,
1886 sizeof(key->meta.l2_miss));
1887
1888 fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1889 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1890 sizeof(key->eth.dst));
1891 fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1892 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1893 sizeof(key->eth.src));
1894 fl_set_key_val(tb, &key->num_of_vlans,
1895 TCA_FLOWER_KEY_NUM_OF_VLANS,
1896 &mask->num_of_vlans,
1897 TCA_FLOWER_UNSPEC,
1898 sizeof(key->num_of_vlans));
1899
1900 if (is_vlan_key(tb[TCA_FLOWER_KEY_ETH_TYPE], &ethertype, key, mask, 0)) {
1901 fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1902 TCA_FLOWER_KEY_VLAN_PRIO,
1903 TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1904 &key->vlan, &mask->vlan);
1905
1906 if (is_vlan_key(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE],
1907 &ethertype, key, mask, 1)) {
1908 fl_set_key_vlan(tb, ethertype,
1909 TCA_FLOWER_KEY_CVLAN_ID,
1910 TCA_FLOWER_KEY_CVLAN_PRIO,
1911 TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1912 &key->cvlan, &mask->cvlan);
1913 fl_set_key_val(tb, &key->basic.n_proto,
1914 TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1915 &mask->basic.n_proto,
1916 TCA_FLOWER_UNSPEC,
1917 sizeof(key->basic.n_proto));
1918 }
1919 }
1920
1921 if (key->basic.n_proto == htons(ETH_P_PPP_SES))
1922 fl_set_key_pppoe(tb, &key->pppoe, &mask->pppoe, key, mask);
1923
1924 if (key->basic.n_proto == htons(ETH_P_IP) ||
1925 key->basic.n_proto == htons(ETH_P_IPV6)) {
1926 fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1927 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1928 sizeof(key->basic.ip_proto));
1929 fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1930 }
1931
1932 if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1933 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1934 mask->control.addr_type = ~0;
1935 fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1936 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1937 sizeof(key->ipv4.src));
1938 fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1939 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1940 sizeof(key->ipv4.dst));
1941 } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1942 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1943 mask->control.addr_type = ~0;
1944 fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1945 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1946 sizeof(key->ipv6.src));
1947 fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1948 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1949 sizeof(key->ipv6.dst));
1950 }
1951
1952 if (key->basic.ip_proto == IPPROTO_TCP) {
1953 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1954 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1955 sizeof(key->tp.src));
1956 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1957 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1958 sizeof(key->tp.dst));
1959 fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1960 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1961 sizeof(key->tcp.flags));
1962 } else if (key->basic.ip_proto == IPPROTO_UDP) {
1963 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1964 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1965 sizeof(key->tp.src));
1966 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1967 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1968 sizeof(key->tp.dst));
1969 } else if (key->basic.ip_proto == IPPROTO_SCTP) {
1970 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1971 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1972 sizeof(key->tp.src));
1973 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1974 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1975 sizeof(key->tp.dst));
1976 } else if (key->basic.n_proto == htons(ETH_P_IP) &&
1977 key->basic.ip_proto == IPPROTO_ICMP) {
1978 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1979 &mask->icmp.type,
1980 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1981 sizeof(key->icmp.type));
1982 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1983 &mask->icmp.code,
1984 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1985 sizeof(key->icmp.code));
1986 } else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1987 key->basic.ip_proto == IPPROTO_ICMPV6) {
1988 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1989 &mask->icmp.type,
1990 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1991 sizeof(key->icmp.type));
1992 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1993 &mask->icmp.code,
1994 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1995 sizeof(key->icmp.code));
1996 } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1997 key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1998 ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
1999 if (ret)
2000 return ret;
2001 } else if (key->basic.n_proto == htons(ETH_P_ARP) ||
2002 key->basic.n_proto == htons(ETH_P_RARP)) {
2003 fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
2004 &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
2005 sizeof(key->arp.sip));
2006 fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
2007 &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
2008 sizeof(key->arp.tip));
2009 fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
2010 &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
2011 sizeof(key->arp.op));
2012 fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
2013 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
2014 sizeof(key->arp.sha));
2015 fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
2016 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
2017 sizeof(key->arp.tha));
2018 } else if (key->basic.ip_proto == IPPROTO_L2TP) {
2019 fl_set_key_val(tb, &key->l2tpv3.session_id,
2020 TCA_FLOWER_KEY_L2TPV3_SID,
2021 &mask->l2tpv3.session_id, TCA_FLOWER_UNSPEC,
2022 sizeof(key->l2tpv3.session_id));
2023 } else if (key->basic.n_proto == htons(ETH_P_CFM)) {
2024 ret = fl_set_key_cfm(tb, key, mask, extack);
2025 if (ret)
2026 return ret;
2027 }
2028
2029 if (key->basic.ip_proto == IPPROTO_TCP ||
2030 key->basic.ip_proto == IPPROTO_UDP ||
2031 key->basic.ip_proto == IPPROTO_SCTP) {
2032 ret = fl_set_key_port_range(tb, key, mask, extack);
2033 if (ret)
2034 return ret;
2035 }
2036
2037 if (tb[TCA_FLOWER_KEY_SPI]) {
2038 ret = fl_set_key_spi(tb, key, mask, extack);
2039 if (ret)
2040 return ret;
2041 }
2042
2043 if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
2044 tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
2045 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2046 mask->enc_control.addr_type = ~0;
2047 fl_set_key_val(tb, &key->enc_ipv4.src,
2048 TCA_FLOWER_KEY_ENC_IPV4_SRC,
2049 &mask->enc_ipv4.src,
2050 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
2051 sizeof(key->enc_ipv4.src));
2052 fl_set_key_val(tb, &key->enc_ipv4.dst,
2053 TCA_FLOWER_KEY_ENC_IPV4_DST,
2054 &mask->enc_ipv4.dst,
2055 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
2056 sizeof(key->enc_ipv4.dst));
2057 }
2058
2059 if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
2060 tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
2061 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2062 mask->enc_control.addr_type = ~0;
2063 fl_set_key_val(tb, &key->enc_ipv6.src,
2064 TCA_FLOWER_KEY_ENC_IPV6_SRC,
2065 &mask->enc_ipv6.src,
2066 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
2067 sizeof(key->enc_ipv6.src));
2068 fl_set_key_val(tb, &key->enc_ipv6.dst,
2069 TCA_FLOWER_KEY_ENC_IPV6_DST,
2070 &mask->enc_ipv6.dst,
2071 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
2072 sizeof(key->enc_ipv6.dst));
2073 }
2074
2075 fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
2076 &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
2077 sizeof(key->enc_key_id.keyid));
2078
2079 fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
2080 &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
2081 sizeof(key->enc_tp.src));
2082
2083 fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
2084 &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
2085 sizeof(key->enc_tp.dst));
2086
2087 fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
2088
2089 fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
2090 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
2091 sizeof(key->hash.hash));
2092
2093 if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
2094 ret = fl_set_enc_opt(tb, key, mask, extack);
2095 if (ret)
2096 return ret;
2097 }
2098
2099 ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
2100 if (ret)
2101 return ret;
2102
2103 if (tb[TCA_FLOWER_KEY_FLAGS]) {
2104 ret = fl_set_key_flags(tca_opts, tb, false,
2105 &key->control.flags,
2106 &mask->control.flags, extack);
2107 if (ret)
2108 return ret;
2109 }
2110
2111 if (tb[TCA_FLOWER_KEY_ENC_FLAGS])
2112 ret = fl_set_key_flags(tca_opts, tb, true,
2113 &key->enc_control.flags,
2114 &mask->enc_control.flags, extack);
2115
2116 return ret;
2117 }
2118
2119 static void fl_mask_copy(struct fl_flow_mask *dst,
2120 struct fl_flow_mask *src)
2121 {
2122 const void *psrc = fl_key_get_start(&src->key, src);
2123 void *pdst = fl_key_get_start(&dst->key, src);
2124
2125 memcpy(pdst, psrc, fl_mask_range(src));
2126 dst->range = src->range;
2127 }
2128
2129 static const struct rhashtable_params fl_ht_params = {
2130 .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
2131 .head_offset = offsetof(struct cls_fl_filter, ht_node),
2132 .automatic_shrinking = true,
2133 };
2134
2135 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
2136 {
2137 mask->filter_ht_params = fl_ht_params;
2138 mask->filter_ht_params.key_len = fl_mask_range(mask);
2139 mask->filter_ht_params.key_offset += mask->range.start;
2140
2141 return rhashtable_init(&mask->ht, &mask->filter_ht_params);
2142 }
2143
2144 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
2145 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
2146
2147 #define FL_KEY_IS_MASKED(mask, member) \
2148 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
2149 0, FL_KEY_MEMBER_SIZE(member)) \
2150
2151 #define FL_KEY_SET(keys, cnt, id, member) \
2152 do { \
2153 keys[cnt].key_id = id; \
2154 keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
2155 cnt++; \
2156 } while(0);
2157
2158 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \
2159 do { \
2160 if (FL_KEY_IS_MASKED(mask, member)) \
2161 FL_KEY_SET(keys, cnt, id, member); \
2162 } while(0);
2163
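/* Build the flow_dissector used with this mask: CONTROL and BASIC are always
 * dissected, every other key is included only if at least one bit of its
 * mask is set, so lookups never dissect fields the filters don't match on.
 */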
2164 static void fl_init_dissector(struct flow_dissector *dissector,
2165 struct fl_flow_key *mask)
2166 {
2167 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
2168 size_t cnt = 0;
2169
2170 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2171 FLOW_DISSECTOR_KEY_META, meta);
2172 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
2173 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
2174 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2175 FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
2176 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2177 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
2178 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2179 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
2180 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2181 FLOW_DISSECTOR_KEY_PORTS, tp);
2182 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2183 FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
2184 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2185 FLOW_DISSECTOR_KEY_IP, ip);
2186 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2187 FLOW_DISSECTOR_KEY_TCP, tcp);
2188 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2189 FLOW_DISSECTOR_KEY_ICMP, icmp);
2190 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2191 FLOW_DISSECTOR_KEY_ARP, arp);
2192 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2193 FLOW_DISSECTOR_KEY_MPLS, mpls);
2194 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2195 FLOW_DISSECTOR_KEY_VLAN, vlan);
2196 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2197 FLOW_DISSECTOR_KEY_CVLAN, cvlan);
2198 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2199 FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
2200 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2201 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
2202 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2203 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
2204 if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
2205 FL_KEY_IS_MASKED(mask, enc_ipv6) ||
2206 FL_KEY_IS_MASKED(mask, enc_control))
2207 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
2208 enc_control);
2209 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2210 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
2211 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2212 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
2213 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2214 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
2215 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2216 FLOW_DISSECTOR_KEY_CT, ct);
2217 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2218 FLOW_DISSECTOR_KEY_HASH, hash);
2219 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2220 FLOW_DISSECTOR_KEY_NUM_OF_VLANS, num_of_vlans);
2221 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2222 FLOW_DISSECTOR_KEY_PPPOE, pppoe);
2223 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2224 FLOW_DISSECTOR_KEY_L2TPV3, l2tpv3);
2225 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2226 FLOW_DISSECTOR_KEY_IPSEC, ipsec);
2227 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2228 FLOW_DISSECTOR_KEY_CFM, cfm);
2229
2230 skb_flow_dissector_init(dissector, keys, cnt);
2231 }
2232
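/* Allocate and initialize a new mask from the temporary one inserted by
 * fl_check_assign_mask() and replace that placeholder node in head->ht.
 * The new mask starts with refcnt 1 and is linked on head->masks.
 */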
2233 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
2234 struct fl_flow_mask *mask)
2235 {
2236 struct fl_flow_mask *newmask;
2237 int err;
2238
2239 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
2240 if (!newmask)
2241 return ERR_PTR(-ENOMEM);
2242
2243 fl_mask_copy(newmask, mask);
2244
2245 if ((newmask->key.tp_range.tp_min.dst &&
2246 newmask->key.tp_range.tp_max.dst) ||
2247 (newmask->key.tp_range.tp_min.src &&
2248 newmask->key.tp_range.tp_max.src))
2249 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
2250
2251 err = fl_init_mask_hashtable(newmask);
2252 if (err)
2253 goto errout_free;
2254
2255 fl_init_dissector(&newmask->dissector, &newmask->key);
2256
2257 INIT_LIST_HEAD_RCU(&newmask->filters);
2258
2259 refcount_set(&newmask->refcnt, 1);
2260 err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
2261 &newmask->ht_node, mask_ht_params);
2262 if (err)
2263 goto errout_destroy;
2264
2265 spin_lock(&head->masks_lock);
2266 list_add_tail_rcu(&newmask->list, &head->masks);
2267 spin_unlock(&head->masks_lock);
2268
2269 return newmask;
2270
2271 errout_destroy:
2272 rhashtable_destroy(&newmask->ht);
2273 errout_free:
2274 kfree(newmask);
2275
2276 return ERR_PTR(err);
2277 }
2278
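/* Find an existing mask identical to @mask or create a new one. The caller's
 * @mask is inserted first as a placeholder, so concurrent writers using the
 * same mask either reuse it or back off with -EAGAIN until a mask with a
 * non-zero refcount is in place. On success, fnew->mask holds a reference.
 */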
2279 static int fl_check_assign_mask(struct cls_fl_head *head,
2280 struct cls_fl_filter *fnew,
2281 struct cls_fl_filter *fold,
2282 struct fl_flow_mask *mask)
2283 {
2284 struct fl_flow_mask *newmask;
2285 int ret = 0;
2286
2287 rcu_read_lock();
2288
2289 /* Insert mask as temporary node to prevent concurrent creation of mask
2290 * with same key. Any concurrent lookups with same key will return
2291 * -EAGAIN because mask's refcnt is zero.
2292 */
2293 fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
2294 &mask->ht_node,
2295 mask_ht_params);
2296 if (!fnew->mask) {
2297 rcu_read_unlock();
2298
2299 if (fold) {
2300 ret = -EINVAL;
2301 goto errout_cleanup;
2302 }
2303
2304 newmask = fl_create_new_mask(head, mask);
2305 if (IS_ERR(newmask)) {
2306 ret = PTR_ERR(newmask);
2307 goto errout_cleanup;
2308 }
2309
2310 fnew->mask = newmask;
2311 return 0;
2312 } else if (IS_ERR(fnew->mask)) {
2313 ret = PTR_ERR(fnew->mask);
2314 } else if (fold && fold->mask != fnew->mask) {
2315 ret = -EINVAL;
2316 } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
2317 /* Mask was deleted concurrently, try again */
2318 ret = -EAGAIN;
2319 }
2320 rcu_read_unlock();
2321 return ret;
2322
2323 errout_cleanup:
2324 rhashtable_remove_fast(&head->ht, &mask->ht_node,
2325 mask_ht_params);
2326 return ret;
2327 }
2328
2329 static bool fl_needs_tc_skb_ext(const struct fl_flow_key *mask)
2330 {
2331 return mask->meta.l2_miss;
2332 }
2333
2334 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
2335 struct cls_fl_filter *fold,
2336 bool *in_ht)
2337 {
2338 struct fl_flow_mask *mask = fnew->mask;
2339 int err;
2340
2341 err = rhashtable_lookup_insert_fast(&mask->ht,
2342 &fnew->ht_node,
2343 mask->filter_ht_params);
2344 if (err) {
2345 *in_ht = false;
2346 /* It is okay if filter with same key exists when
2347 * overwriting.
2348 */
2349 return fold && err == -EEXIST ? 0 : err;
2350 }
2351
2352 *in_ht = true;
2353 return 0;
2354 }
2355
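/* Create a new filter or replace @fold. The slow path runs mostly unlocked:
 * tp->lock is taken only around handle allocation and the final
 * insertion/replacement, so concurrent deletion of tp or fold is detected
 * via the deleting/deleted flags and returned as -EAGAIN for cls_api to
 * retry.
 */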
2356 static int fl_change(struct net *net, struct sk_buff *in_skb,
2357 struct tcf_proto *tp, unsigned long base,
2358 u32 handle, struct nlattr **tca,
2359 void **arg, u32 flags,
2360 struct netlink_ext_ack *extack)
2361 {
2362 struct cls_fl_head *head = fl_head_dereference(tp);
2363 bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
2364 struct nlattr *tca_opts = tca[TCA_OPTIONS];
2365 struct cls_fl_filter *fold = *arg;
2366 bool bound_to_filter = false;
2367 struct cls_fl_filter *fnew;
2368 struct fl_flow_mask *mask;
2369 struct nlattr **tb;
2370 bool in_ht;
2371 int err;
2372
2373 if (!tca_opts) {
2374 err = -EINVAL;
2375 goto errout_fold;
2376 }
2377
2378 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
2379 if (!mask) {
2380 err = -ENOBUFS;
2381 goto errout_fold;
2382 }
2383
2384 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2385 if (!tb) {
2386 err = -ENOBUFS;
2387 goto errout_mask_alloc;
2388 }
2389
2390 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2391 tca_opts, fl_policy, NULL);
2392 if (err < 0)
2393 goto errout_tb;
2394
2395 if (fold && handle && fold->handle != handle) {
2396 err = -EINVAL;
2397 goto errout_tb;
2398 }
2399
2400 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
2401 if (!fnew) {
2402 err = -ENOBUFS;
2403 goto errout_tb;
2404 }
2405 INIT_LIST_HEAD(&fnew->hw_list);
2406 refcount_set(&fnew->refcnt, 1);
2407
2408 if (tb[TCA_FLOWER_FLAGS]) {
2409 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
2410
2411 if (!tc_flags_valid(fnew->flags)) {
2412 kfree(fnew);
2413 err = -EINVAL;
2414 goto errout_tb;
2415 }
2416 }
2417
2418 if (!fold) {
2419 spin_lock(&tp->lock);
2420 if (!handle) {
2421 handle = 1;
2422 err = idr_alloc_u32(&head->handle_idr, NULL, &handle,
2423 INT_MAX, GFP_ATOMIC);
2424 } else {
2425 err = idr_alloc_u32(&head->handle_idr, NULL, &handle,
2426 handle, GFP_ATOMIC);
2427
2428 /* Filter with specified handle was concurrently
2429 * inserted after initial check in cls_api. This is not
2430 * necessarily an error if NLM_F_EXCL is not set in
2431 * message flags. Returning EAGAIN will cause cls_api to
2432 * try to update concurrently inserted rule.
2433 */
2434 if (err == -ENOSPC)
2435 err = -EAGAIN;
2436 }
2437 spin_unlock(&tp->lock);
2438
2439 if (err) {
2440 kfree(fnew);
2441 goto errout_tb;
2442 }
2443 }
2444 fnew->handle = handle;
2445
2446 err = tcf_exts_init_ex(&fnew->exts, net, TCA_FLOWER_ACT, 0, tp, handle,
2447 !tc_skip_hw(fnew->flags));
2448 if (err < 0)
2449 goto errout_idr;
2450
2451 err = tcf_exts_validate_ex(net, tp, tb, tca[TCA_RATE],
2452 &fnew->exts, flags, fnew->flags,
2453 extack);
2454 if (err < 0)
2455 goto errout_idr;
2456
2457 if (tb[TCA_FLOWER_CLASSID]) {
2458 fnew->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
2459 if (flags & TCA_ACT_FLAGS_NO_RTNL)
2460 rtnl_lock();
2461 tcf_bind_filter(tp, &fnew->res, base);
2462 if (flags & TCA_ACT_FLAGS_NO_RTNL)
2463 rtnl_unlock();
2464 bound_to_filter = true;
2465 }
2466
2467 err = fl_set_key(net, tca_opts, tb, &fnew->key, &mask->key, extack);
2468 if (err)
2469 goto unbind_filter;
2470
2471 fl_mask_update_range(mask);
2472 fl_set_masked_key(&fnew->mkey, &fnew->key, mask);
2473
2474 if (!fl_mask_fits_tmplt(tp->chain->tmplt_priv, mask)) {
2475 NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
2476 err = -EINVAL;
2477 goto unbind_filter;
2478 }
2479
2480 /* Enable tc skb extension if filter matches on data extracted from
2481 * this extension.
2482 */
2483 if (fl_needs_tc_skb_ext(&mask->key)) {
2484 fnew->needs_tc_skb_ext = 1;
2485 tc_skb_ext_tc_enable();
2486 }
2487
2488 err = fl_check_assign_mask(head, fnew, fold, mask);
2489 if (err)
2490 goto unbind_filter;
2491
2492 err = fl_ht_insert_unique(fnew, fold, &in_ht);
2493 if (err)
2494 goto errout_mask;
2495
2496 if (!tc_skip_hw(fnew->flags)) {
2497 err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
2498 if (err)
2499 goto errout_ht;
2500 }
2501
2502 if (!tc_in_hw(fnew->flags))
2503 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2504
2505 spin_lock(&tp->lock);
2506
2507 /* tp was deleted concurrently. -EAGAIN will cause caller to lookup
2508 * proto again or create new one, if necessary.
2509 */
2510 if (tp->deleting) {
2511 err = -EAGAIN;
2512 goto errout_hw;
2513 }
2514
2515 if (fold) {
2516 /* Fold filter was deleted concurrently. Retry lookup. */
2517 if (fold->deleted) {
2518 err = -EAGAIN;
2519 goto errout_hw;
2520 }
2521
2522 fnew->handle = handle;
2523
2524 if (!in_ht) {
2525 struct rhashtable_params params =
2526 fnew->mask->filter_ht_params;
2527
2528 err = rhashtable_insert_fast(&fnew->mask->ht,
2529 &fnew->ht_node,
2530 params);
2531 if (err)
2532 goto errout_hw;
2533 in_ht = true;
2534 }
2535
2536 refcount_inc(&fnew->refcnt);
2537 rhashtable_remove_fast(&fold->mask->ht,
2538 &fold->ht_node,
2539 fold->mask->filter_ht_params);
2540 idr_replace(&head->handle_idr, fnew, fnew->handle);
2541 list_replace_rcu(&fold->list, &fnew->list);
2542 fold->deleted = true;
2543
2544 spin_unlock(&tp->lock);
2545
2546 fl_mask_put(head, fold->mask);
2547 if (!tc_skip_hw(fold->flags))
2548 fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2549 tcf_unbind_filter(tp, &fold->res);
2550 /* Caller holds reference to fold, so refcnt is always > 0
2551 * after this.
2552 */
2553 refcount_dec(&fold->refcnt);
2554 __fl_put(fold);
2555 } else {
2556 idr_replace(&head->handle_idr, fnew, fnew->handle);
2557
2558 refcount_inc(&fnew->refcnt);
2559 list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2560 spin_unlock(&tp->lock);
2561 }
2562
2563 *arg = fnew;
2564
2565 kfree(tb);
2566 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2567 return 0;
2568
2569 errout_ht:
2570 spin_lock(&tp->lock);
2571 errout_hw:
2572 fnew->deleted = true;
2573 spin_unlock(&tp->lock);
2574 if (!tc_skip_hw(fnew->flags))
2575 fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2576 if (in_ht)
2577 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2578 fnew->mask->filter_ht_params);
2579 errout_mask:
2580 fl_mask_put(head, fnew->mask);
2581
2582 unbind_filter:
2583 if (bound_to_filter) {
2584 if (flags & TCA_ACT_FLAGS_NO_RTNL)
2585 rtnl_lock();
2586 tcf_unbind_filter(tp, &fnew->res);
2587 if (flags & TCA_ACT_FLAGS_NO_RTNL)
2588 rtnl_unlock();
2589 }
2590
2591 errout_idr:
2592 if (!fold) {
2593 spin_lock(&tp->lock);
2594 idr_remove(&head->handle_idr, fnew->handle);
2595 spin_unlock(&tp->lock);
2596 }
2597 __fl_put(fnew);
2598 errout_tb:
2599 kfree(tb);
2600 errout_mask_alloc:
2601 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2602 errout_fold:
2603 if (fold)
2604 __fl_put(fold);
2605 return err;
2606 }
2607
2608 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2609 bool rtnl_held, struct netlink_ext_ack *extack)
2610 {
2611 struct cls_fl_head *head = fl_head_dereference(tp);
2612 struct cls_fl_filter *f = arg;
2613 bool last_on_mask;
2614 int err = 0;
2615
2616 err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2617 *last = list_empty(&head->masks);
2618 __fl_put(f);
2619
2620 return err;
2621 }
2622
2623 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2624 bool rtnl_held)
2625 {
2626 struct cls_fl_head *head = fl_head_dereference(tp);
2627 unsigned long id = arg->cookie, tmp;
2628 struct cls_fl_filter *f;
2629
2630 arg->count = arg->skip;
2631
2632 rcu_read_lock();
2633 idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2634 /* don't return filters that are being deleted */
2635 if (!f || !refcount_inc_not_zero(&f->refcnt))
2636 continue;
2637 rcu_read_unlock();
2638
2639 if (arg->fn(tp, f, arg) < 0) {
2640 __fl_put(f);
2641 arg->stop = 1;
2642 rcu_read_lock();
2643 break;
2644 }
2645 __fl_put(f);
2646 arg->count++;
2647 rcu_read_lock();
2648 }
2649 rcu_read_unlock();
2650 arg->cookie = id;
2651 }
2652
2653 static struct cls_fl_filter *
2654 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2655 {
2656 struct cls_fl_head *head = fl_head_dereference(tp);
2657
2658 spin_lock(&tp->lock);
2659 if (list_empty(&head->hw_filters)) {
2660 spin_unlock(&tp->lock);
2661 return NULL;
2662 }
2663
2664 if (!f)
2665 f = list_entry(&head->hw_filters, struct cls_fl_filter,
2666 hw_list);
2667 list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2668 if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2669 spin_unlock(&tp->lock);
2670 return f;
2671 }
2672 }
2673
2674 spin_unlock(&tp->lock);
2675 return NULL;
2676 }
2677
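/* Replay all filters currently in hardware to a single block callback, e.g.
 * when a device binds to or unbinds from the block. Walks head->hw_filters
 * under tp->lock via fl_get_next_hw_filter(), holding a temporary reference
 * on each filter while the offload call is issued without the lock.
 */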
2678 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2679 void *cb_priv, struct netlink_ext_ack *extack)
2680 {
2681 struct tcf_block *block = tp->chain->block;
2682 struct flow_cls_offload cls_flower = {};
2683 struct cls_fl_filter *f = NULL;
2684 int err;
2685
2686 /* hw_filters list can only be changed by hw offload functions after
2687 * obtaining rtnl lock. Make sure it is not changed while reoffload is
2688 * iterating it.
2689 */
2690 ASSERT_RTNL();
2691
2692 while ((f = fl_get_next_hw_filter(tp, f, add))) {
2693 cls_flower.rule =
2694 flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2695 if (!cls_flower.rule) {
2696 __fl_put(f);
2697 return -ENOMEM;
2698 }
2699
2700 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2701 extack);
2702 cls_flower.command = add ?
2703 FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2704 cls_flower.cookie = (unsigned long)f;
2705 cls_flower.rule->match.dissector = &f->mask->dissector;
2706 cls_flower.rule->match.mask = &f->mask->key;
2707 cls_flower.rule->match.key = &f->mkey;
2708
2709 err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
2710 cls_flower.common.extack);
2711 if (err) {
2712 kfree(cls_flower.rule);
2713 if (tc_skip_sw(f->flags)) {
2714 __fl_put(f);
2715 return err;
2716 }
2717 goto next_flow;
2718 }
2719
2720 cls_flower.classid = f->res.classid;
2721
2722 err = tc_setup_cb_reoffload(block, tp, add, cb,
2723 TC_SETUP_CLSFLOWER, &cls_flower,
2724 cb_priv, &f->flags,
2725 &f->in_hw_count);
2726 tc_cleanup_offload_action(&cls_flower.rule->action);
2727 kfree(cls_flower.rule);
2728
2729 if (err) {
2730 __fl_put(f);
2731 return err;
2732 }
2733 next_flow:
2734 __fl_put(f);
2735 }
2736
2737 return 0;
2738 }
2739
2740 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2741 {
2742 struct flow_cls_offload *cls_flower = type_data;
2743 struct cls_fl_filter *f =
2744 (struct cls_fl_filter *) cls_flower->cookie;
2745 struct cls_fl_head *head = fl_head_dereference(tp);
2746
2747 spin_lock(&tp->lock);
2748 list_add(&f->hw_list, &head->hw_filters);
2749 spin_unlock(&tp->lock);
2750 }
2751
2752 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2753 {
2754 struct flow_cls_offload *cls_flower = type_data;
2755 struct cls_fl_filter *f =
2756 (struct cls_fl_filter *) cls_flower->cookie;
2757
2758 spin_lock(&tp->lock);
2759 if (!list_empty(&f->hw_list))
2760 list_del_init(&f->hw_list);
2761 spin_unlock(&tp->lock);
2762 }
2763
2764 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2765 struct fl_flow_tmplt *tmplt)
2766 {
2767 struct flow_cls_offload cls_flower = {};
2768 struct tcf_block *block = chain->block;
2769
2770 cls_flower.rule = flow_rule_alloc(0);
2771 if (!cls_flower.rule)
2772 return -ENOMEM;
2773
2774 cls_flower.common.chain_index = chain->index;
2775 cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2776 cls_flower.cookie = (unsigned long) tmplt;
2777 cls_flower.rule->match.dissector = &tmplt->dissector;
2778 cls_flower.rule->match.mask = &tmplt->mask;
2779 cls_flower.rule->match.key = &tmplt->dummy_key;
2780
2781 /* We don't care if any of the drivers fails to handle this
2782 * call. It serves only as a hint to them.
2783 */
2784 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2785 kfree(cls_flower.rule);
2786
2787 return 0;
2788 }
2789
2790 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2791 struct fl_flow_tmplt *tmplt)
2792 {
2793 struct flow_cls_offload cls_flower = {};
2794 struct tcf_block *block = chain->block;
2795
2796 cls_flower.common.chain_index = chain->index;
2797 cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2798 cls_flower.cookie = (unsigned long) tmplt;
2799
2800 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2801 }
2802
2803 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2804 struct nlattr **tca,
2805 struct netlink_ext_ack *extack)
2806 {
2807 struct nlattr *tca_opts = tca[TCA_OPTIONS];
2808 struct fl_flow_tmplt *tmplt;
2809 struct nlattr **tb;
2810 int err;
2811
2812 if (!tca_opts)
2813 return ERR_PTR(-EINVAL);
2814
2815 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2816 if (!tb)
2817 return ERR_PTR(-ENOBUFS);
2818 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2819 tca_opts, fl_policy, NULL);
2820 if (err)
2821 goto errout_tb;
2822
2823 tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2824 if (!tmplt) {
2825 err = -ENOMEM;
2826 goto errout_tb;
2827 }
2828 tmplt->chain = chain;
2829 err = fl_set_key(net, tca_opts, tb, &tmplt->dummy_key,
2830 &tmplt->mask, extack);
2831 if (err)
2832 goto errout_tmplt;
2833
2834 fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2835
2836 err = fl_hw_create_tmplt(chain, tmplt);
2837 if (err)
2838 goto errout_tmplt;
2839
2840 kfree(tb);
2841 return tmplt;
2842
2843 errout_tmplt:
2844 kfree(tmplt);
2845 errout_tb:
2846 kfree(tb);
2847 return ERR_PTR(err);
2848 }
2849
2850 static void fl_tmplt_destroy(void *tmplt_priv)
2851 {
2852 struct fl_flow_tmplt *tmplt = tmplt_priv;
2853
2854 fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2855 kfree(tmplt);
2856 }
2857
2858 static void fl_tmplt_reoffload(struct tcf_chain *chain, bool add,
2859 flow_setup_cb_t *cb, void *cb_priv)
2860 {
2861 struct fl_flow_tmplt *tmplt = chain->tmplt_priv;
2862 struct flow_cls_offload cls_flower = {};
2863
2864 cls_flower.rule = flow_rule_alloc(0);
2865 if (!cls_flower.rule)
2866 return;
2867
2868 cls_flower.common.chain_index = chain->index;
2869 cls_flower.command = add ? FLOW_CLS_TMPLT_CREATE :
2870 FLOW_CLS_TMPLT_DESTROY;
2871 cls_flower.cookie = (unsigned long) tmplt;
2872 cls_flower.rule->match.dissector = &tmplt->dissector;
2873 cls_flower.rule->match.mask = &tmplt->mask;
2874 cls_flower.rule->match.key = &tmplt->dummy_key;
2875
2876 cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
2877 kfree(cls_flower.rule);
2878 }
2879
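/* Dump one key field and, when @mask_type is not TCA_FLOWER_UNSPEC, its mask.
 * Fields whose mask is all zeroes are not matched on and are skipped
 * entirely, keeping the dump minimal.
 */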
2880 static int fl_dump_key_val(struct sk_buff *skb,
2881 void *val, int val_type,
2882 void *mask, int mask_type, int len)
2883 {
2884 int err;
2885
2886 if (!memchr_inv(mask, 0, len))
2887 return 0;
2888 err = nla_put(skb, val_type, len, val);
2889 if (err)
2890 return err;
2891 if (mask_type != TCA_FLOWER_UNSPEC) {
2892 err = nla_put(skb, mask_type, len, mask);
2893 if (err)
2894 return err;
2895 }
2896 return 0;
2897 }
2898
2899 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2900 struct fl_flow_key *mask)
2901 {
2902 if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2903 TCA_FLOWER_KEY_PORT_DST_MIN,
2904 &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2905 sizeof(key->tp_range.tp_min.dst)) ||
2906 fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2907 TCA_FLOWER_KEY_PORT_DST_MAX,
2908 &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2909 sizeof(key->tp_range.tp_max.dst)) ||
2910 fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2911 TCA_FLOWER_KEY_PORT_SRC_MIN,
2912 &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2913 sizeof(key->tp_range.tp_min.src)) ||
2914 fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2915 TCA_FLOWER_KEY_PORT_SRC_MAX,
2916 &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2917 sizeof(key->tp_range.tp_max.src)))
2918 return -1;
2919
2920 return 0;
2921 }
2922
2923 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2924 struct flow_dissector_key_mpls *mpls_key,
2925 struct flow_dissector_key_mpls *mpls_mask,
2926 u8 lse_index)
2927 {
2928 struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2929 struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2930 int err;
2931
2932 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2933 lse_index + 1);
2934 if (err)
2935 return err;
2936
2937 if (lse_mask->mpls_ttl) {
2938 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2939 lse_key->mpls_ttl);
2940 if (err)
2941 return err;
2942 }
2943 if (lse_mask->mpls_bos) {
2944 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2945 lse_key->mpls_bos);
2946 if (err)
2947 return err;
2948 }
2949 if (lse_mask->mpls_tc) {
2950 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2951 lse_key->mpls_tc);
2952 if (err)
2953 return err;
2954 }
2955 if (lse_mask->mpls_label) {
2956 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2957 lse_key->mpls_label);
2958 if (err)
2959 return err;
2960 }
2961
2962 return 0;
2963 }
2964
2965 static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2966 struct flow_dissector_key_mpls *mpls_key,
2967 struct flow_dissector_key_mpls *mpls_mask)
2968 {
2969 struct nlattr *opts;
2970 struct nlattr *lse;
2971 u8 lse_index;
2972 int err;
2973
2974 opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2975 if (!opts)
2976 return -EMSGSIZE;
2977
2978 for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2979 if (!(mpls_mask->used_lses & 1 << lse_index))
2980 continue;
2981
2982 lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2983 if (!lse) {
2984 err = -EMSGSIZE;
2985 goto err_opts;
2986 }
2987
2988 err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2989 lse_index);
2990 if (err)
2991 goto err_opts_lse;
2992 nla_nest_end(skb, lse);
2993 }
2994 nla_nest_end(skb, opts);
2995
2996 return 0;
2997
2998 err_opts_lse:
2999 nla_nest_cancel(skb, lse);
3000 err_opts:
3001 nla_nest_cancel(skb, opts);
3002
3003 return err;
3004 }
3005
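/* Dump the MPLS match. If the rule only matches fields of the first label
 * stack entry, the legacy flat TCA_FLOWER_KEY_MPLS_* attributes are emitted
 * for backward compatibility; otherwise the nested TCA_FLOWER_KEY_MPLS_OPTS
 * format is used via fl_dump_key_mpls_opts().
 */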
3006 static int fl_dump_key_mpls(struct sk_buff *skb,
3007 struct flow_dissector_key_mpls *mpls_key,
3008 struct flow_dissector_key_mpls *mpls_mask)
3009 {
3010 struct flow_dissector_mpls_lse *lse_mask;
3011 struct flow_dissector_mpls_lse *lse_key;
3012 int err;
3013
3014 if (!mpls_mask->used_lses)
3015 return 0;
3016
3017 lse_mask = &mpls_mask->ls[0];
3018 lse_key = &mpls_key->ls[0];
3019
3020 /* For backward compatibility, don't use the MPLS nested attributes if
3021 * the rule can be expressed using the old attributes.
3022 */
3023 if (mpls_mask->used_lses & ~1 ||
3024 (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
3025 !lse_mask->mpls_tc && !lse_mask->mpls_label))
3026 return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
3027
3028 if (lse_mask->mpls_ttl) {
3029 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
3030 lse_key->mpls_ttl);
3031 if (err)
3032 return err;
3033 }
3034 if (lse_mask->mpls_tc) {
3035 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
3036 lse_key->mpls_tc);
3037 if (err)
3038 return err;
3039 }
3040 if (lse_mask->mpls_label) {
3041 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
3042 lse_key->mpls_label);
3043 if (err)
3044 return err;
3045 }
3046 if (lse_mask->mpls_bos) {
3047 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
3048 lse_key->mpls_bos);
3049 if (err)
3050 return err;
3051 }
3052 return 0;
3053 }
3054
3055 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
3056 struct flow_dissector_key_ip *key,
3057 struct flow_dissector_key_ip *mask)
3058 {
3059 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
3060 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
3061 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
3062 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
3063
3064 if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
3065 fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
3066 return -1;
3067
3068 return 0;
3069 }
3070
3071 static int fl_dump_key_vlan(struct sk_buff *skb,
3072 int vlan_id_key, int vlan_prio_key,
3073 struct flow_dissector_key_vlan *vlan_key,
3074 struct flow_dissector_key_vlan *vlan_mask)
3075 {
3076 int err;
3077
3078 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
3079 return 0;
3080 if (vlan_mask->vlan_id) {
3081 err = nla_put_u16(skb, vlan_id_key,
3082 vlan_key->vlan_id);
3083 if (err)
3084 return err;
3085 }
3086 if (vlan_mask->vlan_priority) {
3087 err = nla_put_u8(skb, vlan_prio_key,
3088 vlan_key->vlan_priority);
3089 if (err)
3090 return err;
3091 }
3092 return 0;
3093 }
3094
3095 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
3096 u32 *flower_key, u32 *flower_mask,
3097 u32 flower_flag_bit, u32 dissector_flag_bit)
3098 {
3099 if (dissector_mask & dissector_flag_bit) {
3100 *flower_mask |= flower_flag_bit;
3101 if (dissector_key & dissector_flag_bit)
3102 *flower_key |= flower_flag_bit;
3103 }
3104 }
3105
3106 static int fl_dump_key_flags(struct sk_buff *skb, bool encap,
3107 u32 flags_key, u32 flags_mask)
3108 {
3109 int fl_key, fl_mask;
3110 __be32 _key, _mask;
3111 u32 key, mask;
3112 int err;
3113
3114 if (encap) {
3115 fl_key = TCA_FLOWER_KEY_ENC_FLAGS;
3116 fl_mask = TCA_FLOWER_KEY_ENC_FLAGS_MASK;
3117 } else {
3118 fl_key = TCA_FLOWER_KEY_FLAGS;
3119 fl_mask = TCA_FLOWER_KEY_FLAGS_MASK;
3120 }
3121
3122 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
3123 return 0;
3124
3125 key = 0;
3126 mask = 0;
3127
3128 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
3129 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
3130 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
3131 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
3132 FLOW_DIS_FIRST_FRAG);
3133
3134 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
3135 TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM,
3136 FLOW_DIS_F_TUNNEL_CSUM);
3137
3138 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
3139 TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT,
3140 FLOW_DIS_F_TUNNEL_DONT_FRAGMENT);
3141
3142 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
3143 TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM, FLOW_DIS_F_TUNNEL_OAM);
3144
3145 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
3146 TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT,
3147 FLOW_DIS_F_TUNNEL_CRIT_OPT);
3148
3149 _key = cpu_to_be32(key);
3150 _mask = cpu_to_be32(mask);
3151
3152 err = nla_put(skb, fl_key, 4, &_key);
3153 if (err)
3154 return err;
3155
3156 return nla_put(skb, fl_mask, 4, &_mask);
3157 }
3158
3159 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
3160 struct flow_dissector_key_enc_opts *enc_opts)
3161 {
3162 struct geneve_opt *opt;
3163 struct nlattr *nest;
3164 int opt_off = 0;
3165
3166 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
3167 if (!nest)
3168 goto nla_put_failure;
3169
3170 while (enc_opts->len > opt_off) {
3171 opt = (struct geneve_opt *)&enc_opts->data[opt_off];
3172
3173 if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
3174 opt->opt_class))
3175 goto nla_put_failure;
3176 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
3177 opt->type))
3178 goto nla_put_failure;
3179 if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
3180 opt->length * 4, opt->opt_data))
3181 goto nla_put_failure;
3182
3183 opt_off += sizeof(struct geneve_opt) + opt->length * 4;
3184 }
3185 nla_nest_end(skb, nest);
3186 return 0;
3187
3188 nla_put_failure:
3189 nla_nest_cancel(skb, nest);
3190 return -EMSGSIZE;
3191 }
3192
3193 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
3194 struct flow_dissector_key_enc_opts *enc_opts)
3195 {
3196 struct vxlan_metadata *md;
3197 struct nlattr *nest;
3198
3199 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
3200 if (!nest)
3201 goto nla_put_failure;
3202
3203 md = (struct vxlan_metadata *)&enc_opts->data[0];
3204 if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
3205 goto nla_put_failure;
3206
3207 nla_nest_end(skb, nest);
3208 return 0;
3209
3210 nla_put_failure:
3211 nla_nest_cancel(skb, nest);
3212 return -EMSGSIZE;
3213 }
3214
3215 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
3216 struct flow_dissector_key_enc_opts *enc_opts)
3217 {
3218 struct erspan_metadata *md;
3219 struct nlattr *nest;
3220
3221 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
3222 if (!nest)
3223 goto nla_put_failure;
3224
3225 md = (struct erspan_metadata *)&enc_opts->data[0];
3226 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
3227 goto nla_put_failure;
3228
3229 if (md->version == 1 &&
3230 nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
3231 goto nla_put_failure;
3232
3233 if (md->version == 2 &&
3234 (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
3235 md->u.md2.dir) ||
3236 nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
3237 get_hwid(&md->u.md2))))
3238 goto nla_put_failure;
3239
3240 nla_nest_end(skb, nest);
3241 return 0;
3242
3243 nla_put_failure:
3244 nla_nest_cancel(skb, nest);
3245 return -EMSGSIZE;
3246 }
3247
3248 static int fl_dump_key_gtp_opt(struct sk_buff *skb,
3249 struct flow_dissector_key_enc_opts *enc_opts)
3250
3251 {
3252 struct gtp_pdu_session_info *session_info;
3253 struct nlattr *nest;
3254
3255 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP);
3256 if (!nest)
3257 goto nla_put_failure;
3258
3259 session_info = (struct gtp_pdu_session_info *)&enc_opts->data[0];
3260
3261 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE,
3262 session_info->pdu_type))
3263 goto nla_put_failure;
3264
3265 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, session_info->qfi))
3266 goto nla_put_failure;
3267
3268 nla_nest_end(skb, nest);
3269 return 0;
3270
3271 nla_put_failure:
3272 nla_nest_cancel(skb, nest);
3273 return -EMSGSIZE;
3274 }
3275
3276 static int fl_dump_key_pfcp_opt(struct sk_buff *skb,
3277 struct flow_dissector_key_enc_opts *enc_opts)
3278 {
3279 struct pfcp_metadata *md;
3280 struct nlattr *nest;
3281
3282 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_PFCP);
3283 if (!nest)
3284 goto nla_put_failure;
3285
3286 md = (struct pfcp_metadata *)&enc_opts->data[0];
3287 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE, md->type))
3288 goto nla_put_failure;
3289
3290 if (nla_put_be64(skb, TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID,
3291 md->seid, 0))
3292 goto nla_put_failure;
3293
3294 nla_nest_end(skb, nest);
3295 return 0;
3296
3297 nla_put_failure:
3298 nla_nest_cancel(skb, nest);
3299 return -EMSGSIZE;
3300 }
3301
3302 static int fl_dump_key_ct(struct sk_buff *skb,
3303 struct flow_dissector_key_ct *key,
3304 struct flow_dissector_key_ct *mask)
3305 {
3306 if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
3307 fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
3308 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
3309 sizeof(key->ct_state)))
3310 goto nla_put_failure;
3311
3312 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
3313 fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
3314 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
3315 sizeof(key->ct_zone)))
3316 goto nla_put_failure;
3317
3318 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
3319 fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
3320 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
3321 sizeof(key->ct_mark)))
3322 goto nla_put_failure;
3323
3324 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
3325 fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
3326 &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
3327 sizeof(key->ct_labels)))
3328 goto nla_put_failure;
3329
3330 return 0;
3331
3332 nla_put_failure:
3333 return -EMSGSIZE;
3334 }
3335
3336 static int fl_dump_key_cfm(struct sk_buff *skb,
3337 struct flow_dissector_key_cfm *key,
3338 struct flow_dissector_key_cfm *mask)
3339 {
3340 struct nlattr *opts;
3341 int err;
3342 u8 mdl;
3343
3344 if (!memchr_inv(mask, 0, sizeof(*mask)))
3345 return 0;
3346
3347 opts = nla_nest_start(skb, TCA_FLOWER_KEY_CFM);
3348 if (!opts)
3349 return -EMSGSIZE;
3350
3351 if (FIELD_GET(FLOW_DIS_CFM_MDL_MASK, mask->mdl_ver)) {
3352 mdl = FIELD_GET(FLOW_DIS_CFM_MDL_MASK, key->mdl_ver);
3353 err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_MD_LEVEL, mdl);
3354 if (err)
3355 goto err_cfm_opts;
3356 }
3357
3358 if (mask->opcode) {
3359 err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_OPCODE, key->opcode);
3360 if (err)
3361 goto err_cfm_opts;
3362 }
3363
3364 nla_nest_end(skb, opts);
3365
3366 return 0;
3367
3368 err_cfm_opts:
3369 nla_nest_cancel(skb, opts);
3370 return err;
3371 }
3372
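/* Dump one set of tunnel options under the @enc_opt_type attribute,
 * dispatching on the option type (geneve, vxlan, erspan, gtp or pfcp).
 */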
3373 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
3374 struct flow_dissector_key_enc_opts *enc_opts)
3375 {
3376 struct nlattr *nest;
3377 int err;
3378
3379 if (!enc_opts->len)
3380 return 0;
3381
3382 nest = nla_nest_start_noflag(skb, enc_opt_type);
3383 if (!nest)
3384 goto nla_put_failure;
3385
3386 switch (enc_opts->dst_opt_type) {
3387 case IP_TUNNEL_GENEVE_OPT_BIT:
3388 err = fl_dump_key_geneve_opt(skb, enc_opts);
3389 if (err)
3390 goto nla_put_failure;
3391 break;
3392 case IP_TUNNEL_VXLAN_OPT_BIT:
3393 err = fl_dump_key_vxlan_opt(skb, enc_opts);
3394 if (err)
3395 goto nla_put_failure;
3396 break;
3397 case IP_TUNNEL_ERSPAN_OPT_BIT:
3398 err = fl_dump_key_erspan_opt(skb, enc_opts);
3399 if (err)
3400 goto nla_put_failure;
3401 break;
3402 case IP_TUNNEL_GTP_OPT_BIT:
3403 err = fl_dump_key_gtp_opt(skb, enc_opts);
3404 if (err)
3405 goto nla_put_failure;
3406 break;
3407 case IP_TUNNEL_PFCP_OPT_BIT:
3408 err = fl_dump_key_pfcp_opt(skb, enc_opts);
3409 if (err)
3410 goto nla_put_failure;
3411 break;
3412 default:
3413 goto nla_put_failure;
3414 }
3415 nla_nest_end(skb, nest);
3416 return 0;
3417
3418 nla_put_failure:
3419 nla_nest_cancel(skb, nest);
3420 return -EMSGSIZE;
3421 }
3422
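/* Dump the tunnel option key and its mask as separate nests. */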
3423 static int fl_dump_key_enc_opt(struct sk_buff *skb,
3424 struct flow_dissector_key_enc_opts *key_opts,
3425 struct flow_dissector_key_enc_opts *msk_opts)
3426 {
3427 int err;
3428
3429 err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
3430 if (err)
3431 return err;
3432
3433 return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
3434 }
3435
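/* Dump all match fields of a flower key/mask pair. The individual helpers
 * generally skip fields whose mask is all zeroes.
 */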
3436 static int fl_dump_key(struct sk_buff *skb, struct net *net,
3437 struct fl_flow_key *key, struct fl_flow_key *mask)
3438 {
3439 if (mask->meta.ingress_ifindex) {
3440 struct net_device *dev;
3441
3442 dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
3443 if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
3444 goto nla_put_failure;
3445 }
3446
3447 if (fl_dump_key_val(skb, &key->meta.l2_miss,
3448 TCA_FLOWER_L2_MISS, &mask->meta.l2_miss,
3449 TCA_FLOWER_UNSPEC, sizeof(key->meta.l2_miss)))
3450 goto nla_put_failure;
3451
3452 if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
3453 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
3454 sizeof(key->eth.dst)) ||
3455 fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
3456 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
3457 sizeof(key->eth.src)) ||
3458 fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
3459 &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
3460 sizeof(key->basic.n_proto)))
3461 goto nla_put_failure;
3462
3463 if (mask->num_of_vlans.num_of_vlans) {
3464 if (nla_put_u8(skb, TCA_FLOWER_KEY_NUM_OF_VLANS, key->num_of_vlans.num_of_vlans))
3465 goto nla_put_failure;
3466 }
3467
3468 if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
3469 goto nla_put_failure;
3470
3471 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
3472 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
3473 goto nla_put_failure;
3474
3475 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
3476 TCA_FLOWER_KEY_CVLAN_PRIO,
3477 &key->cvlan, &mask->cvlan) ||
3478 (mask->cvlan.vlan_tpid &&
3479 nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3480 key->cvlan.vlan_tpid)))
3481 goto nla_put_failure;
3482
3483 if (mask->basic.n_proto) {
3484 if (mask->cvlan.vlan_eth_type) {
3485 if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
3486 key->basic.n_proto))
3487 goto nla_put_failure;
3488 } else if (mask->vlan.vlan_eth_type) {
3489 if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3490 key->vlan.vlan_eth_type))
3491 goto nla_put_failure;
3492 }
3493 }
3494
3495 if ((key->basic.n_proto == htons(ETH_P_IP) ||
3496 key->basic.n_proto == htons(ETH_P_IPV6)) &&
3497 (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
3498 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
3499 sizeof(key->basic.ip_proto)) ||
3500 fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
3501 goto nla_put_failure;
3502
3503 if (mask->pppoe.session_id) {
3504 if (nla_put_be16(skb, TCA_FLOWER_KEY_PPPOE_SID,
3505 key->pppoe.session_id))
3506 goto nla_put_failure;
3507 }
3508 if (mask->basic.n_proto && mask->pppoe.ppp_proto) {
3509 if (nla_put_be16(skb, TCA_FLOWER_KEY_PPP_PROTO,
3510 key->pppoe.ppp_proto))
3511 goto nla_put_failure;
3512 }
3513
3514 if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3515 (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
3516 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
3517 sizeof(key->ipv4.src)) ||
3518 fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
3519 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
3520 sizeof(key->ipv4.dst))))
3521 goto nla_put_failure;
3522 else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3523 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
3524 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
3525 sizeof(key->ipv6.src)) ||
3526 fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
3527 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
3528 sizeof(key->ipv6.dst))))
3529 goto nla_put_failure;
3530
3531 if (key->basic.ip_proto == IPPROTO_TCP &&
3532 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
3533 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
3534 sizeof(key->tp.src)) ||
3535 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
3536 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
3537 sizeof(key->tp.dst)) ||
3538 fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
3539 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
3540 sizeof(key->tcp.flags))))
3541 goto nla_put_failure;
3542 else if (key->basic.ip_proto == IPPROTO_UDP &&
3543 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
3544 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
3545 sizeof(key->tp.src)) ||
3546 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
3547 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
3548 sizeof(key->tp.dst))))
3549 goto nla_put_failure;
3550 else if (key->basic.ip_proto == IPPROTO_SCTP &&
3551 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
3552 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
3553 sizeof(key->tp.src)) ||
3554 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
3555 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
3556 sizeof(key->tp.dst))))
3557 goto nla_put_failure;
3558 else if (key->basic.n_proto == htons(ETH_P_IP) &&
3559 key->basic.ip_proto == IPPROTO_ICMP &&
3560 (fl_dump_key_val(skb, &key->icmp.type,
3561 TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
3562 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
3563 sizeof(key->icmp.type)) ||
3564 fl_dump_key_val(skb, &key->icmp.code,
3565 TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
3566 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
3567 sizeof(key->icmp.code))))
3568 goto nla_put_failure;
3569 else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
3570 key->basic.ip_proto == IPPROTO_ICMPV6 &&
3571 (fl_dump_key_val(skb, &key->icmp.type,
3572 TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
3573 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
3574 sizeof(key->icmp.type)) ||
3575 fl_dump_key_val(skb, &key->icmp.code,
3576 TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
3577 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
3578 sizeof(key->icmp.code))))
3579 goto nla_put_failure;
3580 else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
3581 key->basic.n_proto == htons(ETH_P_RARP)) &&
3582 (fl_dump_key_val(skb, &key->arp.sip,
3583 TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
3584 TCA_FLOWER_KEY_ARP_SIP_MASK,
3585 sizeof(key->arp.sip)) ||
3586 fl_dump_key_val(skb, &key->arp.tip,
3587 TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
3588 TCA_FLOWER_KEY_ARP_TIP_MASK,
3589 sizeof(key->arp.tip)) ||
3590 fl_dump_key_val(skb, &key->arp.op,
3591 TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
3592 TCA_FLOWER_KEY_ARP_OP_MASK,
3593 sizeof(key->arp.op)) ||
3594 fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
3595 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
3596 sizeof(key->arp.sha)) ||
3597 fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
3598 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
3599 sizeof(key->arp.tha))))
3600 goto nla_put_failure;
3601 else if (key->basic.ip_proto == IPPROTO_L2TP &&
3602 fl_dump_key_val(skb, &key->l2tpv3.session_id,
3603 TCA_FLOWER_KEY_L2TPV3_SID,
3604 &mask->l2tpv3.session_id,
3605 TCA_FLOWER_UNSPEC,
3606 sizeof(key->l2tpv3.session_id)))
3607 goto nla_put_failure;
3608
3609 if (key->ipsec.spi &&
3610 fl_dump_key_val(skb, &key->ipsec.spi, TCA_FLOWER_KEY_SPI,
3611 &mask->ipsec.spi, TCA_FLOWER_KEY_SPI_MASK,
3612 sizeof(key->ipsec.spi)))
3613 goto nla_put_failure;
3614
3615 if ((key->basic.ip_proto == IPPROTO_TCP ||
3616 key->basic.ip_proto == IPPROTO_UDP ||
3617 key->basic.ip_proto == IPPROTO_SCTP) &&
3618 fl_dump_key_port_range(skb, key, mask))
3619 goto nla_put_failure;
3620
3621 if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3622 (fl_dump_key_val(skb, &key->enc_ipv4.src,
3623 TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
3624 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
3625 sizeof(key->enc_ipv4.src)) ||
3626 fl_dump_key_val(skb, &key->enc_ipv4.dst,
3627 TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
3628 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
3629 sizeof(key->enc_ipv4.dst))))
3630 goto nla_put_failure;
3631 else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3632 (fl_dump_key_val(skb, &key->enc_ipv6.src,
3633 TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
3634 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
3635 sizeof(key->enc_ipv6.src)) ||
3636 fl_dump_key_val(skb, &key->enc_ipv6.dst,
3637 TCA_FLOWER_KEY_ENC_IPV6_DST,
3638 &mask->enc_ipv6.dst,
3639 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
3640 sizeof(key->enc_ipv6.dst))))
3641 goto nla_put_failure;
3642
3643 if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
3644 &mask->enc_key_id, TCA_FLOWER_UNSPEC,
3645 sizeof(key->enc_key_id)) ||
3646 fl_dump_key_val(skb, &key->enc_tp.src,
3647 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
3648 &mask->enc_tp.src,
3649 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
3650 sizeof(key->enc_tp.src)) ||
3651 fl_dump_key_val(skb, &key->enc_tp.dst,
3652 TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
3653 &mask->enc_tp.dst,
3654 TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
3655 sizeof(key->enc_tp.dst)) ||
3656 fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
3657 fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
3658 goto nla_put_failure;
3659
3660 if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
3661 goto nla_put_failure;
3662
3663 if (fl_dump_key_flags(skb, false, key->control.flags,
3664 mask->control.flags))
3665 goto nla_put_failure;
3666
3667 if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
3668 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
3669 sizeof(key->hash.hash)))
3670 goto nla_put_failure;
3671
3672 if (fl_dump_key_cfm(skb, &key->cfm, &mask->cfm))
3673 goto nla_put_failure;
3674
3675 if (fl_dump_key_flags(skb, true, key->enc_control.flags,
3676 mask->enc_control.flags))
3677 goto nla_put_failure;
3678
3679 return 0;
3680
3681 nla_put_failure:
3682 return -EMSGSIZE;
3683 }
3684
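/* Full netlink dump of one filter: classid, key/mask, flags, in_hw count
 * and actions. Hardware stats are refreshed first unless skip_hw is set.
 */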
3685 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
3686 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3687 {
3688 struct cls_fl_filter *f = fh;
3689 struct nlattr *nest;
3690 struct fl_flow_key *key, *mask;
3691 bool skip_hw;
3692
3693 if (!f)
3694 return skb->len;
3695
3696 t->tcm_handle = f->handle;
3697
3698 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3699 if (!nest)
3700 goto nla_put_failure;
3701
3702 spin_lock(&tp->lock);
3703
3704 if (f->res.classid &&
3705 nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
3706 goto nla_put_failure_locked;
3707
3708 key = &f->key;
3709 mask = &f->mask->key;
3710 skip_hw = tc_skip_hw(f->flags);
3711
3712 if (fl_dump_key(skb, net, key, mask))
3713 goto nla_put_failure_locked;
3714
3715 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3716 goto nla_put_failure_locked;
3717
3718 spin_unlock(&tp->lock);
3719
3720 if (!skip_hw)
3721 fl_hw_update_stats(tp, f, rtnl_held);
3722
3723 if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3724 goto nla_put_failure;
3725
3726 if (tcf_exts_dump(skb, &f->exts))
3727 goto nla_put_failure;
3728
3729 nla_nest_end(skb, nest);
3730
3731 if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3732 goto nla_put_failure;
3733
3734 return skb->len;
3735
3736 nla_put_failure_locked:
3737 spin_unlock(&tp->lock);
3738 nla_put_failure:
3739 nla_nest_cancel(skb, nest);
3740 return -1;
3741 }
3742
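/* Terse dump of one filter: only the handle, flags and a terse action dump,
 * without the flow key/mask.
 */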
3743 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3744 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3745 {
3746 struct cls_fl_filter *f = fh;
3747 struct nlattr *nest;
3748 bool skip_hw;
3749
3750 if (!f)
3751 return skb->len;
3752
3753 t->tcm_handle = f->handle;
3754
3755 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3756 if (!nest)
3757 goto nla_put_failure;
3758
3759 spin_lock(&tp->lock);
3760
3761 skip_hw = tc_skip_hw(f->flags);
3762
3763 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3764 goto nla_put_failure_locked;
3765
3766 spin_unlock(&tp->lock);
3767
3768 if (!skip_hw)
3769 fl_hw_update_stats(tp, f, rtnl_held);
3770
3771 if (tcf_exts_terse_dump(skb, &f->exts))
3772 goto nla_put_failure;
3773
3774 nla_nest_end(skb, nest);
3775
3776 return skb->len;
3777
3778 nla_put_failure_locked:
3779 spin_unlock(&tp->lock);
3780 nla_put_failure:
3781 nla_nest_cancel(skb, nest);
3782 return -1;
3783 }
3784
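/* Dump a chain template as a TCA_OPTIONS nest holding the template's
 * dummy key and mask.
 */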
3785 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3786 {
3787 struct fl_flow_tmplt *tmplt = tmplt_priv;
3788 struct fl_flow_key *key, *mask;
3789 struct nlattr *nest;
3790
3791 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3792 if (!nest)
3793 goto nla_put_failure;
3794
3795 key = &tmplt->dummy_key;
3796 mask = &tmplt->mask;
3797
3798 if (fl_dump_key(skb, net, key, mask))
3799 goto nla_put_failure;
3800
3801 nla_nest_end(skb, nest);
3802
3803 return skb->len;
3804
3805 nla_put_failure:
3806 nla_nest_cancel(skb, nest);
3807 return -EMSGSIZE;
3808 }
3809
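/* Bind or unbind the filter's classification result (f->res) to the given
 * class of the qdisc.
 */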
3810 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3811 unsigned long base)
3812 {
3813 struct cls_fl_filter *f = fh;
3814
3815 tc_cls_bind_class(classid, cl, q, &f->res, base);
3816 }
3817
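/* Under tp->lock, mark the tcf_proto as deleting if it no longer holds any
 * filters and report the result.
 */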
3818 static bool fl_delete_empty(struct tcf_proto *tp)
3819 {
3820 struct cls_fl_head *head = fl_head_dereference(tp);
3821
3822 spin_lock(&tp->lock);
3823 tp->deleting = idr_is_empty(&head->handle_idr);
3824 spin_unlock(&tp->lock);
3825
3826 return tp->deleting;
3827 }
3828
3829 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3830 .kind = "flower",
3831 .classify = fl_classify,
3832 .init = fl_init,
3833 .destroy = fl_destroy,
3834 .get = fl_get,
3835 .put = fl_put,
3836 .change = fl_change,
3837 .delete = fl_delete,
3838 .delete_empty = fl_delete_empty,
3839 .walk = fl_walk,
3840 .reoffload = fl_reoffload,
3841 .hw_add = fl_hw_add,
3842 .hw_del = fl_hw_del,
3843 .dump = fl_dump,
3844 .terse_dump = fl_terse_dump,
3845 .bind_class = fl_bind_class,
3846 .tmplt_create = fl_tmplt_create,
3847 .tmplt_destroy = fl_tmplt_destroy,
3848 .tmplt_reoffload = fl_tmplt_reoffload,
3849 .tmplt_dump = fl_tmplt_dump,
3850 .get_exts = fl_get_exts,
3851 .owner = THIS_MODULE,
3852 .flags = TCF_PROTO_OPS_DOIT_UNLOCKED,
3853 };
3854 MODULE_ALIAS_NET_CLS("flower");
3855
3856 static int __init cls_fl_init(void)
3857 {
3858 return register_tcf_proto_ops(&cls_fl_ops);
3859 }
3860
3861 static void __exit cls_fl_exit(void)
3862 {
3863 unregister_tcf_proto_ops(&cls_fl_ops);
3864 }
3865
3866 module_init(cls_fl_init);
3867 module_exit(cls_fl_exit);
3868
3869 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3870 MODULE_DESCRIPTION("Flower classifier");
3871 MODULE_LICENSE("GPL v2");
3872