xref: /linux/net/sched/cls_flower.c (revision 675f176b4dcc2b75adbcea7ba0e9a649527f53bd)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>
#include <linux/ppp_defs.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/gtp.h>
#include <net/tc_wrapper.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

#include <uapi/linux/netfilter/nf_conntrack_common.h>

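/* The TCA_FLOWER_KEY_CT_FLAGS_* values in the uapi header are single-bit
 * flags and __TCA_FLOWER_KEY_CT_FLAGS_MAX sits one past the highest defined
 * bit, so ..._MAX below evaluates to the next unused bit and ..._MASK to a
 * mask covering every defined conntrack state flag.
 */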
#define TCA_FLOWER_KEY_CT_FLAGS_MAX \
		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
#define TCA_FLOWER_KEY_CT_FLAGS_MASK \
		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)

struct fl_flow_key {
	struct flow_dissector_key_meta meta;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	struct flow_dissector_key_ports_range tp_range;
	struct flow_dissector_key_ct ct;
	struct flow_dissector_key_hash hash;
	struct flow_dissector_key_num_of_vlans num_of_vlans;
	struct flow_dissector_key_pppoe pppoe;
	struct flow_dissector_key_l2tpv3 l2tpv3;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};

struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference counter
	 * can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};

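/* Masks are deduplicated: every mask in use is kept in cls_fl_head->ht,
 * keyed by the full mask key, and filters with identical masks share one
 * refcounted fl_flow_mask instance.
 */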
static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

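/* Compute the byte range of the mask that actually contains set bits, so
 * that masking and hashing only touch the interesting part of the key.
 * The bounds are rounded to sizeof(long) so fl_set_masked_key() can work
 * on whole longs; e.g. if the only nonzero mask bytes are at offsets 5
 * and 18 and sizeof(long) == 8, the range becomes [0, 24).
 */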
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

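/* Build the masked lookup key: AND the packet key with the mask, one long
 * at a time, across the mask's active range. This is safe because
 * fl_flow_key is __aligned(BITS_PER_LONG / 8) and the range bounds are
 * rounded to sizeof(long) by fl_mask_update_range().
 */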
static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

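/* A mask "fits" a template if it only matches on bits the template mask
 * also covers: any bit set in the mask but clear in the template mask
 * (~*ltmplt & *lmask) makes the mask invalid for this template.
 */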
static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;
	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

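/* Port ranges cannot be expressed as a bitmask, so they are compared
 * numerically. The key dissected from the skb carries only the actual
 * port; when the filter masks tp_min/tp_max, check that the packet port
 * falls inside [min_val, max_val] and then copy the filter's own masked
 * min/max into the lookup key so the subsequent hash lookup can match.
 */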
static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
	min_val = ntohs(filter->key.tp_range.tp_min.dst);
	max_val = ntohs(filter->key.tp_range.tp_max.dst);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.dst) < min_val ||
		    ntohs(key->tp_range.tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
	min_val = ntohs(filter->key.tp_range.tp_min.src);
	max_val = ntohs(filter->key.tp_range.tp_max.src);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.src) < min_val ||
		    ntohs(key->tp_range.tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
	}
	return true;
}

static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

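/* Look up a filter under a single mask: build the masked key, then either
 * walk the mask's filter list doing the numeric port-range checks before
 * each hash lookup, or go straight to the hash table when the mask has no
 * TCA_FLOWER_MASK_FLAGS_RANGE component.
 */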
static noinline_for_stack
struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
{
	struct fl_flow_key mkey;

	fl_set_masked_key(&mkey, key, mask);
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, &mkey, key);

	return __fl_lookup(mask, &mkey);
}

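/* Map enum ip_conntrack_info values (the conntrack state attached to the
 * skb) to TCA_FLOWER_KEY_CT_FLAGS_* bits; consumed by
 * skb_flow_dissect_ct() when filling the ct key in fl_classify().
 */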
static u16 fl_ct_info_to_flower_map[] = {
	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_NEW,
};

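/* Classification walks the masks in order. For each mask, only the keys
 * present in that mask's dissector are extracted from the skb before the
 * hash lookup, and the first match that is not skip_sw wins and has its
 * actions executed.
 */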
TC_INDIRECT_SCOPE int fl_classify(struct sk_buff *skb,
				  const struct tcf_proto *tp,
				  struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	bool post_ct = tc_skb_cb(skb)->post_ct;
	u16 zone = tc_skb_cb(skb)->zone;
	struct fl_flow_key skb_key;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so set it here instead.
		 */
		skb_key.basic.n_proto = skb_protocol(skb, false);
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
				    fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map),
				    post_ct, zone);
		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
		skb_flow_dissect(skb, &mask->dissector, &skb_key,
				 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);

		f = fl_mask_lookup(mask, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}

static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
			    &f->flags, &f->in_hw_count, rtnl_held);
}

static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
				      cls_flower.common.extack);
	if (err) {
		kfree(cls_flower.rule);

		return skip_sw ? err : 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
	tc_cleanup_offload_action(&cls_flower.rule->action);
	kfree(cls_flower.rule);

	if (err) {
		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
		return err;
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = FLOW_CLS_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
			 rtnl_held);

	tcf_exts_hw_stats_update(&f->exts, &cls_flower.stats, cls_flower.use_act_stats);
}

static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}

static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_CT_STATE]	=
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_STATE_MASK]	=
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_NUM_OF_VLANS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_PPPOE_SID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_PPP_PROTO]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_L2TPV3_SID]	= { .type = NLA_U32 },
};

static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]        = {
		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_GTP]		= { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
						       .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]         = { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]        = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]      = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]        = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
};

static const struct nla_policy
gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]	   = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]	   = { .type = NLA_U8 },
};

static const struct nla_policy
mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]    = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]      = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]      = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]       = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]    = { .type = NLA_U32 },
};

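/* Copy a value/mask pair from netlink attributes. If the mask attribute
 * is absent (or the key has no mask attribute at all, mask_type ==
 * TCA_FLOWER_UNSPEC), an exact match is assumed and the mask is set to
 * all ones.
 */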
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask,
				 struct netlink_ext_ack *extack)
{
	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
	fl_set_key_val(tb, &key->tp_range.tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
	fl_set_key_val(tb, &key->tp_range.tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));

	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
	    ntohs(key->tp_range.tp_max.dst) <=
	    ntohs(key->tp_range.tp_min.dst)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
				    "Invalid destination port range (min must be strictly smaller than max)");
		return -EINVAL;
	}
	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
	    ntohs(key->tp_range.tp_max.src) <=
	    ntohs(key->tp_range.tp_min.src)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
				    "Invalid source port range (min must be strictly smaller than max)");
		return -EINVAL;
	}

	return 0;
}

static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
			       struct flow_dissector_key_mpls *key_val,
			       struct flow_dissector_key_mpls *key_mask,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;
	u8 lse_index;
	u8 depth;
	int err;

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
			       mpls_stack_entry_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
		return -EINVAL;
	}

	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);

	/* LSE depth starts at 1, for consistency with terminology used by
	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
	 */
	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
				    "Invalid MPLS depth");
		return -EINVAL;
	}
	lse_index = depth - 1;

	dissector_set_mpls_lse(key_val, lse_index);
	dissector_set_mpls_lse(key_mask, lse_index);

	lse_val = &key_val->ls[lse_index];
	lse_mask = &key_mask->ls[lse_index];

	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
	}

	return 0;
}

static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
				struct flow_dissector_key_mpls *key_val,
				struct flow_dissector_key_mpls *key_mask,
				struct netlink_ext_ack *extack)
{
	struct nlattr *nla_lse;
	int rem;
	int err;

	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
				    "NLA_F_NESTED is missing");
		return -EINVAL;
	}

	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
					    "Invalid MPLS option type");
			return -EINVAL;
		}

		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
		if (err < 0)
			return err;
	}
	if (rem) {
		NL_SET_ERR_MSG(extack,
			       "Bytes leftover after parsing MPLS options");
		return -EINVAL;
	}

	return 0;
}

static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask,
			   struct netlink_ext_ack *extack)
{
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;

	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
			return -EBADMSG;
		}

		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    key_val, key_mask, extack);
	}

	lse_val = &key_val->ls[0];
	lse_mask = &key_mask->ls[0];

	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	return 0;
}

static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    int vlan_next_eth_type_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	if (ethertype) {
		key_val->vlan_tpid = ethertype;
		key_mask->vlan_tpid = cpu_to_be16(~0);
	}
	if (tb[vlan_next_eth_type_key]) {
		key_val->vlan_eth_type =
			nla_get_be16(tb[vlan_next_eth_type_key]);
		key_mask->vlan_eth_type = cpu_to_be16(~0);
	}
}

static void fl_set_key_pppoe(struct nlattr **tb,
			     struct flow_dissector_key_pppoe *key_val,
			     struct flow_dissector_key_pppoe *key_mask,
			     struct fl_flow_key *key,
			     struct fl_flow_key *mask)
{
	/* key_val::type must be set to ETH_P_PPP_SES because ETH_P_PPP_SES
	 * was stored in basic.n_proto, which might get overwritten by
	 * ppp_proto or might be set to 0; the role of key_val::type is
	 * similar to vlan_key::tpid.
	 */
	key_val->type = htons(ETH_P_PPP_SES);
	key_mask->type = cpu_to_be16(~0);

	if (tb[TCA_FLOWER_KEY_PPPOE_SID]) {
		key_val->session_id =
			nla_get_be16(tb[TCA_FLOWER_KEY_PPPOE_SID]);
		key_mask->session_id = cpu_to_be16(~0);
	}
	if (tb[TCA_FLOWER_KEY_PPP_PROTO]) {
		key_val->ppp_proto =
			nla_get_be16(tb[TCA_FLOWER_KEY_PPP_PROTO]);
		key_mask->ppp_proto = cpu_to_be16(~0);

		if (key_val->ppp_proto == htons(PPP_IP)) {
			key->basic.n_proto = htons(ETH_P_IP);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_IPV6)) {
			key->basic.n_proto = htons(ETH_P_IPV6);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_MPLS_UC)) {
			key->basic.n_proto = htons(ETH_P_MPLS_UC);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_MPLS_MC)) {
			key->basic.n_proto = htons(ETH_P_MPLS_MC);
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	} else {
		key->basic.n_proto = 0;
		mask->basic.n_proto = cpu_to_be16(0);
	}
}

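/* Translate TCA_FLOWER_KEY_FLAGS_* bits into FLOW_DIS_* dissector bits.
 * Only bits set in the flower mask are considered, and the mask attribute
 * itself is mandatory (see fl_set_key_flags() below).
 */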
static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
			    u32 *flags_mask, struct netlink_ext_ack *extack)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
		NL_SET_ERR_MSG(extack, "Missing flags mask");
		return -EINVAL;
	}

	key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key  = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}

static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}

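/* Each fl_set_*_opt() helper below is called twice from fl_set_enc_opt():
 * once for the key and once for the mask. A depth of 0 means no mask
 * attribute was supplied, in which case the option mask is left as the
 * all-ones pattern written by the initial memset, i.e. an exact match.
 * The return value is the number of bytes consumed in enc_opts.data, or
 * a negative errno.
 */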
static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided, we assume an exact match. */
	if (!depth)
		return sizeof(struct geneve_opt) + data_len;

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb,
					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* We are not allowed to omit any of CLASS, TYPE or DATA
	 * fields from the key.
	 */
	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	/* Omitting any of CLASS, TYPE or DATA fields is allowed
	 * for the mask.
	 */
	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
		int new_len = key->enc_opts.len;

		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
		data_len = nla_len(data);
		if (data_len < 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
			return -ERANGE;
		}
		if (data_len % 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
			return -ERANGE;
		}

		new_len += sizeof(struct geneve_opt) + data_len;
		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
			NL_SET_ERR_MSG(extack, "Tunnel options exceed max size");
			return -ERANGE;
		}
		opt->length = data_len / 4;
		memcpy(opt->opt_data, nla_data(data), data_len);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(class);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(type);
	}

	return sizeof(struct geneve_opt) + data_len;
}

static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			    int depth, int option_len,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
	struct vxlan_metadata *md;
	int err;

	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
			       vxlan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
		md->gbp &= VXLAN_GBP_MASK;
	}

	return sizeof(*md);
}

static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
	struct erspan_metadata *md;
	int err;

	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));
	md->version = 1;

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
			       erspan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);

	if (md->version == 1) {
		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
			memset(&md->u, 0x00, sizeof(md->u));
			md->u.index = nla_get_be32(nla);
		}
	} else if (md->version == 2) {
		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
			md->u.md2.dir = nla_get_u8(nla);
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
			set_hwid(&md->u.md2, nla_get_u8(nla));
		}
	} else {
		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
		return -EINVAL;
	}

	return sizeof(*md);
}

static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key,
			  int depth, int option_len,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1];
	struct gtp_pdu_session_info *sinfo;
	u8 len = key->enc_opts.len;
	int err;

	sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[len];
	memset(sinfo, 0xff, option_len);

	if (!depth)
		return sizeof(*sinfo);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) {
		NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla,
			       gtp_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing tunnel key gtp option pdu type or qfi");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE])
		sinfo->pdu_type =
			nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]);

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])
		sinfo->qfi = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]);

	return sizeof(*sinfo);
}

static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
			  struct fl_flow_key *mask,
			  struct netlink_ext_ack *extack)
{
	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
	int err, option_len, key_depth, msk_depth = 0;

	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
					     enc_opts_policy, extack);
	if (err)
		return err;

	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
						     enc_opts_policy, extack);
		if (err)
			return err;

		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		if (!nla_ok(nla_opt_msk, msk_depth)) {
			NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
			return -EINVAL;
		}
	}

	nla_for_each_attr(nla_opt_key, nla_enc_key,
			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
		switch (nla_type(nla_opt_key)) {
		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
			if (key->enc_opts.dst_opt_type &&
			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask misaligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_key, key,
						      key_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
						      msk_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask misaligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask misaligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_GTP:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Duplicate type for gtp options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
			option_len = fl_set_gtp_opt(nla_opt_key, key,
						    key_depth, option_len,
						    extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
			option_len = fl_set_gtp_opt(nla_opt_msk, mask,
						    msk_depth, option_len,
						    extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Key and mask misaligned");
				return -EINVAL;
			}
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
			return -EINVAL;
		}

		if (!msk_depth)
			continue;

		if (!nla_ok(nla_opt_msk, msk_depth)) {
			NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
			return -EINVAL;
		}
		nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
	}

	return 0;
}

static int fl_validate_ct_state(u16 state, struct nlattr *tb,
				struct netlink_ext_ack *extack)
{
	if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "no trk, so no other flag can be set");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
	    state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "new and est are mutually exclusive");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID &&
	    state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
		      TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "when inv is set, only trk may be set");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
	    state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "new and rpl are mutually exclusive");
		return -EINVAL;
	}

	return 0;
}

static int fl_set_key_ct(struct nlattr **tb,
			 struct flow_dissector_key_ct *key,
			 struct flow_dissector_key_ct *mask,
			 struct netlink_ext_ack *extack)
{
	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
		int err;

		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			       sizeof(key->ct_state));

		err = fl_validate_ct_state(key->ct_state & mask->ct_state,
					   tb[TCA_FLOWER_KEY_CT_STATE_MASK],
					   extack);
		if (err)
			return err;
	}
	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG(extack, "Conntrack zones aren't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			       sizeof(key->ct_zone));
	}
	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			       sizeof(key->ct_mark));
	}
	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			       sizeof(key->ct_labels));
	}

	return 0;
}

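/* Decide whether a (c)vlan key should be parsed at this nesting level:
 * true when the filter already guarantees more than vthresh VLAN headers
 * via num_of_vlans, or when the ethertype attribute holds a VLAN TPID.
 * Otherwise the ethertype, if present, is consumed as basic.n_proto.
 */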
1627 static bool is_vlan_key(struct nlattr *tb, __be16 *ethertype,
1628 			struct fl_flow_key *key, struct fl_flow_key *mask,
1629 			int vthresh)
1630 {
1631 	const bool good_num_of_vlans = key->num_of_vlans.num_of_vlans > vthresh;
1632 
1633 	if (!tb) {
1634 		*ethertype = 0;
1635 		return good_num_of_vlans;
1636 	}
1637 
1638 	*ethertype = nla_get_be16(tb);
1639 	if (good_num_of_vlans || eth_type_vlan(*ethertype))
1640 		return true;
1641 
1642 	key->basic.n_proto = *ethertype;
1643 	mask->basic.n_proto = cpu_to_be16(~0);
1644 	return false;
1645 }
1646 
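/* Central netlink-to-key parser. Fills both the key and the mask from
 * the TCA_FLOWER_KEY_* attributes. Higher-layer fields are parsed only
 * once the lower-layer protocol (n_proto/ip_proto) selects them, which
 * keeps contradictory combinations out of the lookup key.
 */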
1647 static int fl_set_key(struct net *net, struct nlattr **tb,
1648 		      struct fl_flow_key *key, struct fl_flow_key *mask,
1649 		      struct netlink_ext_ack *extack)
1650 {
1651 	__be16 ethertype;
1652 	int ret = 0;
1653 
1654 	if (tb[TCA_FLOWER_INDEV]) {
1655 		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1656 		if (err < 0)
1657 			return err;
1658 		key->meta.ingress_ifindex = err;
1659 		mask->meta.ingress_ifindex = 0xffffffff;
1660 	}
1661 
1662 	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1663 		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1664 		       sizeof(key->eth.dst));
1665 	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1666 		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1667 		       sizeof(key->eth.src));
1668 	fl_set_key_val(tb, &key->num_of_vlans,
1669 		       TCA_FLOWER_KEY_NUM_OF_VLANS,
1670 		       &mask->num_of_vlans,
1671 		       TCA_FLOWER_UNSPEC,
1672 		       sizeof(key->num_of_vlans));
1673 
1674 	if (is_vlan_key(tb[TCA_FLOWER_KEY_ETH_TYPE], &ethertype, key, mask, 0)) {
1675 		fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1676 				TCA_FLOWER_KEY_VLAN_PRIO,
1677 				TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1678 				&key->vlan, &mask->vlan);
1679 
1680 		if (is_vlan_key(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE],
1681 				&ethertype, key, mask, 1)) {
1682 			fl_set_key_vlan(tb, ethertype,
1683 					TCA_FLOWER_KEY_CVLAN_ID,
1684 					TCA_FLOWER_KEY_CVLAN_PRIO,
1685 					TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1686 					&key->cvlan, &mask->cvlan);
1687 			fl_set_key_val(tb, &key->basic.n_proto,
1688 				       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1689 				       &mask->basic.n_proto,
1690 				       TCA_FLOWER_UNSPEC,
1691 				       sizeof(key->basic.n_proto));
1692 		}
1693 	}
1694 
1695 	if (key->basic.n_proto == htons(ETH_P_PPP_SES))
1696 		fl_set_key_pppoe(tb, &key->pppoe, &mask->pppoe, key, mask);
1697 
1698 	if (key->basic.n_proto == htons(ETH_P_IP) ||
1699 	    key->basic.n_proto == htons(ETH_P_IPV6)) {
1700 		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1701 			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1702 			       sizeof(key->basic.ip_proto));
1703 		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1704 	}
1705 
1706 	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1707 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1708 		mask->control.addr_type = ~0;
1709 		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1710 			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1711 			       sizeof(key->ipv4.src));
1712 		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1713 			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1714 			       sizeof(key->ipv4.dst));
1715 	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1716 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1717 		mask->control.addr_type = ~0;
1718 		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1719 			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1720 			       sizeof(key->ipv6.src));
1721 		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1722 			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1723 			       sizeof(key->ipv6.dst));
1724 	}
1725 
1726 	if (key->basic.ip_proto == IPPROTO_TCP) {
1727 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1728 			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1729 			       sizeof(key->tp.src));
1730 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1731 			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1732 			       sizeof(key->tp.dst));
1733 		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1734 			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1735 			       sizeof(key->tcp.flags));
1736 	} else if (key->basic.ip_proto == IPPROTO_UDP) {
1737 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1738 			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1739 			       sizeof(key->tp.src));
1740 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1741 			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1742 			       sizeof(key->tp.dst));
1743 	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
1744 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1745 			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1746 			       sizeof(key->tp.src));
1747 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1748 			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1749 			       sizeof(key->tp.dst));
1750 	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
1751 		   key->basic.ip_proto == IPPROTO_ICMP) {
1752 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1753 			       &mask->icmp.type,
1754 			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1755 			       sizeof(key->icmp.type));
1756 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1757 			       &mask->icmp.code,
1758 			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1759 			       sizeof(key->icmp.code));
1760 	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1761 		   key->basic.ip_proto == IPPROTO_ICMPV6) {
1762 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1763 			       &mask->icmp.type,
1764 			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1765 			       sizeof(key->icmp.type));
1766 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1767 			       &mask->icmp.code,
1768 			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1769 			       sizeof(key->icmp.code));
1770 	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1771 		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1772 		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
1773 		if (ret)
1774 			return ret;
1775 	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1776 		   key->basic.n_proto == htons(ETH_P_RARP)) {
1777 		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1778 			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1779 			       sizeof(key->arp.sip));
1780 		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1781 			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1782 			       sizeof(key->arp.tip));
1783 		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1784 			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1785 			       sizeof(key->arp.op));
1786 		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1787 			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1788 			       sizeof(key->arp.sha));
1789 		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1790 			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1791 			       sizeof(key->arp.tha));
1792 	} else if (key->basic.ip_proto == IPPROTO_L2TP) {
1793 		fl_set_key_val(tb, &key->l2tpv3.session_id,
1794 			       TCA_FLOWER_KEY_L2TPV3_SID,
1795 			       &mask->l2tpv3.session_id, TCA_FLOWER_UNSPEC,
1796 			       sizeof(key->l2tpv3.session_id));
1797 	}
1798 
1799 	if (key->basic.ip_proto == IPPROTO_TCP ||
1800 	    key->basic.ip_proto == IPPROTO_UDP ||
1801 	    key->basic.ip_proto == IPPROTO_SCTP) {
1802 		ret = fl_set_key_port_range(tb, key, mask, extack);
1803 		if (ret)
1804 			return ret;
1805 	}
1806 
1807 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1808 	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1809 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1810 		mask->enc_control.addr_type = ~0;
1811 		fl_set_key_val(tb, &key->enc_ipv4.src,
1812 			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
1813 			       &mask->enc_ipv4.src,
1814 			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1815 			       sizeof(key->enc_ipv4.src));
1816 		fl_set_key_val(tb, &key->enc_ipv4.dst,
1817 			       TCA_FLOWER_KEY_ENC_IPV4_DST,
1818 			       &mask->enc_ipv4.dst,
1819 			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1820 			       sizeof(key->enc_ipv4.dst));
1821 	}
1822 
1823 	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1824 	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1825 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1826 		mask->enc_control.addr_type = ~0;
1827 		fl_set_key_val(tb, &key->enc_ipv6.src,
1828 			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
1829 			       &mask->enc_ipv6.src,
1830 			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1831 			       sizeof(key->enc_ipv6.src));
1832 		fl_set_key_val(tb, &key->enc_ipv6.dst,
1833 			       TCA_FLOWER_KEY_ENC_IPV6_DST,
1834 			       &mask->enc_ipv6.dst,
1835 			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1836 			       sizeof(key->enc_ipv6.dst));
1837 	}
1838 
1839 	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1840 		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1841 		       sizeof(key->enc_key_id.keyid));
1842 
1843 	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1844 		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1845 		       sizeof(key->enc_tp.src));
1846 
1847 	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1848 		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1849 		       sizeof(key->enc_tp.dst));
1850 
1851 	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1852 
1853 	fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1854 		       &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1855 		       sizeof(key->hash.hash));
1856 
1857 	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1858 		ret = fl_set_enc_opt(tb, key, mask, extack);
1859 		if (ret)
1860 			return ret;
1861 	}
1862 
1863 	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1864 	if (ret)
1865 		return ret;
1866 
1867 	if (tb[TCA_FLOWER_KEY_FLAGS])
1868 		ret = fl_set_key_flags(tb, &key->control.flags,
1869 				       &mask->control.flags, extack);
1870 
1871 	return ret;
1872 }
1873 
1874 static void fl_mask_copy(struct fl_flow_mask *dst,
1875 			 struct fl_flow_mask *src)
1876 {
1877 	const void *psrc = fl_key_get_start(&src->key, src);
1878 	void *pdst = fl_key_get_start(&dst->key, src);
1879 
1880 	memcpy(pdst, psrc, fl_mask_range(src));
1881 	dst->range = src->range;
1882 }
1883 
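/* Base rhashtable parameters shared by all per-mask filter tables.
 * fl_init_mask_hashtable() narrows them for each mask: key_len becomes
 * the masked byte range and key_offset is advanced by range.start, so
 * hashing and comparison only ever touch bytes the mask can match on.
 */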
1884 static const struct rhashtable_params fl_ht_params = {
1885 	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1886 	.head_offset = offsetof(struct cls_fl_filter, ht_node),
1887 	.automatic_shrinking = true,
1888 };
1889 
1890 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1891 {
1892 	mask->filter_ht_params = fl_ht_params;
1893 	mask->filter_ht_params.key_len = fl_mask_range(mask);
1894 	mask->filter_ht_params.key_offset += mask->range.start;
1895 
1896 	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1897 }
1898 
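/* Helpers for building the flow_dissector key list from a mask. A
 * fl_flow_key member counts as used when any of its bytes is non-zero
 * in the mask; only used members (plus the always-required control and
 * basic keys) are passed to skb_flow_dissector_init(), keeping packet
 * dissection limited to what the filters can actually match.
 */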
1899 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1900 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
1901 
1902 #define FL_KEY_IS_MASKED(mask, member)						\
1903 	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
1904 		   0, FL_KEY_MEMBER_SIZE(member))				\
1905 
1906 #define FL_KEY_SET(keys, cnt, id, member)					\
1907 	do {									\
1908 		keys[cnt].key_id = id;						\
1909 		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
1910 		cnt++;								\
1911 	} while (0)
1912 
1913 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
1914 	do {									\
1915 		if (FL_KEY_IS_MASKED(mask, member))				\
1916 			FL_KEY_SET(keys, cnt, id, member);			\
1917 	} while (0)
1918 
1919 static void fl_init_dissector(struct flow_dissector *dissector,
1920 			      struct fl_flow_key *mask)
1921 {
1922 	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1923 	size_t cnt = 0;
1924 
1925 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1926 			     FLOW_DISSECTOR_KEY_META, meta);
1927 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1928 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1929 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1930 			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1931 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1932 			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1933 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1934 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1935 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1936 			     FLOW_DISSECTOR_KEY_PORTS, tp);
1937 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1938 			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
1939 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1940 			     FLOW_DISSECTOR_KEY_IP, ip);
1941 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1942 			     FLOW_DISSECTOR_KEY_TCP, tcp);
1943 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1944 			     FLOW_DISSECTOR_KEY_ICMP, icmp);
1945 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1946 			     FLOW_DISSECTOR_KEY_ARP, arp);
1947 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1948 			     FLOW_DISSECTOR_KEY_MPLS, mpls);
1949 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1950 			     FLOW_DISSECTOR_KEY_VLAN, vlan);
1951 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1952 			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1953 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1954 			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1955 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1956 			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1957 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1958 			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1959 	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1960 	    FL_KEY_IS_MASKED(mask, enc_ipv6))
1961 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1962 			   enc_control);
1963 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1964 			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1965 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1966 			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1967 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1968 			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1969 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1970 			     FLOW_DISSECTOR_KEY_CT, ct);
1971 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1972 			     FLOW_DISSECTOR_KEY_HASH, hash);
1973 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1974 			     FLOW_DISSECTOR_KEY_NUM_OF_VLANS, num_of_vlans);
1975 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1976 			     FLOW_DISSECTOR_KEY_PPPOE, pppoe);
1977 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1978 			     FLOW_DISSECTOR_KEY_L2TPV3, l2tpv3);
1979 
1980 	skb_flow_dissector_init(dissector, keys, cnt);
1981 }
1982 
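/* Allocate and initialize a master copy of @mask. The caller has
 * already inserted @mask as a temporary placeholder into head->ht;
 * rhashtable_replace_fast() swaps the placeholder for the refcounted
 * newmask, making it visible to concurrent fl_check_assign_mask()
 * callers.
 */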
1983 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1984 					       struct fl_flow_mask *mask)
1985 {
1986 	struct fl_flow_mask *newmask;
1987 	int err;
1988 
1989 	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1990 	if (!newmask)
1991 		return ERR_PTR(-ENOMEM);
1992 
1993 	fl_mask_copy(newmask, mask);
1994 
1995 	if ((newmask->key.tp_range.tp_min.dst &&
1996 	     newmask->key.tp_range.tp_max.dst) ||
1997 	    (newmask->key.tp_range.tp_min.src &&
1998 	     newmask->key.tp_range.tp_max.src))
1999 		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
2000 
2001 	err = fl_init_mask_hashtable(newmask);
2002 	if (err)
2003 		goto errout_free;
2004 
2005 	fl_init_dissector(&newmask->dissector, &newmask->key);
2006 
2007 	INIT_LIST_HEAD_RCU(&newmask->filters);
2008 
2009 	refcount_set(&newmask->refcnt, 1);
2010 	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
2011 				      &newmask->ht_node, mask_ht_params);
2012 	if (err)
2013 		goto errout_destroy;
2014 
2015 	spin_lock(&head->masks_lock);
2016 	list_add_tail_rcu(&newmask->list, &head->masks);
2017 	spin_unlock(&head->masks_lock);
2018 
2019 	return newmask;
2020 
2021 errout_destroy:
2022 	rhashtable_destroy(&newmask->ht);
2023 errout_free:
2024 	kfree(newmask);
2025 
2026 	return ERR_PTR(err);
2027 }
2028 
2029 static int fl_check_assign_mask(struct cls_fl_head *head,
2030 				struct cls_fl_filter *fnew,
2031 				struct cls_fl_filter *fold,
2032 				struct fl_flow_mask *mask)
2033 {
2034 	struct fl_flow_mask *newmask;
2035 	int ret = 0;
2036 
2037 	rcu_read_lock();
2038 
2039 	/* Insert the mask as a temporary node to prevent concurrent creation
2040 	 * of a mask with the same key. Any concurrent lookup with the same
2041 	 * key will return -EAGAIN because the mask's refcnt is zero.
2042 	 */
2043 	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
2044 						       &mask->ht_node,
2045 						       mask_ht_params);
2046 	if (!fnew->mask) {
2047 		rcu_read_unlock();
2048 
2049 		if (fold) {
2050 			ret = -EINVAL;
2051 			goto errout_cleanup;
2052 		}
2053 
2054 		newmask = fl_create_new_mask(head, mask);
2055 		if (IS_ERR(newmask)) {
2056 			ret = PTR_ERR(newmask);
2057 			goto errout_cleanup;
2058 		}
2059 
2060 		fnew->mask = newmask;
2061 		return 0;
2062 	} else if (IS_ERR(fnew->mask)) {
2063 		ret = PTR_ERR(fnew->mask);
2064 	} else if (fold && fold->mask != fnew->mask) {
2065 		ret = -EINVAL;
2066 	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
2067 		/* Mask was deleted concurrently, try again */
2068 		ret = -EAGAIN;
2069 	}
2070 	rcu_read_unlock();
2071 	return ret;
2072 
2073 errout_cleanup:
2074 	rhashtable_remove_fast(&head->ht, &mask->ht_node,
2075 			       mask_ht_params);
2076 	return ret;
2077 }
2078 
2079 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
2080 			struct cls_fl_filter *f, struct fl_flow_mask *mask,
2081 			unsigned long base, struct nlattr **tb,
2082 			struct nlattr *est,
2083 			struct fl_flow_tmplt *tmplt,
2084 			u32 flags, u32 fl_flags,
2085 			struct netlink_ext_ack *extack)
2086 {
2087 	int err;
2088 
2089 	err = tcf_exts_validate_ex(net, tp, tb, est, &f->exts, flags,
2090 				   fl_flags, extack);
2091 	if (err < 0)
2092 		return err;
2093 
2094 	if (tb[TCA_FLOWER_CLASSID]) {
2095 		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
2096 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
2097 			rtnl_lock();
2098 		tcf_bind_filter(tp, &f->res, base);
2099 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
2100 			rtnl_unlock();
2101 	}
2102 
2103 	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
2104 	if (err)
2105 		return err;
2106 
2107 	fl_mask_update_range(mask);
2108 	fl_set_masked_key(&f->mkey, &f->key, mask);
2109 
2110 	if (!fl_mask_fits_tmplt(tmplt, mask)) {
2111 		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
2112 		return -EINVAL;
2113 	}
2114 
2115 	return 0;
2116 }
2117 
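/* Try to insert the new filter into its mask's hashtable. On conflict
 * while overwriting (@fold set), -EEXIST is tolerated: *in_ht is left
 * false and fl_change() re-inserts the node under tp->lock once it is
 * ready to remove the old entry.
 */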
2118 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
2119 			       struct cls_fl_filter *fold,
2120 			       bool *in_ht)
2121 {
2122 	struct fl_flow_mask *mask = fnew->mask;
2123 	int err;
2124 
2125 	err = rhashtable_lookup_insert_fast(&mask->ht,
2126 					    &fnew->ht_node,
2127 					    mask->filter_ht_params);
2128 	if (err) {
2129 		*in_ht = false;
2130 		/* It is okay if a filter with the same key already exists
2131 		 * when overwriting.
2132 		 */
2133 		return fold && err == -EEXIST ? 0 : err;
2134 	}
2135 
2136 	*in_ht = true;
2137 	return 0;
2138 }
2139 
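/* Create or update a filter. The unlocked (rtnl-free) flow is roughly:
 * parse options -> fl_set_parms() -> fl_check_assign_mask() ->
 * fl_ht_insert_unique() -> optional hw offload -> publish under
 * tp->lock (idr handle plus the mask's filter list). Each step has a
 * matching unwind label below, and -EAGAIN is returned whenever a
 * concurrent writer invalidated one of our lookups so cls_api can
 * retry.
 */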
2140 static int fl_change(struct net *net, struct sk_buff *in_skb,
2141 		     struct tcf_proto *tp, unsigned long base,
2142 		     u32 handle, struct nlattr **tca,
2143 		     void **arg, u32 flags,
2144 		     struct netlink_ext_ack *extack)
2145 {
2146 	struct cls_fl_head *head = fl_head_dereference(tp);
2147 	bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
2148 	struct cls_fl_filter *fold = *arg;
2149 	struct cls_fl_filter *fnew;
2150 	struct fl_flow_mask *mask;
2151 	struct nlattr **tb;
2152 	bool in_ht;
2153 	int err;
2154 
2155 	if (!tca[TCA_OPTIONS]) {
2156 		err = -EINVAL;
2157 		goto errout_fold;
2158 	}
2159 
2160 	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
2161 	if (!mask) {
2162 		err = -ENOBUFS;
2163 		goto errout_fold;
2164 	}
2165 
2166 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2167 	if (!tb) {
2168 		err = -ENOBUFS;
2169 		goto errout_mask_alloc;
2170 	}
2171 
2172 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2173 					  tca[TCA_OPTIONS], fl_policy, NULL);
2174 	if (err < 0)
2175 		goto errout_tb;
2176 
2177 	if (fold && handle && fold->handle != handle) {
2178 		err = -EINVAL;
2179 		goto errout_tb;
2180 	}
2181 
2182 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
2183 	if (!fnew) {
2184 		err = -ENOBUFS;
2185 		goto errout_tb;
2186 	}
2187 	INIT_LIST_HEAD(&fnew->hw_list);
2188 	refcount_set(&fnew->refcnt, 1);
2189 
2190 	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
2191 	if (err < 0)
2192 		goto errout;
2193 
2194 	if (tb[TCA_FLOWER_FLAGS]) {
2195 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
2196 
2197 		if (!tc_flags_valid(fnew->flags)) {
2198 			err = -EINVAL;
2199 			goto errout;
2200 		}
2201 	}
2202 
2203 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
2204 			   tp->chain->tmplt_priv, flags, fnew->flags,
2205 			   extack);
2206 	if (err)
2207 		goto errout;
2208 
2209 	err = fl_check_assign_mask(head, fnew, fold, mask);
2210 	if (err)
2211 		goto errout;
2212 
2213 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
2214 	if (err)
2215 		goto errout_mask;
2216 
2217 	if (!tc_skip_hw(fnew->flags)) {
2218 		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
2219 		if (err)
2220 			goto errout_ht;
2221 	}
2222 
2223 	if (!tc_in_hw(fnew->flags))
2224 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2225 
2226 	spin_lock(&tp->lock);
2227 
2228 	/* tp was deleted concurrently. -EAGAIN will cause the caller to look
2229 	 * up the proto again or create a new one, if necessary.
2230 	 */
2231 	if (tp->deleting) {
2232 		err = -EAGAIN;
2233 		goto errout_hw;
2234 	}
2235 
2236 	if (fold) {
2237 		/* Fold filter was deleted concurrently. Retry lookup. */
2238 		if (fold->deleted) {
2239 			err = -EAGAIN;
2240 			goto errout_hw;
2241 		}
2242 
2243 		fnew->handle = handle;
2244 
2245 		if (!in_ht) {
2246 			struct rhashtable_params params =
2247 				fnew->mask->filter_ht_params;
2248 
2249 			err = rhashtable_insert_fast(&fnew->mask->ht,
2250 						     &fnew->ht_node,
2251 						     params);
2252 			if (err)
2253 				goto errout_hw;
2254 			in_ht = true;
2255 		}
2256 
2257 		refcount_inc(&fnew->refcnt);
2258 		rhashtable_remove_fast(&fold->mask->ht,
2259 				       &fold->ht_node,
2260 				       fold->mask->filter_ht_params);
2261 		idr_replace(&head->handle_idr, fnew, fnew->handle);
2262 		list_replace_rcu(&fold->list, &fnew->list);
2263 		fold->deleted = true;
2264 
2265 		spin_unlock(&tp->lock);
2266 
2267 		fl_mask_put(head, fold->mask);
2268 		if (!tc_skip_hw(fold->flags))
2269 			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2270 		tcf_unbind_filter(tp, &fold->res);
2271 		/* The caller holds a reference to fold, so the refcnt is
2272 		 * always > 0 after this.
2273 		 */
2274 		refcount_dec(&fold->refcnt);
2275 		__fl_put(fold);
2276 	} else {
2277 		if (handle) {
2278 			/* The user specified a handle that doesn't exist yet. */
2279 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2280 					    handle, GFP_ATOMIC);
2281 
2282 			/* A filter with the specified handle was concurrently
2283 			 * inserted after the initial check in cls_api. This is
2284 			 * not necessarily an error if NLM_F_EXCL is not set in
2285 			 * the message flags. Returning -EAGAIN makes cls_api
2286 			 * try to update the concurrently inserted rule.
2287 			 */
2288 			if (err == -ENOSPC)
2289 				err = -EAGAIN;
2290 		} else {
2291 			handle = 1;
2292 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2293 					    INT_MAX, GFP_ATOMIC);
2294 		}
2295 		if (err)
2296 			goto errout_hw;
2297 
2298 		refcount_inc(&fnew->refcnt);
2299 		fnew->handle = handle;
2300 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2301 		spin_unlock(&tp->lock);
2302 	}
2303 
2304 	*arg = fnew;
2305 
2306 	kfree(tb);
2307 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2308 	return 0;
2309 
2310 errout_ht:
2311 	spin_lock(&tp->lock);
2312 errout_hw:
2313 	fnew->deleted = true;
2314 	spin_unlock(&tp->lock);
2315 	if (!tc_skip_hw(fnew->flags))
2316 		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2317 	if (in_ht)
2318 		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2319 				       fnew->mask->filter_ht_params);
2320 errout_mask:
2321 	fl_mask_put(head, fnew->mask);
2322 errout:
2323 	__fl_put(fnew);
2324 errout_tb:
2325 	kfree(tb);
2326 errout_mask_alloc:
2327 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2328 errout_fold:
2329 	if (fold)
2330 		__fl_put(fold);
2331 	return err;
2332 }
2333 
2334 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2335 		     bool rtnl_held, struct netlink_ext_ack *extack)
2336 {
2337 	struct cls_fl_head *head = fl_head_dereference(tp);
2338 	struct cls_fl_filter *f = arg;
2339 	bool last_on_mask;
2340 	int err = 0;
2341 
2342 	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2343 	*last = list_empty(&head->masks);
2344 	__fl_put(f);
2345 
2346 	return err;
2347 }
2348 
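/* Walk all filters by handle. A reference is taken before dropping the
 * RCU read lock so arg->fn() runs outside the RCU section; filters
 * whose refcount already hit zero are being deleted and are skipped.
 */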
2349 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2350 		    bool rtnl_held)
2351 {
2352 	struct cls_fl_head *head = fl_head_dereference(tp);
2353 	unsigned long id = arg->cookie, tmp;
2354 	struct cls_fl_filter *f;
2355 
2356 	arg->count = arg->skip;
2357 
2358 	rcu_read_lock();
2359 	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2360 		/* don't return filters that are being deleted */
2361 		if (!refcount_inc_not_zero(&f->refcnt))
2362 			continue;
2363 		rcu_read_unlock();
2364 
2365 		if (arg->fn(tp, f, arg) < 0) {
2366 			__fl_put(f);
2367 			arg->stop = 1;
2368 			rcu_read_lock();
2369 			break;
2370 		}
2371 		__fl_put(f);
2372 		arg->count++;
2373 		rcu_read_lock();
2374 	}
2375 	rcu_read_unlock();
2376 	arg->cookie = id;
2377 }
2378 
2379 static struct cls_fl_filter *
2380 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2381 {
2382 	struct cls_fl_head *head = fl_head_dereference(tp);
2383 
2384 	spin_lock(&tp->lock);
2385 	if (list_empty(&head->hw_filters)) {
2386 		spin_unlock(&tp->lock);
2387 		return NULL;
2388 	}
2389 
2390 	if (!f)
2391 		f = list_entry(&head->hw_filters, struct cls_fl_filter,
2392 			       hw_list);
2393 	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2394 		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2395 			spin_unlock(&tp->lock);
2396 			return f;
2397 		}
2398 	}
2399 
2400 	spin_unlock(&tp->lock);
2401 	return NULL;
2402 }
2403 
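/* Replay every hw-offloaded filter to a single block callback (e.g.
 * when a block is bound to or unbound from a device). A filter whose
 * actions cannot be translated is skipped unless it is skip_sw, in
 * which case the replay is aborted.
 */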
2404 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2405 			void *cb_priv, struct netlink_ext_ack *extack)
2406 {
2407 	struct tcf_block *block = tp->chain->block;
2408 	struct flow_cls_offload cls_flower = {};
2409 	struct cls_fl_filter *f = NULL;
2410 	int err;
2411 
2412 	/* The hw_filters list can only be changed by hw offload functions
2413 	 * after obtaining the rtnl lock. Make sure it is not changed while
2414 	 * reoffload is iterating it.
2415 	 */
2416 	ASSERT_RTNL();
2417 
2418 	while ((f = fl_get_next_hw_filter(tp, f, add))) {
2419 		cls_flower.rule =
2420 			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2421 		if (!cls_flower.rule) {
2422 			__fl_put(f);
2423 			return -ENOMEM;
2424 		}
2425 
2426 		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2427 					   extack);
2428 		cls_flower.command = add ?
2429 			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2430 		cls_flower.cookie = (unsigned long)f;
2431 		cls_flower.rule->match.dissector = &f->mask->dissector;
2432 		cls_flower.rule->match.mask = &f->mask->key;
2433 		cls_flower.rule->match.key = &f->mkey;
2434 
2435 		err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
2436 					      cls_flower.common.extack);
2437 		if (err) {
2438 			kfree(cls_flower.rule);
2439 			if (tc_skip_sw(f->flags)) {
2440 				__fl_put(f);
2441 				return err;
2442 			}
2443 			goto next_flow;
2444 		}
2445 
2446 		cls_flower.classid = f->res.classid;
2447 
2448 		err = tc_setup_cb_reoffload(block, tp, add, cb,
2449 					    TC_SETUP_CLSFLOWER, &cls_flower,
2450 					    cb_priv, &f->flags,
2451 					    &f->in_hw_count);
2452 		tc_cleanup_offload_action(&cls_flower.rule->action);
2453 		kfree(cls_flower.rule);
2454 
2455 		if (err) {
2456 			__fl_put(f);
2457 			return err;
2458 		}
2459 next_flow:
2460 		__fl_put(f);
2461 	}
2462 
2463 	return 0;
2464 }
2465 
2466 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2467 {
2468 	struct flow_cls_offload *cls_flower = type_data;
2469 	struct cls_fl_filter *f =
2470 		(struct cls_fl_filter *) cls_flower->cookie;
2471 	struct cls_fl_head *head = fl_head_dereference(tp);
2472 
2473 	spin_lock(&tp->lock);
2474 	list_add(&f->hw_list, &head->hw_filters);
2475 	spin_unlock(&tp->lock);
2476 }
2477 
2478 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2479 {
2480 	struct flow_cls_offload *cls_flower = type_data;
2481 	struct cls_fl_filter *f =
2482 		(struct cls_fl_filter *) cls_flower->cookie;
2483 
2484 	spin_lock(&tp->lock);
2485 	if (!list_empty(&f->hw_list))
2486 		list_del_init(&f->hw_list);
2487 	spin_unlock(&tp->lock);
2488 }
2489 
2490 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2491 			      struct fl_flow_tmplt *tmplt)
2492 {
2493 	struct flow_cls_offload cls_flower = {};
2494 	struct tcf_block *block = chain->block;
2495 
2496 	cls_flower.rule = flow_rule_alloc(0);
2497 	if (!cls_flower.rule)
2498 		return -ENOMEM;
2499 
2500 	cls_flower.common.chain_index = chain->index;
2501 	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2502 	cls_flower.cookie = (unsigned long) tmplt;
2503 	cls_flower.rule->match.dissector = &tmplt->dissector;
2504 	cls_flower.rule->match.mask = &tmplt->mask;
2505 	cls_flower.rule->match.key = &tmplt->dummy_key;
2506 
2507 	/* We don't care if any of the drivers fails to handle this call;
2508 	 * it serves merely as a hint to them.
2509 	 */
2510 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2511 	kfree(cls_flower.rule);
2512 
2513 	return 0;
2514 }
2515 
2516 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2517 				struct fl_flow_tmplt *tmplt)
2518 {
2519 	struct flow_cls_offload cls_flower = {};
2520 	struct tcf_block *block = chain->block;
2521 
2522 	cls_flower.common.chain_index = chain->index;
2523 	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2524 	cls_flower.cookie = (unsigned long) tmplt;
2525 
2526 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2527 }
2528 
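/* Create a chain template: a key/mask pair that future filters on the
 * chain must fit (checked by fl_mask_fits_tmplt() in fl_set_parms()).
 * The template is also offered to the hardware as a sizing hint.
 */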
2529 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2530 			     struct nlattr **tca,
2531 			     struct netlink_ext_ack *extack)
2532 {
2533 	struct fl_flow_tmplt *tmplt;
2534 	struct nlattr **tb;
2535 	int err;
2536 
2537 	if (!tca[TCA_OPTIONS])
2538 		return ERR_PTR(-EINVAL);
2539 
2540 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2541 	if (!tb)
2542 		return ERR_PTR(-ENOBUFS);
2543 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2544 					  tca[TCA_OPTIONS], fl_policy, NULL);
2545 	if (err)
2546 		goto errout_tb;
2547 
2548 	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2549 	if (!tmplt) {
2550 		err = -ENOMEM;
2551 		goto errout_tb;
2552 	}
2553 	tmplt->chain = chain;
2554 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2555 	if (err)
2556 		goto errout_tmplt;
2557 
2558 	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2559 
2560 	err = fl_hw_create_tmplt(chain, tmplt);
2561 	if (err)
2562 		goto errout_tmplt;
2563 
2564 	kfree(tb);
2565 	return tmplt;
2566 
2567 errout_tmplt:
2568 	kfree(tmplt);
2569 errout_tb:
2570 	kfree(tb);
2571 	return ERR_PTR(err);
2572 }
2573 
2574 static void fl_tmplt_destroy(void *tmplt_priv)
2575 {
2576 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2577 
2578 	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2579 	kfree(tmplt);
2580 }
2581 
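/* Dump a single key/mask pair. Attributes whose mask is all zeroes are
 * omitted entirely, which keeps the dump limited to the fields the
 * user actually configured; a mask_type of TCA_FLOWER_UNSPEC
 * suppresses only the mask attribute.
 */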
2582 static int fl_dump_key_val(struct sk_buff *skb,
2583 			   void *val, int val_type,
2584 			   void *mask, int mask_type, int len)
2585 {
2586 	int err;
2587 
2588 	if (!memchr_inv(mask, 0, len))
2589 		return 0;
2590 	err = nla_put(skb, val_type, len, val);
2591 	if (err)
2592 		return err;
2593 	if (mask_type != TCA_FLOWER_UNSPEC) {
2594 		err = nla_put(skb, mask_type, len, mask);
2595 		if (err)
2596 			return err;
2597 	}
2598 	return 0;
2599 }
2600 
2601 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2602 				  struct fl_flow_key *mask)
2603 {
2604 	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2605 			    TCA_FLOWER_KEY_PORT_DST_MIN,
2606 			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2607 			    sizeof(key->tp_range.tp_min.dst)) ||
2608 	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2609 			    TCA_FLOWER_KEY_PORT_DST_MAX,
2610 			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2611 			    sizeof(key->tp_range.tp_max.dst)) ||
2612 	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2613 			    TCA_FLOWER_KEY_PORT_SRC_MIN,
2614 			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2615 			    sizeof(key->tp_range.tp_min.src)) ||
2616 	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2617 			    TCA_FLOWER_KEY_PORT_SRC_MAX,
2618 			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2619 			    sizeof(key->tp_range.tp_max.src)))
2620 		return -1;
2621 
2622 	return 0;
2623 }
2624 
2625 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2626 				    struct flow_dissector_key_mpls *mpls_key,
2627 				    struct flow_dissector_key_mpls *mpls_mask,
2628 				    u8 lse_index)
2629 {
2630 	struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2631 	struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2632 	int err;
2633 
2634 	err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2635 			 lse_index + 1);
2636 	if (err)
2637 		return err;
2638 
2639 	if (lse_mask->mpls_ttl) {
2640 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2641 				 lse_key->mpls_ttl);
2642 		if (err)
2643 			return err;
2644 	}
2645 	if (lse_mask->mpls_bos) {
2646 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2647 				 lse_key->mpls_bos);
2648 		if (err)
2649 			return err;
2650 	}
2651 	if (lse_mask->mpls_tc) {
2652 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2653 				 lse_key->mpls_tc);
2654 		if (err)
2655 			return err;
2656 	}
2657 	if (lse_mask->mpls_label) {
2658 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2659 				  lse_key->mpls_label);
2660 		if (err)
2661 			return err;
2662 	}
2663 
2664 	return 0;
2665 }
2666 
2667 static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2668 				 struct flow_dissector_key_mpls *mpls_key,
2669 				 struct flow_dissector_key_mpls *mpls_mask)
2670 {
2671 	struct nlattr *opts;
2672 	struct nlattr *lse;
2673 	u8 lse_index;
2674 	int err;
2675 
2676 	opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2677 	if (!opts)
2678 		return -EMSGSIZE;
2679 
2680 	for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2681 		if (!(mpls_mask->used_lses & 1 << lse_index))
2682 			continue;
2683 
2684 		lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2685 		if (!lse) {
2686 			err = -EMSGSIZE;
2687 			goto err_opts;
2688 		}
2689 
2690 		err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2691 					       lse_index);
2692 		if (err)
2693 			goto err_opts_lse;
2694 		nla_nest_end(skb, lse);
2695 	}
2696 	nla_nest_end(skb, opts);
2697 
2698 	return 0;
2699 
2700 err_opts_lse:
2701 	nla_nest_cancel(skb, lse);
2702 err_opts:
2703 	nla_nest_cancel(skb, opts);
2704 
2705 	return err;
2706 }
2707 
2708 static int fl_dump_key_mpls(struct sk_buff *skb,
2709 			    struct flow_dissector_key_mpls *mpls_key,
2710 			    struct flow_dissector_key_mpls *mpls_mask)
2711 {
2712 	struct flow_dissector_mpls_lse *lse_mask;
2713 	struct flow_dissector_mpls_lse *lse_key;
2714 	int err;
2715 
2716 	if (!mpls_mask->used_lses)
2717 		return 0;
2718 
2719 	lse_mask = &mpls_mask->ls[0];
2720 	lse_key = &mpls_key->ls[0];
2721 
2722 	/* For backward compatibility, don't use the MPLS nested attributes if
2723 	 * the rule can be expressed using the old attributes.
2724 	 */
2725 	if (mpls_mask->used_lses & ~1 ||
2726 	    (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2727 	     !lse_mask->mpls_tc && !lse_mask->mpls_label))
2728 		return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2729 
2730 	if (lse_mask->mpls_ttl) {
2731 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2732 				 lse_key->mpls_ttl);
2733 		if (err)
2734 			return err;
2735 	}
2736 	if (lse_mask->mpls_tc) {
2737 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2738 				 lse_key->mpls_tc);
2739 		if (err)
2740 			return err;
2741 	}
2742 	if (lse_mask->mpls_label) {
2743 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2744 				  lse_key->mpls_label);
2745 		if (err)
2746 			return err;
2747 	}
2748 	if (lse_mask->mpls_bos) {
2749 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2750 				 lse_key->mpls_bos);
2751 		if (err)
2752 			return err;
2753 	}
2754 	return 0;
2755 }
2756 
2757 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2758 			  struct flow_dissector_key_ip *key,
2759 			  struct flow_dissector_key_ip *mask)
2760 {
2761 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2762 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2763 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2764 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2765 
2766 	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2767 	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2768 		return -1;
2769 
2770 	return 0;
2771 }
2772 
2773 static int fl_dump_key_vlan(struct sk_buff *skb,
2774 			    int vlan_id_key, int vlan_prio_key,
2775 			    struct flow_dissector_key_vlan *vlan_key,
2776 			    struct flow_dissector_key_vlan *vlan_mask)
2777 {
2778 	int err;
2779 
2780 	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2781 		return 0;
2782 	if (vlan_mask->vlan_id) {
2783 		err = nla_put_u16(skb, vlan_id_key,
2784 				  vlan_key->vlan_id);
2785 		if (err)
2786 			return err;
2787 	}
2788 	if (vlan_mask->vlan_priority) {
2789 		err = nla_put_u8(skb, vlan_prio_key,
2790 				 vlan_key->vlan_priority);
2791 		if (err)
2792 			return err;
2793 	}
2794 	return 0;
2795 }
2796 
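/* Translate one flag bit from the flow_dissector namespace to the
 * TCA_FLOWER_KEY_FLAGS_* uapi namespace, preserving the key/mask
 * distinction: the key bit is reported only where the mask bit is set.
 */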
2797 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2798 			    u32 *flower_key, u32 *flower_mask,
2799 			    u32 flower_flag_bit, u32 dissector_flag_bit)
2800 {
2801 	if (dissector_mask & dissector_flag_bit) {
2802 		*flower_mask |= flower_flag_bit;
2803 		if (dissector_key & dissector_flag_bit)
2804 			*flower_key |= flower_flag_bit;
2805 	}
2806 }
2807 
2808 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2809 {
2810 	u32 key, mask;
2811 	__be32 _key, _mask;
2812 	int err;
2813 
2814 	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2815 		return 0;
2816 
2817 	key = 0;
2818 	mask = 0;
2819 
2820 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2821 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2822 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2823 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2824 			FLOW_DIS_FIRST_FRAG);
2825 
2826 	_key = cpu_to_be32(key);
2827 	_mask = cpu_to_be32(mask);
2828 
2829 	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2830 	if (err)
2831 		return err;
2832 
2833 	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2834 }
2835 
2836 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2837 				  struct flow_dissector_key_enc_opts *enc_opts)
2838 {
2839 	struct geneve_opt *opt;
2840 	struct nlattr *nest;
2841 	int opt_off = 0;
2842 
2843 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2844 	if (!nest)
2845 		goto nla_put_failure;
2846 
2847 	while (enc_opts->len > opt_off) {
2848 		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2849 
2850 		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2851 				 opt->opt_class))
2852 			goto nla_put_failure;
2853 		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2854 			       opt->type))
2855 			goto nla_put_failure;
2856 		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2857 			    opt->length * 4, opt->opt_data))
2858 			goto nla_put_failure;
2859 
2860 		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2861 	}
2862 	nla_nest_end(skb, nest);
2863 	return 0;
2864 
2865 nla_put_failure:
2866 	nla_nest_cancel(skb, nest);
2867 	return -EMSGSIZE;
2868 }
2869 
2870 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2871 				 struct flow_dissector_key_enc_opts *enc_opts)
2872 {
2873 	struct vxlan_metadata *md;
2874 	struct nlattr *nest;
2875 
2876 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2877 	if (!nest)
2878 		goto nla_put_failure;
2879 
2880 	md = (struct vxlan_metadata *)&enc_opts->data[0];
2881 	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2882 		goto nla_put_failure;
2883 
2884 	nla_nest_end(skb, nest);
2885 	return 0;
2886 
2887 nla_put_failure:
2888 	nla_nest_cancel(skb, nest);
2889 	return -EMSGSIZE;
2890 }
2891 
2892 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2893 				  struct flow_dissector_key_enc_opts *enc_opts)
2894 {
2895 	struct erspan_metadata *md;
2896 	struct nlattr *nest;
2897 
2898 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2899 	if (!nest)
2900 		goto nla_put_failure;
2901 
2902 	md = (struct erspan_metadata *)&enc_opts->data[0];
2903 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2904 		goto nla_put_failure;
2905 
2906 	if (md->version == 1 &&
2907 	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2908 		goto nla_put_failure;
2909 
2910 	if (md->version == 2 &&
2911 	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2912 			md->u.md2.dir) ||
2913 	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2914 			get_hwid(&md->u.md2))))
2915 		goto nla_put_failure;
2916 
2917 	nla_nest_end(skb, nest);
2918 	return 0;
2919 
2920 nla_put_failure:
2921 	nla_nest_cancel(skb, nest);
2922 	return -EMSGSIZE;
2923 }
2924 
2925 static int fl_dump_key_gtp_opt(struct sk_buff *skb,
2926 			       struct flow_dissector_key_enc_opts *enc_opts)
2927 
2928 {
2929 	struct gtp_pdu_session_info *session_info;
2930 	struct nlattr *nest;
2931 
2932 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP);
2933 	if (!nest)
2934 		goto nla_put_failure;
2935 
2936 	session_info = (struct gtp_pdu_session_info *)&enc_opts->data[0];
2937 
2938 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE,
2939 		       session_info->pdu_type))
2940 		goto nla_put_failure;
2941 
2942 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, session_info->qfi))
2943 		goto nla_put_failure;
2944 
2945 	nla_nest_end(skb, nest);
2946 	return 0;
2947 
2948 nla_put_failure:
2949 	nla_nest_cancel(skb, nest);
2950 	return -EMSGSIZE;
2951 }
2952 
2953 static int fl_dump_key_ct(struct sk_buff *skb,
2954 			  struct flow_dissector_key_ct *key,
2955 			  struct flow_dissector_key_ct *mask)
2956 {
2957 	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2958 	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2959 			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2960 			    sizeof(key->ct_state)))
2961 		goto nla_put_failure;
2962 
2963 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2964 	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2965 			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2966 			    sizeof(key->ct_zone)))
2967 		goto nla_put_failure;
2968 
2969 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2970 	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2971 			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2972 			    sizeof(key->ct_mark)))
2973 		goto nla_put_failure;
2974 
2975 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2976 	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2977 			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2978 			    sizeof(key->ct_labels)))
2979 		goto nla_put_failure;
2980 
2981 	return 0;
2982 
2983 nla_put_failure:
2984 	return -EMSGSIZE;
2985 }
2986 
2987 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2988 			       struct flow_dissector_key_enc_opts *enc_opts)
2989 {
2990 	struct nlattr *nest;
2991 	int err;
2992 
2993 	if (!enc_opts->len)
2994 		return 0;
2995 
2996 	nest = nla_nest_start_noflag(skb, enc_opt_type);
2997 	if (!nest)
2998 		goto nla_put_failure;
2999 
3000 	switch (enc_opts->dst_opt_type) {
3001 	case TUNNEL_GENEVE_OPT:
3002 		err = fl_dump_key_geneve_opt(skb, enc_opts);
3003 		if (err)
3004 			goto nla_put_failure;
3005 		break;
3006 	case TUNNEL_VXLAN_OPT:
3007 		err = fl_dump_key_vxlan_opt(skb, enc_opts);
3008 		if (err)
3009 			goto nla_put_failure;
3010 		break;
3011 	case TUNNEL_ERSPAN_OPT:
3012 		err = fl_dump_key_erspan_opt(skb, enc_opts);
3013 		if (err)
3014 			goto nla_put_failure;
3015 		break;
3016 	case TUNNEL_GTP_OPT:
3017 		err = fl_dump_key_gtp_opt(skb, enc_opts);
3018 		if (err)
3019 			goto nla_put_failure;
3020 		break;
3021 	default:
3022 		goto nla_put_failure;
3023 	}
3024 	nla_nest_end(skb, nest);
3025 	return 0;
3026 
3027 nla_put_failure:
3028 	nla_nest_cancel(skb, nest);
3029 	return -EMSGSIZE;
3030 }
3031 
3032 static int fl_dump_key_enc_opt(struct sk_buff *skb,
3033 			       struct flow_dissector_key_enc_opts *key_opts,
3034 			       struct flow_dissector_key_enc_opts *msk_opts)
3035 {
3036 	int err;
3037 
3038 	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
3039 	if (err)
3040 		return err;
3041 
3042 	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
3043 }
3044 
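/* Dump the full key/mask, mirroring the conditional structure of
 * fl_set_key(): a field is emitted only when its guarding protocol
 * matches and its mask is non-zero.
 */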
3045 static int fl_dump_key(struct sk_buff *skb, struct net *net,
3046 		       struct fl_flow_key *key, struct fl_flow_key *mask)
3047 {
3048 	if (mask->meta.ingress_ifindex) {
3049 		struct net_device *dev;
3050 
3051 		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
3052 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
3053 			goto nla_put_failure;
3054 	}
3055 
3056 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
3057 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
3058 			    sizeof(key->eth.dst)) ||
3059 	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
3060 			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
3061 			    sizeof(key->eth.src)) ||
3062 	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
3063 			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
3064 			    sizeof(key->basic.n_proto)))
3065 		goto nla_put_failure;
3066 
3067 	if (mask->num_of_vlans.num_of_vlans) {
3068 		if (nla_put_u8(skb, TCA_FLOWER_KEY_NUM_OF_VLANS, key->num_of_vlans.num_of_vlans))
3069 			goto nla_put_failure;
3070 	}
3071 
3072 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
3073 		goto nla_put_failure;
3074 
3075 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
3076 			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
3077 		goto nla_put_failure;
3078 
3079 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
3080 			     TCA_FLOWER_KEY_CVLAN_PRIO,
3081 			     &key->cvlan, &mask->cvlan) ||
3082 	    (mask->cvlan.vlan_tpid &&
3083 	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3084 			  key->cvlan.vlan_tpid)))
3085 		goto nla_put_failure;
3086 
3087 	if (mask->basic.n_proto) {
3088 		if (mask->cvlan.vlan_eth_type) {
3089 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
3090 					 key->basic.n_proto))
3091 				goto nla_put_failure;
3092 		} else if (mask->vlan.vlan_eth_type) {
3093 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3094 					 key->vlan.vlan_eth_type))
3095 				goto nla_put_failure;
3096 		}
3097 	}
3098 
3099 	if ((key->basic.n_proto == htons(ETH_P_IP) ||
3100 	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
3101 	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
3102 			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
3103 			    sizeof(key->basic.ip_proto)) ||
3104 	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
3105 		goto nla_put_failure;
3106 
3107 	if (mask->pppoe.session_id) {
3108 		if (nla_put_be16(skb, TCA_FLOWER_KEY_PPPOE_SID,
3109 				 key->pppoe.session_id))
3110 			goto nla_put_failure;
3111 	}
3112 	if (mask->basic.n_proto && mask->pppoe.ppp_proto) {
3113 		if (nla_put_be16(skb, TCA_FLOWER_KEY_PPP_PROTO,
3114 				 key->pppoe.ppp_proto))
3115 			goto nla_put_failure;
3116 	}
3117 
3118 	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3119 	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
3120 			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
3121 			     sizeof(key->ipv4.src)) ||
3122 	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
3123 			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
3124 			     sizeof(key->ipv4.dst))))
3125 		goto nla_put_failure;
3126 	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3127 		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
3128 				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
3129 				  sizeof(key->ipv6.src)) ||
3130 		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
3131 				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
3132 				  sizeof(key->ipv6.dst))))
3133 		goto nla_put_failure;
3134 
3135 	if (key->basic.ip_proto == IPPROTO_TCP &&
3136 	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
3137 			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
3138 			     sizeof(key->tp.src)) ||
3139 	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
3140 			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
3141 			     sizeof(key->tp.dst)) ||
3142 	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
3143 			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
3144 			     sizeof(key->tcp.flags))))
3145 		goto nla_put_failure;
3146 	else if (key->basic.ip_proto == IPPROTO_UDP &&
3147 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
3148 				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
3149 				  sizeof(key->tp.src)) ||
3150 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
3151 				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
3152 				  sizeof(key->tp.dst))))
3153 		goto nla_put_failure;
3154 	else if (key->basic.ip_proto == IPPROTO_SCTP &&
3155 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
3156 				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
3157 				  sizeof(key->tp.src)) ||
3158 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
3159 				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
3160 				  sizeof(key->tp.dst))))
3161 		goto nla_put_failure;
3162 	else if (key->basic.n_proto == htons(ETH_P_IP) &&
3163 		 key->basic.ip_proto == IPPROTO_ICMP &&
3164 		 (fl_dump_key_val(skb, &key->icmp.type,
3165 				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
3166 				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
3167 				  sizeof(key->icmp.type)) ||
3168 		  fl_dump_key_val(skb, &key->icmp.code,
3169 				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
3170 				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
3171 				  sizeof(key->icmp.code))))
3172 		goto nla_put_failure;
3173 	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
3174 		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
3175 		 (fl_dump_key_val(skb, &key->icmp.type,
3176 				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
3177 				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
3178 				  sizeof(key->icmp.type)) ||
3179 		  fl_dump_key_val(skb, &key->icmp.code,
3180 				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
3181 				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
3182 				  sizeof(key->icmp.code))))
3183 		goto nla_put_failure;
3184 	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
3185 		  key->basic.n_proto == htons(ETH_P_RARP)) &&
3186 		 (fl_dump_key_val(skb, &key->arp.sip,
3187 				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
3188 				  TCA_FLOWER_KEY_ARP_SIP_MASK,
3189 				  sizeof(key->arp.sip)) ||
3190 		  fl_dump_key_val(skb, &key->arp.tip,
3191 				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
3192 				  TCA_FLOWER_KEY_ARP_TIP_MASK,
3193 				  sizeof(key->arp.tip)) ||
3194 		  fl_dump_key_val(skb, &key->arp.op,
3195 				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
3196 				  TCA_FLOWER_KEY_ARP_OP_MASK,
3197 				  sizeof(key->arp.op)) ||
3198 		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
3199 				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
3200 				  sizeof(key->arp.sha)) ||
3201 		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
3202 				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
3203 				  sizeof(key->arp.tha))))
3204 		goto nla_put_failure;
3205 	else if (key->basic.ip_proto == IPPROTO_L2TP &&
3206 		 fl_dump_key_val(skb, &key->l2tpv3.session_id,
3207 				 TCA_FLOWER_KEY_L2TPV3_SID,
3208 				 &mask->l2tpv3.session_id,
3209 				 TCA_FLOWER_UNSPEC,
3210 				 sizeof(key->l2tpv3.session_id)))
3211 		goto nla_put_failure;
3212 
3213 	if ((key->basic.ip_proto == IPPROTO_TCP ||
3214 	     key->basic.ip_proto == IPPROTO_UDP ||
3215 	     key->basic.ip_proto == IPPROTO_SCTP) &&
3216 	     fl_dump_key_port_range(skb, key, mask))
3217 		goto nla_put_failure;
3218 
3219 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3220 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
3221 			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
3222 			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
3223 			    sizeof(key->enc_ipv4.src)) ||
3224 	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
3225 			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
3226 			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
3227 			     sizeof(key->enc_ipv4.dst))))
3228 		goto nla_put_failure;
3229 	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3230 		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
3231 			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
3232 			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
3233 			    sizeof(key->enc_ipv6.src)) ||
3234 		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
3235 				 TCA_FLOWER_KEY_ENC_IPV6_DST,
3236 				 &mask->enc_ipv6.dst,
3237 				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
3238 			    sizeof(key->enc_ipv6.dst))))
3239 		goto nla_put_failure;
3240 
3241 	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
3242 			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
3243 			    sizeof(key->enc_key_id)) ||
3244 	    fl_dump_key_val(skb, &key->enc_tp.src,
3245 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
3246 			    &mask->enc_tp.src,
3247 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
3248 			    sizeof(key->enc_tp.src)) ||
3249 	    fl_dump_key_val(skb, &key->enc_tp.dst,
3250 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
3251 			    &mask->enc_tp.dst,
3252 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
3253 			    sizeof(key->enc_tp.dst)) ||
3254 	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
3255 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
3256 		goto nla_put_failure;
3257 
3258 	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
3259 		goto nla_put_failure;
3260 
3261 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
3262 		goto nla_put_failure;
3263 
3264 	if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
3265 			     &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
3266 			     sizeof(key->hash.hash)))
3267 		goto nla_put_failure;
3268 
3269 	return 0;
3270 
3271 nla_put_failure:
3272 	return -EMSGSIZE;
3273 }
3274 
3275 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
3276 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3277 {
3278 	struct cls_fl_filter *f = fh;
3279 	struct nlattr *nest;
3280 	struct fl_flow_key *key, *mask;
3281 	bool skip_hw;
3282 
3283 	if (!f)
3284 		return skb->len;
3285 
3286 	t->tcm_handle = f->handle;
3287 
3288 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3289 	if (!nest)
3290 		goto nla_put_failure;
3291 
3292 	spin_lock(&tp->lock);
3293 
3294 	if (f->res.classid &&
3295 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
3296 		goto nla_put_failure_locked;
3297 
3298 	key = &f->key;
3299 	mask = &f->mask->key;
3300 	skip_hw = tc_skip_hw(f->flags);
3301 
3302 	if (fl_dump_key(skb, net, key, mask))
3303 		goto nla_put_failure_locked;
3304 
3305 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3306 		goto nla_put_failure_locked;
3307 
3308 	spin_unlock(&tp->lock);
3309 
3310 	if (!skip_hw)
3311 		fl_hw_update_stats(tp, f, rtnl_held);
3312 
3313 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3314 		goto nla_put_failure;
3315 
3316 	if (tcf_exts_dump(skb, &f->exts))
3317 		goto nla_put_failure;
3318 
3319 	nla_nest_end(skb, nest);
3320 
3321 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3322 		goto nla_put_failure;
3323 
3324 	return skb->len;
3325 
3326 nla_put_failure_locked:
3327 	spin_unlock(&tp->lock);
3328 nla_put_failure:
3329 	nla_nest_cancel(skb, nest);
3330 	return -1;
3331 }
3332 
3333 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3334 			 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3335 {
3336 	struct cls_fl_filter *f = fh;
3337 	struct nlattr *nest;
3338 	bool skip_hw;
3339 
3340 	if (!f)
3341 		return skb->len;
3342 
3343 	t->tcm_handle = f->handle;
3344 
3345 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3346 	if (!nest)
3347 		goto nla_put_failure;
3348 
3349 	spin_lock(&tp->lock);
3350 
3351 	skip_hw = tc_skip_hw(f->flags);
3352 
3353 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3354 		goto nla_put_failure_locked;
3355 
3356 	spin_unlock(&tp->lock);
3357 
3358 	if (!skip_hw)
3359 		fl_hw_update_stats(tp, f, rtnl_held);
3360 
3361 	if (tcf_exts_terse_dump(skb, &f->exts))
3362 		goto nla_put_failure;
3363 
3364 	nla_nest_end(skb, nest);
3365 
3366 	return skb->len;
3367 
3368 nla_put_failure_locked:
3369 	spin_unlock(&tp->lock);
3370 nla_put_failure:
3371 	nla_nest_cancel(skb, nest);
3372 	return -1;
3373 }
3374 
3375 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3376 {
3377 	struct fl_flow_tmplt *tmplt = tmplt_priv;
3378 	struct fl_flow_key *key, *mask;
3379 	struct nlattr *nest;
3380 
3381 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3382 	if (!nest)
3383 		goto nla_put_failure;
3384 
3385 	key = &tmplt->dummy_key;
3386 	mask = &tmplt->mask;
3387 
3388 	if (fl_dump_key(skb, net, key, mask))
3389 		goto nla_put_failure;
3390 
3391 	nla_nest_end(skb, nest);
3392 
3393 	return skb->len;
3394 
3395 nla_put_failure:
3396 	nla_nest_cancel(skb, nest);
3397 	return -EMSGSIZE;
3398 }
3399 
3400 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3401 			  unsigned long base)
3402 {
3403 	struct cls_fl_filter *f = fh;
3404 
3405 	tc_cls_bind_class(classid, cl, q, &f->res, base);
3406 }
3407 
3408 static bool fl_delete_empty(struct tcf_proto *tp)
3409 {
3410 	struct cls_fl_head *head = fl_head_dereference(tp);
3411 
3412 	spin_lock(&tp->lock);
3413 	tp->deleting = idr_is_empty(&head->handle_idr);
3414 	spin_unlock(&tp->lock);
3415 
3416 	return tp->deleting;
3417 }
3418 
3419 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3420 	.kind		= "flower",
3421 	.classify	= fl_classify,
3422 	.init		= fl_init,
3423 	.destroy	= fl_destroy,
3424 	.get		= fl_get,
3425 	.put		= fl_put,
3426 	.change		= fl_change,
3427 	.delete		= fl_delete,
3428 	.delete_empty	= fl_delete_empty,
3429 	.walk		= fl_walk,
3430 	.reoffload	= fl_reoffload,
3431 	.hw_add		= fl_hw_add,
3432 	.hw_del		= fl_hw_del,
3433 	.dump		= fl_dump,
3434 	.terse_dump	= fl_terse_dump,
3435 	.bind_class	= fl_bind_class,
3436 	.tmplt_create	= fl_tmplt_create,
3437 	.tmplt_destroy	= fl_tmplt_destroy,
3438 	.tmplt_dump	= fl_tmplt_dump,
3439 	.owner		= THIS_MODULE,
3440 	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
3441 };
3442 
3443 static int __init cls_fl_init(void)
3444 {
3445 	return register_tcf_proto_ops(&cls_fl_ops);
3446 }
3447 
3448 static void __exit cls_fl_exit(void)
3449 {
3450 	unregister_tcf_proto_ops(&cls_fl_ops);
3451 }
3452 
3453 module_init(cls_fl_init);
3454 module_exit(cls_fl_exit);
3455 
3456 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3457 MODULE_DESCRIPTION("Flower classifier");
3458 MODULE_LICENSE("GPL v2");
3459