xref: /linux/net/sched/cls_flower.c (revision ff40b5769a50fab654a70575ff0f49853b799b0e)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/cls_flower.c		Flower classifier
4  *
5  * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/rhashtable.h>
12 #include <linux/workqueue.h>
13 #include <linux/refcount.h>
14 
15 #include <linux/if_ether.h>
16 #include <linux/in6.h>
17 #include <linux/ip.h>
18 #include <linux/mpls.h>
19 #include <linux/ppp_defs.h>
20 
21 #include <net/sch_generic.h>
22 #include <net/pkt_cls.h>
23 #include <net/pkt_sched.h>
24 #include <net/ip.h>
25 #include <net/flow_dissector.h>
26 #include <net/geneve.h>
27 #include <net/vxlan.h>
28 #include <net/erspan.h>
29 #include <net/gtp.h>
30 #include <net/tc_wrapper.h>
31 
32 #include <net/dst.h>
33 #include <net/dst_metadata.h>
34 
35 #include <uapi/linux/netfilter/nf_conntrack_common.h>
36 
37 #define TCA_FLOWER_KEY_CT_FLAGS_MAX \
38 		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
39 #define TCA_FLOWER_KEY_CT_FLAGS_MASK \
40 		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)
41 
42 struct fl_flow_key {
43 	struct flow_dissector_key_meta meta;
44 	struct flow_dissector_key_control control;
45 	struct flow_dissector_key_control enc_control;
46 	struct flow_dissector_key_basic basic;
47 	struct flow_dissector_key_eth_addrs eth;
48 	struct flow_dissector_key_vlan vlan;
49 	struct flow_dissector_key_vlan cvlan;
50 	union {
51 		struct flow_dissector_key_ipv4_addrs ipv4;
52 		struct flow_dissector_key_ipv6_addrs ipv6;
53 	};
54 	struct flow_dissector_key_ports tp;
55 	struct flow_dissector_key_icmp icmp;
56 	struct flow_dissector_key_arp arp;
57 	struct flow_dissector_key_keyid enc_key_id;
58 	union {
59 		struct flow_dissector_key_ipv4_addrs enc_ipv4;
60 		struct flow_dissector_key_ipv6_addrs enc_ipv6;
61 	};
62 	struct flow_dissector_key_ports enc_tp;
63 	struct flow_dissector_key_mpls mpls;
64 	struct flow_dissector_key_tcp tcp;
65 	struct flow_dissector_key_ip ip;
66 	struct flow_dissector_key_ip enc_ip;
67 	struct flow_dissector_key_enc_opts enc_opts;
68 	struct flow_dissector_key_ports_range tp_range;
69 	struct flow_dissector_key_ct ct;
70 	struct flow_dissector_key_hash hash;
71 	struct flow_dissector_key_num_of_vlans num_of_vlans;
72 	struct flow_dissector_key_pppoe pppoe;
73 	struct flow_dissector_key_l2tpv3 l2tpv3;
74 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
75 
76 struct fl_flow_mask_range {
77 	unsigned short int start;
78 	unsigned short int end;
79 };
80 
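/* One fl_flow_mask exists per distinct mask in use. It owns a per-mask
 * rhashtable of the filters that share it (keyed by masked flow key), sits
 * on the classifier's masks list, and is freed once the last filter holding
 * a reference to it is gone.
 */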
81 struct fl_flow_mask {
82 	struct fl_flow_key key;
83 	struct fl_flow_mask_range range;
84 	u32 flags;
85 	struct rhash_head ht_node;
86 	struct rhashtable ht;
87 	struct rhashtable_params filter_ht_params;
88 	struct flow_dissector dissector;
89 	struct list_head filters;
90 	struct rcu_work rwork;
91 	struct list_head list;
92 	refcount_t refcnt;
93 };
94 
95 struct fl_flow_tmplt {
96 	struct fl_flow_key dummy_key;
97 	struct fl_flow_key mask;
98 	struct flow_dissector dissector;
99 	struct tcf_chain *chain;
100 };
101 
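/* Per-instance classifier state: the rhashtable deduplicates masks, the
 * masks list is what the classification fast path walks in order, and
 * handle_idr maps filter handles to filters.
 */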
102 struct cls_fl_head {
103 	struct rhashtable ht;
104 	spinlock_t masks_lock; /* Protect masks list */
105 	struct list_head masks;
106 	struct list_head hw_filters;
107 	struct rcu_work rwork;
108 	struct idr handle_idr;
109 };
110 
111 struct cls_fl_filter {
112 	struct fl_flow_mask *mask;
113 	struct rhash_head ht_node;
114 	struct fl_flow_key mkey;
115 	struct tcf_exts exts;
116 	struct tcf_result res;
117 	struct fl_flow_key key;
118 	struct list_head list;
119 	struct list_head hw_list;
120 	u32 handle;
121 	u32 flags;
122 	u32 in_hw_count;
123 	struct rcu_work rwork;
124 	struct net_device *hw_dev;
125 	/* Flower classifier is unlocked, which means that its reference counter
126 	 * can be changed concurrently without any kind of external
127 	 * synchronization. Use atomic reference counter to be concurrency-safe.
128 	 */
129 	refcount_t refcnt;
130 	bool deleted;
131 };
132 
133 static const struct rhashtable_params mask_ht_params = {
134 	.key_offset = offsetof(struct fl_flow_mask, key),
135 	.key_len = sizeof(struct fl_flow_key),
136 	.head_offset = offsetof(struct fl_flow_mask, ht_node),
137 	.automatic_shrinking = true,
138 };
139 
140 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
141 {
142 	return mask->range.end - mask->range.start;
143 }
144 
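/* Compute the smallest long-aligned byte range of the key that covers all
 * non-zero mask bytes; hashing and masked comparisons then only touch that
 * range. Illustrative example (offsets made up, not the real layout): if
 * only eth.dst were masked and it occupied bytes 20..25 of struct
 * fl_flow_key, then first = 20 and last = 25, giving range.start = 16 and
 * range.end = 32 on a 64-bit kernel (sizeof(long) == 8).
 */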
145 static void fl_mask_update_range(struct fl_flow_mask *mask)
146 {
147 	const u8 *bytes = (const u8 *) &mask->key;
148 	size_t size = sizeof(mask->key);
149 	size_t i, first = 0, last;
150 
151 	for (i = 0; i < size; i++) {
152 		if (bytes[i]) {
153 			first = i;
154 			break;
155 		}
156 	}
157 	last = first;
158 	for (i = size - 1; i != first; i--) {
159 		if (bytes[i]) {
160 			last = i;
161 			break;
162 		}
163 	}
164 	mask->range.start = rounddown(first, sizeof(long));
165 	mask->range.end = roundup(last + 1, sizeof(long));
166 }
167 
168 static void *fl_key_get_start(struct fl_flow_key *key,
169 			      const struct fl_flow_mask *mask)
170 {
171 	return (u8 *) key + mask->range.start;
172 }
173 
174 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
175 			      struct fl_flow_mask *mask)
176 {
177 	const long *lkey = fl_key_get_start(key, mask);
178 	const long *lmask = fl_key_get_start(&mask->key, mask);
179 	long *lmkey = fl_key_get_start(mkey, mask);
180 	int i;
181 
182 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
183 		*lmkey++ = *lkey++ & *lmask++;
184 }
185 
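/* A mask "fits" a template when it only matches on bits that the template's
 * mask also covers: fail as soon as the mask has a bit set that is clear in
 * the template, checked long by long over the mask's relevant byte range.
 */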
186 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
187 			       struct fl_flow_mask *mask)
188 {
189 	const long *lmask = fl_key_get_start(&mask->key, mask);
190 	const long *ltmplt;
191 	int i;
192 
193 	if (!tmplt)
194 		return true;
195 	ltmplt = fl_key_get_start(&tmplt->mask, mask);
196 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
197 		if (~*ltmplt++ & *lmask++)
198 			return false;
199 	}
200 	return true;
201 }
202 
203 static void fl_clear_masked_range(struct fl_flow_key *key,
204 				  struct fl_flow_mask *mask)
205 {
206 	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
207 }
208 
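/* Port-range matching: the dissected skb key carries only the actual
 * destination port, not a min/max pair, so the range check against the
 * candidate filter is done here. On success the candidate's masked min/max
 * values are copied into mkey so that the hash lookup that follows uses the
 * same masked key the range filter was inserted under.
 */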
209 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
210 				  struct fl_flow_key *key,
211 				  struct fl_flow_key *mkey)
212 {
213 	u16 min_mask, max_mask, min_val, max_val;
214 
215 	min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
216 	max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
217 	min_val = ntohs(filter->key.tp_range.tp_min.dst);
218 	max_val = ntohs(filter->key.tp_range.tp_max.dst);
219 
220 	if (min_mask && max_mask) {
221 		if (ntohs(key->tp_range.tp.dst) < min_val ||
222 		    ntohs(key->tp_range.tp.dst) > max_val)
223 			return false;
224 
225 		/* skb does not have min and max values */
226 		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
227 		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
228 	}
229 	return true;
230 }
231 
232 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
233 				  struct fl_flow_key *key,
234 				  struct fl_flow_key *mkey)
235 {
236 	u16 min_mask, max_mask, min_val, max_val;
237 
238 	min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
239 	max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
240 	min_val = ntohs(filter->key.tp_range.tp_min.src);
241 	max_val = ntohs(filter->key.tp_range.tp_max.src);
242 
243 	if (min_mask && max_mask) {
244 		if (ntohs(key->tp_range.tp.src) < min_val ||
245 		    ntohs(key->tp_range.tp.src) > max_val)
246 			return false;
247 
248 		/* skb does not have min and max values */
249 		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
250 		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
251 	}
252 	return true;
253 }
254 
255 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
256 					 struct fl_flow_key *mkey)
257 {
258 	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
259 				      mask->filter_ht_params);
260 }
261 
262 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
263 					     struct fl_flow_key *mkey,
264 					     struct fl_flow_key *key)
265 {
266 	struct cls_fl_filter *filter, *f;
267 
268 	list_for_each_entry_rcu(filter, &mask->filters, list) {
269 		if (!fl_range_port_dst_cmp(filter, key, mkey))
270 			continue;
271 
272 		if (!fl_range_port_src_cmp(filter, key, mkey))
273 			continue;
274 
275 		f = __fl_lookup(mask, mkey);
276 		if (f)
277 			return f;
278 	}
279 	return NULL;
280 }
281 
282 static noinline_for_stack
283 struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
284 {
285 	struct fl_flow_key mkey;
286 
287 	fl_set_masked_key(&mkey, key, mask);
288 	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
289 		return fl_lookup_range(mask, &mkey, key);
290 
291 	return __fl_lookup(mask, &mkey);
292 }
293 
294 static u16 fl_ct_info_to_flower_map[] = {
295 	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
296 					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
297 	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
298 					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
299 	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
300 					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
301 					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
302 	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
303 					TCA_FLOWER_KEY_CT_FLAGS_RELATED |
304 					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
305 	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
306 					TCA_FLOWER_KEY_CT_FLAGS_NEW,
307 };
308 
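/* Classification fast path: walk the masks in order, dissect only the
 * fields each mask's dissector requests, apply the mask and look the masked
 * key up in that mask's hashtable. The first match that is not skip_sw has
 * its actions executed.
 */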
309 TC_INDIRECT_SCOPE int fl_classify(struct sk_buff *skb,
310 				  const struct tcf_proto *tp,
311 				  struct tcf_result *res)
312 {
313 	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
314 	bool post_ct = tc_skb_cb(skb)->post_ct;
315 	u16 zone = tc_skb_cb(skb)->zone;
316 	struct fl_flow_key skb_key;
317 	struct fl_flow_mask *mask;
318 	struct cls_fl_filter *f;
319 
320 	list_for_each_entry_rcu(mask, &head->masks, list) {
321 		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
322 		fl_clear_masked_range(&skb_key, mask);
323 
324 		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
325 		/* skb_flow_dissect() does not set n_proto in case of an unknown
326 		 * protocol, so set it here.
327 		 */
328 		skb_key.basic.n_proto = skb_protocol(skb, false);
329 		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
330 		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
331 				    fl_ct_info_to_flower_map,
332 				    ARRAY_SIZE(fl_ct_info_to_flower_map),
333 				    post_ct, zone);
334 		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
335 		skb_flow_dissect(skb, &mask->dissector, &skb_key,
336 				 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);
337 
338 		f = fl_mask_lookup(mask, &skb_key);
339 		if (f && !tc_skip_sw(f->flags)) {
340 			*res = f->res;
341 			return tcf_exts_exec(skb, &f->exts, res);
342 		}
343 	}
344 	return -1;
345 }
346 
347 static int fl_init(struct tcf_proto *tp)
348 {
349 	struct cls_fl_head *head;
350 
351 	head = kzalloc(sizeof(*head), GFP_KERNEL);
352 	if (!head)
353 		return -ENOBUFS;
354 
355 	spin_lock_init(&head->masks_lock);
356 	INIT_LIST_HEAD_RCU(&head->masks);
357 	INIT_LIST_HEAD(&head->hw_filters);
358 	rcu_assign_pointer(tp->root, head);
359 	idr_init(&head->handle_idr);
360 
361 	return rhashtable_init(&head->ht, &mask_ht_params);
362 }
363 
364 static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
365 {
366 	/* temporary masks don't have their filters list and ht initialized */
367 	if (mask_init_done) {
368 		WARN_ON(!list_empty(&mask->filters));
369 		rhashtable_destroy(&mask->ht);
370 	}
371 	kfree(mask);
372 }
373 
374 static void fl_mask_free_work(struct work_struct *work)
375 {
376 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
377 						 struct fl_flow_mask, rwork);
378 
379 	fl_mask_free(mask, true);
380 }
381 
382 static void fl_uninit_mask_free_work(struct work_struct *work)
383 {
384 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
385 						 struct fl_flow_mask, rwork);
386 
387 	fl_mask_free(mask, false);
388 }
389 
390 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
391 {
392 	if (!refcount_dec_and_test(&mask->refcnt))
393 		return false;
394 
395 	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
396 
397 	spin_lock(&head->masks_lock);
398 	list_del_rcu(&mask->list);
399 	spin_unlock(&head->masks_lock);
400 
401 	tcf_queue_work(&mask->rwork, fl_mask_free_work);
402 
403 	return true;
404 }
405 
406 static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
407 {
408 	/* Flower classifier only changes root pointer during init and destroy.
409 	 * Users must obtain reference to tcf_proto instance before calling its
410 	 * API, so tp->root pointer is protected from concurrent call to
411 	 * fl_destroy() by reference counting.
412 	 */
413 	return rcu_dereference_raw(tp->root);
414 }
415 
416 static void __fl_destroy_filter(struct cls_fl_filter *f)
417 {
418 	tcf_exts_destroy(&f->exts);
419 	tcf_exts_put_net(&f->exts);
420 	kfree(f);
421 }
422 
423 static void fl_destroy_filter_work(struct work_struct *work)
424 {
425 	struct cls_fl_filter *f = container_of(to_rcu_work(work),
426 					struct cls_fl_filter, rwork);
427 
428 	__fl_destroy_filter(f);
429 }
430 
431 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
432 				 bool rtnl_held, struct netlink_ext_ack *extack)
433 {
434 	struct tcf_block *block = tp->chain->block;
435 	struct flow_cls_offload cls_flower = {};
436 
437 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
438 	cls_flower.command = FLOW_CLS_DESTROY;
439 	cls_flower.cookie = (unsigned long) f;
440 
441 	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
442 			    &f->flags, &f->in_hw_count, rtnl_held);
443 
444 }
445 
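/* Offload the filter to hardware via the tc block's offload callbacks.
 * Failure to translate the actions is only fatal for skip_sw filters, and
 * a skip_sw filter that no hardware accepted is rejected with -EINVAL.
 */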
446 static int fl_hw_replace_filter(struct tcf_proto *tp,
447 				struct cls_fl_filter *f, bool rtnl_held,
448 				struct netlink_ext_ack *extack)
449 {
450 	struct tcf_block *block = tp->chain->block;
451 	struct flow_cls_offload cls_flower = {};
452 	bool skip_sw = tc_skip_sw(f->flags);
453 	int err = 0;
454 
455 	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
456 	if (!cls_flower.rule)
457 		return -ENOMEM;
458 
459 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
460 	cls_flower.command = FLOW_CLS_REPLACE;
461 	cls_flower.cookie = (unsigned long) f;
462 	cls_flower.rule->match.dissector = &f->mask->dissector;
463 	cls_flower.rule->match.mask = &f->mask->key;
464 	cls_flower.rule->match.key = &f->mkey;
465 	cls_flower.classid = f->res.classid;
466 
467 	err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
468 				      cls_flower.common.extack);
469 	if (err) {
470 		kfree(cls_flower.rule);
471 
472 		return skip_sw ? err : 0;
473 	}
474 
475 	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
476 			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
477 	tc_cleanup_offload_action(&cls_flower.rule->action);
478 	kfree(cls_flower.rule);
479 
480 	if (err) {
481 		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
482 		return err;
483 	}
484 
485 	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
486 		return -EINVAL;
487 
488 	return 0;
489 }
490 
491 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
492 			       bool rtnl_held)
493 {
494 	struct tcf_block *block = tp->chain->block;
495 	struct flow_cls_offload cls_flower = {};
496 
497 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
498 	cls_flower.command = FLOW_CLS_STATS;
499 	cls_flower.cookie = (unsigned long) f;
500 	cls_flower.classid = f->res.classid;
501 
502 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
503 			 rtnl_held);
504 
505 	tcf_exts_hw_stats_update(&f->exts, &cls_flower.stats, cls_flower.use_act_stats);
506 }
507 
508 static void __fl_put(struct cls_fl_filter *f)
509 {
510 	if (!refcount_dec_and_test(&f->refcnt))
511 		return;
512 
513 	if (tcf_exts_get_net(&f->exts))
514 		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
515 	else
516 		__fl_destroy_filter(f);
517 }
518 
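/* Look up a filter by handle and take a reference to it. The idr is
 * searched under RCU; refcount_inc_not_zero() avoids resurrecting a filter
 * whose last reference has already been dropped.
 */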
519 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
520 {
521 	struct cls_fl_filter *f;
522 
523 	rcu_read_lock();
524 	f = idr_find(&head->handle_idr, handle);
525 	if (f && !refcount_inc_not_zero(&f->refcnt))
526 		f = NULL;
527 	rcu_read_unlock();
528 
529 	return f;
530 }
531 
532 static struct tcf_exts *fl_get_exts(const struct tcf_proto *tp, u32 handle)
533 {
534 	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
535 	struct cls_fl_filter *f;
536 
537 	f = idr_find(&head->handle_idr, handle);
538 	return f ? &f->exts : NULL;
539 }
540 
541 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
542 		       bool *last, bool rtnl_held,
543 		       struct netlink_ext_ack *extack)
544 {
545 	struct cls_fl_head *head = fl_head_dereference(tp);
546 
547 	*last = false;
548 
549 	spin_lock(&tp->lock);
550 	if (f->deleted) {
551 		spin_unlock(&tp->lock);
552 		return -ENOENT;
553 	}
554 
555 	f->deleted = true;
556 	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
557 			       f->mask->filter_ht_params);
558 	idr_remove(&head->handle_idr, f->handle);
559 	list_del_rcu(&f->list);
560 	spin_unlock(&tp->lock);
561 
562 	*last = fl_mask_put(head, f->mask);
563 	if (!tc_skip_hw(f->flags))
564 		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
565 	tcf_unbind_filter(tp, &f->res);
566 	__fl_put(f);
567 
568 	return 0;
569 }
570 
571 static void fl_destroy_sleepable(struct work_struct *work)
572 {
573 	struct cls_fl_head *head = container_of(to_rcu_work(work),
574 						struct cls_fl_head,
575 						rwork);
576 
577 	rhashtable_destroy(&head->ht);
578 	kfree(head);
579 	module_put(THIS_MODULE);
580 }
581 
582 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
583 		       struct netlink_ext_ack *extack)
584 {
585 	struct cls_fl_head *head = fl_head_dereference(tp);
586 	struct fl_flow_mask *mask, *next_mask;
587 	struct cls_fl_filter *f, *next;
588 	bool last;
589 
590 	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
591 		list_for_each_entry_safe(f, next, &mask->filters, list) {
592 			__fl_delete(tp, f, &last, rtnl_held, extack);
593 			if (last)
594 				break;
595 		}
596 	}
597 	idr_destroy(&head->handle_idr);
598 
599 	__module_get(THIS_MODULE);
600 	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
601 }
602 
603 static void fl_put(struct tcf_proto *tp, void *arg)
604 {
605 	struct cls_fl_filter *f = arg;
606 
607 	__fl_put(f);
608 }
609 
610 static void *fl_get(struct tcf_proto *tp, u32 handle)
611 {
612 	struct cls_fl_head *head = fl_head_dereference(tp);
613 
614 	return __fl_get(head, handle);
615 }
616 
617 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
618 	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
619 	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
620 	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
621 					    .len = IFNAMSIZ },
622 	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
623 	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
624 	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
625 	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
626 	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
627 	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
628 	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
629 	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
630 	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
631 	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
632 	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
633 	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
634 	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
635 	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
636 	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
637 	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
638 	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
639 	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
640 	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
641 	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
642 	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
643 	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
644 	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
645 	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
646 	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
647 	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
648 	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
649 	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
650 	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
651 	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
652 	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
653 	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
654 	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
655 	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
656 	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
657 	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
658 	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
659 	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
660 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
661 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
662 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
663 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
664 	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
665 	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
666 	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
667 	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
668 	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
669 	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
670 	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
671 	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
672 	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
673 	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
674 	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
675 	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
676 	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
677 	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
678 	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
679 	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
680 	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
681 	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
682 	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
683 	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
684 	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
685 	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
686 	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
687 	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
688 	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
689 	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
690 	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
691 	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
692 	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
693 	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
694 	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
695 	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
696 	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
697 	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
698 	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
699 	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
700 	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
701 	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
702 	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
703 	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
704 	[TCA_FLOWER_KEY_CT_STATE]	=
705 		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
706 	[TCA_FLOWER_KEY_CT_STATE_MASK]	=
707 		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
708 	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
709 	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
710 	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
711 	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
712 	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
713 					    .len = 128 / BITS_PER_BYTE },
714 	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
715 					    .len = 128 / BITS_PER_BYTE },
716 	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
717 	[TCA_FLOWER_KEY_HASH]		= { .type = NLA_U32 },
718 	[TCA_FLOWER_KEY_HASH_MASK]	= { .type = NLA_U32 },
719 	[TCA_FLOWER_KEY_NUM_OF_VLANS]	= { .type = NLA_U8 },
720 	[TCA_FLOWER_KEY_PPPOE_SID]	= { .type = NLA_U16 },
721 	[TCA_FLOWER_KEY_PPP_PROTO]	= { .type = NLA_U16 },
722 	[TCA_FLOWER_KEY_L2TPV3_SID]	= { .type = NLA_U32 },
723 
724 };
725 
726 static const struct nla_policy
727 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
728 	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]        = {
729 		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
730 	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
731 	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
732 	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
733 	[TCA_FLOWER_KEY_ENC_OPTS_GTP]		= { .type = NLA_NESTED },
734 };
735 
736 static const struct nla_policy
737 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
738 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
739 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
740 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
741 						       .len = 128 },
742 };
743 
744 static const struct nla_policy
745 vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
746 	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]         = { .type = NLA_U32 },
747 };
748 
749 static const struct nla_policy
750 erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
751 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]        = { .type = NLA_U8 },
752 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]      = { .type = NLA_U32 },
753 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]        = { .type = NLA_U8 },
754 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
755 };
756 
757 static const struct nla_policy
758 gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = {
759 	[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]	   = { .type = NLA_U8 },
760 	[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]	   = { .type = NLA_U8 },
761 };
762 
763 static const struct nla_policy
764 mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
765 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]    = { .type = NLA_U8 },
766 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]      = { .type = NLA_U8 },
767 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]      = { .type = NLA_U8 },
768 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]       = { .type = NLA_U8 },
769 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]    = { .type = NLA_U32 },
770 };
771 
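/* Copy a key value attribute and its mask into the flow key. When no mask
 * attribute exists for this key (mask_type == TCA_FLOWER_UNSPEC) or the
 * mask attribute was not supplied, an exact match is assumed and the mask
 * is filled with all ones.
 */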
772 static void fl_set_key_val(struct nlattr **tb,
773 			   void *val, int val_type,
774 			   void *mask, int mask_type, int len)
775 {
776 	if (!tb[val_type])
777 		return;
778 	nla_memcpy(val, tb[val_type], len);
779 	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
780 		memset(mask, 0xff, len);
781 	else
782 		nla_memcpy(mask, tb[mask_type], len);
783 }
784 
785 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
786 				 struct fl_flow_key *mask,
787 				 struct netlink_ext_ack *extack)
788 {
789 	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
790 		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
791 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
792 	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
793 		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
794 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
795 	fl_set_key_val(tb, &key->tp_range.tp_min.src,
796 		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
797 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
798 	fl_set_key_val(tb, &key->tp_range.tp_max.src,
799 		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
800 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
801 
802 	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
803 	    ntohs(key->tp_range.tp_max.dst) <=
804 	    ntohs(key->tp_range.tp_min.dst)) {
805 		NL_SET_ERR_MSG_ATTR(extack,
806 				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
807 				    "Invalid destination port range (min must be strictly smaller than max)");
808 		return -EINVAL;
809 	}
810 	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
811 	    ntohs(key->tp_range.tp_max.src) <=
812 	    ntohs(key->tp_range.tp_min.src)) {
813 		NL_SET_ERR_MSG_ATTR(extack,
814 				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
815 				    "Invalid source port range (min must be strictly smaller than max)");
816 		return -EINVAL;
817 	}
818 
819 	return 0;
820 }
821 
822 static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
823 			       struct flow_dissector_key_mpls *key_val,
824 			       struct flow_dissector_key_mpls *key_mask,
825 			       struct netlink_ext_ack *extack)
826 {
827 	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
828 	struct flow_dissector_mpls_lse *lse_mask;
829 	struct flow_dissector_mpls_lse *lse_val;
830 	u8 lse_index;
831 	u8 depth;
832 	int err;
833 
834 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
835 			       mpls_stack_entry_policy, extack);
836 	if (err < 0)
837 		return err;
838 
839 	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
840 		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
841 		return -EINVAL;
842 	}
843 
844 	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);
845 
846 	/* LSE depth starts at 1, for consistency with terminology used by
847 	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
848 	 */
849 	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
850 		NL_SET_ERR_MSG_ATTR(extack,
851 				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
852 				    "Invalid MPLS depth");
853 		return -EINVAL;
854 	}
855 	lse_index = depth - 1;
856 
857 	dissector_set_mpls_lse(key_val, lse_index);
858 	dissector_set_mpls_lse(key_mask, lse_index);
859 
860 	lse_val = &key_val->ls[lse_index];
861 	lse_mask = &key_mask->ls[lse_index];
862 
863 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
864 		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
865 		lse_mask->mpls_ttl = MPLS_TTL_MASK;
866 	}
867 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
868 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);
869 
870 		if (bos & ~MPLS_BOS_MASK) {
871 			NL_SET_ERR_MSG_ATTR(extack,
872 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
873 					    "Bottom Of Stack (BOS) must be 0 or 1");
874 			return -EINVAL;
875 		}
876 		lse_val->mpls_bos = bos;
877 		lse_mask->mpls_bos = MPLS_BOS_MASK;
878 	}
879 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
880 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);
881 
882 		if (tc & ~MPLS_TC_MASK) {
883 			NL_SET_ERR_MSG_ATTR(extack,
884 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
885 					    "Traffic Class (TC) must be between 0 and 7");
886 			return -EINVAL;
887 		}
888 		lse_val->mpls_tc = tc;
889 		lse_mask->mpls_tc = MPLS_TC_MASK;
890 	}
891 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
892 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);
893 
894 		if (label & ~MPLS_LABEL_MASK) {
895 			NL_SET_ERR_MSG_ATTR(extack,
896 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
897 					    "Label must be between 0 and 1048575");
898 			return -EINVAL;
899 		}
900 		lse_val->mpls_label = label;
901 		lse_mask->mpls_label = MPLS_LABEL_MASK;
902 	}
903 
904 	return 0;
905 }
906 
907 static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
908 				struct flow_dissector_key_mpls *key_val,
909 				struct flow_dissector_key_mpls *key_mask,
910 				struct netlink_ext_ack *extack)
911 {
912 	struct nlattr *nla_lse;
913 	int rem;
914 	int err;
915 
916 	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
917 		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
918 				    "NLA_F_NESTED is missing");
919 		return -EINVAL;
920 	}
921 
922 	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
923 		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
924 			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
925 					    "Invalid MPLS option type");
926 			return -EINVAL;
927 		}
928 
929 		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
930 		if (err < 0)
931 			return err;
932 	}
933 	if (rem) {
934 		NL_SET_ERR_MSG(extack,
935 			       "Bytes leftover after parsing MPLS options");
936 		return -EINVAL;
937 	}
938 
939 	return 0;
940 }
941 
942 static int fl_set_key_mpls(struct nlattr **tb,
943 			   struct flow_dissector_key_mpls *key_val,
944 			   struct flow_dissector_key_mpls *key_mask,
945 			   struct netlink_ext_ack *extack)
946 {
947 	struct flow_dissector_mpls_lse *lse_mask;
948 	struct flow_dissector_mpls_lse *lse_val;
949 
950 	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
951 		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
952 		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
953 		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
954 		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
955 			NL_SET_ERR_MSG_ATTR(extack,
956 					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
957 					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
958 			return -EBADMSG;
959 		}
960 
961 		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
962 					    key_val, key_mask, extack);
963 	}
964 
965 	lse_val = &key_val->ls[0];
966 	lse_mask = &key_mask->ls[0];
967 
968 	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
969 		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
970 		lse_mask->mpls_ttl = MPLS_TTL_MASK;
971 		dissector_set_mpls_lse(key_val, 0);
972 		dissector_set_mpls_lse(key_mask, 0);
973 	}
974 	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
975 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
976 
977 		if (bos & ~MPLS_BOS_MASK) {
978 			NL_SET_ERR_MSG_ATTR(extack,
979 					    tb[TCA_FLOWER_KEY_MPLS_BOS],
980 					    "Bottom Of Stack (BOS) must be 0 or 1");
981 			return -EINVAL;
982 		}
983 		lse_val->mpls_bos = bos;
984 		lse_mask->mpls_bos = MPLS_BOS_MASK;
985 		dissector_set_mpls_lse(key_val, 0);
986 		dissector_set_mpls_lse(key_mask, 0);
987 	}
988 	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
989 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
990 
991 		if (tc & ~MPLS_TC_MASK) {
992 			NL_SET_ERR_MSG_ATTR(extack,
993 					    tb[TCA_FLOWER_KEY_MPLS_TC],
994 					    "Traffic Class (TC) must be between 0 and 7");
995 			return -EINVAL;
996 		}
997 		lse_val->mpls_tc = tc;
998 		lse_mask->mpls_tc = MPLS_TC_MASK;
999 		dissector_set_mpls_lse(key_val, 0);
1000 		dissector_set_mpls_lse(key_mask, 0);
1001 	}
1002 	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
1003 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
1004 
1005 		if (label & ~MPLS_LABEL_MASK) {
1006 			NL_SET_ERR_MSG_ATTR(extack,
1007 					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
1008 					    "Label must be between 0 and 1048575");
1009 			return -EINVAL;
1010 		}
1011 		lse_val->mpls_label = label;
1012 		lse_mask->mpls_label = MPLS_LABEL_MASK;
1013 		dissector_set_mpls_lse(key_val, 0);
1014 		dissector_set_mpls_lse(key_mask, 0);
1015 	}
1016 	return 0;
1017 }
1018 
1019 static void fl_set_key_vlan(struct nlattr **tb,
1020 			    __be16 ethertype,
1021 			    int vlan_id_key, int vlan_prio_key,
1022 			    int vlan_next_eth_type_key,
1023 			    struct flow_dissector_key_vlan *key_val,
1024 			    struct flow_dissector_key_vlan *key_mask)
1025 {
1026 #define VLAN_PRIORITY_MASK	0x7
1027 
1028 	if (tb[vlan_id_key]) {
1029 		key_val->vlan_id =
1030 			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
1031 		key_mask->vlan_id = VLAN_VID_MASK;
1032 	}
1033 	if (tb[vlan_prio_key]) {
1034 		key_val->vlan_priority =
1035 			nla_get_u8(tb[vlan_prio_key]) &
1036 			VLAN_PRIORITY_MASK;
1037 		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
1038 	}
1039 	if (ethertype) {
1040 		key_val->vlan_tpid = ethertype;
1041 		key_mask->vlan_tpid = cpu_to_be16(~0);
1042 	}
1043 	if (tb[vlan_next_eth_type_key]) {
1044 		key_val->vlan_eth_type =
1045 			nla_get_be16(tb[vlan_next_eth_type_key]);
1046 		key_mask->vlan_eth_type = cpu_to_be16(~0);
1047 	}
1048 }
1049 
1050 static void fl_set_key_pppoe(struct nlattr **tb,
1051 			     struct flow_dissector_key_pppoe *key_val,
1052 			     struct flow_dissector_key_pppoe *key_mask,
1053 			     struct fl_flow_key *key,
1054 			     struct fl_flow_key *mask)
1055 {
1056 	/* key_val::type must be set to ETH_P_PPP_SES
1057 	 * because ETH_P_PPP_SES was stored in basic.n_proto,
1058 	 * which might get overwritten by ppp_proto
1059 	 * or might be set to 0. The role of key_val::type
1060 	 * is similar to that of vlan_key::tpid.
1061 	 */
1062 	key_val->type = htons(ETH_P_PPP_SES);
1063 	key_mask->type = cpu_to_be16(~0);
1064 
1065 	if (tb[TCA_FLOWER_KEY_PPPOE_SID]) {
1066 		key_val->session_id =
1067 			nla_get_be16(tb[TCA_FLOWER_KEY_PPPOE_SID]);
1068 		key_mask->session_id = cpu_to_be16(~0);
1069 	}
1070 	if (tb[TCA_FLOWER_KEY_PPP_PROTO]) {
1071 		key_val->ppp_proto =
1072 			nla_get_be16(tb[TCA_FLOWER_KEY_PPP_PROTO]);
1073 		key_mask->ppp_proto = cpu_to_be16(~0);
1074 
1075 		if (key_val->ppp_proto == htons(PPP_IP)) {
1076 			key->basic.n_proto = htons(ETH_P_IP);
1077 			mask->basic.n_proto = cpu_to_be16(~0);
1078 		} else if (key_val->ppp_proto == htons(PPP_IPV6)) {
1079 			key->basic.n_proto = htons(ETH_P_IPV6);
1080 			mask->basic.n_proto = cpu_to_be16(~0);
1081 		} else if (key_val->ppp_proto == htons(PPP_MPLS_UC)) {
1082 			key->basic.n_proto = htons(ETH_P_MPLS_UC);
1083 			mask->basic.n_proto = cpu_to_be16(~0);
1084 		} else if (key_val->ppp_proto == htons(PPP_MPLS_MC)) {
1085 			key->basic.n_proto = htons(ETH_P_MPLS_MC);
1086 			mask->basic.n_proto = cpu_to_be16(~0);
1087 		}
1088 	} else {
1089 		key->basic.n_proto = 0;
1090 		mask->basic.n_proto = cpu_to_be16(0);
1091 	}
1092 }
1093 
1094 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
1095 			    u32 *dissector_key, u32 *dissector_mask,
1096 			    u32 flower_flag_bit, u32 dissector_flag_bit)
1097 {
1098 	if (flower_mask & flower_flag_bit) {
1099 		*dissector_mask |= dissector_flag_bit;
1100 		if (flower_key & flower_flag_bit)
1101 			*dissector_key |= dissector_flag_bit;
1102 	}
1103 }
1104 
1105 static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
1106 			    u32 *flags_mask, struct netlink_ext_ack *extack)
1107 {
1108 	u32 key, mask;
1109 
1110 	/* mask is mandatory for flags */
1111 	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
1112 		NL_SET_ERR_MSG(extack, "Missing flags mask");
1113 		return -EINVAL;
1114 	}
1115 
1116 	key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
1117 	mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
1118 
1119 	*flags_key  = 0;
1120 	*flags_mask = 0;
1121 
1122 	fl_set_key_flag(key, mask, flags_key, flags_mask,
1123 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
1124 	fl_set_key_flag(key, mask, flags_key, flags_mask,
1125 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
1126 			FLOW_DIS_FIRST_FRAG);
1127 
1128 	return 0;
1129 }
1130 
1131 static void fl_set_key_ip(struct nlattr **tb, bool encap,
1132 			  struct flow_dissector_key_ip *key,
1133 			  struct flow_dissector_key_ip *mask)
1134 {
1135 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
1136 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
1137 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
1138 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
1139 
1140 	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
1141 	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
1142 }
1143 
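/* Parse one geneve tunnel option. fl_set_enc_opt() calls this twice per
 * option: once for the key (with option_len == 0) and once for the mask
 * (with option_len set to what the key consumed). A depth of 0 means the
 * mask attribute was omitted entirely; an all-ones mask of option_len bytes
 * is then assumed. Returns the number of bytes consumed or a negative errno.
 */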
1144 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
1145 			     int depth, int option_len,
1146 			     struct netlink_ext_ack *extack)
1147 {
1148 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
1149 	struct nlattr *class = NULL, *type = NULL, *data = NULL;
1150 	struct geneve_opt *opt;
1151 	int err, data_len = 0;
1152 
1153 	if (option_len > sizeof(struct geneve_opt))
1154 		data_len = option_len - sizeof(struct geneve_opt);
1155 
1156 	if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
1157 		return -ERANGE;
1158 
1159 	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
1160 	memset(opt, 0xff, option_len);
1161 	opt->length = data_len / 4;
1162 	opt->r1 = 0;
1163 	opt->r2 = 0;
1164 	opt->r3 = 0;
1165 
1166 	/* If no mask has been provided we assume an exact match. */
1167 	if (!depth)
1168 		return sizeof(struct geneve_opt) + data_len;
1169 
1170 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
1171 		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
1172 		return -EINVAL;
1173 	}
1174 
1175 	err = nla_parse_nested_deprecated(tb,
1176 					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
1177 					  nla, geneve_opt_policy, extack);
1178 	if (err < 0)
1179 		return err;
1180 
1181 	/* We are not allowed to omit any of CLASS, TYPE or DATA
1182 	 * fields from the key.
1183 	 */
1184 	if (!option_len &&
1185 	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
1186 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
1187 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
1188 		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
1189 		return -EINVAL;
1190 	}
1191 
1192 	/* Omitting any of CLASS, TYPE or DATA fields is allowed
1193 	 * for the mask.
1194 	 */
1195 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
1196 		int new_len = key->enc_opts.len;
1197 
1198 		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
1199 		data_len = nla_len(data);
1200 		if (data_len < 4) {
1201 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
1202 			return -ERANGE;
1203 		}
1204 		if (data_len % 4) {
1205 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes");
1206 			return -ERANGE;
1207 		}
1208 
1209 		new_len += sizeof(struct geneve_opt) + data_len;
1210 		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
1211 		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
1212 			NL_SET_ERR_MSG(extack, "Tunnel options exceed max size");
1213 			return -ERANGE;
1214 		}
1215 		opt->length = data_len / 4;
1216 		memcpy(opt->opt_data, nla_data(data), data_len);
1217 	}
1218 
1219 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
1220 		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
1221 		opt->opt_class = nla_get_be16(class);
1222 	}
1223 
1224 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
1225 		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
1226 		opt->type = nla_get_u8(type);
1227 	}
1228 
1229 	return sizeof(struct geneve_opt) + data_len;
1230 }
1231 
1232 static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1233 			    int depth, int option_len,
1234 			    struct netlink_ext_ack *extack)
1235 {
1236 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
1237 	struct vxlan_metadata *md;
1238 	int err;
1239 
1240 	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1241 	memset(md, 0xff, sizeof(*md));
1242 
1243 	if (!depth)
1244 		return sizeof(*md);
1245 
1246 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
1247 		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
1248 		return -EINVAL;
1249 	}
1250 
1251 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
1252 			       vxlan_opt_policy, extack);
1253 	if (err < 0)
1254 		return err;
1255 
1256 	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1257 		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
1258 		return -EINVAL;
1259 	}
1260 
1261 	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1262 		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
1263 		md->gbp &= VXLAN_GBP_MASK;
1264 	}
1265 
1266 	return sizeof(*md);
1267 }
1268 
1269 static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1270 			     int depth, int option_len,
1271 			     struct netlink_ext_ack *extack)
1272 {
1273 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
1274 	struct erspan_metadata *md;
1275 	int err;
1276 
1277 	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1278 	memset(md, 0xff, sizeof(*md));
1279 	md->version = 1;
1280 
1281 	if (!depth)
1282 		return sizeof(*md);
1283 
1284 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
1285 		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
1286 		return -EINVAL;
1287 	}
1288 
1289 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
1290 			       erspan_opt_policy, extack);
1291 	if (err < 0)
1292 		return err;
1293 
1294 	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
1295 		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
1296 		return -EINVAL;
1297 	}
1298 
1299 	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
1300 		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);
1301 
1302 	if (md->version == 1) {
1303 		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1304 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
1305 			return -EINVAL;
1306 		}
1307 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1308 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
1309 			memset(&md->u, 0x00, sizeof(md->u));
1310 			md->u.index = nla_get_be32(nla);
1311 		}
1312 	} else if (md->version == 2) {
1313 		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
1314 				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
1315 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
1316 			return -EINVAL;
1317 		}
1318 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
1319 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
1320 			md->u.md2.dir = nla_get_u8(nla);
1321 		}
1322 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
1323 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
1324 			set_hwid(&md->u.md2, nla_get_u8(nla));
1325 		}
1326 	} else {
1327 		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
1328 		return -EINVAL;
1329 	}
1330 
1331 	return sizeof(*md);
1332 }
1333 
1334 static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key,
1335 			  int depth, int option_len,
1336 			  struct netlink_ext_ack *extack)
1337 {
1338 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1];
1339 	struct gtp_pdu_session_info *sinfo;
1340 	u8 len = key->enc_opts.len;
1341 	int err;
1342 
1343 	sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[len];
1344 	memset(sinfo, 0xff, option_len);
1345 
1346 	if (!depth)
1347 		return sizeof(*sinfo);
1348 
1349 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) {
1350 		NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask");
1351 		return -EINVAL;
1352 	}
1353 
1354 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla,
1355 			       gtp_opt_policy, extack);
1356 	if (err < 0)
1357 		return err;
1358 
1359 	if (!option_len &&
1360 	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] ||
1361 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) {
1362 		NL_SET_ERR_MSG_MOD(extack,
1363 				   "Missing tunnel key gtp option pdu type or qfi");
1364 		return -EINVAL;
1365 	}
1366 
1367 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE])
1368 		sinfo->pdu_type =
1369 			nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]);
1370 
1371 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])
1372 		sinfo->qfi = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]);
1373 
1374 	return sizeof(*sinfo);
1375 }
1376 
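/* Parse tunnel (encapsulation) options and their masks. Key and mask
 * options are walked in lockstep, only one dst_opt_type may be used per
 * filter, and the accumulated key and mask option lengths must match.
 */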
1377 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
1378 			  struct fl_flow_key *mask,
1379 			  struct netlink_ext_ack *extack)
1380 {
1381 	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
1382 	int err, option_len, key_depth, msk_depth = 0;
1383 
1384 	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
1385 					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
1386 					     enc_opts_policy, extack);
1387 	if (err)
1388 		return err;
1389 
1390 	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
1391 
1392 	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
1393 		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
1394 						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
1395 						     enc_opts_policy, extack);
1396 		if (err)
1397 			return err;
1398 
1399 		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1400 		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1401 		if (!nla_ok(nla_opt_msk, msk_depth)) {
1402 			NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
1403 			return -EINVAL;
1404 		}
1405 	}
1406 
1407 	nla_for_each_attr(nla_opt_key, nla_enc_key,
1408 			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
1409 		switch (nla_type(nla_opt_key)) {
1410 		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
1411 			if (key->enc_opts.dst_opt_type &&
1412 			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
1413 				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
1414 				return -EINVAL;
1415 			}
1416 			option_len = 0;
1417 			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1418 			option_len = fl_set_geneve_opt(nla_opt_key, key,
1419 						       key_depth, option_len,
1420 						       extack);
1421 			if (option_len < 0)
1422 				return option_len;
1423 
1424 			key->enc_opts.len += option_len;
1425 			/* At the same time we need to parse through the mask
1426 			 * in order to verify exact and mask attribute lengths.
1427 			 */
1428 			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1429 			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
1430 						       msk_depth, option_len,
1431 						       extack);
1432 			if (option_len < 0)
1433 				return option_len;
1434 
1435 			mask->enc_opts.len += option_len;
1436 			if (key->enc_opts.len != mask->enc_opts.len) {
1437 				NL_SET_ERR_MSG(extack, "Key and mask misaligned");
1438 				return -EINVAL;
1439 			}
1440 			break;
1441 		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
1442 			if (key->enc_opts.dst_opt_type) {
1443 				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
1444 				return -EINVAL;
1445 			}
1446 			option_len = 0;
1447 			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1448 			option_len = fl_set_vxlan_opt(nla_opt_key, key,
1449 						      key_depth, option_len,
1450 						      extack);
1451 			if (option_len < 0)
1452 				return option_len;
1453 
1454 			key->enc_opts.len += option_len;
1455 			/* At the same time we need to parse through the mask
1456 			 * in order to verify exact and mask attribute lengths.
1457 			 */
1458 			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1459 			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
1460 						      msk_depth, option_len,
1461 						      extack);
1462 			if (option_len < 0)
1463 				return option_len;
1464 
1465 			mask->enc_opts.len += option_len;
1466 			if (key->enc_opts.len != mask->enc_opts.len) {
1467 				NL_SET_ERR_MSG(extack, "Key and mask misaligned");
1468 				return -EINVAL;
1469 			}
1470 			break;
1471 		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
1472 			if (key->enc_opts.dst_opt_type) {
1473 				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
1474 				return -EINVAL;
1475 			}
1476 			option_len = 0;
1477 			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1478 			option_len = fl_set_erspan_opt(nla_opt_key, key,
1479 						       key_depth, option_len,
1480 						       extack);
1481 			if (option_len < 0)
1482 				return option_len;
1483 
1484 			key->enc_opts.len += option_len;
1485 			/* At the same time we need to parse through the mask
1486 			 * in order to verify exact and mask attribute lengths.
1487 			 */
1488 			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1489 			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
1490 						       msk_depth, option_len,
1491 						       extack);
1492 			if (option_len < 0)
1493 				return option_len;
1494 
1495 			mask->enc_opts.len += option_len;
1496 			if (key->enc_opts.len != mask->enc_opts.len) {
1497 				NL_SET_ERR_MSG(extack, "Key and mask misaligned");
1498 				return -EINVAL;
1499 			}
1500 			break;
1501 		case TCA_FLOWER_KEY_ENC_OPTS_GTP:
1502 			if (key->enc_opts.dst_opt_type) {
1503 				NL_SET_ERR_MSG_MOD(extack,
1504 						   "Duplicate type for gtp options");
1505 				return -EINVAL;
1506 			}
1507 			option_len = 0;
1508 			key->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
1509 			option_len = fl_set_gtp_opt(nla_opt_key, key,
1510 						    key_depth, option_len,
1511 						    extack);
1512 			if (option_len < 0)
1513 				return option_len;
1514 
1515 			key->enc_opts.len += option_len;
1516 			/* At the same time we need to parse through the mask
1517 			 * in order to verify exact and mask attribute lengths.
1518 			 */
1519 			mask->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
1520 			option_len = fl_set_gtp_opt(nla_opt_msk, mask,
1521 						    msk_depth, option_len,
1522 						    extack);
1523 			if (option_len < 0)
1524 				return option_len;
1525 
1526 			mask->enc_opts.len += option_len;
1527 			if (key->enc_opts.len != mask->enc_opts.len) {
1528 				NL_SET_ERR_MSG_MOD(extack,
1529 						   "Key and mask misaligned");
1530 				return -EINVAL;
1531 			}
1532 			break;
1533 		default:
1534 			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1535 			return -EINVAL;
1536 		}
1537 
1538 		if (!msk_depth)
1539 			continue;
1540 
1541 		if (!nla_ok(nla_opt_msk, msk_depth)) {
1542 			NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
1543 			return -EINVAL;
1544 		}
1545 		nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1546 	}
1547 
1548 	return 0;
1549 }
1550 
1551 static int fl_validate_ct_state(u16 state, struct nlattr *tb,
1552 				struct netlink_ext_ack *extack)
1553 {
1554 	if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
1555 		NL_SET_ERR_MSG_ATTR(extack, tb,
1556 				    "no trk, so no other flag can be set");
1557 		return -EINVAL;
1558 	}
1559 
1560 	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1561 	    state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
1562 		NL_SET_ERR_MSG_ATTR(extack, tb,
1563 				    "new and est are mutually exclusive");
1564 		return -EINVAL;
1565 	}
1566 
1567 	if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID &&
1568 	    state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
1569 		      TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
1570 		NL_SET_ERR_MSG_ATTR(extack, tb,
1571 				    "when inv is set, only trk may be set");
1572 		return -EINVAL;
1573 	}
1574 
1575 	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1576 	    state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
1577 		NL_SET_ERR_MSG_ATTR(extack, tb,
1578 				    "new and rpl are mutually exclusive");
1579 		return -EINVAL;
1580 	}
1581 
1582 	return 0;
1583 }
1584 
1585 static int fl_set_key_ct(struct nlattr **tb,
1586 			 struct flow_dissector_key_ct *key,
1587 			 struct flow_dissector_key_ct *mask,
1588 			 struct netlink_ext_ack *extack)
1589 {
1590 	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
1591 		int err;
1592 
1593 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1594 			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1595 			return -EOPNOTSUPP;
1596 		}
1597 		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1598 			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1599 			       sizeof(key->ct_state));
1600 
1601 		err = fl_validate_ct_state(key->ct_state & mask->ct_state,
1602 					   tb[TCA_FLOWER_KEY_CT_STATE_MASK],
1603 					   extack);
1604 		if (err)
1605 			return err;
1606 
1607 	}
1608 	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1609 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1610 			NL_SET_ERR_MSG(extack, "Conntrack zones aren't enabled");
1611 			return -EOPNOTSUPP;
1612 		}
1613 		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1614 			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1615 			       sizeof(key->ct_zone));
1616 	}
1617 	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1618 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1619 			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1620 			return -EOPNOTSUPP;
1621 		}
1622 		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1623 			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1624 			       sizeof(key->ct_mark));
1625 	}
1626 	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1627 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1628 			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1629 			return -EOPNOTSUPP;
1630 		}
1631 		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1632 			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1633 			       sizeof(key->ct_labels));
1634 	}
1635 
1636 	return 0;
1637 }
1638 
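/* Decide whether the ethertype attribute describes a VLAN-tagged key.  Return
 * true if the attribute carries a VLAN TPID, or if the user already asked for
 * more than @vthresh VLAN tags via TCA_FLOWER_KEY_NUM_OF_VLANS.  Otherwise
 * record the ethertype as an exact match on basic.n_proto and return false so
 * the caller skips VLAN key parsing.
 */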
1639 static bool is_vlan_key(struct nlattr *tb, __be16 *ethertype,
1640 			struct fl_flow_key *key, struct fl_flow_key *mask,
1641 			int vthresh)
1642 {
1643 	const bool good_num_of_vlans = key->num_of_vlans.num_of_vlans > vthresh;
1644 
1645 	if (!tb) {
1646 		*ethertype = 0;
1647 		return good_num_of_vlans;
1648 	}
1649 
1650 	*ethertype = nla_get_be16(tb);
1651 	if (good_num_of_vlans || eth_type_vlan(*ethertype))
1652 		return true;
1653 
1654 	key->basic.n_proto = *ethertype;
1655 	mask->basic.n_proto = cpu_to_be16(~0);
1656 	return false;
1657 }
1658 
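/* Translate the TCA_FLOWER_* attributes into a key/mask pair.  As a rough
 * illustration (iproute2 command shown for context only), a rule like
 *
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		ip_proto tcp dst_port 80 action drop
 *
 * reaches this point with key->basic.n_proto = htons(ETH_P_IP),
 * key->basic.ip_proto = IPPROTO_TCP and key->tp.dst = htons(80), each with an
 * all-ones mask, while attributes that were not supplied leave their mask
 * bytes zeroed.
 */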
1659 static int fl_set_key(struct net *net, struct nlattr **tb,
1660 		      struct fl_flow_key *key, struct fl_flow_key *mask,
1661 		      struct netlink_ext_ack *extack)
1662 {
1663 	__be16 ethertype;
1664 	int ret = 0;
1665 
1666 	if (tb[TCA_FLOWER_INDEV]) {
1667 		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1668 		if (err < 0)
1669 			return err;
1670 		key->meta.ingress_ifindex = err;
1671 		mask->meta.ingress_ifindex = 0xffffffff;
1672 	}
1673 
1674 	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1675 		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1676 		       sizeof(key->eth.dst));
1677 	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1678 		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1679 		       sizeof(key->eth.src));
1680 	fl_set_key_val(tb, &key->num_of_vlans,
1681 		       TCA_FLOWER_KEY_NUM_OF_VLANS,
1682 		       &mask->num_of_vlans,
1683 		       TCA_FLOWER_UNSPEC,
1684 		       sizeof(key->num_of_vlans));
1685 
1686 	if (is_vlan_key(tb[TCA_FLOWER_KEY_ETH_TYPE], &ethertype, key, mask, 0)) {
1687 		fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1688 				TCA_FLOWER_KEY_VLAN_PRIO,
1689 				TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1690 				&key->vlan, &mask->vlan);
1691 
1692 		if (is_vlan_key(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE],
1693 				&ethertype, key, mask, 1)) {
1694 			fl_set_key_vlan(tb, ethertype,
1695 					TCA_FLOWER_KEY_CVLAN_ID,
1696 					TCA_FLOWER_KEY_CVLAN_PRIO,
1697 					TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1698 					&key->cvlan, &mask->cvlan);
1699 			fl_set_key_val(tb, &key->basic.n_proto,
1700 				       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1701 				       &mask->basic.n_proto,
1702 				       TCA_FLOWER_UNSPEC,
1703 				       sizeof(key->basic.n_proto));
1704 		}
1705 	}
1706 
1707 	if (key->basic.n_proto == htons(ETH_P_PPP_SES))
1708 		fl_set_key_pppoe(tb, &key->pppoe, &mask->pppoe, key, mask);
1709 
1710 	if (key->basic.n_proto == htons(ETH_P_IP) ||
1711 	    key->basic.n_proto == htons(ETH_P_IPV6)) {
1712 		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1713 			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1714 			       sizeof(key->basic.ip_proto));
1715 		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1716 	}
1717 
1718 	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1719 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1720 		mask->control.addr_type = ~0;
1721 		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1722 			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1723 			       sizeof(key->ipv4.src));
1724 		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1725 			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1726 			       sizeof(key->ipv4.dst));
1727 	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1728 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1729 		mask->control.addr_type = ~0;
1730 		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1731 			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1732 			       sizeof(key->ipv6.src));
1733 		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1734 			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1735 			       sizeof(key->ipv6.dst));
1736 	}
1737 
1738 	if (key->basic.ip_proto == IPPROTO_TCP) {
1739 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1740 			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1741 			       sizeof(key->tp.src));
1742 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1743 			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1744 			       sizeof(key->tp.dst));
1745 		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1746 			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1747 			       sizeof(key->tcp.flags));
1748 	} else if (key->basic.ip_proto == IPPROTO_UDP) {
1749 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1750 			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1751 			       sizeof(key->tp.src));
1752 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1753 			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1754 			       sizeof(key->tp.dst));
1755 	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
1756 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1757 			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1758 			       sizeof(key->tp.src));
1759 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1760 			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1761 			       sizeof(key->tp.dst));
1762 	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
1763 		   key->basic.ip_proto == IPPROTO_ICMP) {
1764 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1765 			       &mask->icmp.type,
1766 			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1767 			       sizeof(key->icmp.type));
1768 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1769 			       &mask->icmp.code,
1770 			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1771 			       sizeof(key->icmp.code));
1772 	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1773 		   key->basic.ip_proto == IPPROTO_ICMPV6) {
1774 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1775 			       &mask->icmp.type,
1776 			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1777 			       sizeof(key->icmp.type));
1778 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1779 			       &mask->icmp.code,
1780 			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1781 			       sizeof(key->icmp.code));
1782 	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1783 		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1784 		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
1785 		if (ret)
1786 			return ret;
1787 	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1788 		   key->basic.n_proto == htons(ETH_P_RARP)) {
1789 		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1790 			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1791 			       sizeof(key->arp.sip));
1792 		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1793 			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1794 			       sizeof(key->arp.tip));
1795 		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1796 			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1797 			       sizeof(key->arp.op));
1798 		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1799 			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1800 			       sizeof(key->arp.sha));
1801 		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1802 			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1803 			       sizeof(key->arp.tha));
1804 	} else if (key->basic.ip_proto == IPPROTO_L2TP) {
1805 		fl_set_key_val(tb, &key->l2tpv3.session_id,
1806 			       TCA_FLOWER_KEY_L2TPV3_SID,
1807 			       &mask->l2tpv3.session_id, TCA_FLOWER_UNSPEC,
1808 			       sizeof(key->l2tpv3.session_id));
1809 	}
1810 
1811 	if (key->basic.ip_proto == IPPROTO_TCP ||
1812 	    key->basic.ip_proto == IPPROTO_UDP ||
1813 	    key->basic.ip_proto == IPPROTO_SCTP) {
1814 		ret = fl_set_key_port_range(tb, key, mask, extack);
1815 		if (ret)
1816 			return ret;
1817 	}
1818 
1819 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1820 	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1821 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1822 		mask->enc_control.addr_type = ~0;
1823 		fl_set_key_val(tb, &key->enc_ipv4.src,
1824 			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
1825 			       &mask->enc_ipv4.src,
1826 			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1827 			       sizeof(key->enc_ipv4.src));
1828 		fl_set_key_val(tb, &key->enc_ipv4.dst,
1829 			       TCA_FLOWER_KEY_ENC_IPV4_DST,
1830 			       &mask->enc_ipv4.dst,
1831 			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1832 			       sizeof(key->enc_ipv4.dst));
1833 	}
1834 
1835 	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1836 	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1837 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1838 		mask->enc_control.addr_type = ~0;
1839 		fl_set_key_val(tb, &key->enc_ipv6.src,
1840 			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
1841 			       &mask->enc_ipv6.src,
1842 			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1843 			       sizeof(key->enc_ipv6.src));
1844 		fl_set_key_val(tb, &key->enc_ipv6.dst,
1845 			       TCA_FLOWER_KEY_ENC_IPV6_DST,
1846 			       &mask->enc_ipv6.dst,
1847 			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1848 			       sizeof(key->enc_ipv6.dst));
1849 	}
1850 
1851 	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1852 		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1853 		       sizeof(key->enc_key_id.keyid));
1854 
1855 	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1856 		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1857 		       sizeof(key->enc_tp.src));
1858 
1859 	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1860 		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1861 		       sizeof(key->enc_tp.dst));
1862 
1863 	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1864 
1865 	fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1866 		       &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1867 		       sizeof(key->hash.hash));
1868 
1869 	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1870 		ret = fl_set_enc_opt(tb, key, mask, extack);
1871 		if (ret)
1872 			return ret;
1873 	}
1874 
1875 	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1876 	if (ret)
1877 		return ret;
1878 
1879 	if (tb[TCA_FLOWER_KEY_FLAGS])
1880 		ret = fl_set_key_flags(tb, &key->control.flags,
1881 				       &mask->control.flags, extack);
1882 
1883 	return ret;
1884 }
1885 
1886 static void fl_mask_copy(struct fl_flow_mask *dst,
1887 			 struct fl_flow_mask *src)
1888 {
1889 	const void *psrc = fl_key_get_start(&src->key, src);
1890 	void *pdst = fl_key_get_start(&dst->key, src);
1891 
1892 	memcpy(pdst, psrc, fl_mask_range(src));
1893 	dst->range = src->range;
1894 }
1895 
1896 static const struct rhashtable_params fl_ht_params = {
1897 	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1898 	.head_offset = offsetof(struct cls_fl_filter, ht_node),
1899 	.automatic_shrinking = true,
1900 };
1901 
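/* Each mask owns a hashtable of the filters that use it.  Only the span of
 * key bytes in which the mask is non-zero ([range.start, range.end), as
 * computed by fl_mask_update_range()) is hashed: key_offset is advanced past
 * the leading all-zero bytes and key_len is shrunk to the used range.
 */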
1902 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1903 {
1904 	mask->filter_ht_params = fl_ht_params;
1905 	mask->filter_ht_params.key_len = fl_mask_range(mask);
1906 	mask->filter_ht_params.key_offset += mask->range.start;
1907 
1908 	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1909 }
1910 
1911 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1912 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
1913 
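/* FL_KEY_IS_MASKED() reports whether any byte of @member is non-zero in the
 * mask, i.e. whether the filter matches on that field at all.  FL_KEY_SET()
 * appends the corresponding flow dissector key to the keys array, and
 * FL_KEY_SET_IF_MASKED() combines the two.
 */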
1914 #define FL_KEY_IS_MASKED(mask, member)						\
1915 	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
1916 		   0, FL_KEY_MEMBER_SIZE(member))				\
1917 
1918 #define FL_KEY_SET(keys, cnt, id, member)					\
1919 	do {									\
1920 		keys[cnt].key_id = id;						\
1921 		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
1922 		cnt++;								\
1923 	} while(0);
1924 
1925 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
1926 	do {									\
1927 		if (FL_KEY_IS_MASKED(mask, member))				\
1928 			FL_KEY_SET(keys, cnt, id, member);			\
1929 	} while(0);
1930 
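/* Build the flow dissector used to pull this mask's fields out of packets.
 * CONTROL and BASIC are always dissected; the remaining keys are added only
 * when the corresponding part of the mask is non-zero, so per-packet
 * dissection does no more work than the mask requires.
 */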
1931 static void fl_init_dissector(struct flow_dissector *dissector,
1932 			      struct fl_flow_key *mask)
1933 {
1934 	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1935 	size_t cnt = 0;
1936 
1937 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1938 			     FLOW_DISSECTOR_KEY_META, meta);
1939 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1940 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1941 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1942 			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1943 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1944 			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1945 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1946 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1947 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1948 			     FLOW_DISSECTOR_KEY_PORTS, tp);
1949 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1950 			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
1951 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1952 			     FLOW_DISSECTOR_KEY_IP, ip);
1953 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1954 			     FLOW_DISSECTOR_KEY_TCP, tcp);
1955 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1956 			     FLOW_DISSECTOR_KEY_ICMP, icmp);
1957 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1958 			     FLOW_DISSECTOR_KEY_ARP, arp);
1959 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1960 			     FLOW_DISSECTOR_KEY_MPLS, mpls);
1961 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1962 			     FLOW_DISSECTOR_KEY_VLAN, vlan);
1963 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1964 			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1965 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1966 			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1967 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1968 			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1969 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1970 			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1971 	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1972 	    FL_KEY_IS_MASKED(mask, enc_ipv6))
1973 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1974 			   enc_control);
1975 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1976 			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1977 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1978 			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1979 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1980 			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1981 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1982 			     FLOW_DISSECTOR_KEY_CT, ct);
1983 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1984 			     FLOW_DISSECTOR_KEY_HASH, hash);
1985 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1986 			     FLOW_DISSECTOR_KEY_NUM_OF_VLANS, num_of_vlans);
1987 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1988 			     FLOW_DISSECTOR_KEY_PPPOE, pppoe);
1989 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1990 			     FLOW_DISSECTOR_KEY_L2TPV3, l2tpv3);
1991 
1992 	skb_flow_dissector_init(dissector, keys, cnt);
1993 }
1994 
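/* Turn the caller's temporary mask into a real, refcounted one: copy the used
 * key range, initialize the per-mask filter hashtable and dissector, and
 * replace the temporary node that fl_check_assign_mask() inserted into
 * head->ht so the key never disappears from the table in between.
 */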
1995 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1996 					       struct fl_flow_mask *mask)
1997 {
1998 	struct fl_flow_mask *newmask;
1999 	int err;
2000 
2001 	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
2002 	if (!newmask)
2003 		return ERR_PTR(-ENOMEM);
2004 
2005 	fl_mask_copy(newmask, mask);
2006 
2007 	if ((newmask->key.tp_range.tp_min.dst &&
2008 	     newmask->key.tp_range.tp_max.dst) ||
2009 	    (newmask->key.tp_range.tp_min.src &&
2010 	     newmask->key.tp_range.tp_max.src))
2011 		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
2012 
2013 	err = fl_init_mask_hashtable(newmask);
2014 	if (err)
2015 		goto errout_free;
2016 
2017 	fl_init_dissector(&newmask->dissector, &newmask->key);
2018 
2019 	INIT_LIST_HEAD_RCU(&newmask->filters);
2020 
2021 	refcount_set(&newmask->refcnt, 1);
2022 	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
2023 				      &newmask->ht_node, mask_ht_params);
2024 	if (err)
2025 		goto errout_destroy;
2026 
2027 	spin_lock(&head->masks_lock);
2028 	list_add_tail_rcu(&newmask->list, &head->masks);
2029 	spin_unlock(&head->masks_lock);
2030 
2031 	return newmask;
2032 
2033 errout_destroy:
2034 	rhashtable_destroy(&newmask->ht);
2035 errout_free:
2036 	kfree(newmask);
2037 
2038 	return ERR_PTR(err);
2039 }
2040 
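/* Look up an existing mask with the same key bytes or create a new one.  On
 * success fnew->mask holds a reference.  -EINVAL is returned when overwriting
 * a filter would change its mask, and -EAGAIN when the mask is concurrently
 * being created or deleted and the caller should retry.
 */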
2041 static int fl_check_assign_mask(struct cls_fl_head *head,
2042 				struct cls_fl_filter *fnew,
2043 				struct cls_fl_filter *fold,
2044 				struct fl_flow_mask *mask)
2045 {
2046 	struct fl_flow_mask *newmask;
2047 	int ret = 0;
2048 
2049 	rcu_read_lock();
2050 
2051 	/* Insert the mask as a temporary node to prevent concurrent creation of
2052 	 * a mask with the same key. Any concurrent lookup with the same key will
2053 	 * return -EAGAIN because the mask's refcnt is zero.
2054 	 */
2055 	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
2056 						       &mask->ht_node,
2057 						       mask_ht_params);
2058 	if (!fnew->mask) {
2059 		rcu_read_unlock();
2060 
2061 		if (fold) {
2062 			ret = -EINVAL;
2063 			goto errout_cleanup;
2064 		}
2065 
2066 		newmask = fl_create_new_mask(head, mask);
2067 		if (IS_ERR(newmask)) {
2068 			ret = PTR_ERR(newmask);
2069 			goto errout_cleanup;
2070 		}
2071 
2072 		fnew->mask = newmask;
2073 		return 0;
2074 	} else if (IS_ERR(fnew->mask)) {
2075 		ret = PTR_ERR(fnew->mask);
2076 	} else if (fold && fold->mask != fnew->mask) {
2077 		ret = -EINVAL;
2078 	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
2079 		/* Mask was deleted concurrently, try again */
2080 		ret = -EAGAIN;
2081 	}
2082 	rcu_read_unlock();
2083 	return ret;
2084 
2085 errout_cleanup:
2086 	rhashtable_remove_fast(&head->ht, &mask->ht_node,
2087 			       mask_ht_params);
2088 	return ret;
2089 }
2090 
2091 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
2092 			struct cls_fl_filter *f, struct fl_flow_mask *mask,
2093 			unsigned long base, struct nlattr **tb,
2094 			struct nlattr *est,
2095 			struct fl_flow_tmplt *tmplt,
2096 			u32 flags, u32 fl_flags,
2097 			struct netlink_ext_ack *extack)
2098 {
2099 	int err;
2100 
2101 	err = tcf_exts_validate_ex(net, tp, tb, est, &f->exts, flags,
2102 				   fl_flags, extack);
2103 	if (err < 0)
2104 		return err;
2105 
2106 	if (tb[TCA_FLOWER_CLASSID]) {
2107 		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
2108 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
2109 			rtnl_lock();
2110 		tcf_bind_filter(tp, &f->res, base);
2111 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
2112 			rtnl_unlock();
2113 	}
2114 
2115 	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
2116 	if (err)
2117 		return err;
2118 
2119 	fl_mask_update_range(mask);
2120 	fl_set_masked_key(&f->mkey, &f->key, mask);
2121 
2122 	if (!fl_mask_fits_tmplt(tmplt, mask)) {
2123 		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
2124 		return -EINVAL;
2125 	}
2126 
2127 	return 0;
2128 }
2129 
2130 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
2131 			       struct cls_fl_filter *fold,
2132 			       bool *in_ht)
2133 {
2134 	struct fl_flow_mask *mask = fnew->mask;
2135 	int err;
2136 
2137 	err = rhashtable_lookup_insert_fast(&mask->ht,
2138 					    &fnew->ht_node,
2139 					    mask->filter_ht_params);
2140 	if (err) {
2141 		*in_ht = false;
2142 		/* It is okay if filter with same key exists when
2143 		 * overwriting.
2144 		 */
2145 		return fold && err == -EEXIST ? 0 : err;
2146 	}
2147 
2148 	*in_ht = true;
2149 	return 0;
2150 }
2151 
2152 static int fl_change(struct net *net, struct sk_buff *in_skb,
2153 		     struct tcf_proto *tp, unsigned long base,
2154 		     u32 handle, struct nlattr **tca,
2155 		     void **arg, u32 flags,
2156 		     struct netlink_ext_ack *extack)
2157 {
2158 	struct cls_fl_head *head = fl_head_dereference(tp);
2159 	bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
2160 	struct cls_fl_filter *fold = *arg;
2161 	struct cls_fl_filter *fnew;
2162 	struct fl_flow_mask *mask;
2163 	struct nlattr **tb;
2164 	bool in_ht;
2165 	int err;
2166 
2167 	if (!tca[TCA_OPTIONS]) {
2168 		err = -EINVAL;
2169 		goto errout_fold;
2170 	}
2171 
2172 	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
2173 	if (!mask) {
2174 		err = -ENOBUFS;
2175 		goto errout_fold;
2176 	}
2177 
2178 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2179 	if (!tb) {
2180 		err = -ENOBUFS;
2181 		goto errout_mask_alloc;
2182 	}
2183 
2184 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2185 					  tca[TCA_OPTIONS], fl_policy, NULL);
2186 	if (err < 0)
2187 		goto errout_tb;
2188 
2189 	if (fold && handle && fold->handle != handle) {
2190 		err = -EINVAL;
2191 		goto errout_tb;
2192 	}
2193 
2194 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
2195 	if (!fnew) {
2196 		err = -ENOBUFS;
2197 		goto errout_tb;
2198 	}
2199 	INIT_LIST_HEAD(&fnew->hw_list);
2200 	refcount_set(&fnew->refcnt, 1);
2201 
2202 	if (tb[TCA_FLOWER_FLAGS]) {
2203 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
2204 
2205 		if (!tc_flags_valid(fnew->flags)) {
2206 			kfree(fnew);
2207 			err = -EINVAL;
2208 			goto errout_tb;
2209 		}
2210 	}
2211 
2212 	if (!fold) {
2213 		spin_lock(&tp->lock);
2214 		if (!handle) {
2215 			handle = 1;
2216 			err = idr_alloc_u32(&head->handle_idr, NULL, &handle,
2217 					    INT_MAX, GFP_ATOMIC);
2218 		} else {
2219 			err = idr_alloc_u32(&head->handle_idr, NULL, &handle,
2220 					    handle, GFP_ATOMIC);
2221 
2222 			/* Filter with specified handle was concurrently
2223 			 * inserted after initial check in cls_api. This is not
2224 			 * necessarily an error if NLM_F_EXCL is not set in
2225 			 * message flags. Returning EAGAIN will cause cls_api to
2226 			 * try to update concurrently inserted rule.
2227 			 */
2228 			if (err == -ENOSPC)
2229 				err = -EAGAIN;
2230 		}
2231 		spin_unlock(&tp->lock);
2232 
2233 		if (err) {
2234 			kfree(fnew);
2235 			goto errout_tb;
2236 		}
2237 	}
2238 	fnew->handle = handle;
2239 
2240 	err = tcf_exts_init_ex(&fnew->exts, net, TCA_FLOWER_ACT, 0, tp, handle,
2241 			       !tc_skip_hw(fnew->flags));
2242 	if (err < 0)
2243 		goto errout_idr;
2244 
2245 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
2246 			   tp->chain->tmplt_priv, flags, fnew->flags,
2247 			   extack);
2248 	if (err)
2249 		goto errout_idr;
2250 
2251 	err = fl_check_assign_mask(head, fnew, fold, mask);
2252 	if (err)
2253 		goto errout_idr;
2254 
2255 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
2256 	if (err)
2257 		goto errout_mask;
2258 
2259 	if (!tc_skip_hw(fnew->flags)) {
2260 		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
2261 		if (err)
2262 			goto errout_ht;
2263 	}
2264 
2265 	if (!tc_in_hw(fnew->flags))
2266 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2267 
2268 	spin_lock(&tp->lock);
2269 
2270 	/* tp was deleted concurrently. -EAGAIN will cause the caller to look up
2271 	 * the proto again or create a new one, if necessary.
2272 	 */
2273 	if (tp->deleting) {
2274 		err = -EAGAIN;
2275 		goto errout_hw;
2276 	}
2277 
2278 	if (fold) {
2279 		/* Fold filter was deleted concurrently. Retry lookup. */
2280 		if (fold->deleted) {
2281 			err = -EAGAIN;
2282 			goto errout_hw;
2283 		}
2284 
2285 		fnew->handle = handle;
2286 
2287 		if (!in_ht) {
2288 			struct rhashtable_params params =
2289 				fnew->mask->filter_ht_params;
2290 
2291 			err = rhashtable_insert_fast(&fnew->mask->ht,
2292 						     &fnew->ht_node,
2293 						     params);
2294 			if (err)
2295 				goto errout_hw;
2296 			in_ht = true;
2297 		}
2298 
2299 		refcount_inc(&fnew->refcnt);
2300 		rhashtable_remove_fast(&fold->mask->ht,
2301 				       &fold->ht_node,
2302 				       fold->mask->filter_ht_params);
2303 		idr_replace(&head->handle_idr, fnew, fnew->handle);
2304 		list_replace_rcu(&fold->list, &fnew->list);
2305 		fold->deleted = true;
2306 
2307 		spin_unlock(&tp->lock);
2308 
2309 		fl_mask_put(head, fold->mask);
2310 		if (!tc_skip_hw(fold->flags))
2311 			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2312 		tcf_unbind_filter(tp, &fold->res);
2313 		/* Caller holds reference to fold, so refcnt is always > 0
2314 		 * after this.
2315 		 */
2316 		refcount_dec(&fold->refcnt);
2317 		__fl_put(fold);
2318 	} else {
2319 		idr_replace(&head->handle_idr, fnew, fnew->handle);
2320 
2321 		refcount_inc(&fnew->refcnt);
2322 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2323 		spin_unlock(&tp->lock);
2324 	}
2325 
2326 	*arg = fnew;
2327 
2328 	kfree(tb);
2329 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2330 	return 0;
2331 
2332 errout_ht:
2333 	spin_lock(&tp->lock);
2334 errout_hw:
2335 	fnew->deleted = true;
2336 	spin_unlock(&tp->lock);
2337 	if (!tc_skip_hw(fnew->flags))
2338 		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2339 	if (in_ht)
2340 		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2341 				       fnew->mask->filter_ht_params);
2342 errout_mask:
2343 	fl_mask_put(head, fnew->mask);
2344 errout_idr:
2345 	if (!fold)
2346 		idr_remove(&head->handle_idr, fnew->handle);
2347 	__fl_put(fnew);
2348 errout_tb:
2349 	kfree(tb);
2350 errout_mask_alloc:
2351 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2352 errout_fold:
2353 	if (fold)
2354 		__fl_put(fold);
2355 	return err;
2356 }
2357 
2358 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2359 		     bool rtnl_held, struct netlink_ext_ack *extack)
2360 {
2361 	struct cls_fl_head *head = fl_head_dereference(tp);
2362 	struct cls_fl_filter *f = arg;
2363 	bool last_on_mask;
2364 	int err = 0;
2365 
2366 	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2367 	*last = list_empty(&head->masks);
2368 	__fl_put(f);
2369 
2370 	return err;
2371 }
2372 
2373 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2374 		    bool rtnl_held)
2375 {
2376 	struct cls_fl_head *head = fl_head_dereference(tp);
2377 	unsigned long id = arg->cookie, tmp;
2378 	struct cls_fl_filter *f;
2379 
2380 	arg->count = arg->skip;
2381 
2382 	rcu_read_lock();
2383 	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2384 		/* don't return filters that are being deleted */
2385 		if (!f || !refcount_inc_not_zero(&f->refcnt))
2386 			continue;
2387 		rcu_read_unlock();
2388 
2389 		if (arg->fn(tp, f, arg) < 0) {
2390 			__fl_put(f);
2391 			arg->stop = 1;
2392 			rcu_read_lock();
2393 			break;
2394 		}
2395 		__fl_put(f);
2396 		arg->count++;
2397 		rcu_read_lock();
2398 	}
2399 	rcu_read_unlock();
2400 	arg->cookie = id;
2401 }
2402 
2403 static struct cls_fl_filter *
2404 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2405 {
2406 	struct cls_fl_head *head = fl_head_dereference(tp);
2407 
2408 	spin_lock(&tp->lock);
2409 	if (list_empty(&head->hw_filters)) {
2410 		spin_unlock(&tp->lock);
2411 		return NULL;
2412 	}
2413 
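	/* With no cursor, point f at the list head itself so that
	 * list_for_each_entry_continue() below starts from the first real
	 * entry on hw_filters.
	 */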
2414 	if (!f)
2415 		f = list_entry(&head->hw_filters, struct cls_fl_filter,
2416 			       hw_list);
2417 	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2418 		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2419 			spin_unlock(&tp->lock);
2420 			return f;
2421 		}
2422 	}
2423 
2424 	spin_unlock(&tp->lock);
2425 	return NULL;
2426 }
2427 
2428 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2429 			void *cb_priv, struct netlink_ext_ack *extack)
2430 {
2431 	struct tcf_block *block = tp->chain->block;
2432 	struct flow_cls_offload cls_flower = {};
2433 	struct cls_fl_filter *f = NULL;
2434 	int err;
2435 
2436 	/* hw_filters list can only be changed by hw offload functions after
2437 	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
2438 	 * iterating it.
2439 	 */
2440 	ASSERT_RTNL();
2441 
2442 	while ((f = fl_get_next_hw_filter(tp, f, add))) {
2443 		cls_flower.rule =
2444 			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2445 		if (!cls_flower.rule) {
2446 			__fl_put(f);
2447 			return -ENOMEM;
2448 		}
2449 
2450 		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2451 					   extack);
2452 		cls_flower.command = add ?
2453 			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2454 		cls_flower.cookie = (unsigned long)f;
2455 		cls_flower.rule->match.dissector = &f->mask->dissector;
2456 		cls_flower.rule->match.mask = &f->mask->key;
2457 		cls_flower.rule->match.key = &f->mkey;
2458 
2459 		err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
2460 					      cls_flower.common.extack);
2461 		if (err) {
2462 			kfree(cls_flower.rule);
2463 			if (tc_skip_sw(f->flags)) {
2464 				__fl_put(f);
2465 				return err;
2466 			}
2467 			goto next_flow;
2468 		}
2469 
2470 		cls_flower.classid = f->res.classid;
2471 
2472 		err = tc_setup_cb_reoffload(block, tp, add, cb,
2473 					    TC_SETUP_CLSFLOWER, &cls_flower,
2474 					    cb_priv, &f->flags,
2475 					    &f->in_hw_count);
2476 		tc_cleanup_offload_action(&cls_flower.rule->action);
2477 		kfree(cls_flower.rule);
2478 
2479 		if (err) {
2480 			__fl_put(f);
2481 			return err;
2482 		}
2483 next_flow:
2484 		__fl_put(f);
2485 	}
2486 
2487 	return 0;
2488 }
2489 
2490 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2491 {
2492 	struct flow_cls_offload *cls_flower = type_data;
2493 	struct cls_fl_filter *f =
2494 		(struct cls_fl_filter *) cls_flower->cookie;
2495 	struct cls_fl_head *head = fl_head_dereference(tp);
2496 
2497 	spin_lock(&tp->lock);
2498 	list_add(&f->hw_list, &head->hw_filters);
2499 	spin_unlock(&tp->lock);
2500 }
2501 
2502 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2503 {
2504 	struct flow_cls_offload *cls_flower = type_data;
2505 	struct cls_fl_filter *f =
2506 		(struct cls_fl_filter *) cls_flower->cookie;
2507 
2508 	spin_lock(&tp->lock);
2509 	if (!list_empty(&f->hw_list))
2510 		list_del_init(&f->hw_list);
2511 	spin_unlock(&tp->lock);
2512 }
2513 
2514 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2515 			      struct fl_flow_tmplt *tmplt)
2516 {
2517 	struct flow_cls_offload cls_flower = {};
2518 	struct tcf_block *block = chain->block;
2519 
2520 	cls_flower.rule = flow_rule_alloc(0);
2521 	if (!cls_flower.rule)
2522 		return -ENOMEM;
2523 
2524 	cls_flower.common.chain_index = chain->index;
2525 	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2526 	cls_flower.cookie = (unsigned long) tmplt;
2527 	cls_flower.rule->match.dissector = &tmplt->dissector;
2528 	cls_flower.rule->match.mask = &tmplt->mask;
2529 	cls_flower.rule->match.key = &tmplt->dummy_key;
2530 
2531 	/* We don't care if any driver fails to handle this
2532 	 * call. It serves only as a hint to them.
2533 	 */
2534 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2535 	kfree(cls_flower.rule);
2536 
2537 	return 0;
2538 }
2539 
2540 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2541 				struct fl_flow_tmplt *tmplt)
2542 {
2543 	struct flow_cls_offload cls_flower = {};
2544 	struct tcf_block *block = chain->block;
2545 
2546 	cls_flower.common.chain_index = chain->index;
2547 	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2548 	cls_flower.cookie = (unsigned long) tmplt;
2549 
2550 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2551 }
2552 
2553 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2554 			     struct nlattr **tca,
2555 			     struct netlink_ext_ack *extack)
2556 {
2557 	struct fl_flow_tmplt *tmplt;
2558 	struct nlattr **tb;
2559 	int err;
2560 
2561 	if (!tca[TCA_OPTIONS])
2562 		return ERR_PTR(-EINVAL);
2563 
2564 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2565 	if (!tb)
2566 		return ERR_PTR(-ENOBUFS);
2567 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2568 					  tca[TCA_OPTIONS], fl_policy, NULL);
2569 	if (err)
2570 		goto errout_tb;
2571 
2572 	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2573 	if (!tmplt) {
2574 		err = -ENOMEM;
2575 		goto errout_tb;
2576 	}
2577 	tmplt->chain = chain;
2578 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2579 	if (err)
2580 		goto errout_tmplt;
2581 
2582 	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2583 
2584 	err = fl_hw_create_tmplt(chain, tmplt);
2585 	if (err)
2586 		goto errout_tmplt;
2587 
2588 	kfree(tb);
2589 	return tmplt;
2590 
2591 errout_tmplt:
2592 	kfree(tmplt);
2593 errout_tb:
2594 	kfree(tb);
2595 	return ERR_PTR(err);
2596 }
2597 
2598 static void fl_tmplt_destroy(void *tmplt_priv)
2599 {
2600 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2601 
2602 	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2603 	kfree(tmplt);
2604 }
2605 
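/* Emit @val (and, unless @mask_type is TCA_FLOWER_UNSPEC, @mask) as netlink
 * attributes, but only if at least one mask byte is non-zero.  Fields the
 * filter does not match on are simply left out of the dump.
 */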
2606 static int fl_dump_key_val(struct sk_buff *skb,
2607 			   void *val, int val_type,
2608 			   void *mask, int mask_type, int len)
2609 {
2610 	int err;
2611 
2612 	if (!memchr_inv(mask, 0, len))
2613 		return 0;
2614 	err = nla_put(skb, val_type, len, val);
2615 	if (err)
2616 		return err;
2617 	if (mask_type != TCA_FLOWER_UNSPEC) {
2618 		err = nla_put(skb, mask_type, len, mask);
2619 		if (err)
2620 			return err;
2621 	}
2622 	return 0;
2623 }
2624 
2625 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2626 				  struct fl_flow_key *mask)
2627 {
2628 	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2629 			    TCA_FLOWER_KEY_PORT_DST_MIN,
2630 			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2631 			    sizeof(key->tp_range.tp_min.dst)) ||
2632 	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2633 			    TCA_FLOWER_KEY_PORT_DST_MAX,
2634 			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2635 			    sizeof(key->tp_range.tp_max.dst)) ||
2636 	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2637 			    TCA_FLOWER_KEY_PORT_SRC_MIN,
2638 			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2639 			    sizeof(key->tp_range.tp_min.src)) ||
2640 	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2641 			    TCA_FLOWER_KEY_PORT_SRC_MAX,
2642 			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2643 			    sizeof(key->tp_range.tp_max.src)))
2644 		return -1;
2645 
2646 	return 0;
2647 }
2648 
2649 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2650 				    struct flow_dissector_key_mpls *mpls_key,
2651 				    struct flow_dissector_key_mpls *mpls_mask,
2652 				    u8 lse_index)
2653 {
2654 	struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2655 	struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2656 	int err;
2657 
2658 	err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2659 			 lse_index + 1);
2660 	if (err)
2661 		return err;
2662 
2663 	if (lse_mask->mpls_ttl) {
2664 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2665 				 lse_key->mpls_ttl);
2666 		if (err)
2667 			return err;
2668 	}
2669 	if (lse_mask->mpls_bos) {
2670 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2671 				 lse_key->mpls_bos);
2672 		if (err)
2673 			return err;
2674 	}
2675 	if (lse_mask->mpls_tc) {
2676 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2677 				 lse_key->mpls_tc);
2678 		if (err)
2679 			return err;
2680 	}
2681 	if (lse_mask->mpls_label) {
2682 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2683 				  lse_key->mpls_label);
2684 		if (err)
2685 			return err;
2686 	}
2687 
2688 	return 0;
2689 }
2690 
2691 static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2692 				 struct flow_dissector_key_mpls *mpls_key,
2693 				 struct flow_dissector_key_mpls *mpls_mask)
2694 {
2695 	struct nlattr *opts;
2696 	struct nlattr *lse;
2697 	u8 lse_index;
2698 	int err;
2699 
2700 	opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2701 	if (!opts)
2702 		return -EMSGSIZE;
2703 
2704 	for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2705 		if (!(mpls_mask->used_lses & 1 << lse_index))
2706 			continue;
2707 
2708 		lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2709 		if (!lse) {
2710 			err = -EMSGSIZE;
2711 			goto err_opts;
2712 		}
2713 
2714 		err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2715 					       lse_index);
2716 		if (err)
2717 			goto err_opts_lse;
2718 		nla_nest_end(skb, lse);
2719 	}
2720 	nla_nest_end(skb, opts);
2721 
2722 	return 0;
2723 
2724 err_opts_lse:
2725 	nla_nest_cancel(skb, lse);
2726 err_opts:
2727 	nla_nest_cancel(skb, opts);
2728 
2729 	return err;
2730 }
2731 
2732 static int fl_dump_key_mpls(struct sk_buff *skb,
2733 			    struct flow_dissector_key_mpls *mpls_key,
2734 			    struct flow_dissector_key_mpls *mpls_mask)
2735 {
2736 	struct flow_dissector_mpls_lse *lse_mask;
2737 	struct flow_dissector_mpls_lse *lse_key;
2738 	int err;
2739 
2740 	if (!mpls_mask->used_lses)
2741 		return 0;
2742 
2743 	lse_mask = &mpls_mask->ls[0];
2744 	lse_key = &mpls_key->ls[0];
2745 
2746 	/* For backward compatibility, don't use the MPLS nested attributes if
2747 	 * the rule can be expressed using the old attributes.
2748 	 */
2749 	if (mpls_mask->used_lses & ~1 ||
2750 	    (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2751 	     !lse_mask->mpls_tc && !lse_mask->mpls_label))
2752 		return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2753 
2754 	if (lse_mask->mpls_ttl) {
2755 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2756 				 lse_key->mpls_ttl);
2757 		if (err)
2758 			return err;
2759 	}
2760 	if (lse_mask->mpls_tc) {
2761 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2762 				 lse_key->mpls_tc);
2763 		if (err)
2764 			return err;
2765 	}
2766 	if (lse_mask->mpls_label) {
2767 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2768 				  lse_key->mpls_label);
2769 		if (err)
2770 			return err;
2771 	}
2772 	if (lse_mask->mpls_bos) {
2773 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2774 				 lse_key->mpls_bos);
2775 		if (err)
2776 			return err;
2777 	}
2778 	return 0;
2779 }
2780 
2781 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2782 			  struct flow_dissector_key_ip *key,
2783 			  struct flow_dissector_key_ip *mask)
2784 {
2785 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2786 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2787 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2788 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2789 
2790 	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2791 	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2792 		return -1;
2793 
2794 	return 0;
2795 }
2796 
2797 static int fl_dump_key_vlan(struct sk_buff *skb,
2798 			    int vlan_id_key, int vlan_prio_key,
2799 			    struct flow_dissector_key_vlan *vlan_key,
2800 			    struct flow_dissector_key_vlan *vlan_mask)
2801 {
2802 	int err;
2803 
2804 	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2805 		return 0;
2806 	if (vlan_mask->vlan_id) {
2807 		err = nla_put_u16(skb, vlan_id_key,
2808 				  vlan_key->vlan_id);
2809 		if (err)
2810 			return err;
2811 	}
2812 	if (vlan_mask->vlan_priority) {
2813 		err = nla_put_u8(skb, vlan_prio_key,
2814 				 vlan_key->vlan_priority);
2815 		if (err)
2816 			return err;
2817 	}
2818 	return 0;
2819 }
2820 
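/* Copy a single control flag from the dissector key/mask representation into
 * the TCA_FLOWER_KEY_FLAGS bit layout.  The key bit is reported only when the
 * corresponding mask bit is set.
 */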
2821 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2822 			    u32 *flower_key, u32 *flower_mask,
2823 			    u32 flower_flag_bit, u32 dissector_flag_bit)
2824 {
2825 	if (dissector_mask & dissector_flag_bit) {
2826 		*flower_mask |= flower_flag_bit;
2827 		if (dissector_key & dissector_flag_bit)
2828 			*flower_key |= flower_flag_bit;
2829 	}
2830 }
2831 
2832 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2833 {
2834 	u32 key, mask;
2835 	__be32 _key, _mask;
2836 	int err;
2837 
2838 	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2839 		return 0;
2840 
2841 	key = 0;
2842 	mask = 0;
2843 
2844 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2845 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2846 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2847 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2848 			FLOW_DIS_FIRST_FRAG);
2849 
2850 	_key = cpu_to_be32(key);
2851 	_mask = cpu_to_be32(mask);
2852 
2853 	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2854 	if (err)
2855 		return err;
2856 
2857 	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2858 }
2859 
2860 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2861 				  struct flow_dissector_key_enc_opts *enc_opts)
2862 {
2863 	struct geneve_opt *opt;
2864 	struct nlattr *nest;
2865 	int opt_off = 0;
2866 
2867 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2868 	if (!nest)
2869 		goto nla_put_failure;
2870 
2871 	while (enc_opts->len > opt_off) {
2872 		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2873 
2874 		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2875 				 opt->opt_class))
2876 			goto nla_put_failure;
2877 		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2878 			       opt->type))
2879 			goto nla_put_failure;
2880 		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2881 			    opt->length * 4, opt->opt_data))
2882 			goto nla_put_failure;
2883 
2884 		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2885 	}
2886 	nla_nest_end(skb, nest);
2887 	return 0;
2888 
2889 nla_put_failure:
2890 	nla_nest_cancel(skb, nest);
2891 	return -EMSGSIZE;
2892 }
2893 
2894 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2895 				 struct flow_dissector_key_enc_opts *enc_opts)
2896 {
2897 	struct vxlan_metadata *md;
2898 	struct nlattr *nest;
2899 
2900 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2901 	if (!nest)
2902 		goto nla_put_failure;
2903 
2904 	md = (struct vxlan_metadata *)&enc_opts->data[0];
2905 	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2906 		goto nla_put_failure;
2907 
2908 	nla_nest_end(skb, nest);
2909 	return 0;
2910 
2911 nla_put_failure:
2912 	nla_nest_cancel(skb, nest);
2913 	return -EMSGSIZE;
2914 }
2915 
2916 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2917 				  struct flow_dissector_key_enc_opts *enc_opts)
2918 {
2919 	struct erspan_metadata *md;
2920 	struct nlattr *nest;
2921 
2922 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2923 	if (!nest)
2924 		goto nla_put_failure;
2925 
2926 	md = (struct erspan_metadata *)&enc_opts->data[0];
2927 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2928 		goto nla_put_failure;
2929 
2930 	if (md->version == 1 &&
2931 	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2932 		goto nla_put_failure;
2933 
2934 	if (md->version == 2 &&
2935 	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2936 			md->u.md2.dir) ||
2937 	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2938 			get_hwid(&md->u.md2))))
2939 		goto nla_put_failure;
2940 
2941 	nla_nest_end(skb, nest);
2942 	return 0;
2943 
2944 nla_put_failure:
2945 	nla_nest_cancel(skb, nest);
2946 	return -EMSGSIZE;
2947 }
2948 
2949 static int fl_dump_key_gtp_opt(struct sk_buff *skb,
2950 			       struct flow_dissector_key_enc_opts *enc_opts)
2951 
2952 {
2953 	struct gtp_pdu_session_info *session_info;
2954 	struct nlattr *nest;
2955 
2956 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP);
2957 	if (!nest)
2958 		goto nla_put_failure;
2959 
2960 	session_info = (struct gtp_pdu_session_info *)&enc_opts->data[0];
2961 
2962 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE,
2963 		       session_info->pdu_type))
2964 		goto nla_put_failure;
2965 
2966 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, session_info->qfi))
2967 		goto nla_put_failure;
2968 
2969 	nla_nest_end(skb, nest);
2970 	return 0;
2971 
2972 nla_put_failure:
2973 	nla_nest_cancel(skb, nest);
2974 	return -EMSGSIZE;
2975 }
2976 
2977 static int fl_dump_key_ct(struct sk_buff *skb,
2978 			  struct flow_dissector_key_ct *key,
2979 			  struct flow_dissector_key_ct *mask)
2980 {
2981 	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2982 	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2983 			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2984 			    sizeof(key->ct_state)))
2985 		goto nla_put_failure;
2986 
2987 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2988 	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2989 			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2990 			    sizeof(key->ct_zone)))
2991 		goto nla_put_failure;
2992 
2993 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2994 	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2995 			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2996 			    sizeof(key->ct_mark)))
2997 		goto nla_put_failure;
2998 
2999 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
3000 	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
3001 			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
3002 			    sizeof(key->ct_labels)))
3003 		goto nla_put_failure;
3004 
3005 	return 0;
3006 
3007 nla_put_failure:
3008 	return -EMSGSIZE;
3009 }
3010 
3011 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
3012 			       struct flow_dissector_key_enc_opts *enc_opts)
3013 {
3014 	struct nlattr *nest;
3015 	int err;
3016 
3017 	if (!enc_opts->len)
3018 		return 0;
3019 
3020 	nest = nla_nest_start_noflag(skb, enc_opt_type);
3021 	if (!nest)
3022 		goto nla_put_failure;
3023 
3024 	switch (enc_opts->dst_opt_type) {
3025 	case TUNNEL_GENEVE_OPT:
3026 		err = fl_dump_key_geneve_opt(skb, enc_opts);
3027 		if (err)
3028 			goto nla_put_failure;
3029 		break;
3030 	case TUNNEL_VXLAN_OPT:
3031 		err = fl_dump_key_vxlan_opt(skb, enc_opts);
3032 		if (err)
3033 			goto nla_put_failure;
3034 		break;
3035 	case TUNNEL_ERSPAN_OPT:
3036 		err = fl_dump_key_erspan_opt(skb, enc_opts);
3037 		if (err)
3038 			goto nla_put_failure;
3039 		break;
3040 	case TUNNEL_GTP_OPT:
3041 		err = fl_dump_key_gtp_opt(skb, enc_opts);
3042 		if (err)
3043 			goto nla_put_failure;
3044 		break;
3045 	default:
3046 		goto nla_put_failure;
3047 	}
3048 	nla_nest_end(skb, nest);
3049 	return 0;
3050 
3051 nla_put_failure:
3052 	nla_nest_cancel(skb, nest);
3053 	return -EMSGSIZE;
3054 }
3055 
3056 static int fl_dump_key_enc_opt(struct sk_buff *skb,
3057 			       struct flow_dissector_key_enc_opts *key_opts,
3058 			       struct flow_dissector_key_enc_opts *msk_opts)
3059 {
3060 	int err;
3061 
3062 	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
3063 	if (err)
3064 		return err;
3065 
3066 	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
3067 }
3068 
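/* Dump the key/mask pair back as TCA_FLOWER_* attributes, mirroring
 * fl_set_key(): only fields with a non-zero mask are emitted, so the dump
 * reflects just what the filter matches on.
 */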
3069 static int fl_dump_key(struct sk_buff *skb, struct net *net,
3070 		       struct fl_flow_key *key, struct fl_flow_key *mask)
3071 {
3072 	if (mask->meta.ingress_ifindex) {
3073 		struct net_device *dev;
3074 
3075 		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
3076 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
3077 			goto nla_put_failure;
3078 	}
3079 
3080 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
3081 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
3082 			    sizeof(key->eth.dst)) ||
3083 	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
3084 			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
3085 			    sizeof(key->eth.src)) ||
3086 	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
3087 			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
3088 			    sizeof(key->basic.n_proto)))
3089 		goto nla_put_failure;
3090 
3091 	if (mask->num_of_vlans.num_of_vlans) {
3092 		if (nla_put_u8(skb, TCA_FLOWER_KEY_NUM_OF_VLANS, key->num_of_vlans.num_of_vlans))
3093 			goto nla_put_failure;
3094 	}
3095 
3096 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
3097 		goto nla_put_failure;
3098 
3099 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
3100 			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
3101 		goto nla_put_failure;
3102 
3103 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
3104 			     TCA_FLOWER_KEY_CVLAN_PRIO,
3105 			     &key->cvlan, &mask->cvlan) ||
3106 	    (mask->cvlan.vlan_tpid &&
3107 	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3108 			  key->cvlan.vlan_tpid)))
3109 		goto nla_put_failure;
3110 
3111 	if (mask->basic.n_proto) {
3112 		if (mask->cvlan.vlan_eth_type) {
3113 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
3114 					 key->basic.n_proto))
3115 				goto nla_put_failure;
3116 		} else if (mask->vlan.vlan_eth_type) {
3117 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3118 					 key->vlan.vlan_eth_type))
3119 				goto nla_put_failure;
3120 		}
3121 	}
3122 
3123 	if ((key->basic.n_proto == htons(ETH_P_IP) ||
3124 	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
3125 	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
3126 			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
3127 			    sizeof(key->basic.ip_proto)) ||
3128 	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
3129 		goto nla_put_failure;
3130 
3131 	if (mask->pppoe.session_id) {
3132 		if (nla_put_be16(skb, TCA_FLOWER_KEY_PPPOE_SID,
3133 				 key->pppoe.session_id))
3134 			goto nla_put_failure;
3135 	}
3136 	if (mask->basic.n_proto && mask->pppoe.ppp_proto) {
3137 		if (nla_put_be16(skb, TCA_FLOWER_KEY_PPP_PROTO,
3138 				 key->pppoe.ppp_proto))
3139 			goto nla_put_failure;
3140 	}
3141 
3142 	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3143 	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
3144 			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
3145 			     sizeof(key->ipv4.src)) ||
3146 	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
3147 			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
3148 			     sizeof(key->ipv4.dst))))
3149 		goto nla_put_failure;
3150 	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3151 		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
3152 				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
3153 				  sizeof(key->ipv6.src)) ||
3154 		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
3155 				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
3156 				  sizeof(key->ipv6.dst))))
3157 		goto nla_put_failure;
3158 
3159 	if (key->basic.ip_proto == IPPROTO_TCP &&
3160 	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
3161 			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
3162 			     sizeof(key->tp.src)) ||
3163 	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
3164 			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
3165 			     sizeof(key->tp.dst)) ||
3166 	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
3167 			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
3168 			     sizeof(key->tcp.flags))))
3169 		goto nla_put_failure;
3170 	else if (key->basic.ip_proto == IPPROTO_UDP &&
3171 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
3172 				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
3173 				  sizeof(key->tp.src)) ||
3174 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
3175 				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
3176 				  sizeof(key->tp.dst))))
3177 		goto nla_put_failure;
3178 	else if (key->basic.ip_proto == IPPROTO_SCTP &&
3179 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
3180 				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
3181 				  sizeof(key->tp.src)) ||
3182 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
3183 				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
3184 				  sizeof(key->tp.dst))))
3185 		goto nla_put_failure;
3186 	else if (key->basic.n_proto == htons(ETH_P_IP) &&
3187 		 key->basic.ip_proto == IPPROTO_ICMP &&
3188 		 (fl_dump_key_val(skb, &key->icmp.type,
3189 				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
3190 				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
3191 				  sizeof(key->icmp.type)) ||
3192 		  fl_dump_key_val(skb, &key->icmp.code,
3193 				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
3194 				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
3195 				  sizeof(key->icmp.code))))
3196 		goto nla_put_failure;
3197 	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
3198 		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
3199 		 (fl_dump_key_val(skb, &key->icmp.type,
3200 				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
3201 				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
3202 				  sizeof(key->icmp.type)) ||
3203 		  fl_dump_key_val(skb, &key->icmp.code,
3204 				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
3205 				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
3206 				  sizeof(key->icmp.code))))
3207 		goto nla_put_failure;
3208 	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
3209 		  key->basic.n_proto == htons(ETH_P_RARP)) &&
3210 		 (fl_dump_key_val(skb, &key->arp.sip,
3211 				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
3212 				  TCA_FLOWER_KEY_ARP_SIP_MASK,
3213 				  sizeof(key->arp.sip)) ||
3214 		  fl_dump_key_val(skb, &key->arp.tip,
3215 				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
3216 				  TCA_FLOWER_KEY_ARP_TIP_MASK,
3217 				  sizeof(key->arp.tip)) ||
3218 		  fl_dump_key_val(skb, &key->arp.op,
3219 				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
3220 				  TCA_FLOWER_KEY_ARP_OP_MASK,
3221 				  sizeof(key->arp.op)) ||
3222 		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
3223 				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
3224 				  sizeof(key->arp.sha)) ||
3225 		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
3226 				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
3227 				  sizeof(key->arp.tha))))
3228 		goto nla_put_failure;
3229 	else if (key->basic.ip_proto == IPPROTO_L2TP &&
3230 		 fl_dump_key_val(skb, &key->l2tpv3.session_id,
3231 				 TCA_FLOWER_KEY_L2TPV3_SID,
3232 				 &mask->l2tpv3.session_id,
3233 				 TCA_FLOWER_UNSPEC,
3234 				 sizeof(key->l2tpv3.session_id)))
3235 		goto nla_put_failure;
3236 
3237 	if ((key->basic.ip_proto == IPPROTO_TCP ||
3238 	     key->basic.ip_proto == IPPROTO_UDP ||
3239 	     key->basic.ip_proto == IPPROTO_SCTP) &&
3240 	     fl_dump_key_port_range(skb, key, mask))
3241 		goto nla_put_failure;
3242 
3243 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3244 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
3245 			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
3246 			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
3247 			    sizeof(key->enc_ipv4.src)) ||
3248 	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
3249 			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
3250 			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
3251 			     sizeof(key->enc_ipv4.dst))))
3252 		goto nla_put_failure;
3253 	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3254 		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
3255 			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
3256 			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
3257 			    sizeof(key->enc_ipv6.src)) ||
3258 		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
3259 				 TCA_FLOWER_KEY_ENC_IPV6_DST,
3260 				 &mask->enc_ipv6.dst,
3261 				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
3262 			    sizeof(key->enc_ipv6.dst))))
3263 		goto nla_put_failure;
3264 
3265 	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
3266 			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
3267 			    sizeof(key->enc_key_id)) ||
3268 	    fl_dump_key_val(skb, &key->enc_tp.src,
3269 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
3270 			    &mask->enc_tp.src,
3271 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
3272 			    sizeof(key->enc_tp.src)) ||
3273 	    fl_dump_key_val(skb, &key->enc_tp.dst,
3274 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
3275 			    &mask->enc_tp.dst,
3276 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
3277 			    sizeof(key->enc_tp.dst)) ||
3278 	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
3279 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
3280 		goto nla_put_failure;
3281 
3282 	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
3283 		goto nla_put_failure;
3284 
3285 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
3286 		goto nla_put_failure;
3287 
3288 	if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
3289 			     &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
3290 			     sizeof(key->hash.hash)))
3291 		goto nla_put_failure;
3292 
3293 	return 0;
3294 
3295 nla_put_failure:
3296 	return -EMSGSIZE;
3297 }
3298 
3299 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
3300 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3301 {
3302 	struct cls_fl_filter *f = fh;
3303 	struct nlattr *nest;
3304 	struct fl_flow_key *key, *mask;
3305 	bool skip_hw;
3306 
3307 	if (!f)
3308 		return skb->len;
3309 
3310 	t->tcm_handle = f->handle;
3311 
3312 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3313 	if (!nest)
3314 		goto nla_put_failure;
3315 
3316 	spin_lock(&tp->lock);
3317 
3318 	if (f->res.classid &&
3319 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
3320 		goto nla_put_failure_locked;
3321 
3322 	key = &f->key;
3323 	mask = &f->mask->key;
3324 	skip_hw = tc_skip_hw(f->flags);
3325 
3326 	if (fl_dump_key(skb, net, key, mask))
3327 		goto nla_put_failure_locked;
3328 
3329 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3330 		goto nla_put_failure_locked;
3331 
3332 	spin_unlock(&tp->lock);
3333 
3334 	if (!skip_hw)
3335 		fl_hw_update_stats(tp, f, rtnl_held);
3336 
3337 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3338 		goto nla_put_failure;
3339 
3340 	if (tcf_exts_dump(skb, &f->exts))
3341 		goto nla_put_failure;
3342 
3343 	nla_nest_end(skb, nest);
3344 
3345 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3346 		goto nla_put_failure;
3347 
3348 	return skb->len;
3349 
3350 nla_put_failure_locked:
3351 	spin_unlock(&tp->lock);
3352 nla_put_failure:
3353 	nla_nest_cancel(skb, nest);
3354 	return -1;
3355 }
3356 
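/* Terse counterpart of fl_dump(): emits only the filter flags and a
 * terse dump of the actions, skipping the full key/mask and the
 * extension statistics.
 */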
3357 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3358 			 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3359 {
3360 	struct cls_fl_filter *f = fh;
3361 	struct nlattr *nest;
3362 	bool skip_hw;
3363 
3364 	if (!f)
3365 		return skb->len;
3366 
3367 	t->tcm_handle = f->handle;
3368 
3369 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3370 	if (!nest)
3371 		goto nla_put_failure;
3372 
3373 	spin_lock(&tp->lock);
3374 
3375 	skip_hw = tc_skip_hw(f->flags);
3376 
3377 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3378 		goto nla_put_failure_locked;
3379 
3380 	spin_unlock(&tp->lock);
3381 
3382 	if (!skip_hw)
3383 		fl_hw_update_stats(tp, f, rtnl_held);
3384 
3385 	if (tcf_exts_terse_dump(skb, &f->exts))
3386 		goto nla_put_failure;
3387 
3388 	nla_nest_end(skb, nest);
3389 
3390 	return skb->len;
3391 
3392 nla_put_failure_locked:
3393 	spin_unlock(&tp->lock);
3394 nla_put_failure:
3395 	nla_nest_cancel(skb, nest);
3396 	return -1;
3397 }
3398 
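/* Dump a chain template: the template's dummy key and mask are emitted
 * with the same fl_dump_key() helper used for regular filters.
 */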
3399 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3400 {
3401 	struct fl_flow_tmplt *tmplt = tmplt_priv;
3402 	struct fl_flow_key *key, *mask;
3403 	struct nlattr *nest;
3404 
3405 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3406 	if (!nest)
3407 		goto nla_put_failure;
3408 
3409 	key = &tmplt->dummy_key;
3410 	mask = &tmplt->mask;
3411 
3412 	if (fl_dump_key(skb, net, key, mask))
3413 		goto nla_put_failure;
3414 
3415 	nla_nest_end(skb, nest);
3416 
3417 	return skb->len;
3418 
3419 nla_put_failure:
3420 	nla_nest_cancel(skb, nest);
3421 	return -EMSGSIZE;
3422 }
3423 
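/* bind_class callback: (re)bind the filter's result to a class through
 * tc_cls_bind_class() when the filter's stored classid matches.
 */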
3424 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3425 			  unsigned long base)
3426 {
3427 	struct cls_fl_filter *f = fh;
3428 
3429 	tc_cls_bind_class(classid, cl, q, &f->res, base);
3430 }
3431 
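/* delete_empty callback: report whether the classifier holds no filters
 * (handle IDR empty) so the core can remove the now-empty tcf_proto.
 * tp->deleting is set under tp->lock.
 */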
3432 static bool fl_delete_empty(struct tcf_proto *tp)
3433 {
3434 	struct cls_fl_head *head = fl_head_dereference(tp);
3435 
3436 	spin_lock(&tp->lock);
3437 	tp->deleting = idr_is_empty(&head->handle_idr);
3438 	spin_unlock(&tp->lock);
3439 
3440 	return tp->deleting;
3441 }
3442 
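/* Classifier ops for "flower".  TCF_PROTO_OPS_DOIT_UNLOCKED advertises
 * that filter add/change/delete may run without holding rtnl.
 */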
3443 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3444 	.kind		= "flower",
3445 	.classify	= fl_classify,
3446 	.init		= fl_init,
3447 	.destroy	= fl_destroy,
3448 	.get		= fl_get,
3449 	.put		= fl_put,
3450 	.change		= fl_change,
3451 	.delete		= fl_delete,
3452 	.delete_empty	= fl_delete_empty,
3453 	.walk		= fl_walk,
3454 	.reoffload	= fl_reoffload,
3455 	.hw_add		= fl_hw_add,
3456 	.hw_del		= fl_hw_del,
3457 	.dump		= fl_dump,
3458 	.terse_dump	= fl_terse_dump,
3459 	.bind_class	= fl_bind_class,
3460 	.tmplt_create	= fl_tmplt_create,
3461 	.tmplt_destroy	= fl_tmplt_destroy,
3462 	.tmplt_dump	= fl_tmplt_dump,
3463 	.get_exts	= fl_get_exts,
3464 	.owner		= THIS_MODULE,
3465 	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
3466 };
3467 
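/* Module entry points: register/unregister the flower ops with the TC core. */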
3468 static int __init cls_fl_init(void)
3469 {
3470 	return register_tcf_proto_ops(&cls_fl_ops);
3471 }
3472 
3473 static void __exit cls_fl_exit(void)
3474 {
3475 	unregister_tcf_proto_ops(&cls_fl_ops);
3476 }
3477 
3478 module_init(cls_fl_init);
3479 module_exit(cls_fl_exit);
3480 
3481 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3482 MODULE_DESCRIPTION("Flower classifier");
3483 MODULE_LICENSE("GPL v2");
3484