// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/gtp.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

#include <uapi/linux/netfilter/nf_conntrack_common.h>

#define TCA_FLOWER_KEY_CT_FLAGS_MAX \
		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
#define TCA_FLOWER_KEY_CT_FLAGS_MASK \
		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)
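/* Derived from the highest flag in the uapi enum, so that
 * TCA_FLOWER_KEY_CT_FLAGS_MASK covers every currently defined conntrack
 * state flag and grows automatically when new flags are added.
 */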

struct fl_flow_key {
	struct flow_dissector_key_meta meta;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	struct flow_dissector_key_ports_range tp_range;
	struct flow_dissector_key_ct ct;
	struct flow_dissector_key_hash hash;
	struct flow_dissector_key_num_of_vlans num_of_vlans;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

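/* Byte range [start, end) of the mask that contains any non-zero byte;
 * computed by fl_mask_update_range() and used so that hashing and
 * comparison only touch the interesting part of the key.
 */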
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};

struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference counter
	 * can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};

static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

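/* Find the first and last non-zero bytes of the mask and round the
 * resulting window out to long boundaries, so the masked-key loops below
 * can operate on whole longs.
 *
 * Illustrative example (offsets assumed, not taken from the real layout):
 * if only two mask bytes at offsets 20-21 are non-zero and
 * sizeof(long) == 8, the range becomes [16, 24).
 */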
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

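/* mkey = key & mask, computed long-by-long over the mask's byte range.
 * This relies on struct fl_flow_key being long-aligned (see the __aligned
 * annotation above).
 */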
static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

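/* A mask fits a template iff it does not match on any bit the template
 * mask leaves unmatched, i.e. ~tmplt_mask & mask == 0 across the mask's
 * range. A NULL template fits everything.
 */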
static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;
	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

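/* Port-range matching: the skb key carries only the actual port, never
 * min/max values, so when a filter masks a port range we first check the
 * port against [min_val, max_val] (inclusive) and, on success, copy the
 * filter's masked min/max into mkey so the hash lookup can match.
 */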
static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
	min_val = ntohs(filter->key.tp_range.tp_min.dst);
	max_val = ntohs(filter->key.tp_range.tp_max.dst);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.dst) < min_val ||
		    ntohs(key->tp_range.tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
	min_val = ntohs(filter->key.tp_range.tp_min.src);
	max_val = ntohs(filter->key.tp_range.tp_max.src);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.src) < min_val ||
		    ntohs(key->tp_range.tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
	}
	return true;
}

static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

static noinline_for_stack
struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
{
	struct fl_flow_key mkey;

	fl_set_masked_key(&mkey, key, mask);
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, &mkey, key);

	return __fl_lookup(mask, &mkey);
}

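/* Translation table from conntrack info states to the flower CT flag
 * combinations exposed to userspace; consumed by skb_flow_dissect_ct()
 * in the classify path below.
 */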
static u16 fl_ct_info_to_flower_map[] = {
	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_NEW,
};

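/* Classification fast path: for each mask, in list order, dissect only
 * the keys that mask cares about, apply the mask and do a hash lookup.
 * The first matching filter that is not skip_sw executes its actions.
 */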
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	bool post_ct = tc_skb_cb(skb)->post_ct;
	u16 zone = tc_skb_cb(skb)->zone;
	struct fl_flow_key skb_key;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so do it here instead.
		 */
		skb_key.basic.n_proto = skb_protocol(skb, false);
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
				    fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map),
				    post_ct, zone);
		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
		skb_flow_dissect(skb, &mask->dissector, &skb_key,
				 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);

		f = fl_mask_lookup(mask, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}

static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
			    &f->flags, &f->in_hw_count, rtnl_held);
}

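/* Offload a filter to hardware. Failure to translate the actions is
 * fatal only for skip_sw filters (others simply stay software-only); a
 * driver error while installing the rule is always fatal, and a skip_sw
 * filter that ends up in no hardware at all is rejected with -EINVAL.
 */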
static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
				      cls_flower.common.extack);
	if (err) {
		kfree(cls_flower.rule);

		return skip_sw ? err : 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
	tc_cleanup_offload_action(&cls_flower.rule->action);
	kfree(cls_flower.rule);

	if (err) {
		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
		return err;
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = FLOW_CLS_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
			 rtnl_held);

	tcf_exts_hw_stats_update(&f->exts, cls_flower.stats.bytes,
				 cls_flower.stats.pkts,
				 cls_flower.stats.drops,
				 cls_flower.stats.lastused,
				 cls_flower.stats.used_hw_stats,
				 cls_flower.stats.used_hw_stats_valid);
}

static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}

static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_CT_STATE]	=
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_STATE_MASK]	=
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_NUM_OF_VLANS]	= { .type = NLA_U8 },
};

static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]        = {
		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_GTP]		= { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
						       .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]         = { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]        = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]      = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]        = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
};

static const struct nla_policy
gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]	   = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]	   = { .type = NLA_U8 },
};

static const struct nla_policy
mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]    = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]      = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]      = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]       = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]    = { .type = NLA_U32 },
};

static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}
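
/* Illustrative use (this exact call appears in fl_set_key() below):
 *
 *	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
 *		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
 *		       sizeof(key->eth.dst));
 *
 * copies the value attribute if present; a missing mask attribute (or a
 * mask_type of TCA_FLOWER_UNSPEC) means exact match, i.e. an all-ones mask.
 */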

static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask,
				 struct netlink_ext_ack *extack)
{
	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
	fl_set_key_val(tb, &key->tp_range.tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
	fl_set_key_val(tb, &key->tp_range.tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));

	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
	    ntohs(key->tp_range.tp_max.dst) <=
	    ntohs(key->tp_range.tp_min.dst)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
				    "Invalid destination port range (min must be strictly smaller than max)");
		return -EINVAL;
	}
	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
	    ntohs(key->tp_range.tp_max.src) <=
	    ntohs(key->tp_range.tp_min.src)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
				    "Invalid source port range (min must be strictly smaller than max)");
		return -EINVAL;
	}

	return 0;
}
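
/* The resulting range match is inclusive: e.g. (values illustrative)
 * PORT_DST_MIN = 80, PORT_DST_MAX = 90 matches destination ports 80-90,
 * and the validation above requires min to be strictly smaller than max.
 */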

static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
			       struct flow_dissector_key_mpls *key_val,
			       struct flow_dissector_key_mpls *key_mask,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;
	u8 lse_index;
	u8 depth;
	int err;

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
			       mpls_stack_entry_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
		return -EINVAL;
	}

	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);

	/* LSE depth starts at 1, for consistency with terminology used by
	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
	 */
	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
				    "Invalid MPLS depth");
		return -EINVAL;
	}
	lse_index = depth - 1;

	dissector_set_mpls_lse(key_val, lse_index);
	dissector_set_mpls_lse(key_mask, lse_index);

	lse_val = &key_val->ls[lse_index];
	lse_mask = &key_mask->ls[lse_index];

	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
	}

	return 0;
}

static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
				struct flow_dissector_key_mpls *key_val,
				struct flow_dissector_key_mpls *key_mask,
				struct netlink_ext_ack *extack)
{
	struct nlattr *nla_lse;
	int rem;
	int err;

	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
				    "NLA_F_NESTED is missing");
		return -EINVAL;
	}

	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
					    "Invalid MPLS option type");
			return -EINVAL;
		}

		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
		if (err < 0)
			return err;
	}
	if (rem) {
		NL_SET_ERR_MSG(extack,
			       "Bytes leftover after parsing MPLS options");
		return -EINVAL;
	}

	return 0;
}

static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask,
			   struct netlink_ext_ack *extack)
{
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;

	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
			return -EBADMSG;
		}

		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    key_val, key_mask, extack);
	}

	lse_val = &key_val->ls[0];
	lse_mask = &key_mask->ls[0];

	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	return 0;
}

static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    int vlan_next_eth_type_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	if (ethertype) {
		key_val->vlan_tpid = ethertype;
		key_mask->vlan_tpid = cpu_to_be16(~0);
	}
	if (tb[vlan_next_eth_type_key]) {
		key_val->vlan_eth_type =
			nla_get_be16(tb[vlan_next_eth_type_key]);
		key_mask->vlan_eth_type = cpu_to_be16(~0);
	}
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

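/* Note: TCA_FLOWER_KEY_FLAGS is declared NLA_U32 in the policy, but the
 * value is carried in network byte order, hence the nla_get_be32() +
 * be32_to_cpu() below. The mask attribute is mandatory for flags.
 */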
static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
			    u32 *flags_mask, struct netlink_ext_ack *extack)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
		NL_SET_ERR_MSG(extack, "Missing flags mask");
		return -EINVAL;
	}

	key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key  = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}

static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}

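/* The fl_set_*_opt() helpers below are each called twice from
 * fl_set_enc_opt(): once for the key and once for the mask. A depth of 0
 * signals the mask pass with no mask attribute supplied, in which case
 * the option is left filled with all-ones (exact match) and the helper
 * returns early.
 */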
static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided we assume an exact match. */
	if (!depth)
		return sizeof(struct geneve_opt) + data_len;

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb,
					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* We are not allowed to omit any of CLASS, TYPE or DATA
	 * fields from the key.
	 */
	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	/* Omitting any of CLASS, TYPE or DATA fields is allowed
	 * for the mask.
	 */
	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
		int new_len = key->enc_opts.len;

		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
		data_len = nla_len(data);
		if (data_len < 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
			return -ERANGE;
		}
		if (data_len % 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
			return -ERANGE;
		}

		new_len += sizeof(struct geneve_opt) + data_len;
		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
			NL_SET_ERR_MSG(extack, "Tunnel options exceed max size");
			return -ERANGE;
		}
		opt->length = data_len / 4;
		memcpy(opt->opt_data, nla_data(data), data_len);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(class);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(type);
	}

	return sizeof(struct geneve_opt) + data_len;
}

static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			    int depth, int option_len,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
	struct vxlan_metadata *md;
	int err;

	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
			       vxlan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
		md->gbp &= VXLAN_GBP_MASK;
	}

	return sizeof(*md);
}

static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
	struct erspan_metadata *md;
	int err;

	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));
	md->version = 1;

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
			       erspan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);

	if (md->version == 1) {
		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
			memset(&md->u, 0x00, sizeof(md->u));
			md->u.index = nla_get_be32(nla);
		}
	} else if (md->version == 2) {
		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
			md->u.md2.dir = nla_get_u8(nla);
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
			set_hwid(&md->u.md2, nla_get_u8(nla));
		}
	} else {
		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
		return -EINVAL;
	}

	return sizeof(*md);
}

static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key,
			  int depth, int option_len,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1];
	struct gtp_pdu_session_info *sinfo;
	u8 len = key->enc_opts.len;
	int err;

	sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[len];
	memset(sinfo, 0xff, option_len);

	if (!depth)
		return sizeof(*sinfo);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) {
		NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla,
			       gtp_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing tunnel key gtp option pdu type or qfi");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE])
		sinfo->pdu_type =
			nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]);

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])
		sinfo->qfi = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]);

	return sizeof(*sinfo);
}

static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
			  struct fl_flow_key *mask,
			  struct netlink_ext_ack *extack)
{
	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
	int err, option_len, key_depth, msk_depth = 0;

	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
					     enc_opts_policy, extack);
	if (err)
		return err;

	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
						     enc_opts_policy, extack);
		if (err)
			return err;

		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		if (!nla_ok(nla_opt_msk, msk_depth)) {
			NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
			return -EINVAL;
		}
	}

	nla_for_each_attr(nla_opt_key, nla_enc_key,
			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
		switch (nla_type(nla_opt_key)) {
		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
			if (key->enc_opts.dst_opt_type &&
			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_key, key,
						      key_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
						      msk_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_GTP:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Duplicate type for gtp options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
			option_len = fl_set_gtp_opt(nla_opt_key, key,
						    key_depth, option_len,
						    extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
			option_len = fl_set_gtp_opt(nla_opt_msk, mask,
						    msk_depth, option_len,
						    extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Key and mask are misaligned");
				return -EINVAL;
			}
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
			return -EINVAL;
		}

		if (!msk_depth)
			continue;

		if (!nla_ok(nla_opt_msk, msk_depth)) {
			NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
			return -EINVAL;
		}
		nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
	}

	return 0;
}

static int fl_validate_ct_state(u16 state, struct nlattr *tb,
				struct netlink_ext_ack *extack)
{
	if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "no trk, so no other flag can be set");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
	    state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "new and est are mutually exclusive");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID &&
	    state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
		      TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "when inv is set, only trk may be set");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
	    state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "new and rpl are mutually exclusive");
		return -EINVAL;
	}

	return 0;
}

static int fl_set_key_ct(struct nlattr **tb,
			 struct flow_dissector_key_ct *key,
			 struct flow_dissector_key_ct *mask,
			 struct netlink_ext_ack *extack)
{
	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
		int err;

		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			       sizeof(key->ct_state));

		err = fl_validate_ct_state(key->ct_state & mask->ct_state,
					   tb[TCA_FLOWER_KEY_CT_STATE_MASK],
					   extack);
		if (err)
			return err;
	}
	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG(extack, "Conntrack zones aren't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			       sizeof(key->ct_zone));
	}
	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			       sizeof(key->ct_mark));
	}
	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			       sizeof(key->ct_labels));
	}

	return 0;
}

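/* Decide whether the current parsing level is a VLAN: true if the packet
 * is known to carry more than @vthresh VLAN tags or if the ethertype
 * attribute holds a VLAN TPID. Otherwise the ethertype is recorded as the
 * basic n_proto match and false is returned.
 */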
static bool is_vlan_key(struct nlattr *tb, __be16 *ethertype,
			struct fl_flow_key *key, struct fl_flow_key *mask,
			int vthresh)
{
	const bool good_num_of_vlans = key->num_of_vlans.num_of_vlans > vthresh;

	if (!tb) {
		*ethertype = 0;
		return good_num_of_vlans;
	}

	*ethertype = nla_get_be16(tb);
	if (good_num_of_vlans || eth_type_vlan(*ethertype))
		return true;

	key->basic.n_proto = *ethertype;
	mask->basic.n_proto = cpu_to_be16(~0);
	return false;
}

static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask,
		      struct netlink_ext_ack *extack)
{
	__be16 ethertype;
	int ret = 0;

	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
		if (err < 0)
			return err;
		key->meta.ingress_ifindex = err;
		mask->meta.ingress_ifindex = 0xffffffff;
	}

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));
	fl_set_key_val(tb, &key->num_of_vlans,
		       TCA_FLOWER_KEY_NUM_OF_VLANS,
		       &mask->num_of_vlans,
		       TCA_FLOWER_UNSPEC,
		       sizeof(key->num_of_vlans));

	if (is_vlan_key(tb[TCA_FLOWER_KEY_ETH_TYPE], &ethertype, key, mask, 0)) {
		fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
				TCA_FLOWER_KEY_VLAN_PRIO,
				TCA_FLOWER_KEY_VLAN_ETH_TYPE,
				&key->vlan, &mask->vlan);

		if (is_vlan_key(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE],
				&ethertype, key, mask, 1)) {
			fl_set_key_vlan(tb, ethertype,
					TCA_FLOWER_KEY_CVLAN_ID,
					TCA_FLOWER_KEY_CVLAN_PRIO,
					TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
					&key->cvlan, &mask->cvlan);
			fl_set_key_val(tb, &key->basic.n_proto,
				       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
				       &mask->basic.n_proto,
				       TCA_FLOWER_UNSPEC,
				       sizeof(key->basic.n_proto));
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			       sizeof(key->tcp.flags));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		   key->basic.ip_proto == IPPROTO_ICMPV6) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1713 			       &mask->icmp.type,
1714 			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1715 			       sizeof(key->icmp.type));
1716 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1717 			       &mask->icmp.code,
1718 			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1719 			       sizeof(key->icmp.code));
1720 	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1721 		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1722 		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
1723 		if (ret)
1724 			return ret;
1725 	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1726 		   key->basic.n_proto == htons(ETH_P_RARP)) {
1727 		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1728 			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1729 			       sizeof(key->arp.sip));
1730 		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1731 			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1732 			       sizeof(key->arp.tip));
1733 		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1734 			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1735 			       sizeof(key->arp.op));
1736 		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1737 			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1738 			       sizeof(key->arp.sha));
1739 		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1740 			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1741 			       sizeof(key->arp.tha));
1742 	}
1743 
1744 	if (key->basic.ip_proto == IPPROTO_TCP ||
1745 	    key->basic.ip_proto == IPPROTO_UDP ||
1746 	    key->basic.ip_proto == IPPROTO_SCTP) {
1747 		ret = fl_set_key_port_range(tb, key, mask, extack);
1748 		if (ret)
1749 			return ret;
1750 	}
1751 
1752 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1753 	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1754 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1755 		mask->enc_control.addr_type = ~0;
1756 		fl_set_key_val(tb, &key->enc_ipv4.src,
1757 			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
1758 			       &mask->enc_ipv4.src,
1759 			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1760 			       sizeof(key->enc_ipv4.src));
1761 		fl_set_key_val(tb, &key->enc_ipv4.dst,
1762 			       TCA_FLOWER_KEY_ENC_IPV4_DST,
1763 			       &mask->enc_ipv4.dst,
1764 			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1765 			       sizeof(key->enc_ipv4.dst));
1766 	}
1767 
1768 	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1769 	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1770 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1771 		mask->enc_control.addr_type = ~0;
1772 		fl_set_key_val(tb, &key->enc_ipv6.src,
1773 			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
1774 			       &mask->enc_ipv6.src,
1775 			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1776 			       sizeof(key->enc_ipv6.src));
1777 		fl_set_key_val(tb, &key->enc_ipv6.dst,
1778 			       TCA_FLOWER_KEY_ENC_IPV6_DST,
1779 			       &mask->enc_ipv6.dst,
1780 			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1781 			       sizeof(key->enc_ipv6.dst));
1782 	}
1783 
1784 	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1785 		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1786 		       sizeof(key->enc_key_id.keyid));
1787 
1788 	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1789 		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1790 		       sizeof(key->enc_tp.src));
1791 
1792 	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1793 		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1794 		       sizeof(key->enc_tp.dst));
1795 
1796 	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1797 
1798 	fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1799 		       &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1800 		       sizeof(key->hash.hash));
1801 
1802 	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1803 		ret = fl_set_enc_opt(tb, key, mask, extack);
1804 		if (ret)
1805 			return ret;
1806 	}
1807 
1808 	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1809 	if (ret)
1810 		return ret;
1811 
1812 	if (tb[TCA_FLOWER_KEY_FLAGS])
1813 		ret = fl_set_key_flags(tb, &key->control.flags,
1814 				       &mask->control.flags, extack);
1815 
1816 	return ret;
1817 }
1818 
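/* Copy only the byte range of the mask key that is actually used
 * (src->range), so the unused parts of dst stay zeroed.
 */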
1819 static void fl_mask_copy(struct fl_flow_mask *dst,
1820 			 struct fl_flow_mask *src)
1821 {
1822 	const void *psrc = fl_key_get_start(&src->key, src);
1823 	void *pdst = fl_key_get_start(&dst->key, src);
1824 
1825 	memcpy(pdst, psrc, fl_mask_range(src));
1826 	dst->range = src->range;
1827 }
1828 
1829 static const struct rhashtable_params fl_ht_params = {
1830 	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1831 	.head_offset = offsetof(struct cls_fl_filter, ht_node),
1832 	.automatic_shrinking = true,
1833 };
1834 
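/* The per-mask filter hashtable hashes only the masked byte range of the
 * flow key: key_offset and key_len are narrowed to mask->range, so filters
 * sharing this mask are compared on the relevant bytes only.
 */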
1835 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1836 {
1837 	mask->filter_ht_params = fl_ht_params;
1838 	mask->filter_ht_params.key_len = fl_mask_range(mask);
1839 	mask->filter_ht_params.key_offset += mask->range.start;
1840 
1841 	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1842 }
1843 
1844 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1845 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
1846 
1847 #define FL_KEY_IS_MASKED(mask, member)						\
1848 	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
1849 		   0, FL_KEY_MEMBER_SIZE(member))
1850 
1851 #define FL_KEY_SET(keys, cnt, id, member)					\
1852 	do {									\
1853 		keys[cnt].key_id = id;						\
1854 		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
1855 		cnt++;								\
1856 	} while (0)
1857 
1858 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
1859 	do {									\
1860 		if (FL_KEY_IS_MASKED(mask, member))				\
1861 			FL_KEY_SET(keys, cnt, id, member);			\
1862 	} while (0)
1863 
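/* Build the flow dissector used to extract match keys from packets.
 * CONTROL and BASIC are always dissected; every other key is included
 * only if at least one of its bits is set in @mask.
 */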
1864 static void fl_init_dissector(struct flow_dissector *dissector,
1865 			      struct fl_flow_key *mask)
1866 {
1867 	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1868 	size_t cnt = 0;
1869 
1870 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1871 			     FLOW_DISSECTOR_KEY_META, meta);
1872 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1873 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1874 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1875 			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1876 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1877 			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1878 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1879 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1880 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1881 			     FLOW_DISSECTOR_KEY_PORTS, tp);
1882 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1883 			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
1884 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1885 			     FLOW_DISSECTOR_KEY_IP, ip);
1886 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1887 			     FLOW_DISSECTOR_KEY_TCP, tcp);
1888 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1889 			     FLOW_DISSECTOR_KEY_ICMP, icmp);
1890 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1891 			     FLOW_DISSECTOR_KEY_ARP, arp);
1892 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1893 			     FLOW_DISSECTOR_KEY_MPLS, mpls);
1894 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1895 			     FLOW_DISSECTOR_KEY_VLAN, vlan);
1896 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1897 			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1898 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1899 			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1900 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1901 			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1902 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1903 			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1904 	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1905 	    FL_KEY_IS_MASKED(mask, enc_ipv6))
1906 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1907 			   enc_control);
1908 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1909 			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1910 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1911 			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1912 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1913 			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1914 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1915 			     FLOW_DISSECTOR_KEY_CT, ct);
1916 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1917 			     FLOW_DISSECTOR_KEY_HASH, hash);
1918 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1919 			     FLOW_DISSECTOR_KEY_NUM_OF_VLANS, num_of_vlans);
1920 
1921 	skb_flow_dissector_init(dissector, keys, cnt);
1922 }
1923 
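/* Allocate a permanent copy of the temporary mask that
 * fl_check_assign_mask() inserted, set up its filter hashtable and
 * dissector, atomically replace the temporary node in head->ht and link
 * the new mask on head->masks. Returns the new mask or an ERR_PTR.
 */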
1924 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1925 					       struct fl_flow_mask *mask)
1926 {
1927 	struct fl_flow_mask *newmask;
1928 	int err;
1929 
1930 	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1931 	if (!newmask)
1932 		return ERR_PTR(-ENOMEM);
1933 
1934 	fl_mask_copy(newmask, mask);
1935 
1936 	if ((newmask->key.tp_range.tp_min.dst &&
1937 	     newmask->key.tp_range.tp_max.dst) ||
1938 	    (newmask->key.tp_range.tp_min.src &&
1939 	     newmask->key.tp_range.tp_max.src))
1940 		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1941 
1942 	err = fl_init_mask_hashtable(newmask);
1943 	if (err)
1944 		goto errout_free;
1945 
1946 	fl_init_dissector(&newmask->dissector, &newmask->key);
1947 
1948 	INIT_LIST_HEAD_RCU(&newmask->filters);
1949 
1950 	refcount_set(&newmask->refcnt, 1);
1951 	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1952 				      &newmask->ht_node, mask_ht_params);
1953 	if (err)
1954 		goto errout_destroy;
1955 
1956 	spin_lock(&head->masks_lock);
1957 	list_add_tail_rcu(&newmask->list, &head->masks);
1958 	spin_unlock(&head->masks_lock);
1959 
1960 	return newmask;
1961 
1962 errout_destroy:
1963 	rhashtable_destroy(&newmask->ht);
1964 errout_free:
1965 	kfree(newmask);
1966 
1967 	return ERR_PTR(err);
1968 }
1969 
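/* Look up a mask with the same key in head->ht, taking a reference, or
 * create a new one if none exists. When overwriting (@fold is set), the
 * mask is not allowed to change. Returns 0 or a negative errno; -EAGAIN
 * means a concurrent update raced with us and the caller should retry.
 */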
1970 static int fl_check_assign_mask(struct cls_fl_head *head,
1971 				struct cls_fl_filter *fnew,
1972 				struct cls_fl_filter *fold,
1973 				struct fl_flow_mask *mask)
1974 {
1975 	struct fl_flow_mask *newmask;
1976 	int ret = 0;
1977 
1978 	rcu_read_lock();
1979 
1980 	/* Insert the mask as a temporary node to prevent concurrent creation
1981 	 * of a mask with the same key. Any concurrent lookup with the same key
1982 	 * will return -EAGAIN because the mask's refcnt is zero.
1983 	 */
1984 	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1985 						       &mask->ht_node,
1986 						       mask_ht_params);
1987 	if (!fnew->mask) {
1988 		rcu_read_unlock();
1989 
1990 		if (fold) {
1991 			ret = -EINVAL;
1992 			goto errout_cleanup;
1993 		}
1994 
1995 		newmask = fl_create_new_mask(head, mask);
1996 		if (IS_ERR(newmask)) {
1997 			ret = PTR_ERR(newmask);
1998 			goto errout_cleanup;
1999 		}
2000 
2001 		fnew->mask = newmask;
2002 		return 0;
2003 	} else if (IS_ERR(fnew->mask)) {
2004 		ret = PTR_ERR(fnew->mask);
2005 	} else if (fold && fold->mask != fnew->mask) {
2006 		ret = -EINVAL;
2007 	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
2008 		/* Mask was deleted concurrently, try again */
2009 		ret = -EAGAIN;
2010 	}
2011 	rcu_read_unlock();
2012 	return ret;
2013 
2014 errout_cleanup:
2015 	rhashtable_remove_fast(&head->ht, &mask->ht_node,
2016 			       mask_ht_params);
2017 	return ret;
2018 }
2019 
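/* Validate the actions and classid, parse the match key, compute the
 * masked key used for hashing and verify that the resulting mask fits
 * the chain template, if one is set.
 */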
2020 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
2021 			struct cls_fl_filter *f, struct fl_flow_mask *mask,
2022 			unsigned long base, struct nlattr **tb,
2023 			struct nlattr *est,
2024 			struct fl_flow_tmplt *tmplt,
2025 			u32 flags, u32 fl_flags,
2026 			struct netlink_ext_ack *extack)
2027 {
2028 	int err;
2029 
2030 	err = tcf_exts_validate_ex(net, tp, tb, est, &f->exts, flags,
2031 				   fl_flags, extack);
2032 	if (err < 0)
2033 		return err;
2034 
2035 	if (tb[TCA_FLOWER_CLASSID]) {
2036 		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
2037 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
2038 			rtnl_lock();
2039 		tcf_bind_filter(tp, &f->res, base);
2040 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
2041 			rtnl_unlock();
2042 	}
2043 
2044 	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
2045 	if (err)
2046 		return err;
2047 
2048 	fl_mask_update_range(mask);
2049 	fl_set_masked_key(&f->mkey, &f->key, mask);
2050 
2051 	if (!fl_mask_fits_tmplt(tmplt, mask)) {
2052 		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
2053 		return -EINVAL;
2054 	}
2055 
2056 	return 0;
2057 }
2058 
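/* Insert @fnew into its mask's filter hashtable. *in_ht tells the caller
 * whether the filter actually resides in the table; a duplicate key is
 * tolerated only when an existing filter is being overwritten.
 */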
2059 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
2060 			       struct cls_fl_filter *fold,
2061 			       bool *in_ht)
2062 {
2063 	struct fl_flow_mask *mask = fnew->mask;
2064 	int err;
2065 
2066 	err = rhashtable_lookup_insert_fast(&mask->ht,
2067 					    &fnew->ht_node,
2068 					    mask->filter_ht_params);
2069 	if (err) {
2070 		*in_ht = false;
2071 		/* It is okay if a filter with the same key exists when
2072 		 * overwriting.
2073 		 */
2074 		return fold && err == -EEXIST ? 0 : err;
2075 	}
2076 
2077 	*in_ht = true;
2078 	return 0;
2079 }
2080 
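/* Create a new filter or replace the existing one (*arg). Flower supports
 * unlocked operation: concurrent inserts, replacements and deletions are
 * resolved with tp->lock, per-filter refcounts and -EAGAIN retries.
 */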
2081 static int fl_change(struct net *net, struct sk_buff *in_skb,
2082 		     struct tcf_proto *tp, unsigned long base,
2083 		     u32 handle, struct nlattr **tca,
2084 		     void **arg, u32 flags,
2085 		     struct netlink_ext_ack *extack)
2086 {
2087 	struct cls_fl_head *head = fl_head_dereference(tp);
2088 	bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
2089 	struct cls_fl_filter *fold = *arg;
2090 	struct cls_fl_filter *fnew;
2091 	struct fl_flow_mask *mask;
2092 	struct nlattr **tb;
2093 	bool in_ht;
2094 	int err;
2095 
2096 	if (!tca[TCA_OPTIONS]) {
2097 		err = -EINVAL;
2098 		goto errout_fold;
2099 	}
2100 
2101 	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
2102 	if (!mask) {
2103 		err = -ENOBUFS;
2104 		goto errout_fold;
2105 	}
2106 
2107 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2108 	if (!tb) {
2109 		err = -ENOBUFS;
2110 		goto errout_mask_alloc;
2111 	}
2112 
2113 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2114 					  tca[TCA_OPTIONS], fl_policy, NULL);
2115 	if (err < 0)
2116 		goto errout_tb;
2117 
2118 	if (fold && handle && fold->handle != handle) {
2119 		err = -EINVAL;
2120 		goto errout_tb;
2121 	}
2122 
2123 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
2124 	if (!fnew) {
2125 		err = -ENOBUFS;
2126 		goto errout_tb;
2127 	}
2128 	INIT_LIST_HEAD(&fnew->hw_list);
2129 	refcount_set(&fnew->refcnt, 1);
2130 
2131 	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
2132 	if (err < 0)
2133 		goto errout;
2134 
2135 	if (tb[TCA_FLOWER_FLAGS]) {
2136 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
2137 
2138 		if (!tc_flags_valid(fnew->flags)) {
2139 			err = -EINVAL;
2140 			goto errout;
2141 		}
2142 	}
2143 
2144 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
2145 			   tp->chain->tmplt_priv, flags, fnew->flags,
2146 			   extack);
2147 	if (err)
2148 		goto errout;
2149 
2150 	err = fl_check_assign_mask(head, fnew, fold, mask);
2151 	if (err)
2152 		goto errout;
2153 
2154 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
2155 	if (err)
2156 		goto errout_mask;
2157 
2158 	if (!tc_skip_hw(fnew->flags)) {
2159 		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
2160 		if (err)
2161 			goto errout_ht;
2162 	}
2163 
2164 	if (!tc_in_hw(fnew->flags))
2165 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2166 
2167 	spin_lock(&tp->lock);
2168 
2169 	/* tp was deleted concurrently. -EAGAIN will cause the caller to look
2170 	 * up the proto again or create a new one, if necessary.
2171 	 */
2172 	if (tp->deleting) {
2173 		err = -EAGAIN;
2174 		goto errout_hw;
2175 	}
2176 
2177 	if (fold) {
2178 		/* Fold filter was deleted concurrently. Retry lookup. */
2179 		if (fold->deleted) {
2180 			err = -EAGAIN;
2181 			goto errout_hw;
2182 		}
2183 
2184 		fnew->handle = handle;
2185 
2186 		if (!in_ht) {
2187 			struct rhashtable_params params =
2188 				fnew->mask->filter_ht_params;
2189 
2190 			err = rhashtable_insert_fast(&fnew->mask->ht,
2191 						     &fnew->ht_node,
2192 						     params);
2193 			if (err)
2194 				goto errout_hw;
2195 			in_ht = true;
2196 		}
2197 
2198 		refcount_inc(&fnew->refcnt);
2199 		rhashtable_remove_fast(&fold->mask->ht,
2200 				       &fold->ht_node,
2201 				       fold->mask->filter_ht_params);
2202 		idr_replace(&head->handle_idr, fnew, fnew->handle);
2203 		list_replace_rcu(&fold->list, &fnew->list);
2204 		fold->deleted = true;
2205 
2206 		spin_unlock(&tp->lock);
2207 
2208 		fl_mask_put(head, fold->mask);
2209 		if (!tc_skip_hw(fold->flags))
2210 			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2211 		tcf_unbind_filter(tp, &fold->res);
2212 		/* The caller holds a reference to fold, so its refcnt is
2213 		 * always > 0 after this.
2214 		 */
2215 		refcount_dec(&fold->refcnt);
2216 		__fl_put(fold);
2217 	} else {
2218 		if (handle) {
2219 			/* user specified a handle that doesn't exist yet */
2220 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2221 					    handle, GFP_ATOMIC);
2222 
2223 			/* A filter with the specified handle was concurrently
2224 			 * inserted after the initial check in cls_api. This is
2225 			 * not necessarily an error if NLM_F_EXCL is not set in
2226 			 * the message flags. Returning -EAGAIN will cause
2227 			 * cls_api to try to update the concurrently inserted rule.
2228 			 */
2229 			if (err == -ENOSPC)
2230 				err = -EAGAIN;
2231 		} else {
2232 			handle = 1;
2233 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2234 					    INT_MAX, GFP_ATOMIC);
2235 		}
2236 		if (err)
2237 			goto errout_hw;
2238 
2239 		refcount_inc(&fnew->refcnt);
2240 		fnew->handle = handle;
2241 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2242 		spin_unlock(&tp->lock);
2243 	}
2244 
2245 	*arg = fnew;
2246 
2247 	kfree(tb);
2248 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2249 	return 0;
2250 
2251 errout_ht:
2252 	spin_lock(&tp->lock);
2253 errout_hw:
2254 	fnew->deleted = true;
2255 	spin_unlock(&tp->lock);
2256 	if (!tc_skip_hw(fnew->flags))
2257 		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2258 	if (in_ht)
2259 		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2260 				       fnew->mask->filter_ht_params);
2261 errout_mask:
2262 	fl_mask_put(head, fnew->mask);
2263 errout:
2264 	__fl_put(fnew);
2265 errout_tb:
2266 	kfree(tb);
2267 errout_mask_alloc:
2268 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2269 errout_fold:
2270 	if (fold)
2271 		__fl_put(fold);
2272 	return err;
2273 }
2274 
2275 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2276 		     bool rtnl_held, struct netlink_ext_ack *extack)
2277 {
2278 	struct cls_fl_head *head = fl_head_dereference(tp);
2279 	struct cls_fl_filter *f = arg;
2280 	bool last_on_mask;
2281 	int err = 0;
2282 
2283 	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2284 	*last = list_empty(&head->masks);
2285 	__fl_put(f);
2286 
2287 	return err;
2288 }
2289 
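/* Iterate all filters via head->handle_idr. The RCU lock is dropped
 * around the callback, so each filter is pinned with a refcount first;
 * filters already being deleted are skipped.
 */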
2290 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2291 		    bool rtnl_held)
2292 {
2293 	struct cls_fl_head *head = fl_head_dereference(tp);
2294 	unsigned long id = arg->cookie, tmp;
2295 	struct cls_fl_filter *f;
2296 
2297 	arg->count = arg->skip;
2298 
2299 	rcu_read_lock();
2300 	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2301 		/* don't return filters that are being deleted */
2302 		if (!refcount_inc_not_zero(&f->refcnt))
2303 			continue;
2304 		rcu_read_unlock();
2305 
2306 		if (arg->fn(tp, f, arg) < 0) {
2307 			__fl_put(f);
2308 			arg->stop = 1;
2309 			rcu_read_lock();
2310 			break;
2311 		}
2312 		__fl_put(f);
2313 		arg->count++;
2314 		rcu_read_lock();
2315 	}
2316 	rcu_read_unlock();
2317 	arg->cookie = id;
2318 }
2319 
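/* Return the next offloaded filter after @f (or the first one if @f is
 * NULL), with a reference taken, under tp->lock. Filters marked deleted
 * are skipped when adding offloads.
 */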
2320 static struct cls_fl_filter *
2321 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2322 {
2323 	struct cls_fl_head *head = fl_head_dereference(tp);
2324 
2325 	spin_lock(&tp->lock);
2326 	if (list_empty(&head->hw_filters)) {
2327 		spin_unlock(&tp->lock);
2328 		return NULL;
2329 	}
2330 
2331 	if (!f)
2332 		f = list_entry(&head->hw_filters, struct cls_fl_filter,
2333 			       hw_list);
2334 	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2335 		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2336 			spin_unlock(&tp->lock);
2337 			return f;
2338 		}
2339 	}
2340 
2341 	spin_unlock(&tp->lock);
2342 	return NULL;
2343 }
2344 
2345 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2346 			void *cb_priv, struct netlink_ext_ack *extack)
2347 {
2348 	struct tcf_block *block = tp->chain->block;
2349 	struct flow_cls_offload cls_flower = {};
2350 	struct cls_fl_filter *f = NULL;
2351 	int err;
2352 
2353 	/* The hw_filters list can only be changed by hw offload functions
2354 	 * after obtaining the rtnl lock. Make sure it is not changed while
2355 	 * reoffload is iterating it.
2356 	 */
2357 	ASSERT_RTNL();
2358 
2359 	while ((f = fl_get_next_hw_filter(tp, f, add))) {
2360 		cls_flower.rule =
2361 			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2362 		if (!cls_flower.rule) {
2363 			__fl_put(f);
2364 			return -ENOMEM;
2365 		}
2366 
2367 		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2368 					   extack);
2369 		cls_flower.command = add ?
2370 			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2371 		cls_flower.cookie = (unsigned long)f;
2372 		cls_flower.rule->match.dissector = &f->mask->dissector;
2373 		cls_flower.rule->match.mask = &f->mask->key;
2374 		cls_flower.rule->match.key = &f->mkey;
2375 
2376 		err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
2377 					      cls_flower.common.extack);
2378 		if (err) {
2379 			kfree(cls_flower.rule);
2380 			if (tc_skip_sw(f->flags)) {
2381 				__fl_put(f);
2382 				return err;
2383 			}
2384 			goto next_flow;
2385 		}
2386 
2387 		cls_flower.classid = f->res.classid;
2388 
2389 		err = tc_setup_cb_reoffload(block, tp, add, cb,
2390 					    TC_SETUP_CLSFLOWER, &cls_flower,
2391 					    cb_priv, &f->flags,
2392 					    &f->in_hw_count);
2393 		tc_cleanup_offload_action(&cls_flower.rule->action);
2394 		kfree(cls_flower.rule);
2395 
2396 		if (err) {
2397 			__fl_put(f);
2398 			return err;
2399 		}
2400 next_flow:
2401 		__fl_put(f);
2402 	}
2403 
2404 	return 0;
2405 }
2406 
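/* Called by the offload core when a filter is added to or removed from
 * hardware, keeping head->hw_filters in sync for fl_reoffload().
 */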
2407 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2408 {
2409 	struct flow_cls_offload *cls_flower = type_data;
2410 	struct cls_fl_filter *f =
2411 		(struct cls_fl_filter *) cls_flower->cookie;
2412 	struct cls_fl_head *head = fl_head_dereference(tp);
2413 
2414 	spin_lock(&tp->lock);
2415 	list_add(&f->hw_list, &head->hw_filters);
2416 	spin_unlock(&tp->lock);
2417 }
2418 
2419 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2420 {
2421 	struct flow_cls_offload *cls_flower = type_data;
2422 	struct cls_fl_filter *f =
2423 		(struct cls_fl_filter *) cls_flower->cookie;
2424 
2425 	spin_lock(&tp->lock);
2426 	if (!list_empty(&f->hw_list))
2427 		list_del_init(&f->hw_list);
2428 	spin_unlock(&tp->lock);
2429 }
2430 
2431 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2432 			      struct fl_flow_tmplt *tmplt)
2433 {
2434 	struct flow_cls_offload cls_flower = {};
2435 	struct tcf_block *block = chain->block;
2436 
2437 	cls_flower.rule = flow_rule_alloc(0);
2438 	if (!cls_flower.rule)
2439 		return -ENOMEM;
2440 
2441 	cls_flower.common.chain_index = chain->index;
2442 	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2443 	cls_flower.cookie = (unsigned long) tmplt;
2444 	cls_flower.rule->match.dissector = &tmplt->dissector;
2445 	cls_flower.rule->match.mask = &tmplt->mask;
2446 	cls_flower.rule->match.key = &tmplt->dummy_key;
2447 
2448 	/* We don't care if any of the drivers fails to handle this call;
2449 	 * it serves only as a hint to them.
2450 	 */
2451 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2452 	kfree(cls_flower.rule);
2453 
2454 	return 0;
2455 }
2456 
2457 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2458 				struct fl_flow_tmplt *tmplt)
2459 {
2460 	struct flow_cls_offload cls_flower = {};
2461 	struct tcf_block *block = chain->block;
2462 
2463 	cls_flower.common.chain_index = chain->index;
2464 	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2465 	cls_flower.cookie = (unsigned long) tmplt;
2466 
2467 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2468 }
2469 
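/* Parse a chain template: its mask constrains which fields future
 * filters on this chain may match on, and it is offered to drivers as a
 * hint via FLOW_CLS_TMPLT_CREATE.
 */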
2470 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2471 			     struct nlattr **tca,
2472 			     struct netlink_ext_ack *extack)
2473 {
2474 	struct fl_flow_tmplt *tmplt;
2475 	struct nlattr **tb;
2476 	int err;
2477 
2478 	if (!tca[TCA_OPTIONS])
2479 		return ERR_PTR(-EINVAL);
2480 
2481 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2482 	if (!tb)
2483 		return ERR_PTR(-ENOBUFS);
2484 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2485 					  tca[TCA_OPTIONS], fl_policy, NULL);
2486 	if (err)
2487 		goto errout_tb;
2488 
2489 	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2490 	if (!tmplt) {
2491 		err = -ENOMEM;
2492 		goto errout_tb;
2493 	}
2494 	tmplt->chain = chain;
2495 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2496 	if (err)
2497 		goto errout_tmplt;
2498 
2499 	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2500 
2501 	err = fl_hw_create_tmplt(chain, tmplt);
2502 	if (err)
2503 		goto errout_tmplt;
2504 
2505 	kfree(tb);
2506 	return tmplt;
2507 
2508 errout_tmplt:
2509 	kfree(tmplt);
2510 errout_tb:
2511 	kfree(tb);
2512 	return ERR_PTR(err);
2513 }
2514 
2515 static void fl_tmplt_destroy(void *tmplt_priv)
2516 {
2517 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2518 
2519 	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2520 	kfree(tmplt);
2521 }
2522 
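/* Dump a key/mask pair. Nothing is emitted when the mask is all zeroes;
 * the mask attribute itself is omitted for TCA_FLOWER_UNSPEC.
 */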
2523 static int fl_dump_key_val(struct sk_buff *skb,
2524 			   void *val, int val_type,
2525 			   void *mask, int mask_type, int len)
2526 {
2527 	int err;
2528 
2529 	if (!memchr_inv(mask, 0, len))
2530 		return 0;
2531 	err = nla_put(skb, val_type, len, val);
2532 	if (err)
2533 		return err;
2534 	if (mask_type != TCA_FLOWER_UNSPEC) {
2535 		err = nla_put(skb, mask_type, len, mask);
2536 		if (err)
2537 			return err;
2538 	}
2539 	return 0;
2540 }
2541 
2542 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2543 				  struct fl_flow_key *mask)
2544 {
2545 	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2546 			    TCA_FLOWER_KEY_PORT_DST_MIN,
2547 			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2548 			    sizeof(key->tp_range.tp_min.dst)) ||
2549 	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2550 			    TCA_FLOWER_KEY_PORT_DST_MAX,
2551 			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2552 			    sizeof(key->tp_range.tp_max.dst)) ||
2553 	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2554 			    TCA_FLOWER_KEY_PORT_SRC_MIN,
2555 			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2556 			    sizeof(key->tp_range.tp_min.src)) ||
2557 	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2558 			    TCA_FLOWER_KEY_PORT_SRC_MAX,
2559 			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2560 			    sizeof(key->tp_range.tp_max.src)))
2561 		return -1;
2562 
2563 	return 0;
2564 }
2565 
2566 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2567 				    struct flow_dissector_key_mpls *mpls_key,
2568 				    struct flow_dissector_key_mpls *mpls_mask,
2569 				    u8 lse_index)
2570 {
2571 	struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2572 	struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2573 	int err;
2574 
2575 	err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2576 			 lse_index + 1);
2577 	if (err)
2578 		return err;
2579 
2580 	if (lse_mask->mpls_ttl) {
2581 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2582 				 lse_key->mpls_ttl);
2583 		if (err)
2584 			return err;
2585 	}
2586 	if (lse_mask->mpls_bos) {
2587 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2588 				 lse_key->mpls_bos);
2589 		if (err)
2590 			return err;
2591 	}
2592 	if (lse_mask->mpls_tc) {
2593 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2594 				 lse_key->mpls_tc);
2595 		if (err)
2596 			return err;
2597 	}
2598 	if (lse_mask->mpls_label) {
2599 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2600 				  lse_key->mpls_label);
2601 		if (err)
2602 			return err;
2603 	}
2604 
2605 	return 0;
2606 }
2607 
2608 static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2609 				 struct flow_dissector_key_mpls *mpls_key,
2610 				 struct flow_dissector_key_mpls *mpls_mask)
2611 {
2612 	struct nlattr *opts;
2613 	struct nlattr *lse;
2614 	u8 lse_index;
2615 	int err;
2616 
2617 	opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2618 	if (!opts)
2619 		return -EMSGSIZE;
2620 
2621 	for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2622 		if (!(mpls_mask->used_lses & 1 << lse_index))
2623 			continue;
2624 
2625 		lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2626 		if (!lse) {
2627 			err = -EMSGSIZE;
2628 			goto err_opts;
2629 		}
2630 
2631 		err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2632 					       lse_index);
2633 		if (err)
2634 			goto err_opts_lse;
2635 		nla_nest_end(skb, lse);
2636 	}
2637 	nla_nest_end(skb, opts);
2638 
2639 	return 0;
2640 
2641 err_opts_lse:
2642 	nla_nest_cancel(skb, lse);
2643 err_opts:
2644 	nla_nest_cancel(skb, opts);
2645 
2646 	return err;
2647 }
2648 
2649 static int fl_dump_key_mpls(struct sk_buff *skb,
2650 			    struct flow_dissector_key_mpls *mpls_key,
2651 			    struct flow_dissector_key_mpls *mpls_mask)
2652 {
2653 	struct flow_dissector_mpls_lse *lse_mask;
2654 	struct flow_dissector_mpls_lse *lse_key;
2655 	int err;
2656 
2657 	if (!mpls_mask->used_lses)
2658 		return 0;
2659 
2660 	lse_mask = &mpls_mask->ls[0];
2661 	lse_key = &mpls_key->ls[0];
2662 
2663 	/* For backward compatibility, don't use the MPLS nested attributes if
2664 	 * the rule can be expressed using the old attributes.
2665 	 */
2666 	if (mpls_mask->used_lses & ~1 ||
2667 	    (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2668 	     !lse_mask->mpls_tc && !lse_mask->mpls_label))
2669 		return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2670 
2671 	if (lse_mask->mpls_ttl) {
2672 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2673 				 lse_key->mpls_ttl);
2674 		if (err)
2675 			return err;
2676 	}
2677 	if (lse_mask->mpls_tc) {
2678 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2679 				 lse_key->mpls_tc);
2680 		if (err)
2681 			return err;
2682 	}
2683 	if (lse_mask->mpls_label) {
2684 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2685 				  lse_key->mpls_label);
2686 		if (err)
2687 			return err;
2688 	}
2689 	if (lse_mask->mpls_bos) {
2690 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2691 				 lse_key->mpls_bos);
2692 		if (err)
2693 			return err;
2694 	}
2695 	return 0;
2696 }
2697 
2698 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2699 			  struct flow_dissector_key_ip *key,
2700 			  struct flow_dissector_key_ip *mask)
2701 {
2702 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2703 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2704 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2705 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2706 
2707 	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2708 	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2709 		return -1;
2710 
2711 	return 0;
2712 }
2713 
2714 static int fl_dump_key_vlan(struct sk_buff *skb,
2715 			    int vlan_id_key, int vlan_prio_key,
2716 			    struct flow_dissector_key_vlan *vlan_key,
2717 			    struct flow_dissector_key_vlan *vlan_mask)
2718 {
2719 	int err;
2720 
2721 	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2722 		return 0;
2723 	if (vlan_mask->vlan_id) {
2724 		err = nla_put_u16(skb, vlan_id_key,
2725 				  vlan_key->vlan_id);
2726 		if (err)
2727 			return err;
2728 	}
2729 	if (vlan_mask->vlan_priority) {
2730 		err = nla_put_u8(skb, vlan_prio_key,
2731 				 vlan_key->vlan_priority);
2732 		if (err)
2733 			return err;
2734 	}
2735 	return 0;
2736 }
2737 
2738 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2739 			    u32 *flower_key, u32 *flower_mask,
2740 			    u32 flower_flag_bit, u32 dissector_flag_bit)
2741 {
2742 	if (dissector_mask & dissector_flag_bit) {
2743 		*flower_mask |= flower_flag_bit;
2744 		if (dissector_key & dissector_flag_bit)
2745 			*flower_key |= flower_flag_bit;
2746 	}
2747 }
2748 
2749 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2750 {
2751 	u32 key, mask;
2752 	__be32 _key, _mask;
2753 	int err;
2754 
2755 	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2756 		return 0;
2757 
2758 	key = 0;
2759 	mask = 0;
2760 
2761 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2762 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2763 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2764 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2765 			FLOW_DIS_FIRST_FRAG);
2766 
2767 	_key = cpu_to_be32(key);
2768 	_mask = cpu_to_be32(mask);
2769 
2770 	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2771 	if (err)
2772 		return err;
2773 
2774 	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2775 }
2776 
2777 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2778 				  struct flow_dissector_key_enc_opts *enc_opts)
2779 {
2780 	struct geneve_opt *opt;
2781 	struct nlattr *nest;
2782 	int opt_off = 0;
2783 
2784 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2785 	if (!nest)
2786 		goto nla_put_failure;
2787 
2788 	while (enc_opts->len > opt_off) {
2789 		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2790 
2791 		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2792 				 opt->opt_class))
2793 			goto nla_put_failure;
2794 		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2795 			       opt->type))
2796 			goto nla_put_failure;
2797 		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2798 			    opt->length * 4, opt->opt_data))
2799 			goto nla_put_failure;
2800 
2801 		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2802 	}
2803 	nla_nest_end(skb, nest);
2804 	return 0;
2805 
2806 nla_put_failure:
2807 	nla_nest_cancel(skb, nest);
2808 	return -EMSGSIZE;
2809 }
2810 
2811 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2812 				 struct flow_dissector_key_enc_opts *enc_opts)
2813 {
2814 	struct vxlan_metadata *md;
2815 	struct nlattr *nest;
2816 
2817 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2818 	if (!nest)
2819 		goto nla_put_failure;
2820 
2821 	md = (struct vxlan_metadata *)&enc_opts->data[0];
2822 	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2823 		goto nla_put_failure;
2824 
2825 	nla_nest_end(skb, nest);
2826 	return 0;
2827 
2828 nla_put_failure:
2829 	nla_nest_cancel(skb, nest);
2830 	return -EMSGSIZE;
2831 }
2832 
2833 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2834 				  struct flow_dissector_key_enc_opts *enc_opts)
2835 {
2836 	struct erspan_metadata *md;
2837 	struct nlattr *nest;
2838 
2839 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2840 	if (!nest)
2841 		goto nla_put_failure;
2842 
2843 	md = (struct erspan_metadata *)&enc_opts->data[0];
2844 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2845 		goto nla_put_failure;
2846 
2847 	if (md->version == 1 &&
2848 	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2849 		goto nla_put_failure;
2850 
2851 	if (md->version == 2 &&
2852 	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2853 			md->u.md2.dir) ||
2854 	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2855 			get_hwid(&md->u.md2))))
2856 		goto nla_put_failure;
2857 
2858 	nla_nest_end(skb, nest);
2859 	return 0;
2860 
2861 nla_put_failure:
2862 	nla_nest_cancel(skb, nest);
2863 	return -EMSGSIZE;
2864 }
2865 
2866 static int fl_dump_key_gtp_opt(struct sk_buff *skb,
2867 			       struct flow_dissector_key_enc_opts *enc_opts)
2869 {
2870 	struct gtp_pdu_session_info *session_info;
2871 	struct nlattr *nest;
2872 
2873 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP);
2874 	if (!nest)
2875 		goto nla_put_failure;
2876 
2877 	session_info = (struct gtp_pdu_session_info *)&enc_opts->data[0];
2878 
2879 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE,
2880 		       session_info->pdu_type))
2881 		goto nla_put_failure;
2882 
2883 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, session_info->qfi))
2884 		goto nla_put_failure;
2885 
2886 	nla_nest_end(skb, nest);
2887 	return 0;
2888 
2889 nla_put_failure:
2890 	nla_nest_cancel(skb, nest);
2891 	return -EMSGSIZE;
2892 }
2893 
2894 static int fl_dump_key_ct(struct sk_buff *skb,
2895 			  struct flow_dissector_key_ct *key,
2896 			  struct flow_dissector_key_ct *mask)
2897 {
2898 	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2899 	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2900 			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2901 			    sizeof(key->ct_state)))
2902 		goto nla_put_failure;
2903 
2904 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2905 	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2906 			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2907 			    sizeof(key->ct_zone)))
2908 		goto nla_put_failure;
2909 
2910 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2911 	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2912 			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2913 			    sizeof(key->ct_mark)))
2914 		goto nla_put_failure;
2915 
2916 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2917 	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2918 			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2919 			    sizeof(key->ct_labels)))
2920 		goto nla_put_failure;
2921 
2922 	return 0;
2923 
2924 nla_put_failure:
2925 	return -EMSGSIZE;
2926 }
2927 
2928 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2929 			       struct flow_dissector_key_enc_opts *enc_opts)
2930 {
2931 	struct nlattr *nest;
2932 	int err;
2933 
2934 	if (!enc_opts->len)
2935 		return 0;
2936 
2937 	nest = nla_nest_start_noflag(skb, enc_opt_type);
2938 	if (!nest)
2939 		goto nla_put_failure;
2940 
2941 	switch (enc_opts->dst_opt_type) {
2942 	case TUNNEL_GENEVE_OPT:
2943 		err = fl_dump_key_geneve_opt(skb, enc_opts);
2944 		if (err)
2945 			goto nla_put_failure;
2946 		break;
2947 	case TUNNEL_VXLAN_OPT:
2948 		err = fl_dump_key_vxlan_opt(skb, enc_opts);
2949 		if (err)
2950 			goto nla_put_failure;
2951 		break;
2952 	case TUNNEL_ERSPAN_OPT:
2953 		err = fl_dump_key_erspan_opt(skb, enc_opts);
2954 		if (err)
2955 			goto nla_put_failure;
2956 		break;
2957 	case TUNNEL_GTP_OPT:
2958 		err = fl_dump_key_gtp_opt(skb, enc_opts);
2959 		if (err)
2960 			goto nla_put_failure;
2961 		break;
2962 	default:
2963 		goto nla_put_failure;
2964 	}
2965 	nla_nest_end(skb, nest);
2966 	return 0;
2967 
2968 nla_put_failure:
2969 	nla_nest_cancel(skb, nest);
2970 	return -EMSGSIZE;
2971 }
2972 
2973 static int fl_dump_key_enc_opt(struct sk_buff *skb,
2974 			       struct flow_dissector_key_enc_opts *key_opts,
2975 			       struct flow_dissector_key_enc_opts *msk_opts)
2976 {
2977 	int err;
2978 
2979 	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2980 	if (err)
2981 		return err;
2982 
2983 	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2984 }
2985 
2986 static int fl_dump_key(struct sk_buff *skb, struct net *net,
2987 		       struct fl_flow_key *key, struct fl_flow_key *mask)
2988 {
2989 	if (mask->meta.ingress_ifindex) {
2990 		struct net_device *dev;
2991 
2992 		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
2993 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2994 			goto nla_put_failure;
2995 	}
2996 
2997 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2998 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2999 			    sizeof(key->eth.dst)) ||
3000 	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
3001 			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
3002 			    sizeof(key->eth.src)) ||
3003 	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
3004 			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
3005 			    sizeof(key->basic.n_proto)))
3006 		goto nla_put_failure;
3007 
3008 	if (mask->num_of_vlans.num_of_vlans) {
3009 		if (nla_put_u8(skb, TCA_FLOWER_KEY_NUM_OF_VLANS, key->num_of_vlans.num_of_vlans))
3010 			goto nla_put_failure;
3011 	}
3012 
3013 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
3014 		goto nla_put_failure;
3015 
3016 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
3017 			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
3018 		goto nla_put_failure;
3019 
3020 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
3021 			     TCA_FLOWER_KEY_CVLAN_PRIO,
3022 			     &key->cvlan, &mask->cvlan) ||
3023 	    (mask->cvlan.vlan_tpid &&
3024 	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3025 			  key->cvlan.vlan_tpid)))
3026 		goto nla_put_failure;
3027 
3028 	if (mask->basic.n_proto) {
3029 		if (mask->cvlan.vlan_eth_type) {
3030 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
3031 					 key->basic.n_proto))
3032 				goto nla_put_failure;
3033 		} else if (mask->vlan.vlan_eth_type) {
3034 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3035 					 key->vlan.vlan_eth_type))
3036 				goto nla_put_failure;
3037 		}
3038 	}
3039 
3040 	if ((key->basic.n_proto == htons(ETH_P_IP) ||
3041 	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
3042 	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
3043 			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
3044 			    sizeof(key->basic.ip_proto)) ||
3045 	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
3046 		goto nla_put_failure;
3047 
3048 	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3049 	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
3050 			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
3051 			     sizeof(key->ipv4.src)) ||
3052 	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
3053 			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
3054 			     sizeof(key->ipv4.dst))))
3055 		goto nla_put_failure;
3056 	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3057 		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
3058 				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
3059 				  sizeof(key->ipv6.src)) ||
3060 		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
3061 				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
3062 				  sizeof(key->ipv6.dst))))
3063 		goto nla_put_failure;
3064 
3065 	if (key->basic.ip_proto == IPPROTO_TCP &&
3066 	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
3067 			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
3068 			     sizeof(key->tp.src)) ||
3069 	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
3070 			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
3071 			     sizeof(key->tp.dst)) ||
3072 	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
3073 			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
3074 			     sizeof(key->tcp.flags))))
3075 		goto nla_put_failure;
3076 	else if (key->basic.ip_proto == IPPROTO_UDP &&
3077 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
3078 				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
3079 				  sizeof(key->tp.src)) ||
3080 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
3081 				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
3082 				  sizeof(key->tp.dst))))
3083 		goto nla_put_failure;
3084 	else if (key->basic.ip_proto == IPPROTO_SCTP &&
3085 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
3086 				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
3087 				  sizeof(key->tp.src)) ||
3088 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
3089 				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
3090 				  sizeof(key->tp.dst))))
3091 		goto nla_put_failure;
3092 	else if (key->basic.n_proto == htons(ETH_P_IP) &&
3093 		 key->basic.ip_proto == IPPROTO_ICMP &&
3094 		 (fl_dump_key_val(skb, &key->icmp.type,
3095 				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
3096 				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
3097 				  sizeof(key->icmp.type)) ||
3098 		  fl_dump_key_val(skb, &key->icmp.code,
3099 				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
3100 				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
3101 				  sizeof(key->icmp.code))))
3102 		goto nla_put_failure;
3103 	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
3104 		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
3105 		 (fl_dump_key_val(skb, &key->icmp.type,
3106 				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
3107 				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
3108 				  sizeof(key->icmp.type)) ||
3109 		  fl_dump_key_val(skb, &key->icmp.code,
3110 				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
3111 				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
3112 				  sizeof(key->icmp.code))))
3113 		goto nla_put_failure;
3114 	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
3115 		  key->basic.n_proto == htons(ETH_P_RARP)) &&
3116 		 (fl_dump_key_val(skb, &key->arp.sip,
3117 				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
3118 				  TCA_FLOWER_KEY_ARP_SIP_MASK,
3119 				  sizeof(key->arp.sip)) ||
3120 		  fl_dump_key_val(skb, &key->arp.tip,
3121 				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
3122 				  TCA_FLOWER_KEY_ARP_TIP_MASK,
3123 				  sizeof(key->arp.tip)) ||
3124 		  fl_dump_key_val(skb, &key->arp.op,
3125 				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
3126 				  TCA_FLOWER_KEY_ARP_OP_MASK,
3127 				  sizeof(key->arp.op)) ||
3128 		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
3129 				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
3130 				  sizeof(key->arp.sha)) ||
3131 		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
3132 				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
3133 				  sizeof(key->arp.tha))))
3134 		goto nla_put_failure;
3135 
3136 	if ((key->basic.ip_proto == IPPROTO_TCP ||
3137 	     key->basic.ip_proto == IPPROTO_UDP ||
3138 	     key->basic.ip_proto == IPPROTO_SCTP) &&
3139 	     fl_dump_key_port_range(skb, key, mask))
3140 		goto nla_put_failure;
3141 
3142 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3143 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
3144 			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
3145 			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
3146 			    sizeof(key->enc_ipv4.src)) ||
3147 	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
3148 			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
3149 			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
3150 			     sizeof(key->enc_ipv4.dst))))
3151 		goto nla_put_failure;
3152 	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3153 		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
3154 			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
3155 			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
3156 			    sizeof(key->enc_ipv6.src)) ||
3157 		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
3158 				 TCA_FLOWER_KEY_ENC_IPV6_DST,
3159 				 &mask->enc_ipv6.dst,
3160 				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
3161 			    sizeof(key->enc_ipv6.dst))))
3162 		goto nla_put_failure;
3163 
3164 	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
3165 			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
3166 			    sizeof(key->enc_key_id)) ||
3167 	    fl_dump_key_val(skb, &key->enc_tp.src,
3168 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
3169 			    &mask->enc_tp.src,
3170 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
3171 			    sizeof(key->enc_tp.src)) ||
3172 	    fl_dump_key_val(skb, &key->enc_tp.dst,
3173 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
3174 			    &mask->enc_tp.dst,
3175 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
3176 			    sizeof(key->enc_tp.dst)) ||
3177 	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
3178 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
3179 		goto nla_put_failure;
3180 
3181 	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
3182 		goto nla_put_failure;
3183 
3184 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
3185 		goto nla_put_failure;
3186 
3187 	if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
3188 			     &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
3189 			     sizeof(key->hash.hash)))
3190 		goto nla_put_failure;
3191 
3192 	return 0;
3193 
3194 nla_put_failure:
3195 	return -EMSGSIZE;
3196 }
3197 
3198 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
3199 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3200 {
3201 	struct cls_fl_filter *f = fh;
3202 	struct nlattr *nest;
3203 	struct fl_flow_key *key, *mask;
3204 	bool skip_hw;
3205 
3206 	if (!f)
3207 		return skb->len;
3208 
3209 	t->tcm_handle = f->handle;
3210 
3211 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3212 	if (!nest)
3213 		goto nla_put_failure;
3214 
3215 	spin_lock(&tp->lock);
3216 
3217 	if (f->res.classid &&
3218 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
3219 		goto nla_put_failure_locked;
3220 
3221 	key = &f->key;
3222 	mask = &f->mask->key;
3223 	skip_hw = tc_skip_hw(f->flags);
3224 
3225 	if (fl_dump_key(skb, net, key, mask))
3226 		goto nla_put_failure_locked;
3227 
3228 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3229 		goto nla_put_failure_locked;
3230 
3231 	spin_unlock(&tp->lock);
3232 
3233 	if (!skip_hw)
3234 		fl_hw_update_stats(tp, f, rtnl_held);
3235 
3236 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3237 		goto nla_put_failure;
3238 
3239 	if (tcf_exts_dump(skb, &f->exts))
3240 		goto nla_put_failure;
3241 
3242 	nla_nest_end(skb, nest);
3243 
3244 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3245 		goto nla_put_failure;
3246 
3247 	return skb->len;
3248 
3249 nla_put_failure_locked:
3250 	spin_unlock(&tp->lock);
3251 nla_put_failure:
3252 	nla_nest_cancel(skb, nest);
3253 	return -1;
3254 }
3255 
3256 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3257 			 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3258 {
3259 	struct cls_fl_filter *f = fh;
3260 	struct nlattr *nest;
3261 	bool skip_hw;
3262 
3263 	if (!f)
3264 		return skb->len;
3265 
3266 	t->tcm_handle = f->handle;
3267 
3268 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3269 	if (!nest)
3270 		goto nla_put_failure;
3271 
3272 	spin_lock(&tp->lock);
3273 
3274 	skip_hw = tc_skip_hw(f->flags);
3275 
3276 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3277 		goto nla_put_failure_locked;
3278 
3279 	spin_unlock(&tp->lock);
3280 
3281 	if (!skip_hw)
3282 		fl_hw_update_stats(tp, f, rtnl_held);
3283 
3284 	if (tcf_exts_terse_dump(skb, &f->exts))
3285 		goto nla_put_failure;
3286 
3287 	nla_nest_end(skb, nest);
3288 
3289 	return skb->len;
3290 
3291 nla_put_failure_locked:
3292 	spin_unlock(&tp->lock);
3293 nla_put_failure:
3294 	nla_nest_cancel(skb, nest);
3295 	return -1;
3296 }
3297 
3298 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3299 {
3300 	struct fl_flow_tmplt *tmplt = tmplt_priv;
3301 	struct fl_flow_key *key, *mask;
3302 	struct nlattr *nest;
3303 
3304 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3305 	if (!nest)
3306 		goto nla_put_failure;
3307 
3308 	key = &tmplt->dummy_key;
3309 	mask = &tmplt->mask;
3310 
3311 	if (fl_dump_key(skb, net, key, mask))
3312 		goto nla_put_failure;
3313 
3314 	nla_nest_end(skb, nest);
3315 
3316 	return skb->len;
3317 
3318 nla_put_failure:
3319 	nla_nest_cancel(skb, nest);
3320 	return -EMSGSIZE;
3321 }
3322 
3323 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3324 			  unsigned long base)
3325 {
3326 	struct cls_fl_filter *f = fh;
3327 
3328 	if (f && f->res.classid == classid) {
3329 		if (cl)
3330 			__tcf_bind_filter(q, &f->res, base);
3331 		else
3332 			__tcf_unbind_filter(q, &f->res);
3333 	}
3334 }
3335 
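/* Mark tp as deleting when its last filter is gone. Done under tp->lock
 * so a concurrent fl_change() observes the flag and retries.
 */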
3336 static bool fl_delete_empty(struct tcf_proto *tp)
3337 {
3338 	struct cls_fl_head *head = fl_head_dereference(tp);
3339 
3340 	spin_lock(&tp->lock);
3341 	tp->deleting = idr_is_empty(&head->handle_idr);
3342 	spin_unlock(&tp->lock);
3343 
3344 	return tp->deleting;
3345 }
3346 
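/*
 * Illustrative iproute2 usage of this classifier (shown here only as a
 * hint; the authoritative reference is the tc-flower(8) man page):
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		ip_proto tcp dst_port 80 skip_hw action drop
 */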
3347 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3348 	.kind		= "flower",
3349 	.classify	= fl_classify,
3350 	.init		= fl_init,
3351 	.destroy	= fl_destroy,
3352 	.get		= fl_get,
3353 	.put		= fl_put,
3354 	.change		= fl_change,
3355 	.delete		= fl_delete,
3356 	.delete_empty	= fl_delete_empty,
3357 	.walk		= fl_walk,
3358 	.reoffload	= fl_reoffload,
3359 	.hw_add		= fl_hw_add,
3360 	.hw_del		= fl_hw_del,
3361 	.dump		= fl_dump,
3362 	.terse_dump	= fl_terse_dump,
3363 	.bind_class	= fl_bind_class,
3364 	.tmplt_create	= fl_tmplt_create,
3365 	.tmplt_destroy	= fl_tmplt_destroy,
3366 	.tmplt_dump	= fl_tmplt_dump,
3367 	.owner		= THIS_MODULE,
3368 	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
3369 };
3370 
3371 static int __init cls_fl_init(void)
3372 {
3373 	return register_tcf_proto_ops(&cls_fl_ops);
3374 }
3375 
3376 static void __exit cls_fl_exit(void)
3377 {
3378 	unregister_tcf_proto_ops(&cls_fl_ops);
3379 }
3380 
3381 module_init(cls_fl_init);
3382 module_exit(cls_fl_exit);
3383 
3384 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3385 MODULE_DESCRIPTION("Flower classifier");
3386 MODULE_LICENSE("GPL v2");
3387