xref: /linux/net/sched/cls_flower.c (revision a997157e42e3119b13c644549a3d8381a1d825d6)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/cls_flower.c		Flower classifier
4  *
5  * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/rhashtable.h>
12 #include <linux/workqueue.h>
13 #include <linux/refcount.h>
14 
15 #include <linux/if_ether.h>
16 #include <linux/in6.h>
17 #include <linux/ip.h>
18 #include <linux/mpls.h>
19 
20 #include <net/sch_generic.h>
21 #include <net/pkt_cls.h>
22 #include <net/pkt_sched.h>
23 #include <net/ip.h>
24 #include <net/flow_dissector.h>
25 #include <net/geneve.h>
26 #include <net/vxlan.h>
27 #include <net/erspan.h>
28 #include <net/gtp.h>
29 
30 #include <net/dst.h>
31 #include <net/dst_metadata.h>
32 
33 #include <uapi/linux/netfilter/nf_conntrack_common.h>
34 
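/* The last defined conntrack flag is one below __TCA_FLOWER_KEY_CT_FLAGS_MAX,
 * so MAX evaluates to the first unused bit above it and MASK covers every
 * defined flag bit.
 */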
35 #define TCA_FLOWER_KEY_CT_FLAGS_MAX \
36 		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
37 #define TCA_FLOWER_KEY_CT_FLAGS_MASK \
38 		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)
39 
40 struct fl_flow_key {
41 	struct flow_dissector_key_meta meta;
42 	struct flow_dissector_key_control control;
43 	struct flow_dissector_key_control enc_control;
44 	struct flow_dissector_key_basic basic;
45 	struct flow_dissector_key_eth_addrs eth;
46 	struct flow_dissector_key_vlan vlan;
47 	struct flow_dissector_key_vlan cvlan;
48 	union {
49 		struct flow_dissector_key_ipv4_addrs ipv4;
50 		struct flow_dissector_key_ipv6_addrs ipv6;
51 	};
52 	struct flow_dissector_key_ports tp;
53 	struct flow_dissector_key_icmp icmp;
54 	struct flow_dissector_key_arp arp;
55 	struct flow_dissector_key_keyid enc_key_id;
56 	union {
57 		struct flow_dissector_key_ipv4_addrs enc_ipv4;
58 		struct flow_dissector_key_ipv6_addrs enc_ipv6;
59 	};
60 	struct flow_dissector_key_ports enc_tp;
61 	struct flow_dissector_key_mpls mpls;
62 	struct flow_dissector_key_tcp tcp;
63 	struct flow_dissector_key_ip ip;
64 	struct flow_dissector_key_ip enc_ip;
65 	struct flow_dissector_key_enc_opts enc_opts;
66 	union {
67 		struct flow_dissector_key_ports tp;
68 		struct {
69 			struct flow_dissector_key_ports tp_min;
70 			struct flow_dissector_key_ports tp_max;
71 		};
72 	} tp_range;
73 	struct flow_dissector_key_ct ct;
74 	struct flow_dissector_key_hash hash;
75 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
76 
77 struct fl_flow_mask_range {
78 	unsigned short int start;
79 	unsigned short int end;
80 };
81 
82 struct fl_flow_mask {
83 	struct fl_flow_key key;
84 	struct fl_flow_mask_range range;
85 	u32 flags;
86 	struct rhash_head ht_node;
87 	struct rhashtable ht;
88 	struct rhashtable_params filter_ht_params;
89 	struct flow_dissector dissector;
90 	struct list_head filters;
91 	struct rcu_work rwork;
92 	struct list_head list;
93 	refcount_t refcnt;
94 };
95 
96 struct fl_flow_tmplt {
97 	struct fl_flow_key dummy_key;
98 	struct fl_flow_key mask;
99 	struct flow_dissector dissector;
100 	struct tcf_chain *chain;
101 };
102 
103 struct cls_fl_head {
104 	struct rhashtable ht;
105 	spinlock_t masks_lock; /* Protect masks list */
106 	struct list_head masks;
107 	struct list_head hw_filters;
108 	struct rcu_work rwork;
109 	struct idr handle_idr;
110 };
111 
112 struct cls_fl_filter {
113 	struct fl_flow_mask *mask;
114 	struct rhash_head ht_node;
115 	struct fl_flow_key mkey;
116 	struct tcf_exts exts;
117 	struct tcf_result res;
118 	struct fl_flow_key key;
119 	struct list_head list;
120 	struct list_head hw_list;
121 	u32 handle;
122 	u32 flags;
123 	u32 in_hw_count;
124 	struct rcu_work rwork;
125 	struct net_device *hw_dev;
126 	/* Flower classifier is unlocked, which means that its reference counter
127 	 * can be changed concurrently without any kind of external
128 	 * synchronization. Use an atomic reference counter to be concurrency-safe.
129 	 */
130 	refcount_t refcnt;
131 	bool deleted;
132 };
133 
134 static const struct rhashtable_params mask_ht_params = {
135 	.key_offset = offsetof(struct fl_flow_mask, key),
136 	.key_len = sizeof(struct fl_flow_key),
137 	.head_offset = offsetof(struct fl_flow_mask, ht_node),
138 	.automatic_shrinking = true,
139 };
140 
141 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
142 {
143 	return mask->range.end - mask->range.start;
144 }
145 
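/* Compute the smallest long-aligned byte window [start, end) of the mask key
 * that contains every non-zero mask byte. Masked copies and lookups only scan
 * this window, so they can operate on whole longs.
 */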
146 static void fl_mask_update_range(struct fl_flow_mask *mask)
147 {
148 	const u8 *bytes = (const u8 *) &mask->key;
149 	size_t size = sizeof(mask->key);
150 	size_t i, first = 0, last;
151 
152 	for (i = 0; i < size; i++) {
153 		if (bytes[i]) {
154 			first = i;
155 			break;
156 		}
157 	}
158 	last = first;
159 	for (i = size - 1; i != first; i--) {
160 		if (bytes[i]) {
161 			last = i;
162 			break;
163 		}
164 	}
165 	mask->range.start = rounddown(first, sizeof(long));
166 	mask->range.end = roundup(last + 1, sizeof(long));
167 }
168 
169 static void *fl_key_get_start(struct fl_flow_key *key,
170 			      const struct fl_flow_mask *mask)
171 {
172 	return (u8 *) key + mask->range.start;
173 }
174 
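/* Build the masked key (mkey = key & mask), one long at a time, over the
 * mask's active byte range only.
 */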
175 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
176 			      struct fl_flow_mask *mask)
177 {
178 	const long *lkey = fl_key_get_start(key, mask);
179 	const long *lmask = fl_key_get_start(&mask->key, mask);
180 	long *lmkey = fl_key_get_start(mkey, mask);
181 	int i;
182 
183 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
184 		*lmkey++ = *lkey++ & *lmask++;
185 }
186 
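/* A mask fits a template if it does not match on any bit that the template's
 * mask leaves unmatched; with no template every mask fits.
 */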
187 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
188 			       struct fl_flow_mask *mask)
189 {
190 	const long *lmask = fl_key_get_start(&mask->key, mask);
191 	const long *ltmplt;
192 	int i;
193 
194 	if (!tmplt)
195 		return true;
196 	ltmplt = fl_key_get_start(&tmplt->mask, mask);
197 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
198 		if (~*ltmplt++ & *lmask++)
199 			return false;
200 	}
201 	return true;
202 }
203 
204 static void fl_clear_masked_range(struct fl_flow_key *key,
205 				  struct fl_flow_mask *mask)
206 {
207 	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
208 }
209 
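/* If the filter matches on a destination port range, check that the packet's
 * destination port lies within [min, max] and copy the filter's masked
 * min/max ports into mkey (the skb has no such fields) so the following hash
 * lookup can match. fl_range_port_src_cmp() below does the same for the
 * source port.
 */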
210 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
211 				  struct fl_flow_key *key,
212 				  struct fl_flow_key *mkey)
213 {
214 	u16 min_mask, max_mask, min_val, max_val;
215 
216 	min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
217 	max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
218 	min_val = ntohs(filter->key.tp_range.tp_min.dst);
219 	max_val = ntohs(filter->key.tp_range.tp_max.dst);
220 
221 	if (min_mask && max_mask) {
222 		if (ntohs(key->tp_range.tp.dst) < min_val ||
223 		    ntohs(key->tp_range.tp.dst) > max_val)
224 			return false;
225 
226 		/* skb does not have min and max values */
227 		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
228 		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
229 	}
230 	return true;
231 }
232 
233 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
234 				  struct fl_flow_key *key,
235 				  struct fl_flow_key *mkey)
236 {
237 	u16 min_mask, max_mask, min_val, max_val;
238 
239 	min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
240 	max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
241 	min_val = ntohs(filter->key.tp_range.tp_min.src);
242 	max_val = ntohs(filter->key.tp_range.tp_max.src);
243 
244 	if (min_mask && max_mask) {
245 		if (ntohs(key->tp_range.tp.src) < min_val ||
246 		    ntohs(key->tp_range.tp.src) > max_val)
247 			return false;
248 
249 		/* skb does not have min and max values */
250 		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
251 		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
252 	}
253 	return true;
254 }
255 
256 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
257 					 struct fl_flow_key *mkey)
258 {
259 	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
260 				      mask->filter_ht_params);
261 }
262 
263 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
264 					     struct fl_flow_key *mkey,
265 					     struct fl_flow_key *key)
266 {
267 	struct cls_fl_filter *filter, *f;
268 
269 	list_for_each_entry_rcu(filter, &mask->filters, list) {
270 		if (!fl_range_port_dst_cmp(filter, key, mkey))
271 			continue;
272 
273 		if (!fl_range_port_src_cmp(filter, key, mkey))
274 			continue;
275 
276 		f = __fl_lookup(mask, mkey);
277 		if (f)
278 			return f;
279 	}
280 	return NULL;
281 }
282 
283 static noinline_for_stack
284 struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
285 {
286 	struct fl_flow_key mkey;
287 
288 	fl_set_masked_key(&mkey, key, mask);
289 	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
290 		return fl_lookup_range(mask, &mkey, key);
291 
292 	return __fl_lookup(mask, &mkey);
293 }
294 
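/* Map conntrack packet states (enum ip_conntrack_info) to the TCA_FLOWER
 * conntrack flag bits exposed to user space.
 */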
295 static u16 fl_ct_info_to_flower_map[] = {
296 	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
297 					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
298 	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
299 					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
300 	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
301 					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
302 					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
303 	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
304 					TCA_FLOWER_KEY_CT_FLAGS_RELATED |
305 					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
306 	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
307 					TCA_FLOWER_KEY_CT_FLAGS_NEW,
308 };
309 
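/* Classification walks every mask in the list: the skb is dissected into only
 * the keys that mask uses, the masked key is looked up in the mask's hash
 * table, and the first matching filter that is not skip_sw has its actions
 * executed.
 */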
310 static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
311 		       struct tcf_result *res)
312 {
313 	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
314 	bool post_ct = tc_skb_cb(skb)->post_ct;
315 	u16 zone = tc_skb_cb(skb)->zone;
316 	struct fl_flow_key skb_key;
317 	struct fl_flow_mask *mask;
318 	struct cls_fl_filter *f;
319 
320 	list_for_each_entry_rcu(mask, &head->masks, list) {
321 		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
322 		fl_clear_masked_range(&skb_key, mask);
323 
324 		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
325 		/* skb_flow_dissect() does not set n_proto in case of an unknown
326 		 * protocol, so set it here instead.
327 		 */
328 		skb_key.basic.n_proto = skb_protocol(skb, false);
329 		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
330 		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
331 				    fl_ct_info_to_flower_map,
332 				    ARRAY_SIZE(fl_ct_info_to_flower_map),
333 				    post_ct, zone);
334 		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
335 		skb_flow_dissect(skb, &mask->dissector, &skb_key,
336 				 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);
337 
338 		f = fl_mask_lookup(mask, &skb_key);
339 		if (f && !tc_skip_sw(f->flags)) {
340 			*res = f->res;
341 			return tcf_exts_exec(skb, &f->exts, res);
342 		}
343 	}
344 	return -1;
345 }
346 
347 static int fl_init(struct tcf_proto *tp)
348 {
349 	struct cls_fl_head *head;
350 
351 	head = kzalloc(sizeof(*head), GFP_KERNEL);
352 	if (!head)
353 		return -ENOBUFS;
354 
355 	spin_lock_init(&head->masks_lock);
356 	INIT_LIST_HEAD_RCU(&head->masks);
357 	INIT_LIST_HEAD(&head->hw_filters);
358 	rcu_assign_pointer(tp->root, head);
359 	idr_init(&head->handle_idr);
360 
361 	return rhashtable_init(&head->ht, &mask_ht_params);
362 }
363 
364 static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
365 {
366 	/* temporary masks don't have their filters list and ht initialized */
367 	if (mask_init_done) {
368 		WARN_ON(!list_empty(&mask->filters));
369 		rhashtable_destroy(&mask->ht);
370 	}
371 	kfree(mask);
372 }
373 
374 static void fl_mask_free_work(struct work_struct *work)
375 {
376 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
377 						 struct fl_flow_mask, rwork);
378 
379 	fl_mask_free(mask, true);
380 }
381 
382 static void fl_uninit_mask_free_work(struct work_struct *work)
383 {
384 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
385 						 struct fl_flow_mask, rwork);
386 
387 	fl_mask_free(mask, false);
388 }
389 
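/* Drop one reference on the mask. The last put unlinks it from the masks
 * hash table and list and frees it from a workqueue after an RCU grace
 * period.
 */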
390 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
391 {
392 	if (!refcount_dec_and_test(&mask->refcnt))
393 		return false;
394 
395 	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
396 
397 	spin_lock(&head->masks_lock);
398 	list_del_rcu(&mask->list);
399 	spin_unlock(&head->masks_lock);
400 
401 	tcf_queue_work(&mask->rwork, fl_mask_free_work);
402 
403 	return true;
404 }
405 
406 static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
407 {
408 	/* Flower classifier only changes root pointer during init and destroy.
409 	 * Users must obtain a reference to the tcf_proto instance before calling
410 	 * its API, so the tp->root pointer is protected from a concurrent call to
411 	 * fl_destroy() by reference counting.
412 	 */
413 	return rcu_dereference_raw(tp->root);
414 }
415 
416 static void __fl_destroy_filter(struct cls_fl_filter *f)
417 {
418 	tcf_exts_destroy(&f->exts);
419 	tcf_exts_put_net(&f->exts);
420 	kfree(f);
421 }
422 
423 static void fl_destroy_filter_work(struct work_struct *work)
424 {
425 	struct cls_fl_filter *f = container_of(to_rcu_work(work),
426 					struct cls_fl_filter, rwork);
427 
428 	__fl_destroy_filter(f);
429 }
430 
431 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
432 				 bool rtnl_held, struct netlink_ext_ack *extack)
433 {
434 	struct tcf_block *block = tp->chain->block;
435 	struct flow_cls_offload cls_flower = {};
436 
437 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
438 	cls_flower.command = FLOW_CLS_DESTROY;
439 	cls_flower.cookie = (unsigned long) f;
440 
441 	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
442 			    &f->flags, &f->in_hw_count, rtnl_held);
443 
444 }
445 
446 static int fl_hw_replace_filter(struct tcf_proto *tp,
447 				struct cls_fl_filter *f, bool rtnl_held,
448 				struct netlink_ext_ack *extack)
449 {
450 	struct tcf_block *block = tp->chain->block;
451 	struct flow_cls_offload cls_flower = {};
452 	bool skip_sw = tc_skip_sw(f->flags);
453 	int err = 0;
454 
455 	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
456 	if (!cls_flower.rule)
457 		return -ENOMEM;
458 
459 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
460 	cls_flower.command = FLOW_CLS_REPLACE;
461 	cls_flower.cookie = (unsigned long) f;
462 	cls_flower.rule->match.dissector = &f->mask->dissector;
463 	cls_flower.rule->match.mask = &f->mask->key;
464 	cls_flower.rule->match.key = &f->mkey;
465 	cls_flower.classid = f->res.classid;
466 
467 	err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
468 				      cls_flower.common.extack);
469 	if (err) {
470 		kfree(cls_flower.rule);
471 
472 		return skip_sw ? err : 0;
473 	}
474 
475 	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
476 			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
477 	tc_cleanup_offload_action(&cls_flower.rule->action);
478 	kfree(cls_flower.rule);
479 
480 	if (err) {
481 		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
482 		return err;
483 	}
484 
485 	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
486 		return -EINVAL;
487 
488 	return 0;
489 }
490 
491 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
492 			       bool rtnl_held)
493 {
494 	struct tcf_block *block = tp->chain->block;
495 	struct flow_cls_offload cls_flower = {};
496 
497 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
498 	cls_flower.command = FLOW_CLS_STATS;
499 	cls_flower.cookie = (unsigned long) f;
500 	cls_flower.classid = f->res.classid;
501 
502 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
503 			 rtnl_held);
504 
505 	tcf_exts_hw_stats_update(&f->exts, cls_flower.stats.bytes,
506 				 cls_flower.stats.pkts,
507 				 cls_flower.stats.drops,
508 				 cls_flower.stats.lastused,
509 				 cls_flower.stats.used_hw_stats,
510 				 cls_flower.stats.used_hw_stats_valid);
511 }
512 
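/* Drop one reference on the filter. The last put frees it, via the RCU
 * workqueue when a netns reference can still be taken, or immediately
 * otherwise.
 */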
513 static void __fl_put(struct cls_fl_filter *f)
514 {
515 	if (!refcount_dec_and_test(&f->refcnt))
516 		return;
517 
518 	if (tcf_exts_get_net(&f->exts))
519 		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
520 	else
521 		__fl_destroy_filter(f);
522 }
523 
524 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
525 {
526 	struct cls_fl_filter *f;
527 
528 	rcu_read_lock();
529 	f = idr_find(&head->handle_idr, handle);
530 	if (f && !refcount_inc_not_zero(&f->refcnt))
531 		f = NULL;
532 	rcu_read_unlock();
533 
534 	return f;
535 }
536 
537 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
538 		       bool *last, bool rtnl_held,
539 		       struct netlink_ext_ack *extack)
540 {
541 	struct cls_fl_head *head = fl_head_dereference(tp);
542 
543 	*last = false;
544 
545 	spin_lock(&tp->lock);
546 	if (f->deleted) {
547 		spin_unlock(&tp->lock);
548 		return -ENOENT;
549 	}
550 
551 	f->deleted = true;
552 	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
553 			       f->mask->filter_ht_params);
554 	idr_remove(&head->handle_idr, f->handle);
555 	list_del_rcu(&f->list);
556 	spin_unlock(&tp->lock);
557 
558 	*last = fl_mask_put(head, f->mask);
559 	if (!tc_skip_hw(f->flags))
560 		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
561 	tcf_unbind_filter(tp, &f->res);
562 	__fl_put(f);
563 
564 	return 0;
565 }
566 
567 static void fl_destroy_sleepable(struct work_struct *work)
568 {
569 	struct cls_fl_head *head = container_of(to_rcu_work(work),
570 						struct cls_fl_head,
571 						rwork);
572 
573 	rhashtable_destroy(&head->ht);
574 	kfree(head);
575 	module_put(THIS_MODULE);
576 }
577 
578 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
579 		       struct netlink_ext_ack *extack)
580 {
581 	struct cls_fl_head *head = fl_head_dereference(tp);
582 	struct fl_flow_mask *mask, *next_mask;
583 	struct cls_fl_filter *f, *next;
584 	bool last;
585 
586 	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
587 		list_for_each_entry_safe(f, next, &mask->filters, list) {
588 			__fl_delete(tp, f, &last, rtnl_held, extack);
589 			if (last)
590 				break;
591 		}
592 	}
593 	idr_destroy(&head->handle_idr);
594 
595 	__module_get(THIS_MODULE);
596 	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
597 }
598 
599 static void fl_put(struct tcf_proto *tp, void *arg)
600 {
601 	struct cls_fl_filter *f = arg;
602 
603 	__fl_put(f);
604 }
605 
606 static void *fl_get(struct tcf_proto *tp, u32 handle)
607 {
608 	struct cls_fl_head *head = fl_head_dereference(tp);
609 
610 	return __fl_get(head, handle);
611 }
612 
613 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
614 	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
615 	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
616 	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
617 					    .len = IFNAMSIZ },
618 	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
619 	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
620 	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
621 	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
622 	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
623 	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
624 	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
625 	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
626 	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
627 	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
628 	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
629 	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
630 	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
631 	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
632 	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
633 	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
634 	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
635 	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
636 	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
637 	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
638 	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
639 	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
640 	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
641 	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
642 	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
643 	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
644 	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
645 	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
646 	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
647 	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
648 	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
649 	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
650 	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
651 	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
652 	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
653 	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
654 	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
655 	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
656 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
657 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
658 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
659 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
660 	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
661 	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
662 	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
663 	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
664 	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
665 	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
666 	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
667 	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
668 	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
669 	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
670 	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
671 	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
672 	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
673 	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
674 	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
675 	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
676 	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
677 	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
678 	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
679 	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
680 	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
681 	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
682 	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
683 	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
684 	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
685 	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
686 	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
687 	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
688 	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
689 	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
690 	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
691 	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
692 	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
693 	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
694 	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
695 	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
696 	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
697 	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
698 	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
699 	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
700 	[TCA_FLOWER_KEY_CT_STATE]	=
701 		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
702 	[TCA_FLOWER_KEY_CT_STATE_MASK]	=
703 		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
704 	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
705 	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
706 	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
707 	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
708 	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
709 					    .len = 128 / BITS_PER_BYTE },
710 	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
711 					    .len = 128 / BITS_PER_BYTE },
712 	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
713 	[TCA_FLOWER_KEY_HASH]		= { .type = NLA_U32 },
714 	[TCA_FLOWER_KEY_HASH_MASK]	= { .type = NLA_U32 },
715 
716 };
717 
718 static const struct nla_policy
719 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
720 	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]        = {
721 		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
722 	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
723 	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
724 	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
725 	[TCA_FLOWER_KEY_ENC_OPTS_GTP]		= { .type = NLA_NESTED },
726 };
727 
728 static const struct nla_policy
729 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
730 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
731 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
732 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
733 						       .len = 128 },
734 };
735 
736 static const struct nla_policy
737 vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
738 	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]         = { .type = NLA_U32 },
739 };
740 
741 static const struct nla_policy
742 erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
743 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]        = { .type = NLA_U8 },
744 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]      = { .type = NLA_U32 },
745 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]        = { .type = NLA_U8 },
746 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
747 };
748 
749 static const struct nla_policy
750 gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = {
751 	[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]	   = { .type = NLA_U8 },
752 	[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]	   = { .type = NLA_U8 },
753 };
754 
755 static const struct nla_policy
756 mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
757 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]    = { .type = NLA_U8 },
758 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]      = { .type = NLA_U8 },
759 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]      = { .type = NLA_U8 },
760 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]       = { .type = NLA_U8 },
761 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]    = { .type = NLA_U32 },
762 };
763 
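/* Copy a key value from its netlink attribute, if present. When no mask
 * attribute is supplied the mask defaults to all ones, i.e. an exact match.
 */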
764 static void fl_set_key_val(struct nlattr **tb,
765 			   void *val, int val_type,
766 			   void *mask, int mask_type, int len)
767 {
768 	if (!tb[val_type])
769 		return;
770 	nla_memcpy(val, tb[val_type], len);
771 	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
772 		memset(mask, 0xff, len);
773 	else
774 		nla_memcpy(mask, tb[mask_type], len);
775 }
776 
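/* Parse the optional source/destination port range keys and reject ranges
 * whose maximum is not strictly greater than the minimum.
 */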
777 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
778 				 struct fl_flow_key *mask,
779 				 struct netlink_ext_ack *extack)
780 {
781 	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
782 		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
783 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
784 	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
785 		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
786 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
787 	fl_set_key_val(tb, &key->tp_range.tp_min.src,
788 		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
789 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
790 	fl_set_key_val(tb, &key->tp_range.tp_max.src,
791 		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
792 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
793 
794 	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
795 	    ntohs(key->tp_range.tp_max.dst) <=
796 	    ntohs(key->tp_range.tp_min.dst)) {
797 		NL_SET_ERR_MSG_ATTR(extack,
798 				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
799 				    "Invalid destination port range (min must be strictly smaller than max)");
800 		return -EINVAL;
801 	}
802 	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
803 	    ntohs(key->tp_range.tp_max.src) <=
804 	    ntohs(key->tp_range.tp_min.src)) {
805 		NL_SET_ERR_MSG_ATTR(extack,
806 				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
807 				    "Invalid source port range (min must be strictly smaller than max)");
808 		return -EINVAL;
809 	}
810 
811 	return 0;
812 }
813 
814 static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
815 			       struct flow_dissector_key_mpls *key_val,
816 			       struct flow_dissector_key_mpls *key_mask,
817 			       struct netlink_ext_ack *extack)
818 {
819 	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
820 	struct flow_dissector_mpls_lse *lse_mask;
821 	struct flow_dissector_mpls_lse *lse_val;
822 	u8 lse_index;
823 	u8 depth;
824 	int err;
825 
826 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
827 			       mpls_stack_entry_policy, extack);
828 	if (err < 0)
829 		return err;
830 
831 	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
832 		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
833 		return -EINVAL;
834 	}
835 
836 	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);
837 
838 	/* LSE depth starts at 1, for consistency with terminology used by
839 	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
840 	 */
841 	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
842 		NL_SET_ERR_MSG_ATTR(extack,
843 				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
844 				    "Invalid MPLS depth");
845 		return -EINVAL;
846 	}
847 	lse_index = depth - 1;
848 
849 	dissector_set_mpls_lse(key_val, lse_index);
850 	dissector_set_mpls_lse(key_mask, lse_index);
851 
852 	lse_val = &key_val->ls[lse_index];
853 	lse_mask = &key_mask->ls[lse_index];
854 
855 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
856 		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
857 		lse_mask->mpls_ttl = MPLS_TTL_MASK;
858 	}
859 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
860 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);
861 
862 		if (bos & ~MPLS_BOS_MASK) {
863 			NL_SET_ERR_MSG_ATTR(extack,
864 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
865 					    "Bottom Of Stack (BOS) must be 0 or 1");
866 			return -EINVAL;
867 		}
868 		lse_val->mpls_bos = bos;
869 		lse_mask->mpls_bos = MPLS_BOS_MASK;
870 	}
871 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
872 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);
873 
874 		if (tc & ~MPLS_TC_MASK) {
875 			NL_SET_ERR_MSG_ATTR(extack,
876 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
877 					    "Traffic Class (TC) must be between 0 and 7");
878 			return -EINVAL;
879 		}
880 		lse_val->mpls_tc = tc;
881 		lse_mask->mpls_tc = MPLS_TC_MASK;
882 	}
883 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
884 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);
885 
886 		if (label & ~MPLS_LABEL_MASK) {
887 			NL_SET_ERR_MSG_ATTR(extack,
888 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
889 					    "Label must be between 0 and 1048575");
890 			return -EINVAL;
891 		}
892 		lse_val->mpls_label = label;
893 		lse_mask->mpls_label = MPLS_LABEL_MASK;
894 	}
895 
896 	return 0;
897 }
898 
899 static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
900 				struct flow_dissector_key_mpls *key_val,
901 				struct flow_dissector_key_mpls *key_mask,
902 				struct netlink_ext_ack *extack)
903 {
904 	struct nlattr *nla_lse;
905 	int rem;
906 	int err;
907 
908 	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
909 		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
910 				    "NLA_F_NESTED is missing");
911 		return -EINVAL;
912 	}
913 
914 	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
915 		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
916 			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
917 					    "Invalid MPLS option type");
918 			return -EINVAL;
919 		}
920 
921 		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
922 		if (err < 0)
923 			return err;
924 	}
925 	if (rem) {
926 		NL_SET_ERR_MSG(extack,
927 			       "Bytes leftover after parsing MPLS options");
928 		return -EINVAL;
929 	}
930 
931 	return 0;
932 }
933 
934 static int fl_set_key_mpls(struct nlattr **tb,
935 			   struct flow_dissector_key_mpls *key_val,
936 			   struct flow_dissector_key_mpls *key_mask,
937 			   struct netlink_ext_ack *extack)
938 {
939 	struct flow_dissector_mpls_lse *lse_mask;
940 	struct flow_dissector_mpls_lse *lse_val;
941 
942 	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
943 		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
944 		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
945 		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
946 		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
947 			NL_SET_ERR_MSG_ATTR(extack,
948 					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
949 					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
950 			return -EBADMSG;
951 		}
952 
953 		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
954 					    key_val, key_mask, extack);
955 	}
956 
957 	lse_val = &key_val->ls[0];
958 	lse_mask = &key_mask->ls[0];
959 
960 	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
961 		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
962 		lse_mask->mpls_ttl = MPLS_TTL_MASK;
963 		dissector_set_mpls_lse(key_val, 0);
964 		dissector_set_mpls_lse(key_mask, 0);
965 	}
966 	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
967 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
968 
969 		if (bos & ~MPLS_BOS_MASK) {
970 			NL_SET_ERR_MSG_ATTR(extack,
971 					    tb[TCA_FLOWER_KEY_MPLS_BOS],
972 					    "Bottom Of Stack (BOS) must be 0 or 1");
973 			return -EINVAL;
974 		}
975 		lse_val->mpls_bos = bos;
976 		lse_mask->mpls_bos = MPLS_BOS_MASK;
977 		dissector_set_mpls_lse(key_val, 0);
978 		dissector_set_mpls_lse(key_mask, 0);
979 	}
980 	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
981 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
982 
983 		if (tc & ~MPLS_TC_MASK) {
984 			NL_SET_ERR_MSG_ATTR(extack,
985 					    tb[TCA_FLOWER_KEY_MPLS_TC],
986 					    "Traffic Class (TC) must be between 0 and 7");
987 			return -EINVAL;
988 		}
989 		lse_val->mpls_tc = tc;
990 		lse_mask->mpls_tc = MPLS_TC_MASK;
991 		dissector_set_mpls_lse(key_val, 0);
992 		dissector_set_mpls_lse(key_mask, 0);
993 	}
994 	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
995 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
996 
997 		if (label & ~MPLS_LABEL_MASK) {
998 			NL_SET_ERR_MSG_ATTR(extack,
999 					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
1000 					    "Label must be between 0 and 1048575");
1001 			return -EINVAL;
1002 		}
1003 		lse_val->mpls_label = label;
1004 		lse_mask->mpls_label = MPLS_LABEL_MASK;
1005 		dissector_set_mpls_lse(key_val, 0);
1006 		dissector_set_mpls_lse(key_mask, 0);
1007 	}
1008 	return 0;
1009 }
1010 
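/* Fill a VLAN key/mask from the netlink attributes. The TPID is taken from
 * the ethertype that introduced this tag and is always matched exactly; VLAN
 * ID, priority and next ethertype are matched only when supplied.
 */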
1011 static void fl_set_key_vlan(struct nlattr **tb,
1012 			    __be16 ethertype,
1013 			    int vlan_id_key, int vlan_prio_key,
1014 			    int vlan_next_eth_type_key,
1015 			    struct flow_dissector_key_vlan *key_val,
1016 			    struct flow_dissector_key_vlan *key_mask)
1017 {
1018 #define VLAN_PRIORITY_MASK	0x7
1019 
1020 	if (tb[vlan_id_key]) {
1021 		key_val->vlan_id =
1022 			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
1023 		key_mask->vlan_id = VLAN_VID_MASK;
1024 	}
1025 	if (tb[vlan_prio_key]) {
1026 		key_val->vlan_priority =
1027 			nla_get_u8(tb[vlan_prio_key]) &
1028 			VLAN_PRIORITY_MASK;
1029 		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
1030 	}
1031 	key_val->vlan_tpid = ethertype;
1032 	key_mask->vlan_tpid = cpu_to_be16(~0);
1033 	if (tb[vlan_next_eth_type_key]) {
1034 		key_val->vlan_eth_type =
1035 			nla_get_be16(tb[vlan_next_eth_type_key]);
1036 		key_mask->vlan_eth_type = cpu_to_be16(~0);
1037 	}
1038 }
1039 
1040 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
1041 			    u32 *dissector_key, u32 *dissector_mask,
1042 			    u32 flower_flag_bit, u32 dissector_flag_bit)
1043 {
1044 	if (flower_mask & flower_flag_bit) {
1045 		*dissector_mask |= dissector_flag_bit;
1046 		if (flower_key & flower_flag_bit)
1047 			*dissector_key |= dissector_flag_bit;
1048 	}
1049 }
1050 
1051 static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
1052 			    u32 *flags_mask, struct netlink_ext_ack *extack)
1053 {
1054 	u32 key, mask;
1055 
1056 	/* mask is mandatory for flags */
1057 	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
1058 		NL_SET_ERR_MSG(extack, "Missing flags mask");
1059 		return -EINVAL;
1060 	}
1061 
1062 	key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
1063 	mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
1064 
1065 	*flags_key  = 0;
1066 	*flags_mask = 0;
1067 
1068 	fl_set_key_flag(key, mask, flags_key, flags_mask,
1069 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
1070 	fl_set_key_flag(key, mask, flags_key, flags_mask,
1071 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
1072 			FLOW_DIS_FIRST_FRAG);
1073 
1074 	return 0;
1075 }
1076 
1077 static void fl_set_key_ip(struct nlattr **tb, bool encap,
1078 			  struct flow_dissector_key_ip *key,
1079 			  struct flow_dissector_key_ip *mask)
1080 {
1081 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
1082 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
1083 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
1084 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
1085 
1086 	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
1087 	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
1088 }
1089 
1090 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
1091 			     int depth, int option_len,
1092 			     struct netlink_ext_ack *extack)
1093 {
1094 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
1095 	struct nlattr *class = NULL, *type = NULL, *data = NULL;
1096 	struct geneve_opt *opt;
1097 	int err, data_len = 0;
1098 
1099 	if (option_len > sizeof(struct geneve_opt))
1100 		data_len = option_len - sizeof(struct geneve_opt);
1101 
1102 	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
1103 	memset(opt, 0xff, option_len);
1104 	opt->length = data_len / 4;
1105 	opt->r1 = 0;
1106 	opt->r2 = 0;
1107 	opt->r3 = 0;
1108 
1109 	/* If no mask has been provided we assume an exact match. */
1110 	if (!depth)
1111 		return sizeof(struct geneve_opt) + data_len;
1112 
1113 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
1114 		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
1115 		return -EINVAL;
1116 	}
1117 
1118 	err = nla_parse_nested_deprecated(tb,
1119 					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
1120 					  nla, geneve_opt_policy, extack);
1121 	if (err < 0)
1122 		return err;
1123 
1124 	/* We are not allowed to omit any of CLASS, TYPE or DATA
1125 	 * fields from the key.
1126 	 */
1127 	if (!option_len &&
1128 	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
1129 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
1130 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
1131 		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
1132 		return -EINVAL;
1133 	}
1134 
1135 	/* Omitting any of CLASS, TYPE or DATA fields is allowed
1136 	 * for the mask.
1137 	 */
1138 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
1139 		int new_len = key->enc_opts.len;
1140 
1141 		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
1142 		data_len = nla_len(data);
1143 		if (data_len < 4) {
1144 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
1145 			return -ERANGE;
1146 		}
1147 		if (data_len % 4) {
1148 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
1149 			return -ERANGE;
1150 		}
1151 
1152 		new_len += sizeof(struct geneve_opt) + data_len;
1153 		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
1154 		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
1155 			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
1156 			return -ERANGE;
1157 		}
1158 		opt->length = data_len / 4;
1159 		memcpy(opt->opt_data, nla_data(data), data_len);
1160 	}
1161 
1162 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
1163 		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
1164 		opt->opt_class = nla_get_be16(class);
1165 	}
1166 
1167 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
1168 		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
1169 		opt->type = nla_get_u8(type);
1170 	}
1171 
1172 	return sizeof(struct geneve_opt) + data_len;
1173 }
1174 
1175 static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1176 			    int depth, int option_len,
1177 			    struct netlink_ext_ack *extack)
1178 {
1179 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
1180 	struct vxlan_metadata *md;
1181 	int err;
1182 
1183 	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1184 	memset(md, 0xff, sizeof(*md));
1185 
1186 	if (!depth)
1187 		return sizeof(*md);
1188 
1189 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
1190 		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
1191 		return -EINVAL;
1192 	}
1193 
1194 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
1195 			       vxlan_opt_policy, extack);
1196 	if (err < 0)
1197 		return err;
1198 
1199 	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1200 		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
1201 		return -EINVAL;
1202 	}
1203 
1204 	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1205 		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
1206 		md->gbp &= VXLAN_GBP_MASK;
1207 	}
1208 
1209 	return sizeof(*md);
1210 }
1211 
1212 static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1213 			     int depth, int option_len,
1214 			     struct netlink_ext_ack *extack)
1215 {
1216 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
1217 	struct erspan_metadata *md;
1218 	int err;
1219 
1220 	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1221 	memset(md, 0xff, sizeof(*md));
1222 	md->version = 1;
1223 
1224 	if (!depth)
1225 		return sizeof(*md);
1226 
1227 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
1228 		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
1229 		return -EINVAL;
1230 	}
1231 
1232 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
1233 			       erspan_opt_policy, extack);
1234 	if (err < 0)
1235 		return err;
1236 
1237 	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
1238 		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
1239 		return -EINVAL;
1240 	}
1241 
1242 	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
1243 		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);
1244 
1245 	if (md->version == 1) {
1246 		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1247 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
1248 			return -EINVAL;
1249 		}
1250 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1251 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
1252 			memset(&md->u, 0x00, sizeof(md->u));
1253 			md->u.index = nla_get_be32(nla);
1254 		}
1255 	} else if (md->version == 2) {
1256 		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
1257 				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
1258 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
1259 			return -EINVAL;
1260 		}
1261 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
1262 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
1263 			md->u.md2.dir = nla_get_u8(nla);
1264 		}
1265 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
1266 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
1267 			set_hwid(&md->u.md2, nla_get_u8(nla));
1268 		}
1269 	} else {
1270 		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
1271 		return -EINVAL;
1272 	}
1273 
1274 	return sizeof(*md);
1275 }
1276 
1277 static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key,
1278 			  int depth, int option_len,
1279 			  struct netlink_ext_ack *extack)
1280 {
1281 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1];
1282 	struct gtp_pdu_session_info *sinfo;
1283 	u8 len = key->enc_opts.len;
1284 	int err;
1285 
1286 	sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[len];
1287 	memset(sinfo, 0xff, option_len);
1288 
1289 	if (!depth)
1290 		return sizeof(*sinfo);
1291 
1292 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) {
1293 		NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask");
1294 		return -EINVAL;
1295 	}
1296 
1297 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla,
1298 			       gtp_opt_policy, extack);
1299 	if (err < 0)
1300 		return err;
1301 
1302 	if (!option_len &&
1303 	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] ||
1304 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) {
1305 		NL_SET_ERR_MSG_MOD(extack,
1306 				   "Missing tunnel key gtp option pdu type or qfi");
1307 		return -EINVAL;
1308 	}
1309 
1310 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE])
1311 		sinfo->pdu_type =
1312 			nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]);
1313 
1314 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])
1315 		sinfo->qfi = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]);
1316 
1317 	return sizeof(*sinfo);
1318 }
1319 
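/* Parse tunnel (encapsulation) option keys and masks. Key and mask options
 * are walked in lockstep and must use the same option type and length; a
 * missing mask means an exact match on the supplied key option.
 */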
1320 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
1321 			  struct fl_flow_key *mask,
1322 			  struct netlink_ext_ack *extack)
1323 {
1324 	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
1325 	int err, option_len, key_depth, msk_depth = 0;
1326 
1327 	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
1328 					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
1329 					     enc_opts_policy, extack);
1330 	if (err)
1331 		return err;
1332 
1333 	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
1334 
1335 	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
1336 		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
1337 						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
1338 						     enc_opts_policy, extack);
1339 		if (err)
1340 			return err;
1341 
1342 		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1343 		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1344 		if (!nla_ok(nla_opt_msk, msk_depth)) {
1345 			NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
1346 			return -EINVAL;
1347 		}
1348 	}
1349 
1350 	nla_for_each_attr(nla_opt_key, nla_enc_key,
1351 			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
1352 		switch (nla_type(nla_opt_key)) {
1353 		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
1354 			if (key->enc_opts.dst_opt_type &&
1355 			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
1356 				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
1357 				return -EINVAL;
1358 			}
1359 			option_len = 0;
1360 			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1361 			option_len = fl_set_geneve_opt(nla_opt_key, key,
1362 						       key_depth, option_len,
1363 						       extack);
1364 			if (option_len < 0)
1365 				return option_len;
1366 
1367 			key->enc_opts.len += option_len;
1368 			/* At the same time we need to parse through the mask
1369 			 * in order to verify exact and mask attribute lengths.
1370 			 */
1371 			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1372 			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
1373 						       msk_depth, option_len,
1374 						       extack);
1375 			if (option_len < 0)
1376 				return option_len;
1377 
1378 			mask->enc_opts.len += option_len;
1379 			if (key->enc_opts.len != mask->enc_opts.len) {
1380 				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1381 				return -EINVAL;
1382 			}
1383 			break;
1384 		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
1385 			if (key->enc_opts.dst_opt_type) {
1386 				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
1387 				return -EINVAL;
1388 			}
1389 			option_len = 0;
1390 			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1391 			option_len = fl_set_vxlan_opt(nla_opt_key, key,
1392 						      key_depth, option_len,
1393 						      extack);
1394 			if (option_len < 0)
1395 				return option_len;
1396 
1397 			key->enc_opts.len += option_len;
1398 			/* At the same time we need to parse through the mask
1399 			 * in order to verify exact and mask attribute lengths.
1400 			 */
1401 			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1402 			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
1403 						      msk_depth, option_len,
1404 						      extack);
1405 			if (option_len < 0)
1406 				return option_len;
1407 
1408 			mask->enc_opts.len += option_len;
1409 			if (key->enc_opts.len != mask->enc_opts.len) {
1410 				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1411 				return -EINVAL;
1412 			}
1413 			break;
1414 		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
1415 			if (key->enc_opts.dst_opt_type) {
1416 				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
1417 				return -EINVAL;
1418 			}
1419 			option_len = 0;
1420 			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1421 			option_len = fl_set_erspan_opt(nla_opt_key, key,
1422 						       key_depth, option_len,
1423 						       extack);
1424 			if (option_len < 0)
1425 				return option_len;
1426 
1427 			key->enc_opts.len += option_len;
1428 			/* At the same time we need to parse through the mask
1429 			 * in order to verify exact and mask attribute lengths.
1430 			 */
1431 			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1432 			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
1433 						       msk_depth, option_len,
1434 						       extack);
1435 			if (option_len < 0)
1436 				return option_len;
1437 
1438 			mask->enc_opts.len += option_len;
1439 			if (key->enc_opts.len != mask->enc_opts.len) {
1440 				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1441 				return -EINVAL;
1442 			}
1443 			break;
1444 		case TCA_FLOWER_KEY_ENC_OPTS_GTP:
1445 			if (key->enc_opts.dst_opt_type) {
1446 				NL_SET_ERR_MSG_MOD(extack,
1447 						   "Duplicate type for gtp options");
1448 				return -EINVAL;
1449 			}
1450 			option_len = 0;
1451 			key->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
1452 			option_len = fl_set_gtp_opt(nla_opt_key, key,
1453 						    key_depth, option_len,
1454 						    extack);
1455 			if (option_len < 0)
1456 				return option_len;
1457 
1458 			key->enc_opts.len += option_len;
1459 			/* At the same time we need to parse through the mask
1460 			 * in order to verify exact and mask attribute lengths.
1461 			 */
1462 			mask->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
1463 			option_len = fl_set_gtp_opt(nla_opt_msk, mask,
1464 						    msk_depth, option_len,
1465 						    extack);
1466 			if (option_len < 0)
1467 				return option_len;
1468 
1469 			mask->enc_opts.len += option_len;
1470 			if (key->enc_opts.len != mask->enc_opts.len) {
1471 				NL_SET_ERR_MSG_MOD(extack,
1472 						   "Key and mask miss aligned");
1473 				return -EINVAL;
1474 			}
1475 			break;
1476 		default:
1477 			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1478 			return -EINVAL;
1479 		}
1480 
1481 		if (!msk_depth)
1482 			continue;
1483 
1484 		if (!nla_ok(nla_opt_msk, msk_depth)) {
1485 			NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
1486 			return -EINVAL;
1487 		}
1488 		nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1489 	}
1490 
1491 	return 0;
1492 }
1493 
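/* Reject conntrack state combinations that can never be observed: any flag
 * requires "tracked", "new" excludes "established" and "reply", and "invalid"
 * may only be combined with "tracked".
 */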
1494 static int fl_validate_ct_state(u16 state, struct nlattr *tb,
1495 				struct netlink_ext_ack *extack)
1496 {
1497 	if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
1498 		NL_SET_ERR_MSG_ATTR(extack, tb,
1499 				    "no trk, so no other flag can be set");
1500 		return -EINVAL;
1501 	}
1502 
1503 	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1504 	    state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
1505 		NL_SET_ERR_MSG_ATTR(extack, tb,
1506 				    "new and est are mutually exclusive");
1507 		return -EINVAL;
1508 	}
1509 
1510 	if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID &&
1511 	    state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
1512 		      TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
1513 		NL_SET_ERR_MSG_ATTR(extack, tb,
1514 				    "when inv is set, only trk may be set");
1515 		return -EINVAL;
1516 	}
1517 
1518 	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1519 	    state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
1520 		NL_SET_ERR_MSG_ATTR(extack, tb,
1521 				    "new and rpl are mutually exclusive");
1522 		return -EINVAL;
1523 	}
1524 
1525 	return 0;
1526 }
1527 
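/* Parse conntrack match keys, rejecting keys whose conntrack support
 * (tracking, zones, mark, labels) is not compiled into the kernel.
 */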
1528 static int fl_set_key_ct(struct nlattr **tb,
1529 			 struct flow_dissector_key_ct *key,
1530 			 struct flow_dissector_key_ct *mask,
1531 			 struct netlink_ext_ack *extack)
1532 {
1533 	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
1534 		int err;
1535 
1536 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1537 			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1538 			return -EOPNOTSUPP;
1539 		}
1540 		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1541 			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1542 			       sizeof(key->ct_state));
1543 
1544 		err = fl_validate_ct_state(key->ct_state & mask->ct_state,
1545 					   tb[TCA_FLOWER_KEY_CT_STATE_MASK],
1546 					   extack);
1547 		if (err)
1548 			return err;
1549 
1550 	}
1551 	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1552 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1553 			NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
1554 			return -EOPNOTSUPP;
1555 		}
1556 		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1557 			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1558 			       sizeof(key->ct_zone));
1559 	}
1560 	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1561 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1562 			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1563 			return -EOPNOTSUPP;
1564 		}
1565 		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1566 			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1567 			       sizeof(key->ct_mark));
1568 	}
1569 	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1570 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1571 			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1572 			return -EOPNOTSUPP;
1573 		}
1574 		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1575 			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1576 			       sizeof(key->ct_labels));
1577 	}
1578 
1579 	return 0;
1580 }
1581 
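/* Translate the TCA_FLOWER_KEY_* attributes into the flow dissector key and
 * mask. Fields whose attributes are absent keep a zero mask and are therefore
 * wildcarded.
 */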
1582 static int fl_set_key(struct net *net, struct nlattr **tb,
1583 		      struct fl_flow_key *key, struct fl_flow_key *mask,
1584 		      struct netlink_ext_ack *extack)
1585 {
1586 	__be16 ethertype;
1587 	int ret = 0;
1588 
1589 	if (tb[TCA_FLOWER_INDEV]) {
1590 		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1591 		if (err < 0)
1592 			return err;
1593 		key->meta.ingress_ifindex = err;
1594 		mask->meta.ingress_ifindex = 0xffffffff;
1595 	}
1596 
1597 	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1598 		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1599 		       sizeof(key->eth.dst));
1600 	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1601 		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1602 		       sizeof(key->eth.src));
1603 
1604 	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
1605 		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
1606 
1607 		if (eth_type_vlan(ethertype)) {
1608 			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1609 					TCA_FLOWER_KEY_VLAN_PRIO,
1610 					TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1611 					&key->vlan, &mask->vlan);
1612 
1613 			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
1614 				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
1615 				if (eth_type_vlan(ethertype)) {
1616 					fl_set_key_vlan(tb, ethertype,
1617 							TCA_FLOWER_KEY_CVLAN_ID,
1618 							TCA_FLOWER_KEY_CVLAN_PRIO,
1619 							TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1620 							&key->cvlan, &mask->cvlan);
1621 					fl_set_key_val(tb, &key->basic.n_proto,
1622 						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1623 						       &mask->basic.n_proto,
1624 						       TCA_FLOWER_UNSPEC,
1625 						       sizeof(key->basic.n_proto));
1626 				} else {
1627 					key->basic.n_proto = ethertype;
1628 					mask->basic.n_proto = cpu_to_be16(~0);
1629 				}
1630 			}
1631 		} else {
1632 			key->basic.n_proto = ethertype;
1633 			mask->basic.n_proto = cpu_to_be16(~0);
1634 		}
1635 	}
1636 
1637 	if (key->basic.n_proto == htons(ETH_P_IP) ||
1638 	    key->basic.n_proto == htons(ETH_P_IPV6)) {
1639 		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1640 			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1641 			       sizeof(key->basic.ip_proto));
1642 		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1643 	}
1644 
1645 	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1646 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1647 		mask->control.addr_type = ~0;
1648 		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1649 			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1650 			       sizeof(key->ipv4.src));
1651 		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1652 			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1653 			       sizeof(key->ipv4.dst));
1654 	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1655 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1656 		mask->control.addr_type = ~0;
1657 		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1658 			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1659 			       sizeof(key->ipv6.src));
1660 		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1661 			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1662 			       sizeof(key->ipv6.dst));
1663 	}
1664 
1665 	if (key->basic.ip_proto == IPPROTO_TCP) {
1666 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1667 			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1668 			       sizeof(key->tp.src));
1669 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1670 			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1671 			       sizeof(key->tp.dst));
1672 		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1673 			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1674 			       sizeof(key->tcp.flags));
1675 	} else if (key->basic.ip_proto == IPPROTO_UDP) {
1676 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1677 			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1678 			       sizeof(key->tp.src));
1679 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1680 			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1681 			       sizeof(key->tp.dst));
1682 	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
1683 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1684 			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1685 			       sizeof(key->tp.src));
1686 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1687 			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1688 			       sizeof(key->tp.dst));
1689 	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
1690 		   key->basic.ip_proto == IPPROTO_ICMP) {
1691 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1692 			       &mask->icmp.type,
1693 			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1694 			       sizeof(key->icmp.type));
1695 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1696 			       &mask->icmp.code,
1697 			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1698 			       sizeof(key->icmp.code));
1699 	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1700 		   key->basic.ip_proto == IPPROTO_ICMPV6) {
1701 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1702 			       &mask->icmp.type,
1703 			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1704 			       sizeof(key->icmp.type));
1705 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1706 			       &mask->icmp.code,
1707 			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1708 			       sizeof(key->icmp.code));
1709 	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1710 		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1711 		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
1712 		if (ret)
1713 			return ret;
1714 	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1715 		   key->basic.n_proto == htons(ETH_P_RARP)) {
1716 		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1717 			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1718 			       sizeof(key->arp.sip));
1719 		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1720 			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1721 			       sizeof(key->arp.tip));
1722 		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1723 			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1724 			       sizeof(key->arp.op));
1725 		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1726 			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1727 			       sizeof(key->arp.sha));
1728 		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1729 			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1730 			       sizeof(key->arp.tha));
1731 	}
1732 
1733 	if (key->basic.ip_proto == IPPROTO_TCP ||
1734 	    key->basic.ip_proto == IPPROTO_UDP ||
1735 	    key->basic.ip_proto == IPPROTO_SCTP) {
1736 		ret = fl_set_key_port_range(tb, key, mask, extack);
1737 		if (ret)
1738 			return ret;
1739 	}
1740 
1741 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1742 	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1743 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1744 		mask->enc_control.addr_type = ~0;
1745 		fl_set_key_val(tb, &key->enc_ipv4.src,
1746 			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
1747 			       &mask->enc_ipv4.src,
1748 			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1749 			       sizeof(key->enc_ipv4.src));
1750 		fl_set_key_val(tb, &key->enc_ipv4.dst,
1751 			       TCA_FLOWER_KEY_ENC_IPV4_DST,
1752 			       &mask->enc_ipv4.dst,
1753 			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1754 			       sizeof(key->enc_ipv4.dst));
1755 	}
1756 
1757 	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1758 	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1759 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1760 		mask->enc_control.addr_type = ~0;
1761 		fl_set_key_val(tb, &key->enc_ipv6.src,
1762 			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
1763 			       &mask->enc_ipv6.src,
1764 			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1765 			       sizeof(key->enc_ipv6.src));
1766 		fl_set_key_val(tb, &key->enc_ipv6.dst,
1767 			       TCA_FLOWER_KEY_ENC_IPV6_DST,
1768 			       &mask->enc_ipv6.dst,
1769 			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1770 			       sizeof(key->enc_ipv6.dst));
1771 	}
1772 
1773 	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1774 		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1775 		       sizeof(key->enc_key_id.keyid));
1776 
1777 	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1778 		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1779 		       sizeof(key->enc_tp.src));
1780 
1781 	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1782 		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1783 		       sizeof(key->enc_tp.dst));
1784 
1785 	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1786 
1787 	fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1788 		       &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1789 		       sizeof(key->hash.hash));
1790 
1791 	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1792 		ret = fl_set_enc_opt(tb, key, mask, extack);
1793 		if (ret)
1794 			return ret;
1795 	}
1796 
1797 	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1798 	if (ret)
1799 		return ret;
1800 
1801 	if (tb[TCA_FLOWER_KEY_FLAGS])
1802 		ret = fl_set_key_flags(tb, &key->control.flags,
1803 				       &mask->control.flags, extack);
1804 
1805 	return ret;
1806 }
1807 
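/* Copy only the used byte range of the source mask (src->range) into the
 * destination and carry the range over, so both masks describe the same
 * window of struct fl_flow_key.
 */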
1808 static void fl_mask_copy(struct fl_flow_mask *dst,
1809 			 struct fl_flow_mask *src)
1810 {
1811 	const void *psrc = fl_key_get_start(&src->key, src);
1812 	void *pdst = fl_key_get_start(&dst->key, src);
1813 
1814 	memcpy(pdst, psrc, fl_mask_range(src));
1815 	dst->range = src->range;
1816 }
1817 
1818 static const struct rhashtable_params fl_ht_params = {
1819 	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1820 	.head_offset = offsetof(struct cls_fl_filter, ht_node),
1821 	.automatic_shrinking = true,
1822 };
1823 
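/* Each mask owns a hash table of the filters that use it. The table hashes
 * struct cls_fl_filter by its masked key (mkey), restricted to the mask's
 * used range: key_offset is advanced by range.start and key_len is set to
 * fl_mask_range(mask), so lookups only compare the bytes the mask covers.
 */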
1824 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1825 {
1826 	mask->filter_ht_params = fl_ht_params;
1827 	mask->filter_ht_params.key_len = fl_mask_range(mask);
1828 	mask->filter_ht_params.key_offset += mask->range.start;
1829 
1830 	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1831 }
1832 
1833 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1834 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
1835 
1836 #define FL_KEY_IS_MASKED(mask, member)						\
1837 	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
1838 		   0, FL_KEY_MEMBER_SIZE(member))				\
1839 
1840 #define FL_KEY_SET(keys, cnt, id, member)					\
1841 	do {									\
1842 		keys[cnt].key_id = id;						\
1843 		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
1844 		cnt++;								\
1845 	} while (0)
1846 
1847 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
1848 	do {									\
1849 		if (FL_KEY_IS_MASKED(mask, member))				\
1850 			FL_KEY_SET(keys, cnt, id, member);			\
1851 	} while (0)
1852 
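/* Build the per-mask flow dissector. CONTROL and BASIC are always dissected;
 * every other key is included only if at least one byte of it is non-zero in
 * the mask, and ENC_CONTROL is added whenever either tunnel address family
 * is masked.
 */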
1853 static void fl_init_dissector(struct flow_dissector *dissector,
1854 			      struct fl_flow_key *mask)
1855 {
1856 	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1857 	size_t cnt = 0;
1858 
1859 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1860 			     FLOW_DISSECTOR_KEY_META, meta);
1861 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1862 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1863 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1864 			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1865 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1866 			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1867 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1868 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1869 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1870 			     FLOW_DISSECTOR_KEY_PORTS, tp);
1871 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1872 			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
1873 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1874 			     FLOW_DISSECTOR_KEY_IP, ip);
1875 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1876 			     FLOW_DISSECTOR_KEY_TCP, tcp);
1877 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1878 			     FLOW_DISSECTOR_KEY_ICMP, icmp);
1879 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1880 			     FLOW_DISSECTOR_KEY_ARP, arp);
1881 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1882 			     FLOW_DISSECTOR_KEY_MPLS, mpls);
1883 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1884 			     FLOW_DISSECTOR_KEY_VLAN, vlan);
1885 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1886 			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1887 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1888 			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1889 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1890 			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1891 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1892 			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1893 	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1894 	    FL_KEY_IS_MASKED(mask, enc_ipv6))
1895 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1896 			   enc_control);
1897 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1898 			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1899 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1900 			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1901 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1902 			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1903 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1904 			     FLOW_DISSECTOR_KEY_CT, ct);
1905 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1906 			     FLOW_DISSECTOR_KEY_HASH, hash);
1907 
1908 	skb_flow_dissector_init(dissector, keys, cnt);
1909 }
1910 
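/* Turn the temporary mask that fl_check_assign_mask() inserted into head->ht
 * into a permanent one: allocate a copy, set up its filter hash table and
 * dissector, take the initial reference and replace the temporary rhashtable
 * node with the permanent one before linking the mask onto head->masks.
 */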
1911 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1912 					       struct fl_flow_mask *mask)
1913 {
1914 	struct fl_flow_mask *newmask;
1915 	int err;
1916 
1917 	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1918 	if (!newmask)
1919 		return ERR_PTR(-ENOMEM);
1920 
1921 	fl_mask_copy(newmask, mask);
1922 
1923 	if ((newmask->key.tp_range.tp_min.dst &&
1924 	     newmask->key.tp_range.tp_max.dst) ||
1925 	    (newmask->key.tp_range.tp_min.src &&
1926 	     newmask->key.tp_range.tp_max.src))
1927 		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1928 
1929 	err = fl_init_mask_hashtable(newmask);
1930 	if (err)
1931 		goto errout_free;
1932 
1933 	fl_init_dissector(&newmask->dissector, &newmask->key);
1934 
1935 	INIT_LIST_HEAD_RCU(&newmask->filters);
1936 
1937 	refcount_set(&newmask->refcnt, 1);
1938 	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1939 				      &newmask->ht_node, mask_ht_params);
1940 	if (err)
1941 		goto errout_destroy;
1942 
1943 	spin_lock(&head->masks_lock);
1944 	list_add_tail_rcu(&newmask->list, &head->masks);
1945 	spin_unlock(&head->masks_lock);
1946 
1947 	return newmask;
1948 
1949 errout_destroy:
1950 	rhashtable_destroy(&newmask->ht);
1951 errout_free:
1952 	kfree(newmask);
1953 
1954 	return ERR_PTR(err);
1955 }
1956 
1957 static int fl_check_assign_mask(struct cls_fl_head *head,
1958 				struct cls_fl_filter *fnew,
1959 				struct cls_fl_filter *fold,
1960 				struct fl_flow_mask *mask)
1961 {
1962 	struct fl_flow_mask *newmask;
1963 	int ret = 0;
1964 
1965 	rcu_read_lock();
1966 
1967 	/* Insert mask as temporary node to prevent concurrent creation of mask
1968 	 * with same key. Any concurrent lookups with same key will return
1969 	 * -EAGAIN because mask's refcnt is zero.
1970 	 */
1971 	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1972 						       &mask->ht_node,
1973 						       mask_ht_params);
1974 	if (!fnew->mask) {
1975 		rcu_read_unlock();
1976 
1977 		if (fold) {
1978 			ret = -EINVAL;
1979 			goto errout_cleanup;
1980 		}
1981 
1982 		newmask = fl_create_new_mask(head, mask);
1983 		if (IS_ERR(newmask)) {
1984 			ret = PTR_ERR(newmask);
1985 			goto errout_cleanup;
1986 		}
1987 
1988 		fnew->mask = newmask;
1989 		return 0;
1990 	} else if (IS_ERR(fnew->mask)) {
1991 		ret = PTR_ERR(fnew->mask);
1992 	} else if (fold && fold->mask != fnew->mask) {
1993 		ret = -EINVAL;
1994 	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1995 		/* Mask was deleted concurrently, try again */
1996 		ret = -EAGAIN;
1997 	}
1998 	rcu_read_unlock();
1999 	return ret;
2000 
2001 errout_cleanup:
2002 	rhashtable_remove_fast(&head->ht, &mask->ht_node,
2003 			       mask_ht_params);
2004 	return ret;
2005 }
2006 
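/* Validate and apply the netlink parameters for a new or replacement filter:
 * validate the actions, bind the class if a classid was given, parse the
 * match into key and mask, compute the mask's used range and the masked key,
 * and finally reject masks that do not fit the chain template.
 */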
2007 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
2008 			struct cls_fl_filter *f, struct fl_flow_mask *mask,
2009 			unsigned long base, struct nlattr **tb,
2010 			struct nlattr *est,
2011 			struct fl_flow_tmplt *tmplt,
2012 			u32 flags, u32 fl_flags,
2013 			struct netlink_ext_ack *extack)
2014 {
2015 	int err;
2016 
2017 	err = tcf_exts_validate_ex(net, tp, tb, est, &f->exts, flags,
2018 				   fl_flags, extack);
2019 	if (err < 0)
2020 		return err;
2021 
2022 	if (tb[TCA_FLOWER_CLASSID]) {
2023 		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
2024 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
2025 			rtnl_lock();
2026 		tcf_bind_filter(tp, &f->res, base);
2027 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
2028 			rtnl_unlock();
2029 	}
2030 
2031 	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
2032 	if (err)
2033 		return err;
2034 
2035 	fl_mask_update_range(mask);
2036 	fl_set_masked_key(&f->mkey, &f->key, mask);
2037 
2038 	if (!fl_mask_fits_tmplt(tmplt, mask)) {
2039 		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
2040 		return -EINVAL;
2041 	}
2042 
2043 	return 0;
2044 }
2045 
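/* Insert the new filter into its mask's hash table, reporting via *in_ht
 * whether the node actually went in. A clashing key is only tolerated when
 * an existing filter is being overwritten (fold != NULL); in that case
 * fl_change() inserts the node itself while it swaps out the old filter.
 */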
2046 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
2047 			       struct cls_fl_filter *fold,
2048 			       bool *in_ht)
2049 {
2050 	struct fl_flow_mask *mask = fnew->mask;
2051 	int err;
2052 
2053 	err = rhashtable_lookup_insert_fast(&mask->ht,
2054 					    &fnew->ht_node,
2055 					    mask->filter_ht_params);
2056 	if (err) {
2057 		*in_ht = false;
2058 		/* It is okay if a filter with the same key already exists
2059 		 * when overwriting.
2060 		 */
2061 		return fold && err == -EEXIST ? 0 : err;
2062 	}
2063 
2064 	*in_ht = true;
2065 	return 0;
2066 }
2067 
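/* Create a new filter or replace an existing one ('fold'). Attribute parsing,
 * mask assignment, hash table insertion and hardware offload are all done
 * without tp->lock; the lock is only taken at the end to publish the new
 * filter, swap it for 'fold' in the handle IDR and filter list, or allocate a
 * handle for a brand new filter. Concurrent deletion of tp or fold is
 * detected there and reported as -EAGAIN so that cls_api retries.
 */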
2068 static int fl_change(struct net *net, struct sk_buff *in_skb,
2069 		     struct tcf_proto *tp, unsigned long base,
2070 		     u32 handle, struct nlattr **tca,
2071 		     void **arg, u32 flags,
2072 		     struct netlink_ext_ack *extack)
2073 {
2074 	struct cls_fl_head *head = fl_head_dereference(tp);
2075 	bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
2076 	struct cls_fl_filter *fold = *arg;
2077 	struct cls_fl_filter *fnew;
2078 	struct fl_flow_mask *mask;
2079 	struct nlattr **tb;
2080 	bool in_ht;
2081 	int err;
2082 
2083 	if (!tca[TCA_OPTIONS]) {
2084 		err = -EINVAL;
2085 		goto errout_fold;
2086 	}
2087 
2088 	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
2089 	if (!mask) {
2090 		err = -ENOBUFS;
2091 		goto errout_fold;
2092 	}
2093 
2094 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2095 	if (!tb) {
2096 		err = -ENOBUFS;
2097 		goto errout_mask_alloc;
2098 	}
2099 
2100 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2101 					  tca[TCA_OPTIONS], fl_policy, NULL);
2102 	if (err < 0)
2103 		goto errout_tb;
2104 
2105 	if (fold && handle && fold->handle != handle) {
2106 		err = -EINVAL;
2107 		goto errout_tb;
2108 	}
2109 
2110 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
2111 	if (!fnew) {
2112 		err = -ENOBUFS;
2113 		goto errout_tb;
2114 	}
2115 	INIT_LIST_HEAD(&fnew->hw_list);
2116 	refcount_set(&fnew->refcnt, 1);
2117 
2118 	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
2119 	if (err < 0)
2120 		goto errout;
2121 
2122 	if (tb[TCA_FLOWER_FLAGS]) {
2123 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
2124 
2125 		if (!tc_flags_valid(fnew->flags)) {
2126 			err = -EINVAL;
2127 			goto errout;
2128 		}
2129 	}
2130 
2131 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
2132 			   tp->chain->tmplt_priv, flags, fnew->flags,
2133 			   extack);
2134 	if (err)
2135 		goto errout;
2136 
2137 	err = fl_check_assign_mask(head, fnew, fold, mask);
2138 	if (err)
2139 		goto errout;
2140 
2141 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
2142 	if (err)
2143 		goto errout_mask;
2144 
2145 	if (!tc_skip_hw(fnew->flags)) {
2146 		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
2147 		if (err)
2148 			goto errout_ht;
2149 	}
2150 
2151 	if (!tc_in_hw(fnew->flags))
2152 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2153 
2154 	spin_lock(&tp->lock);
2155 
2156 	/* tp was deleted concurrently. -EAGAIN will cause the caller to look up
2157 	 * the proto again or create a new one, if necessary.
2158 	 */
2159 	if (tp->deleting) {
2160 		err = -EAGAIN;
2161 		goto errout_hw;
2162 	}
2163 
2164 	if (fold) {
2165 		/* Fold filter was deleted concurrently. Retry lookup. */
2166 		if (fold->deleted) {
2167 			err = -EAGAIN;
2168 			goto errout_hw;
2169 		}
2170 
2171 		fnew->handle = handle;
2172 
2173 		if (!in_ht) {
2174 			struct rhashtable_params params =
2175 				fnew->mask->filter_ht_params;
2176 
2177 			err = rhashtable_insert_fast(&fnew->mask->ht,
2178 						     &fnew->ht_node,
2179 						     params);
2180 			if (err)
2181 				goto errout_hw;
2182 			in_ht = true;
2183 		}
2184 
2185 		refcount_inc(&fnew->refcnt);
2186 		rhashtable_remove_fast(&fold->mask->ht,
2187 				       &fold->ht_node,
2188 				       fold->mask->filter_ht_params);
2189 		idr_replace(&head->handle_idr, fnew, fnew->handle);
2190 		list_replace_rcu(&fold->list, &fnew->list);
2191 		fold->deleted = true;
2192 
2193 		spin_unlock(&tp->lock);
2194 
2195 		fl_mask_put(head, fold->mask);
2196 		if (!tc_skip_hw(fold->flags))
2197 			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2198 		tcf_unbind_filter(tp, &fold->res);
2199 		/* Caller holds reference to fold, so refcnt is always > 0
2200 		 * after this.
2201 		 */
2202 		refcount_dec(&fold->refcnt);
2203 		__fl_put(fold);
2204 	} else {
2205 		if (handle) {
2206 			/* user specified a handle that does not exist yet */
2207 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2208 					    handle, GFP_ATOMIC);
2209 
2210 			/* A filter with the specified handle was concurrently
2211 			 * inserted after the initial check in cls_api. This is not
2212 			 * necessarily an error if NLM_F_EXCL is not set in the
2213 			 * message flags. Returning -EAGAIN will cause cls_api to
2214 			 * try to update the concurrently inserted rule.
2215 			 */
2216 			if (err == -ENOSPC)
2217 				err = -EAGAIN;
2218 		} else {
2219 			handle = 1;
2220 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2221 					    INT_MAX, GFP_ATOMIC);
2222 		}
2223 		if (err)
2224 			goto errout_hw;
2225 
2226 		refcount_inc(&fnew->refcnt);
2227 		fnew->handle = handle;
2228 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2229 		spin_unlock(&tp->lock);
2230 	}
2231 
2232 	*arg = fnew;
2233 
2234 	kfree(tb);
2235 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2236 	return 0;
2237 
2238 errout_ht:
2239 	spin_lock(&tp->lock);
2240 errout_hw:
2241 	fnew->deleted = true;
2242 	spin_unlock(&tp->lock);
2243 	if (!tc_skip_hw(fnew->flags))
2244 		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2245 	if (in_ht)
2246 		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2247 				       fnew->mask->filter_ht_params);
2248 errout_mask:
2249 	fl_mask_put(head, fnew->mask);
2250 errout:
2251 	__fl_put(fnew);
2252 errout_tb:
2253 	kfree(tb);
2254 errout_mask_alloc:
2255 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2256 errout_fold:
2257 	if (fold)
2258 		__fl_put(fold);
2259 	return err;
2260 }
2261 
2262 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2263 		     bool rtnl_held, struct netlink_ext_ack *extack)
2264 {
2265 	struct cls_fl_head *head = fl_head_dereference(tp);
2266 	struct cls_fl_filter *f = arg;
2267 	bool last_on_mask;
2268 	int err = 0;
2269 
2270 	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2271 	*last = list_empty(&head->masks);
2272 	__fl_put(f);
2273 
2274 	return err;
2275 }
2276 
2277 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2278 		    bool rtnl_held)
2279 {
2280 	struct cls_fl_head *head = fl_head_dereference(tp);
2281 	unsigned long id = arg->cookie, tmp;
2282 	struct cls_fl_filter *f;
2283 
2284 	arg->count = arg->skip;
2285 
2286 	rcu_read_lock();
2287 	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2288 		/* don't return filters that are being deleted */
2289 		if (!refcount_inc_not_zero(&f->refcnt))
2290 			continue;
2291 		rcu_read_unlock();
2292 
2293 		if (arg->fn(tp, f, arg) < 0) {
2294 			__fl_put(f);
2295 			arg->stop = 1;
2296 			rcu_read_lock();
2297 			break;
2298 		}
2299 		__fl_put(f);
2300 		arg->count++;
2301 		rcu_read_lock();
2302 	}
2303 	rcu_read_unlock();
2304 	arg->cookie = id;
2305 }
2306 
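/* Walk the hw_filters list under tp->lock and return the next filter after
 * 'f' with an elevated reference count, or NULL when the list is exhausted.
 * Filters that could not be referenced are skipped, as are filters already
 * marked deleted when new rules are being added (add == true).
 */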
2307 static struct cls_fl_filter *
2308 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2309 {
2310 	struct cls_fl_head *head = fl_head_dereference(tp);
2311 
2312 	spin_lock(&tp->lock);
2313 	if (list_empty(&head->hw_filters)) {
2314 		spin_unlock(&tp->lock);
2315 		return NULL;
2316 	}
2317 
2318 	if (!f)
2319 		f = list_entry(&head->hw_filters, struct cls_fl_filter,
2320 			       hw_list);
2321 	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2322 		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2323 			spin_unlock(&tp->lock);
2324 			return f;
2325 		}
2326 	}
2327 
2328 	spin_unlock(&tp->lock);
2329 	return NULL;
2330 }
2331 
2332 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2333 			void *cb_priv, struct netlink_ext_ack *extack)
2334 {
2335 	struct tcf_block *block = tp->chain->block;
2336 	struct flow_cls_offload cls_flower = {};
2337 	struct cls_fl_filter *f = NULL;
2338 	int err;
2339 
2340 	/* hw_filters list can only be changed by hw offload functions after
2341 	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
2342 	 * iterating it.
2343 	 */
2344 	ASSERT_RTNL();
2345 
2346 	while ((f = fl_get_next_hw_filter(tp, f, add))) {
2347 		cls_flower.rule =
2348 			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2349 		if (!cls_flower.rule) {
2350 			__fl_put(f);
2351 			return -ENOMEM;
2352 		}
2353 
2354 		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2355 					   extack);
2356 		cls_flower.command = add ?
2357 			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2358 		cls_flower.cookie = (unsigned long)f;
2359 		cls_flower.rule->match.dissector = &f->mask->dissector;
2360 		cls_flower.rule->match.mask = &f->mask->key;
2361 		cls_flower.rule->match.key = &f->mkey;
2362 
2363 		err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
2364 					      cls_flower.common.extack);
2365 		if (err) {
2366 			kfree(cls_flower.rule);
2367 			if (tc_skip_sw(f->flags)) {
2368 				__fl_put(f);
2369 				return err;
2370 			}
2371 			goto next_flow;
2372 		}
2373 
2374 		cls_flower.classid = f->res.classid;
2375 
2376 		err = tc_setup_cb_reoffload(block, tp, add, cb,
2377 					    TC_SETUP_CLSFLOWER, &cls_flower,
2378 					    cb_priv, &f->flags,
2379 					    &f->in_hw_count);
2380 		tc_cleanup_offload_action(&cls_flower.rule->action);
2381 		kfree(cls_flower.rule);
2382 
2383 		if (err) {
2384 			__fl_put(f);
2385 			return err;
2386 		}
2387 next_flow:
2388 		__fl_put(f);
2389 	}
2390 
2391 	return 0;
2392 }
2393 
2394 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2395 {
2396 	struct flow_cls_offload *cls_flower = type_data;
2397 	struct cls_fl_filter *f =
2398 		(struct cls_fl_filter *) cls_flower->cookie;
2399 	struct cls_fl_head *head = fl_head_dereference(tp);
2400 
2401 	spin_lock(&tp->lock);
2402 	list_add(&f->hw_list, &head->hw_filters);
2403 	spin_unlock(&tp->lock);
2404 }
2405 
2406 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2407 {
2408 	struct flow_cls_offload *cls_flower = type_data;
2409 	struct cls_fl_filter *f =
2410 		(struct cls_fl_filter *) cls_flower->cookie;
2411 
2412 	spin_lock(&tp->lock);
2413 	if (!list_empty(&f->hw_list))
2414 		list_del_init(&f->hw_list);
2415 	spin_unlock(&tp->lock);
2416 }
2417 
2418 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2419 			      struct fl_flow_tmplt *tmplt)
2420 {
2421 	struct flow_cls_offload cls_flower = {};
2422 	struct tcf_block *block = chain->block;
2423 
2424 	cls_flower.rule = flow_rule_alloc(0);
2425 	if (!cls_flower.rule)
2426 		return -ENOMEM;
2427 
2428 	cls_flower.common.chain_index = chain->index;
2429 	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2430 	cls_flower.cookie = (unsigned long) tmplt;
2431 	cls_flower.rule->match.dissector = &tmplt->dissector;
2432 	cls_flower.rule->match.mask = &tmplt->mask;
2433 	cls_flower.rule->match.key = &tmplt->dummy_key;
2434 
2435 	/* We don't care if any of the drivers fail to handle this
2436 	 * call. It serves just as a hint for them.
2437 	 */
2438 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2439 	kfree(cls_flower.rule);
2440 
2441 	return 0;
2442 }
2443 
2444 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2445 				struct fl_flow_tmplt *tmplt)
2446 {
2447 	struct flow_cls_offload cls_flower = {};
2448 	struct tcf_block *block = chain->block;
2449 
2450 	cls_flower.common.chain_index = chain->index;
2451 	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2452 	cls_flower.cookie = (unsigned long) tmplt;
2453 
2454 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2455 }
2456 
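/* Create a chain template: parse the supplied flower attributes into a dummy
 * key and mask, build a dissector for that mask and advertise the template
 * to the hardware. Masks of filters later added to the chain must fit this
 * template (see fl_mask_fits_tmplt() in fl_set_parms()).
 */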
2457 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2458 			     struct nlattr **tca,
2459 			     struct netlink_ext_ack *extack)
2460 {
2461 	struct fl_flow_tmplt *tmplt;
2462 	struct nlattr **tb;
2463 	int err;
2464 
2465 	if (!tca[TCA_OPTIONS])
2466 		return ERR_PTR(-EINVAL);
2467 
2468 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2469 	if (!tb)
2470 		return ERR_PTR(-ENOBUFS);
2471 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2472 					  tca[TCA_OPTIONS], fl_policy, NULL);
2473 	if (err)
2474 		goto errout_tb;
2475 
2476 	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2477 	if (!tmplt) {
2478 		err = -ENOMEM;
2479 		goto errout_tb;
2480 	}
2481 	tmplt->chain = chain;
2482 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2483 	if (err)
2484 		goto errout_tmplt;
2485 
2486 	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2487 
2488 	err = fl_hw_create_tmplt(chain, tmplt);
2489 	if (err)
2490 		goto errout_tmplt;
2491 
2492 	kfree(tb);
2493 	return tmplt;
2494 
2495 errout_tmplt:
2496 	kfree(tmplt);
2497 errout_tb:
2498 	kfree(tb);
2499 	return ERR_PTR(err);
2500 }
2501 
2502 static void fl_tmplt_destroy(void *tmplt_priv)
2503 {
2504 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2505 
2506 	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2507 	kfree(tmplt);
2508 }
2509 
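/* Dump one value/mask pair. Nothing is emitted when the mask is all zeroes,
 * i.e. the field is not matched on; the mask attribute itself is only added
 * for fields that have a dedicated *_MASK attribute (mask_type !=
 * TCA_FLOWER_UNSPEC).
 */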
2510 static int fl_dump_key_val(struct sk_buff *skb,
2511 			   void *val, int val_type,
2512 			   void *mask, int mask_type, int len)
2513 {
2514 	int err;
2515 
2516 	if (!memchr_inv(mask, 0, len))
2517 		return 0;
2518 	err = nla_put(skb, val_type, len, val);
2519 	if (err)
2520 		return err;
2521 	if (mask_type != TCA_FLOWER_UNSPEC) {
2522 		err = nla_put(skb, mask_type, len, mask);
2523 		if (err)
2524 			return err;
2525 	}
2526 	return 0;
2527 }
2528 
2529 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2530 				  struct fl_flow_key *mask)
2531 {
2532 	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2533 			    TCA_FLOWER_KEY_PORT_DST_MIN,
2534 			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2535 			    sizeof(key->tp_range.tp_min.dst)) ||
2536 	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2537 			    TCA_FLOWER_KEY_PORT_DST_MAX,
2538 			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2539 			    sizeof(key->tp_range.tp_max.dst)) ||
2540 	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2541 			    TCA_FLOWER_KEY_PORT_SRC_MIN,
2542 			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2543 			    sizeof(key->tp_range.tp_min.src)) ||
2544 	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2545 			    TCA_FLOWER_KEY_PORT_SRC_MAX,
2546 			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2547 			    sizeof(key->tp_range.tp_max.src)))
2548 		return -1;
2549 
2550 	return 0;
2551 }
2552 
2553 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2554 				    struct flow_dissector_key_mpls *mpls_key,
2555 				    struct flow_dissector_key_mpls *mpls_mask,
2556 				    u8 lse_index)
2557 {
2558 	struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2559 	struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2560 	int err;
2561 
2562 	err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2563 			 lse_index + 1);
2564 	if (err)
2565 		return err;
2566 
2567 	if (lse_mask->mpls_ttl) {
2568 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2569 				 lse_key->mpls_ttl);
2570 		if (err)
2571 			return err;
2572 	}
2573 	if (lse_mask->mpls_bos) {
2574 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2575 				 lse_key->mpls_bos);
2576 		if (err)
2577 			return err;
2578 	}
2579 	if (lse_mask->mpls_tc) {
2580 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2581 				 lse_key->mpls_tc);
2582 		if (err)
2583 			return err;
2584 	}
2585 	if (lse_mask->mpls_label) {
2586 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2587 				  lse_key->mpls_label);
2588 		if (err)
2589 			return err;
2590 	}
2591 
2592 	return 0;
2593 }
2594 
2595 static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2596 				 struct flow_dissector_key_mpls *mpls_key,
2597 				 struct flow_dissector_key_mpls *mpls_mask)
2598 {
2599 	struct nlattr *opts;
2600 	struct nlattr *lse;
2601 	u8 lse_index;
2602 	int err;
2603 
2604 	opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2605 	if (!opts)
2606 		return -EMSGSIZE;
2607 
2608 	for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2609 		if (!(mpls_mask->used_lses & 1 << lse_index))
2610 			continue;
2611 
2612 		lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2613 		if (!lse) {
2614 			err = -EMSGSIZE;
2615 			goto err_opts;
2616 		}
2617 
2618 		err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2619 					       lse_index);
2620 		if (err)
2621 			goto err_opts_lse;
2622 		nla_nest_end(skb, lse);
2623 	}
2624 	nla_nest_end(skb, opts);
2625 
2626 	return 0;
2627 
2628 err_opts_lse:
2629 	nla_nest_cancel(skb, lse);
2630 err_opts:
2631 	nla_nest_cancel(skb, opts);
2632 
2633 	return err;
2634 }
2635 
2636 static int fl_dump_key_mpls(struct sk_buff *skb,
2637 			    struct flow_dissector_key_mpls *mpls_key,
2638 			    struct flow_dissector_key_mpls *mpls_mask)
2639 {
2640 	struct flow_dissector_mpls_lse *lse_mask;
2641 	struct flow_dissector_mpls_lse *lse_key;
2642 	int err;
2643 
2644 	if (!mpls_mask->used_lses)
2645 		return 0;
2646 
2647 	lse_mask = &mpls_mask->ls[0];
2648 	lse_key = &mpls_key->ls[0];
2649 
2650 	/* For backward compatibility, don't use the MPLS nested attributes if
2651 	 * the rule can be expressed using the old attributes.
2652 	 */
2653 	if (mpls_mask->used_lses & ~1 ||
2654 	    (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2655 	     !lse_mask->mpls_tc && !lse_mask->mpls_label))
2656 		return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2657 
2658 	if (lse_mask->mpls_ttl) {
2659 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2660 				 lse_key->mpls_ttl);
2661 		if (err)
2662 			return err;
2663 	}
2664 	if (lse_mask->mpls_tc) {
2665 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2666 				 lse_key->mpls_tc);
2667 		if (err)
2668 			return err;
2669 	}
2670 	if (lse_mask->mpls_label) {
2671 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2672 				  lse_key->mpls_label);
2673 		if (err)
2674 			return err;
2675 	}
2676 	if (lse_mask->mpls_bos) {
2677 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2678 				 lse_key->mpls_bos);
2679 		if (err)
2680 			return err;
2681 	}
2682 	return 0;
2683 }
2684 
2685 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2686 			  struct flow_dissector_key_ip *key,
2687 			  struct flow_dissector_key_ip *mask)
2688 {
2689 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2690 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2691 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2692 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2693 
2694 	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2695 	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2696 		return -1;
2697 
2698 	return 0;
2699 }
2700 
2701 static int fl_dump_key_vlan(struct sk_buff *skb,
2702 			    int vlan_id_key, int vlan_prio_key,
2703 			    struct flow_dissector_key_vlan *vlan_key,
2704 			    struct flow_dissector_key_vlan *vlan_mask)
2705 {
2706 	int err;
2707 
2708 	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2709 		return 0;
2710 	if (vlan_mask->vlan_id) {
2711 		err = nla_put_u16(skb, vlan_id_key,
2712 				  vlan_key->vlan_id);
2713 		if (err)
2714 			return err;
2715 	}
2716 	if (vlan_mask->vlan_priority) {
2717 		err = nla_put_u8(skb, vlan_prio_key,
2718 				 vlan_key->vlan_priority);
2719 		if (err)
2720 			return err;
2721 	}
2722 	return 0;
2723 }
2724 
2725 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2726 			    u32 *flower_key, u32 *flower_mask,
2727 			    u32 flower_flag_bit, u32 dissector_flag_bit)
2728 {
2729 	if (dissector_mask & dissector_flag_bit) {
2730 		*flower_mask |= flower_flag_bit;
2731 		if (dissector_key & dissector_flag_bit)
2732 			*flower_key |= flower_flag_bit;
2733 	}
2734 }
2735 
2736 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2737 {
2738 	u32 key, mask;
2739 	__be32 _key, _mask;
2740 	int err;
2741 
2742 	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2743 		return 0;
2744 
2745 	key = 0;
2746 	mask = 0;
2747 
2748 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2749 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2750 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2751 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2752 			FLOW_DIS_FIRST_FRAG);
2753 
2754 	_key = cpu_to_be32(key);
2755 	_mask = cpu_to_be32(mask);
2756 
2757 	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2758 	if (err)
2759 		return err;
2760 
2761 	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2762 }
2763 
2764 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2765 				  struct flow_dissector_key_enc_opts *enc_opts)
2766 {
2767 	struct geneve_opt *opt;
2768 	struct nlattr *nest;
2769 	int opt_off = 0;
2770 
2771 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2772 	if (!nest)
2773 		goto nla_put_failure;
2774 
2775 	while (enc_opts->len > opt_off) {
2776 		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2777 
2778 		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2779 				 opt->opt_class))
2780 			goto nla_put_failure;
2781 		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2782 			       opt->type))
2783 			goto nla_put_failure;
2784 		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2785 			    opt->length * 4, opt->opt_data))
2786 			goto nla_put_failure;
2787 
2788 		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2789 	}
2790 	nla_nest_end(skb, nest);
2791 	return 0;
2792 
2793 nla_put_failure:
2794 	nla_nest_cancel(skb, nest);
2795 	return -EMSGSIZE;
2796 }
2797 
2798 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2799 				 struct flow_dissector_key_enc_opts *enc_opts)
2800 {
2801 	struct vxlan_metadata *md;
2802 	struct nlattr *nest;
2803 
2804 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2805 	if (!nest)
2806 		goto nla_put_failure;
2807 
2808 	md = (struct vxlan_metadata *)&enc_opts->data[0];
2809 	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2810 		goto nla_put_failure;
2811 
2812 	nla_nest_end(skb, nest);
2813 	return 0;
2814 
2815 nla_put_failure:
2816 	nla_nest_cancel(skb, nest);
2817 	return -EMSGSIZE;
2818 }
2819 
2820 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2821 				  struct flow_dissector_key_enc_opts *enc_opts)
2822 {
2823 	struct erspan_metadata *md;
2824 	struct nlattr *nest;
2825 
2826 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2827 	if (!nest)
2828 		goto nla_put_failure;
2829 
2830 	md = (struct erspan_metadata *)&enc_opts->data[0];
2831 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2832 		goto nla_put_failure;
2833 
2834 	if (md->version == 1 &&
2835 	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2836 		goto nla_put_failure;
2837 
2838 	if (md->version == 2 &&
2839 	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2840 			md->u.md2.dir) ||
2841 	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2842 			get_hwid(&md->u.md2))))
2843 		goto nla_put_failure;
2844 
2845 	nla_nest_end(skb, nest);
2846 	return 0;
2847 
2848 nla_put_failure:
2849 	nla_nest_cancel(skb, nest);
2850 	return -EMSGSIZE;
2851 }
2852 
2853 static int fl_dump_key_gtp_opt(struct sk_buff *skb,
2854 			       struct flow_dissector_key_enc_opts *enc_opts)
2855 
2856 {
2857 	struct gtp_pdu_session_info *session_info;
2858 	struct nlattr *nest;
2859 
2860 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP);
2861 	if (!nest)
2862 		goto nla_put_failure;
2863 
2864 	session_info = (struct gtp_pdu_session_info *)&enc_opts->data[0];
2865 
2866 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE,
2867 		       session_info->pdu_type))
2868 		goto nla_put_failure;
2869 
2870 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, session_info->qfi))
2871 		goto nla_put_failure;
2872 
2873 	nla_nest_end(skb, nest);
2874 	return 0;
2875 
2876 nla_put_failure:
2877 	nla_nest_cancel(skb, nest);
2878 	return -EMSGSIZE;
2879 }
2880 
2881 static int fl_dump_key_ct(struct sk_buff *skb,
2882 			  struct flow_dissector_key_ct *key,
2883 			  struct flow_dissector_key_ct *mask)
2884 {
2885 	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2886 	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2887 			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2888 			    sizeof(key->ct_state)))
2889 		goto nla_put_failure;
2890 
2891 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2892 	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2893 			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2894 			    sizeof(key->ct_zone)))
2895 		goto nla_put_failure;
2896 
2897 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2898 	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2899 			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2900 			    sizeof(key->ct_mark)))
2901 		goto nla_put_failure;
2902 
2903 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2904 	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2905 			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2906 			    sizeof(key->ct_labels)))
2907 		goto nla_put_failure;
2908 
2909 	return 0;
2910 
2911 nla_put_failure:
2912 	return -EMSGSIZE;
2913 }
2914 
2915 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2916 			       struct flow_dissector_key_enc_opts *enc_opts)
2917 {
2918 	struct nlattr *nest;
2919 	int err;
2920 
2921 	if (!enc_opts->len)
2922 		return 0;
2923 
2924 	nest = nla_nest_start_noflag(skb, enc_opt_type);
2925 	if (!nest)
2926 		goto nla_put_failure;
2927 
2928 	switch (enc_opts->dst_opt_type) {
2929 	case TUNNEL_GENEVE_OPT:
2930 		err = fl_dump_key_geneve_opt(skb, enc_opts);
2931 		if (err)
2932 			goto nla_put_failure;
2933 		break;
2934 	case TUNNEL_VXLAN_OPT:
2935 		err = fl_dump_key_vxlan_opt(skb, enc_opts);
2936 		if (err)
2937 			goto nla_put_failure;
2938 		break;
2939 	case TUNNEL_ERSPAN_OPT:
2940 		err = fl_dump_key_erspan_opt(skb, enc_opts);
2941 		if (err)
2942 			goto nla_put_failure;
2943 		break;
2944 	case TUNNEL_GTP_OPT:
2945 		err = fl_dump_key_gtp_opt(skb, enc_opts);
2946 		if (err)
2947 			goto nla_put_failure;
2948 		break;
2949 	default:
2950 		goto nla_put_failure;
2951 	}
2952 	nla_nest_end(skb, nest);
2953 	return 0;
2954 
2955 nla_put_failure:
2956 	nla_nest_cancel(skb, nest);
2957 	return -EMSGSIZE;
2958 }
2959 
2960 static int fl_dump_key_enc_opt(struct sk_buff *skb,
2961 			       struct flow_dissector_key_enc_opts *key_opts,
2962 			       struct flow_dissector_key_enc_opts *msk_opts)
2963 {
2964 	int err;
2965 
2966 	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2967 	if (err)
2968 		return err;
2969 
2970 	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2971 }
2972 
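/* Dump the match portion of a filter or template. The structure mirrors
 * fl_set_key(): every field is emitted through fl_dump_key_val() or a
 * type-specific helper, so unmasked fields are silently omitted from the
 * netlink message.
 */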
2973 static int fl_dump_key(struct sk_buff *skb, struct net *net,
2974 		       struct fl_flow_key *key, struct fl_flow_key *mask)
2975 {
2976 	if (mask->meta.ingress_ifindex) {
2977 		struct net_device *dev;
2978 
2979 		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
2980 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2981 			goto nla_put_failure;
2982 	}
2983 
2984 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2985 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2986 			    sizeof(key->eth.dst)) ||
2987 	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2988 			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2989 			    sizeof(key->eth.src)) ||
2990 	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2991 			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2992 			    sizeof(key->basic.n_proto)))
2993 		goto nla_put_failure;
2994 
2995 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2996 		goto nla_put_failure;
2997 
2998 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
2999 			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
3000 		goto nla_put_failure;
3001 
3002 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
3003 			     TCA_FLOWER_KEY_CVLAN_PRIO,
3004 			     &key->cvlan, &mask->cvlan) ||
3005 	    (mask->cvlan.vlan_tpid &&
3006 	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3007 			  key->cvlan.vlan_tpid)))
3008 		goto nla_put_failure;
3009 
3010 	if (mask->basic.n_proto) {
3011 		if (mask->cvlan.vlan_eth_type) {
3012 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
3013 					 key->basic.n_proto))
3014 				goto nla_put_failure;
3015 		} else if (mask->vlan.vlan_eth_type) {
3016 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3017 					 key->vlan.vlan_eth_type))
3018 				goto nla_put_failure;
3019 		}
3020 	}
3021 
3022 	if ((key->basic.n_proto == htons(ETH_P_IP) ||
3023 	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
3024 	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
3025 			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
3026 			    sizeof(key->basic.ip_proto)) ||
3027 	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
3028 		goto nla_put_failure;
3029 
3030 	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3031 	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
3032 			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
3033 			     sizeof(key->ipv4.src)) ||
3034 	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
3035 			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
3036 			     sizeof(key->ipv4.dst))))
3037 		goto nla_put_failure;
3038 	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3039 		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
3040 				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
3041 				  sizeof(key->ipv6.src)) ||
3042 		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
3043 				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
3044 				  sizeof(key->ipv6.dst))))
3045 		goto nla_put_failure;
3046 
3047 	if (key->basic.ip_proto == IPPROTO_TCP &&
3048 	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
3049 			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
3050 			     sizeof(key->tp.src)) ||
3051 	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
3052 			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
3053 			     sizeof(key->tp.dst)) ||
3054 	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
3055 			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
3056 			     sizeof(key->tcp.flags))))
3057 		goto nla_put_failure;
3058 	else if (key->basic.ip_proto == IPPROTO_UDP &&
3059 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
3060 				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
3061 				  sizeof(key->tp.src)) ||
3062 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
3063 				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
3064 				  sizeof(key->tp.dst))))
3065 		goto nla_put_failure;
3066 	else if (key->basic.ip_proto == IPPROTO_SCTP &&
3067 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
3068 				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
3069 				  sizeof(key->tp.src)) ||
3070 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
3071 				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
3072 				  sizeof(key->tp.dst))))
3073 		goto nla_put_failure;
3074 	else if (key->basic.n_proto == htons(ETH_P_IP) &&
3075 		 key->basic.ip_proto == IPPROTO_ICMP &&
3076 		 (fl_dump_key_val(skb, &key->icmp.type,
3077 				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
3078 				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
3079 				  sizeof(key->icmp.type)) ||
3080 		  fl_dump_key_val(skb, &key->icmp.code,
3081 				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
3082 				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
3083 				  sizeof(key->icmp.code))))
3084 		goto nla_put_failure;
3085 	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
3086 		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
3087 		 (fl_dump_key_val(skb, &key->icmp.type,
3088 				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
3089 				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
3090 				  sizeof(key->icmp.type)) ||
3091 		  fl_dump_key_val(skb, &key->icmp.code,
3092 				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
3093 				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
3094 				  sizeof(key->icmp.code))))
3095 		goto nla_put_failure;
3096 	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
3097 		  key->basic.n_proto == htons(ETH_P_RARP)) &&
3098 		 (fl_dump_key_val(skb, &key->arp.sip,
3099 				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
3100 				  TCA_FLOWER_KEY_ARP_SIP_MASK,
3101 				  sizeof(key->arp.sip)) ||
3102 		  fl_dump_key_val(skb, &key->arp.tip,
3103 				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
3104 				  TCA_FLOWER_KEY_ARP_TIP_MASK,
3105 				  sizeof(key->arp.tip)) ||
3106 		  fl_dump_key_val(skb, &key->arp.op,
3107 				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
3108 				  TCA_FLOWER_KEY_ARP_OP_MASK,
3109 				  sizeof(key->arp.op)) ||
3110 		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
3111 				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
3112 				  sizeof(key->arp.sha)) ||
3113 		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
3114 				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
3115 				  sizeof(key->arp.tha))))
3116 		goto nla_put_failure;
3117 
3118 	if ((key->basic.ip_proto == IPPROTO_TCP ||
3119 	     key->basic.ip_proto == IPPROTO_UDP ||
3120 	     key->basic.ip_proto == IPPROTO_SCTP) &&
3121 	     fl_dump_key_port_range(skb, key, mask))
3122 		goto nla_put_failure;
3123 
3124 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3125 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
3126 			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
3127 			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
3128 			    sizeof(key->enc_ipv4.src)) ||
3129 	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
3130 			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
3131 			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
3132 			     sizeof(key->enc_ipv4.dst))))
3133 		goto nla_put_failure;
3134 	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3135 		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
3136 			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
3137 			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
3138 			    sizeof(key->enc_ipv6.src)) ||
3139 		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
3140 				 TCA_FLOWER_KEY_ENC_IPV6_DST,
3141 				 &mask->enc_ipv6.dst,
3142 				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
3143 			    sizeof(key->enc_ipv6.dst))))
3144 		goto nla_put_failure;
3145 
3146 	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
3147 			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
3148 			    sizeof(key->enc_key_id)) ||
3149 	    fl_dump_key_val(skb, &key->enc_tp.src,
3150 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
3151 			    &mask->enc_tp.src,
3152 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
3153 			    sizeof(key->enc_tp.src)) ||
3154 	    fl_dump_key_val(skb, &key->enc_tp.dst,
3155 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
3156 			    &mask->enc_tp.dst,
3157 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
3158 			    sizeof(key->enc_tp.dst)) ||
3159 	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
3160 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
3161 		goto nla_put_failure;
3162 
3163 	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
3164 		goto nla_put_failure;
3165 
3166 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
3167 		goto nla_put_failure;
3168 
3169 	if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
3170 			     &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
3171 			     sizeof(key->hash.hash)))
3172 		goto nla_put_failure;
3173 
3174 	return 0;
3175 
3176 nla_put_failure:
3177 	return -EMSGSIZE;
3178 }
3179 
3180 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
3181 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3182 {
3183 	struct cls_fl_filter *f = fh;
3184 	struct nlattr *nest;
3185 	struct fl_flow_key *key, *mask;
3186 	bool skip_hw;
3187 
3188 	if (!f)
3189 		return skb->len;
3190 
3191 	t->tcm_handle = f->handle;
3192 
3193 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3194 	if (!nest)
3195 		goto nla_put_failure;
3196 
3197 	spin_lock(&tp->lock);
3198 
3199 	if (f->res.classid &&
3200 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
3201 		goto nla_put_failure_locked;
3202 
3203 	key = &f->key;
3204 	mask = &f->mask->key;
3205 	skip_hw = tc_skip_hw(f->flags);
3206 
3207 	if (fl_dump_key(skb, net, key, mask))
3208 		goto nla_put_failure_locked;
3209 
3210 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3211 		goto nla_put_failure_locked;
3212 
3213 	spin_unlock(&tp->lock);
3214 
3215 	if (!skip_hw)
3216 		fl_hw_update_stats(tp, f, rtnl_held);
3217 
3218 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3219 		goto nla_put_failure;
3220 
3221 	if (tcf_exts_dump(skb, &f->exts))
3222 		goto nla_put_failure;
3223 
3224 	nla_nest_end(skb, nest);
3225 
3226 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3227 		goto nla_put_failure;
3228 
3229 	return skb->len;
3230 
3231 nla_put_failure_locked:
3232 	spin_unlock(&tp->lock);
3233 nla_put_failure:
3234 	nla_nest_cancel(skb, nest);
3235 	return -1;
3236 }
3237 
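/* Terse dump: report only the handle, flags and a terse view of the actions,
 * omitting the full match key. Hardware stats are still refreshed via
 * fl_hw_update_stats() unless the filter is skip_hw.
 */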
3238 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3239 			 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3240 {
3241 	struct cls_fl_filter *f = fh;
3242 	struct nlattr *nest;
3243 	bool skip_hw;
3244 
3245 	if (!f)
3246 		return skb->len;
3247 
3248 	t->tcm_handle = f->handle;
3249 
3250 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3251 	if (!nest)
3252 		goto nla_put_failure;
3253 
3254 	spin_lock(&tp->lock);
3255 
3256 	skip_hw = tc_skip_hw(f->flags);
3257 
3258 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3259 		goto nla_put_failure_locked;
3260 
3261 	spin_unlock(&tp->lock);
3262 
3263 	if (!skip_hw)
3264 		fl_hw_update_stats(tp, f, rtnl_held);
3265 
3266 	if (tcf_exts_terse_dump(skb, &f->exts))
3267 		goto nla_put_failure;
3268 
3269 	nla_nest_end(skb, nest);
3270 
3271 	return skb->len;
3272 
3273 nla_put_failure_locked:
3274 	spin_unlock(&tp->lock);
3275 nla_put_failure:
3276 	nla_nest_cancel(skb, nest);
3277 	return -1;
3278 }
3279 
3280 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3281 {
3282 	struct fl_flow_tmplt *tmplt = tmplt_priv;
3283 	struct fl_flow_key *key, *mask;
3284 	struct nlattr *nest;
3285 
3286 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3287 	if (!nest)
3288 		goto nla_put_failure;
3289 
3290 	key = &tmplt->dummy_key;
3291 	mask = &tmplt->mask;
3292 
3293 	if (fl_dump_key(skb, net, key, mask))
3294 		goto nla_put_failure;
3295 
3296 	nla_nest_end(skb, nest);
3297 
3298 	return skb->len;
3299 
3300 nla_put_failure:
3301 	nla_nest_cancel(skb, nest);
3302 	return -EMSGSIZE;
3303 }
3304 
3305 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3306 			  unsigned long base)
3307 {
3308 	struct cls_fl_filter *f = fh;
3309 
3310 	if (f && f->res.classid == classid) {
3311 		if (cl)
3312 			__tcf_bind_filter(q, &f->res, base);
3313 		else
3314 			__tcf_unbind_filter(q, &f->res);
3315 	}
3316 }
3317 
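/* Report to cls_api whether this instance no longer holds any filters.
 * Setting tp->deleting under tp->lock also makes concurrent fl_change()
 * calls back off with -EAGAIN instead of inserting into a proto that is
 * about to be destroyed.
 */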
3318 static bool fl_delete_empty(struct tcf_proto *tp)
3319 {
3320 	struct cls_fl_head *head = fl_head_dereference(tp);
3321 
3322 	spin_lock(&tp->lock);
3323 	tp->deleting = idr_is_empty(&head->handle_idr);
3324 	spin_unlock(&tp->lock);
3325 
3326 	return tp->deleting;
3327 }
3328 
3329 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3330 	.kind		= "flower",
3331 	.classify	= fl_classify,
3332 	.init		= fl_init,
3333 	.destroy	= fl_destroy,
3334 	.get		= fl_get,
3335 	.put		= fl_put,
3336 	.change		= fl_change,
3337 	.delete		= fl_delete,
3338 	.delete_empty	= fl_delete_empty,
3339 	.walk		= fl_walk,
3340 	.reoffload	= fl_reoffload,
3341 	.hw_add		= fl_hw_add,
3342 	.hw_del		= fl_hw_del,
3343 	.dump		= fl_dump,
3344 	.terse_dump	= fl_terse_dump,
3345 	.bind_class	= fl_bind_class,
3346 	.tmplt_create	= fl_tmplt_create,
3347 	.tmplt_destroy	= fl_tmplt_destroy,
3348 	.tmplt_dump	= fl_tmplt_dump,
3349 	.owner		= THIS_MODULE,
3350 	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
3351 };
3352 
3353 static int __init cls_fl_init(void)
3354 {
3355 	return register_tcf_proto_ops(&cls_fl_ops);
3356 }
3357 
3358 static void __exit cls_fl_exit(void)
3359 {
3360 	unregister_tcf_proto_ops(&cls_fl_ops);
3361 }
3362 
3363 module_init(cls_fl_init);
3364 module_exit(cls_fl_exit);
3365 
3366 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3367 MODULE_DESCRIPTION("Flower classifier");
3368 MODULE_LICENSE("GPL v2");
3369