xref: /linux/net/sched/cls_flower.c (revision ef815d2cba782e96b9aad9483523d474ed41c62a)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/cls_flower.c		Flower classifier
4  *
5  * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/rhashtable.h>
12 #include <linux/workqueue.h>
13 #include <linux/refcount.h>
14 #include <linux/bitfield.h>
15 
16 #include <linux/if_ether.h>
17 #include <linux/in6.h>
18 #include <linux/ip.h>
19 #include <linux/mpls.h>
20 #include <linux/ppp_defs.h>
21 
22 #include <net/sch_generic.h>
23 #include <net/pkt_cls.h>
24 #include <net/pkt_sched.h>
25 #include <net/ip.h>
26 #include <net/flow_dissector.h>
27 #include <net/geneve.h>
28 #include <net/vxlan.h>
29 #include <net/erspan.h>
30 #include <net/gtp.h>
31 #include <net/tc_wrapper.h>
32 
33 #include <net/dst.h>
34 #include <net/dst_metadata.h>
35 
36 #include <uapi/linux/netfilter/nf_conntrack_common.h>
37 
38 #define TCA_FLOWER_KEY_CT_FLAGS_MAX \
39 		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
40 #define TCA_FLOWER_KEY_CT_FLAGS_MASK \
41 		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)
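/* Since the uapi CT flag values are powers of two,
 * (__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) is the highest defined flag;
 * doubling it and subtracting one yields a mask covering every defined
 * flag bit, which is what the netlink policy below validates
 * TCA_FLOWER_KEY_CT_STATE and its mask attribute against.
 */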
42 
43 struct fl_flow_key {
44 	struct flow_dissector_key_meta meta;
45 	struct flow_dissector_key_control control;
46 	struct flow_dissector_key_control enc_control;
47 	struct flow_dissector_key_basic basic;
48 	struct flow_dissector_key_eth_addrs eth;
49 	struct flow_dissector_key_vlan vlan;
50 	struct flow_dissector_key_vlan cvlan;
51 	union {
52 		struct flow_dissector_key_ipv4_addrs ipv4;
53 		struct flow_dissector_key_ipv6_addrs ipv6;
54 	};
55 	struct flow_dissector_key_ports tp;
56 	struct flow_dissector_key_icmp icmp;
57 	struct flow_dissector_key_arp arp;
58 	struct flow_dissector_key_keyid enc_key_id;
59 	union {
60 		struct flow_dissector_key_ipv4_addrs enc_ipv4;
61 		struct flow_dissector_key_ipv6_addrs enc_ipv6;
62 	};
63 	struct flow_dissector_key_ports enc_tp;
64 	struct flow_dissector_key_mpls mpls;
65 	struct flow_dissector_key_tcp tcp;
66 	struct flow_dissector_key_ip ip;
67 	struct flow_dissector_key_ip enc_ip;
68 	struct flow_dissector_key_enc_opts enc_opts;
69 	struct flow_dissector_key_ports_range tp_range;
70 	struct flow_dissector_key_ct ct;
71 	struct flow_dissector_key_hash hash;
72 	struct flow_dissector_key_num_of_vlans num_of_vlans;
73 	struct flow_dissector_key_pppoe pppoe;
74 	struct flow_dissector_key_l2tpv3 l2tpv3;
75 	struct flow_dissector_key_cfm cfm;
76 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
77 
78 struct fl_flow_mask_range {
79 	unsigned short int start;
80 	unsigned short int end;
81 };
82 
83 struct fl_flow_mask {
84 	struct fl_flow_key key;
85 	struct fl_flow_mask_range range;
86 	u32 flags;
87 	struct rhash_head ht_node;
88 	struct rhashtable ht;
89 	struct rhashtable_params filter_ht_params;
90 	struct flow_dissector dissector;
91 	struct list_head filters;
92 	struct rcu_work rwork;
93 	struct list_head list;
94 	refcount_t refcnt;
95 };
96 
97 struct fl_flow_tmplt {
98 	struct fl_flow_key dummy_key;
99 	struct fl_flow_key mask;
100 	struct flow_dissector dissector;
101 	struct tcf_chain *chain;
102 };
103 
104 struct cls_fl_head {
105 	struct rhashtable ht;
106 	spinlock_t masks_lock; /* Protect masks list */
107 	struct list_head masks;
108 	struct list_head hw_filters;
109 	struct rcu_work rwork;
110 	struct idr handle_idr;
111 };
112 
113 struct cls_fl_filter {
114 	struct fl_flow_mask *mask;
115 	struct rhash_head ht_node;
116 	struct fl_flow_key mkey;
117 	struct tcf_exts exts;
118 	struct tcf_result res;
119 	struct fl_flow_key key;
120 	struct list_head list;
121 	struct list_head hw_list;
122 	u32 handle;
123 	u32 flags;
124 	u32 in_hw_count;
125 	u8 needs_tc_skb_ext:1;
126 	struct rcu_work rwork;
127 	struct net_device *hw_dev;
128 	/* Flower classifier is unlocked, which means that its reference counter
129 	 * can be changed concurrently without any kind of external
130 	 * synchronization. Use atomic reference counter to be concurrency-safe.
131 	 */
132 	refcount_t refcnt;
133 	bool deleted;
134 };
135 
136 static const struct rhashtable_params mask_ht_params = {
137 	.key_offset = offsetof(struct fl_flow_mask, key),
138 	.key_len = sizeof(struct fl_flow_key),
139 	.head_offset = offsetof(struct fl_flow_mask, ht_node),
140 	.automatic_shrinking = true,
141 };
142 
143 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
144 {
145 	return mask->range.end - mask->range.start;
146 }
147 
148 static void fl_mask_update_range(struct fl_flow_mask *mask)
149 {
150 	const u8 *bytes = (const u8 *) &mask->key;
151 	size_t size = sizeof(mask->key);
152 	size_t i, first = 0, last;
153 
154 	for (i = 0; i < size; i++) {
155 		if (bytes[i]) {
156 			first = i;
157 			break;
158 		}
159 	}
160 	last = first;
161 	for (i = size - 1; i != first; i--) {
162 		if (bytes[i]) {
163 			last = i;
164 			break;
165 		}
166 	}
167 	mask->range.start = rounddown(first, sizeof(long));
168 	mask->range.end = roundup(last + 1, sizeof(long));
169 }
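/* Example: with 8-byte longs, if the first non-zero mask byte sits at
 * offset 13 and the last at offset 22, the range becomes [8, 24), so
 * the masked-key helpers below can walk the key in whole longs.
 */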
170 
171 static void *fl_key_get_start(struct fl_flow_key *key,
172 			      const struct fl_flow_mask *mask)
173 {
174 	return (u8 *) key + mask->range.start;
175 }
176 
177 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
178 			      struct fl_flow_mask *mask)
179 {
180 	const long *lkey = fl_key_get_start(key, mask);
181 	const long *lmask = fl_key_get_start(&mask->key, mask);
182 	long *lmkey = fl_key_get_start(mkey, mask);
183 	int i;
184 
185 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
186 		*lmkey++ = *lkey++ & *lmask++;
187 }
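/* Builds the masked lookup key (key & mask) one long at a time,
 * touching only the byte range computed by fl_mask_update_range().
 * The same masked form is what each filter keeps in ->mkey, so the
 * hash lookups below compare like with like.
 */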
188 
189 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
190 			       struct fl_flow_mask *mask)
191 {
192 	const long *lmask = fl_key_get_start(&mask->key, mask);
193 	const long *ltmplt;
194 	int i;
195 
196 	if (!tmplt)
197 		return true;
198 	ltmplt = fl_key_get_start(&tmplt->mask, mask);
199 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
200 		if (~*ltmplt++ & *lmask++)
201 			return false;
202 	}
203 	return true;
204 }
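/* A mask "fits" a template when it does not match on any bit the
 * template mask leaves clear, i.e. (mask & ~tmplt) == 0 over the whole
 * masked range; with no template every mask is accepted.
 */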
205 
206 static void fl_clear_masked_range(struct fl_flow_key *key,
207 				  struct fl_flow_mask *mask)
208 {
209 	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
210 }
211 
212 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
213 				  struct fl_flow_key *key,
214 				  struct fl_flow_key *mkey)
215 {
216 	u16 min_mask, max_mask, min_val, max_val;
217 
218 	min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
219 	max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
220 	min_val = ntohs(filter->key.tp_range.tp_min.dst);
221 	max_val = ntohs(filter->key.tp_range.tp_max.dst);
222 
223 	if (min_mask && max_mask) {
224 		if (ntohs(key->tp_range.tp.dst) < min_val ||
225 		    ntohs(key->tp_range.tp.dst) > max_val)
226 			return false;
227 
228 		/* skb does not have min and max values */
229 		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
230 		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
231 	}
232 	return true;
233 }
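/* The skb key carries only the actual destination port, never a
 * min/max pair, so when this filter uses a port range the masked key
 * is patched with the filter's own masked min/max values.  That way
 * the subsequent __fl_lookup() can still find this filter's hash table
 * entry once the range check above has passed.
 */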
234 
235 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
236 				  struct fl_flow_key *key,
237 				  struct fl_flow_key *mkey)
238 {
239 	u16 min_mask, max_mask, min_val, max_val;
240 
241 	min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
242 	max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
243 	min_val = ntohs(filter->key.tp_range.tp_min.src);
244 	max_val = ntohs(filter->key.tp_range.tp_max.src);
245 
246 	if (min_mask && max_mask) {
247 		if (ntohs(key->tp_range.tp.src) < min_val ||
248 		    ntohs(key->tp_range.tp.src) > max_val)
249 			return false;
250 
251 		/* skb does not have min and max values */
252 		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
253 		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
254 	}
255 	return true;
256 }
257 
258 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
259 					 struct fl_flow_key *mkey)
260 {
261 	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
262 				      mask->filter_ht_params);
263 }
264 
265 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
266 					     struct fl_flow_key *mkey,
267 					     struct fl_flow_key *key)
268 {
269 	struct cls_fl_filter *filter, *f;
270 
271 	list_for_each_entry_rcu(filter, &mask->filters, list) {
272 		if (!fl_range_port_dst_cmp(filter, key, mkey))
273 			continue;
274 
275 		if (!fl_range_port_src_cmp(filter, key, mkey))
276 			continue;
277 
278 		f = __fl_lookup(mask, mkey);
279 		if (f)
280 			return f;
281 	}
282 	return NULL;
283 }
284 
285 static noinline_for_stack
286 struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
287 {
288 	struct fl_flow_key mkey;
289 
290 	fl_set_masked_key(&mkey, key, mask);
291 	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
292 		return fl_lookup_range(mask, &mkey, key);
293 
294 	return __fl_lookup(mask, &mkey);
295 }
296 
297 static u16 fl_ct_info_to_flower_map[] = {
298 	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
299 					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
300 	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
301 					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
302 	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
303 					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
304 					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
305 	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
306 					TCA_FLOWER_KEY_CT_FLAGS_RELATED |
307 					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
308 	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
309 					TCA_FLOWER_KEY_CT_FLAGS_NEW,
310 };
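/* Maps enum ip_conntrack_info values seen on the skb to the flower CT
 * flag bits matched by TCA_FLOWER_KEY_CT_STATE.  Every mapped state
 * sets the TRACKED flag; reply-direction states also set REPLY.
 */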
311 
312 TC_INDIRECT_SCOPE int fl_classify(struct sk_buff *skb,
313 				  const struct tcf_proto *tp,
314 				  struct tcf_result *res)
315 {
316 	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
317 	bool post_ct = tc_skb_cb(skb)->post_ct;
318 	u16 zone = tc_skb_cb(skb)->zone;
319 	struct fl_flow_key skb_key;
320 	struct fl_flow_mask *mask;
321 	struct cls_fl_filter *f;
322 
323 	list_for_each_entry_rcu(mask, &head->masks, list) {
324 		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
325 		fl_clear_masked_range(&skb_key, mask);
326 
327 		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
328 		/* skb_flow_dissect() does not set n_proto in case of an
329 		 * unknown protocol, so do it here instead.
330 		 */
331 		skb_key.basic.n_proto = skb_protocol(skb, false);
332 		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
333 		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
334 				    fl_ct_info_to_flower_map,
335 				    ARRAY_SIZE(fl_ct_info_to_flower_map),
336 				    post_ct, zone);
337 		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
338 		skb_flow_dissect(skb, &mask->dissector, &skb_key,
339 				 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);
340 
341 		f = fl_mask_lookup(mask, &skb_key);
342 		if (f && !tc_skip_sw(f->flags)) {
343 			*res = f->res;
344 			return tcf_exts_exec(skb, &f->exts, res);
345 		}
346 	}
347 	return -1;
348 }
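/* Classification walks the masks in list order: for each mask the skb
 * is dissected only for the keys that mask uses, the result is ANDed
 * with the mask, and a single hash lookup is done in that mask's
 * table.  The first matching filter that is not skip_sw wins.
 *
 * Illustrative user-space counterpart (iproute2 syntax, example only):
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		ip_proto tcp dst_port 80 action drop
 * creates one mask (ip_proto + dst_port) and one filter hashed under it.
 */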
349 
350 static int fl_init(struct tcf_proto *tp)
351 {
352 	struct cls_fl_head *head;
353 
354 	head = kzalloc(sizeof(*head), GFP_KERNEL);
355 	if (!head)
356 		return -ENOBUFS;
357 
358 	spin_lock_init(&head->masks_lock);
359 	INIT_LIST_HEAD_RCU(&head->masks);
360 	INIT_LIST_HEAD(&head->hw_filters);
361 	rcu_assign_pointer(tp->root, head);
362 	idr_init(&head->handle_idr);
363 
364 	return rhashtable_init(&head->ht, &mask_ht_params);
365 }
366 
367 static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
368 {
369 	/* temporary masks don't have their filters list and ht initialized */
370 	if (mask_init_done) {
371 		WARN_ON(!list_empty(&mask->filters));
372 		rhashtable_destroy(&mask->ht);
373 	}
374 	kfree(mask);
375 }
376 
377 static void fl_mask_free_work(struct work_struct *work)
378 {
379 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
380 						 struct fl_flow_mask, rwork);
381 
382 	fl_mask_free(mask, true);
383 }
384 
385 static void fl_uninit_mask_free_work(struct work_struct *work)
386 {
387 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
388 						 struct fl_flow_mask, rwork);
389 
390 	fl_mask_free(mask, false);
391 }
392 
393 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
394 {
395 	if (!refcount_dec_and_test(&mask->refcnt))
396 		return false;
397 
398 	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
399 
400 	spin_lock(&head->masks_lock);
401 	list_del_rcu(&mask->list);
402 	spin_unlock(&head->masks_lock);
403 
404 	tcf_queue_work(&mask->rwork, fl_mask_free_work);
405 
406 	return true;
407 }
408 
409 static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
410 {
411 	/* Flower classifier only changes root pointer during init and destroy.
412 	 * Users must obtain reference to tcf_proto instance before calling its
413 	 * API, so tp->root pointer is protected from concurrent call to
414 	 * fl_destroy() by reference counting.
415 	 */
416 	return rcu_dereference_raw(tp->root);
417 }
418 
419 static void __fl_destroy_filter(struct cls_fl_filter *f)
420 {
421 	if (f->needs_tc_skb_ext)
422 		tc_skb_ext_tc_disable();
423 	tcf_exts_destroy(&f->exts);
424 	tcf_exts_put_net(&f->exts);
425 	kfree(f);
426 }
427 
428 static void fl_destroy_filter_work(struct work_struct *work)
429 {
430 	struct cls_fl_filter *f = container_of(to_rcu_work(work),
431 					struct cls_fl_filter, rwork);
432 
433 	__fl_destroy_filter(f);
434 }
435 
436 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
437 				 bool rtnl_held, struct netlink_ext_ack *extack)
438 {
439 	struct tcf_block *block = tp->chain->block;
440 	struct flow_cls_offload cls_flower = {};
441 
442 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
443 	cls_flower.command = FLOW_CLS_DESTROY;
444 	cls_flower.cookie = (unsigned long) f;
445 
446 	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
447 			    &f->flags, &f->in_hw_count, rtnl_held);
448 
449 }
450 
451 static int fl_hw_replace_filter(struct tcf_proto *tp,
452 				struct cls_fl_filter *f, bool rtnl_held,
453 				struct netlink_ext_ack *extack)
454 {
455 	struct tcf_block *block = tp->chain->block;
456 	struct flow_cls_offload cls_flower = {};
457 	bool skip_sw = tc_skip_sw(f->flags);
458 	int err = 0;
459 
460 	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
461 	if (!cls_flower.rule)
462 		return -ENOMEM;
463 
464 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
465 	cls_flower.command = FLOW_CLS_REPLACE;
466 	cls_flower.cookie = (unsigned long) f;
467 	cls_flower.rule->match.dissector = &f->mask->dissector;
468 	cls_flower.rule->match.mask = &f->mask->key;
469 	cls_flower.rule->match.key = &f->mkey;
470 	cls_flower.classid = f->res.classid;
471 
472 	err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
473 				      cls_flower.common.extack);
474 	if (err) {
475 		kfree(cls_flower.rule);
476 
477 		return skip_sw ? err : 0;
478 	}
479 
480 	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
481 			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
482 	tc_cleanup_offload_action(&cls_flower.rule->action);
483 	kfree(cls_flower.rule);
484 
485 	if (err) {
486 		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
487 		return err;
488 	}
489 
490 	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
491 		return -EINVAL;
492 
493 	return 0;
494 }
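/* Translating the actions for offload is only fatal for skip_sw
 * filters; with a software path available that error is ignored.  A
 * skip_sw filter for which no driver set TCA_CLS_FLAGS_IN_HW is
 * rejected with -EINVAL, since it would otherwise match nothing.
 */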
495 
496 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
497 			       bool rtnl_held)
498 {
499 	struct tcf_block *block = tp->chain->block;
500 	struct flow_cls_offload cls_flower = {};
501 
502 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
503 	cls_flower.command = FLOW_CLS_STATS;
504 	cls_flower.cookie = (unsigned long) f;
505 	cls_flower.classid = f->res.classid;
506 
507 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
508 			 rtnl_held);
509 
510 	tcf_exts_hw_stats_update(&f->exts, &cls_flower.stats, cls_flower.use_act_stats);
511 }
512 
513 static void __fl_put(struct cls_fl_filter *f)
514 {
515 	if (!refcount_dec_and_test(&f->refcnt))
516 		return;
517 
518 	if (tcf_exts_get_net(&f->exts))
519 		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
520 	else
521 		__fl_destroy_filter(f);
522 }
523 
524 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
525 {
526 	struct cls_fl_filter *f;
527 
528 	rcu_read_lock();
529 	f = idr_find(&head->handle_idr, handle);
530 	if (f && !refcount_inc_not_zero(&f->refcnt))
531 		f = NULL;
532 	rcu_read_unlock();
533 
534 	return f;
535 }
536 
537 static struct tcf_exts *fl_get_exts(const struct tcf_proto *tp, u32 handle)
538 {
539 	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
540 	struct cls_fl_filter *f;
541 
542 	f = idr_find(&head->handle_idr, handle);
543 	return f ? &f->exts : NULL;
544 }
545 
546 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
547 		       bool *last, bool rtnl_held,
548 		       struct netlink_ext_ack *extack)
549 {
550 	struct cls_fl_head *head = fl_head_dereference(tp);
551 
552 	*last = false;
553 
554 	spin_lock(&tp->lock);
555 	if (f->deleted) {
556 		spin_unlock(&tp->lock);
557 		return -ENOENT;
558 	}
559 
560 	f->deleted = true;
561 	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
562 			       f->mask->filter_ht_params);
563 	idr_remove(&head->handle_idr, f->handle);
564 	list_del_rcu(&f->list);
565 	spin_unlock(&tp->lock);
566 
567 	*last = fl_mask_put(head, f->mask);
568 	if (!tc_skip_hw(f->flags))
569 		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
570 	tcf_unbind_filter(tp, &f->res);
571 	__fl_put(f);
572 
573 	return 0;
574 }
575 
576 static void fl_destroy_sleepable(struct work_struct *work)
577 {
578 	struct cls_fl_head *head = container_of(to_rcu_work(work),
579 						struct cls_fl_head,
580 						rwork);
581 
582 	rhashtable_destroy(&head->ht);
583 	kfree(head);
584 	module_put(THIS_MODULE);
585 }
586 
587 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
588 		       struct netlink_ext_ack *extack)
589 {
590 	struct cls_fl_head *head = fl_head_dereference(tp);
591 	struct fl_flow_mask *mask, *next_mask;
592 	struct cls_fl_filter *f, *next;
593 	bool last;
594 
595 	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
596 		list_for_each_entry_safe(f, next, &mask->filters, list) {
597 			__fl_delete(tp, f, &last, rtnl_held, extack);
598 			if (last)
599 				break;
600 		}
601 	}
602 	idr_destroy(&head->handle_idr);
603 
604 	__module_get(THIS_MODULE);
605 	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
606 }
607 
608 static void fl_put(struct tcf_proto *tp, void *arg)
609 {
610 	struct cls_fl_filter *f = arg;
611 
612 	__fl_put(f);
613 }
614 
615 static void *fl_get(struct tcf_proto *tp, u32 handle)
616 {
617 	struct cls_fl_head *head = fl_head_dereference(tp);
618 
619 	return __fl_get(head, handle);
620 }
621 
622 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
623 	[TCA_FLOWER_UNSPEC]		= { .strict_start_type =
624 						TCA_FLOWER_L2_MISS },
625 	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
626 	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
627 					    .len = IFNAMSIZ },
628 	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
629 	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
630 	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
631 	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
632 	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
633 	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
634 	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
635 	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
636 	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
637 	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
638 	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
639 	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
640 	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
641 	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
642 	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
643 	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
644 	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
645 	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
646 	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
647 	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
648 	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
649 	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
650 	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
651 	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
652 	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
653 	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
654 	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
655 	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
656 	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
657 	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
658 	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
659 	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
660 	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
661 	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
662 	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
663 	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
664 	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
665 	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
666 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
667 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
668 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
669 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
670 	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
671 	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
672 	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
673 	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
674 	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
675 	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
676 	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
677 	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
678 	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
679 	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
680 	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
681 	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
682 	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
683 	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
684 	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
685 	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
686 	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
687 	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
688 	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
689 	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
690 	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
691 	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
692 	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
693 	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
694 	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
695 	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
696 	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
697 	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
698 	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
699 	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
700 	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
701 	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
702 	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
703 	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
704 	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
705 	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
706 	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
707 	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
708 	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
709 	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
710 	[TCA_FLOWER_KEY_CT_STATE]	=
711 		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
712 	[TCA_FLOWER_KEY_CT_STATE_MASK]	=
713 		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
714 	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
715 	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
716 	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
717 	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
718 	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
719 					    .len = 128 / BITS_PER_BYTE },
720 	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
721 					    .len = 128 / BITS_PER_BYTE },
722 	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
723 	[TCA_FLOWER_KEY_HASH]		= { .type = NLA_U32 },
724 	[TCA_FLOWER_KEY_HASH_MASK]	= { .type = NLA_U32 },
725 	[TCA_FLOWER_KEY_NUM_OF_VLANS]	= { .type = NLA_U8 },
726 	[TCA_FLOWER_KEY_PPPOE_SID]	= { .type = NLA_U16 },
727 	[TCA_FLOWER_KEY_PPP_PROTO]	= { .type = NLA_U16 },
728 	[TCA_FLOWER_KEY_L2TPV3_SID]	= { .type = NLA_U32 },
729 	[TCA_FLOWER_L2_MISS]		= NLA_POLICY_MAX(NLA_U8, 1),
730 	[TCA_FLOWER_KEY_CFM]		= { .type = NLA_NESTED },
731 };
732 
733 static const struct nla_policy
734 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
735 	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]        = {
736 		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
737 	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
738 	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
739 	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
740 	[TCA_FLOWER_KEY_ENC_OPTS_GTP]		= { .type = NLA_NESTED },
741 };
742 
743 static const struct nla_policy
744 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
745 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
746 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
747 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
748 						       .len = 128 },
749 };
750 
751 static const struct nla_policy
752 vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
753 	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]         = { .type = NLA_U32 },
754 };
755 
756 static const struct nla_policy
757 erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
758 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]        = { .type = NLA_U8 },
759 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]      = { .type = NLA_U32 },
760 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]        = { .type = NLA_U8 },
761 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
762 };
763 
764 static const struct nla_policy
765 gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = {
766 	[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]	   = { .type = NLA_U8 },
767 	[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]	   = { .type = NLA_U8 },
768 };
769 
770 static const struct nla_policy
771 mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
772 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]    = { .type = NLA_U8 },
773 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]      = { .type = NLA_U8 },
774 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]      = { .type = NLA_U8 },
775 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]       = { .type = NLA_U8 },
776 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]    = { .type = NLA_U32 },
777 };
778 
779 static const struct nla_policy cfm_opt_policy[TCA_FLOWER_KEY_CFM_OPT_MAX] = {
780 	[TCA_FLOWER_KEY_CFM_MD_LEVEL]	= NLA_POLICY_MAX(NLA_U8,
781 						FLOW_DIS_CFM_MDL_MAX),
782 	[TCA_FLOWER_KEY_CFM_OPCODE]	= { .type = NLA_U8 },
783 };
784 
785 static void fl_set_key_val(struct nlattr **tb,
786 			   void *val, int val_type,
787 			   void *mask, int mask_type, int len)
788 {
789 	if (!tb[val_type])
790 		return;
791 	nla_memcpy(val, tb[val_type], len);
792 	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
793 		memset(mask, 0xff, len);
794 	else
795 		nla_memcpy(mask, tb[mask_type], len);
796 }
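/* Generic helper for copying a value/mask attribute pair: when the
 * value attribute is absent nothing is touched, and when the mask
 * attribute is absent (or the field has no mask attribute at all,
 * TCA_FLOWER_UNSPEC) the mask defaults to all-ones, i.e. an exact
 * match on the supplied value.
 */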
797 
798 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
799 				 struct fl_flow_key *mask,
800 				 struct netlink_ext_ack *extack)
801 {
802 	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
803 		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
804 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
805 	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
806 		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
807 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
808 	fl_set_key_val(tb, &key->tp_range.tp_min.src,
809 		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
810 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
811 	fl_set_key_val(tb, &key->tp_range.tp_max.src,
812 		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
813 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
814 
815 	if (mask->tp_range.tp_min.dst != mask->tp_range.tp_max.dst) {
816 		NL_SET_ERR_MSG(extack,
817 			       "Both min and max destination ports must be specified");
818 		return -EINVAL;
819 	}
820 	if (mask->tp_range.tp_min.src != mask->tp_range.tp_max.src) {
821 		NL_SET_ERR_MSG(extack,
822 			       "Both min and max source ports must be specified");
823 		return -EINVAL;
824 	}
825 	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
826 	    ntohs(key->tp_range.tp_max.dst) <=
827 	    ntohs(key->tp_range.tp_min.dst)) {
828 		NL_SET_ERR_MSG_ATTR(extack,
829 				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
830 				    "Invalid destination port range (min must be strictly smaller than max)");
831 		return -EINVAL;
832 	}
833 	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
834 	    ntohs(key->tp_range.tp_max.src) <=
835 	    ntohs(key->tp_range.tp_min.src)) {
836 		NL_SET_ERR_MSG_ATTR(extack,
837 				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
838 				    "Invalid source port range (min must be strictly smaller than max)");
839 		return -EINVAL;
840 	}
841 
842 	return 0;
843 }
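/* A port range is only valid when both ends of a direction are given
 * and max is strictly greater than min; e.g. "dst_port 100-200" in
 * iproute2 terms (illustrative) sets tp_min.dst = 100 and
 * tp_max.dst = 200 with both masks non-zero.
 */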
844 
845 static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
846 			       struct flow_dissector_key_mpls *key_val,
847 			       struct flow_dissector_key_mpls *key_mask,
848 			       struct netlink_ext_ack *extack)
849 {
850 	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
851 	struct flow_dissector_mpls_lse *lse_mask;
852 	struct flow_dissector_mpls_lse *lse_val;
853 	u8 lse_index;
854 	u8 depth;
855 	int err;
856 
857 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
858 			       mpls_stack_entry_policy, extack);
859 	if (err < 0)
860 		return err;
861 
862 	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
863 		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
864 		return -EINVAL;
865 	}
866 
867 	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);
868 
869 	/* LSE depth starts at 1, for consistency with terminology used by
870 	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
871 	 */
872 	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
873 		NL_SET_ERR_MSG_ATTR(extack,
874 				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
875 				    "Invalid MPLS depth");
876 		return -EINVAL;
877 	}
878 	lse_index = depth - 1;
879 
880 	dissector_set_mpls_lse(key_val, lse_index);
881 	dissector_set_mpls_lse(key_mask, lse_index);
882 
883 	lse_val = &key_val->ls[lse_index];
884 	lse_mask = &key_mask->ls[lse_index];
885 
886 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
887 		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
888 		lse_mask->mpls_ttl = MPLS_TTL_MASK;
889 	}
890 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
891 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);
892 
893 		if (bos & ~MPLS_BOS_MASK) {
894 			NL_SET_ERR_MSG_ATTR(extack,
895 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
896 					    "Bottom Of Stack (BOS) must be 0 or 1");
897 			return -EINVAL;
898 		}
899 		lse_val->mpls_bos = bos;
900 		lse_mask->mpls_bos = MPLS_BOS_MASK;
901 	}
902 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
903 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);
904 
905 		if (tc & ~MPLS_TC_MASK) {
906 			NL_SET_ERR_MSG_ATTR(extack,
907 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
908 					    "Traffic Class (TC) must be between 0 and 7");
909 			return -EINVAL;
910 		}
911 		lse_val->mpls_tc = tc;
912 		lse_mask->mpls_tc = MPLS_TC_MASK;
913 	}
914 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
915 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);
916 
917 		if (label & ~MPLS_LABEL_MASK) {
918 			NL_SET_ERR_MSG_ATTR(extack,
919 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
920 					    "Label must be between 0 and 1048575");
921 			return -EINVAL;
922 		}
923 		lse_val->mpls_label = label;
924 		lse_mask->mpls_label = MPLS_LABEL_MASK;
925 	}
926 
927 	return 0;
928 }
929 
930 static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
931 				struct flow_dissector_key_mpls *key_val,
932 				struct flow_dissector_key_mpls *key_mask,
933 				struct netlink_ext_ack *extack)
934 {
935 	struct nlattr *nla_lse;
936 	int rem;
937 	int err;
938 
939 	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
940 		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
941 				    "NLA_F_NESTED is missing");
942 		return -EINVAL;
943 	}
944 
945 	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
946 		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
947 			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
948 					    "Invalid MPLS option type");
949 			return -EINVAL;
950 		}
951 
952 		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
953 		if (err < 0)
954 			return err;
955 	}
956 	if (rem) {
957 		NL_SET_ERR_MSG(extack,
958 			       "Bytes leftover after parsing MPLS options");
959 		return -EINVAL;
960 	}
961 
962 	return 0;
963 }
964 
965 static int fl_set_key_mpls(struct nlattr **tb,
966 			   struct flow_dissector_key_mpls *key_val,
967 			   struct flow_dissector_key_mpls *key_mask,
968 			   struct netlink_ext_ack *extack)
969 {
970 	struct flow_dissector_mpls_lse *lse_mask;
971 	struct flow_dissector_mpls_lse *lse_val;
972 
973 	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
974 		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
975 		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
976 		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
977 		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
978 			NL_SET_ERR_MSG_ATTR(extack,
979 					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
980 					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
981 			return -EBADMSG;
982 		}
983 
984 		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
985 					    key_val, key_mask, extack);
986 	}
987 
988 	lse_val = &key_val->ls[0];
989 	lse_mask = &key_mask->ls[0];
990 
991 	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
992 		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
993 		lse_mask->mpls_ttl = MPLS_TTL_MASK;
994 		dissector_set_mpls_lse(key_val, 0);
995 		dissector_set_mpls_lse(key_mask, 0);
996 	}
997 	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
998 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
999 
1000 		if (bos & ~MPLS_BOS_MASK) {
1001 			NL_SET_ERR_MSG_ATTR(extack,
1002 					    tb[TCA_FLOWER_KEY_MPLS_BOS],
1003 					    "Bottom Of Stack (BOS) must be 0 or 1");
1004 			return -EINVAL;
1005 		}
1006 		lse_val->mpls_bos = bos;
1007 		lse_mask->mpls_bos = MPLS_BOS_MASK;
1008 		dissector_set_mpls_lse(key_val, 0);
1009 		dissector_set_mpls_lse(key_mask, 0);
1010 	}
1011 	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
1012 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
1013 
1014 		if (tc & ~MPLS_TC_MASK) {
1015 			NL_SET_ERR_MSG_ATTR(extack,
1016 					    tb[TCA_FLOWER_KEY_MPLS_TC],
1017 					    "Traffic Class (TC) must be between 0 and 7");
1018 			return -EINVAL;
1019 		}
1020 		lse_val->mpls_tc = tc;
1021 		lse_mask->mpls_tc = MPLS_TC_MASK;
1022 		dissector_set_mpls_lse(key_val, 0);
1023 		dissector_set_mpls_lse(key_mask, 0);
1024 	}
1025 	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
1026 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
1027 
1028 		if (label & ~MPLS_LABEL_MASK) {
1029 			NL_SET_ERR_MSG_ATTR(extack,
1030 					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
1031 					    "Label must be between 0 and 1048575");
1032 			return -EINVAL;
1033 		}
1034 		lse_val->mpls_label = label;
1035 		lse_mask->mpls_label = MPLS_LABEL_MASK;
1036 		dissector_set_mpls_lse(key_val, 0);
1037 		dissector_set_mpls_lse(key_mask, 0);
1038 	}
1039 	return 0;
1040 }
1041 
1042 static void fl_set_key_vlan(struct nlattr **tb,
1043 			    __be16 ethertype,
1044 			    int vlan_id_key, int vlan_prio_key,
1045 			    int vlan_next_eth_type_key,
1046 			    struct flow_dissector_key_vlan *key_val,
1047 			    struct flow_dissector_key_vlan *key_mask)
1048 {
1049 #define VLAN_PRIORITY_MASK	0x7
1050 
1051 	if (tb[vlan_id_key]) {
1052 		key_val->vlan_id =
1053 			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
1054 		key_mask->vlan_id = VLAN_VID_MASK;
1055 	}
1056 	if (tb[vlan_prio_key]) {
1057 		key_val->vlan_priority =
1058 			nla_get_u8(tb[vlan_prio_key]) &
1059 			VLAN_PRIORITY_MASK;
1060 		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
1061 	}
1062 	if (ethertype) {
1063 		key_val->vlan_tpid = ethertype;
1064 		key_mask->vlan_tpid = cpu_to_be16(~0);
1065 	}
1066 	if (tb[vlan_next_eth_type_key]) {
1067 		key_val->vlan_eth_type =
1068 			nla_get_be16(tb[vlan_next_eth_type_key]);
1069 		key_mask->vlan_eth_type = cpu_to_be16(~0);
1070 	}
1071 }
1072 
1073 static void fl_set_key_pppoe(struct nlattr **tb,
1074 			     struct flow_dissector_key_pppoe *key_val,
1075 			     struct flow_dissector_key_pppoe *key_mask,
1076 			     struct fl_flow_key *key,
1077 			     struct fl_flow_key *mask)
1078 {
1079 	/* key_val::type must be set to ETH_P_PPP_SES because
1080 	 * ETH_P_PPP_SES was stored in basic.n_proto, which might get
1081 	 * overwritten by ppp_proto or might be set to 0.
1082 	 * The role of key_val::type is similar to that of
1083 	 * vlan_key::tpid.
1084 	 */
1085 	key_val->type = htons(ETH_P_PPP_SES);
1086 	key_mask->type = cpu_to_be16(~0);
1087 
1088 	if (tb[TCA_FLOWER_KEY_PPPOE_SID]) {
1089 		key_val->session_id =
1090 			nla_get_be16(tb[TCA_FLOWER_KEY_PPPOE_SID]);
1091 		key_mask->session_id = cpu_to_be16(~0);
1092 	}
1093 	if (tb[TCA_FLOWER_KEY_PPP_PROTO]) {
1094 		key_val->ppp_proto =
1095 			nla_get_be16(tb[TCA_FLOWER_KEY_PPP_PROTO]);
1096 		key_mask->ppp_proto = cpu_to_be16(~0);
1097 
1098 		if (key_val->ppp_proto == htons(PPP_IP)) {
1099 			key->basic.n_proto = htons(ETH_P_IP);
1100 			mask->basic.n_proto = cpu_to_be16(~0);
1101 		} else if (key_val->ppp_proto == htons(PPP_IPV6)) {
1102 			key->basic.n_proto = htons(ETH_P_IPV6);
1103 			mask->basic.n_proto = cpu_to_be16(~0);
1104 		} else if (key_val->ppp_proto == htons(PPP_MPLS_UC)) {
1105 			key->basic.n_proto = htons(ETH_P_MPLS_UC);
1106 			mask->basic.n_proto = cpu_to_be16(~0);
1107 		} else if (key_val->ppp_proto == htons(PPP_MPLS_MC)) {
1108 			key->basic.n_proto = htons(ETH_P_MPLS_MC);
1109 			mask->basic.n_proto = cpu_to_be16(~0);
1110 		}
1111 	} else {
1112 		key->basic.n_proto = 0;
1113 		mask->basic.n_proto = cpu_to_be16(0);
1114 	}
1115 }
1116 
1117 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
1118 			    u32 *dissector_key, u32 *dissector_mask,
1119 			    u32 flower_flag_bit, u32 dissector_flag_bit)
1120 {
1121 	if (flower_mask & flower_flag_bit) {
1122 		*dissector_mask |= dissector_flag_bit;
1123 		if (flower_key & flower_flag_bit)
1124 			*dissector_key |= dissector_flag_bit;
1125 	}
1126 }
1127 
1128 static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
1129 			    u32 *flags_mask, struct netlink_ext_ack *extack)
1130 {
1131 	u32 key, mask;
1132 
1133 	/* mask is mandatory for flags */
1134 	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
1135 		NL_SET_ERR_MSG(extack, "Missing flags mask");
1136 		return -EINVAL;
1137 	}
1138 
1139 	key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
1140 	mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
1141 
1142 	*flags_key  = 0;
1143 	*flags_mask = 0;
1144 
1145 	fl_set_key_flag(key, mask, flags_key, flags_mask,
1146 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
1147 	fl_set_key_flag(key, mask, flags_key, flags_mask,
1148 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
1149 			FLOW_DIS_FIRST_FRAG);
1150 
1151 	return 0;
1152 }
1153 
1154 static void fl_set_key_ip(struct nlattr **tb, bool encap,
1155 			  struct flow_dissector_key_ip *key,
1156 			  struct flow_dissector_key_ip *mask)
1157 {
1158 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
1159 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
1160 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
1161 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
1162 
1163 	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
1164 	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
1165 }
1166 
1167 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
1168 			     int depth, int option_len,
1169 			     struct netlink_ext_ack *extack)
1170 {
1171 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
1172 	struct nlattr *class = NULL, *type = NULL, *data = NULL;
1173 	struct geneve_opt *opt;
1174 	int err, data_len = 0;
1175 
1176 	if (option_len > sizeof(struct geneve_opt))
1177 		data_len = option_len - sizeof(struct geneve_opt);
1178 
1179 	if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
1180 		return -ERANGE;
1181 
1182 	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
1183 	memset(opt, 0xff, option_len);
1184 	opt->length = data_len / 4;
1185 	opt->r1 = 0;
1186 	opt->r2 = 0;
1187 	opt->r3 = 0;
1188 
1189 	/* If no mask has been provided, we assume an exact match. */
1190 	if (!depth)
1191 		return sizeof(struct geneve_opt) + data_len;
1192 
1193 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
1194 		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
1195 		return -EINVAL;
1196 	}
1197 
1198 	err = nla_parse_nested_deprecated(tb,
1199 					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
1200 					  nla, geneve_opt_policy, extack);
1201 	if (err < 0)
1202 		return err;
1203 
1204 	/* We are not allowed to omit any of CLASS, TYPE or DATA
1205 	 * fields from the key.
1206 	 */
1207 	if (!option_len &&
1208 	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
1209 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
1210 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
1211 		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
1212 		return -EINVAL;
1213 	}
1214 
1215 	/* Omitting any of CLASS, TYPE or DATA fields is allowed
1216 	 * for the mask.
1217 	 */
1218 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
1219 		int new_len = key->enc_opts.len;
1220 
1221 		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
1222 		data_len = nla_len(data);
1223 		if (data_len < 4) {
1224 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
1225 			return -ERANGE;
1226 		}
1227 		if (data_len % 4) {
1228 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
1229 			return -ERANGE;
1230 		}
1231 
1232 		new_len += sizeof(struct geneve_opt) + data_len;
1233 		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
1234 		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
1235 			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
1236 			return -ERANGE;
1237 		}
1238 		opt->length = data_len / 4;
1239 		memcpy(opt->opt_data, nla_data(data), data_len);
1240 	}
1241 
1242 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
1243 		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
1244 		opt->opt_class = nla_get_be16(class);
1245 	}
1246 
1247 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
1248 		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
1249 		opt->type = nla_get_u8(type);
1250 	}
1251 
1252 	return sizeof(struct geneve_opt) + data_len;
1253 }
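/* Called once for the key and once for the mask.  With no mask
 * attribute at all (depth == 0) the option stays pre-filled with 0xff,
 * i.e. an exact match on the key's option bytes; otherwise the nested
 * CLASS/TYPE/DATA attributes are parsed, where the key must carry all
 * three but the mask may omit any of them.
 */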
1254 
1255 static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1256 			    int depth, int option_len,
1257 			    struct netlink_ext_ack *extack)
1258 {
1259 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
1260 	struct vxlan_metadata *md;
1261 	int err;
1262 
1263 	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1264 	memset(md, 0xff, sizeof(*md));
1265 
1266 	if (!depth)
1267 		return sizeof(*md);
1268 
1269 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
1270 		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
1271 		return -EINVAL;
1272 	}
1273 
1274 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
1275 			       vxlan_opt_policy, extack);
1276 	if (err < 0)
1277 		return err;
1278 
1279 	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1280 		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
1281 		return -EINVAL;
1282 	}
1283 
1284 	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1285 		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
1286 		md->gbp &= VXLAN_GBP_MASK;
1287 	}
1288 
1289 	return sizeof(*md);
1290 }
1291 
1292 static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1293 			     int depth, int option_len,
1294 			     struct netlink_ext_ack *extack)
1295 {
1296 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
1297 	struct erspan_metadata *md;
1298 	int err;
1299 
1300 	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1301 	memset(md, 0xff, sizeof(*md));
1302 	md->version = 1;
1303 
1304 	if (!depth)
1305 		return sizeof(*md);
1306 
1307 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
1308 		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
1309 		return -EINVAL;
1310 	}
1311 
1312 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
1313 			       erspan_opt_policy, extack);
1314 	if (err < 0)
1315 		return err;
1316 
1317 	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
1318 		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
1319 		return -EINVAL;
1320 	}
1321 
1322 	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
1323 		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);
1324 
1325 	if (md->version == 1) {
1326 		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1327 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
1328 			return -EINVAL;
1329 		}
1330 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1331 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
1332 			memset(&md->u, 0x00, sizeof(md->u));
1333 			md->u.index = nla_get_be32(nla);
1334 		}
1335 	} else if (md->version == 2) {
1336 		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
1337 				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
1338 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
1339 			return -EINVAL;
1340 		}
1341 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
1342 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
1343 			md->u.md2.dir = nla_get_u8(nla);
1344 		}
1345 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
1346 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
1347 			set_hwid(&md->u.md2, nla_get_u8(nla));
1348 		}
1349 	} else {
1350 		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
1351 		return -EINVAL;
1352 	}
1353 
1354 	return sizeof(*md);
1355 }
1356 
1357 static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key,
1358 			  int depth, int option_len,
1359 			  struct netlink_ext_ack *extack)
1360 {
1361 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1];
1362 	struct gtp_pdu_session_info *sinfo;
1363 	u8 len = key->enc_opts.len;
1364 	int err;
1365 
1366 	sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[len];
1367 	memset(sinfo, 0xff, option_len);
1368 
1369 	if (!depth)
1370 		return sizeof(*sinfo);
1371 
1372 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) {
1373 		NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask");
1374 		return -EINVAL;
1375 	}
1376 
1377 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla,
1378 			       gtp_opt_policy, extack);
1379 	if (err < 0)
1380 		return err;
1381 
1382 	if (!option_len &&
1383 	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] ||
1384 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) {
1385 		NL_SET_ERR_MSG_MOD(extack,
1386 				   "Missing tunnel key gtp option pdu type or qfi");
1387 		return -EINVAL;
1388 	}
1389 
1390 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE])
1391 		sinfo->pdu_type =
1392 			nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]);
1393 
1394 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])
1395 		sinfo->qfi = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]);
1396 
1397 	return sizeof(*sinfo);
1398 }
1399 
1400 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
1401 			  struct fl_flow_key *mask,
1402 			  struct netlink_ext_ack *extack)
1403 {
1404 	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
1405 	int err, option_len, key_depth, msk_depth = 0;
1406 
1407 	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
1408 					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
1409 					     enc_opts_policy, extack);
1410 	if (err)
1411 		return err;
1412 
1413 	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
1414 
1415 	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
1416 		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
1417 						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
1418 						     enc_opts_policy, extack);
1419 		if (err)
1420 			return err;
1421 
1422 		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1423 		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1424 		if (!nla_ok(nla_opt_msk, msk_depth)) {
1425 			NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
1426 			return -EINVAL;
1427 		}
1428 	}
1429 
1430 	nla_for_each_attr(nla_opt_key, nla_enc_key,
1431 			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
1432 		switch (nla_type(nla_opt_key)) {
1433 		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
1434 			if (key->enc_opts.dst_opt_type &&
1435 			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
1436 				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
1437 				return -EINVAL;
1438 			}
1439 			option_len = 0;
1440 			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1441 			option_len = fl_set_geneve_opt(nla_opt_key, key,
1442 						       key_depth, option_len,
1443 						       extack);
1444 			if (option_len < 0)
1445 				return option_len;
1446 
1447 			key->enc_opts.len += option_len;
1448 			/* At the same time we need to parse through the mask
1449 			 * in order to verify exact and mask attribute lengths.
1450 			 */
1451 			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1452 			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
1453 						       msk_depth, option_len,
1454 						       extack);
1455 			if (option_len < 0)
1456 				return option_len;
1457 
1458 			mask->enc_opts.len += option_len;
1459 			if (key->enc_opts.len != mask->enc_opts.len) {
1460 				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1461 				return -EINVAL;
1462 			}
1463 			break;
1464 		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
1465 			if (key->enc_opts.dst_opt_type) {
1466 				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
1467 				return -EINVAL;
1468 			}
1469 			option_len = 0;
1470 			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1471 			option_len = fl_set_vxlan_opt(nla_opt_key, key,
1472 						      key_depth, option_len,
1473 						      extack);
1474 			if (option_len < 0)
1475 				return option_len;
1476 
1477 			key->enc_opts.len += option_len;
1478 			/* At the same time we need to parse through the mask
1479 			 * in order to verify exact and mask attribute lengths.
1480 			 */
1481 			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1482 			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
1483 						      msk_depth, option_len,
1484 						      extack);
1485 			if (option_len < 0)
1486 				return option_len;
1487 
1488 			mask->enc_opts.len += option_len;
1489 			if (key->enc_opts.len != mask->enc_opts.len) {
1490 				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1491 				return -EINVAL;
1492 			}
1493 			break;
1494 		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
1495 			if (key->enc_opts.dst_opt_type) {
1496 				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
1497 				return -EINVAL;
1498 			}
1499 			option_len = 0;
1500 			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1501 			option_len = fl_set_erspan_opt(nla_opt_key, key,
1502 						       key_depth, option_len,
1503 						       extack);
1504 			if (option_len < 0)
1505 				return option_len;
1506 
1507 			key->enc_opts.len += option_len;
1508 			/* At the same time we need to parse through the mask
1509 			 * in order to verify exact and mask attribute lengths.
1510 			 */
1511 			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1512 			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
1513 						       msk_depth, option_len,
1514 						       extack);
1515 			if (option_len < 0)
1516 				return option_len;
1517 
1518 			mask->enc_opts.len += option_len;
1519 			if (key->enc_opts.len != mask->enc_opts.len) {
1520 				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1521 				return -EINVAL;
1522 			}
1523 			break;
1524 		case TCA_FLOWER_KEY_ENC_OPTS_GTP:
1525 			if (key->enc_opts.dst_opt_type) {
1526 				NL_SET_ERR_MSG_MOD(extack,
1527 						   "Duplicate type for gtp options");
1528 				return -EINVAL;
1529 			}
1530 			option_len = 0;
1531 			key->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
1532 			option_len = fl_set_gtp_opt(nla_opt_key, key,
1533 						    key_depth, option_len,
1534 						    extack);
1535 			if (option_len < 0)
1536 				return option_len;
1537 
1538 			key->enc_opts.len += option_len;
1539 			/* At the same time we need to parse through the mask
1540 			 * in order to verify exact and mask attribute lengths.
1541 			 */
1542 			mask->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
1543 			option_len = fl_set_gtp_opt(nla_opt_msk, mask,
1544 						    msk_depth, option_len,
1545 						    extack);
1546 			if (option_len < 0)
1547 				return option_len;
1548 
1549 			mask->enc_opts.len += option_len;
1550 			if (key->enc_opts.len != mask->enc_opts.len) {
1551 				NL_SET_ERR_MSG_MOD(extack,
1552 						   "Key and mask miss aligned");
1553 				return -EINVAL;
1554 			}
1555 			break;
1556 		default:
1557 			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1558 			return -EINVAL;
1559 		}
1560 
1561 		if (!msk_depth)
1562 			continue;
1563 
1564 		if (!nla_ok(nla_opt_msk, msk_depth)) {
1565 			NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
1566 			return -EINVAL;
1567 		}
1568 		nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1569 	}
1570 
1571 	return 0;
1572 }
1573 
1574 static int fl_validate_ct_state(u16 state, struct nlattr *tb,
1575 				struct netlink_ext_ack *extack)
1576 {
1577 	if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
1578 		NL_SET_ERR_MSG_ATTR(extack, tb,
1579 				    "no trk, so no other flag can be set");
1580 		return -EINVAL;
1581 	}
1582 
1583 	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1584 	    state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
1585 		NL_SET_ERR_MSG_ATTR(extack, tb,
1586 				    "new and est are mutually exclusive");
1587 		return -EINVAL;
1588 	}
1589 
1590 	if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID &&
1591 	    state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
1592 		      TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
1593 		NL_SET_ERR_MSG_ATTR(extack, tb,
1594 				    "when inv is set, only trk may be set");
1595 		return -EINVAL;
1596 	}
1597 
1598 	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1599 	    state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
1600 		NL_SET_ERR_MSG_ATTR(extack, tb,
1601 				    "new and rpl are mutually exclusive");
1602 		return -EINVAL;
1603 	}
1604 
1605 	return 0;
1606 }
1607 
1608 static int fl_set_key_ct(struct nlattr **tb,
1609 			 struct flow_dissector_key_ct *key,
1610 			 struct flow_dissector_key_ct *mask,
1611 			 struct netlink_ext_ack *extack)
1612 {
1613 	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
1614 		int err;
1615 
1616 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1617 			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1618 			return -EOPNOTSUPP;
1619 		}
1620 		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1621 			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1622 			       sizeof(key->ct_state));
1623 
1624 		err = fl_validate_ct_state(key->ct_state & mask->ct_state,
1625 					   tb[TCA_FLOWER_KEY_CT_STATE_MASK],
1626 					   extack);
1627 		if (err)
1628 			return err;
1629 
1630 	}
1631 	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1632 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1633 			NL_SET_ERR_MSG(extack, "Conntrack zones aren't enabled");
1634 			return -EOPNOTSUPP;
1635 		}
1636 		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1637 			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1638 			       sizeof(key->ct_zone));
1639 	}
1640 	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1641 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1642 			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1643 			return -EOPNOTSUPP;
1644 		}
1645 		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1646 			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1647 			       sizeof(key->ct_mark));
1648 	}
1649 	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1650 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1651 			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1652 			return -EOPNOTSUPP;
1653 		}
1654 		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1655 			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1656 			       sizeof(key->ct_labels));
1657 	}
1658 
1659 	return 0;
1660 }
1661 
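/* Decide whether the (c)vlan key at the current nesting level should be
 * populated.  Returns true if either more than @vthresh VLAN tags were
 * requested via TCA_FLOWER_KEY_NUM_OF_VLANS or the ethertype attribute @tb
 * carries a VLAN TPID.  Otherwise the ethertype (if present) is consumed as
 * the basic n_proto match and false is returned.
 */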
1662 static bool is_vlan_key(struct nlattr *tb, __be16 *ethertype,
1663 			struct fl_flow_key *key, struct fl_flow_key *mask,
1664 			int vthresh)
1665 {
1666 	const bool good_num_of_vlans = key->num_of_vlans.num_of_vlans > vthresh;
1667 
1668 	if (!tb) {
1669 		*ethertype = 0;
1670 		return good_num_of_vlans;
1671 	}
1672 
1673 	*ethertype = nla_get_be16(tb);
1674 	if (good_num_of_vlans || eth_type_vlan(*ethertype))
1675 		return true;
1676 
1677 	key->basic.n_proto = *ethertype;
1678 	mask->basic.n_proto = cpu_to_be16(~0);
1679 	return false;
1680 }
1681 
1682 static void fl_set_key_cfm_md_level(struct nlattr **tb,
1683 				    struct fl_flow_key *key,
1684 				    struct fl_flow_key *mask,
1685 				    struct netlink_ext_ack *extack)
1686 {
1687 	u8 level;
1688 
1689 	if (!tb[TCA_FLOWER_KEY_CFM_MD_LEVEL])
1690 		return;
1691 
1692 	level = nla_get_u8(tb[TCA_FLOWER_KEY_CFM_MD_LEVEL]);
1693 	key->cfm.mdl_ver = FIELD_PREP(FLOW_DIS_CFM_MDL_MASK, level);
1694 	mask->cfm.mdl_ver = FLOW_DIS_CFM_MDL_MASK;
1695 }
1696 
1697 static void fl_set_key_cfm_opcode(struct nlattr **tb,
1698 				  struct fl_flow_key *key,
1699 				  struct fl_flow_key *mask,
1700 				  struct netlink_ext_ack *extack)
1701 {
1702 	fl_set_key_val(tb, &key->cfm.opcode, TCA_FLOWER_KEY_CFM_OPCODE,
1703 		       &mask->cfm.opcode, TCA_FLOWER_UNSPEC,
1704 		       sizeof(key->cfm.opcode));
1705 }
1706 
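/* Parse the nested TCA_FLOWER_KEY_CFM attribute into the 802.1ag CFM key:
 * the opcode is matched exactly, and the maintenance domain level is packed
 * into the mdl_ver field under FLOW_DIS_CFM_MDL_MASK.
 */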
1707 static int fl_set_key_cfm(struct nlattr **tb,
1708 			  struct fl_flow_key *key,
1709 			  struct fl_flow_key *mask,
1710 			  struct netlink_ext_ack *extack)
1711 {
1712 	struct nlattr *nla_cfm_opt[TCA_FLOWER_KEY_CFM_OPT_MAX];
1713 	int err;
1714 
1715 	if (!tb[TCA_FLOWER_KEY_CFM])
1716 		return 0;
1717 
1718 	err = nla_parse_nested(nla_cfm_opt, TCA_FLOWER_KEY_CFM_OPT_MAX,
1719 			       tb[TCA_FLOWER_KEY_CFM], cfm_opt_policy, extack);
1720 	if (err < 0)
1721 		return err;
1722 
1723 	fl_set_key_cfm_opcode(nla_cfm_opt, key, mask, extack);
1724 	fl_set_key_cfm_md_level(nla_cfm_opt, key, mask, extack);
1725 
1726 	return 0;
1727 }
1728 
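/* Central parser that translates the TCA_FLOWER_* netlink attributes into a
 * flower key and its mask.  Protocol-specific attributes (VLAN, IPv4/IPv6,
 * L4 ports, ICMP, ARP, MPLS, tunnel metadata, conntrack, ...) are only
 * parsed in the context established by the preceding ethertype/ip_proto
 * matches; when no mask attribute is supplied, a field defaults to an exact
 * match.
 */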
1729 static int fl_set_key(struct net *net, struct nlattr **tb,
1730 		      struct fl_flow_key *key, struct fl_flow_key *mask,
1731 		      struct netlink_ext_ack *extack)
1732 {
1733 	__be16 ethertype;
1734 	int ret = 0;
1735 
1736 	if (tb[TCA_FLOWER_INDEV]) {
1737 		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1738 		if (err < 0)
1739 			return err;
1740 		key->meta.ingress_ifindex = err;
1741 		mask->meta.ingress_ifindex = 0xffffffff;
1742 	}
1743 
1744 	fl_set_key_val(tb, &key->meta.l2_miss, TCA_FLOWER_L2_MISS,
1745 		       &mask->meta.l2_miss, TCA_FLOWER_UNSPEC,
1746 		       sizeof(key->meta.l2_miss));
1747 
1748 	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1749 		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1750 		       sizeof(key->eth.dst));
1751 	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1752 		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1753 		       sizeof(key->eth.src));
1754 	fl_set_key_val(tb, &key->num_of_vlans,
1755 		       TCA_FLOWER_KEY_NUM_OF_VLANS,
1756 		       &mask->num_of_vlans,
1757 		       TCA_FLOWER_UNSPEC,
1758 		       sizeof(key->num_of_vlans));
1759 
1760 	if (is_vlan_key(tb[TCA_FLOWER_KEY_ETH_TYPE], &ethertype, key, mask, 0)) {
1761 		fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1762 				TCA_FLOWER_KEY_VLAN_PRIO,
1763 				TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1764 				&key->vlan, &mask->vlan);
1765 
1766 		if (is_vlan_key(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE],
1767 				&ethertype, key, mask, 1)) {
1768 			fl_set_key_vlan(tb, ethertype,
1769 					TCA_FLOWER_KEY_CVLAN_ID,
1770 					TCA_FLOWER_KEY_CVLAN_PRIO,
1771 					TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1772 					&key->cvlan, &mask->cvlan);
1773 			fl_set_key_val(tb, &key->basic.n_proto,
1774 				       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1775 				       &mask->basic.n_proto,
1776 				       TCA_FLOWER_UNSPEC,
1777 				       sizeof(key->basic.n_proto));
1778 		}
1779 	}
1780 
1781 	if (key->basic.n_proto == htons(ETH_P_PPP_SES))
1782 		fl_set_key_pppoe(tb, &key->pppoe, &mask->pppoe, key, mask);
1783 
1784 	if (key->basic.n_proto == htons(ETH_P_IP) ||
1785 	    key->basic.n_proto == htons(ETH_P_IPV6)) {
1786 		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1787 			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1788 			       sizeof(key->basic.ip_proto));
1789 		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1790 	}
1791 
1792 	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1793 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1794 		mask->control.addr_type = ~0;
1795 		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1796 			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1797 			       sizeof(key->ipv4.src));
1798 		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1799 			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1800 			       sizeof(key->ipv4.dst));
1801 	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1802 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1803 		mask->control.addr_type = ~0;
1804 		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1805 			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1806 			       sizeof(key->ipv6.src));
1807 		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1808 			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1809 			       sizeof(key->ipv6.dst));
1810 	}
1811 
1812 	if (key->basic.ip_proto == IPPROTO_TCP) {
1813 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1814 			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1815 			       sizeof(key->tp.src));
1816 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1817 			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1818 			       sizeof(key->tp.dst));
1819 		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1820 			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1821 			       sizeof(key->tcp.flags));
1822 	} else if (key->basic.ip_proto == IPPROTO_UDP) {
1823 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1824 			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1825 			       sizeof(key->tp.src));
1826 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1827 			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1828 			       sizeof(key->tp.dst));
1829 	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
1830 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1831 			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1832 			       sizeof(key->tp.src));
1833 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1834 			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1835 			       sizeof(key->tp.dst));
1836 	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
1837 		   key->basic.ip_proto == IPPROTO_ICMP) {
1838 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1839 			       &mask->icmp.type,
1840 			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1841 			       sizeof(key->icmp.type));
1842 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1843 			       &mask->icmp.code,
1844 			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1845 			       sizeof(key->icmp.code));
1846 	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1847 		   key->basic.ip_proto == IPPROTO_ICMPV6) {
1848 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1849 			       &mask->icmp.type,
1850 			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1851 			       sizeof(key->icmp.type));
1852 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1853 			       &mask->icmp.code,
1854 			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1855 			       sizeof(key->icmp.code));
1856 	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1857 		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1858 		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
1859 		if (ret)
1860 			return ret;
1861 	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1862 		   key->basic.n_proto == htons(ETH_P_RARP)) {
1863 		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1864 			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1865 			       sizeof(key->arp.sip));
1866 		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1867 			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1868 			       sizeof(key->arp.tip));
1869 		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1870 			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1871 			       sizeof(key->arp.op));
1872 		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1873 			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1874 			       sizeof(key->arp.sha));
1875 		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1876 			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1877 			       sizeof(key->arp.tha));
1878 	} else if (key->basic.ip_proto == IPPROTO_L2TP) {
1879 		fl_set_key_val(tb, &key->l2tpv3.session_id,
1880 			       TCA_FLOWER_KEY_L2TPV3_SID,
1881 			       &mask->l2tpv3.session_id, TCA_FLOWER_UNSPEC,
1882 			       sizeof(key->l2tpv3.session_id));
1883 	} else if (key->basic.n_proto == htons(ETH_P_CFM)) {
1884 		ret = fl_set_key_cfm(tb, key, mask, extack);
1885 		if (ret)
1886 			return ret;
1887 	}
1888 
1889 	if (key->basic.ip_proto == IPPROTO_TCP ||
1890 	    key->basic.ip_proto == IPPROTO_UDP ||
1891 	    key->basic.ip_proto == IPPROTO_SCTP) {
1892 		ret = fl_set_key_port_range(tb, key, mask, extack);
1893 		if (ret)
1894 			return ret;
1895 	}
1896 
1897 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1898 	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1899 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1900 		mask->enc_control.addr_type = ~0;
1901 		fl_set_key_val(tb, &key->enc_ipv4.src,
1902 			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
1903 			       &mask->enc_ipv4.src,
1904 			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1905 			       sizeof(key->enc_ipv4.src));
1906 		fl_set_key_val(tb, &key->enc_ipv4.dst,
1907 			       TCA_FLOWER_KEY_ENC_IPV4_DST,
1908 			       &mask->enc_ipv4.dst,
1909 			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1910 			       sizeof(key->enc_ipv4.dst));
1911 	}
1912 
1913 	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1914 	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1915 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1916 		mask->enc_control.addr_type = ~0;
1917 		fl_set_key_val(tb, &key->enc_ipv6.src,
1918 			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
1919 			       &mask->enc_ipv6.src,
1920 			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1921 			       sizeof(key->enc_ipv6.src));
1922 		fl_set_key_val(tb, &key->enc_ipv6.dst,
1923 			       TCA_FLOWER_KEY_ENC_IPV6_DST,
1924 			       &mask->enc_ipv6.dst,
1925 			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1926 			       sizeof(key->enc_ipv6.dst));
1927 	}
1928 
1929 	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1930 		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1931 		       sizeof(key->enc_key_id.keyid));
1932 
1933 	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1934 		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1935 		       sizeof(key->enc_tp.src));
1936 
1937 	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1938 		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1939 		       sizeof(key->enc_tp.dst));
1940 
1941 	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1942 
1943 	fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1944 		       &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1945 		       sizeof(key->hash.hash));
1946 
1947 	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1948 		ret = fl_set_enc_opt(tb, key, mask, extack);
1949 		if (ret)
1950 			return ret;
1951 	}
1952 
1953 	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1954 	if (ret)
1955 		return ret;
1956 
1957 	if (tb[TCA_FLOWER_KEY_FLAGS])
1958 		ret = fl_set_key_flags(tb, &key->control.flags,
1959 				       &mask->control.flags, extack);
1960 
1961 	return ret;
1962 }
1963 
1964 static void fl_mask_copy(struct fl_flow_mask *dst,
1965 			 struct fl_flow_mask *src)
1966 {
1967 	const void *psrc = fl_key_get_start(&src->key, src);
1968 	void *pdst = fl_key_get_start(&dst->key, src);
1969 
1970 	memcpy(pdst, psrc, fl_mask_range(src));
1971 	dst->range = src->range;
1972 }
1973 
1974 static const struct rhashtable_params fl_ht_params = {
1975 	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1976 	.head_offset = offsetof(struct cls_fl_filter, ht_node),
1977 	.automatic_shrinking = true,
1978 };
1979 
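/* Each mask owns a filter hashtable keyed on the masked flow key, but only
 * over the byte range that the mask actually covers: key_offset is advanced
 * to the start of the masked region and key_len is clamped to its length,
 * so filters sharing a mask hash and compare only the relevant bytes of mkey.
 */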
1980 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1981 {
1982 	mask->filter_ht_params = fl_ht_params;
1983 	mask->filter_ht_params.key_len = fl_mask_range(mask);
1984 	mask->filter_ht_params.key_offset += mask->range.start;
1985 
1986 	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1987 }
1988 
1989 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1990 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
1991 
1992 #define FL_KEY_IS_MASKED(mask, member)						\
1993 	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
1994 		   0, FL_KEY_MEMBER_SIZE(member))				\
1995 
1996 #define FL_KEY_SET(keys, cnt, id, member)					\
1997 	do {									\
1998 		keys[cnt].key_id = id;						\
1999 		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
2000 		cnt++;								\
2001 	} while (0)
2002 
2003 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
2004 	do {									\
2005 		if (FL_KEY_IS_MASKED(mask, member))				\
2006 			FL_KEY_SET(keys, cnt, id, member);			\
2007 	} while (0)
2008 
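/* Build the flow dissector used to extract packet fields for this mask.
 * The control and basic keys are always present; every other key is included
 * only if at least one of its bits is set in the mask, and the encap control
 * key is added whenever encap IPv4 or IPv6 addresses are masked.
 */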
2009 static void fl_init_dissector(struct flow_dissector *dissector,
2010 			      struct fl_flow_key *mask)
2011 {
2012 	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
2013 	size_t cnt = 0;
2014 
2015 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2016 			     FLOW_DISSECTOR_KEY_META, meta);
2017 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
2018 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
2019 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2020 			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
2021 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2022 			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
2023 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2024 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
2025 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2026 			     FLOW_DISSECTOR_KEY_PORTS, tp);
2027 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2028 			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
2029 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2030 			     FLOW_DISSECTOR_KEY_IP, ip);
2031 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2032 			     FLOW_DISSECTOR_KEY_TCP, tcp);
2033 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2034 			     FLOW_DISSECTOR_KEY_ICMP, icmp);
2035 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2036 			     FLOW_DISSECTOR_KEY_ARP, arp);
2037 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2038 			     FLOW_DISSECTOR_KEY_MPLS, mpls);
2039 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2040 			     FLOW_DISSECTOR_KEY_VLAN, vlan);
2041 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2042 			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
2043 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2044 			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
2045 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2046 			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
2047 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2048 			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
2049 	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
2050 	    FL_KEY_IS_MASKED(mask, enc_ipv6))
2051 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
2052 			   enc_control);
2053 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2054 			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
2055 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2056 			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
2057 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2058 			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
2059 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2060 			     FLOW_DISSECTOR_KEY_CT, ct);
2061 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2062 			     FLOW_DISSECTOR_KEY_HASH, hash);
2063 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2064 			     FLOW_DISSECTOR_KEY_NUM_OF_VLANS, num_of_vlans);
2065 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2066 			     FLOW_DISSECTOR_KEY_PPPOE, pppoe);
2067 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2068 			     FLOW_DISSECTOR_KEY_L2TPV3, l2tpv3);
2069 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2070 			     FLOW_DISSECTOR_KEY_CFM, cfm);
2071 
2072 	skb_flow_dissector_init(dissector, keys, cnt);
2073 }
2074 
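/* Promote the temporary mask node inserted by fl_check_assign_mask() into a
 * fully initialised mask: copy the masked key range, set up the per-mask
 * filter hashtable and dissector, and replace the temporary node in head->ht
 * before linking the new mask onto head->masks.
 */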
2075 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
2076 					       struct fl_flow_mask *mask)
2077 {
2078 	struct fl_flow_mask *newmask;
2079 	int err;
2080 
2081 	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
2082 	if (!newmask)
2083 		return ERR_PTR(-ENOMEM);
2084 
2085 	fl_mask_copy(newmask, mask);
2086 
2087 	if ((newmask->key.tp_range.tp_min.dst &&
2088 	     newmask->key.tp_range.tp_max.dst) ||
2089 	    (newmask->key.tp_range.tp_min.src &&
2090 	     newmask->key.tp_range.tp_max.src))
2091 		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
2092 
2093 	err = fl_init_mask_hashtable(newmask);
2094 	if (err)
2095 		goto errout_free;
2096 
2097 	fl_init_dissector(&newmask->dissector, &newmask->key);
2098 
2099 	INIT_LIST_HEAD_RCU(&newmask->filters);
2100 
2101 	refcount_set(&newmask->refcnt, 1);
2102 	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
2103 				      &newmask->ht_node, mask_ht_params);
2104 	if (err)
2105 		goto errout_destroy;
2106 
2107 	spin_lock(&head->masks_lock);
2108 	list_add_tail_rcu(&newmask->list, &head->masks);
2109 	spin_unlock(&head->masks_lock);
2110 
2111 	return newmask;
2112 
2113 errout_destroy:
2114 	rhashtable_destroy(&newmask->ht);
2115 errout_free:
2116 	kfree(newmask);
2117 
2118 	return ERR_PTR(err);
2119 }
2120 
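/* Find or create the mask to be used by @fnew.  The caller-provided @mask is
 * first inserted into head->ht as a placeholder; if no equal mask existed, a
 * permanent copy is created, otherwise a reference to the existing mask is
 * taken.  Switching to a different mask than @fold's when overwriting is
 * rejected.
 */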
2121 static int fl_check_assign_mask(struct cls_fl_head *head,
2122 				struct cls_fl_filter *fnew,
2123 				struct cls_fl_filter *fold,
2124 				struct fl_flow_mask *mask)
2125 {
2126 	struct fl_flow_mask *newmask;
2127 	int ret = 0;
2128 
2129 	rcu_read_lock();
2130 
2131 	/* Insert mask as temporary node to prevent concurrent creation of mask
2132 	 * with same key. Any concurrent lookups with same key will return
2133 	 * -EAGAIN because mask's refcnt is zero.
2134 	 */
2135 	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
2136 						       &mask->ht_node,
2137 						       mask_ht_params);
2138 	if (!fnew->mask) {
2139 		rcu_read_unlock();
2140 
2141 		if (fold) {
2142 			ret = -EINVAL;
2143 			goto errout_cleanup;
2144 		}
2145 
2146 		newmask = fl_create_new_mask(head, mask);
2147 		if (IS_ERR(newmask)) {
2148 			ret = PTR_ERR(newmask);
2149 			goto errout_cleanup;
2150 		}
2151 
2152 		fnew->mask = newmask;
2153 		return 0;
2154 	} else if (IS_ERR(fnew->mask)) {
2155 		ret = PTR_ERR(fnew->mask);
2156 	} else if (fold && fold->mask != fnew->mask) {
2157 		ret = -EINVAL;
2158 	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
2159 		/* Mask was deleted concurrently, try again */
2160 		ret = -EAGAIN;
2161 	}
2162 	rcu_read_unlock();
2163 	return ret;
2164 
2165 errout_cleanup:
2166 	rhashtable_remove_fast(&head->ht, &mask->ht_node,
2167 			       mask_ht_params);
2168 	return ret;
2169 }
2170 
2171 static bool fl_needs_tc_skb_ext(const struct fl_flow_key *mask)
2172 {
2173 	return mask->meta.l2_miss;
2174 }
2175 
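/* Insert @fnew into its mask's filter hashtable.  A duplicate key is only
 * tolerated (reported via *in_ht == false) when an existing filter @fold is
 * being overwritten; otherwise the rhashtable error is propagated.
 */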
2176 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
2177 			       struct cls_fl_filter *fold,
2178 			       bool *in_ht)
2179 {
2180 	struct fl_flow_mask *mask = fnew->mask;
2181 	int err;
2182 
2183 	err = rhashtable_lookup_insert_fast(&mask->ht,
2184 					    &fnew->ht_node,
2185 					    mask->filter_ht_params);
2186 	if (err) {
2187 		*in_ht = false;
2188 		/* It is okay if a filter with the same key exists when
2189 		 * overwriting.
2190 		 */
2191 		return fold && err == -EEXIST ? 0 : err;
2192 	}
2193 
2194 	*in_ht = true;
2195 	return 0;
2196 }
2197 
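/* Create or update a flower filter (RTM_NEWTFILTER).  The new filter is
 * allocated, its key and actions are parsed, it is bound to a mask via
 * fl_check_assign_mask(), offloaded to hardware unless skip_hw is set and,
 * under tp->lock, either replaces @fold or is inserted as a new filter.
 * Most concurrency races are resolved by returning -EAGAIN to the caller.
 */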
2198 static int fl_change(struct net *net, struct sk_buff *in_skb,
2199 		     struct tcf_proto *tp, unsigned long base,
2200 		     u32 handle, struct nlattr **tca,
2201 		     void **arg, u32 flags,
2202 		     struct netlink_ext_ack *extack)
2203 {
2204 	struct cls_fl_head *head = fl_head_dereference(tp);
2205 	bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
2206 	struct cls_fl_filter *fold = *arg;
2207 	bool bound_to_filter = false;
2208 	struct cls_fl_filter *fnew;
2209 	struct fl_flow_mask *mask;
2210 	struct nlattr **tb;
2211 	bool in_ht;
2212 	int err;
2213 
2214 	if (!tca[TCA_OPTIONS]) {
2215 		err = -EINVAL;
2216 		goto errout_fold;
2217 	}
2218 
2219 	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
2220 	if (!mask) {
2221 		err = -ENOBUFS;
2222 		goto errout_fold;
2223 	}
2224 
2225 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2226 	if (!tb) {
2227 		err = -ENOBUFS;
2228 		goto errout_mask_alloc;
2229 	}
2230 
2231 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2232 					  tca[TCA_OPTIONS], fl_policy, NULL);
2233 	if (err < 0)
2234 		goto errout_tb;
2235 
2236 	if (fold && handle && fold->handle != handle) {
2237 		err = -EINVAL;
2238 		goto errout_tb;
2239 	}
2240 
2241 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
2242 	if (!fnew) {
2243 		err = -ENOBUFS;
2244 		goto errout_tb;
2245 	}
2246 	INIT_LIST_HEAD(&fnew->hw_list);
2247 	refcount_set(&fnew->refcnt, 1);
2248 
2249 	if (tb[TCA_FLOWER_FLAGS]) {
2250 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
2251 
2252 		if (!tc_flags_valid(fnew->flags)) {
2253 			kfree(fnew);
2254 			err = -EINVAL;
2255 			goto errout_tb;
2256 		}
2257 	}
2258 
2259 	if (!fold) {
2260 		spin_lock(&tp->lock);
2261 		if (!handle) {
2262 			handle = 1;
2263 			err = idr_alloc_u32(&head->handle_idr, NULL, &handle,
2264 					    INT_MAX, GFP_ATOMIC);
2265 		} else {
2266 			err = idr_alloc_u32(&head->handle_idr, NULL, &handle,
2267 					    handle, GFP_ATOMIC);
2268 
2269 			/* A filter with the specified handle was concurrently
2270 			 * inserted after the initial check in cls_api. This is
2271 			 * not necessarily an error if NLM_F_EXCL is not set in
2272 			 * the message flags. Returning EAGAIN will cause cls_api
2273 			 * to try to update the concurrently inserted rule.
2274 			 */
2275 			if (err == -ENOSPC)
2276 				err = -EAGAIN;
2277 		}
2278 		spin_unlock(&tp->lock);
2279 
2280 		if (err) {
2281 			kfree(fnew);
2282 			goto errout_tb;
2283 		}
2284 	}
2285 	fnew->handle = handle;
2286 
2287 	err = tcf_exts_init_ex(&fnew->exts, net, TCA_FLOWER_ACT, 0, tp, handle,
2288 			       !tc_skip_hw(fnew->flags));
2289 	if (err < 0)
2290 		goto errout_idr;
2291 
2292 	err = tcf_exts_validate_ex(net, tp, tb, tca[TCA_RATE],
2293 				   &fnew->exts, flags, fnew->flags,
2294 				   extack);
2295 	if (err < 0)
2296 		goto errout_idr;
2297 
2298 	if (tb[TCA_FLOWER_CLASSID]) {
2299 		fnew->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
2300 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
2301 			rtnl_lock();
2302 		tcf_bind_filter(tp, &fnew->res, base);
2303 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
2304 			rtnl_unlock();
2305 		bound_to_filter = true;
2306 	}
2307 
2308 	err = fl_set_key(net, tb, &fnew->key, &mask->key, extack);
2309 	if (err)
2310 		goto unbind_filter;
2311 
2312 	fl_mask_update_range(mask);
2313 	fl_set_masked_key(&fnew->mkey, &fnew->key, mask);
2314 
2315 	if (!fl_mask_fits_tmplt(tp->chain->tmplt_priv, mask)) {
2316 		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
2317 		err = -EINVAL;
2318 		goto unbind_filter;
2319 	}
2320 
2321 	/* Enable tc skb extension if filter matches on data extracted from
2322 	 * this extension.
2323 	 */
2324 	if (fl_needs_tc_skb_ext(&mask->key)) {
2325 		fnew->needs_tc_skb_ext = 1;
2326 		tc_skb_ext_tc_enable();
2327 	}
2328 
2329 	err = fl_check_assign_mask(head, fnew, fold, mask);
2330 	if (err)
2331 		goto unbind_filter;
2332 
2333 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
2334 	if (err)
2335 		goto errout_mask;
2336 
2337 	if (!tc_skip_hw(fnew->flags)) {
2338 		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
2339 		if (err)
2340 			goto errout_ht;
2341 	}
2342 
2343 	if (!tc_in_hw(fnew->flags))
2344 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2345 
2346 	spin_lock(&tp->lock);
2347 
2348 	/* tp was deleted concurrently. -EAGAIN will cause caller to look up
2349 	 * proto again or create a new one, if necessary.
2350 	 */
2351 	if (tp->deleting) {
2352 		err = -EAGAIN;
2353 		goto errout_hw;
2354 	}
2355 
2356 	if (fold) {
2357 		/* Fold filter was deleted concurrently. Retry lookup. */
2358 		if (fold->deleted) {
2359 			err = -EAGAIN;
2360 			goto errout_hw;
2361 		}
2362 
2363 		fnew->handle = handle;
2364 
2365 		if (!in_ht) {
2366 			struct rhashtable_params params =
2367 				fnew->mask->filter_ht_params;
2368 
2369 			err = rhashtable_insert_fast(&fnew->mask->ht,
2370 						     &fnew->ht_node,
2371 						     params);
2372 			if (err)
2373 				goto errout_hw;
2374 			in_ht = true;
2375 		}
2376 
2377 		refcount_inc(&fnew->refcnt);
2378 		rhashtable_remove_fast(&fold->mask->ht,
2379 				       &fold->ht_node,
2380 				       fold->mask->filter_ht_params);
2381 		idr_replace(&head->handle_idr, fnew, fnew->handle);
2382 		list_replace_rcu(&fold->list, &fnew->list);
2383 		fold->deleted = true;
2384 
2385 		spin_unlock(&tp->lock);
2386 
2387 		fl_mask_put(head, fold->mask);
2388 		if (!tc_skip_hw(fold->flags))
2389 			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2390 		tcf_unbind_filter(tp, &fold->res);
2391 		/* Caller holds reference to fold, so refcnt is always > 0
2392 		 * after this.
2393 		 */
2394 		refcount_dec(&fold->refcnt);
2395 		__fl_put(fold);
2396 	} else {
2397 		idr_replace(&head->handle_idr, fnew, fnew->handle);
2398 
2399 		refcount_inc(&fnew->refcnt);
2400 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2401 		spin_unlock(&tp->lock);
2402 	}
2403 
2404 	*arg = fnew;
2405 
2406 	kfree(tb);
2407 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2408 	return 0;
2409 
2410 errout_ht:
2411 	spin_lock(&tp->lock);
2412 errout_hw:
2413 	fnew->deleted = true;
2414 	spin_unlock(&tp->lock);
2415 	if (!tc_skip_hw(fnew->flags))
2416 		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2417 	if (in_ht)
2418 		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2419 				       fnew->mask->filter_ht_params);
2420 errout_mask:
2421 	fl_mask_put(head, fnew->mask);
2422 
2423 unbind_filter:
2424 	if (bound_to_filter) {
2425 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
2426 			rtnl_lock();
2427 		tcf_unbind_filter(tp, &fnew->res);
2428 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
2429 			rtnl_unlock();
2430 	}
2431 
2432 errout_idr:
2433 	if (!fold)
2434 		idr_remove(&head->handle_idr, fnew->handle);
2435 	__fl_put(fnew);
2436 errout_tb:
2437 	kfree(tb);
2438 errout_mask_alloc:
2439 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2440 errout_fold:
2441 	if (fold)
2442 		__fl_put(fold);
2443 	return err;
2444 }
2445 
2446 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2447 		     bool rtnl_held, struct netlink_ext_ack *extack)
2448 {
2449 	struct cls_fl_head *head = fl_head_dereference(tp);
2450 	struct cls_fl_filter *f = arg;
2451 	bool last_on_mask;
2452 	int err = 0;
2453 
2454 	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2455 	*last = list_empty(&head->masks);
2456 	__fl_put(f);
2457 
2458 	return err;
2459 }
2460 
2461 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2462 		    bool rtnl_held)
2463 {
2464 	struct cls_fl_head *head = fl_head_dereference(tp);
2465 	unsigned long id = arg->cookie, tmp;
2466 	struct cls_fl_filter *f;
2467 
2468 	arg->count = arg->skip;
2469 
2470 	rcu_read_lock();
2471 	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2472 		/* don't return filters that are being deleted */
2473 		if (!f || !refcount_inc_not_zero(&f->refcnt))
2474 			continue;
2475 		rcu_read_unlock();
2476 
2477 		if (arg->fn(tp, f, arg) < 0) {
2478 			__fl_put(f);
2479 			arg->stop = 1;
2480 			rcu_read_lock();
2481 			break;
2482 		}
2483 		__fl_put(f);
2484 		arg->count++;
2485 		rcu_read_lock();
2486 	}
2487 	rcu_read_unlock();
2488 	arg->cookie = id;
2489 }
2490 
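/* Walk head->hw_filters under tp->lock and return the next filter after @f
 * that still has a non-zero refcount (taking a reference on it), skipping
 * deleted entries when @add is true.  A NULL @f starts the walk from the
 * list head, which is temporarily treated as a dummy entry for
 * list_for_each_entry_continue().
 */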
2491 static struct cls_fl_filter *
2492 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2493 {
2494 	struct cls_fl_head *head = fl_head_dereference(tp);
2495 
2496 	spin_lock(&tp->lock);
2497 	if (list_empty(&head->hw_filters)) {
2498 		spin_unlock(&tp->lock);
2499 		return NULL;
2500 	}
2501 
2502 	if (!f)
2503 		f = list_entry(&head->hw_filters, struct cls_fl_filter,
2504 			       hw_list);
2505 	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2506 		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2507 			spin_unlock(&tp->lock);
2508 			return f;
2509 		}
2510 	}
2511 
2512 	spin_unlock(&tp->lock);
2513 	return NULL;
2514 }
2515 
2516 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2517 			void *cb_priv, struct netlink_ext_ack *extack)
2518 {
2519 	struct tcf_block *block = tp->chain->block;
2520 	struct flow_cls_offload cls_flower = {};
2521 	struct cls_fl_filter *f = NULL;
2522 	int err;
2523 
2524 	/* hw_filters list can only be changed by hw offload functions after
2525 	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
2526 	 * iterating it.
2527 	 */
2528 	ASSERT_RTNL();
2529 
2530 	while ((f = fl_get_next_hw_filter(tp, f, add))) {
2531 		cls_flower.rule =
2532 			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2533 		if (!cls_flower.rule) {
2534 			__fl_put(f);
2535 			return -ENOMEM;
2536 		}
2537 
2538 		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2539 					   extack);
2540 		cls_flower.command = add ?
2541 			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2542 		cls_flower.cookie = (unsigned long)f;
2543 		cls_flower.rule->match.dissector = &f->mask->dissector;
2544 		cls_flower.rule->match.mask = &f->mask->key;
2545 		cls_flower.rule->match.key = &f->mkey;
2546 
2547 		err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
2548 					      cls_flower.common.extack);
2549 		if (err) {
2550 			kfree(cls_flower.rule);
2551 			if (tc_skip_sw(f->flags)) {
2552 				__fl_put(f);
2553 				return err;
2554 			}
2555 			goto next_flow;
2556 		}
2557 
2558 		cls_flower.classid = f->res.classid;
2559 
2560 		err = tc_setup_cb_reoffload(block, tp, add, cb,
2561 					    TC_SETUP_CLSFLOWER, &cls_flower,
2562 					    cb_priv, &f->flags,
2563 					    &f->in_hw_count);
2564 		tc_cleanup_offload_action(&cls_flower.rule->action);
2565 		kfree(cls_flower.rule);
2566 
2567 		if (err) {
2568 			__fl_put(f);
2569 			return err;
2570 		}
2571 next_flow:
2572 		__fl_put(f);
2573 	}
2574 
2575 	return 0;
2576 }
2577 
2578 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2579 {
2580 	struct flow_cls_offload *cls_flower = type_data;
2581 	struct cls_fl_filter *f =
2582 		(struct cls_fl_filter *) cls_flower->cookie;
2583 	struct cls_fl_head *head = fl_head_dereference(tp);
2584 
2585 	spin_lock(&tp->lock);
2586 	list_add(&f->hw_list, &head->hw_filters);
2587 	spin_unlock(&tp->lock);
2588 }
2589 
2590 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2591 {
2592 	struct flow_cls_offload *cls_flower = type_data;
2593 	struct cls_fl_filter *f =
2594 		(struct cls_fl_filter *) cls_flower->cookie;
2595 
2596 	spin_lock(&tp->lock);
2597 	if (!list_empty(&f->hw_list))
2598 		list_del_init(&f->hw_list);
2599 	spin_unlock(&tp->lock);
2600 }
2601 
2602 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2603 			      struct fl_flow_tmplt *tmplt)
2604 {
2605 	struct flow_cls_offload cls_flower = {};
2606 	struct tcf_block *block = chain->block;
2607 
2608 	cls_flower.rule = flow_rule_alloc(0);
2609 	if (!cls_flower.rule)
2610 		return -ENOMEM;
2611 
2612 	cls_flower.common.chain_index = chain->index;
2613 	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2614 	cls_flower.cookie = (unsigned long) tmplt;
2615 	cls_flower.rule->match.dissector = &tmplt->dissector;
2616 	cls_flower.rule->match.mask = &tmplt->mask;
2617 	cls_flower.rule->match.key = &tmplt->dummy_key;
2618 
2619 	/* We don't care if any of the drivers fail to handle this
2620 	 * call. It serves just as a hint for them.
2621 	 */
2622 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2623 	kfree(cls_flower.rule);
2624 
2625 	return 0;
2626 }
2627 
2628 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2629 				struct fl_flow_tmplt *tmplt)
2630 {
2631 	struct flow_cls_offload cls_flower = {};
2632 	struct tcf_block *block = chain->block;
2633 
2634 	cls_flower.common.chain_index = chain->index;
2635 	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2636 	cls_flower.cookie = (unsigned long) tmplt;
2637 
2638 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2639 }
2640 
2641 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2642 			     struct nlattr **tca,
2643 			     struct netlink_ext_ack *extack)
2644 {
2645 	struct fl_flow_tmplt *tmplt;
2646 	struct nlattr **tb;
2647 	int err;
2648 
2649 	if (!tca[TCA_OPTIONS])
2650 		return ERR_PTR(-EINVAL);
2651 
2652 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2653 	if (!tb)
2654 		return ERR_PTR(-ENOBUFS);
2655 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2656 					  tca[TCA_OPTIONS], fl_policy, NULL);
2657 	if (err)
2658 		goto errout_tb;
2659 
2660 	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2661 	if (!tmplt) {
2662 		err = -ENOMEM;
2663 		goto errout_tb;
2664 	}
2665 	tmplt->chain = chain;
2666 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2667 	if (err)
2668 		goto errout_tmplt;
2669 
2670 	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2671 
2672 	err = fl_hw_create_tmplt(chain, tmplt);
2673 	if (err)
2674 		goto errout_tmplt;
2675 
2676 	kfree(tb);
2677 	return tmplt;
2678 
2679 errout_tmplt:
2680 	kfree(tmplt);
2681 errout_tb:
2682 	kfree(tb);
2683 	return ERR_PTR(err);
2684 }
2685 
2686 static void fl_tmplt_destroy(void *tmplt_priv)
2687 {
2688 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2689 
2690 	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2691 	kfree(tmplt);
2692 }
2693 
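/* Emit a key/mask attribute pair for dumping.  Fields whose mask is all
 * zeroes are skipped entirely, and the mask attribute is only emitted when a
 * dedicated mask attribute type exists (mask_type != TCA_FLOWER_UNSPEC).
 */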
2694 static int fl_dump_key_val(struct sk_buff *skb,
2695 			   void *val, int val_type,
2696 			   void *mask, int mask_type, int len)
2697 {
2698 	int err;
2699 
2700 	if (!memchr_inv(mask, 0, len))
2701 		return 0;
2702 	err = nla_put(skb, val_type, len, val);
2703 	if (err)
2704 		return err;
2705 	if (mask_type != TCA_FLOWER_UNSPEC) {
2706 		err = nla_put(skb, mask_type, len, mask);
2707 		if (err)
2708 			return err;
2709 	}
2710 	return 0;
2711 }
2712 
2713 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2714 				  struct fl_flow_key *mask)
2715 {
2716 	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2717 			    TCA_FLOWER_KEY_PORT_DST_MIN,
2718 			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2719 			    sizeof(key->tp_range.tp_min.dst)) ||
2720 	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2721 			    TCA_FLOWER_KEY_PORT_DST_MAX,
2722 			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2723 			    sizeof(key->tp_range.tp_max.dst)) ||
2724 	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2725 			    TCA_FLOWER_KEY_PORT_SRC_MIN,
2726 			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2727 			    sizeof(key->tp_range.tp_min.src)) ||
2728 	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2729 			    TCA_FLOWER_KEY_PORT_SRC_MAX,
2730 			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2731 			    sizeof(key->tp_range.tp_max.src)))
2732 		return -1;
2733 
2734 	return 0;
2735 }
2736 
2737 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2738 				    struct flow_dissector_key_mpls *mpls_key,
2739 				    struct flow_dissector_key_mpls *mpls_mask,
2740 				    u8 lse_index)
2741 {
2742 	struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2743 	struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2744 	int err;
2745 
2746 	err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2747 			 lse_index + 1);
2748 	if (err)
2749 		return err;
2750 
2751 	if (lse_mask->mpls_ttl) {
2752 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2753 				 lse_key->mpls_ttl);
2754 		if (err)
2755 			return err;
2756 	}
2757 	if (lse_mask->mpls_bos) {
2758 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2759 				 lse_key->mpls_bos);
2760 		if (err)
2761 			return err;
2762 	}
2763 	if (lse_mask->mpls_tc) {
2764 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2765 				 lse_key->mpls_tc);
2766 		if (err)
2767 			return err;
2768 	}
2769 	if (lse_mask->mpls_label) {
2770 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2771 				  lse_key->mpls_label);
2772 		if (err)
2773 			return err;
2774 	}
2775 
2776 	return 0;
2777 }
2778 
2779 static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2780 				 struct flow_dissector_key_mpls *mpls_key,
2781 				 struct flow_dissector_key_mpls *mpls_mask)
2782 {
2783 	struct nlattr *opts;
2784 	struct nlattr *lse;
2785 	u8 lse_index;
2786 	int err;
2787 
2788 	opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2789 	if (!opts)
2790 		return -EMSGSIZE;
2791 
2792 	for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2793 		if (!(mpls_mask->used_lses & 1 << lse_index))
2794 			continue;
2795 
2796 		lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2797 		if (!lse) {
2798 			err = -EMSGSIZE;
2799 			goto err_opts;
2800 		}
2801 
2802 		err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2803 					       lse_index);
2804 		if (err)
2805 			goto err_opts_lse;
2806 		nla_nest_end(skb, lse);
2807 	}
2808 	nla_nest_end(skb, opts);
2809 
2810 	return 0;
2811 
2812 err_opts_lse:
2813 	nla_nest_cancel(skb, lse);
2814 err_opts:
2815 	nla_nest_cancel(skb, opts);
2816 
2817 	return err;
2818 }
2819 
2820 static int fl_dump_key_mpls(struct sk_buff *skb,
2821 			    struct flow_dissector_key_mpls *mpls_key,
2822 			    struct flow_dissector_key_mpls *mpls_mask)
2823 {
2824 	struct flow_dissector_mpls_lse *lse_mask;
2825 	struct flow_dissector_mpls_lse *lse_key;
2826 	int err;
2827 
2828 	if (!mpls_mask->used_lses)
2829 		return 0;
2830 
2831 	lse_mask = &mpls_mask->ls[0];
2832 	lse_key = &mpls_key->ls[0];
2833 
2834 	/* For backward compatibility, don't use the MPLS nested attributes if
2835 	 * the rule can be expressed using the old attributes.
2836 	 */
2837 	if (mpls_mask->used_lses & ~1 ||
2838 	    (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2839 	     !lse_mask->mpls_tc && !lse_mask->mpls_label))
2840 		return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2841 
2842 	if (lse_mask->mpls_ttl) {
2843 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2844 				 lse_key->mpls_ttl);
2845 		if (err)
2846 			return err;
2847 	}
2848 	if (lse_mask->mpls_tc) {
2849 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2850 				 lse_key->mpls_tc);
2851 		if (err)
2852 			return err;
2853 	}
2854 	if (lse_mask->mpls_label) {
2855 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2856 				  lse_key->mpls_label);
2857 		if (err)
2858 			return err;
2859 	}
2860 	if (lse_mask->mpls_bos) {
2861 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2862 				 lse_key->mpls_bos);
2863 		if (err)
2864 			return err;
2865 	}
2866 	return 0;
2867 }
2868 
2869 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2870 			  struct flow_dissector_key_ip *key,
2871 			  struct flow_dissector_key_ip *mask)
2872 {
2873 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2874 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2875 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2876 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2877 
2878 	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2879 	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2880 		return -1;
2881 
2882 	return 0;
2883 }
2884 
2885 static int fl_dump_key_vlan(struct sk_buff *skb,
2886 			    int vlan_id_key, int vlan_prio_key,
2887 			    struct flow_dissector_key_vlan *vlan_key,
2888 			    struct flow_dissector_key_vlan *vlan_mask)
2889 {
2890 	int err;
2891 
2892 	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2893 		return 0;
2894 	if (vlan_mask->vlan_id) {
2895 		err = nla_put_u16(skb, vlan_id_key,
2896 				  vlan_key->vlan_id);
2897 		if (err)
2898 			return err;
2899 	}
2900 	if (vlan_mask->vlan_priority) {
2901 		err = nla_put_u8(skb, vlan_prio_key,
2902 				 vlan_key->vlan_priority);
2903 		if (err)
2904 			return err;
2905 	}
2906 	return 0;
2907 }
2908 
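/* Translate one dissector control flag into the corresponding TCA_FLOWER
 * flag bit: the flower mask bit is set whenever the dissector mask bit is
 * set, and the flower key bit mirrors the dissector key bit.
 */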
2909 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2910 			    u32 *flower_key, u32 *flower_mask,
2911 			    u32 flower_flag_bit, u32 dissector_flag_bit)
2912 {
2913 	if (dissector_mask & dissector_flag_bit) {
2914 		*flower_mask |= flower_flag_bit;
2915 		if (dissector_key & dissector_flag_bit)
2916 			*flower_key |= flower_flag_bit;
2917 	}
2918 }
2919 
2920 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2921 {
2922 	u32 key, mask;
2923 	__be32 _key, _mask;
2924 	int err;
2925 
2926 	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2927 		return 0;
2928 
2929 	key = 0;
2930 	mask = 0;
2931 
2932 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2933 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2934 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2935 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2936 			FLOW_DIS_FIRST_FRAG);
2937 
2938 	_key = cpu_to_be32(key);
2939 	_mask = cpu_to_be32(mask);
2940 
2941 	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2942 	if (err)
2943 		return err;
2944 
2945 	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2946 }
2947 
2948 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2949 				  struct flow_dissector_key_enc_opts *enc_opts)
2950 {
2951 	struct geneve_opt *opt;
2952 	struct nlattr *nest;
2953 	int opt_off = 0;
2954 
2955 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2956 	if (!nest)
2957 		goto nla_put_failure;
2958 
2959 	while (enc_opts->len > opt_off) {
2960 		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2961 
2962 		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2963 				 opt->opt_class))
2964 			goto nla_put_failure;
2965 		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2966 			       opt->type))
2967 			goto nla_put_failure;
2968 		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2969 			    opt->length * 4, opt->opt_data))
2970 			goto nla_put_failure;
2971 
2972 		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2973 	}
2974 	nla_nest_end(skb, nest);
2975 	return 0;
2976 
2977 nla_put_failure:
2978 	nla_nest_cancel(skb, nest);
2979 	return -EMSGSIZE;
2980 }
2981 
2982 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2983 				 struct flow_dissector_key_enc_opts *enc_opts)
2984 {
2985 	struct vxlan_metadata *md;
2986 	struct nlattr *nest;
2987 
2988 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2989 	if (!nest)
2990 		goto nla_put_failure;
2991 
2992 	md = (struct vxlan_metadata *)&enc_opts->data[0];
2993 	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2994 		goto nla_put_failure;
2995 
2996 	nla_nest_end(skb, nest);
2997 	return 0;
2998 
2999 nla_put_failure:
3000 	nla_nest_cancel(skb, nest);
3001 	return -EMSGSIZE;
3002 }
3003 
3004 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
3005 				  struct flow_dissector_key_enc_opts *enc_opts)
3006 {
3007 	struct erspan_metadata *md;
3008 	struct nlattr *nest;
3009 
3010 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
3011 	if (!nest)
3012 		goto nla_put_failure;
3013 
3014 	md = (struct erspan_metadata *)&enc_opts->data[0];
3015 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
3016 		goto nla_put_failure;
3017 
3018 	if (md->version == 1 &&
3019 	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
3020 		goto nla_put_failure;
3021 
3022 	if (md->version == 2 &&
3023 	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
3024 			md->u.md2.dir) ||
3025 	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
3026 			get_hwid(&md->u.md2))))
3027 		goto nla_put_failure;
3028 
3029 	nla_nest_end(skb, nest);
3030 	return 0;
3031 
3032 nla_put_failure:
3033 	nla_nest_cancel(skb, nest);
3034 	return -EMSGSIZE;
3035 }
3036 
3037 static int fl_dump_key_gtp_opt(struct sk_buff *skb,
3038 			       struct flow_dissector_key_enc_opts *enc_opts)
3040 {
3041 	struct gtp_pdu_session_info *session_info;
3042 	struct nlattr *nest;
3043 
3044 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP);
3045 	if (!nest)
3046 		goto nla_put_failure;
3047 
3048 	session_info = (struct gtp_pdu_session_info *)&enc_opts->data[0];
3049 
3050 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE,
3051 		       session_info->pdu_type))
3052 		goto nla_put_failure;
3053 
3054 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, session_info->qfi))
3055 		goto nla_put_failure;
3056 
3057 	nla_nest_end(skb, nest);
3058 	return 0;
3059 
3060 nla_put_failure:
3061 	nla_nest_cancel(skb, nest);
3062 	return -EMSGSIZE;
3063 }
3064 
3065 static int fl_dump_key_ct(struct sk_buff *skb,
3066 			  struct flow_dissector_key_ct *key,
3067 			  struct flow_dissector_key_ct *mask)
3068 {
3069 	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
3070 	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
3071 			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
3072 			    sizeof(key->ct_state)))
3073 		goto nla_put_failure;
3074 
3075 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
3076 	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
3077 			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
3078 			    sizeof(key->ct_zone)))
3079 		goto nla_put_failure;
3080 
3081 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
3082 	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
3083 			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
3084 			    sizeof(key->ct_mark)))
3085 		goto nla_put_failure;
3086 
3087 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
3088 	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
3089 			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
3090 			    sizeof(key->ct_labels)))
3091 		goto nla_put_failure;
3092 
3093 	return 0;
3094 
3095 nla_put_failure:
3096 	return -EMSGSIZE;
3097 }
3098 
3099 static int fl_dump_key_cfm(struct sk_buff *skb,
3100 			   struct flow_dissector_key_cfm *key,
3101 			   struct flow_dissector_key_cfm *mask)
3102 {
3103 	struct nlattr *opts;
3104 	int err;
3105 	u8 mdl;
3106 
3107 	if (!memchr_inv(mask, 0, sizeof(*mask)))
3108 		return 0;
3109 
3110 	opts = nla_nest_start(skb, TCA_FLOWER_KEY_CFM);
3111 	if (!opts)
3112 		return -EMSGSIZE;
3113 
3114 	if (FIELD_GET(FLOW_DIS_CFM_MDL_MASK, mask->mdl_ver)) {
3115 		mdl = FIELD_GET(FLOW_DIS_CFM_MDL_MASK, key->mdl_ver);
3116 		err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_MD_LEVEL, mdl);
3117 		if (err)
3118 			goto err_cfm_opts;
3119 	}
3120 
3121 	if (mask->opcode) {
3122 		err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_OPCODE, key->opcode);
3123 		if (err)
3124 			goto err_cfm_opts;
3125 	}
3126 
3127 	nla_nest_end(skb, opts);
3128 
3129 	return 0;
3130 
3131 err_cfm_opts:
3132 	nla_nest_cancel(skb, opts);
3133 	return err;
3134 }
3135 
3136 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
3137 			       struct flow_dissector_key_enc_opts *enc_opts)
3138 {
3139 	struct nlattr *nest;
3140 	int err;
3141 
3142 	if (!enc_opts->len)
3143 		return 0;
3144 
3145 	nest = nla_nest_start_noflag(skb, enc_opt_type);
3146 	if (!nest)
3147 		goto nla_put_failure;
3148 
3149 	switch (enc_opts->dst_opt_type) {
3150 	case TUNNEL_GENEVE_OPT:
3151 		err = fl_dump_key_geneve_opt(skb, enc_opts);
3152 		if (err)
3153 			goto nla_put_failure;
3154 		break;
3155 	case TUNNEL_VXLAN_OPT:
3156 		err = fl_dump_key_vxlan_opt(skb, enc_opts);
3157 		if (err)
3158 			goto nla_put_failure;
3159 		break;
3160 	case TUNNEL_ERSPAN_OPT:
3161 		err = fl_dump_key_erspan_opt(skb, enc_opts);
3162 		if (err)
3163 			goto nla_put_failure;
3164 		break;
3165 	case TUNNEL_GTP_OPT:
3166 		err = fl_dump_key_gtp_opt(skb, enc_opts);
3167 		if (err)
3168 			goto nla_put_failure;
3169 		break;
3170 	default:
3171 		goto nla_put_failure;
3172 	}
3173 	nla_nest_end(skb, nest);
3174 	return 0;
3175 
3176 nla_put_failure:
3177 	nla_nest_cancel(skb, nest);
3178 	return -EMSGSIZE;
3179 }
3180 
3181 static int fl_dump_key_enc_opt(struct sk_buff *skb,
3182 			       struct flow_dissector_key_enc_opts *key_opts,
3183 			       struct flow_dissector_key_enc_opts *msk_opts)
3184 {
3185 	int err;
3186 
3187 	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
3188 	if (err)
3189 		return err;
3190 
3191 	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
3192 }
3193 
3194 static int fl_dump_key(struct sk_buff *skb, struct net *net,
3195 		       struct fl_flow_key *key, struct fl_flow_key *mask)
3196 {
3197 	if (mask->meta.ingress_ifindex) {
3198 		struct net_device *dev;
3199 
3200 		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
3201 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
3202 			goto nla_put_failure;
3203 	}
3204 
3205 	if (fl_dump_key_val(skb, &key->meta.l2_miss,
3206 			    TCA_FLOWER_L2_MISS, &mask->meta.l2_miss,
3207 			    TCA_FLOWER_UNSPEC, sizeof(key->meta.l2_miss)))
3208 		goto nla_put_failure;
3209 
3210 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
3211 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
3212 			    sizeof(key->eth.dst)) ||
3213 	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
3214 			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
3215 			    sizeof(key->eth.src)) ||
3216 	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
3217 			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
3218 			    sizeof(key->basic.n_proto)))
3219 		goto nla_put_failure;
3220 
3221 	if (mask->num_of_vlans.num_of_vlans) {
3222 		if (nla_put_u8(skb, TCA_FLOWER_KEY_NUM_OF_VLANS, key->num_of_vlans.num_of_vlans))
3223 			goto nla_put_failure;
3224 	}
3225 
3226 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
3227 		goto nla_put_failure;
3228 
3229 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
3230 			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
3231 		goto nla_put_failure;
3232 
3233 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
3234 			     TCA_FLOWER_KEY_CVLAN_PRIO,
3235 			     &key->cvlan, &mask->cvlan) ||
3236 	    (mask->cvlan.vlan_tpid &&
3237 	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3238 			  key->cvlan.vlan_tpid)))
3239 		goto nla_put_failure;
3240 
3241 	if (mask->basic.n_proto) {
3242 		if (mask->cvlan.vlan_eth_type) {
3243 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
3244 					 key->basic.n_proto))
3245 				goto nla_put_failure;
3246 		} else if (mask->vlan.vlan_eth_type) {
3247 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3248 					 key->vlan.vlan_eth_type))
3249 				goto nla_put_failure;
3250 		}
3251 	}
3252 
3253 	if ((key->basic.n_proto == htons(ETH_P_IP) ||
3254 	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
3255 	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
3256 			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
3257 			    sizeof(key->basic.ip_proto)) ||
3258 	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
3259 		goto nla_put_failure;
3260 
3261 	if (mask->pppoe.session_id) {
3262 		if (nla_put_be16(skb, TCA_FLOWER_KEY_PPPOE_SID,
3263 				 key->pppoe.session_id))
3264 			goto nla_put_failure;
3265 	}
3266 	if (mask->basic.n_proto && mask->pppoe.ppp_proto) {
3267 		if (nla_put_be16(skb, TCA_FLOWER_KEY_PPP_PROTO,
3268 				 key->pppoe.ppp_proto))
3269 			goto nla_put_failure;
3270 	}
3271 
3272 	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3273 	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
3274 			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
3275 			     sizeof(key->ipv4.src)) ||
3276 	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
3277 			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
3278 			     sizeof(key->ipv4.dst))))
3279 		goto nla_put_failure;
3280 	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3281 		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
3282 				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
3283 				  sizeof(key->ipv6.src)) ||
3284 		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
3285 				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
3286 				  sizeof(key->ipv6.dst))))
3287 		goto nla_put_failure;
3288 
3289 	if (key->basic.ip_proto == IPPROTO_TCP &&
3290 	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
3291 			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
3292 			     sizeof(key->tp.src)) ||
3293 	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
3294 			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
3295 			     sizeof(key->tp.dst)) ||
3296 	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
3297 			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
3298 			     sizeof(key->tcp.flags))))
3299 		goto nla_put_failure;
3300 	else if (key->basic.ip_proto == IPPROTO_UDP &&
3301 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
3302 				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
3303 				  sizeof(key->tp.src)) ||
3304 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
3305 				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
3306 				  sizeof(key->tp.dst))))
3307 		goto nla_put_failure;
3308 	else if (key->basic.ip_proto == IPPROTO_SCTP &&
3309 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
3310 				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
3311 				  sizeof(key->tp.src)) ||
3312 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
3313 				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
3314 				  sizeof(key->tp.dst))))
3315 		goto nla_put_failure;
3316 	else if (key->basic.n_proto == htons(ETH_P_IP) &&
3317 		 key->basic.ip_proto == IPPROTO_ICMP &&
3318 		 (fl_dump_key_val(skb, &key->icmp.type,
3319 				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
3320 				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
3321 				  sizeof(key->icmp.type)) ||
3322 		  fl_dump_key_val(skb, &key->icmp.code,
3323 				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
3324 				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
3325 				  sizeof(key->icmp.code))))
3326 		goto nla_put_failure;
3327 	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
3328 		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
3329 		 (fl_dump_key_val(skb, &key->icmp.type,
3330 				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
3331 				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
3332 				  sizeof(key->icmp.type)) ||
3333 		  fl_dump_key_val(skb, &key->icmp.code,
3334 				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
3335 				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
3336 				  sizeof(key->icmp.code))))
3337 		goto nla_put_failure;
3338 	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
3339 		  key->basic.n_proto == htons(ETH_P_RARP)) &&
3340 		 (fl_dump_key_val(skb, &key->arp.sip,
3341 				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
3342 				  TCA_FLOWER_KEY_ARP_SIP_MASK,
3343 				  sizeof(key->arp.sip)) ||
3344 		  fl_dump_key_val(skb, &key->arp.tip,
3345 				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
3346 				  TCA_FLOWER_KEY_ARP_TIP_MASK,
3347 				  sizeof(key->arp.tip)) ||
3348 		  fl_dump_key_val(skb, &key->arp.op,
3349 				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
3350 				  TCA_FLOWER_KEY_ARP_OP_MASK,
3351 				  sizeof(key->arp.op)) ||
3352 		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
3353 				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
3354 				  sizeof(key->arp.sha)) ||
3355 		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
3356 				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
3357 				  sizeof(key->arp.tha))))
3358 		goto nla_put_failure;
3359 	else if (key->basic.ip_proto == IPPROTO_L2TP &&
3360 		 fl_dump_key_val(skb, &key->l2tpv3.session_id,
3361 				 TCA_FLOWER_KEY_L2TPV3_SID,
3362 				 &mask->l2tpv3.session_id,
3363 				 TCA_FLOWER_UNSPEC,
3364 				 sizeof(key->l2tpv3.session_id)))
3365 		goto nla_put_failure;
3366 
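	/* For TCP/UDP/SCTP, also dump any configured source/destination
	 * port ranges; these use separate attributes from the exact-port
	 * keys above.
	 */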
3367 	if ((key->basic.ip_proto == IPPROTO_TCP ||
3368 	     key->basic.ip_proto == IPPROTO_UDP ||
3369 	     key->basic.ip_proto == IPPROTO_SCTP) &&
3370 	     fl_dump_key_port_range(skb, key, mask))
3371 		goto nla_put_failure;
3372 
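	/* Tunnel (encapsulation) keys: outer addresses, key id (e.g. a VNI),
	 * UDP ports, IP TOS/TTL and tunnel metadata options.
	 */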
3373 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3374 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
3375 			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
3376 			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
3377 			    sizeof(key->enc_ipv4.src)) ||
3378 	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
3379 			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
3380 			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
3381 			     sizeof(key->enc_ipv4.dst))))
3382 		goto nla_put_failure;
3383 	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3384 		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
3385 			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
3386 			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
3387 			    sizeof(key->enc_ipv6.src)) ||
3388 		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
3389 				 TCA_FLOWER_KEY_ENC_IPV6_DST,
3390 				 &mask->enc_ipv6.dst,
3391 				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
3392 			    sizeof(key->enc_ipv6.dst))))
3393 		goto nla_put_failure;
3394 
3395 	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
3396 			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
3397 			    sizeof(key->enc_key_id)) ||
3398 	    fl_dump_key_val(skb, &key->enc_tp.src,
3399 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
3400 			    &mask->enc_tp.src,
3401 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
3402 			    sizeof(key->enc_tp.src)) ||
3403 	    fl_dump_key_val(skb, &key->enc_tp.dst,
3404 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
3405 			    &mask->enc_tp.dst,
3406 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
3407 			    sizeof(key->enc_tp.dst)) ||
3408 	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
3409 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
3410 		goto nla_put_failure;
3411 
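	/* Remaining keys: conntrack state, control flags (e.g. fragment
	 * bits), packet hash and CFM.
	 */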
3412 	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
3413 		goto nla_put_failure;
3414 
3415 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
3416 		goto nla_put_failure;
3417 
3418 	if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
3419 			     &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
3420 			     sizeof(key->hash.hash)))
3421 		goto nla_put_failure;
3422 
3423 	if (fl_dump_key_cfm(skb, &key->cfm, &mask->cfm))
3424 		goto nla_put_failure;
3425 
3426 	return 0;
3427 
3428 nla_put_failure:
3429 	return -EMSGSIZE;
3430 }
3431 
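/* Dump one flower filter back to userspace inside a TCA_OPTIONS nest:
 * classid, all key/mask pairs, flags, the in-hardware count and the actions
 * with their statistics.  tp->lock guards the fields a concurrent (unlocked)
 * change could update; hardware counters are refreshed before the action
 * statistics are dumped unless the filter was added with skip_hw.
 *
 * For illustration only (userspace side, not part of this file): a filter
 * added with something like
 *
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		ip_proto tcp dst_port 80 action drop
 *
 * is reported back through this path by "tc filter show dev eth0 ingress".
 */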
3432 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
3433 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3434 {
3435 	struct cls_fl_filter *f = fh;
3436 	struct nlattr *nest;
3437 	struct fl_flow_key *key, *mask;
3438 	bool skip_hw;
3439 
3440 	if (!f)
3441 		return skb->len;
3442 
3443 	t->tcm_handle = f->handle;
3444 
3445 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3446 	if (!nest)
3447 		goto nla_put_failure;
3448 
3449 	spin_lock(&tp->lock);
3450 
3451 	if (f->res.classid &&
3452 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
3453 		goto nla_put_failure_locked;
3454 
3455 	key = &f->key;
3456 	mask = &f->mask->key;
3457 	skip_hw = tc_skip_hw(f->flags);
3458 
3459 	if (fl_dump_key(skb, net, key, mask))
3460 		goto nla_put_failure_locked;
3461 
3462 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3463 		goto nla_put_failure_locked;
3464 
3465 	spin_unlock(&tp->lock);
3466 
3467 	if (!skip_hw)
3468 		fl_hw_update_stats(tp, f, rtnl_held);
3469 
3470 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3471 		goto nla_put_failure;
3472 
3473 	if (tcf_exts_dump(skb, &f->exts))
3474 		goto nla_put_failure;
3475 
3476 	nla_nest_end(skb, nest);
3477 
3478 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3479 		goto nla_put_failure;
3480 
3481 	return skb->len;
3482 
3483 nla_put_failure_locked:
3484 	spin_unlock(&tp->lock);
3485 nla_put_failure:
3486 	nla_nest_cancel(skb, nest);
3487 	return -1;
3488 }
3489 
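/* Terse counterpart of fl_dump(), used when userspace requests a terse dump
 * (TCA_DUMP_FLAGS_TERSE): only the handle, flags and a terse action dump are
 * emitted, which keeps dumps of very large rule sets cheap.
 */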
3490 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3491 			 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3492 {
3493 	struct cls_fl_filter *f = fh;
3494 	struct nlattr *nest;
3495 	bool skip_hw;
3496 
3497 	if (!f)
3498 		return skb->len;
3499 
3500 	t->tcm_handle = f->handle;
3501 
3502 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3503 	if (!nest)
3504 		goto nla_put_failure;
3505 
3506 	spin_lock(&tp->lock);
3507 
3508 	skip_hw = tc_skip_hw(f->flags);
3509 
3510 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3511 		goto nla_put_failure_locked;
3512 
3513 	spin_unlock(&tp->lock);
3514 
3515 	if (!skip_hw)
3516 		fl_hw_update_stats(tp, f, rtnl_held);
3517 
3518 	if (tcf_exts_terse_dump(skb, &f->exts))
3519 		goto nla_put_failure;
3520 
3521 	nla_nest_end(skb, nest);
3522 
3523 	return skb->len;
3524 
3525 nla_put_failure_locked:
3526 	spin_unlock(&tp->lock);
3527 nla_put_failure:
3528 	nla_nest_cancel(skb, nest);
3529 	return -1;
3530 }
3531 
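/* Dump a chain template created by fl_tmplt_create(): only the template's
 * key/mask are emitted; a template has no handle, flags or actions.
 */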
3532 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3533 {
3534 	struct fl_flow_tmplt *tmplt = tmplt_priv;
3535 	struct fl_flow_key *key, *mask;
3536 	struct nlattr *nest;
3537 
3538 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3539 	if (!nest)
3540 		goto nla_put_failure;
3541 
3542 	key = &tmplt->dummy_key;
3543 	mask = &tmplt->mask;
3544 
3545 	if (fl_dump_key(skb, net, key, mask))
3546 		goto nla_put_failure;
3547 
3548 	nla_nest_end(skb, nest);
3549 
3550 	return skb->len;
3551 
3552 nla_put_failure:
3553 	nla_nest_cancel(skb, nest);
3554 	return -EMSGSIZE;
3555 }
3556 
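/* Called by the qdisc when one of its classes is bound or unbound: if this
 * filter's classid matches, the class reference cached in f->res is re-bound
 * or cleared by tc_cls_bind_class().
 */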
3557 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3558 			  unsigned long base)
3559 {
3560 	struct cls_fl_filter *f = fh;
3561 
3562 	tc_cls_bind_class(classid, cl, q, &f->res, base);
3563 }
3564 
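/* Tell the cls API whether this tcf_proto holds no filters and may therefore
 * be destroyed.  tp->deleting is set under tp->lock so that a concurrent
 * filter insert sees the decision and backs off.
 */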
3565 static bool fl_delete_empty(struct tcf_proto *tp)
3566 {
3567 	struct cls_fl_head *head = fl_head_dereference(tp);
3568 
3569 	spin_lock(&tp->lock);
3570 	tp->deleting = idr_is_empty(&head->handle_idr);
3571 	spin_unlock(&tp->lock);
3572 
3573 	return tp->deleting;
3574 }
3575 
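/* The classifier handles its own locking (tp->lock, RCU), so it advertises
 * TCF_PROTO_OPS_DOIT_UNLOCKED and the cls API may invoke these callbacks
 * without holding the rtnl lock.
 */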
3576 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3577 	.kind		= "flower",
3578 	.classify	= fl_classify,
3579 	.init		= fl_init,
3580 	.destroy	= fl_destroy,
3581 	.get		= fl_get,
3582 	.put		= fl_put,
3583 	.change		= fl_change,
3584 	.delete		= fl_delete,
3585 	.delete_empty	= fl_delete_empty,
3586 	.walk		= fl_walk,
3587 	.reoffload	= fl_reoffload,
3588 	.hw_add		= fl_hw_add,
3589 	.hw_del		= fl_hw_del,
3590 	.dump		= fl_dump,
3591 	.terse_dump	= fl_terse_dump,
3592 	.bind_class	= fl_bind_class,
3593 	.tmplt_create	= fl_tmplt_create,
3594 	.tmplt_destroy	= fl_tmplt_destroy,
3595 	.tmplt_dump	= fl_tmplt_dump,
3596 	.get_exts	= fl_get_exts,
3597 	.owner		= THIS_MODULE,
3598 	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
3599 };
3600 
3601 static int __init cls_fl_init(void)
3602 {
3603 	return register_tcf_proto_ops(&cls_fl_ops);
3604 }
3605 
3606 static void __exit cls_fl_exit(void)
3607 {
3608 	unregister_tcf_proto_ops(&cls_fl_ops);
3609 }
3610 
3611 module_init(cls_fl_init);
3612 module_exit(cls_fl_exit);
3613 
3614 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3615 MODULE_DESCRIPTION("Flower classifier");
3616 MODULE_LICENSE("GPL v2");