// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>
#include <net/tc_wrapper.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static struct xarray tcf_exts_miss_cookies_xa;
struct tcf_exts_miss_cookie_node {
	const struct tcf_chain *chain;
	const struct tcf_proto *tp;
	const struct tcf_exts *exts;
	u32 chain_index;
	u32 tp_prio;
	u32 handle;
	u32 miss_cookie_base;
	struct rcu_head rcu;
};

/* Each tc action entry cookie is composed of a 32-bit miss_cookie_base
 * plus the action index in the exts tc actions array.
 */
union tcf_exts_miss_cookie {
	struct {
		u32 miss_cookie_base;
		u32 act_index;
	};
	u64 miss_cookie;
};
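
/* A minimal worked example, assuming a little-endian host where
 * miss_cookie_base occupies the low 32 bits of the union:
 *
 *	union tcf_exts_miss_cookie mc = { .miss_cookie_base = 2,
 *					  .act_index = 3, };
 *	-> mc.miss_cookie == 0x0000000300000002
 *
 * tcf_exts_miss_cookie_get() builds such a value and
 * tcf_exts_miss_cookie_lookup() splits it back apart.
 */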

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	struct tcf_exts_miss_cookie_node *n;
	static u32 next;
	int err;

	if (WARN_ON(!handle || !tp->ops->get_exts))
		return -EINVAL;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->chain_index = tp->chain->index;
	n->chain = tp->chain;
	n->tp_prio = tp->prio;
	n->tp = tp;
	n->exts = exts;
	n->handle = handle;

	err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base,
			      n, xa_limit_32b, &next, GFP_KERNEL);
	if (err)
		goto err_xa_alloc;

	exts->miss_cookie_node = n;
	return 0;

err_xa_alloc:
	kfree(n);
	return err;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
	struct tcf_exts_miss_cookie_node *n;

	if (!exts->miss_cookie_node)
		return;

	n = exts->miss_cookie_node;
	xa_erase(&tcf_exts_miss_cookies_xa, n->miss_cookie_base);
	kfree_rcu(n, rcu);
}

static struct tcf_exts_miss_cookie_node *
tcf_exts_miss_cookie_lookup(u64 miss_cookie, int *act_index)
{
	union tcf_exts_miss_cookie mc = { .miss_cookie = miss_cookie, };

	*act_index = mc.act_index;
	return xa_load(&tcf_exts_miss_cookies_xa, mc.miss_cookie_base);
}
#else /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	return 0;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
}
#endif /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */

static u64 tcf_exts_miss_cookie_get(u32 miss_cookie_base, int act_index)
{
	union tcf_exts_miss_cookie mc = { .act_index = act_index, };

	if (!miss_cookie_base)
		return 0;

	mc.miss_cookie_base = miss_cookie_base;
	return mc.miss_cookie;
}

#ifdef CONFIG_NET_CLS_ACT
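/* tc_skb_ext_tc gates the tc skb extension fast path: it stays disabled
 * until a user (e.g. the ct action offload path) enables it. The helpers
 * below nest via static_branch_inc()/dec(), so every enable must be
 * paired with exactly one disable.
 */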
DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
EXPORT_SYMBOL(tc_skb_ext_tc);

void tc_skb_ext_tc_enable(void)
{
	static_branch_inc(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_enable);

void tc_skb_ext_tc_disable(void)
{
	static_branch_dec(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_disable);
#endif

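/* Protos that are being destroyed are tracked in a per-block hashtable
 * keyed by (chain index, prio, protocol). This lets
 * tcf_chain_tp_insert_unique() detect a concurrent destroy of an
 * identical proto and make the caller retry instead of reusing a
 * half-destroyed instance.
 */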
static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module(NET_CLS_ALIAS_PREFIX "%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);

	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
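
/* For example, with an empty chain the first auto-allocated prio is
 * TC_H_MAJ(0xC0000000); each subsequent auto allocation returns a prio
 * one below the current chain head's, so auto-allocated priorities grow
 * downward from there.
 */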

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

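/* The bypass static key lets the datapath skip software classification
 * when it cannot match anything: bypass is wanted only while the block
 * has filters and every one of them is counted in skipswcnt (filters
 * installed with skip_sw).
 */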
static void tcf_maintain_bypass(struct tcf_block *block)
{
	int filtercnt = atomic_read(&block->filtercnt);
	int skipswcnt = atomic_read(&block->skipswcnt);
	bool bypass_wanted = filtercnt > 0 && filtercnt == skipswcnt;

	if (bypass_wanted != block->bypass_wanted) {
#ifdef CONFIG_NET_CLS_ACT
		if (bypass_wanted)
			static_branch_inc(&tcf_bypass_check_needed_key);
		else
			static_branch_dec(&tcf_bypass_check_needed_key);
#endif
		block->bypass_wanted = bypass_wanted;
	}
}

static void tcf_block_filter_cnt_update(struct tcf_block *block, bool *counted, bool add)
{
	lockdep_assert_not_held(&block->cb_lock);

	down_write(&block->cb_lock);
	if (*counted != add) {
		if (add) {
			atomic_inc(&block->filtercnt);
			*counted = true;
		} else {
			atomic_dec(&block->filtercnt);
			*counted = false;
		}
	}
	tcf_maintain_bypass(block);
	up_write(&block->cb_lock);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	tcf_block_filter_cnt_update(tp->chain->block, &tp->counted, false);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	xa_destroy(&block->ports);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast,
			   struct netlink_ext_ack *extack);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false, NULL);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	unsigned int refcnt, non_act_refcnt;
	bool free_block = false;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding the block
	 * lock. However, when the block is unlocked the chain can be changed
	 * concurrently, so save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	non_act_refcnt = refcnt - chain->action_refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

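	/* Note: chain->explicitly_created is used as 0/1 in the comparison
	 * below, so this branch is taken either when the last non-action
	 * reference went away (0) or when only the explicit-creation
	 * reference remains (1).
	 */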
	if (non_act_refcnt == chain->explicitly_created && !by_act) {
		if (non_act_refcnt == 0)
			tc_chain_notify_delete(tmplt_ops, tmplt_priv,
					       chain->index, block, NULL, 0, 0);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

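/* Flush in two passes: first walk the chain under filter_chain_lock and
 * mark every proto as being destroyed (so that concurrent inserts of an
 * identical proto back off), then drop the references outside the lock,
 * since the destroy handlers may sleep.
 */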
static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev, struct Qdisc *sch,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	bo->sch = sch;
	bo->cb_list_head = &flow_block->cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct Qdisc *sch = block_cb->indr.sch;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	rtnl_lock();
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	tcf_block_unbind(block, &bo);
	up_write(&block->cb_lock);
	rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev, struct Qdisc *sch,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

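	/* No ndo_setup_tc: offer the block to indirect block callbacks
	 * (e.g. drivers watching tunnel devices). Returning -EOPNOTSUPP
	 * makes the caller account this device as non-offload-capable.
	 */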
	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If the tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, refuse the bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

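	/* Chain 0 already exists: register the item and replay the current
	 * head to it under chain0->filter_chain_lock, so that the initial
	 * notification cannot race with a concurrent head change.
	 */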
	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;
	xa_init(&block->ports);

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}
EXPORT_SYMBOL(tcf_block_lookup);

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains
 * on a block. It properly obtains block->lock and takes a reference to the
 * chain before returning it. Users of this function must be tolerant to
 * concurrent chain insertion/deletion or ensure that no concurrent chain
 * modification is possible. Note that netlink dump callbacks cannot
 * guarantee a consistent dump because the rtnl lock is released each time
 * the skb is filled with data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's
 * on a chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, true, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = rcu_dereference(dev->qdisc);
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for a filter attached to a class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take a reference to the block in order to support
		 * execution of the cls API rules update path without the rtnl
		 * lock. The caller must release the block when finished using
		 * it. The 'if' branch of this conditional obtains its
		 * reference by calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

static bool tcf_block_tracks_dev(struct tcf_block *block,
				 struct tcf_block_ext_info *ei)
{
	return tcf_block_shared(block) &&
	       (ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS ||
		ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(q);
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	if (tcf_block_tracks_dev(block, ei)) {
		err = xa_insert(&block->ports, dev->ifindex, dev, GFP_KERNEL);
		if (err) {
			NL_SET_ERR_MSG(extack, "block dev insert failed");
			goto err_dev_insert;
		}
	}

	*p_block = block;
	return 0;

err_dev_insert:
err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should all be removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	struct net_device *dev = qdisc_dev(q);

	if (!block)
		return;
	if (tcf_block_tracks_dev(block, ei))
		xa_erase(&block->ports, dev->ifindex);
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

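/* Replay all filters on the block to a single callback, either adding them
 * (when a new callback is being bound) or removing them (on unbind). If an
 * add fails midway, the filters already offloaded to this callback are
 * removed again by replaying the walk with add == false.
 */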
static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		if (chain->tmplt_ops && add)
			chain->tmplt_ops->tmplt_reoffload(chain, true, cb,
							  cb_priv);
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
		if (chain->tmplt_ops && !add)
			chain->tmplt_ops->tmplt_reoffload(chain, false, cb,
							  cb_priv);
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		list_del(&block_cb->driver_list);
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans the classifier chain attached to this
 * qdisc, (optionally) tests for the protocol, and asks specific
 * classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 struct tcf_exts_miss_cookie_node *n,
				 int act_index,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 16;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = skb_protocol(skb, false);
		int err = 0;

		if (n) {
			struct tcf_exts *exts;

			if (n->tp_prio != tp->prio)
				continue;

			/* We re-lookup the tp and chain based on index instead
			 * of having hard refs and locks to them, so do a sanity
			 * check if any of tp,chain,exts was replaced by the
			 * time we got here with a cookie from hardware.
			 */
			if (unlikely(n->tp != tp || n->tp->chain != n->chain ||
				     !tp->ops->get_exts)) {
				tcf_set_drop_reason(skb,
						    SKB_DROP_REASON_TC_COOKIE_ERROR);
				return TC_ACT_SHOT;
			}

			exts = tp->ops->get_exts(tp, n->handle);
			if (unlikely(!exts || n->exts != exts)) {
				tcf_set_drop_reason(skb,
						    SKB_DROP_REASON_TC_COOKIE_ERROR);
				return TC_ACT_SHOT;
			}

			n = NULL;
			err = tcf_exts_exec_ex(skb, exts, act_index, res);
		} else {
			if (tp->protocol != protocol &&
			    tp->protocol != htons(ETH_P_ALL))
				continue;

			err = tc_classify(skb, tp, res);
		}
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	if (unlikely(n)) {
		tcf_set_drop_reason(skb,
				    SKB_DROP_REASON_TC_COOKIE_ERROR);
		return TC_ACT_SHOT;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		tcf_set_drop_reason(skb,
				    SKB_DROP_REASON_TC_RECLASSIFY_LOOP);
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}

int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode, NULL, 0,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	struct tcf_exts_miss_cookie_node *n = NULL;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int act_index = 0;
	int ret;

	if (block) {
		ext = skb_ext_find(skb, TC_SKB_EXT);

		if (ext && (ext->chain || ext->act_miss)) {
			struct tcf_chain *fchain;
			u32 chain;

			if (ext->act_miss) {
				n = tcf_exts_miss_cookie_lookup(ext->act_miss_cookie,
								&act_index);
				if (!n) {
					tcf_set_drop_reason(skb,
							    SKB_DROP_REASON_TC_COOKIE_ERROR);
					return TC_ACT_SHOT;
				}

				chain = n->chain_index;
			} else {
				chain = ext->chain;
			}

			fchain = tcf_chain_lookup_rcu(block, chain);
			if (!fchain) {
				tcf_set_drop_reason(skb,
						    SKB_DROP_REASON_TC_CHAIN_NOTFOUND);

				return TC_ACT_SHOT;
			}

			/* Consume, so cloned/redirect skbs won't inherit ext */
			skb_ext_del(skb, TC_SKB_EXT);

			tp = rcu_dereference_bh(fchain->filter_chain);
			last_executed_chain = fchain->index;
		}
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, n, act_index,
			     &last_executed_chain);

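	/* On a miss with no verdict, record the last executed chain in the
	 * tc skb extension so that classification can be resumed from that
	 * chain later, e.g. after the packet comes back from hardware.
	 */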
	if (tc_skb_ext_tc_enabled()) {
		/* If we missed on some chain */
		if (ret == TC_ACT_UNSPEC && last_executed_chain) {
			struct tc_skb_cb *cb = tc_skb_cb(skb);

			ext = tc_skb_ext_alloc(skb);
			if (WARN_ON_ONCE(!ext)) {
				tcf_set_drop_reason(skb, SKB_DROP_REASON_NOMEM);
				return TC_ACT_SHOT;
			}
			ext->chain = last_executed_chain;
			ext->mru = cb->mru;
			ext->post_ct = cb->post_ct;
			ext->post_ct_snat = cb->post_ct_snat;
			ext->post_ct_dnat = cb->post_ct_dnat;
			ext->zone = cb->zone;
		}
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify);

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);

/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}

static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}

2017 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
2018 					   struct tcf_chain_info *chain_info,
2019 					   u32 protocol, u32 prio,
2020 					   bool prio_allocate)
2021 {
2022 	struct tcf_proto **pprev;
2023 	struct tcf_proto *tp;
2024 
2025 	/* Check the chain for a proto-tcf with this priority */
2026 	for (pprev = &chain->filter_chain;
2027 	     (tp = tcf_chain_dereference(*pprev, chain));
2028 	     pprev = &tp->next) {
2029 		if (tp->prio >= prio) {
2030 			if (tp->prio == prio) {
2031 				if (prio_allocate ||
2032 				    (tp->protocol != protocol && protocol))
2033 					return ERR_PTR(-EINVAL);
2034 			} else {
2035 				tp = NULL;
2036 			}
2037 			break;
2038 		}
2039 	}
2040 	chain_info->pprev = pprev;
2041 	if (tp) {
2042 		chain_info->next = tp->next;
2043 		tcf_proto_get(tp);
2044 	} else {
2045 		chain_info->next = NULL;
2046 	}
2047 	return tp;
2048 }
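
/* Ordering note (a sketch of the invariant the walk above relies on):
 * tcf_proto instances on a chain are kept sorted by ascending prio, so
 * the loop may stop at the first tp with tp->prio >= prio. E.g. with
 * protos at prios 10 and 20, a lookup for prio 15 stops at the prio-20
 * entry and returns NULL, while a lookup for prio 10 with a different
 * non-zero protocol returns ERR_PTR(-EINVAL).
 */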
2049 
2050 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
2051 			 struct tcf_proto *tp, struct tcf_block *block,
2052 			 struct Qdisc *q, u32 parent, void *fh,
2053 			 u32 portid, u32 seq, u16 flags, int event,
2054 			 bool terse_dump, bool rtnl_held,
2055 			 struct netlink_ext_ack *extack)
2056 {
2057 	struct tcmsg *tcm;
2058 	struct nlmsghdr *nlh;
2059 	unsigned char *b = skb_tail_pointer(skb);
2060 
2061 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2062 	if (!nlh)
2063 		goto out_nlmsg_trim;
2064 	tcm = nlmsg_data(nlh);
2065 	tcm->tcm_family = AF_UNSPEC;
2066 	tcm->tcm__pad1 = 0;
2067 	tcm->tcm__pad2 = 0;
2068 	if (q) {
2069 		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
2070 		tcm->tcm_parent = parent;
2071 	} else {
2072 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2073 		tcm->tcm_block_index = block->index;
2074 	}
2075 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
2076 	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
2077 		goto nla_put_failure;
2078 	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
2079 		goto nla_put_failure;
2080 	if (!fh) {
2081 		tcm->tcm_handle = 0;
2082 	} else if (terse_dump) {
2083 		if (tp->ops->terse_dump) {
2084 			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
2085 						rtnl_held) < 0)
2086 				goto nla_put_failure;
2087 		} else {
2088 			goto cls_op_not_supp;
2089 		}
2090 	} else {
2091 		if (tp->ops->dump &&
2092 		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
2093 			goto nla_put_failure;
2094 	}
2095 
2096 	if (extack && extack->_msg &&
2097 	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2098 		goto nla_put_failure;
2099 
2100 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2101 
2102 	return skb->len;
2103 
2104 out_nlmsg_trim:
2105 nla_put_failure:
2106 cls_op_not_supp:
2107 	nlmsg_trim(skb, b);
2108 	return -1;
2109 }
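
/* Schematic layout of the message built above (a sketch; the classifier
 * payload varies by kind):
 *
 *	struct nlmsghdr		nlmsg_type = event, e.g. RTM_NEWTFILTER
 *	struct tcmsg		tcm_info = TC_H_MAKE(prio, protocol),
 *				tcm_ifindex or TCM_IFINDEX_MAGIC_BLOCK
 *	TCA_KIND		classifier name, e.g. "flower"
 *	TCA_CHAIN		chain index
 *	...			filter body from ->dump() or ->terse_dump()
 *	TCA_EXT_WARN_MSG	optional extack warning string
 */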
2110 
2111 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
2112 			  struct nlmsghdr *n, struct tcf_proto *tp,
2113 			  struct tcf_block *block, struct Qdisc *q,
2114 			  u32 parent, void *fh, int event, bool unicast,
2115 			  bool rtnl_held, struct netlink_ext_ack *extack)
2116 {
2117 	struct sk_buff *skb;
2118 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2119 	int err = 0;
2120 
2121 	if (!unicast && !rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
2122 		return 0;
2123 
2124 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2125 	if (!skb)
2126 		return -ENOBUFS;
2127 
2128 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
2129 			  n->nlmsg_seq, n->nlmsg_flags, event,
2130 			  false, rtnl_held, extack) <= 0) {
2131 		kfree_skb(skb);
2132 		return -EINVAL;
2133 	}
2134 
2135 	if (unicast)
2136 		err = rtnl_unicast(skb, net, portid);
2137 	else
2138 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2139 				     n->nlmsg_flags & NLM_F_ECHO);
2140 	return err;
2141 }
2142 
2143 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
2144 			      struct nlmsghdr *n, struct tcf_proto *tp,
2145 			      struct tcf_block *block, struct Qdisc *q,
2146 			      u32 parent, void *fh, bool *last, bool rtnl_held,
2147 			      struct netlink_ext_ack *extack)
2148 {
2149 	struct sk_buff *skb;
2150 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2151 	int err;
2152 
2153 	if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
2154 		return tp->ops->delete(tp, fh, last, rtnl_held, extack);
2155 
2156 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2157 	if (!skb)
2158 		return -ENOBUFS;
2159 
2160 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
2161 			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
2162 			  false, rtnl_held, extack) <= 0) {
2163 		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
2164 		kfree_skb(skb);
2165 		return -EINVAL;
2166 	}
2167 
2168 	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
2169 	if (err) {
2170 		kfree_skb(skb);
2171 		return err;
2172 	}
2173 
2174 	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2175 			     n->nlmsg_flags & NLM_F_ECHO);
2176 	if (err < 0)
2177 		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
2178 
2179 	return err;
2180 }
2181 
2182 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
2183 				 struct tcf_block *block, struct Qdisc *q,
2184 				 u32 parent, struct nlmsghdr *n,
2185 				 struct tcf_chain *chain, int event,
2186 				 struct netlink_ext_ack *extack)
2187 {
2188 	struct tcf_proto *tp;
2189 
2190 	for (tp = tcf_get_next_proto(chain, NULL);
2191 	     tp; tp = tcf_get_next_proto(chain, tp))
2192 		tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
2193 			       event, false, true, extack);
2194 }
2195 
2196 static void tfilter_put(struct tcf_proto *tp, void *fh)
2197 {
2198 	if (tp->ops->put && fh)
2199 		tp->ops->put(tp, fh);
2200 }
2201 
2202 static bool is_qdisc_ingress(__u32 classid)
2203 {
2204 	return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS));
2205 }
2206 
2207 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2208 			  struct netlink_ext_ack *extack)
2209 {
2210 	struct net *net = sock_net(skb->sk);
2211 	struct nlattr *tca[TCA_MAX + 1];
2212 	char name[IFNAMSIZ];
2213 	struct tcmsg *t;
2214 	u32 protocol;
2215 	u32 prio;
2216 	bool prio_allocate;
2217 	u32 parent;
2218 	u32 chain_index;
2219 	struct Qdisc *q;
2220 	struct tcf_chain_info chain_info;
2221 	struct tcf_chain *chain;
2222 	struct tcf_block *block;
2223 	struct tcf_proto *tp;
2224 	unsigned long cl;
2225 	void *fh;
2226 	int err;
2227 	int tp_created;
2228 	bool rtnl_held = false;
2229 	u32 flags;
2230 
2231 replay:
2232 	tp_created = 0;
2233 
2234 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2235 				     rtm_tca_policy, extack);
2236 	if (err < 0)
2237 		return err;
2238 
2239 	t = nlmsg_data(n);
2240 	protocol = TC_H_MIN(t->tcm_info);
2241 	prio = TC_H_MAJ(t->tcm_info);
2242 	prio_allocate = false;
2243 	parent = t->tcm_parent;
2244 	tp = NULL;
2245 	cl = 0;
2246 	block = NULL;
2247 	q = NULL;
2248 	chain = NULL;
2249 	flags = 0;
2250 
2251 	if (prio == 0) {
2252 		/* If no priority is provided by the user,
2253 		 * we allocate one.
2254 		 */
2255 		if (n->nlmsg_flags & NLM_F_CREATE) {
2256 			prio = TC_H_MAKE(0x80000000U, 0U);
2257 			prio_allocate = true;
2258 		} else {
2259 			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2260 			return -ENOENT;
2261 		}
2262 	}
2263 
2264 	/* Find head of filter chain. */
2265 
2266 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2267 	if (err)
2268 		return err;
2269 
2270 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2271 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2272 		err = -EINVAL;
2273 		goto errout;
2274 	}
2275 
2276 	/* Take the rtnl mutex if rtnl_held was set on a previous iteration,
2277 	 * if the block is shared (no qdisc found), if the qdisc or the
2278 	 * classifier is not unlocked, or if the classifier type is not specified.
2279 	 */
2280 	if (rtnl_held ||
2281 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2282 	    !tcf_proto_is_unlocked(name)) {
2283 		rtnl_held = true;
2284 		rtnl_lock();
2285 	}
2286 
2287 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2288 	if (err)
2289 		goto errout;
2290 
2291 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2292 				 extack);
2293 	if (IS_ERR(block)) {
2294 		err = PTR_ERR(block);
2295 		goto errout;
2296 	}
2297 	block->classid = parent;
2298 
2299 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2300 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2301 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2302 		err = -EINVAL;
2303 		goto errout;
2304 	}
2305 	chain = tcf_chain_get(block, chain_index, true);
2306 	if (!chain) {
2307 		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2308 		err = -ENOMEM;
2309 		goto errout;
2310 	}
2311 
2312 	mutex_lock(&chain->filter_chain_lock);
2313 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2314 			       prio, prio_allocate);
2315 	if (IS_ERR(tp)) {
2316 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2317 		err = PTR_ERR(tp);
2318 		goto errout_locked;
2319 	}
2320 
2321 	if (tp == NULL) {
2322 		struct tcf_proto *tp_new = NULL;
2323 
2324 		if (chain->flushing) {
2325 			err = -EAGAIN;
2326 			goto errout_locked;
2327 		}
2328 
2329 		/* Proto-tcf does not exist, create new one */
2330 
2331 		if (tca[TCA_KIND] == NULL || !protocol) {
2332 			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2333 			err = -EINVAL;
2334 			goto errout_locked;
2335 		}
2336 
2337 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2338 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2339 			err = -ENOENT;
2340 			goto errout_locked;
2341 		}
2342 
2343 		if (prio_allocate)
2344 			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2345 							       &chain_info));
2346 
2347 		mutex_unlock(&chain->filter_chain_lock);
2348 		tp_new = tcf_proto_create(name, protocol, prio, chain,
2349 					  rtnl_held, extack);
2350 		if (IS_ERR(tp_new)) {
2351 			err = PTR_ERR(tp_new);
2352 			goto errout_tp;
2353 		}
2354 
2355 		tp_created = 1;
2356 		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2357 						rtnl_held);
2358 		if (IS_ERR(tp)) {
2359 			err = PTR_ERR(tp);
2360 			goto errout_tp;
2361 		}
2362 	} else {
2363 		mutex_unlock(&chain->filter_chain_lock);
2364 	}
2365 
2366 	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2367 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2368 		err = -EINVAL;
2369 		goto errout;
2370 	}
2371 
2372 	fh = tp->ops->get(tp, t->tcm_handle);
2373 
2374 	if (!fh) {
2375 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2376 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2377 			err = -ENOENT;
2378 			goto errout;
2379 		}
2380 	} else if (n->nlmsg_flags & NLM_F_EXCL) {
2381 		tfilter_put(tp, fh);
2382 		NL_SET_ERR_MSG(extack, "Filter already exists");
2383 		err = -EEXIST;
2384 		goto errout;
2385 	}
2386 
2387 	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2388 		tfilter_put(tp, fh);
2389 		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2390 		err = -EINVAL;
2391 		goto errout;
2392 	}
2393 
2394 	if (!(n->nlmsg_flags & NLM_F_CREATE))
2395 		flags |= TCA_ACT_FLAGS_REPLACE;
2396 	if (!rtnl_held)
2397 		flags |= TCA_ACT_FLAGS_NO_RTNL;
2398 	if (is_qdisc_ingress(parent))
2399 		flags |= TCA_ACT_FLAGS_AT_INGRESS;
2400 	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2401 			      flags, extack);
2402 	if (err == 0) {
2403 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2404 			       RTM_NEWTFILTER, false, rtnl_held, extack);
2405 		tfilter_put(tp, fh);
2406 		tcf_block_filter_cnt_update(block, &tp->counted, true);
2407 		/* q pointer is NULL for shared blocks */
2408 		if (q)
2409 			q->flags &= ~TCQ_F_CAN_BYPASS;
2410 	}
2411 
2412 errout:
2413 	if (err && tp_created)
2414 		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2415 errout_tp:
2416 	if (chain) {
2417 		if (tp && !IS_ERR(tp))
2418 			tcf_proto_put(tp, rtnl_held, NULL);
2419 		if (!tp_created)
2420 			tcf_chain_put(chain);
2421 	}
2422 	tcf_block_release(q, block, rtnl_held);
2423 
2424 	if (rtnl_held)
2425 		rtnl_unlock();
2426 
2427 	if (err == -EAGAIN) {
2428 		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
2429 		 * of target chain.
2430 		 */
2431 		rtnl_held = true;
2432 		/* Replay the request. */
2433 		goto replay;
2434 	}
2435 	return err;
2436 
2437 errout_locked:
2438 	mutex_unlock(&chain->filter_chain_lock);
2439 	goto errout;
2440 }
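
/* For orientation, a request handled by tc_new_tfilter() typically comes
 * from a command like (illustrative):
 *
 *	tc filter add dev eth0 ingress protocol ip prio 10 \
 *		flower dst_ip 192.0.2.1 action drop
 *
 * iproute2 sends this as RTM_NEWTFILTER with NLM_F_CREATE | NLM_F_EXCL,
 * the priority in the upper 16 bits of tcm_info and the ethertype in the
 * lower ones, TCA_KIND = "flower", and the match/actions in nested
 * classifier attributes. Omitting "prio" takes the prio_allocate path
 * above, which picks one via tcf_auto_prio().
 */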
2441 
2442 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2443 			  struct netlink_ext_ack *extack)
2444 {
2445 	struct net *net = sock_net(skb->sk);
2446 	struct nlattr *tca[TCA_MAX + 1];
2447 	char name[IFNAMSIZ];
2448 	struct tcmsg *t;
2449 	u32 protocol;
2450 	u32 prio;
2451 	u32 parent;
2452 	u32 chain_index;
2453 	struct Qdisc *q = NULL;
2454 	struct tcf_chain_info chain_info;
2455 	struct tcf_chain *chain = NULL;
2456 	struct tcf_block *block = NULL;
2457 	struct tcf_proto *tp = NULL;
2458 	unsigned long cl = 0;
2459 	void *fh = NULL;
2460 	int err;
2461 	bool rtnl_held = false;
2462 
2463 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2464 				     rtm_tca_policy, extack);
2465 	if (err < 0)
2466 		return err;
2467 
2468 	t = nlmsg_data(n);
2469 	protocol = TC_H_MIN(t->tcm_info);
2470 	prio = TC_H_MAJ(t->tcm_info);
2471 	parent = t->tcm_parent;
2472 
2473 	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2474 		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2475 		return -ENOENT;
2476 	}
2477 
2478 	/* Find head of filter chain. */
2479 
2480 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2481 	if (err)
2482 		return err;
2483 
2484 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2485 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2486 		err = -EINVAL;
2487 		goto errout;
2488 	}
2489 	/* Take the rtnl mutex if flushing the whole chain, if the block is
2490 	 * shared (no qdisc found), if the qdisc or the classifier is not
2491 	 * unlocked, or if the classifier type is not specified.
2492 	 */
2493 	if (!prio ||
2494 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2495 	    !tcf_proto_is_unlocked(name)) {
2496 		rtnl_held = true;
2497 		rtnl_lock();
2498 	}
2499 
2500 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2501 	if (err)
2502 		goto errout;
2503 
2504 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2505 				 extack);
2506 	if (IS_ERR(block)) {
2507 		err = PTR_ERR(block);
2508 		goto errout;
2509 	}
2510 
2511 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2512 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2513 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2514 		err = -EINVAL;
2515 		goto errout;
2516 	}
2517 	chain = tcf_chain_get(block, chain_index, false);
2518 	if (!chain) {
2519 		/* User requested flush on non-existent chain. Nothing to do,
2520 		 * so just return success.
2521 		 */
2522 		if (prio == 0) {
2523 			err = 0;
2524 			goto errout;
2525 		}
2526 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2527 		err = -ENOENT;
2528 		goto errout;
2529 	}
2530 
2531 	if (prio == 0) {
2532 		tfilter_notify_chain(net, skb, block, q, parent, n,
2533 				     chain, RTM_DELTFILTER, extack);
2534 		tcf_chain_flush(chain, rtnl_held);
2535 		err = 0;
2536 		goto errout;
2537 	}
2538 
2539 	mutex_lock(&chain->filter_chain_lock);
2540 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2541 			       prio, false);
2542 	if (!tp || IS_ERR(tp)) {
2543 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2544 		err = tp ? PTR_ERR(tp) : -ENOENT;
2545 		goto errout_locked;
2546 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2547 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2548 		err = -EINVAL;
2549 		goto errout_locked;
2550 	} else if (t->tcm_handle == 0) {
2551 		tcf_proto_signal_destroying(chain, tp);
2552 		tcf_chain_tp_remove(chain, &chain_info, tp);
2553 		mutex_unlock(&chain->filter_chain_lock);
2554 
2555 		tcf_proto_put(tp, rtnl_held, NULL);
2556 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2557 			       RTM_DELTFILTER, false, rtnl_held, extack);
2558 		err = 0;
2559 		goto errout;
2560 	}
2561 	mutex_unlock(&chain->filter_chain_lock);
2562 
2563 	fh = tp->ops->get(tp, t->tcm_handle);
2564 
2565 	if (!fh) {
2566 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2567 		err = -ENOENT;
2568 	} else {
2569 		bool last;
2570 
2571 		err = tfilter_del_notify(net, skb, n, tp, block, q, parent, fh,
2572 					 &last, rtnl_held, extack);
2573 
2574 		if (err)
2575 			goto errout;
2576 		if (last)
2577 			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2578 	}
2579 
2580 errout:
2581 	if (chain) {
2582 		if (tp && !IS_ERR(tp))
2583 			tcf_proto_put(tp, rtnl_held, NULL);
2584 		tcf_chain_put(chain);
2585 	}
2586 	tcf_block_release(q, block, rtnl_held);
2587 
2588 	if (rtnl_held)
2589 		rtnl_unlock();
2590 
2591 	return err;
2592 
2593 errout_locked:
2594 	mutex_unlock(&chain->filter_chain_lock);
2595 	goto errout;
2596 }
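
/* Two common deletion shapes are handled above (illustrative commands):
 *
 *	tc filter del dev eth0 ingress
 *
 * arrives with prio == 0 and flushes the whole chain, while
 *
 *	tc filter del dev eth0 ingress prio 10 handle 0x1 flower
 *
 * removes a single filter; when ->delete() reports the last filter is
 * gone, tcf_chain_tp_delete_empty() drops the now-empty proto too.
 */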
2597 
2598 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2599 			  struct netlink_ext_ack *extack)
2600 {
2601 	struct net *net = sock_net(skb->sk);
2602 	struct nlattr *tca[TCA_MAX + 1];
2603 	char name[IFNAMSIZ];
2604 	struct tcmsg *t;
2605 	u32 protocol;
2606 	u32 prio;
2607 	u32 parent;
2608 	u32 chain_index;
2609 	struct Qdisc *q = NULL;
2610 	struct tcf_chain_info chain_info;
2611 	struct tcf_chain *chain = NULL;
2612 	struct tcf_block *block = NULL;
2613 	struct tcf_proto *tp = NULL;
2614 	unsigned long cl = 0;
2615 	void *fh = NULL;
2616 	int err;
2617 	bool rtnl_held = false;
2618 
2619 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2620 				     rtm_tca_policy, extack);
2621 	if (err < 0)
2622 		return err;
2623 
2624 	t = nlmsg_data(n);
2625 	protocol = TC_H_MIN(t->tcm_info);
2626 	prio = TC_H_MAJ(t->tcm_info);
2627 	parent = t->tcm_parent;
2628 
2629 	if (prio == 0) {
2630 		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2631 		return -ENOENT;
2632 	}
2633 
2634 	/* Find head of filter chain. */
2635 
2636 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2637 	if (err)
2638 		return err;
2639 
2640 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2641 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2642 		err = -EINVAL;
2643 		goto errout;
2644 	}
2645 	/* Take the rtnl mutex if the block is shared (no qdisc found), if the
2646 	 * qdisc or the classifier is not unlocked, or if the classifier type
2647 	 * is not specified.
2648 	 */
2649 	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2650 	    !tcf_proto_is_unlocked(name)) {
2651 		rtnl_held = true;
2652 		rtnl_lock();
2653 	}
2654 
2655 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2656 	if (err)
2657 		goto errout;
2658 
2659 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2660 				 extack);
2661 	if (IS_ERR(block)) {
2662 		err = PTR_ERR(block);
2663 		goto errout;
2664 	}
2665 
2666 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2667 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2668 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2669 		err = -EINVAL;
2670 		goto errout;
2671 	}
2672 	chain = tcf_chain_get(block, chain_index, false);
2673 	if (!chain) {
2674 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2675 		err = -EINVAL;
2676 		goto errout;
2677 	}
2678 
2679 	mutex_lock(&chain->filter_chain_lock);
2680 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2681 			       prio, false);
2682 	mutex_unlock(&chain->filter_chain_lock);
2683 	if (!tp || IS_ERR(tp)) {
2684 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2685 		err = tp ? PTR_ERR(tp) : -ENOENT;
2686 		goto errout;
2687 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2688 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2689 		err = -EINVAL;
2690 		goto errout;
2691 	}
2692 
2693 	fh = tp->ops->get(tp, t->tcm_handle);
2694 
2695 	if (!fh) {
2696 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2697 		err = -ENOENT;
2698 	} else {
2699 		err = tfilter_notify(net, skb, n, tp, block, q, parent,
2700 				     fh, RTM_NEWTFILTER, true, rtnl_held, NULL);
2701 		if (err < 0)
2702 			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2703 	}
2704 
2705 	tfilter_put(tp, fh);
2706 errout:
2707 	if (chain) {
2708 		if (tp && !IS_ERR(tp))
2709 			tcf_proto_put(tp, rtnl_held, NULL);
2710 		tcf_chain_put(chain);
2711 	}
2712 	tcf_block_release(q, block, rtnl_held);
2713 
2714 	if (rtnl_held)
2715 		rtnl_unlock();
2716 
2717 	return err;
2718 }
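
/* The get path above answers a unicast query such as (illustrative):
 *
 *	tc filter get dev eth0 ingress prio 10 handle 0x1 flower
 *
 * Unlike add/delete it never mutates state: prio 0 is rejected up front,
 * and a missing chain, proto or handle simply yields -EINVAL or -ENOENT.
 */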
2719 
2720 struct tcf_dump_args {
2721 	struct tcf_walker w;
2722 	struct sk_buff *skb;
2723 	struct netlink_callback *cb;
2724 	struct tcf_block *block;
2725 	struct Qdisc *q;
2726 	u32 parent;
2727 	bool terse_dump;
2728 };
2729 
2730 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2731 {
2732 	struct tcf_dump_args *a = (void *)arg;
2733 	struct net *net = sock_net(a->skb->sk);
2734 
2735 	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2736 			     n, NETLINK_CB(a->cb->skb).portid,
2737 			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2738 			     RTM_NEWTFILTER, a->terse_dump, true, NULL);
2739 }
2740 
2741 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2742 			   struct sk_buff *skb, struct netlink_callback *cb,
2743 			   long index_start, long *p_index, bool terse)
2744 {
2745 	struct net *net = sock_net(skb->sk);
2746 	struct tcf_block *block = chain->block;
2747 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2748 	struct tcf_proto *tp, *tp_prev;
2749 	struct tcf_dump_args arg;
2750 
2751 	for (tp = __tcf_get_next_proto(chain, NULL);
2752 	     tp;
2753 	     tp_prev = tp,
2754 		     tp = __tcf_get_next_proto(chain, tp),
2755 		     tcf_proto_put(tp_prev, true, NULL),
2756 		     (*p_index)++) {
2757 		if (*p_index < index_start)
2758 			continue;
2759 		if (TC_H_MAJ(tcm->tcm_info) &&
2760 		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
2761 			continue;
2762 		if (TC_H_MIN(tcm->tcm_info) &&
2763 		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
2764 			continue;
2765 		if (*p_index > index_start)
2766 			memset(&cb->args[1], 0,
2767 			       sizeof(cb->args) - sizeof(cb->args[0]));
2768 		if (cb->args[1] == 0) {
2769 			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2770 					  NETLINK_CB(cb->skb).portid,
2771 					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
2772 					  RTM_NEWTFILTER, false, true, NULL) <= 0)
2773 				goto errout;
2774 			cb->args[1] = 1;
2775 		}
2776 		if (!tp->ops->walk)
2777 			continue;
2778 		arg.w.fn = tcf_node_dump;
2779 		arg.skb = skb;
2780 		arg.cb = cb;
2781 		arg.block = block;
2782 		arg.q = q;
2783 		arg.parent = parent;
2784 		arg.w.stop = 0;
2785 		arg.w.skip = cb->args[1] - 1;
2786 		arg.w.count = 0;
2787 		arg.w.cookie = cb->args[2];
2788 		arg.terse_dump = terse;
2789 		tp->ops->walk(tp, &arg.w, true);
2790 		cb->args[2] = arg.w.cookie;
2791 		cb->args[1] = arg.w.count + 1;
2792 		if (arg.w.stop)
2793 			goto errout;
2794 	}
2795 	return true;
2796 
2797 errout:
2798 	tcf_proto_put(tp, true, NULL);
2799 	return false;
2800 }
2801 
2802 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2803 	[TCA_CHAIN]      = { .type = NLA_U32 },
2804 	[TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2805 };
2806 
2807 /* called with RTNL */
2808 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2809 {
2810 	struct tcf_chain *chain, *chain_prev;
2811 	struct net *net = sock_net(skb->sk);
2812 	struct nlattr *tca[TCA_MAX + 1];
2813 	struct Qdisc *q = NULL;
2814 	struct tcf_block *block;
2815 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2816 	bool terse_dump = false;
2817 	long index_start;
2818 	long index;
2819 	u32 parent;
2820 	int err;
2821 
2822 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2823 		return skb->len;
2824 
2825 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2826 				     tcf_tfilter_dump_policy, cb->extack);
2827 	if (err)
2828 		return err;
2829 
2830 	if (tca[TCA_DUMP_FLAGS]) {
2831 		struct nla_bitfield32 flags =
2832 			nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2833 
2834 		terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2835 	}
2836 
2837 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2838 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2839 		if (!block)
2840 			goto out;
2841 		/* If we work with a block index, q is NULL and the parent value
2842 		 * will never be used in the following code; the check in
2843 		 * tcf_fill_node prevents it. However, the compiler does not
2844 		 * see that far, so set parent to zero to silence the warning
2845 		 * about parent being uninitialized.
2846 		 */
2847 		parent = 0;
2848 	} else {
2849 		const struct Qdisc_class_ops *cops;
2850 		struct net_device *dev;
2851 		unsigned long cl = 0;
2852 
2853 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2854 		if (!dev)
2855 			return skb->len;
2856 
2857 		parent = tcm->tcm_parent;
2858 		if (!parent)
2859 			q = rtnl_dereference(dev->qdisc);
2860 		else
2861 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2862 		if (!q)
2863 			goto out;
2864 		cops = q->ops->cl_ops;
2865 		if (!cops)
2866 			goto out;
2867 		if (!cops->tcf_block)
2868 			goto out;
2869 		if (TC_H_MIN(tcm->tcm_parent)) {
2870 			cl = cops->find(q, tcm->tcm_parent);
2871 			if (cl == 0)
2872 				goto out;
2873 		}
2874 		block = cops->tcf_block(q, cl, NULL);
2875 		if (!block)
2876 			goto out;
2877 		parent = block->classid;
2878 		if (tcf_block_shared(block))
2879 			q = NULL;
2880 	}
2881 
2882 	index_start = cb->args[0];
2883 	index = 0;
2884 
2885 	for (chain = __tcf_get_next_chain(block, NULL);
2886 	     chain;
2887 	     chain_prev = chain,
2888 		     chain = __tcf_get_next_chain(block, chain),
2889 		     tcf_chain_put(chain_prev)) {
2890 		if (tca[TCA_CHAIN] &&
2891 		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2892 			continue;
2893 		if (!tcf_chain_dump(chain, q, parent, skb, cb,
2894 				    index_start, &index, terse_dump)) {
2895 			tcf_chain_put(chain);
2896 			err = -EMSGSIZE;
2897 			break;
2898 		}
2899 	}
2900 
2901 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2902 		tcf_block_refcnt_put(block, true);
2903 	cb->args[0] = index;
2904 
2905 out:
2906 	/* If we made no progress, the error (EMSGSIZE) is real */
2907 	if (skb->len == 0 && err)
2908 		return err;
2909 	return skb->len;
2910 }
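
/* This is the handler behind "tc filter show dev eth0 ingress". A dump
 * that sets TCA_DUMP_FLAGS_TERSE (iproute2 exposes this as brief output,
 * where supported) goes through ->terse_dump(), which reports roughly
 * the handle and actions instead of the full match. cb->args[] carries
 * the resume cursor across multi-part dumps.
 */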
2911 
2912 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2913 			      void *tmplt_priv, u32 chain_index,
2914 			      struct net *net, struct sk_buff *skb,
2915 			      struct tcf_block *block,
2916 			      u32 portid, u32 seq, u16 flags, int event,
2917 			      struct netlink_ext_ack *extack)
2918 {
2919 	unsigned char *b = skb_tail_pointer(skb);
2920 	const struct tcf_proto_ops *ops;
2921 	struct nlmsghdr *nlh;
2922 	struct tcmsg *tcm;
2923 	void *priv;
2924 
2925 	ops = tmplt_ops;
2926 	priv = tmplt_priv;
2927 
2928 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2929 	if (!nlh)
2930 		goto out_nlmsg_trim;
2931 	tcm = nlmsg_data(nlh);
2932 	tcm->tcm_family = AF_UNSPEC;
2933 	tcm->tcm__pad1 = 0;
2934 	tcm->tcm__pad2 = 0;
2935 	tcm->tcm_handle = 0;
2936 	if (block->q) {
2937 		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2938 		tcm->tcm_parent = block->q->handle;
2939 	} else {
2940 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2941 		tcm->tcm_block_index = block->index;
2942 	}
2943 
2944 	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2945 		goto nla_put_failure;
2946 
2947 	if (ops) {
2948 		if (nla_put_string(skb, TCA_KIND, ops->kind))
2949 			goto nla_put_failure;
2950 		if (ops->tmplt_dump(skb, net, priv) < 0)
2951 			goto nla_put_failure;
2952 	}
2953 
2954 	if (extack && extack->_msg &&
2955 	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2956 		goto out_nlmsg_trim;
2957 
2958 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2959 
2960 	return skb->len;
2961 
2962 out_nlmsg_trim:
2963 nla_put_failure:
2964 	nlmsg_trim(skb, b);
2965 	return -EMSGSIZE;
2966 }
2967 
2968 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2969 			   u32 seq, u16 flags, int event, bool unicast,
2970 			   struct netlink_ext_ack *extack)
2971 {
2972 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2973 	struct tcf_block *block = chain->block;
2974 	struct net *net = block->net;
2975 	struct sk_buff *skb;
2976 	int err = 0;
2977 
2978 	if (!unicast && !rtnl_notify_needed(net, flags, RTNLGRP_TC))
2979 		return 0;
2980 
2981 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2982 	if (!skb)
2983 		return -ENOBUFS;
2984 
2985 	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2986 			       chain->index, net, skb, block, portid,
2987 			       seq, flags, event, extack) <= 0) {
2988 		kfree_skb(skb);
2989 		return -EINVAL;
2990 	}
2991 
2992 	if (unicast)
2993 		err = rtnl_unicast(skb, net, portid);
2994 	else
2995 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2996 				     flags & NLM_F_ECHO);
2997 
2998 	return err;
2999 }
3000 
3001 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
3002 				  void *tmplt_priv, u32 chain_index,
3003 				  struct tcf_block *block, struct sk_buff *oskb,
3004 				  u32 seq, u16 flags)
3005 {
3006 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
3007 	struct net *net = block->net;
3008 	struct sk_buff *skb;
3009 
3010 	if (!rtnl_notify_needed(net, flags, RTNLGRP_TC))
3011 		return 0;
3012 
3013 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3014 	if (!skb)
3015 		return -ENOBUFS;
3016 
3017 	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
3018 			       block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) {
3019 		kfree_skb(skb);
3020 		return -EINVAL;
3021 	}
3022 
3023 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
3024 }
3025 
3026 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
3027 			      struct nlattr **tca,
3028 			      struct netlink_ext_ack *extack)
3029 {
3030 	const struct tcf_proto_ops *ops;
3031 	char name[IFNAMSIZ];
3032 	void *tmplt_priv;
3033 
3034 	/* If kind is not set, the user did not specify a template. */
3035 	if (!tca[TCA_KIND])
3036 		return 0;
3037 
3038 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
3039 		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
3040 		return -EINVAL;
3041 	}
3042 
3043 	ops = tcf_proto_lookup_ops(name, true, extack);
3044 	if (IS_ERR(ops))
3045 		return PTR_ERR(ops);
3046 	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump ||
3047 	    !ops->tmplt_reoffload) {
3048 		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
3049 		module_put(ops->owner);
3050 		return -EOPNOTSUPP;
3051 	}
3052 
3053 	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
3054 	if (IS_ERR(tmplt_priv)) {
3055 		module_put(ops->owner);
3056 		return PTR_ERR(tmplt_priv);
3057 	}
3058 	chain->tmplt_ops = ops;
3059 	chain->tmplt_priv = tmplt_priv;
3060 	return 0;
3061 }
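
/* Chain templates constrain every filter later added to the chain. An
 * illustrative setup:
 *
 *	tc chain add dev eth0 ingress chain 1 protocol ip \
 *		flower dst_mac 00:00:00:00:00:00/ff:ff:ff:ff:ff:00
 *
 * stores a flower template via ->tmplt_create(); filters later added to
 * chain 1 must be of the same kind (enforced in tc_new_tfilter() above),
 * and ->tmplt_reoffload() exists so the template can be replayed to
 * offload callbacks that bind after its creation.
 */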
3062 
3063 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
3064 			       void *tmplt_priv)
3065 {
3066 	/* If template ops are not set, there is no work to do. */
3067 	if (!tmplt_ops)
3068 		return;
3069 
3070 	tmplt_ops->tmplt_destroy(tmplt_priv);
3071 	module_put(tmplt_ops->owner);
3072 }
3073 
3074 /* Add/delete/get a chain */
3075 
3076 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
3077 			struct netlink_ext_ack *extack)
3078 {
3079 	struct net *net = sock_net(skb->sk);
3080 	struct nlattr *tca[TCA_MAX + 1];
3081 	struct tcmsg *t;
3082 	u32 parent;
3083 	u32 chain_index;
3084 	struct Qdisc *q;
3085 	struct tcf_chain *chain;
3086 	struct tcf_block *block;
3087 	unsigned long cl;
3088 	int err;
3089 
3090 replay:
3091 	q = NULL;
3092 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
3093 				     rtm_tca_policy, extack);
3094 	if (err < 0)
3095 		return err;
3096 
3097 	t = nlmsg_data(n);
3098 	parent = t->tcm_parent;
3099 	cl = 0;
3100 
3101 	block = tcf_block_find(net, &q, &parent, &cl,
3102 			       t->tcm_ifindex, t->tcm_block_index, extack);
3103 	if (IS_ERR(block))
3104 		return PTR_ERR(block);
3105 
3106 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
3107 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
3108 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
3109 		err = -EINVAL;
3110 		goto errout_block;
3111 	}
3112 
3113 	mutex_lock(&block->lock);
3114 	chain = tcf_chain_lookup(block, chain_index);
3115 	if (n->nlmsg_type == RTM_NEWCHAIN) {
3116 		if (chain) {
3117 			if (tcf_chain_held_by_acts_only(chain)) {
3118 				/* The chain exists only because there is
3119 				 * some action referencing it.
3120 				 */
3121 				tcf_chain_hold(chain);
3122 			} else {
3123 				NL_SET_ERR_MSG(extack, "Filter chain already exists");
3124 				err = -EEXIST;
3125 				goto errout_block_locked;
3126 			}
3127 		} else {
3128 			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
3129 				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
3130 				err = -ENOENT;
3131 				goto errout_block_locked;
3132 			}
3133 			chain = tcf_chain_create(block, chain_index);
3134 			if (!chain) {
3135 				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
3136 				err = -ENOMEM;
3137 				goto errout_block_locked;
3138 			}
3139 		}
3140 	} else {
3141 		if (!chain || tcf_chain_held_by_acts_only(chain)) {
3142 			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
3143 			err = -EINVAL;
3144 			goto errout_block_locked;
3145 		}
3146 		tcf_chain_hold(chain);
3147 	}
3148 
3149 	if (n->nlmsg_type == RTM_NEWCHAIN) {
3150 		/* Modifying chain requires holding parent block lock. In case
3151 		 * the chain was successfully added, take a reference to the
3152 		 * chain. This ensures that an empty chain does not disappear at
3153 		 * the end of this function.
3154 		 */
3155 		tcf_chain_hold(chain);
3156 		chain->explicitly_created = true;
3157 	}
3158 	mutex_unlock(&block->lock);
3159 
3160 	switch (n->nlmsg_type) {
3161 	case RTM_NEWCHAIN:
3162 		err = tc_chain_tmplt_add(chain, net, tca, extack);
3163 		if (err) {
3164 			tcf_chain_put_explicitly_created(chain);
3165 			goto errout;
3166 		}
3167 
3168 		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
3169 				RTM_NEWCHAIN, false, extack);
3170 		break;
3171 	case RTM_DELCHAIN:
3172 		tfilter_notify_chain(net, skb, block, q, parent, n,
3173 				     chain, RTM_DELTFILTER, extack);
3174 		/* Flush the chain first as the user requested chain removal. */
3175 		tcf_chain_flush(chain, true);
3176 		/* In case the chain was successfully deleted, put a reference
3177 		 * to the chain previously taken during addition.
3178 		 */
3179 		tcf_chain_put_explicitly_created(chain);
3180 		break;
3181 	case RTM_GETCHAIN:
3182 		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
3183 				      n->nlmsg_flags, n->nlmsg_type, true, extack);
3184 		if (err < 0)
3185 			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
3186 		break;
3187 	default:
3188 		err = -EOPNOTSUPP;
3189 		NL_SET_ERR_MSG(extack, "Unsupported message type");
3190 		goto errout;
3191 	}
3192 
3193 errout:
3194 	tcf_chain_put(chain);
3195 errout_block:
3196 	tcf_block_release(q, block, true);
3197 	if (err == -EAGAIN)
3198 		/* Replay the request. */
3199 		goto replay;
3200 	return err;
3201 
3202 errout_block_locked:
3203 	mutex_unlock(&block->lock);
3204 	goto errout_block;
3205 }
3206 
3207 /* called with RTNL */
3208 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
3209 {
3210 	struct net *net = sock_net(skb->sk);
3211 	struct nlattr *tca[TCA_MAX + 1];
3212 	struct Qdisc *q = NULL;
3213 	struct tcf_block *block;
3214 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
3215 	struct tcf_chain *chain;
3216 	long index_start;
3217 	long index;
3218 	int err;
3219 
3220 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
3221 		return skb->len;
3222 
3223 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
3224 				     rtm_tca_policy, cb->extack);
3225 	if (err)
3226 		return err;
3227 
3228 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
3229 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
3230 		if (!block)
3231 			goto out;
3232 	} else {
3233 		const struct Qdisc_class_ops *cops;
3234 		struct net_device *dev;
3235 		unsigned long cl = 0;
3236 
3237 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
3238 		if (!dev)
3239 			return skb->len;
3240 
3241 		if (!tcm->tcm_parent)
3242 			q = rtnl_dereference(dev->qdisc);
3243 		else
3244 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
3245 
3246 		if (!q)
3247 			goto out;
3248 		cops = q->ops->cl_ops;
3249 		if (!cops)
3250 			goto out;
3251 		if (!cops->tcf_block)
3252 			goto out;
3253 		if (TC_H_MIN(tcm->tcm_parent)) {
3254 			cl = cops->find(q, tcm->tcm_parent);
3255 			if (cl == 0)
3256 				goto out;
3257 		}
3258 		block = cops->tcf_block(q, cl, NULL);
3259 		if (!block)
3260 			goto out;
3261 		if (tcf_block_shared(block))
3262 			q = NULL;
3263 	}
3264 
3265 	index_start = cb->args[0];
3266 	index = 0;
3267 
3268 	mutex_lock(&block->lock);
3269 	list_for_each_entry(chain, &block->chain_list, list) {
3270 		if ((tca[TCA_CHAIN] &&
3271 		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
3272 			continue;
3273 		if (index < index_start) {
3274 			index++;
3275 			continue;
3276 		}
3277 		if (tcf_chain_held_by_acts_only(chain))
3278 			continue;
3279 		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3280 					 chain->index, net, skb, block,
3281 					 NETLINK_CB(cb->skb).portid,
3282 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3283 					 RTM_NEWCHAIN, NULL);
3284 		if (err <= 0)
3285 			break;
3286 		index++;
3287 	}
3288 	mutex_unlock(&block->lock);
3289 
3290 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3291 		tcf_block_refcnt_put(block, true);
3292 	cb->args[0] = index;
3293 
3294 out:
3295 	/* If we made no progress, the error (EMSGSIZE) is real */
3296 	if (skb->len == 0 && err)
3297 		return err;
3298 	return skb->len;
3299 }
3300 
3301 int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
3302 		     int police, struct tcf_proto *tp, u32 handle,
3303 		     bool use_action_miss)
3304 {
3305 	int err = 0;
3306 
3307 #ifdef CONFIG_NET_CLS_ACT
3308 	exts->type = 0;
3309 	exts->nr_actions = 0;
3310 	exts->miss_cookie_node = NULL;
3311 	/* Note: we do not yet own a reference on net.
3312 	 * This reference might be taken later by tcf_exts_get_net().
3313 	 */
3314 	exts->net = net;
3315 	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
3316 				GFP_KERNEL);
3317 	if (!exts->actions)
3318 		return -ENOMEM;
3319 #endif
3320 
3321 	exts->action = action;
3322 	exts->police = police;
3323 
3324 	if (!use_action_miss)
3325 		return 0;
3326 
3327 	err = tcf_exts_miss_cookie_base_alloc(exts, tp, handle);
3328 	if (err)
3329 		goto err_miss_alloc;
3330 
3331 	return 0;
3332 
3333 err_miss_alloc:
3334 	tcf_exts_destroy(exts);
3335 #ifdef CONFIG_NET_CLS_ACT
3336 	exts->actions = NULL;
3337 #endif
3338 	return err;
3339 }
3340 EXPORT_SYMBOL(tcf_exts_init_ex);
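
/* Hypothetical sketch of a classifier's ->change() using this helper
 * (the TCA_FOO_* attribute names are illustrative):
 *
 *	err = tcf_exts_init_ex(&f->exts, net, TCA_FOO_ACT, TCA_FOO_POLICE,
 *			       tp, handle, use_miss);
 *	if (err)
 *		return err;
 *
 * Passing use_action_miss = true also allocates the miss-cookie base so
 * partially executed offloaded actions can resume in software (see
 * tcf_classify() above).
 */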
3341 
3342 void tcf_exts_destroy(struct tcf_exts *exts)
3343 {
3344 	tcf_exts_miss_cookie_base_destroy(exts);
3345 
3346 #ifdef CONFIG_NET_CLS_ACT
3347 	if (exts->actions) {
3348 		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3349 		kfree(exts->actions);
3350 	}
3351 	exts->nr_actions = 0;
3352 #endif
3353 }
3354 EXPORT_SYMBOL(tcf_exts_destroy);
3355 
3356 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3357 			 struct nlattr *rate_tlv, struct tcf_exts *exts,
3358 			 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
3359 {
3360 #ifdef CONFIG_NET_CLS_ACT
3361 	{
3362 		int init_res[TCA_ACT_MAX_PRIO] = {};
3363 		struct tc_action *act;
3364 		size_t attr_size = 0;
3365 
3366 		if (exts->police && tb[exts->police]) {
3367 			struct tc_action_ops *a_o;
3368 
3369 			flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3370 			a_o = tc_action_load_ops(tb[exts->police], flags,
3371 						 extack);
3372 			if (IS_ERR(a_o))
3373 				return PTR_ERR(a_o);
3374 			act = tcf_action_init_1(net, tp, tb[exts->police],
3375 						rate_tlv, a_o, init_res, flags,
3376 						extack);
3377 			module_put(a_o->owner);
3378 			if (IS_ERR(act))
3379 				return PTR_ERR(act);
3380 
3381 			act->type = exts->type = TCA_OLD_COMPAT;
3382 			exts->actions[0] = act;
3383 			exts->nr_actions = 1;
3384 			tcf_idr_insert_many(exts->actions, init_res);
3385 		} else if (exts->action && tb[exts->action]) {
3386 			int err;
3387 
3388 			flags |= TCA_ACT_FLAGS_BIND;
3389 			err = tcf_action_init(net, tp, tb[exts->action],
3390 					      rate_tlv, exts->actions, init_res,
3391 					      &attr_size, flags, fl_flags,
3392 					      extack);
3393 			if (err < 0)
3394 				return err;
3395 			exts->nr_actions = err;
3396 		}
3397 	}
3398 #else
3399 	if ((exts->action && tb[exts->action]) ||
3400 	    (exts->police && tb[exts->police])) {
3401 		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3402 		return -EOPNOTSUPP;
3403 	}
3404 #endif
3405 
3406 	return 0;
3407 }
3408 EXPORT_SYMBOL(tcf_exts_validate_ex);
3409 
3410 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3411 		      struct nlattr *rate_tlv, struct tcf_exts *exts,
3412 		      u32 flags, struct netlink_ext_ack *extack)
3413 {
3414 	return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
3415 				    flags, 0, extack);
3416 }
3417 EXPORT_SYMBOL(tcf_exts_validate);
3418 
3419 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3420 {
3421 #ifdef CONFIG_NET_CLS_ACT
3422 	struct tcf_exts old = *dst;
3423 
3424 	*dst = *src;
3425 	tcf_exts_destroy(&old);
3426 #endif
3427 }
3428 EXPORT_SYMBOL(tcf_exts_change);
3429 
3430 #ifdef CONFIG_NET_CLS_ACT
3431 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3432 {
3433 	if (exts->nr_actions == 0)
3434 		return NULL;
3435 	else
3436 		return exts->actions[0];
3437 }
3438 #endif
3439 
3440 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3441 {
3442 #ifdef CONFIG_NET_CLS_ACT
3443 	struct nlattr *nest;
3444 
3445 	if (exts->action && tcf_exts_has_actions(exts)) {
3446 		/*
3447 		 * Again for backward-compatible mode: we want to work with
3448 		 * both old and new modes of entering tc data even if
3449 		 * iproute2 is newer. - jhs
3450 		 */
3451 		if (exts->type != TCA_OLD_COMPAT) {
3452 			nest = nla_nest_start_noflag(skb, exts->action);
3453 			if (nest == NULL)
3454 				goto nla_put_failure;
3455 
3456 			if (tcf_action_dump(skb, exts->actions, 0, 0, false)
3457 			    < 0)
3458 				goto nla_put_failure;
3459 			nla_nest_end(skb, nest);
3460 		} else if (exts->police) {
3461 			struct tc_action *act = tcf_exts_first_act(exts);
3462 			nest = nla_nest_start_noflag(skb, exts->police);
3463 			if (nest == NULL || !act)
3464 				goto nla_put_failure;
3465 			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3466 				goto nla_put_failure;
3467 			nla_nest_end(skb, nest);
3468 		}
3469 	}
3470 	return 0;
3471 
3472 nla_put_failure:
3473 	nla_nest_cancel(skb, nest);
3474 	return -1;
3475 #else
3476 	return 0;
3477 #endif
3478 }
3479 EXPORT_SYMBOL(tcf_exts_dump);
3480 
3481 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3482 {
3483 #ifdef CONFIG_NET_CLS_ACT
3484 	struct nlattr *nest;
3485 
3486 	if (!exts->action || !tcf_exts_has_actions(exts))
3487 		return 0;
3488 
3489 	nest = nla_nest_start_noflag(skb, exts->action);
3490 	if (!nest)
3491 		goto nla_put_failure;
3492 
3493 	if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3494 		goto nla_put_failure;
3495 	nla_nest_end(skb, nest);
3496 	return 0;
3497 
3498 nla_put_failure:
3499 	nla_nest_cancel(skb, nest);
3500 	return -1;
3501 #else
3502 	return 0;
3503 #endif
3504 }
3505 EXPORT_SYMBOL(tcf_exts_terse_dump);
3506 
3507 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3508 {
3509 #ifdef CONFIG_NET_CLS_ACT
3510 	struct tc_action *a = tcf_exts_first_act(exts);
3511 	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3512 		return -1;
3513 #endif
3514 	return 0;
3515 }
3516 EXPORT_SYMBOL(tcf_exts_dump_stats);
3517 
3518 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3519 {
3520 	if (*flags & TCA_CLS_FLAGS_IN_HW)
3521 		return;
3522 	*flags |= TCA_CLS_FLAGS_IN_HW;
3523 	if (tc_skip_sw(*flags))
3524 		atomic_inc(&block->skipswcnt);
3525 	atomic_inc(&block->offloadcnt);
3526 }
3527 
3528 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3529 {
3530 	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3531 		return;
3532 	*flags &= ~TCA_CLS_FLAGS_IN_HW;
3533 	if (tc_skip_sw(*flags))
3534 		atomic_dec(&block->skipswcnt);
3535 	atomic_dec(&block->offloadcnt);
3536 }
3537 
3538 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3539 				      struct tcf_proto *tp, u32 *cnt,
3540 				      u32 *flags, u32 diff, bool add)
3541 {
3542 	lockdep_assert_held(&block->cb_lock);
3543 
3544 	spin_lock(&tp->lock);
3545 	if (add) {
3546 		if (!*cnt)
3547 			tcf_block_offload_inc(block, flags);
3548 		*cnt += diff;
3549 	} else {
3550 		*cnt -= diff;
3551 		if (!*cnt)
3552 			tcf_block_offload_dec(block, flags);
3553 	}
3554 	spin_unlock(&tp->lock);
3555 }
3556 
3557 static void
3558 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3559 			 u32 *cnt, u32 *flags)
3560 {
3561 	lockdep_assert_held(&block->cb_lock);
3562 
3563 	spin_lock(&tp->lock);
3564 	tcf_block_offload_dec(block, flags);
3565 	*cnt = 0;
3566 	spin_unlock(&tp->lock);
3567 }
3568 
3569 static int
3570 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3571 		   void *type_data, bool err_stop)
3572 {
3573 	struct flow_block_cb *block_cb;
3574 	int ok_count = 0;
3575 	int err;
3576 
3577 	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3578 		err = block_cb->cb(type, type_data, block_cb->cb_priv);
3579 		if (err) {
3580 			if (err_stop)
3581 				return err;
3582 		} else {
3583 			ok_count++;
3584 		}
3585 	}
3586 	return ok_count;
3587 }
3588 
3589 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3590 		     void *type_data, bool err_stop, bool rtnl_held)
3591 {
3592 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3593 	int ok_count;
3594 
3595 retry:
3596 	if (take_rtnl)
3597 		rtnl_lock();
3598 	down_read(&block->cb_lock);
3599 	/* Need to obtain the rtnl lock if the block is bound to devs that
3600 	 * require it. In the block bind code cb_lock is obtained while holding
3601 	 * rtnl, so we must obtain the locks in the same order here.
3602 	 */
3603 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3604 		up_read(&block->cb_lock);
3605 		take_rtnl = true;
3606 		goto retry;
3607 	}
3608 
3609 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3610 
3611 	up_read(&block->cb_lock);
3612 	if (take_rtnl)
3613 		rtnl_unlock();
3614 	return ok_count;
3615 }
3616 EXPORT_SYMBOL(tc_setup_cb_call);
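
/* Minimal sketch of a driver block callback as consumed by
 * __tc_setup_cb_call() (all names hypothetical): a 0 return is counted
 * into ok_count, a negative error either aborts the walk (err_stop) or
 * is skipped.
 *
 *	static int foo_setup_tc_block_cb(enum tc_setup_type type,
 *					 void *type_data, void *cb_priv)
 *	{
 *		switch (type) {
 *		case TC_SETUP_CLSFLOWER:
 *			return foo_flower_cmd(cb_priv, type_data);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */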
3617 
3618 /* Non-destructive filter add. If a filter that wasn't already in hardware
3619  * is successfully offloaded, increment the block offloads counter. On
3620  * failure, a previously offloaded filter is considered intact and the
3621  * offloads counter is not decremented.
3622  */
3623 
3624 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3625 		    enum tc_setup_type type, void *type_data, bool err_stop,
3626 		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3627 {
3628 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3629 	int ok_count;
3630 
3631 retry:
3632 	if (take_rtnl)
3633 		rtnl_lock();
3634 	down_read(&block->cb_lock);
3635 	/* Need to obtain the rtnl lock if the block is bound to devs that
3636 	 * require it. In the block bind code cb_lock is obtained while holding
3637 	 * rtnl, so we must obtain the locks in the same order here.
3638 	 */
3639 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3640 		up_read(&block->cb_lock);
3641 		take_rtnl = true;
3642 		goto retry;
3643 	}
3644 
3645 	/* Make sure all netdevs sharing this block are offload-capable. */
3646 	if (block->nooffloaddevcnt && err_stop) {
3647 		ok_count = -EOPNOTSUPP;
3648 		goto err_unlock;
3649 	}
3650 
3651 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3652 	if (ok_count < 0)
3653 		goto err_unlock;
3654 
3655 	if (tp->ops->hw_add)
3656 		tp->ops->hw_add(tp, type_data);
3657 	if (ok_count > 0)
3658 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3659 					  ok_count, true);
3660 err_unlock:
3661 	up_read(&block->cb_lock);
3662 	if (take_rtnl)
3663 		rtnl_unlock();
3664 	return min(ok_count, 0);
3665 }
3666 EXPORT_SYMBOL(tc_setup_cb_add);
3667 
3668 /* Destructive filter replace. If a filter that wasn't already in hardware
3669  * is successfully offloaded, increment the block offload counter. On
3670  * failure, a previously offloaded filter is considered destroyed and the
3671  * offload counter is decremented.
3672  */
3673 
3674 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3675 			enum tc_setup_type type, void *type_data, bool err_stop,
3676 			u32 *old_flags, unsigned int *old_in_hw_count,
3677 			u32 *new_flags, unsigned int *new_in_hw_count,
3678 			bool rtnl_held)
3679 {
3680 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3681 	int ok_count;
3682 
3683 retry:
3684 	if (take_rtnl)
3685 		rtnl_lock();
3686 	down_read(&block->cb_lock);
3687 	/* Need to obtain the rtnl lock if the block is bound to devs that
3688 	 * require it. In the block bind code cb_lock is obtained while holding
3689 	 * rtnl, so we must obtain the locks in the same order here.
3690 	 */
3691 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3692 		up_read(&block->cb_lock);
3693 		take_rtnl = true;
3694 		goto retry;
3695 	}
3696 
3697 	/* Make sure all netdevs sharing this block are offload-capable. */
3698 	if (block->nooffloaddevcnt && err_stop) {
3699 		ok_count = -EOPNOTSUPP;
3700 		goto err_unlock;
3701 	}
3702 
3703 	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3704 	if (tp->ops->hw_del)
3705 		tp->ops->hw_del(tp, type_data);
3706 
3707 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3708 	if (ok_count < 0)
3709 		goto err_unlock;
3710 
3711 	if (tp->ops->hw_add)
3712 		tp->ops->hw_add(tp, type_data);
3713 	if (ok_count > 0)
3714 		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3715 					  new_flags, ok_count, true);
3716 err_unlock:
3717 	up_read(&block->cb_lock);
3718 	if (take_rtnl)
3719 		rtnl_unlock();
3720 	return min(ok_count, 0);
3721 }
3722 EXPORT_SYMBOL(tc_setup_cb_replace);
3723 
3724 /* Destroy the filter and decrement the block offload counter if the filter
3725  * was previously offloaded.
3726  */
3727 
3728 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3729 			enum tc_setup_type type, void *type_data, bool err_stop,
3730 			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3731 {
3732 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3733 	int ok_count;
3734 
3735 retry:
3736 	if (take_rtnl)
3737 		rtnl_lock();
3738 	down_read(&block->cb_lock);
3739 	/* Need to obtain the rtnl lock if the block is bound to devs that
3740 	 * require it. In the block bind code cb_lock is obtained while holding
3741 	 * rtnl, so we must obtain the locks in the same order here.
3742 	 */
3743 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3744 		up_read(&block->cb_lock);
3745 		take_rtnl = true;
3746 		goto retry;
3747 	}
3748 
3749 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3750 
3751 	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3752 	if (tp->ops->hw_del)
3753 		tp->ops->hw_del(tp, type_data);
3754 
3755 	up_read(&block->cb_lock);
3756 	if (take_rtnl)
3757 		rtnl_unlock();
3758 	return min(ok_count, 0);
3759 }
3760 EXPORT_SYMBOL(tc_setup_cb_destroy);
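
/* Taken together, tc_setup_cb_add/replace/destroy give classifiers one
 * offload lifecycle. A hypothetical hardware path (TC_SETUP_CLSFOO and
 * the field names are illustrative) would call
 *
 *	tc_setup_cb_add(block, tp, TC_SETUP_CLSFOO, &cls, skip_sw,
 *			&f->flags, &f->in_hw_count, rtnl_held);
 *
 * on insertion, _replace when updating (old counters are reset first, so
 * a failed replace leaves the filter counted as not offloaded), and
 * _destroy on removal.
 */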
3761 
3762 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3763 			  bool add, flow_setup_cb_t *cb,
3764 			  enum tc_setup_type type, void *type_data,
3765 			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3766 {
3767 	int err = cb(type, type_data, cb_priv);
3768 
3769 	if (err) {
3770 		if (add && tc_skip_sw(*flags))
3771 			return err;
3772 	} else {
3773 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3774 					  add);
3775 	}
3776 
3777 	return 0;
3778 }
3779 EXPORT_SYMBOL(tc_setup_cb_reoffload);
3780 
3781 static int tcf_act_get_user_cookie(struct flow_action_entry *entry,
3782 				   const struct tc_action *act)
3783 {
3784 	struct tc_cookie *user_cookie;
3785 	int err = 0;
3786 
3787 	rcu_read_lock();
3788 	user_cookie = rcu_dereference(act->user_cookie);
3789 	if (user_cookie) {
3790 		entry->user_cookie = flow_action_cookie_create(user_cookie->data,
3791 							       user_cookie->len,
3792 							       GFP_ATOMIC);
3793 		if (!entry->user_cookie)
3794 			err = -ENOMEM;
3795 	}
3796 	rcu_read_unlock();
3797 	return err;
3798 }
3799 
3800 static void tcf_act_put_user_cookie(struct flow_action_entry *entry)
3801 {
3802 	flow_action_cookie_destroy(entry->user_cookie);
3803 }
3804 
3805 void tc_cleanup_offload_action(struct flow_action *flow_action)
3806 {
3807 	struct flow_action_entry *entry;
3808 	int i;
3809 
3810 	flow_action_for_each(i, entry, flow_action) {
3811 		tcf_act_put_user_cookie(entry);
3812 		if (entry->destructor)
3813 			entry->destructor(entry->destructor_priv);
3814 	}
3815 }
3816 EXPORT_SYMBOL(tc_cleanup_offload_action);
3817 
3818 static int tc_setup_offload_act(struct tc_action *act,
3819 				struct flow_action_entry *entry,
3820 				u32 *index_inc,
3821 				struct netlink_ext_ack *extack)
3822 {
3823 #ifdef CONFIG_NET_CLS_ACT
3824 	if (act->ops->offload_act_setup)
3825 		return act->ops->offload_act_setup(act, entry, index_inc, true,
3826 						   extack);
3827 
3828 	NL_SET_ERR_MSG(extack, "Action does not support offload");
3829 	return -EOPNOTSUPP;
3831 #else
3832 	return 0;
3833 #endif
3834 }
3835 
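/* Translate a tc actions array into pre-allocated flow_action entries for
 * offload. Each action is converted under its tcfa_lock; a single tc action
 * (e.g. a multi-key pedit, see tcf_exts_num_actions() below) may expand into
 * several flow_action entries, which all share the action's hw_stats,
 * hw_index and miss cookie.
 */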
3836 int tc_setup_action(struct flow_action *flow_action,
3837 		    struct tc_action *actions[],
3838 		    u32 miss_cookie_base,
3839 		    struct netlink_ext_ack *extack)
3840 {
3841 	int i, j, k, index, err = 0;
3842 	struct tc_action *act;
3843 
3844 	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3845 	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3846 	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3847 
3848 	if (!actions)
3849 		return 0;
3850 
3851 	j = 0;
3852 	tcf_act_for_each_action(i, act, actions) {
3853 		struct flow_action_entry *entry;
3854 
3855 		entry = &flow_action->entries[j];
3856 		spin_lock_bh(&act->tcfa_lock);
3857 		err = tcf_act_get_user_cookie(entry, act);
3858 		if (err)
3859 			goto err_out_locked;
3860 
3861 		index = 0;
3862 		err = tc_setup_offload_act(act, entry, &index, extack);
3863 		if (err)
3864 			goto err_out_locked;
3865 
3866 		for (k = 0; k < index; k++) {
3867 			entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
3868 			entry[k].hw_index = act->tcfa_index;
3869 			entry[k].cookie = (unsigned long)act;
3870 			entry[k].miss_cookie =
3871 				tcf_exts_miss_cookie_get(miss_cookie_base, i);
3872 		}
3873 
3874 		j += index;
3875 
3876 		spin_unlock_bh(&act->tcfa_lock);
3877 	}
3878 
3879 err_out:
3880 	if (err)
3881 		tc_cleanup_offload_action(flow_action);
3882 
3883 	return err;
3884 err_out_locked:
3885 	spin_unlock_bh(&act->tcfa_lock);
3886 	goto err_out;
3887 }
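
/* The per-entry miss cookie packs the exts' 32-bit miss_cookie_base (see
 * union tcf_exts_miss_cookie at the top of this file) together with the
 * action's index in the exts actions array, so a hardware miss can be mapped
 * back to the exact action to continue from; conceptually:
 *
 *	union tcf_exts_miss_cookie mc = {
 *		.miss_cookie_base = miss_cookie_base,
 *		.act_index = i,
 *	};
 *	entry[k].miss_cookie = mc.miss_cookie;
 *
 * which is what the tcf_exts_miss_cookie_get() call above evaluates to.
 */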
3888 
3889 int tc_setup_offload_action(struct flow_action *flow_action,
3890 			    const struct tcf_exts *exts,
3891 			    struct netlink_ext_ack *extack)
3892 {
3893 #ifdef CONFIG_NET_CLS_ACT
3894 	u32 miss_cookie_base;
3895 
3896 	if (!exts)
3897 		return 0;
3898 
3899 	miss_cookie_base = exts->miss_cookie_node ?
3900 			   exts->miss_cookie_node->miss_cookie_base : 0;
3901 	return tc_setup_action(flow_action, exts->actions, miss_cookie_base,
3902 			       extack);
3903 #else
3904 	return 0;
3905 #endif
3906 }
3907 EXPORT_SYMBOL(tc_setup_offload_action);
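
/* Example (illustrative sketch only, modeled on cls_flower's
 * hardware-replace path; 'f', 'cls_flower' and 'skip_sw' are hypothetical):
 * the translated actions are attached to a flow rule, pushed to the block
 * callbacks, and the translation is cleaned up again once the drivers have
 * consumed it:
 *
 *	err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
 *				      cls_flower.common.extack);
 *	if (err)
 *		goto errout;
 *
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
 *	tc_cleanup_offload_action(&cls_flower.rule->action);
 */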
3908 
3909 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3910 {
3911 	unsigned int num_acts = 0;
3912 	struct tc_action *act;
3913 	int i;
3914 
3915 	tcf_exts_for_each_action(i, act, exts) {
3916 		if (is_tcf_pedit(act))
3917 			num_acts += tcf_pedit_nkeys(act);
3918 		else
3919 			num_acts++;
3920 	}
3921 	return num_acts;
3922 }
3923 EXPORT_SYMBOL(tcf_exts_num_actions);
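
/* Example (illustrative only): classifiers size the flow rule from the
 * number of flow_action entries the exts will expand into, e.g.:
 *
 *	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
 *	if (!cls_flower.rule)
 *		return -ENOMEM;
 */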
3924 
3925 #ifdef CONFIG_NET_CLS_ACT
3926 static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
3927 					u32 *p_block_index,
3928 					struct netlink_ext_ack *extack)
3929 {
3930 	*p_block_index = nla_get_u32(block_index_attr);
3931 	if (!*p_block_index) {
3932 		NL_SET_ERR_MSG(extack, "Block number may not be zero");
3933 		return -EINVAL;
3934 	}
3935 
3936 	return 0;
3937 }
3938 
3939 int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
3940 		    enum flow_block_binder_type binder_type,
3941 		    struct nlattr *block_index_attr,
3942 		    struct netlink_ext_ack *extack)
3943 {
3944 	u32 block_index;
3945 	int err;
3946 
3947 	if (!block_index_attr)
3948 		return 0;
3949 
3950 	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3951 	if (err)
3952 		return err;
3953 
3954 	qe->info.binder_type = binder_type;
3955 	qe->info.chain_head_change = tcf_chain_head_change_dflt;
3956 	qe->info.chain_head_change_priv = &qe->filter_chain;
3957 	qe->info.block_index = block_index;
3958 
3959 	return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3960 }
3961 EXPORT_SYMBOL(tcf_qevent_init);
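
/* Example (illustrative sketch only, modeled on sch_red's early_drop
 * qevent; 'q' is a hypothetical qdisc private struct): a qdisc binds a
 * qevent block from its netlink config and tears it down in ->destroy():
 *
 *	err = tcf_qevent_init(&q->qe_early_drop, sch,
 *			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 *			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
 *	...
 *	tcf_qevent_destroy(&q->qe_early_drop, sch);
 */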
3962 
3963 void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
3964 {
3965 	if (qe->info.block_index)
3966 		tcf_block_put_ext(qe->block, sch, &qe->info);
3967 }
3968 EXPORT_SYMBOL(tcf_qevent_destroy);
3969 
3970 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
3971 			       struct netlink_ext_ack *extack)
3972 {
3973 	u32 block_index;
3974 	int err;
3975 
3976 	if (!block_index_attr)
3977 		return 0;
3978 
3979 	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3980 	if (err)
3981 		return err;
3982 
3983 	/* Reject a newly configured block or a change to a different block. */
3984 	if (block_index != qe->info.block_index) {
3985 		NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
3986 		return -EINVAL;
3987 	}
3988 
3989 	return 0;
3990 }
3991 EXPORT_SYMBOL(tcf_qevent_validate_change);
3992 
3993 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
3994 				  struct sk_buff **to_free, int *ret)
3995 {
3996 	struct tcf_result cl_res;
3997 	struct tcf_proto *fl;
3998 
3999 	if (!qe->info.block_index)
4000 		return skb;
4001 
4002 	fl = rcu_dereference_bh(qe->filter_chain);
4003 
4004 	switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
4005 	case TC_ACT_SHOT:
4006 		qdisc_qstats_drop(sch);
4007 		__qdisc_drop(skb, to_free);
4008 		*ret = __NET_XMIT_BYPASS;
4009 		return NULL;
4010 	case TC_ACT_STOLEN:
4011 	case TC_ACT_QUEUED:
4012 	case TC_ACT_TRAP:
4013 		__qdisc_drop(skb, to_free);
4014 		*ret = __NET_XMIT_STOLEN;
4015 		return NULL;
4016 	case TC_ACT_REDIRECT:
4017 		skb_do_redirect(skb);
4018 		*ret = __NET_XMIT_STOLEN;
4019 		return NULL;
4020 	}
4021 
4022 	return skb;
4023 }
4024 EXPORT_SYMBOL(tcf_qevent_handle);
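
/* Example (illustrative sketch only, modeled on sch_red's enqueue path):
 * the qdisc passes the skb through the qevent filters at the relevant event
 * point; a NULL return means the skb was consumed, with *ret holding the
 * __NET_XMIT_* disposition to fold into the enqueue return code:
 *
 *	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
 *	if (!skb)
 *		return NET_XMIT_CN | ret;
 */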
4025 
4026 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
4027 {
4028 	if (!qe->info.block_index)
4029 		return 0;
4030 	return nla_put_u32(skb, attr_name, qe->info.block_index);
4031 }
4032 EXPORT_SYMBOL(tcf_qevent_dump);
4033 #endif
4034 
4035 static __net_init int tcf_net_init(struct net *net)
4036 {
4037 	struct tcf_net *tn = net_generic(net, tcf_net_id);
4038 
4039 	spin_lock_init(&tn->idr_lock);
4040 	idr_init(&tn->idr);
4041 	return 0;
4042 }
4043 
4044 static void __net_exit tcf_net_exit(struct net *net)
4045 {
4046 	struct tcf_net *tn = net_generic(net, tcf_net_id);
4047 
4048 	idr_destroy(&tn->idr);
4049 }
4050 
4051 static struct pernet_operations tcf_net_ops = {
4052 	.init = tcf_net_init,
4053 	.exit = tcf_net_exit,
4054 	.id   = &tcf_net_id,
4055 	.size = sizeof(struct tcf_net),
4056 };
4057 
4058 static int __init tc_filter_init(void)
4059 {
4060 	int err;
4061 
4062 	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
4063 	if (!tc_filter_wq)
4064 		return -ENOMEM;
4065 
4066 	err = register_pernet_subsys(&tcf_net_ops);
4067 	if (err)
4068 		goto err_register_pernet_subsys;
4069 
4070 	xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1);
4071 
4072 	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
4073 		      RTNL_FLAG_DOIT_UNLOCKED);
4074 	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
4075 		      RTNL_FLAG_DOIT_UNLOCKED);
4076 	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
4077 		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
4078 	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
4079 	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
4080 	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
4081 		      tc_dump_chain, 0);
4082 
4083 	return 0;
4084 
4085 err_register_pernet_subsys:
4086 	destroy_workqueue(tc_filter_wq);
4087 	return err;
4088 }
4089 
4090 subsys_initcall(tc_filter_init);
4091