// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>
#include <net/tc_wrapper.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
EXPORT_SYMBOL(tc_skb_ext_tc);

void tc_skb_ext_tc_enable(void)
{
	static_branch_inc(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_enable);

void tc_skb_ext_tc_disable(void)
{
	static_branch_dec(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_disable);
#endif
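
/* Illustrative sketch (editor's addition, not part of the original file):
 * tc_skb_ext_tc is a static branch, so enable/disable calls must be
 * balanced. A hypothetical feature needing the tc skb extension would
 * pair them across its setup/teardown paths:
 *
 *	static int my_feature_init(void)	// hypothetical name
 *	{
 *		tc_skb_ext_tc_enable();
 *		return 0;
 *	}
 *
 *	static void my_feature_exit(void)	// hypothetical name
 *	{
 *		tc_skb_ext_tc_disable();
 *	}
 */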

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register (unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);

	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
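
/* Illustrative sketch (editor's addition): how a classifier module would
 * typically register its ops. cls_example_ops and its callbacks are
 * hypothetical names, following the pattern of in-tree classifiers; the
 * register call fails with -EEXIST if the kind is already taken.
 *
 *	static struct tcf_proto_ops cls_example_ops __read_mostly = {
 *		.kind		= "example",
 *		.classify	= example_classify,
 *		.init		= example_init,
 *		.destroy	= example_destroy,
 *		...
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init cls_example_module_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_example_ops);
 *	}
 *
 *	static void __exit cls_example_module_exit(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_example_ops);
 *	}
 */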

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
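
/* Illustrative sketch (editor's addition): a classifier defers freeing a
 * filter past an RCU grace period by embedding a struct rcu_work and
 * handing it to tcf_queue_work(). my_filter/my_filter_free_work are
 * hypothetical names; the pattern mirrors in-tree cls_* users.
 *
 *	struct my_filter {
 *		struct rcu_work rwork;
 *		...
 *	};
 *
 *	static void my_filter_free_work(struct work_struct *work)
 *	{
 *		struct my_filter *f = container_of(to_rcu_work(work),
 *						   struct my_filter, rwork);
 *		kfree(f);
 *	}
 *
 *	...
 *	tcf_queue_work(&f->rwork, my_filter_free_work);
 */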

/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
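
/* Worked example (editor's addition): with no existing filter, the
 * allocated prio is TC_H_MAJ(0xC0000000) = 0xC0000000. If the
 * lowest-priority existing filter has prio 0xC0000000, the next
 * auto-allocated value is TC_H_MAJ(0xC0000000 - 1) = 0xBFFF0000, i.e.
 * each auto-allocated priority sits one major step below the previous.
 */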

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send a notification only when we get the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it, and the user
	 * ought not to know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
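
/* Illustrative sketch (editor's addition): an action that jumps to a
 * chain (e.g. via TC_ACT_GOTO_CHAIN) keeps the target chain alive with
 * an action reference for as long as the action exists, and drops it on
 * teardown:
 *
 *	chain = tcf_chain_get_by_act(block, chain_index);
 *	if (!chain)
 *		return -ENOMEM;
 *	...
 *	tcf_chain_put_by_act(chain);	// on action teardown
 */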

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding the block lock.
	 * However, once the block is unlocked the chain can change
	 * concurrently, so save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev, struct Qdisc *sch,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	bo->sch = sch;
	bo->cb_list_head = &flow_block->cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct Qdisc *sch = block_cb->indr.sch;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	rtnl_lock();
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	tcf_block_unbind(block, &bo);
	up_write(&block->cb_lock);
	rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev, struct Qdisc *sch,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If the tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid the bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * a block. It properly obtains block->lock and takes a reference to the chain
 * before returning it. Users of this function must be tolerant to concurrent
 * chain insertion/deletion, or must ensure that no concurrent chain
 * modification is possible. Note that netlink dump callbacks cannot guarantee
 * a consistent dump because the rtnl lock is released each time the skb is
 * filled with data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
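
/* Illustrative sketch (editor's addition): the intended iteration
 * pattern, as used by tcf_block_flush_all_chains() below. Passing the
 * previous chain back in both advances the cursor and releases the
 * reference taken on it:
 *
 *	struct tcf_chain *chain;
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain)) {
 *		// use chain; a reference is held across the body
 *	}
 */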

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * a chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion, or must ensure that no concurrent chain modification
 * is possible. Note that netlink dump callbacks cannot guarantee a
 * consistent dump because the rtnl lock is released each time the skb is
 * filled with data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, true, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
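
/* Illustrative sketch (editor's addition): iterating all classifiers on
 * a chain, mirroring the chain iterator above (see tfilter_notify_chain()
 * later in this file for an in-tree user):
 *
 *	struct tcf_proto *tp;
 *
 *	for (tp = tcf_get_next_proto(chain, NULL);
 *	     tp;
 *	     tp = tcf_get_next_proto(chain, tp)) {
 *		// use tp; a reference is held across the body
 *	}
 */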

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = rcu_dereference(dev->qdisc);
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for a filter attached to a class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take a reference to the block in order to support
		 * execution of the cls API rules update path without the rtnl
		 * lock. The caller must release the block when finished using
		 * it. The 'if' branch of this conditional obtains its
		 * reference to the block by calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when the last chain is freed. However, if
		 * chain_list is empty, the block has to be deallocated
		 * manually. Once the block's reference counter has reached 0,
		 * it is no longer possible to increment it or to add new
		 * chains to the block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
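
/* Illustrative sketch (editor's addition): a classful qdisc typically
 * obtains its block in ->init() and releases it in ->destroy(). The
 * my_sched_data layout is hypothetical; the call pattern follows in-tree
 * users of tcf_block_get()/tcf_block_put():
 *
 *	struct my_sched_data {			// hypothetical
 *		struct tcf_block *block;
 *		struct tcf_proto __rcu *filter_list;
 *	};
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *	...
 *	tcf_block_put(q->block);		// in ->destroy()
 */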

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should all be removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans the classifier chain attached
 * to this qdisc, (optionally) tests for protocol, and asks the
 * specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 16;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = skb_protocol(skb, false);
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tc_classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}

int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int ret;

	if (block) {
		ext = skb_ext_find(skb, TC_SKB_EXT);

		if (ext && ext->chain) {
			struct tcf_chain *fchain;

			fchain = tcf_chain_lookup_rcu(block, ext->chain);
			if (!fchain)
				return TC_ACT_SHOT;

			/* Consume, so cloned/redirect skbs won't inherit ext */
			skb_ext_del(skb, TC_SKB_EXT);

			tp = rcu_dereference_bh(fchain->filter_chain);
			last_executed_chain = fchain->index;
		}
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
			     &last_executed_chain);

	if (tc_skb_ext_tc_enabled()) {
		/* If we missed on some chain */
		if (ret == TC_ACT_UNSPEC && last_executed_chain) {
			struct tc_skb_cb *cb = tc_skb_cb(skb);

			ext = tc_skb_ext_alloc(skb);
			if (WARN_ON_ONCE(!ext))
				return TC_ACT_SHOT;
			ext->chain = last_executed_chain;
			ext->mru = cb->mru;
			ext->post_ct = cb->post_ct;
			ext->post_ct_snat = cb->post_ct_snat;
			ext->post_ct_dnat = cb->post_ct_dnat;
			ext->zone = cb->zone;
		}
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify);
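
/* Illustrative sketch (editor's addition): a qdisc's enqueue path calls
 * tcf_classify() on its filter list and acts on the verdict; error
 * handling is trimmed and the drop handling is qdisc specific:
 *
 *	struct tcf_result res;
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *	int result = tcf_classify(skb, NULL, fl, &res, false);
 *
 *	switch (result) {
 *	case TC_ACT_SHOT:
 *		kfree_skb(skb);
 *		return NET_XMIT_DROP;
 *	default:
 *		// res.classid selects the target class
 *		break;
 *	}
 */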

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);

/* Try to insert the new proto.
 * If a proto with the specified priority already exists, free the new proto
 * and return the existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}

static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for the existence of a proto-tcf with this priority */
1792 	for (pprev = &chain->filter_chain;
1793 	     (tp = tcf_chain_dereference(*pprev, chain));
1794 	     pprev = &tp->next) {
1795 		if (tp->prio >= prio) {
1796 			if (tp->prio == prio) {
1797 				if (prio_allocate ||
1798 				    (tp->protocol != protocol && protocol))
1799 					return ERR_PTR(-EINVAL);
1800 			} else {
1801 				tp = NULL;
1802 			}
1803 			break;
1804 		}
1805 	}
1806 	chain_info->pprev = pprev;
1807 	if (tp) {
1808 		chain_info->next = tp->next;
1809 		tcf_proto_get(tp);
1810 	} else {
1811 		chain_info->next = NULL;
1812 	}
1813 	return tp;
1814 }
1815 
1816 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1817 			 struct tcf_proto *tp, struct tcf_block *block,
1818 			 struct Qdisc *q, u32 parent, void *fh,
1819 			 u32 portid, u32 seq, u16 flags, int event,
1820 			 bool terse_dump, bool rtnl_held)
1821 {
1822 	struct tcmsg *tcm;
1823 	struct nlmsghdr  *nlh;
1824 	unsigned char *b = skb_tail_pointer(skb);
1825 
1826 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1827 	if (!nlh)
1828 		goto out_nlmsg_trim;
1829 	tcm = nlmsg_data(nlh);
1830 	tcm->tcm_family = AF_UNSPEC;
1831 	tcm->tcm__pad1 = 0;
1832 	tcm->tcm__pad2 = 0;
1833 	if (q) {
1834 		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1835 		tcm->tcm_parent = parent;
1836 	} else {
1837 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1838 		tcm->tcm_block_index = block->index;
1839 	}
1840 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1841 	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1842 		goto nla_put_failure;
1843 	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1844 		goto nla_put_failure;
1845 	if (!fh) {
1846 		tcm->tcm_handle = 0;
1847 	} else if (terse_dump) {
1848 		if (tp->ops->terse_dump) {
1849 			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
1850 						rtnl_held) < 0)
1851 				goto nla_put_failure;
1852 		} else {
1853 			goto cls_op_not_supp;
1854 		}
1855 	} else {
1856 		if (tp->ops->dump &&
1857 		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
1858 			goto nla_put_failure;
1859 	}
1860 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1861 	return skb->len;
1862 
1863 out_nlmsg_trim:
1864 nla_put_failure:
1865 cls_op_not_supp:
1866 	nlmsg_trim(skb, b);
1867 	return -1;
1868 }
1869 
1870 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1871 			  struct nlmsghdr *n, struct tcf_proto *tp,
1872 			  struct tcf_block *block, struct Qdisc *q,
1873 			  u32 parent, void *fh, int event, bool unicast,
1874 			  bool rtnl_held)
1875 {
1876 	struct sk_buff *skb;
1877 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1878 	int err = 0;
1879 
1880 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1881 	if (!skb)
1882 		return -ENOBUFS;
1883 
1884 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1885 			  n->nlmsg_seq, n->nlmsg_flags, event,
1886 			  false, rtnl_held) <= 0) {
1887 		kfree_skb(skb);
1888 		return -EINVAL;
1889 	}
1890 
1891 	if (unicast)
1892 		err = rtnl_unicast(skb, net, portid);
1893 	else
1894 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1895 				     n->nlmsg_flags & NLM_F_ECHO);
1896 	return err;
1897 }
1898 
1899 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1900 			      struct nlmsghdr *n, struct tcf_proto *tp,
1901 			      struct tcf_block *block, struct Qdisc *q,
1902 			      u32 parent, void *fh, bool unicast, bool *last,
1903 			      bool rtnl_held, struct netlink_ext_ack *extack)
1904 {
1905 	struct sk_buff *skb;
1906 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1907 	int err;
1908 
1909 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1910 	if (!skb)
1911 		return -ENOBUFS;
1912 
1913 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1914 			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
1915 			  false, rtnl_held) <= 0) {
1916 		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
1917 		kfree_skb(skb);
1918 		return -EINVAL;
1919 	}
1920 
1921 	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
1922 	if (err) {
1923 		kfree_skb(skb);
1924 		return err;
1925 	}
1926 
1927 	if (unicast)
1928 		err = rtnl_unicast(skb, net, portid);
1929 	else
1930 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1931 				     n->nlmsg_flags & NLM_F_ECHO);
1932 	if (err < 0)
1933 		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
1934 
1935 	return err;
1936 }
1937 
1938 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1939 				 struct tcf_block *block, struct Qdisc *q,
1940 				 u32 parent, struct nlmsghdr *n,
1941 				 struct tcf_chain *chain, int event)
1942 {
1943 	struct tcf_proto *tp;
1944 
1945 	for (tp = tcf_get_next_proto(chain, NULL);
1946 	     tp; tp = tcf_get_next_proto(chain, tp))
1947 		tfilter_notify(net, oskb, n, tp, block,
1948 			       q, parent, NULL, event, false, true);
1949 }
1950 
1951 static void tfilter_put(struct tcf_proto *tp, void *fh)
1952 {
1953 	if (tp->ops->put && fh)
1954 		tp->ops->put(tp, fh);
1955 }
1956 
1957 static bool is_qdisc_ingress(__u32 classid)
1958 {
1959 	return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS));
1960 }
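
/* Editor's note (illustrative, not in the original source): with
 * TC_H_MIN_INGRESS == 0xFFF2U, a clsact/ingress parent such as
 * TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS) (ffff:fff2) makes the helper
 * above return true, while an ordinary classid such as 1:10 returns false.
 */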
1961 
1962 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1963 			  struct netlink_ext_ack *extack)
1964 {
1965 	struct net *net = sock_net(skb->sk);
1966 	struct nlattr *tca[TCA_MAX + 1];
1967 	char name[IFNAMSIZ];
1968 	struct tcmsg *t;
1969 	u32 protocol;
1970 	u32 prio;
1971 	bool prio_allocate;
1972 	u32 parent;
1973 	u32 chain_index;
1974 	struct Qdisc *q;
1975 	struct tcf_chain_info chain_info;
1976 	struct tcf_chain *chain;
1977 	struct tcf_block *block;
1978 	struct tcf_proto *tp;
1979 	unsigned long cl;
1980 	void *fh;
1981 	int err;
1982 	int tp_created;
1983 	bool rtnl_held = false;
1984 	u32 flags;
1985 
1986 replay:
1987 	tp_created = 0;
1988 
1989 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
1990 				     rtm_tca_policy, extack);
1991 	if (err < 0)
1992 		return err;
1993 
1994 	t = nlmsg_data(n);
1995 	protocol = TC_H_MIN(t->tcm_info);
1996 	prio = TC_H_MAJ(t->tcm_info);
1997 	prio_allocate = false;
1998 	parent = t->tcm_parent;
1999 	tp = NULL;
2000 	cl = 0;
2001 	block = NULL;
2002 	q = NULL;
2003 	chain = NULL;
2004 	flags = 0;
2005 
2006 	if (prio == 0) {
2007 		/* If no priority is provided by the user,
2008 		 * we allocate one.
2009 		 */
2010 		if (n->nlmsg_flags & NLM_F_CREATE) {
2011 			prio = TC_H_MAKE(0x80000000U, 0U);
2012 			prio_allocate = true;
2013 		} else {
2014 			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2015 			return -ENOENT;
2016 		}
2017 	}
2018 
2019 	/* Find head of filter chain. */
2020 
2021 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2022 	if (err)
2023 		return err;
2024 
2025 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2026 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2027 		err = -EINVAL;
2028 		goto errout;
2029 	}
2030 
2031 	/* Take rtnl mutex if any of: rtnl_held was set on a previous iteration,
2032 	 * block is shared (no qdisc found), qdisc is not unlocked, classifier
2033 	 * type is not specified, or classifier is not unlocked.
2034 	 */
2035 	if (rtnl_held ||
2036 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2037 	    !tcf_proto_is_unlocked(name)) {
2038 		rtnl_held = true;
2039 		rtnl_lock();
2040 	}
2041 
2042 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2043 	if (err)
2044 		goto errout;
2045 
2046 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2047 				 extack);
2048 	if (IS_ERR(block)) {
2049 		err = PTR_ERR(block);
2050 		goto errout;
2051 	}
2052 	block->classid = parent;
2053 
2054 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2055 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2056 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2057 		err = -EINVAL;
2058 		goto errout;
2059 	}
2060 	chain = tcf_chain_get(block, chain_index, true);
2061 	if (!chain) {
2062 		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2063 		err = -ENOMEM;
2064 		goto errout;
2065 	}
2066 
2067 	mutex_lock(&chain->filter_chain_lock);
2068 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2069 			       prio, prio_allocate);
2070 	if (IS_ERR(tp)) {
2071 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2072 		err = PTR_ERR(tp);
2073 		goto errout_locked;
2074 	}
2075 
2076 	if (tp == NULL) {
2077 		struct tcf_proto *tp_new = NULL;
2078 
2079 		if (chain->flushing) {
2080 			err = -EAGAIN;
2081 			goto errout_locked;
2082 		}
2083 
2084 		/* Proto-tcf does not exist, create a new one */
2085 
2086 		if (tca[TCA_KIND] == NULL || !protocol) {
2087 			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2088 			err = -EINVAL;
2089 			goto errout_locked;
2090 		}
2091 
2092 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2093 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2094 			err = -ENOENT;
2095 			goto errout_locked;
2096 		}
2097 
2098 		if (prio_allocate)
2099 			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2100 							       &chain_info));
2101 
2102 		mutex_unlock(&chain->filter_chain_lock);
2103 		tp_new = tcf_proto_create(name, protocol, prio, chain,
2104 					  rtnl_held, extack);
2105 		if (IS_ERR(tp_new)) {
2106 			err = PTR_ERR(tp_new);
2107 			goto errout_tp;
2108 		}
2109 
2110 		tp_created = 1;
2111 		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2112 						rtnl_held);
2113 		if (IS_ERR(tp)) {
2114 			err = PTR_ERR(tp);
2115 			goto errout_tp;
2116 		}
2117 	} else {
2118 		mutex_unlock(&chain->filter_chain_lock);
2119 	}
2120 
2121 	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2122 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2123 		err = -EINVAL;
2124 		goto errout;
2125 	}
2126 
2127 	fh = tp->ops->get(tp, t->tcm_handle);
2128 
2129 	if (!fh) {
2130 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2131 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2132 			err = -ENOENT;
2133 			goto errout;
2134 		}
2135 	} else if (n->nlmsg_flags & NLM_F_EXCL) {
2136 		tfilter_put(tp, fh);
2137 		NL_SET_ERR_MSG(extack, "Filter already exists");
2138 		err = -EEXIST;
2139 		goto errout;
2140 	}
2141 
2142 	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2143 		tfilter_put(tp, fh);
2144 		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2145 		err = -EINVAL;
2146 		goto errout;
2147 	}
2148 
2149 	if (!(n->nlmsg_flags & NLM_F_CREATE))
2150 		flags |= TCA_ACT_FLAGS_REPLACE;
2151 	if (!rtnl_held)
2152 		flags |= TCA_ACT_FLAGS_NO_RTNL;
2153 	if (is_qdisc_ingress(parent))
2154 		flags |= TCA_ACT_FLAGS_AT_INGRESS;
2155 	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2156 			      flags, extack);
2157 	if (err == 0) {
2158 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2159 			       RTM_NEWTFILTER, false, rtnl_held);
2160 		tfilter_put(tp, fh);
2161 		/* q pointer is NULL for shared blocks */
2162 		if (q)
2163 			q->flags &= ~TCQ_F_CAN_BYPASS;
2164 	}
2165 
2166 errout:
2167 	if (err && tp_created)
2168 		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2169 errout_tp:
2170 	if (chain) {
2171 		if (tp && !IS_ERR(tp))
2172 			tcf_proto_put(tp, rtnl_held, NULL);
2173 		if (!tp_created)
2174 			tcf_chain_put(chain);
2175 	}
2176 	tcf_block_release(q, block, rtnl_held);
2177 
2178 	if (rtnl_held)
2179 		rtnl_unlock();
2180 
2181 	if (err == -EAGAIN) {
2182 		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
2183 		 * of target chain.
2184 		 */
2185 		rtnl_held = true;
2186 		/* Replay the request. */
2187 		goto replay;
2188 	}
2189 	return err;
2190 
2191 errout_locked:
2192 	mutex_unlock(&chain->filter_chain_lock);
2193 	goto errout;
2194 }
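
/* Editor's usage sketch (hedged; based on common iproute2 behaviour, not
 * verified against a particular version):
 *
 *	tc filter add dev eth0 ingress prio 1 protocol ip flower ...
 *		-> RTM_NEWTFILTER with NLM_F_CREATE | NLM_F_EXCL
 *	tc filter replace dev eth0 ingress prio 1 protocol ip flower ...
 *		-> RTM_NEWTFILTER with NLM_F_CREATE
 *	tc filter change dev eth0 ingress prio 1 protocol ip flower ...
 *		-> RTM_NEWTFILTER without NLM_F_CREATE, which sets
 *		   TCA_ACT_FLAGS_REPLACE in tc_new_tfilter() above
 */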
2195 
2196 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2197 			  struct netlink_ext_ack *extack)
2198 {
2199 	struct net *net = sock_net(skb->sk);
2200 	struct nlattr *tca[TCA_MAX + 1];
2201 	char name[IFNAMSIZ];
2202 	struct tcmsg *t;
2203 	u32 protocol;
2204 	u32 prio;
2205 	u32 parent;
2206 	u32 chain_index;
2207 	struct Qdisc *q = NULL;
2208 	struct tcf_chain_info chain_info;
2209 	struct tcf_chain *chain = NULL;
2210 	struct tcf_block *block = NULL;
2211 	struct tcf_proto *tp = NULL;
2212 	unsigned long cl = 0;
2213 	void *fh = NULL;
2214 	int err;
2215 	bool rtnl_held = false;
2216 
2217 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2218 				     rtm_tca_policy, extack);
2219 	if (err < 0)
2220 		return err;
2221 
2222 	t = nlmsg_data(n);
2223 	protocol = TC_H_MIN(t->tcm_info);
2224 	prio = TC_H_MAJ(t->tcm_info);
2225 	parent = t->tcm_parent;
2226 
2227 	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2228 		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2229 		return -ENOENT;
2230 	}
2231 
2232 	/* Find head of filter chain. */
2233 
2234 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2235 	if (err)
2236 		return err;
2237 
2238 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2239 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2240 		err = -EINVAL;
2241 		goto errout;
2242 	}
2243 	/* Take rtnl mutex if any of: flushing whole chain, block is shared (no
2244 	 * qdisc found), qdisc is not unlocked, classifier type is not
2245 	 * specified, or classifier is not unlocked.
2246 	 */
2247 	if (!prio ||
2248 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2249 	    !tcf_proto_is_unlocked(name)) {
2250 		rtnl_held = true;
2251 		rtnl_lock();
2252 	}
2253 
2254 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2255 	if (err)
2256 		goto errout;
2257 
2258 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2259 				 extack);
2260 	if (IS_ERR(block)) {
2261 		err = PTR_ERR(block);
2262 		goto errout;
2263 	}
2264 
2265 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2266 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2267 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2268 		err = -EINVAL;
2269 		goto errout;
2270 	}
2271 	chain = tcf_chain_get(block, chain_index, false);
2272 	if (!chain) {
2273 		/* User requested flush on non-existent chain. Nothing to do,
2274 		 * so just return success.
2275 		 */
2276 		if (prio == 0) {
2277 			err = 0;
2278 			goto errout;
2279 		}
2280 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2281 		err = -ENOENT;
2282 		goto errout;
2283 	}
2284 
2285 	if (prio == 0) {
2286 		tfilter_notify_chain(net, skb, block, q, parent, n,
2287 				     chain, RTM_DELTFILTER);
2288 		tcf_chain_flush(chain, rtnl_held);
2289 		err = 0;
2290 		goto errout;
2291 	}
2292 
2293 	mutex_lock(&chain->filter_chain_lock);
2294 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2295 			       prio, false);
2296 	if (!tp || IS_ERR(tp)) {
2297 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2298 		err = tp ? PTR_ERR(tp) : -ENOENT;
2299 		goto errout_locked;
2300 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2301 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2302 		err = -EINVAL;
2303 		goto errout_locked;
2304 	} else if (t->tcm_handle == 0) {
2305 		tcf_proto_signal_destroying(chain, tp);
2306 		tcf_chain_tp_remove(chain, &chain_info, tp);
2307 		mutex_unlock(&chain->filter_chain_lock);
2308 
2309 		tcf_proto_put(tp, rtnl_held, NULL);
2310 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2311 			       RTM_DELTFILTER, false, rtnl_held);
2312 		err = 0;
2313 		goto errout;
2314 	}
2315 	mutex_unlock(&chain->filter_chain_lock);
2316 
2317 	fh = tp->ops->get(tp, t->tcm_handle);
2318 
2319 	if (!fh) {
2320 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2321 		err = -ENOENT;
2322 	} else {
2323 		bool last;
2324 
2325 		err = tfilter_del_notify(net, skb, n, tp, block,
2326 					 q, parent, fh, false, &last,
2327 					 rtnl_held, extack);
2328 
2329 		if (err)
2330 			goto errout;
2331 		if (last)
2332 			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2333 	}
2334 
2335 errout:
2336 	if (chain) {
2337 		if (tp && !IS_ERR(tp))
2338 			tcf_proto_put(tp, rtnl_held, NULL);
2339 		tcf_chain_put(chain);
2340 	}
2341 	tcf_block_release(q, block, rtnl_held);
2342 
2343 	if (rtnl_held)
2344 		rtnl_unlock();
2345 
2346 	return err;
2347 
2348 errout_locked:
2349 	mutex_unlock(&chain->filter_chain_lock);
2350 	goto errout;
2351 }
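
/* Editor's usage sketch (hedged): a delete without priority flushes, while
 * a fully-specified delete removes a single filter:
 *
 *	tc filter del dev eth0 ingress
 *		-> prio == 0, tcf_chain_flush() path in tc_del_tfilter()
 *	tc filter del dev eth0 ingress prio 1 protocol ip handle 0x1 flower
 *		-> prio != 0, tfilter_del_notify() on the matching handle
 */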
2352 
2353 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2354 			  struct netlink_ext_ack *extack)
2355 {
2356 	struct net *net = sock_net(skb->sk);
2357 	struct nlattr *tca[TCA_MAX + 1];
2358 	char name[IFNAMSIZ];
2359 	struct tcmsg *t;
2360 	u32 protocol;
2361 	u32 prio;
2362 	u32 parent;
2363 	u32 chain_index;
2364 	struct Qdisc *q = NULL;
2365 	struct tcf_chain_info chain_info;
2366 	struct tcf_chain *chain = NULL;
2367 	struct tcf_block *block = NULL;
2368 	struct tcf_proto *tp = NULL;
2369 	unsigned long cl = 0;
2370 	void *fh = NULL;
2371 	int err;
2372 	bool rtnl_held = false;
2373 
2374 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2375 				     rtm_tca_policy, extack);
2376 	if (err < 0)
2377 		return err;
2378 
2379 	t = nlmsg_data(n);
2380 	protocol = TC_H_MIN(t->tcm_info);
2381 	prio = TC_H_MAJ(t->tcm_info);
2382 	parent = t->tcm_parent;
2383 
2384 	if (prio == 0) {
2385 		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2386 		return -ENOENT;
2387 	}
2388 
2389 	/* Find head of filter chain. */
2390 
2391 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2392 	if (err)
2393 		return err;
2394 
2395 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2396 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2397 		err = -EINVAL;
2398 		goto errout;
2399 	}
2400 	/* Take rtnl mutex if any of: block is shared (no qdisc found), qdisc is
2401 	 * not unlocked, classifier type is not specified, or classifier is not
2402 	 * unlocked.
2403 	 */
2404 	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2405 	    !tcf_proto_is_unlocked(name)) {
2406 		rtnl_held = true;
2407 		rtnl_lock();
2408 	}
2409 
2410 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2411 	if (err)
2412 		goto errout;
2413 
2414 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2415 				 extack);
2416 	if (IS_ERR(block)) {
2417 		err = PTR_ERR(block);
2418 		goto errout;
2419 	}
2420 
2421 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2422 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2423 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2424 		err = -EINVAL;
2425 		goto errout;
2426 	}
2427 	chain = tcf_chain_get(block, chain_index, false);
2428 	if (!chain) {
2429 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2430 		err = -EINVAL;
2431 		goto errout;
2432 	}
2433 
2434 	mutex_lock(&chain->filter_chain_lock);
2435 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2436 			       prio, false);
2437 	mutex_unlock(&chain->filter_chain_lock);
2438 	if (!tp || IS_ERR(tp)) {
2439 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2440 		err = tp ? PTR_ERR(tp) : -ENOENT;
2441 		goto errout;
2442 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2443 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2444 		err = -EINVAL;
2445 		goto errout;
2446 	}
2447 
2448 	fh = tp->ops->get(tp, t->tcm_handle);
2449 
2450 	if (!fh) {
2451 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2452 		err = -ENOENT;
2453 	} else {
2454 		err = tfilter_notify(net, skb, n, tp, block, q, parent,
2455 				     fh, RTM_NEWTFILTER, true, rtnl_held);
2456 		if (err < 0)
2457 			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2458 	}
2459 
2460 	tfilter_put(tp, fh);
2461 errout:
2462 	if (chain) {
2463 		if (tp && !IS_ERR(tp))
2464 			tcf_proto_put(tp, rtnl_held, NULL);
2465 		tcf_chain_put(chain);
2466 	}
2467 	tcf_block_release(q, block, rtnl_held);
2468 
2469 	if (rtnl_held)
2470 		rtnl_unlock();
2471 
2472 	return err;
2473 }
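
/* Editor's usage sketch (hedged): `tc filter get` maps onto this handler;
 * it requires an exact priority, protocol and handle, e.g.
 *
 *	tc filter get dev eth0 ingress prio 1 protocol ip handle 0x1 flower
 *
 * and the result is unicast back to the requester via
 * tfilter_notify(..., unicast = true).
 */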
2474 
2475 struct tcf_dump_args {
2476 	struct tcf_walker w;
2477 	struct sk_buff *skb;
2478 	struct netlink_callback *cb;
2479 	struct tcf_block *block;
2480 	struct Qdisc *q;
2481 	u32 parent;
2482 	bool terse_dump;
2483 };
2484 
2485 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2486 {
2487 	struct tcf_dump_args *a = (void *)arg;
2488 	struct net *net = sock_net(a->skb->sk);
2489 
2490 	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2491 			     n, NETLINK_CB(a->cb->skb).portid,
2492 			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2493 			     RTM_NEWTFILTER, a->terse_dump, true);
2494 }
2495 
2496 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2497 			   struct sk_buff *skb, struct netlink_callback *cb,
2498 			   long index_start, long *p_index, bool terse)
2499 {
2500 	struct net *net = sock_net(skb->sk);
2501 	struct tcf_block *block = chain->block;
2502 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2503 	struct tcf_proto *tp, *tp_prev;
2504 	struct tcf_dump_args arg;
2505 
2506 	for (tp = __tcf_get_next_proto(chain, NULL);
2507 	     tp;
2508 	     tp_prev = tp,
2509 		     tp = __tcf_get_next_proto(chain, tp),
2510 		     tcf_proto_put(tp_prev, true, NULL),
2511 		     (*p_index)++) {
2512 		if (*p_index < index_start)
2513 			continue;
2514 		if (TC_H_MAJ(tcm->tcm_info) &&
2515 		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
2516 			continue;
2517 		if (TC_H_MIN(tcm->tcm_info) &&
2518 		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
2519 			continue;
2520 		if (*p_index > index_start)
2521 			memset(&cb->args[1], 0,
2522 			       sizeof(cb->args) - sizeof(cb->args[0]));
2523 		if (cb->args[1] == 0) {
2524 			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2525 					  NETLINK_CB(cb->skb).portid,
2526 					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
2527 					  RTM_NEWTFILTER, false, true) <= 0)
2528 				goto errout;
2529 			cb->args[1] = 1;
2530 		}
2531 		if (!tp->ops->walk)
2532 			continue;
2533 		arg.w.fn = tcf_node_dump;
2534 		arg.skb = skb;
2535 		arg.cb = cb;
2536 		arg.block = block;
2537 		arg.q = q;
2538 		arg.parent = parent;
2539 		arg.w.stop = 0;
2540 		arg.w.skip = cb->args[1] - 1;
2541 		arg.w.count = 0;
2542 		arg.w.cookie = cb->args[2];
2543 		arg.terse_dump = terse;
2544 		tp->ops->walk(tp, &arg.w, true);
2545 		cb->args[2] = arg.w.cookie;
2546 		cb->args[1] = arg.w.count + 1;
2547 		if (arg.w.stop)
2548 			goto errout;
2549 	}
2550 	return true;
2551 
2552 errout:
2553 	tcf_proto_put(tp, true, NULL);
2554 	return false;
2555 }
2556 
2557 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2558 	[TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2559 };
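
/* Editor's sketch (illustrative; kernel-style netlink helpers used here
 * only to show the attribute layout): a dump request opts in to terse mode
 * by carrying a TCA_DUMP_FLAGS bitfield32, roughly:
 *
 *	struct nla_bitfield32 terse = {
 *		.value		= TCA_DUMP_FLAGS_TERSE,
 *		.selector	= TCA_DUMP_FLAGS_TERSE,
 *	};
 *	nla_put(skb, TCA_DUMP_FLAGS, sizeof(terse), &terse);
 *
 * tc_dump_tfilter() below then prefers a classifier's ->terse_dump() op.
 */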
2560 
2561 /* called with RTNL */
2562 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2563 {
2564 	struct tcf_chain *chain, *chain_prev;
2565 	struct net *net = sock_net(skb->sk);
2566 	struct nlattr *tca[TCA_MAX + 1];
2567 	struct Qdisc *q = NULL;
2568 	struct tcf_block *block;
2569 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2570 	bool terse_dump = false;
2571 	long index_start;
2572 	long index;
2573 	u32 parent;
2574 	int err;
2575 
2576 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2577 		return skb->len;
2578 
2579 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2580 				     tcf_tfilter_dump_policy, cb->extack);
2581 	if (err)
2582 		return err;
2583 
2584 	if (tca[TCA_DUMP_FLAGS]) {
2585 		struct nla_bitfield32 flags =
2586 			nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2587 
2588 		terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2589 	}
2590 
2591 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2592 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2593 		if (!block)
2594 			goto out;
2595 		/* If we work with a block index, q is NULL and the parent
2596 		 * value will never be used in the following code. The check
2597 		 * in tcf_fill_node prevents it. However, the compiler does
2598 		 * not see that far, so set parent to zero to silence the
2599 		 * warning about parent being uninitialized.
2600 		 */
2601 		parent = 0;
2602 	} else {
2603 		const struct Qdisc_class_ops *cops;
2604 		struct net_device *dev;
2605 		unsigned long cl = 0;
2606 
2607 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2608 		if (!dev)
2609 			return skb->len;
2610 
2611 		parent = tcm->tcm_parent;
2612 		if (!parent)
2613 			q = rtnl_dereference(dev->qdisc);
2614 		else
2615 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2616 		if (!q)
2617 			goto out;
2618 		cops = q->ops->cl_ops;
2619 		if (!cops)
2620 			goto out;
2621 		if (!cops->tcf_block)
2622 			goto out;
2623 		if (TC_H_MIN(tcm->tcm_parent)) {
2624 			cl = cops->find(q, tcm->tcm_parent);
2625 			if (cl == 0)
2626 				goto out;
2627 		}
2628 		block = cops->tcf_block(q, cl, NULL);
2629 		if (!block)
2630 			goto out;
2631 		parent = block->classid;
2632 		if (tcf_block_shared(block))
2633 			q = NULL;
2634 	}
2635 
2636 	index_start = cb->args[0];
2637 	index = 0;
2638 
2639 	for (chain = __tcf_get_next_chain(block, NULL);
2640 	     chain;
2641 	     chain_prev = chain,
2642 		     chain = __tcf_get_next_chain(block, chain),
2643 		     tcf_chain_put(chain_prev)) {
2644 		if (tca[TCA_CHAIN] &&
2645 		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2646 			continue;
2647 		if (!tcf_chain_dump(chain, q, parent, skb, cb,
2648 				    index_start, &index, terse_dump)) {
2649 			tcf_chain_put(chain);
2650 			err = -EMSGSIZE;
2651 			break;
2652 		}
2653 	}
2654 
2655 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2656 		tcf_block_refcnt_put(block, true);
2657 	cb->args[0] = index;
2658 
2659 out:
2660 	/* If we made no progress, the error (EMSGSIZE) is real */
2661 	if (skb->len == 0 && err)
2662 		return err;
2663 	return skb->len;
2664 }
2665 
2666 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2667 			      void *tmplt_priv, u32 chain_index,
2668 			      struct net *net, struct sk_buff *skb,
2669 			      struct tcf_block *block,
2670 			      u32 portid, u32 seq, u16 flags, int event)
2671 {
2672 	unsigned char *b = skb_tail_pointer(skb);
2673 	const struct tcf_proto_ops *ops;
2674 	struct nlmsghdr *nlh;
2675 	struct tcmsg *tcm;
2676 	void *priv;
2677 
2678 	ops = tmplt_ops;
2679 	priv = tmplt_priv;
2680 
2681 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2682 	if (!nlh)
2683 		goto out_nlmsg_trim;
2684 	tcm = nlmsg_data(nlh);
2685 	tcm->tcm_family = AF_UNSPEC;
2686 	tcm->tcm__pad1 = 0;
2687 	tcm->tcm__pad2 = 0;
2688 	tcm->tcm_handle = 0;
2689 	if (block->q) {
2690 		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2691 		tcm->tcm_parent = block->q->handle;
2692 	} else {
2693 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2694 		tcm->tcm_block_index = block->index;
2695 	}
2696 
2697 	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2698 		goto nla_put_failure;
2699 
2700 	if (ops) {
2701 		if (nla_put_string(skb, TCA_KIND, ops->kind))
2702 			goto nla_put_failure;
2703 		if (ops->tmplt_dump(skb, net, priv) < 0)
2704 			goto nla_put_failure;
2705 	}
2706 
2707 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2708 	return skb->len;
2709 
2710 out_nlmsg_trim:
2711 nla_put_failure:
2712 	nlmsg_trim(skb, b);
2713 	return -EMSGSIZE;
2714 }
2715 
2716 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2717 			   u32 seq, u16 flags, int event, bool unicast)
2718 {
2719 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2720 	struct tcf_block *block = chain->block;
2721 	struct net *net = block->net;
2722 	struct sk_buff *skb;
2723 	int err = 0;
2724 
2725 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2726 	if (!skb)
2727 		return -ENOBUFS;
2728 
2729 	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2730 			       chain->index, net, skb, block, portid,
2731 			       seq, flags, event) <= 0) {
2732 		kfree_skb(skb);
2733 		return -EINVAL;
2734 	}
2735 
2736 	if (unicast)
2737 		err = rtnl_unicast(skb, net, portid);
2738 	else
2739 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2740 				     flags & NLM_F_ECHO);
2741 
2742 	return err;
2743 }
2744 
2745 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2746 				  void *tmplt_priv, u32 chain_index,
2747 				  struct tcf_block *block, struct sk_buff *oskb,
2748 				  u32 seq, u16 flags, bool unicast)
2749 {
2750 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2751 	struct net *net = block->net;
2752 	struct sk_buff *skb;
2753 
2754 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2755 	if (!skb)
2756 		return -ENOBUFS;
2757 
2758 	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2759 			       block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2760 		kfree_skb(skb);
2761 		return -EINVAL;
2762 	}
2763 
2764 	if (unicast)
2765 		return rtnl_unicast(skb, net, portid);
2766 
2767 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2768 }
2769 
2770 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2771 			      struct nlattr **tca,
2772 			      struct netlink_ext_ack *extack)
2773 {
2774 	const struct tcf_proto_ops *ops;
2775 	char name[IFNAMSIZ];
2776 	void *tmplt_priv;
2777 
2778 	/* If kind is not set, the user did not specify a template. */
2779 	if (!tca[TCA_KIND])
2780 		return 0;
2781 
2782 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2783 		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2784 		return -EINVAL;
2785 	}
2786 
2787 	ops = tcf_proto_lookup_ops(name, true, extack);
2788 	if (IS_ERR(ops))
2789 		return PTR_ERR(ops);
2790 	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2791 		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2792 		return -EOPNOTSUPP;
2793 	}
2794 
2795 	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2796 	if (IS_ERR(tmplt_priv)) {
2797 		module_put(ops->owner);
2798 		return PTR_ERR(tmplt_priv);
2799 	}
2800 	chain->tmplt_ops = ops;
2801 	chain->tmplt_priv = tmplt_priv;
2802 	return 0;
2803 }
2804 
2805 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2806 			       void *tmplt_priv)
2807 {
2808 	/* If template ops are not set, there is no template and no work to do. */
2809 	if (!tmplt_ops)
2810 		return;
2811 
2812 	tmplt_ops->tmplt_destroy(tmplt_priv);
2813 	module_put(tmplt_ops->owner);
2814 }
2815 
2816 /* Add/delete/get a chain */
2817 
2818 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2819 			struct netlink_ext_ack *extack)
2820 {
2821 	struct net *net = sock_net(skb->sk);
2822 	struct nlattr *tca[TCA_MAX + 1];
2823 	struct tcmsg *t;
2824 	u32 parent;
2825 	u32 chain_index;
2826 	struct Qdisc *q;
2827 	struct tcf_chain *chain;
2828 	struct tcf_block *block;
2829 	unsigned long cl;
2830 	int err;
2831 
2832 replay:
2833 	q = NULL;
2834 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2835 				     rtm_tca_policy, extack);
2836 	if (err < 0)
2837 		return err;
2838 
2839 	t = nlmsg_data(n);
2840 	parent = t->tcm_parent;
2841 	cl = 0;
2842 
2843 	block = tcf_block_find(net, &q, &parent, &cl,
2844 			       t->tcm_ifindex, t->tcm_block_index, extack);
2845 	if (IS_ERR(block))
2846 		return PTR_ERR(block);
2847 
2848 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2849 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2850 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2851 		err = -EINVAL;
2852 		goto errout_block;
2853 	}
2854 
2855 	mutex_lock(&block->lock);
2856 	chain = tcf_chain_lookup(block, chain_index);
2857 	if (n->nlmsg_type == RTM_NEWCHAIN) {
2858 		if (chain) {
2859 			if (tcf_chain_held_by_acts_only(chain)) {
2860 				/* The chain exists only because there is
2861 				 * some action referencing it.
2862 				 */
2863 				tcf_chain_hold(chain);
2864 			} else {
2865 				NL_SET_ERR_MSG(extack, "Filter chain already exists");
2866 				err = -EEXIST;
2867 				goto errout_block_locked;
2868 			}
2869 		} else {
2870 			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2871 				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2872 				err = -ENOENT;
2873 				goto errout_block_locked;
2874 			}
2875 			chain = tcf_chain_create(block, chain_index);
2876 			if (!chain) {
2877 				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2878 				err = -ENOMEM;
2879 				goto errout_block_locked;
2880 			}
2881 		}
2882 	} else {
2883 		if (!chain || tcf_chain_held_by_acts_only(chain)) {
2884 			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2885 			err = -EINVAL;
2886 			goto errout_block_locked;
2887 		}
2888 		tcf_chain_hold(chain);
2889 	}
2890 
2891 	if (n->nlmsg_type == RTM_NEWCHAIN) {
2892 		/* Modifying a chain requires holding the parent block lock. In
2893 		 * case the chain was successfully added, take a reference to
2894 		 * the chain so that an empty chain does not disappear before
2895 		 * the end of this function.
2896 		 */
2897 		tcf_chain_hold(chain);
2898 		chain->explicitly_created = true;
2899 	}
2900 	mutex_unlock(&block->lock);
2901 
2902 	switch (n->nlmsg_type) {
2903 	case RTM_NEWCHAIN:
2904 		err = tc_chain_tmplt_add(chain, net, tca, extack);
2905 		if (err) {
2906 			tcf_chain_put_explicitly_created(chain);
2907 			goto errout;
2908 		}
2909 
2910 		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2911 				RTM_NEWCHAIN, false);
2912 		break;
2913 	case RTM_DELCHAIN:
2914 		tfilter_notify_chain(net, skb, block, q, parent, n,
2915 				     chain, RTM_DELTFILTER);
2916 		/* Flush the chain first as the user requested chain removal. */
2917 		tcf_chain_flush(chain, true);
2918 		/* If the chain was successfully deleted, put the reference
2919 		 * to the chain that was taken during addition.
2920 		 */
2921 		tcf_chain_put_explicitly_created(chain);
2922 		break;
2923 	case RTM_GETCHAIN:
2924 		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2925 				      n->nlmsg_flags, n->nlmsg_type, true);
2926 		if (err < 0)
2927 			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2928 		break;
2929 	default:
2930 		err = -EOPNOTSUPP;
2931 		NL_SET_ERR_MSG(extack, "Unsupported message type");
2932 		goto errout;
2933 	}
2934 
2935 errout:
2936 	tcf_chain_put(chain);
2937 errout_block:
2938 	tcf_block_release(q, block, true);
2939 	if (err == -EAGAIN)
2940 		/* Replay the request. */
2941 		goto replay;
2942 	return err;
2943 
2944 errout_block_locked:
2945 	mutex_unlock(&block->lock);
2946 	goto errout_block;
2947 }
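
/* Editor's usage sketch (hedged): the RTM_*CHAIN message types handled
 * above correspond to `tc chain` commands, e.g.
 *
 *	tc chain add dev eth0 ingress chain 1
 *		-> RTM_NEWCHAIN, optionally with a template
 *		   (tc_chain_tmplt_add() when a kind is given)
 *	tc chain del dev eth0 ingress chain 1
 *		-> RTM_DELCHAIN, which flushes the chain first
 *	tc chain get dev eth0 ingress chain 1
 *		-> RTM_GETCHAIN, unicast reply via tc_chain_notify()
 */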
2948 
2949 /* called with RTNL */
2950 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2951 {
2952 	struct net *net = sock_net(skb->sk);
2953 	struct nlattr *tca[TCA_MAX + 1];
2954 	struct Qdisc *q = NULL;
2955 	struct tcf_block *block;
2956 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2957 	struct tcf_chain *chain;
2958 	long index_start;
2959 	long index;
2960 	int err;
2961 
2962 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2963 		return skb->len;
2964 
2965 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2966 				     rtm_tca_policy, cb->extack);
2967 	if (err)
2968 		return err;
2969 
2970 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2971 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2972 		if (!block)
2973 			goto out;
2974 	} else {
2975 		const struct Qdisc_class_ops *cops;
2976 		struct net_device *dev;
2977 		unsigned long cl = 0;
2978 
2979 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2980 		if (!dev)
2981 			return skb->len;
2982 
2983 		if (!tcm->tcm_parent)
2984 			q = rtnl_dereference(dev->qdisc);
2985 		else
2986 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2987 
2988 		if (!q)
2989 			goto out;
2990 		cops = q->ops->cl_ops;
2991 		if (!cops)
2992 			goto out;
2993 		if (!cops->tcf_block)
2994 			goto out;
2995 		if (TC_H_MIN(tcm->tcm_parent)) {
2996 			cl = cops->find(q, tcm->tcm_parent);
2997 			if (cl == 0)
2998 				goto out;
2999 		}
3000 		block = cops->tcf_block(q, cl, NULL);
3001 		if (!block)
3002 			goto out;
3003 		if (tcf_block_shared(block))
3004 			q = NULL;
3005 	}
3006 
3007 	index_start = cb->args[0];
3008 	index = 0;
3009 
3010 	mutex_lock(&block->lock);
3011 	list_for_each_entry(chain, &block->chain_list, list) {
3012 		if ((tca[TCA_CHAIN] &&
3013 		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
3014 			continue;
3015 		if (index < index_start) {
3016 			index++;
3017 			continue;
3018 		}
3019 		if (tcf_chain_held_by_acts_only(chain))
3020 			continue;
3021 		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3022 					 chain->index, net, skb, block,
3023 					 NETLINK_CB(cb->skb).portid,
3024 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3025 					 RTM_NEWCHAIN);
3026 		if (err <= 0)
3027 			break;
3028 		index++;
3029 	}
3030 	mutex_unlock(&block->lock);
3031 
3032 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3033 		tcf_block_refcnt_put(block, true);
3034 	cb->args[0] = index;
3035 
3036 out:
3037 	/* If we made no progress, the error (EMSGSIZE) is real */
3038 	if (skb->len == 0 && err)
3039 		return err;
3040 	return skb->len;
3041 }
3042 
3043 void tcf_exts_destroy(struct tcf_exts *exts)
3044 {
3045 #ifdef CONFIG_NET_CLS_ACT
3046 	if (exts->actions) {
3047 		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3048 		kfree(exts->actions);
3049 	}
3050 	exts->nr_actions = 0;
3051 #endif
3052 }
3053 EXPORT_SYMBOL(tcf_exts_destroy);
3054 
3055 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3056 			 struct nlattr *rate_tlv, struct tcf_exts *exts,
3057 			 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
3058 {
3059 #ifdef CONFIG_NET_CLS_ACT
3060 	{
3061 		int init_res[TCA_ACT_MAX_PRIO] = {};
3062 		struct tc_action *act;
3063 		size_t attr_size = 0;
3064 
3065 		if (exts->police && tb[exts->police]) {
3066 			struct tc_action_ops *a_o;
3067 
3068 			a_o = tc_action_load_ops(tb[exts->police], true,
3069 						 !(flags & TCA_ACT_FLAGS_NO_RTNL),
3070 						 extack);
3071 			if (IS_ERR(a_o))
3072 				return PTR_ERR(a_o);
3073 			flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3074 			act = tcf_action_init_1(net, tp, tb[exts->police],
3075 						rate_tlv, a_o, init_res, flags,
3076 						extack);
3077 			module_put(a_o->owner);
3078 			if (IS_ERR(act))
3079 				return PTR_ERR(act);
3080 
3081 			act->type = exts->type = TCA_OLD_COMPAT;
3082 			exts->actions[0] = act;
3083 			exts->nr_actions = 1;
3084 			tcf_idr_insert_many(exts->actions);
3085 		} else if (exts->action && tb[exts->action]) {
3086 			int err;
3087 
3088 			flags |= TCA_ACT_FLAGS_BIND;
3089 			err = tcf_action_init(net, tp, tb[exts->action],
3090 					      rate_tlv, exts->actions, init_res,
3091 					      &attr_size, flags, fl_flags,
3092 					      extack);
3093 			if (err < 0)
3094 				return err;
3095 			exts->nr_actions = err;
3096 		}
3097 	}
3098 #else
3099 	if ((exts->action && tb[exts->action]) ||
3100 	    (exts->police && tb[exts->police])) {
3101 		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3102 		return -EOPNOTSUPP;
3103 	}
3104 #endif
3105 
3106 	return 0;
3107 }
3108 EXPORT_SYMBOL(tcf_exts_validate_ex);
3109 
3110 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3111 		      struct nlattr *rate_tlv, struct tcf_exts *exts,
3112 		      u32 flags, struct netlink_ext_ack *extack)
3113 {
3114 	return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
3115 				    flags, 0, extack);
3116 }
3117 EXPORT_SYMBOL(tcf_exts_validate);
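
/* Editor's sketch: a classifier's ->change() op typically validates its
 * embedded actions with the helper above; roughly (f is a hypothetical
 * filter object; the shape follows classifiers such as cls_basic):
 *
 *	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &f->exts,
 *				flags, extack);
 *	if (err < 0)
 *		return err;
 */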
3118 
3119 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3120 {
3121 #ifdef CONFIG_NET_CLS_ACT
3122 	struct tcf_exts old = *dst;
3123 
3124 	*dst = *src;
3125 	tcf_exts_destroy(&old);
3126 #endif
3127 }
3128 EXPORT_SYMBOL(tcf_exts_change);
3129 
3130 #ifdef CONFIG_NET_CLS_ACT
3131 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3132 {
3133 	if (exts->nr_actions == 0)
3134 		return NULL;
3135 	else
3136 		return exts->actions[0];
3137 }
3138 #endif
3139 
3140 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3141 {
3142 #ifdef CONFIG_NET_CLS_ACT
3143 	struct nlattr *nest;
3144 
3145 	if (exts->action && tcf_exts_has_actions(exts)) {
3146 		/*
3147 		 * Again for backward-compatible mode: we want to work
3148 		 * with both old and new modes of entering tc data,
3149 		 * even if iproute2 is newer - jhs
3150 		 */
3151 		if (exts->type != TCA_OLD_COMPAT) {
3152 			nest = nla_nest_start_noflag(skb, exts->action);
3153 			if (nest == NULL)
3154 				goto nla_put_failure;
3155 
3156 			if (tcf_action_dump(skb, exts->actions,
3157 					    0, 0, false) < 0)
3158 				goto nla_put_failure;
3159 			nla_nest_end(skb, nest);
3160 		} else if (exts->police) {
3161 			struct tc_action *act = tcf_exts_first_act(exts);
3162 			nest = nla_nest_start_noflag(skb, exts->police);
3163 			if (nest == NULL || !act)
3164 				goto nla_put_failure;
3165 			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3166 				goto nla_put_failure;
3167 			nla_nest_end(skb, nest);
3168 		}
3169 	}
3170 	return 0;
3171 
3172 nla_put_failure:
3173 	nla_nest_cancel(skb, nest);
3174 	return -1;
3175 #else
3176 	return 0;
3177 #endif
3178 }
3179 EXPORT_SYMBOL(tcf_exts_dump);
3180 
3181 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3182 {
3183 #ifdef CONFIG_NET_CLS_ACT
3184 	struct nlattr *nest;
3185 
3186 	if (!exts->action || !tcf_exts_has_actions(exts))
3187 		return 0;
3188 
3189 	nest = nla_nest_start_noflag(skb, exts->action);
3190 	if (!nest)
3191 		goto nla_put_failure;
3192 
3193 	if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3194 		goto nla_put_failure;
3195 	nla_nest_end(skb, nest);
3196 	return 0;
3197 
3198 nla_put_failure:
3199 	nla_nest_cancel(skb, nest);
3200 	return -1;
3201 #else
3202 	return 0;
3203 #endif
3204 }
3205 EXPORT_SYMBOL(tcf_exts_terse_dump);
3206 
3207 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3208 {
3209 #ifdef CONFIG_NET_CLS_ACT
3210 	struct tc_action *a = tcf_exts_first_act(exts);
3211 	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3212 		return -1;
3213 #endif
3214 	return 0;
3215 }
3216 EXPORT_SYMBOL(tcf_exts_dump_stats);
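
/* Editor's sketch of the usual classifier ->dump() pattern built on the
 * helpers above (f->exts is a hypothetical filter's extensions; the shape
 * follows cls_basic):
 *
 *	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
 *	if (!nest)
 *		goto nla_put_failure;
 *	if (tcf_exts_dump(skb, &f->exts) < 0)
 *		goto nla_put_failure;
 *	nla_nest_end(skb, nest);
 *	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
 *		goto nla_put_failure;
 */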
3217 
3218 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3219 {
3220 	if (*flags & TCA_CLS_FLAGS_IN_HW)
3221 		return;
3222 	*flags |= TCA_CLS_FLAGS_IN_HW;
3223 	atomic_inc(&block->offloadcnt);
3224 }
3225 
3226 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3227 {
3228 	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3229 		return;
3230 	*flags &= ~TCA_CLS_FLAGS_IN_HW;
3231 	atomic_dec(&block->offloadcnt);
3232 }
3233 
3234 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3235 				      struct tcf_proto *tp, u32 *cnt,
3236 				      u32 *flags, u32 diff, bool add)
3237 {
3238 	lockdep_assert_held(&block->cb_lock);
3239 
3240 	spin_lock(&tp->lock);
3241 	if (add) {
3242 		if (!*cnt)
3243 			tcf_block_offload_inc(block, flags);
3244 		*cnt += diff;
3245 	} else {
3246 		*cnt -= diff;
3247 		if (!*cnt)
3248 			tcf_block_offload_dec(block, flags);
3249 	}
3250 	spin_unlock(&tp->lock);
3251 }
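
/* Editor's worked example: offloading a filter with *cnt == 0 to three
 * callbacks (diff == 3, add == true) moves the count 0 -> 3 and, on that
 * first transition, sets TCA_CLS_FLAGS_IN_HW and bumps block->offloadcnt;
 * undoing the same three offloads (add == false) drops the count back to
 * zero, clears the flag and decrements offloadcnt.
 */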
3252 
3253 static void
3254 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3255 			 u32 *cnt, u32 *flags)
3256 {
3257 	lockdep_assert_held(&block->cb_lock);
3258 
3259 	spin_lock(&tp->lock);
3260 	tcf_block_offload_dec(block, flags);
3261 	*cnt = 0;
3262 	spin_unlock(&tp->lock);
3263 }
3264 
3265 static int
3266 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3267 		   void *type_data, bool err_stop)
3268 {
3269 	struct flow_block_cb *block_cb;
3270 	int ok_count = 0;
3271 	int err;
3272 
3273 	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3274 		err = block_cb->cb(type, type_data, block_cb->cb_priv);
3275 		if (err) {
3276 			if (err_stop)
3277 				return err;
3278 		} else {
3279 			ok_count++;
3280 		}
3281 	}
3282 	return ok_count;
3283 }
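
/* Editor's sketch of a driver-side block callback as invoked above (the
 * "my_" names are hypothetical):
 *
 *	static int my_block_cb(enum tc_setup_type type, void *type_data,
 *			       void *cb_priv)
 *	{
 *		struct my_priv *priv = cb_priv;
 *
 *		switch (type) {
 *		case TC_SETUP_CLSFLOWER:
 *			return my_setup_flower(priv, type_data);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 *
 * A zero return counts towards ok_count; an error either aborts the walk
 * (err_stop) or is skipped.
 */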
3284 
3285 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3286 		     void *type_data, bool err_stop, bool rtnl_held)
3287 {
3288 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3289 	int ok_count;
3290 
3291 retry:
3292 	if (take_rtnl)
3293 		rtnl_lock();
3294 	down_read(&block->cb_lock);
3295 	/* Need to obtain rtnl lock if block is bound to devs that require it.
3296 	 * In block bind code cb_lock is obtained while holding rtnl, so we must
3297 	 * obtain the locks in the same order here.
3298 	 */
3299 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3300 		up_read(&block->cb_lock);
3301 		take_rtnl = true;
3302 		goto retry;
3303 	}
3304 
3305 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3306 
3307 	up_read(&block->cb_lock);
3308 	if (take_rtnl)
3309 		rtnl_unlock();
3310 	return ok_count;
3311 }
3312 EXPORT_SYMBOL(tc_setup_cb_call);
3313 
3314 /* Non-destructive filter add. If a filter that wasn't already in hardware
3315  * is successfully offloaded, increment the block offloads counter. On
3316  * failure, a previously offloaded filter is considered to be intact and the
3317  * offloads counter is not decremented.
3318  */
3319 
3320 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3321 		    enum tc_setup_type type, void *type_data, bool err_stop,
3322 		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3323 {
3324 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3325 	int ok_count;
3326 
3327 retry:
3328 	if (take_rtnl)
3329 		rtnl_lock();
3330 	down_read(&block->cb_lock);
3331 	/* Need to obtain rtnl lock if block is bound to devs that require it.
3332 	 * In block bind code cb_lock is obtained while holding rtnl, so we must
3333 	 * obtain the locks in the same order here.
3334 	 */
3335 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3336 		up_read(&block->cb_lock);
3337 		take_rtnl = true;
3338 		goto retry;
3339 	}
3340 
3341 	/* Make sure all netdevs sharing this block are offload-capable. */
3342 	if (block->nooffloaddevcnt && err_stop) {
3343 		ok_count = -EOPNOTSUPP;
3344 		goto err_unlock;
3345 	}
3346 
3347 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3348 	if (ok_count < 0)
3349 		goto err_unlock;
3350 
3351 	if (tp->ops->hw_add)
3352 		tp->ops->hw_add(tp, type_data);
3353 	if (ok_count > 0)
3354 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3355 					  ok_count, true);
3356 err_unlock:
3357 	up_read(&block->cb_lock);
3358 	if (take_rtnl)
3359 		rtnl_unlock();
3360 	return min(ok_count, 0);
3361 }
3362 EXPORT_SYMBOL(tc_setup_cb_add);
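
/* Editor's sketch (hedged): cls_flower-style offload calls the helper
 * above roughly as follows; f->flags and f->in_hw_count are the filter's
 * own bookkeeping fields:
 *
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
 *	if (err)
 *		goto errout;
 *	if (skip_sw && !tc_in_hw(f->flags))
 *		err = -EINVAL;	- nothing offloaded and sw path was skipped
 */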
3363 
3364 /* Destructive filter replace. If a filter that wasn't already in hardware
3365  * is successfully offloaded, increment the block offload counter. On
3366  * failure, a previously offloaded filter is considered to be destroyed and
3367  * the offload counter is decremented.
3368  */
3369 
3370 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3371 			enum tc_setup_type type, void *type_data, bool err_stop,
3372 			u32 *old_flags, unsigned int *old_in_hw_count,
3373 			u32 *new_flags, unsigned int *new_in_hw_count,
3374 			bool rtnl_held)
3375 {
3376 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3377 	int ok_count;
3378 
3379 retry:
3380 	if (take_rtnl)
3381 		rtnl_lock();
3382 	down_read(&block->cb_lock);
3383 	/* Need to obtain rtnl lock if block is bound to devs that require it.
3384 	 * In block bind code cb_lock is obtained while holding rtnl, so we must
3385 	 * obtain the locks in the same order here.
3386 	 */
3387 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3388 		up_read(&block->cb_lock);
3389 		take_rtnl = true;
3390 		goto retry;
3391 	}
3392 
3393 	/* Make sure all netdevs sharing this block are offload-capable. */
3394 	if (block->nooffloaddevcnt && err_stop) {
3395 		ok_count = -EOPNOTSUPP;
3396 		goto err_unlock;
3397 	}
3398 
3399 	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3400 	if (tp->ops->hw_del)
3401 		tp->ops->hw_del(tp, type_data);
3402 
3403 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3404 	if (ok_count < 0)
3405 		goto err_unlock;
3406 
3407 	if (tp->ops->hw_add)
3408 		tp->ops->hw_add(tp, type_data);
3409 	if (ok_count > 0)
3410 		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3411 					  new_flags, ok_count, true);
3412 err_unlock:
3413 	up_read(&block->cb_lock);
3414 	if (take_rtnl)
3415 		rtnl_unlock();
3416 	return min(ok_count, 0);
3417 }
3418 EXPORT_SYMBOL(tc_setup_cb_replace);
3419 
3420 /* Destroy the filter and decrement the block offload counter if the filter
3421  * was previously offloaded.
3422  */
3423 
3424 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3425 			enum tc_setup_type type, void *type_data, bool err_stop,
3426 			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3427 {
3428 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3429 	int ok_count;
3430 
3431 retry:
3432 	if (take_rtnl)
3433 		rtnl_lock();
3434 	down_read(&block->cb_lock);
3435 	/* Need to obtain rtnl lock if block is bound to devs that require it.
3436 	 * In block bind code cb_lock is obtained while holding rtnl, so we must
3437 	 * obtain the locks in the same order here.
3438 	 */
3439 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3440 		up_read(&block->cb_lock);
3441 		take_rtnl = true;
3442 		goto retry;
3443 	}
3444 
3445 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3446 
3447 	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3448 	if (tp->ops->hw_del)
3449 		tp->ops->hw_del(tp, type_data);
3450 
3451 	up_read(&block->cb_lock);
3452 	if (take_rtnl)
3453 		rtnl_unlock();
3454 	return min(ok_count, 0);
3455 }
3456 EXPORT_SYMBOL(tc_setup_cb_destroy);
3457 
3458 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3459 			  bool add, flow_setup_cb_t *cb,
3460 			  enum tc_setup_type type, void *type_data,
3461 			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3462 {
3463 	int err = cb(type, type_data, cb_priv);
3464 
3465 	if (err) {
3466 		if (add && tc_skip_sw(*flags))
3467 			return err;
3468 	} else {
3469 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3470 					  add);
3471 	}
3472 
3473 	return 0;
3474 }
3475 EXPORT_SYMBOL(tc_setup_cb_reoffload);
3476 
3477 static int tcf_act_get_cookie(struct flow_action_entry *entry,
3478 			      const struct tc_action *act)
3479 {
3480 	struct tc_cookie *cookie;
3481 	int err = 0;
3482 
3483 	rcu_read_lock();
3484 	cookie = rcu_dereference(act->act_cookie);
3485 	if (cookie) {
3486 		entry->cookie = flow_action_cookie_create(cookie->data,
3487 							  cookie->len,
3488 							  GFP_ATOMIC);
3489 		if (!entry->cookie)
3490 			err = -ENOMEM;
3491 	}
3492 	rcu_read_unlock();
3493 	return err;
3494 }
3495 
3496 static void tcf_act_put_cookie(struct flow_action_entry *entry)
3497 {
3498 	flow_action_cookie_destroy(entry->cookie);
3499 }
3500 
3501 void tc_cleanup_offload_action(struct flow_action *flow_action)
3502 {
3503 	struct flow_action_entry *entry;
3504 	int i;
3505 
3506 	flow_action_for_each(i, entry, flow_action) {
3507 		tcf_act_put_cookie(entry);
3508 		if (entry->destructor)
3509 			entry->destructor(entry->destructor_priv);
3510 	}
3511 }
3512 EXPORT_SYMBOL(tc_cleanup_offload_action);
3513 
3514 static int tc_setup_offload_act(struct tc_action *act,
3515 				struct flow_action_entry *entry,
3516 				u32 *index_inc,
3517 				struct netlink_ext_ack *extack)
3518 {
3519 #ifdef CONFIG_NET_CLS_ACT
3520 	if (act->ops->offload_act_setup) {
3521 		return act->ops->offload_act_setup(act, entry, index_inc, true,
3522 						   extack);
3523 	} else {
3524 		NL_SET_ERR_MSG(extack, "Action does not support offload");
3525 		return -EOPNOTSUPP;
3526 	}
3527 #else
3528 	return 0;
3529 #endif
3530 }
3531 
3532 int tc_setup_action(struct flow_action *flow_action,
3533 		    struct tc_action *actions[],
3534 		    struct netlink_ext_ack *extack)
3535 {
3536 	int i, j, k, index, err = 0;
3537 	struct tc_action *act;
3538 
3539 	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3540 	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3541 	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3542 
3543 	if (!actions)
3544 		return 0;
3545 
3546 	j = 0;
3547 	tcf_act_for_each_action(i, act, actions) {
3548 		struct flow_action_entry *entry;
3549 
3550 		entry = &flow_action->entries[j];
3551 		spin_lock_bh(&act->tcfa_lock);
3552 		err = tcf_act_get_cookie(entry, act);
3553 		if (err)
3554 			goto err_out_locked;
3555 
3556 		index = 0;
3557 		err = tc_setup_offload_act(act, entry, &index, extack);
3558 		if (err)
3559 			goto err_out_locked;
3560 
3561 		for (k = 0; k < index; k++) {
3562 			entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
3563 			entry[k].hw_index = act->tcfa_index;
3564 		}
3565 
3566 		j += index;
3567 
3568 		spin_unlock_bh(&act->tcfa_lock);
3569 	}
3570 
3571 err_out:
3572 	if (err)
3573 		tc_cleanup_offload_action(flow_action);
3574 
3575 	return err;
3576 err_out_locked:
3577 	spin_unlock_bh(&act->tcfa_lock);
3578 	goto err_out;
3579 }
3580 
3581 int tc_setup_offload_action(struct flow_action *flow_action,
3582 			    const struct tcf_exts *exts,
3583 			    struct netlink_ext_ack *extack)
3584 {
3585 #ifdef CONFIG_NET_CLS_ACT
3586 	if (!exts)
3587 		return 0;
3588 
3589 	return tc_setup_action(flow_action, exts->actions, extack);
3590 #else
3591 	return 0;
3592 #endif
3593 }
3594 EXPORT_SYMBOL(tc_setup_offload_action);
3595 
3596 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3597 {
3598 	unsigned int num_acts = 0;
3599 	struct tc_action *act;
3600 	int i;
3601 
3602 	tcf_exts_for_each_action(i, act, exts) {
3603 		if (is_tcf_pedit(act))
3604 			num_acts += tcf_pedit_nkeys(act);
3605 		else
3606 			num_acts++;
3607 	}
3608 	return num_acts;
3609 }
3610 EXPORT_SYMBOL(tcf_exts_num_actions);
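
/* Editor's worked example: for exts holding a pedit action with three keys
 * followed by a mirred action, the helper above returns 3 + 1 = 4, which is
 * the number of flow_action entries tc_setup_action() will populate.
 */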
3611 
3612 #ifdef CONFIG_NET_CLS_ACT
3613 static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
3614 					u32 *p_block_index,
3615 					struct netlink_ext_ack *extack)
3616 {
3617 	*p_block_index = nla_get_u32(block_index_attr);
3618 	if (!*p_block_index) {
3619 		NL_SET_ERR_MSG(extack, "Block number may not be zero");
3620 		return -EINVAL;
3621 	}
3622 
3623 	return 0;
3624 }
3625 
3626 int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
3627 		    enum flow_block_binder_type binder_type,
3628 		    struct nlattr *block_index_attr,
3629 		    struct netlink_ext_ack *extack)
3630 {
3631 	u32 block_index;
3632 	int err;
3633 
3634 	if (!block_index_attr)
3635 		return 0;
3636 
3637 	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3638 	if (err)
3639 		return err;
3640 
3641 	qe->info.binder_type = binder_type;
3642 	qe->info.chain_head_change = tcf_chain_head_change_dflt;
3643 	qe->info.chain_head_change_priv = &qe->filter_chain;
3644 	qe->info.block_index = block_index;
3645 
3646 	return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3647 }
3648 EXPORT_SYMBOL(tcf_qevent_init);
3649 
3650 void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
3651 {
3652 	if (qe->info.block_index)
3653 		tcf_block_put_ext(qe->block, sch, &qe->info);
3654 }
3655 EXPORT_SYMBOL(tcf_qevent_destroy);
3656 
3657 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
3658 			       struct netlink_ext_ack *extack)
3659 {
3660 	u32 block_index;
3661 	int err;
3662 
3663 	if (!block_index_attr)
3664 		return 0;
3665 
3666 	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3667 	if (err)
3668 		return err;
3669 
3670 	/* Reject a newly configured block or a change of block. */
3671 	if (block_index != qe->info.block_index) {
3672 		NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
3673 		return -EINVAL;
3674 	}
3675 
3676 	return 0;
3677 }
3678 EXPORT_SYMBOL(tcf_qevent_validate_change);
3679 
3680 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
3681 				  struct sk_buff **to_free, int *ret)
3682 {
3683 	struct tcf_result cl_res;
3684 	struct tcf_proto *fl;
3685 
3686 	if (!qe->info.block_index)
3687 		return skb;
3688 
3689 	fl = rcu_dereference_bh(qe->filter_chain);
3690 
3691 	switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
3692 	case TC_ACT_SHOT:
3693 		qdisc_qstats_drop(sch);
3694 		__qdisc_drop(skb, to_free);
3695 		*ret = __NET_XMIT_BYPASS;
3696 		return NULL;
3697 	case TC_ACT_STOLEN:
3698 	case TC_ACT_QUEUED:
3699 	case TC_ACT_TRAP:
3700 		__qdisc_drop(skb, to_free);
3701 		*ret = __NET_XMIT_STOLEN;
3702 		return NULL;
3703 	case TC_ACT_REDIRECT:
3704 		skb_do_redirect(skb);
3705 		*ret = __NET_XMIT_STOLEN;
3706 		return NULL;
3707 	}
3708 
3709 	return skb;
3710 }
3711 EXPORT_SYMBOL(tcf_qevent_handle);
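
/* Editor's sketch: a qdisc with a qevent calls the handler above from its
 * enqueue path, as sch_red does for early_drop:
 *
 *	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
 *	if (!skb)
 *		return NET_XMIT_CN | ret;
 *
 * A NULL return means the attached filters consumed or dropped the skb and
 * *ret carries the __NET_XMIT_* disposition to propagate.
 */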
3712 
3713 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
3714 {
3715 	if (!qe->info.block_index)
3716 		return 0;
3717 	return nla_put_u32(skb, attr_name, qe->info.block_index);
3718 }
3719 EXPORT_SYMBOL(tcf_qevent_dump);
3720 #endif
3721 
3722 static __net_init int tcf_net_init(struct net *net)
3723 {
3724 	struct tcf_net *tn = net_generic(net, tcf_net_id);
3725 
3726 	spin_lock_init(&tn->idr_lock);
3727 	idr_init(&tn->idr);
3728 	return 0;
3729 }
3730 
3731 static void __net_exit tcf_net_exit(struct net *net)
3732 {
3733 	struct tcf_net *tn = net_generic(net, tcf_net_id);
3734 
3735 	idr_destroy(&tn->idr);
3736 }
3737 
3738 static struct pernet_operations tcf_net_ops = {
3739 	.init = tcf_net_init,
3740 	.exit = tcf_net_exit,
3741 	.id   = &tcf_net_id,
3742 	.size = sizeof(struct tcf_net),
3743 };
3744 
3745 static int __init tc_filter_init(void)
3746 {
3747 	int err;
3748 
3749 	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3750 	if (!tc_filter_wq)
3751 		return -ENOMEM;
3752 
3753 	err = register_pernet_subsys(&tcf_net_ops);
3754 	if (err)
3755 		goto err_register_pernet_subsys;
3756 
3757 	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3758 		      RTNL_FLAG_DOIT_UNLOCKED);
3759 	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3760 		      RTNL_FLAG_DOIT_UNLOCKED);
3761 	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3762 		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3763 	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3764 	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3765 	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3766 		      tc_dump_chain, 0);
3767 
3768 	return 0;
3769 
3770 err_register_pernet_subsys:
3771 	destroy_workqueue(tc_filter_wq);
3772 	return err;
3773 }
3774 
3775 subsys_initcall(tc_filter_init);
3776