xref: /linux/net/sched/act_api.c (revision 79ac11393328fb1717d17c12e3c0eef0e9fa0647)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/act_api.c	Packet action API.
4  *
5  * Author:	Jamal Hadi Salim
6  */
7 
8 #include <linux/types.h>
9 #include <linux/kernel.h>
10 #include <linux/string.h>
11 #include <linux/errno.h>
12 #include <linux/slab.h>
13 #include <linux/skbuff.h>
14 #include <linux/init.h>
15 #include <linux/kmod.h>
16 #include <linux/err.h>
17 #include <linux/module.h>
18 #include <net/net_namespace.h>
19 #include <net/sock.h>
20 #include <net/sch_generic.h>
21 #include <net/pkt_cls.h>
22 #include <net/tc_act/tc_pedit.h>
23 #include <net/act_api.h>
24 #include <net/netlink.h>
25 #include <net/flow_offload.h>
26 #include <net/tc_wrapper.h>
27 
28 #ifdef CONFIG_INET
29 DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
30 EXPORT_SYMBOL_GPL(tcf_frag_xmit_count);
31 #endif
32 
33 int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
34 {
35 #ifdef CONFIG_INET
36 	if (static_branch_unlikely(&tcf_frag_xmit_count))
37 		return sch_frag_xmit_hook(skb, xmit);
38 #endif
39 
40 	return xmit(skb);
41 }
42 EXPORT_SYMBOL_GPL(tcf_dev_queue_xmit);
43 
44 static void tcf_action_goto_chain_exec(const struct tc_action *a,
45 				       struct tcf_result *res)
46 {
47 	const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);
48 
49 	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
50 }
51 
52 static void tcf_free_cookie_rcu(struct rcu_head *p)
53 {
54 	struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);
55 
56 	kfree(cookie->data);
57 	kfree(cookie);
58 }
59 
60 static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
61 				  struct tc_cookie *new_cookie)
62 {
63 	struct tc_cookie *old;
64 
65 	old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
66 	if (old)
67 		call_rcu(&old->rcu, tcf_free_cookie_rcu);
68 }
69 
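/* tcf_action_check_ctrlact() validates the control action value an action was
 * configured with.  For extended opcodes such as TC_ACT_GOTO_CHAIN the target
 * chain index lives in the low bits of the value (action & TC_ACT_EXT_VAL_MASK),
 * as the body below shows.  A minimal, purely illustrative caller sketch
 * (the variable names here are hypothetical, not taken from this file):
 *
 *	int ctrl = TC_ACT_GOTO_CHAIN | chain_index;
 *	struct tcf_chain *goto_ch = NULL;
 *
 *	err = tcf_action_check_ctrlact(ctrl, tp, &goto_ch, extack);
 *
 * On success with a goto_chain opcode, *goto_ch (the newchain argument) holds
 * a chain reference taken via tcf_chain_get_by_act(); callers typically
 * install it with tcf_action_set_ctrlact() and drop any previous chain with
 * tcf_chain_put_by_act().
 */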
70 int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
71 			     struct tcf_chain **newchain,
72 			     struct netlink_ext_ack *extack)
73 {
74 	int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
75 	u32 chain_index;
76 
77 	if (!opcode)
78 		ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
79 	else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
80 		ret = 0;
81 	if (ret) {
82 		NL_SET_ERR_MSG(extack, "invalid control action");
83 		goto end;
84 	}
85 
86 	if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
87 		chain_index = action & TC_ACT_EXT_VAL_MASK;
88 		if (!tp || !newchain) {
89 			ret = -EINVAL;
90 			NL_SET_ERR_MSG(extack,
91 				       "can't goto NULL proto/chain");
92 			goto end;
93 		}
94 		*newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
95 		if (!*newchain) {
96 			ret = -ENOMEM;
97 			NL_SET_ERR_MSG(extack,
98 				       "can't allocate goto_chain");
99 		}
100 	}
101 end:
102 	return ret;
103 }
104 EXPORT_SYMBOL(tcf_action_check_ctrlact);
105 
106 struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
107 					 struct tcf_chain *goto_chain)
108 {
109 	a->tcfa_action = action;
110 	goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
111 	return goto_chain;
112 }
113 EXPORT_SYMBOL(tcf_action_set_ctrlact);
114 
115 /* XXX: For standalone actions, we don't need an RCU grace period either, because
116  * actions are always connected to filters and filters are already destroyed in
117  * RCU callbacks, so after an RCU grace period actions are already disconnected
118  * from filters. Later readers cannot find us.
119  */
120 static void free_tcf(struct tc_action *p)
121 {
122 	struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);
123 
124 	free_percpu(p->cpu_bstats);
125 	free_percpu(p->cpu_bstats_hw);
126 	free_percpu(p->cpu_qstats);
127 
128 	tcf_set_action_cookie(&p->user_cookie, NULL);
129 	if (chain)
130 		tcf_chain_put_by_act(chain);
131 
132 	kfree(p);
133 }
134 
135 static void offload_action_hw_count_set(struct tc_action *act,
136 					u32 hw_count)
137 {
138 	act->in_hw_count = hw_count;
139 }
140 
141 static void offload_action_hw_count_inc(struct tc_action *act,
142 					u32 hw_count)
143 {
144 	act->in_hw_count += hw_count;
145 }
146 
147 static void offload_action_hw_count_dec(struct tc_action *act,
148 					u32 hw_count)
149 {
150 	act->in_hw_count = act->in_hw_count > hw_count ?
151 			   act->in_hw_count - hw_count : 0;
152 }
153 
154 static unsigned int tcf_offload_act_num_actions_single(struct tc_action *act)
155 {
156 	if (is_tcf_pedit(act))
157 		return tcf_pedit_nkeys(act);
158 	else
159 		return 1;
160 }
161 
162 static bool tc_act_skip_hw(u32 flags)
163 {
164 	return (flags & TCA_ACT_FLAGS_SKIP_HW) ? true : false;
165 }
166 
167 static bool tc_act_skip_sw(u32 flags)
168 {
169 	return (flags & TCA_ACT_FLAGS_SKIP_SW) ? true : false;
170 }
171 
172 /* SKIP_HW and SKIP_SW are mutually exclusive flags: invalid only if both are set. */
173 static bool tc_act_flags_valid(u32 flags)
174 {
175 	flags &= TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW;
176 
177 	return flags ^ (TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW);
178 }
179 
180 static int offload_action_init(struct flow_offload_action *fl_action,
181 			       struct tc_action *act,
182 			       enum offload_act_command  cmd,
183 			       struct netlink_ext_ack *extack)
184 {
185 	int err;
186 
187 	fl_action->extack = extack;
188 	fl_action->command = cmd;
189 	fl_action->index = act->tcfa_index;
190 	fl_action->cookie = (unsigned long)act;
191 
192 	if (act->ops->offload_act_setup) {
193 		spin_lock_bh(&act->tcfa_lock);
194 		err = act->ops->offload_act_setup(act, fl_action, NULL,
195 						  false, extack);
196 		spin_unlock_bh(&act->tcfa_lock);
197 		return err;
198 	}
199 
200 	return -EOPNOTSUPP;
201 }
202 
203 static int tcf_action_offload_cmd_ex(struct flow_offload_action *fl_act,
204 				     u32 *hw_count)
205 {
206 	int err;
207 
208 	err = flow_indr_dev_setup_offload(NULL, NULL, TC_SETUP_ACT,
209 					  fl_act, NULL, NULL);
210 	if (err < 0)
211 		return err;
212 
213 	if (hw_count)
214 		*hw_count = err;
215 
216 	return 0;
217 }
218 
219 static int tcf_action_offload_cmd_cb_ex(struct flow_offload_action *fl_act,
220 					u32 *hw_count,
221 					flow_indr_block_bind_cb_t *cb,
222 					void *cb_priv)
223 {
224 	int err;
225 
226 	err = cb(NULL, NULL, cb_priv, TC_SETUP_ACT, NULL, fl_act, NULL);
227 	if (err < 0)
228 		return err;
229 
230 	if (hw_count)
231 		*hw_count = 1;
232 
233 	return 0;
234 }
235 
236 static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
237 				  u32 *hw_count,
238 				  flow_indr_block_bind_cb_t *cb,
239 				  void *cb_priv)
240 {
241 	return cb ? tcf_action_offload_cmd_cb_ex(fl_act, hw_count,
242 						 cb, cb_priv) :
243 		    tcf_action_offload_cmd_ex(fl_act, hw_count);
244 }
245 
246 static int tcf_action_offload_add_ex(struct tc_action *action,
247 				     struct netlink_ext_ack *extack,
248 				     flow_indr_block_bind_cb_t *cb,
249 				     void *cb_priv)
250 {
251 	bool skip_sw = tc_act_skip_sw(action->tcfa_flags);
252 	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
253 		[0] = action,
254 	};
255 	struct flow_offload_action *fl_action;
256 	u32 in_hw_count = 0;
257 	int num, err = 0;
258 
259 	if (tc_act_skip_hw(action->tcfa_flags))
260 		return 0;
261 
262 	num = tcf_offload_act_num_actions_single(action);
263 	fl_action = offload_action_alloc(num);
264 	if (!fl_action)
265 		return -ENOMEM;
266 
267 	err = offload_action_init(fl_action, action, FLOW_ACT_REPLACE, extack);
268 	if (err)
269 		goto fl_err;
270 
271 	err = tc_setup_action(&fl_action->action, actions, 0, extack);
272 	if (err) {
273 		NL_SET_ERR_MSG_MOD(extack,
274 				   "Failed to setup tc actions for offload");
275 		goto fl_err;
276 	}
277 
278 	err = tcf_action_offload_cmd(fl_action, &in_hw_count, cb, cb_priv);
279 	if (!err)
280 		cb ? offload_action_hw_count_inc(action, in_hw_count) :
281 		     offload_action_hw_count_set(action, in_hw_count);
282 
283 	if (skip_sw && !tc_act_in_hw(action))
284 		err = -EINVAL;
285 
286 	tc_cleanup_offload_action(&fl_action->action);
287 
288 fl_err:
289 	kfree(fl_action);
290 
291 	return err;
292 }
293 
294 /* offload the tc action after it is inserted */
295 static int tcf_action_offload_add(struct tc_action *action,
296 				  struct netlink_ext_ack *extack)
297 {
298 	return tcf_action_offload_add_ex(action, extack, NULL, NULL);
299 }
300 
301 int tcf_action_update_hw_stats(struct tc_action *action)
302 {
303 	struct flow_offload_action fl_act = {};
304 	int err;
305 
306 	err = offload_action_init(&fl_act, action, FLOW_ACT_STATS, NULL);
307 	if (err)
308 		return err;
309 
310 	err = tcf_action_offload_cmd(&fl_act, NULL, NULL, NULL);
311 	if (!err) {
312 		preempt_disable();
313 		tcf_action_stats_update(action, fl_act.stats.bytes,
314 					fl_act.stats.pkts,
315 					fl_act.stats.drops,
316 					fl_act.stats.lastused,
317 					true);
318 		preempt_enable();
319 		action->used_hw_stats = fl_act.stats.used_hw_stats;
320 		action->used_hw_stats_valid = true;
321 	} else {
322 		return -EOPNOTSUPP;
323 	}
324 
325 	return 0;
326 }
327 EXPORT_SYMBOL(tcf_action_update_hw_stats);
328 
329 static int tcf_action_offload_del_ex(struct tc_action *action,
330 				     flow_indr_block_bind_cb_t *cb,
331 				     void *cb_priv)
332 {
333 	struct flow_offload_action fl_act = {};
334 	u32 in_hw_count = 0;
335 	int err = 0;
336 
337 	if (!tc_act_in_hw(action))
338 		return 0;
339 
340 	err = offload_action_init(&fl_act, action, FLOW_ACT_DESTROY, NULL);
341 	if (err)
342 		return err;
343 
344 	err = tcf_action_offload_cmd(&fl_act, &in_hw_count, cb, cb_priv);
345 	if (err < 0)
346 		return err;
347 
348 	if (!cb && action->in_hw_count != in_hw_count)
349 		return -EINVAL;
350 
351 	/* do not need to update hw state when deleting action */
352 	if (cb && in_hw_count)
353 		offload_action_hw_count_dec(action, in_hw_count);
354 
355 	return 0;
356 }
357 
358 static int tcf_action_offload_del(struct tc_action *action)
359 {
360 	return tcf_action_offload_del_ex(action, NULL, NULL);
361 }
362 
363 static void tcf_action_cleanup(struct tc_action *p)
364 {
365 	tcf_action_offload_del(p);
366 	if (p->ops->cleanup)
367 		p->ops->cleanup(p);
368 
369 	gen_kill_estimator(&p->tcfa_rate_est);
370 	free_tcf(p);
371 }
372 
373 static int __tcf_action_put(struct tc_action *p, bool bind)
374 {
375 	struct tcf_idrinfo *idrinfo = p->idrinfo;
376 
377 	if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
378 		if (bind)
379 			atomic_dec(&p->tcfa_bindcnt);
380 		idr_remove(&idrinfo->action_idr, p->tcfa_index);
381 		mutex_unlock(&idrinfo->lock);
382 
383 		tcf_action_cleanup(p);
384 		return 1;
385 	}
386 
387 	if (bind)
388 		atomic_dec(&p->tcfa_bindcnt);
389 
390 	return 0;
391 }
392 
393 static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
394 {
395 	int ret = 0;
396 
397 	/* Release with strict==1 and bind==0 is only called through the act API
398 	 * interface (classifiers always bind). The only case where an action with
399 	 * a positive reference count and a zero bind count can exist is when it
400 	 * was also created with the act API (unbinding the last classifier will
401 	 * destroy the action if it was created by a classifier). So the only case
402 	 * where the bind count can change after the initial check is when an
403 	 * unbound action is destroyed by the act API while a classifier binds to
404 	 * an action with the same id concurrently. This results either in the
405 	 * creation of a new action (same behavior as before), or in reuse of the
406 	 * existing action if the concurrent process increments the reference
407 	 * count before the action is deleted. Both scenarios are acceptable.
408 	 */
409 	if (p) {
410 		if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
411 			return -EPERM;
412 
413 		if (__tcf_action_put(p, bind))
414 			ret = ACT_P_DELETED;
415 	}
416 
417 	return ret;
418 }
419 
420 int tcf_idr_release(struct tc_action *a, bool bind)
421 {
422 	const struct tc_action_ops *ops = a->ops;
423 	int ret;
424 
425 	ret = __tcf_idr_release(a, bind, false);
426 	if (ret == ACT_P_DELETED)
427 		module_put(ops->owner);
428 	return ret;
429 }
430 EXPORT_SYMBOL(tcf_idr_release);
431 
432 static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
433 {
434 	struct tc_cookie *user_cookie;
435 	u32 cookie_len = 0;
436 
437 	rcu_read_lock();
438 	user_cookie = rcu_dereference(act->user_cookie);
439 
440 	if (user_cookie)
441 		cookie_len = nla_total_size(user_cookie->len);
442 	rcu_read_unlock();
443 
444 	return  nla_total_size(0) /* action number nested */
445 		+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
446 		+ cookie_len /* TCA_ACT_COOKIE */
447 		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS */
448 		+ nla_total_size(0) /* TCA_ACT_STATS nested */
449 		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
450 		/* TCA_STATS_BASIC */
451 		+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
452 		/* TCA_STATS_PKT64 */
453 		+ nla_total_size_64bit(sizeof(u64))
454 		/* TCA_STATS_QUEUE */
455 		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
456 		+ nla_total_size(0) /* TCA_ACT_OPTIONS nested */
457 		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
458 }
459 
460 static size_t tcf_action_full_attrs_size(size_t sz)
461 {
462 	return NLMSG_HDRLEN                     /* struct nlmsghdr */
463 		+ sizeof(struct tcamsg)
464 		+ nla_total_size(0)             /* TCA_ACT_TAB nested */
465 		+ sz;
466 }
467 
468 static size_t tcf_action_fill_size(const struct tc_action *act)
469 {
470 	size_t sz = tcf_action_shared_attrs_size(act);
471 
472 	if (act->ops->get_fill_size)
473 		return act->ops->get_fill_size(act) + sz;
474 	return sz;
475 }
476 
477 static int
478 tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a, bool from_act)
479 {
480 	unsigned char *b = skb_tail_pointer(skb);
481 	struct tc_cookie *cookie;
482 
483 	if (nla_put_string(skb, TCA_ACT_KIND, a->ops->kind))
484 		goto nla_put_failure;
485 	if (tcf_action_copy_stats(skb, a, 0))
486 		goto nla_put_failure;
487 	if (from_act && nla_put_u32(skb, TCA_ACT_INDEX, a->tcfa_index))
488 		goto nla_put_failure;
489 
490 	rcu_read_lock();
491 	cookie = rcu_dereference(a->user_cookie);
492 	if (cookie) {
493 		if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
494 			rcu_read_unlock();
495 			goto nla_put_failure;
496 		}
497 	}
498 	rcu_read_unlock();
499 
500 	return 0;
501 
502 nla_put_failure:
503 	nlmsg_trim(skb, b);
504 	return -1;
505 }
506 
507 static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
508 			   struct netlink_callback *cb)
509 {
510 	int err = 0, index = -1, s_i = 0, n_i = 0;
511 	u32 act_flags = cb->args[2];
512 	unsigned long jiffy_since = cb->args[3];
513 	struct nlattr *nest;
514 	struct idr *idr = &idrinfo->action_idr;
515 	struct tc_action *p;
516 	unsigned long id = 1;
517 	unsigned long tmp;
518 
519 	mutex_lock(&idrinfo->lock);
520 
521 	s_i = cb->args[0];
522 
523 	idr_for_each_entry_ul(idr, p, tmp, id) {
524 		index++;
525 		if (index < s_i)
526 			continue;
527 		if (IS_ERR(p))
528 			continue;
529 
530 		if (jiffy_since &&
531 		    time_after(jiffy_since,
532 			       (unsigned long)p->tcfa_tm.lastuse))
533 			continue;
534 
535 		tcf_action_update_hw_stats(p);
536 
537 		nest = nla_nest_start_noflag(skb, n_i);
538 		if (!nest) {
539 			index--;
540 			goto nla_put_failure;
541 		}
542 		err = (act_flags & TCA_ACT_FLAG_TERSE_DUMP) ?
543 			tcf_action_dump_terse(skb, p, true) :
544 			tcf_action_dump_1(skb, p, 0, 0);
545 		if (err < 0) {
546 			index--;
547 			nlmsg_trim(skb, nest);
548 			goto done;
549 		}
550 		nla_nest_end(skb, nest);
551 		n_i++;
552 		if (!(act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON) &&
553 		    n_i >= TCA_ACT_MAX_PRIO)
554 			goto done;
555 	}
556 done:
557 	if (index >= 0)
558 		cb->args[0] = index + 1;
559 
560 	mutex_unlock(&idrinfo->lock);
561 	if (n_i) {
562 		if (act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON)
563 			cb->args[1] = n_i;
564 	}
565 	return n_i;
566 
567 nla_put_failure:
568 	nla_nest_cancel(skb, nest);
569 	goto done;
570 }
571 
572 static int tcf_idr_release_unsafe(struct tc_action *p)
573 {
574 	if (atomic_read(&p->tcfa_bindcnt) > 0)
575 		return -EPERM;
576 
577 	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
578 		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
579 		tcf_action_cleanup(p);
580 		return ACT_P_DELETED;
581 	}
582 
583 	return 0;
584 }
585 
586 static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
587 			  const struct tc_action_ops *ops,
588 			  struct netlink_ext_ack *extack)
589 {
590 	struct nlattr *nest;
591 	int n_i = 0;
592 	int ret = -EINVAL;
593 	struct idr *idr = &idrinfo->action_idr;
594 	struct tc_action *p;
595 	unsigned long id = 1;
596 	unsigned long tmp;
597 
598 	nest = nla_nest_start_noflag(skb, 0);
599 	if (nest == NULL)
600 		goto nla_put_failure;
601 	if (nla_put_string(skb, TCA_ACT_KIND, ops->kind))
602 		goto nla_put_failure;
603 
604 	ret = 0;
605 	mutex_lock(&idrinfo->lock);
606 	idr_for_each_entry_ul(idr, p, tmp, id) {
607 		if (IS_ERR(p))
608 			continue;
609 		ret = tcf_idr_release_unsafe(p);
610 		if (ret == ACT_P_DELETED)
611 			module_put(ops->owner);
612 		else if (ret < 0)
613 			break;
614 		n_i++;
615 	}
616 	mutex_unlock(&idrinfo->lock);
617 	if (ret < 0) {
618 		if (n_i)
619 			NL_SET_ERR_MSG(extack, "Unable to flush all TC actions");
620 		else
621 			goto nla_put_failure;
622 	}
623 
624 	ret = nla_put_u32(skb, TCA_FCNT, n_i);
625 	if (ret)
626 		goto nla_put_failure;
627 	nla_nest_end(skb, nest);
628 
629 	return n_i;
630 nla_put_failure:
631 	nla_nest_cancel(skb, nest);
632 	return ret;
633 }
634 
635 int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
636 		       struct netlink_callback *cb, int type,
637 		       const struct tc_action_ops *ops,
638 		       struct netlink_ext_ack *extack)
639 {
640 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
641 
642 	if (type == RTM_DELACTION) {
643 		return tcf_del_walker(idrinfo, skb, ops, extack);
644 	} else if (type == RTM_GETACTION) {
645 		return tcf_dump_walker(idrinfo, skb, cb);
646 	} else {
647 		WARN(1, "tcf_generic_walker: unknown command %d\n", type);
648 		NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
649 		return -EINVAL;
650 	}
651 }
652 EXPORT_SYMBOL(tcf_generic_walker);
653 
654 int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
655 {
656 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
657 	struct tc_action *p;
658 
659 	mutex_lock(&idrinfo->lock);
660 	p = idr_find(&idrinfo->action_idr, index);
661 	if (IS_ERR(p))
662 		p = NULL;
663 	else if (p)
664 		refcount_inc(&p->tcfa_refcnt);
665 	mutex_unlock(&idrinfo->lock);
666 
667 	if (p) {
668 		*a = p;
669 		return true;
670 	}
671 	return false;
672 }
673 EXPORT_SYMBOL(tcf_idr_search);
674 
675 static int __tcf_generic_walker(struct net *net, struct sk_buff *skb,
676 				struct netlink_callback *cb, int type,
677 				const struct tc_action_ops *ops,
678 				struct netlink_ext_ack *extack)
679 {
680 	struct tc_action_net *tn = net_generic(net, ops->net_id);
681 
682 	if (unlikely(ops->walk))
683 		return ops->walk(net, skb, cb, type, ops, extack);
684 
685 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
686 }
687 
688 static int __tcf_idr_search(struct net *net,
689 			    const struct tc_action_ops *ops,
690 			    struct tc_action **a, u32 index)
691 {
692 	struct tc_action_net *tn = net_generic(net, ops->net_id);
693 
694 	if (unlikely(ops->lookup))
695 		return ops->lookup(net, a, index);
696 
697 	return tcf_idr_search(tn, a, index);
698 }
699 
700 static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
701 {
702 	struct tc_action *p;
703 	int ret = 0;
704 
705 	mutex_lock(&idrinfo->lock);
706 	p = idr_find(&idrinfo->action_idr, index);
707 	if (!p) {
708 		mutex_unlock(&idrinfo->lock);
709 		return -ENOENT;
710 	}
711 
712 	if (!atomic_read(&p->tcfa_bindcnt)) {
713 		if (refcount_dec_and_test(&p->tcfa_refcnt)) {
714 			struct module *owner = p->ops->owner;
715 
716 			WARN_ON(p != idr_remove(&idrinfo->action_idr,
717 						p->tcfa_index));
718 			mutex_unlock(&idrinfo->lock);
719 
720 			tcf_action_cleanup(p);
721 			module_put(owner);
722 			return 0;
723 		}
724 		ret = 0;
725 	} else {
726 		ret = -EPERM;
727 	}
728 
729 	mutex_unlock(&idrinfo->lock);
730 	return ret;
731 }
732 
733 int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
734 		   struct tc_action **a, const struct tc_action_ops *ops,
735 		   int bind, bool cpustats, u32 flags)
736 {
737 	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
738 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
739 	int err = -ENOMEM;
740 
741 	if (unlikely(!p))
742 		return -ENOMEM;
743 	refcount_set(&p->tcfa_refcnt, 1);
744 	if (bind)
745 		atomic_set(&p->tcfa_bindcnt, 1);
746 
747 	if (cpustats) {
748 		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
749 		if (!p->cpu_bstats)
750 			goto err1;
751 		p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
752 		if (!p->cpu_bstats_hw)
753 			goto err2;
754 		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
755 		if (!p->cpu_qstats)
756 			goto err3;
757 	}
758 	gnet_stats_basic_sync_init(&p->tcfa_bstats);
759 	gnet_stats_basic_sync_init(&p->tcfa_bstats_hw);
760 	spin_lock_init(&p->tcfa_lock);
761 	p->tcfa_index = index;
762 	p->tcfa_tm.install = jiffies;
763 	p->tcfa_tm.lastuse = jiffies;
764 	p->tcfa_tm.firstuse = 0;
765 	p->tcfa_flags = flags;
766 	if (est) {
767 		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
768 					&p->tcfa_rate_est,
769 					&p->tcfa_lock, false, est);
770 		if (err)
771 			goto err4;
772 	}
773 
774 	p->idrinfo = idrinfo;
775 	__module_get(ops->owner);
776 	p->ops = ops;
777 	*a = p;
778 	return 0;
779 err4:
780 	free_percpu(p->cpu_qstats);
781 err3:
782 	free_percpu(p->cpu_bstats_hw);
783 err2:
784 	free_percpu(p->cpu_bstats);
785 err1:
786 	kfree(p);
787 	return err;
788 }
789 EXPORT_SYMBOL(tcf_idr_create);
790 
791 int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
792 			      struct nlattr *est, struct tc_action **a,
793 			      const struct tc_action_ops *ops, int bind,
794 			      u32 flags)
795 {
796 	/* Set cpustats according to the action's flags. */
797 	return tcf_idr_create(tn, index, est, a, ops, bind,
798 			      !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
799 }
800 EXPORT_SYMBOL(tcf_idr_create_from_flags);
801 
802 /* Clean up an idr index that was allocated but not initialized. */
803 
804 void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
805 {
806 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
807 
808 	mutex_lock(&idrinfo->lock);
809 	/* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
810 	WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
811 	mutex_unlock(&idrinfo->lock);
812 }
813 EXPORT_SYMBOL(tcf_idr_cleanup);
814 
815 /* Check if an action with the specified index exists. If the action is found,
816  * increment its reference and bind counters and return 1. Otherwise insert a
817  * temporary error pointer (to prevent concurrent users from inserting actions
818  * with the same index) and return 0.
819  */
820 
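/* A simplified sketch of the typical caller pattern in an action's ->init()
 * callback (the names "tn", "est", "flags" and the surrounding error handling
 * are illustrative, not taken from this file):
 *
 *	err = tcf_idr_check_alloc(tn, &index, a, bind);
 *	if (err < 0)
 *		return err;
 *	if (!err) {
 *		// index reserved for us: allocate the new action
 *		err = tcf_idr_create(tn, index, est, a, ops, bind, true, flags);
 *		if (err) {
 *			tcf_idr_cleanup(tn, index);
 *			return err;
 *		}
 *		ret = ACT_P_CREATED;
 *	} else {
 *		// existing action: reference (and bind) counts already taken
 *		if (bind)
 *			return 0;
 *		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
 *			tcf_idr_release(*a, bind);
 *			return -EEXIST;
 *		}
 *	}
 *
 * A newly created action only becomes visible to lookups once
 * tcf_idr_insert_many() replaces the ERR_PTR(-EBUSY) placeholder.
 */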
821 int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
822 			struct tc_action **a, int bind)
823 {
824 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
825 	struct tc_action *p;
826 	int ret;
827 
828 again:
829 	mutex_lock(&idrinfo->lock);
830 	if (*index) {
831 		p = idr_find(&idrinfo->action_idr, *index);
832 		if (IS_ERR(p)) {
833 			/* This means that another process allocated
834 			 * index but did not assign the pointer yet.
835 			 */
836 			mutex_unlock(&idrinfo->lock);
837 			goto again;
838 		}
839 
840 		if (p) {
841 			refcount_inc(&p->tcfa_refcnt);
842 			if (bind)
843 				atomic_inc(&p->tcfa_bindcnt);
844 			*a = p;
845 			ret = 1;
846 		} else {
847 			*a = NULL;
848 			ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
849 					    *index, GFP_KERNEL);
850 			if (!ret)
851 				idr_replace(&idrinfo->action_idr,
852 					    ERR_PTR(-EBUSY), *index);
853 		}
854 	} else {
855 		*index = 1;
856 		*a = NULL;
857 		ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
858 				    UINT_MAX, GFP_KERNEL);
859 		if (!ret)
860 			idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
861 				    *index);
862 	}
863 	mutex_unlock(&idrinfo->lock);
864 	return ret;
865 }
866 EXPORT_SYMBOL(tcf_idr_check_alloc);
867 
868 void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
869 			 struct tcf_idrinfo *idrinfo)
870 {
871 	struct idr *idr = &idrinfo->action_idr;
872 	struct tc_action *p;
873 	int ret;
874 	unsigned long id = 1;
875 	unsigned long tmp;
876 
877 	idr_for_each_entry_ul(idr, p, tmp, id) {
878 		ret = __tcf_idr_release(p, false, true);
879 		if (ret == ACT_P_DELETED)
880 			module_put(ops->owner);
881 		else if (ret < 0)
882 			return;
883 	}
884 	idr_destroy(&idrinfo->action_idr);
885 }
886 EXPORT_SYMBOL(tcf_idrinfo_destroy);
887 
888 static LIST_HEAD(act_base);
889 static DEFINE_RWLOCK(act_mod_lock);
890 /* Since the act ops id is stored in the pernet subsystem list,
891  * there is no way to walk through only the action subsystems,
892  * so we keep the tc action pernet ops ids in a separate list
893  * for reoffload to walk through.
894  */
895 static LIST_HEAD(act_pernet_id_list);
896 static DEFINE_MUTEX(act_id_mutex);
897 struct tc_act_pernet_id {
898 	struct list_head list;
899 	unsigned int id;
900 };
901 
902 static int tcf_pernet_add_id_list(unsigned int id)
903 {
904 	struct tc_act_pernet_id *id_ptr;
905 	int ret = 0;
906 
907 	mutex_lock(&act_id_mutex);
908 	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
909 		if (id_ptr->id == id) {
910 			ret = -EEXIST;
911 			goto err_out;
912 		}
913 	}
914 
915 	id_ptr = kzalloc(sizeof(*id_ptr), GFP_KERNEL);
916 	if (!id_ptr) {
917 		ret = -ENOMEM;
918 		goto err_out;
919 	}
920 	id_ptr->id = id;
921 
922 	list_add_tail(&id_ptr->list, &act_pernet_id_list);
923 
924 err_out:
925 	mutex_unlock(&act_id_mutex);
926 	return ret;
927 }
928 
929 static void tcf_pernet_del_id_list(unsigned int id)
930 {
931 	struct tc_act_pernet_id *id_ptr;
932 
933 	mutex_lock(&act_id_mutex);
934 	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
935 		if (id_ptr->id == id) {
936 			list_del(&id_ptr->list);
937 			kfree(id_ptr);
938 			break;
939 		}
940 	}
941 	mutex_unlock(&act_id_mutex);
942 }
943 
944 int tcf_register_action(struct tc_action_ops *act,
945 			struct pernet_operations *ops)
946 {
947 	struct tc_action_ops *a;
948 	int ret;
949 
950 	if (!act->act || !act->dump || !act->init)
951 		return -EINVAL;
952 
953 	/* We have to register pernet ops before making the action ops visible,
954 	 * otherwise tcf_action_init_1() could get a partially initialized
955 	 * netns.
956 	 */
957 	ret = register_pernet_subsys(ops);
958 	if (ret)
959 		return ret;
960 
961 	if (ops->id) {
962 		ret = tcf_pernet_add_id_list(*ops->id);
963 		if (ret)
964 			goto err_id;
965 	}
966 
967 	write_lock(&act_mod_lock);
968 	list_for_each_entry(a, &act_base, head) {
969 		if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
970 			ret = -EEXIST;
971 			goto err_out;
972 		}
973 	}
974 	list_add_tail(&act->head, &act_base);
975 	write_unlock(&act_mod_lock);
976 
977 	return 0;
978 
979 err_out:
980 	write_unlock(&act_mod_lock);
981 	if (ops->id)
982 		tcf_pernet_del_id_list(*ops->id);
983 err_id:
984 	unregister_pernet_subsys(ops);
985 	return ret;
986 }
987 EXPORT_SYMBOL(tcf_register_action);
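/* A minimal, illustrative sketch of how an action module typically registers
 * itself from its module_init() (the "foo" names are hypothetical and not
 * defined in this file; real modules such as act_gact follow the same shape):
 *
 *	static struct tc_action_ops act_foo_ops = {
 *		.kind	= "foo",
 *		.id	= TCA_ID_FOO,
 *		.owner	= THIS_MODULE,
 *		.act	= tcf_foo_act,
 *		.dump	= tcf_foo_dump,
 *		.init	= tcf_foo_init,
 *		.size	= sizeof(struct tcf_foo),
 *	};
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init		= foo_init_net,
 *		.exit_batch	= foo_exit_net,
 *		.id		= &act_foo_ops.net_id,
 *		.size		= sizeof(struct tc_action_net),
 *	};
 *
 *	static int __init foo_init_module(void)
 *	{
 *		return tcf_register_action(&act_foo_ops, &foo_net_ops);
 *	}
 *
 * The matching module_exit() handler calls tcf_unregister_action() with the
 * same two structures.
 */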
988 
989 int tcf_unregister_action(struct tc_action_ops *act,
990 			  struct pernet_operations *ops)
991 {
992 	struct tc_action_ops *a;
993 	int err = -ENOENT;
994 
995 	write_lock(&act_mod_lock);
996 	list_for_each_entry(a, &act_base, head) {
997 		if (a == act) {
998 			list_del(&act->head);
999 			err = 0;
1000 			break;
1001 		}
1002 	}
1003 	write_unlock(&act_mod_lock);
1004 	if (!err) {
1005 		unregister_pernet_subsys(ops);
1006 		if (ops->id)
1007 			tcf_pernet_del_id_list(*ops->id);
1008 	}
1009 	return err;
1010 }
1011 EXPORT_SYMBOL(tcf_unregister_action);
1012 
1013 /* lookup by name */
1014 static struct tc_action_ops *tc_lookup_action_n(char *kind)
1015 {
1016 	struct tc_action_ops *a, *res = NULL;
1017 
1018 	if (kind) {
1019 		read_lock(&act_mod_lock);
1020 		list_for_each_entry(a, &act_base, head) {
1021 			if (strcmp(kind, a->kind) == 0) {
1022 				if (try_module_get(a->owner))
1023 					res = a;
1024 				break;
1025 			}
1026 		}
1027 		read_unlock(&act_mod_lock);
1028 	}
1029 	return res;
1030 }
1031 
1032 /* lookup by nlattr */
1033 static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
1034 {
1035 	struct tc_action_ops *a, *res = NULL;
1036 
1037 	if (kind) {
1038 		read_lock(&act_mod_lock);
1039 		list_for_each_entry(a, &act_base, head) {
1040 			if (nla_strcmp(kind, a->kind) == 0) {
1041 				if (try_module_get(a->owner))
1042 					res = a;
1043 				break;
1044 			}
1045 		}
1046 		read_unlock(&act_mod_lock);
1047 	}
1048 	return res;
1049 }
1050 
1051 /* TCA_ACT_MAX_PRIO is 32, so a jump can skip at most 32 actions */
1052 #define TCA_ACT_MAX_PRIO_MASK 0x1FF
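/* tcf_action_exec() runs the action list for a packet.  Control opcodes
 * returned by an action steer the walk: TC_ACT_JUMP skips the number of
 * subsequent actions encoded in its low bits (masked with
 * TCA_ACT_MAX_PRIO_MASK), TC_ACT_REPEAT re-runs the same action (bounded by
 * repeat_ttl), TC_ACT_GOTO_CHAIN sets res->goto_tp and stops the walk so the
 * caller can reclassify on that chain, TC_ACT_PIPE continues with the next
 * action, and any other value ends the walk and is returned.  As a purely
 * illustrative example, an action returning (TC_ACT_JUMP | 2) makes the
 * pipeline skip the next two actions.
 */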
1053 int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
1054 		    int nr_actions, struct tcf_result *res)
1055 {
1056 	u32 jmp_prgcnt = 0;
1057 	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
1058 	int i;
1059 	int ret = TC_ACT_OK;
1060 
1061 	if (skb_skip_tc_classify(skb))
1062 		return TC_ACT_OK;
1063 
1064 restart_act_graph:
1065 	for (i = 0; i < nr_actions; i++) {
1066 		const struct tc_action *a = actions[i];
1067 		int repeat_ttl;
1068 
1069 		if (jmp_prgcnt > 0) {
1070 			jmp_prgcnt -= 1;
1071 			continue;
1072 		}
1073 
1074 		if (tc_act_skip_sw(a->tcfa_flags))
1075 			continue;
1076 
1077 		repeat_ttl = 32;
1078 repeat:
1079 		ret = tc_act(skb, a, res);
1080 		if (unlikely(ret == TC_ACT_REPEAT)) {
1081 			if (--repeat_ttl != 0)
1082 				goto repeat;
1083 			/* suspicious opcode, stop pipeline */
1084 			net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n");
1085 			return TC_ACT_OK;
1086 		}
1087 		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
1088 			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
1089 			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
1090 				/* faulty opcode, stop pipeline */
1091 				return TC_ACT_OK;
1092 			} else {
1093 				jmp_ttl -= 1;
1094 				if (jmp_ttl > 0)
1095 					goto restart_act_graph;
1096 				else /* faulty graph, stop pipeline */
1097 					return TC_ACT_OK;
1098 			}
1099 		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
1100 			if (unlikely(!rcu_access_pointer(a->goto_chain))) {
1101 				tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
1102 				return TC_ACT_SHOT;
1103 			}
1104 			tcf_action_goto_chain_exec(a, res);
1105 		}
1106 
1107 		if (ret != TC_ACT_PIPE)
1108 			break;
1109 	}
1110 
1111 	return ret;
1112 }
1113 EXPORT_SYMBOL(tcf_action_exec);
1114 
1115 int tcf_action_destroy(struct tc_action *actions[], int bind)
1116 {
1117 	const struct tc_action_ops *ops;
1118 	struct tc_action *a;
1119 	int ret = 0, i;
1120 
1121 	tcf_act_for_each_action(i, a, actions) {
1122 		actions[i] = NULL;
1123 		ops = a->ops;
1124 		ret = __tcf_idr_release(a, bind, true);
1125 		if (ret == ACT_P_DELETED)
1126 			module_put(ops->owner);
1127 		else if (ret < 0)
1128 			return ret;
1129 	}
1130 	return ret;
1131 }
1132 
1133 static int tcf_action_put(struct tc_action *p)
1134 {
1135 	return __tcf_action_put(p, false);
1136 }
1137 
1138 static void tcf_action_put_many(struct tc_action *actions[])
1139 {
1140 	struct tc_action *a;
1141 	int i;
1142 
1143 	tcf_act_for_each_action(i, a, actions) {
1144 		const struct tc_action_ops *ops = a->ops;
1145 		if (tcf_action_put(a))
1146 			module_put(ops->owner);
1147 	}
1148 }
1149 
1150 static void tca_put_bound_many(struct tc_action *actions[], int init_res[])
1151 {
1152 	struct tc_action *a;
1153 	int i;
1154 
1155 	tcf_act_for_each_action(i, a, actions) {
1156 		const struct tc_action_ops *ops = a->ops;
1157 
1158 		if (init_res[i] == ACT_P_CREATED)
1159 			continue;
1160 
1161 		if (tcf_action_put(a))
1162 			module_put(ops->owner);
1163 	}
1164 }
1165 
1166 int
1167 tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
1168 {
1169 	return a->ops->dump(skb, a, bind, ref);
1170 }
1171 
1172 int
1173 tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
1174 {
1175 	int err = -EINVAL;
1176 	unsigned char *b = skb_tail_pointer(skb);
1177 	struct nlattr *nest;
1178 	u32 flags;
1179 
1180 	if (tcf_action_dump_terse(skb, a, false))
1181 		goto nla_put_failure;
1182 
1183 	if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
1184 	    nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
1185 			       a->hw_stats, TCA_ACT_HW_STATS_ANY))
1186 		goto nla_put_failure;
1187 
1188 	if (a->used_hw_stats_valid &&
1189 	    nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
1190 			       a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
1191 		goto nla_put_failure;
1192 
1193 	flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK;
1194 	if (flags &&
1195 	    nla_put_bitfield32(skb, TCA_ACT_FLAGS,
1196 			       flags, flags))
1197 		goto nla_put_failure;
1198 
1199 	if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count))
1200 		goto nla_put_failure;
1201 
1202 	nest = nla_nest_start_noflag(skb, TCA_ACT_OPTIONS);
1203 	if (nest == NULL)
1204 		goto nla_put_failure;
1205 	err = tcf_action_dump_old(skb, a, bind, ref);
1206 	if (err > 0) {
1207 		nla_nest_end(skb, nest);
1208 		return err;
1209 	}
1210 
1211 nla_put_failure:
1212 	nlmsg_trim(skb, b);
1213 	return -1;
1214 }
1215 EXPORT_SYMBOL(tcf_action_dump_1);
1216 
1217 int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
1218 		    int bind, int ref, bool terse)
1219 {
1220 	struct tc_action *a;
1221 	int err = -EINVAL, i;
1222 	struct nlattr *nest;
1223 
1224 	tcf_act_for_each_action(i, a, actions) {
1225 		nest = nla_nest_start_noflag(skb, i + 1);
1226 		if (nest == NULL)
1227 			goto nla_put_failure;
1228 		err = terse ? tcf_action_dump_terse(skb, a, false) :
1229 			tcf_action_dump_1(skb, a, bind, ref);
1230 		if (err < 0)
1231 			goto errout;
1232 		nla_nest_end(skb, nest);
1233 	}
1234 
1235 	return 0;
1236 
1237 nla_put_failure:
1238 	err = -EINVAL;
1239 errout:
1240 	nla_nest_cancel(skb, nest);
1241 	return err;
1242 }
1243 
1244 static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
1245 {
1246 	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
1247 	if (!c)
1248 		return NULL;
1249 
1250 	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
1251 	if (!c->data) {
1252 		kfree(c);
1253 		return NULL;
1254 	}
1255 	c->len = nla_len(tb[TCA_ACT_COOKIE]);
1256 
1257 	return c;
1258 }
1259 
1260 static u8 tcf_action_hw_stats_get(struct nlattr *hw_stats_attr)
1261 {
1262 	struct nla_bitfield32 hw_stats_bf;
1263 
1264 	/* If the user did not pass the attr, that means they do
1265 	 * not care about the type. Return "any" in that case,
1266 	 * which enables all supported types.
1267 	 */
1268 	if (!hw_stats_attr)
1269 		return TCA_ACT_HW_STATS_ANY;
1270 	hw_stats_bf = nla_get_bitfield32(hw_stats_attr);
1271 	return hw_stats_bf.value;
1272 }
1273 
1274 static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
1275 	[TCA_ACT_KIND]		= { .type = NLA_STRING },
1276 	[TCA_ACT_INDEX]		= { .type = NLA_U32 },
1277 	[TCA_ACT_COOKIE]	= { .type = NLA_BINARY,
1278 				    .len = TC_COOKIE_MAX_SIZE },
1279 	[TCA_ACT_OPTIONS]	= { .type = NLA_NESTED },
1280 	[TCA_ACT_FLAGS]		= NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS |
1281 							TCA_ACT_FLAGS_SKIP_HW |
1282 							TCA_ACT_FLAGS_SKIP_SW),
1283 	[TCA_ACT_HW_STATS]	= NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
1284 };
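/* For reference, RTM_NEWACTION requests carry one numbered nest per action
 * (1..TCA_ACT_MAX_PRIO) under TCA_ACT_TAB; each nest is validated against
 * tcf_action_policy above (RTM_DELACTION/RTM_GETACTION use the same nesting
 * but identify actions by TCA_ACT_KIND plus TCA_ACT_INDEX).  A sketch of the
 * layout (the attribute values shown are illustrative):
 *
 *	TCA_ACT_TAB
 *	  [1]				first action (nested)
 *	    TCA_ACT_KIND	"gact"
 *	    TCA_ACT_OPTIONS		kind-specific attributes (nested)
 *	    TCA_ACT_COOKIE		optional, up to TC_COOKIE_MAX_SIZE bytes
 *	    TCA_ACT_FLAGS		optional bitfield32
 *	    TCA_ACT_HW_STATS		optional bitfield32
 *	  [2]				second action (nested)
 *	    ...
 */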
1285 
1286 void tcf_idr_insert_many(struct tc_action *actions[])
1287 {
1288 	struct tc_action *a;
1289 	int i;
1290 
1291 	tcf_act_for_each_action(i, a, actions) {
1292 		struct tcf_idrinfo *idrinfo;
1293 
1294 		idrinfo = a->idrinfo;
1295 		mutex_lock(&idrinfo->lock);
1296 		/* Replace the ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc if
1297 		 * the action was just created; otherwise this is just a no-op.
1298 		 */
1299 		idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
1300 		mutex_unlock(&idrinfo->lock);
1301 	}
1302 }
1303 
1304 struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, bool police,
1305 					 bool rtnl_held,
1306 					 struct netlink_ext_ack *extack)
1307 {
1308 	struct nlattr *tb[TCA_ACT_MAX + 1];
1309 	struct tc_action_ops *a_o;
1310 	char act_name[IFNAMSIZ];
1311 	struct nlattr *kind;
1312 	int err;
1313 
1314 	if (!police) {
1315 		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1316 						  tcf_action_policy, extack);
1317 		if (err < 0)
1318 			return ERR_PTR(err);
1319 		err = -EINVAL;
1320 		kind = tb[TCA_ACT_KIND];
1321 		if (!kind) {
1322 			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
1323 			return ERR_PTR(err);
1324 		}
1325 		if (nla_strscpy(act_name, kind, IFNAMSIZ) < 0) {
1326 			NL_SET_ERR_MSG(extack, "TC action name too long");
1327 			return ERR_PTR(err);
1328 		}
1329 	} else {
1330 		if (strscpy(act_name, "police", IFNAMSIZ) < 0) {
1331 			NL_SET_ERR_MSG(extack, "TC action name too long");
1332 			return ERR_PTR(-EINVAL);
1333 		}
1334 	}
1335 
1336 	a_o = tc_lookup_action_n(act_name);
1337 	if (a_o == NULL) {
1338 #ifdef CONFIG_MODULES
1339 		if (rtnl_held)
1340 			rtnl_unlock();
1341 		request_module("act_%s", act_name);
1342 		if (rtnl_held)
1343 			rtnl_lock();
1344 
1345 		a_o = tc_lookup_action_n(act_name);
1346 
1347 		/* We dropped the RTNL semaphore in order to
1348 		 * perform the module load.  So, even if we
1349 		 * succeeded in loading the module we have to
1350 		 * tell the caller to replay the request.  We
1351 		 * indicate this using -EAGAIN.
1352 		 */
1353 		if (a_o != NULL) {
1354 			module_put(a_o->owner);
1355 			return ERR_PTR(-EAGAIN);
1356 		}
1357 #endif
1358 		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
1359 		return ERR_PTR(-ENOENT);
1360 	}
1361 
1362 	return a_o;
1363 }
1364 
1365 struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
1366 				    struct nlattr *nla, struct nlattr *est,
1367 				    struct tc_action_ops *a_o, int *init_res,
1368 				    u32 flags, struct netlink_ext_ack *extack)
1369 {
1370 	bool police = flags & TCA_ACT_FLAGS_POLICE;
1371 	struct nla_bitfield32 userflags = { 0, 0 };
1372 	struct tc_cookie *user_cookie = NULL;
1373 	u8 hw_stats = TCA_ACT_HW_STATS_ANY;
1374 	struct nlattr *tb[TCA_ACT_MAX + 1];
1375 	struct tc_action *a;
1376 	int err;
1377 
1378 	/* backward compatibility for policer */
1379 	if (!police) {
1380 		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1381 						  tcf_action_policy, extack);
1382 		if (err < 0)
1383 			return ERR_PTR(err);
1384 		if (tb[TCA_ACT_COOKIE]) {
1385 			user_cookie = nla_memdup_cookie(tb);
1386 			if (!user_cookie) {
1387 				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
1388 				err = -ENOMEM;
1389 				goto err_out;
1390 			}
1391 		}
1392 		hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
1393 		if (tb[TCA_ACT_FLAGS]) {
1394 			userflags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
1395 			if (!tc_act_flags_valid(userflags.value)) {
1396 				err = -EINVAL;
1397 				goto err_out;
1398 			}
1399 		}
1400 
1401 		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, tp,
1402 				userflags.value | flags, extack);
1403 	} else {
1404 		err = a_o->init(net, nla, est, &a, tp, userflags.value | flags,
1405 				extack);
1406 	}
1407 	if (err < 0)
1408 		goto err_out;
1409 	*init_res = err;
1410 
1411 	if (!police && tb[TCA_ACT_COOKIE])
1412 		tcf_set_action_cookie(&a->user_cookie, user_cookie);
1413 
1414 	if (!police)
1415 		a->hw_stats = hw_stats;
1416 
1417 	return a;
1418 
1419 err_out:
1420 	if (user_cookie) {
1421 		kfree(user_cookie->data);
1422 		kfree(user_cookie);
1423 	}
1424 	return ERR_PTR(err);
1425 }
1426 
1427 static bool tc_act_bind(u32 flags)
1428 {
1429 	return !!(flags & TCA_ACT_FLAGS_BIND);
1430 }
1431 
1432 /* Returns the number of initialized actions or a negative error. */
1433 
1434 int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
1435 		    struct nlattr *est, struct tc_action *actions[],
1436 		    int init_res[], size_t *attr_size,
1437 		    u32 flags, u32 fl_flags,
1438 		    struct netlink_ext_ack *extack)
1439 {
1440 	struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
1441 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
1442 	struct tc_action *act;
1443 	size_t sz = 0;
1444 	int err;
1445 	int i;
1446 
1447 	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
1448 					  extack);
1449 	if (err < 0)
1450 		return err;
1451 
1452 	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1453 		struct tc_action_ops *a_o;
1454 
1455 		a_o = tc_action_load_ops(tb[i], flags & TCA_ACT_FLAGS_POLICE,
1456 					 !(flags & TCA_ACT_FLAGS_NO_RTNL),
1457 					 extack);
1458 		if (IS_ERR(a_o)) {
1459 			err = PTR_ERR(a_o);
1460 			goto err_mod;
1461 		}
1462 		ops[i - 1] = a_o;
1463 	}
1464 
1465 	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1466 		act = tcf_action_init_1(net, tp, tb[i], est, ops[i - 1],
1467 					&init_res[i - 1], flags, extack);
1468 		if (IS_ERR(act)) {
1469 			err = PTR_ERR(act);
1470 			goto err;
1471 		}
1472 		sz += tcf_action_fill_size(act);
1473 		/* Start from index 0 */
1474 		actions[i - 1] = act;
1475 		if (tc_act_bind(flags)) {
1476 			bool skip_sw = tc_skip_sw(fl_flags);
1477 			bool skip_hw = tc_skip_hw(fl_flags);
1478 
1479 			if (tc_act_bind(act->tcfa_flags))
1480 				continue;
1481 			if (skip_sw != tc_act_skip_sw(act->tcfa_flags) ||
1482 			    skip_hw != tc_act_skip_hw(act->tcfa_flags)) {
1483 				NL_SET_ERR_MSG(extack,
1484 					       "Mismatch between action and filter offload flags");
1485 				err = -EINVAL;
1486 				goto err;
1487 			}
1488 		} else {
1489 			err = tcf_action_offload_add(act, extack);
1490 			if (tc_act_skip_sw(act->tcfa_flags) && err)
1491 				goto err;
1492 		}
1493 	}
1494 
1495 	/* We have to commit them all together, because if any error happened in
1496 	 * between, we could not handle the failure gracefully.
1497 	 */
1498 	tcf_idr_insert_many(actions);
1499 
1500 	*attr_size = tcf_action_full_attrs_size(sz);
1501 	err = i - 1;
1502 	goto err_mod;
1503 
1504 err:
1505 	tcf_action_destroy(actions, flags & TCA_ACT_FLAGS_BIND);
1506 err_mod:
1507 	for (i = 0; i < TCA_ACT_MAX_PRIO && ops[i]; i++)
1508 		module_put(ops[i]->owner);
1509 	return err;
1510 }
1511 
1512 void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
1513 			     u64 drops, bool hw)
1514 {
1515 	if (a->cpu_bstats) {
1516 		_bstats_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
1517 
1518 		this_cpu_ptr(a->cpu_qstats)->drops += drops;
1519 
1520 		if (hw)
1521 			_bstats_update(this_cpu_ptr(a->cpu_bstats_hw),
1522 				       bytes, packets);
1523 		return;
1524 	}
1525 
1526 	_bstats_update(&a->tcfa_bstats, bytes, packets);
1527 	a->tcfa_qstats.drops += drops;
1528 	if (hw)
1529 		_bstats_update(&a->tcfa_bstats_hw, bytes, packets);
1530 }
1531 EXPORT_SYMBOL(tcf_action_update_stats);
1532 
1533 int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
1534 			  int compat_mode)
1535 {
1536 	int err = 0;
1537 	struct gnet_dump d;
1538 
1539 	if (p == NULL)
1540 		goto errout;
1541 
1542 	/* compat_mode being true specifies a call that is supposed
1543 	 * to add additional backward compatibility statistic TLVs.
1544 	 */
1545 	if (compat_mode) {
1546 		if (p->type == TCA_OLD_COMPAT)
1547 			err = gnet_stats_start_copy_compat(skb, 0,
1548 							   TCA_STATS,
1549 							   TCA_XSTATS,
1550 							   &p->tcfa_lock, &d,
1551 							   TCA_PAD);
1552 		else
1553 			return 0;
1554 	} else
1555 		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
1556 					    &p->tcfa_lock, &d, TCA_ACT_PAD);
1557 
1558 	if (err < 0)
1559 		goto errout;
1560 
1561 	if (gnet_stats_copy_basic(&d, p->cpu_bstats,
1562 				  &p->tcfa_bstats, false) < 0 ||
1563 	    gnet_stats_copy_basic_hw(&d, p->cpu_bstats_hw,
1564 				     &p->tcfa_bstats_hw, false) < 0 ||
1565 	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
1566 	    gnet_stats_copy_queue(&d, p->cpu_qstats,
1567 				  &p->tcfa_qstats,
1568 				  p->tcfa_qstats.qlen) < 0)
1569 		goto errout;
1570 
1571 	if (gnet_stats_finish_copy(&d) < 0)
1572 		goto errout;
1573 
1574 	return 0;
1575 
1576 errout:
1577 	return -1;
1578 }
1579 
1580 static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
1581 			u32 portid, u32 seq, u16 flags, int event, int bind,
1582 			int ref, struct netlink_ext_ack *extack)
1583 {
1584 	struct tcamsg *t;
1585 	struct nlmsghdr *nlh;
1586 	unsigned char *b = skb_tail_pointer(skb);
1587 	struct nlattr *nest;
1588 
1589 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
1590 	if (!nlh)
1591 		goto out_nlmsg_trim;
1592 	t = nlmsg_data(nlh);
1593 	t->tca_family = AF_UNSPEC;
1594 	t->tca__pad1 = 0;
1595 	t->tca__pad2 = 0;
1596 
1597 	if (extack && extack->_msg &&
1598 	    nla_put_string(skb, TCA_ROOT_EXT_WARN_MSG, extack->_msg))
1599 		goto out_nlmsg_trim;
1600 
1601 	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
1602 	if (!nest)
1603 		goto out_nlmsg_trim;
1604 
1605 	if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
1606 		goto out_nlmsg_trim;
1607 
1608 	nla_nest_end(skb, nest);
1609 
1610 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1611 
1612 	return skb->len;
1613 
1614 out_nlmsg_trim:
1615 	nlmsg_trim(skb, b);
1616 	return -1;
1617 }
1618 
1619 static int
1620 tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
1621 	       struct tc_action *actions[], int event,
1622 	       struct netlink_ext_ack *extack)
1623 {
1624 	struct sk_buff *skb;
1625 
1626 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1627 	if (!skb)
1628 		return -ENOBUFS;
1629 	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
1630 			 0, 1, NULL) <= 0) {
1631 		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
1632 		kfree_skb(skb);
1633 		return -EINVAL;
1634 	}
1635 
1636 	return rtnl_unicast(skb, net, portid);
1637 }
1638 
1639 static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
1640 					  struct nlmsghdr *n, u32 portid,
1641 					  struct netlink_ext_ack *extack)
1642 {
1643 	struct nlattr *tb[TCA_ACT_MAX + 1];
1644 	const struct tc_action_ops *ops;
1645 	struct tc_action *a;
1646 	int index;
1647 	int err;
1648 
1649 	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1650 					  tcf_action_policy, extack);
1651 	if (err < 0)
1652 		goto err_out;
1653 
1654 	err = -EINVAL;
1655 	if (tb[TCA_ACT_INDEX] == NULL ||
1656 	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
1657 		NL_SET_ERR_MSG(extack, "Invalid TC action index value");
1658 		goto err_out;
1659 	}
1660 	index = nla_get_u32(tb[TCA_ACT_INDEX]);
1661 
1662 	err = -EINVAL;
1663 	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
1664 	if (!ops) { /* could happen in batch of actions */
1665 		NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
1666 		goto err_out;
1667 	}
1668 	err = -ENOENT;
1669 	if (__tcf_idr_search(net, ops, &a, index) == 0) {
1670 		NL_SET_ERR_MSG(extack, "TC action with specified index not found");
1671 		goto err_mod;
1672 	}
1673 
1674 	module_put(ops->owner);
1675 	return a;
1676 
1677 err_mod:
1678 	module_put(ops->owner);
1679 err_out:
1680 	return ERR_PTR(err);
1681 }
1682 
1683 static int tca_action_flush(struct net *net, struct nlattr *nla,
1684 			    struct nlmsghdr *n, u32 portid,
1685 			    struct netlink_ext_ack *extack)
1686 {
1687 	struct sk_buff *skb;
1688 	unsigned char *b;
1689 	struct nlmsghdr *nlh;
1690 	struct tcamsg *t;
1691 	struct netlink_callback dcb;
1692 	struct nlattr *nest;
1693 	struct nlattr *tb[TCA_ACT_MAX + 1];
1694 	const struct tc_action_ops *ops;
1695 	struct nlattr *kind;
1696 	int err = -ENOMEM;
1697 
1698 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1699 	if (!skb)
1700 		return err;
1701 
1702 	b = skb_tail_pointer(skb);
1703 
1704 	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1705 					  tcf_action_policy, extack);
1706 	if (err < 0)
1707 		goto err_out;
1708 
1709 	err = -EINVAL;
1710 	kind = tb[TCA_ACT_KIND];
1711 	ops = tc_lookup_action(kind);
1712 	if (!ops) { /* someone is trying to flush an unknown action */
1713 		NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
1714 		goto err_out;
1715 	}
1716 
1717 	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
1718 			sizeof(*t), 0);
1719 	if (!nlh) {
1720 		NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
1721 		goto out_module_put;
1722 	}
1723 	t = nlmsg_data(nlh);
1724 	t->tca_family = AF_UNSPEC;
1725 	t->tca__pad1 = 0;
1726 	t->tca__pad2 = 0;
1727 
1728 	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
1729 	if (!nest) {
1730 		NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
1731 		goto out_module_put;
1732 	}
1733 
1734 	err = __tcf_generic_walker(net, skb, &dcb, RTM_DELACTION, ops, extack);
1735 	if (err <= 0) {
1736 		nla_nest_cancel(skb, nest);
1737 		goto out_module_put;
1738 	}
1739 
1740 	nla_nest_end(skb, nest);
1741 
1742 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1743 	nlh->nlmsg_flags |= NLM_F_ROOT;
1744 	module_put(ops->owner);
1745 	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1746 			     n->nlmsg_flags & NLM_F_ECHO);
1747 	if (err < 0)
1748 		NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");
1749 
1750 	return err;
1751 
1752 out_module_put:
1753 	module_put(ops->owner);
1754 err_out:
1755 	kfree_skb(skb);
1756 	return err;
1757 }
1758 
1759 static int tcf_action_delete(struct net *net, struct tc_action *actions[])
1760 {
1761 	struct tc_action *a;
1762 	int i;
1763 
1764 	tcf_act_for_each_action(i, a, actions) {
1765 		const struct tc_action_ops *ops = a->ops;
1766 		/* Actions can be deleted concurrently so we must save their
1767 		 * type and id to search again after reference is released.
1768 		 */
1769 		struct tcf_idrinfo *idrinfo = a->idrinfo;
1770 		u32 act_index = a->tcfa_index;
1771 
1772 		actions[i] = NULL;
1773 		if (tcf_action_put(a)) {
1774 			/* last reference, action was deleted concurrently */
1775 			module_put(ops->owner);
1776 		} else {
1777 			int ret;
1778 
1779 			/* now do the delete */
1780 			ret = tcf_idr_delete_index(idrinfo, act_index);
1781 			if (ret < 0)
1782 				return ret;
1783 		}
1784 	}
1785 	return 0;
1786 }
1787 
1788 static struct sk_buff *tcf_reoffload_del_notify_msg(struct net *net,
1789 						    struct tc_action *action)
1790 {
1791 	size_t attr_size = tcf_action_fill_size(action);
1792 	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
1793 		[0] = action,
1794 	};
1795 	struct sk_buff *skb;
1796 
1797 	skb = alloc_skb(max(attr_size, NLMSG_GOODSIZE), GFP_KERNEL);
1798 	if (!skb)
1799 		return ERR_PTR(-ENOBUFS);
1800 
1801 	if (tca_get_fill(skb, actions, 0, 0, 0, RTM_DELACTION, 0, 1, NULL) <= 0) {
1802 		kfree_skb(skb);
1803 		return ERR_PTR(-EINVAL);
1804 	}
1805 
1806 	return skb;
1807 }
1808 
1809 static int tcf_reoffload_del_notify(struct net *net, struct tc_action *action)
1810 {
1811 	const struct tc_action_ops *ops = action->ops;
1812 	struct sk_buff *skb;
1813 	int ret;
1814 
1815 	if (!rtnl_notify_needed(net, 0, RTNLGRP_TC)) {
1816 		skb = NULL;
1817 	} else {
1818 		skb = tcf_reoffload_del_notify_msg(net, action);
1819 		if (IS_ERR(skb))
1820 			return PTR_ERR(skb);
1821 	}
1822 
1823 	ret = tcf_idr_release_unsafe(action);
1824 	if (ret == ACT_P_DELETED) {
1825 		module_put(ops->owner);
1826 		ret = rtnetlink_maybe_send(skb, net, 0, RTNLGRP_TC, 0);
1827 	} else {
1828 		kfree_skb(skb);
1829 	}
1830 
1831 	return ret;
1832 }
1833 
1834 int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
1835 			    void *cb_priv, bool add)
1836 {
1837 	struct tc_act_pernet_id *id_ptr;
1838 	struct tcf_idrinfo *idrinfo;
1839 	struct tc_action_net *tn;
1840 	struct tc_action *p;
1841 	unsigned int act_id;
1842 	unsigned long tmp;
1843 	unsigned long id;
1844 	struct idr *idr;
1845 	struct net *net;
1846 	int ret;
1847 
1848 	if (!cb)
1849 		return -EINVAL;
1850 
1851 	down_read(&net_rwsem);
1852 	mutex_lock(&act_id_mutex);
1853 
1854 	for_each_net(net) {
1855 		list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
1856 			act_id = id_ptr->id;
1857 			tn = net_generic(net, act_id);
1858 			if (!tn)
1859 				continue;
1860 			idrinfo = tn->idrinfo;
1861 			if (!idrinfo)
1862 				continue;
1863 
1864 			mutex_lock(&idrinfo->lock);
1865 			idr = &idrinfo->action_idr;
1866 			idr_for_each_entry_ul(idr, p, tmp, id) {
1867 				if (IS_ERR(p) || tc_act_bind(p->tcfa_flags))
1868 					continue;
1869 				if (add) {
1870 					tcf_action_offload_add_ex(p, NULL, cb,
1871 								  cb_priv);
1872 					continue;
1873 				}
1874 
1875 				/* cb is being unregistered; update the hw count */
1876 				ret = tcf_action_offload_del_ex(p, cb, cb_priv);
1877 				if (ret < 0)
1878 					continue;
1879 				if (tc_act_skip_sw(p->tcfa_flags) &&
1880 				    !tc_act_in_hw(p))
1881 					tcf_reoffload_del_notify(net, p);
1882 			}
1883 			mutex_unlock(&idrinfo->lock);
1884 		}
1885 	}
1886 	mutex_unlock(&act_id_mutex);
1887 	up_read(&net_rwsem);
1888 
1889 	return 0;
1890 }
1891 
1892 static struct sk_buff *tcf_del_notify_msg(struct net *net, struct nlmsghdr *n,
1893 					  struct tc_action *actions[],
1894 					  u32 portid, size_t attr_size,
1895 					  struct netlink_ext_ack *extack)
1896 {
1897 	struct sk_buff *skb;
1898 
1899 	skb = alloc_skb(max(attr_size, NLMSG_GOODSIZE), GFP_KERNEL);
1900 	if (!skb)
1901 		return ERR_PTR(-ENOBUFS);
1902 
1903 	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
1904 			 0, 2, extack) <= 0) {
1905 		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
1906 		kfree_skb(skb);
1907 		return ERR_PTR(-EINVAL);
1908 	}
1909 
1910 	return skb;
1911 }
1912 
1913 static int tcf_del_notify(struct net *net, struct nlmsghdr *n,
1914 			  struct tc_action *actions[], u32 portid,
1915 			  size_t attr_size, struct netlink_ext_ack *extack)
1916 {
1917 	struct sk_buff *skb;
1918 	int ret;
1919 
1920 	if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) {
1921 		skb = NULL;
1922 	} else {
1923 		skb = tcf_del_notify_msg(net, n, actions, portid, attr_size,
1924 					 extack);
1925 		if (IS_ERR(skb))
1926 			return PTR_ERR(skb);
1927 	}
1928 
1929 	/* now do the delete */
1930 	ret = tcf_action_delete(net, actions);
1931 	if (ret < 0) {
1932 		NL_SET_ERR_MSG(extack, "Failed to delete TC action");
1933 		kfree_skb(skb);
1934 		return ret;
1935 	}
1936 
1937 	return rtnetlink_maybe_send(skb, net, portid, RTNLGRP_TC,
1938 				    n->nlmsg_flags & NLM_F_ECHO);
1939 }
1940 
1941 static int
1942 tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
1943 	      u32 portid, int event, struct netlink_ext_ack *extack)
1944 {
1945 	int i, ret;
1946 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
1947 	struct tc_action *act;
1948 	size_t attr_size = 0;
1949 	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
1950 
1951 	ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
1952 					  extack);
1953 	if (ret < 0)
1954 		return ret;
1955 
1956 	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
1957 		if (tb[1])
1958 			return tca_action_flush(net, tb[1], n, portid, extack);
1959 
1960 		NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
1961 		return -EINVAL;
1962 	}
1963 
1964 	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1965 		act = tcf_action_get_1(net, tb[i], n, portid, extack);
1966 		if (IS_ERR(act)) {
1967 			ret = PTR_ERR(act);
1968 			goto err;
1969 		}
1970 		attr_size += tcf_action_fill_size(act);
1971 		actions[i - 1] = act;
1972 	}
1973 
1974 	attr_size = tcf_action_full_attrs_size(attr_size);
1975 
1976 	if (event == RTM_GETACTION)
1977 		ret = tcf_get_notify(net, portid, n, actions, event, extack);
1978 	else { /* delete */
1979 		ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
1980 		if (ret)
1981 			goto err;
1982 		return 0;
1983 	}
1984 err:
1985 	tcf_action_put_many(actions);
1986 	return ret;
1987 }
1988 
1989 static struct sk_buff *tcf_add_notify_msg(struct net *net, struct nlmsghdr *n,
1990 					  struct tc_action *actions[],
1991 					  u32 portid, size_t attr_size,
1992 					  struct netlink_ext_ack *extack)
1993 {
1994 	struct sk_buff *skb;
1995 
1996 	skb = alloc_skb(max(attr_size, NLMSG_GOODSIZE), GFP_KERNEL);
1997 	if (!skb)
1998 		return ERR_PTR(-ENOBUFS);
1999 
2000 	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
2001 			 RTM_NEWACTION, 0, 0, extack) <= 0) {
2002 		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
2003 		kfree_skb(skb);
2004 		return ERR_PTR(-EINVAL);
2005 	}
2006 
2007 	return skb;
2008 }
2009 
2010 static int tcf_add_notify(struct net *net, struct nlmsghdr *n,
2011 			  struct tc_action *actions[], u32 portid,
2012 			  size_t attr_size, struct netlink_ext_ack *extack)
2013 {
2014 	struct sk_buff *skb;
2015 
2016 	if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) {
2017 		skb = NULL;
2018 	} else {
2019 		skb = tcf_add_notify_msg(net, n, actions, portid, attr_size,
2020 					 extack);
2021 		if (IS_ERR(skb))
2022 			return PTR_ERR(skb);
2023 	}
2024 
2025 	return rtnetlink_maybe_send(skb, net, portid, RTNLGRP_TC,
2026 				    n->nlmsg_flags & NLM_F_ECHO);
2027 }
2028 
2029 static int tcf_action_add(struct net *net, struct nlattr *nla,
2030 			  struct nlmsghdr *n, u32 portid, u32 flags,
2031 			  struct netlink_ext_ack *extack)
2032 {
2033 	size_t attr_size = 0;
2034 	int loop, ret;
2035 	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
2036 	int init_res[TCA_ACT_MAX_PRIO] = {};
2037 
2038 	for (loop = 0; loop < 10; loop++) {
2039 		ret = tcf_action_init(net, NULL, nla, NULL, actions, init_res,
2040 				      &attr_size, flags, 0, extack);
2041 		if (ret != -EAGAIN)
2042 			break;
2043 	}
2044 
2045 	if (ret < 0)
2046 		return ret;
2047 
2048 	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
2049 
2050 	/* only put bound actions */
2051 	tca_put_bound_many(actions, init_res);
2052 
2053 	return ret;
2054 }
2055 
2056 static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
2057 	[TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAG_LARGE_DUMP_ON |
2058 						 TCA_ACT_FLAG_TERSE_DUMP),
2059 	[TCA_ROOT_TIME_DELTA]      = { .type = NLA_U32 },
2060 };
2061 
2062 static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
2063 			 struct netlink_ext_ack *extack)
2064 {
2065 	struct net *net = sock_net(skb->sk);
2066 	struct nlattr *tca[TCA_ROOT_MAX + 1];
2067 	u32 portid = NETLINK_CB(skb).portid;
2068 	u32 flags = 0;
2069 	int ret = 0;
2070 
2071 	if ((n->nlmsg_type != RTM_GETACTION) &&
2072 	    !netlink_capable(skb, CAP_NET_ADMIN))
2073 		return -EPERM;
2074 
2075 	ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
2076 				     TCA_ROOT_MAX, NULL, extack);
2077 	if (ret < 0)
2078 		return ret;
2079 
2080 	if (tca[TCA_ACT_TAB] == NULL) {
2081 		NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
2082 		return -EINVAL;
2083 	}
2084 
2085 	/* n->nlmsg_flags & NLM_F_CREATE */
2086 	switch (n->nlmsg_type) {
2087 	case RTM_NEWACTION:
2088 		/* We are going to assume that all other flags
2089 		 * imply create-only-if-it-doesn't-exist.
2090 		 * Note that CREATE | EXCL implies that, but since
2091 		 * we want to avoid ambiguity (e.g. when flags are
2092 		 * zero) we just set this unconditionally.
2093 		 */
2094 		if (n->nlmsg_flags & NLM_F_REPLACE)
2095 			flags = TCA_ACT_FLAGS_REPLACE;
2096 		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, flags,
2097 				     extack);
2098 		break;
2099 	case RTM_DELACTION:
2100 		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
2101 				    portid, RTM_DELACTION, extack);
2102 		break;
2103 	case RTM_GETACTION:
2104 		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
2105 				    portid, RTM_GETACTION, extack);
2106 		break;
2107 	default:
2108 		BUG();
2109 	}
2110 
2111 	return ret;
2112 }
2113 
2114 static struct nlattr *find_dump_kind(struct nlattr **nla)
2115 {
2116 	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
2117 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
2118 	struct nlattr *kind;
2119 
2120 	tb1 = nla[TCA_ACT_TAB];
2121 	if (tb1 == NULL)
2122 		return NULL;
2123 
2124 	if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1), NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
2125 		return NULL;
2126 
2127 	if (tb[1] == NULL)
2128 		return NULL;
2129 	if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1], tcf_action_policy, NULL) < 0)
2130 		return NULL;
2131 	kind = tb2[TCA_ACT_KIND];
2132 
2133 	return kind;
2134 }
2135 
2136 static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
2137 {
2138 	struct net *net = sock_net(skb->sk);
2139 	struct nlmsghdr *nlh;
2140 	unsigned char *b = skb_tail_pointer(skb);
2141 	struct nlattr *nest;
2142 	struct tc_action_ops *a_o;
2143 	int ret = 0;
2144 	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
2145 	struct nlattr *tb[TCA_ROOT_MAX + 1];
2146 	struct nlattr *count_attr = NULL;
2147 	unsigned long jiffy_since = 0;
2148 	struct nlattr *kind = NULL;
2149 	struct nla_bitfield32 bf;
2150 	u32 msecs_since = 0;
2151 	u32 act_count = 0;
2152 
2153 	ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
2154 				     TCA_ROOT_MAX, tcaa_policy, cb->extack);
2155 	if (ret < 0)
2156 		return ret;
2157 
2158 	kind = find_dump_kind(tb);
2159 	if (kind == NULL) {
2160 		pr_info("tc_dump_action: action bad kind\n");
2161 		return 0;
2162 	}
2163 
2164 	a_o = tc_lookup_action(kind);
2165 	if (a_o == NULL)
2166 		return 0;
2167 
2168 	cb->args[2] = 0;
2169 	if (tb[TCA_ROOT_FLAGS]) {
2170 		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
2171 		cb->args[2] = bf.value;
2172 	}
2173 
2174 	if (tb[TCA_ROOT_TIME_DELTA]) {
2175 		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
2176 	}
2177 
2178 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2179 			cb->nlh->nlmsg_type, sizeof(*t), 0);
2180 	if (!nlh)
2181 		goto out_module_put;
2182 
2183 	if (msecs_since)
2184 		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);
2185 
2186 	t = nlmsg_data(nlh);
2187 	t->tca_family = AF_UNSPEC;
2188 	t->tca__pad1 = 0;
2189 	t->tca__pad2 = 0;
2190 	cb->args[3] = jiffy_since;
2191 	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
2192 	if (!count_attr)
2193 		goto out_module_put;
2194 
2195 	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
2196 	if (nest == NULL)
2197 		goto out_module_put;
2198 
2199 	ret = __tcf_generic_walker(net, skb, cb, RTM_GETACTION, a_o, NULL);
2200 	if (ret < 0)
2201 		goto out_module_put;
2202 
2203 	if (ret > 0) {
2204 		nla_nest_end(skb, nest);
2205 		ret = skb->len;
2206 		act_count = cb->args[1];
2207 		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
2208 		cb->args[1] = 0;
2209 	} else
2210 		nlmsg_trim(skb, b);
2211 
2212 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2213 	if (NETLINK_CB(cb->skb).portid && ret)
2214 		nlh->nlmsg_flags |= NLM_F_MULTI;
2215 	module_put(a_o->owner);
2216 	return skb->len;
2217 
2218 out_module_put:
2219 	module_put(a_o->owner);
2220 	nlmsg_trim(skb, b);
2221 	return skb->len;
2222 }
2223 
2224 static int __init tc_action_init(void)
2225 {
2226 	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
2227 	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
2228 	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
2229 		      0);
2230 
2231 	return 0;
2232 }
2233 
2234 subsys_initcall(tc_action_init);
2235