xref: /linux/net/sched/act_mirred.c (revision 1b98f357dadd6ea613a435fbaef1a5dd7b35fd21)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_mirred.c	packet mirroring and redirect actions
 *
 * Authors:	Jamal Hadi Salim (2002-4)
 *
 * TODO: Add ingress support (and socket redirect support)
 */

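/* The mirred action mirrors or redirects packets to another network
 * device, or to every port bound to a tc block.  A typical setup from
 * user space (illustrative device names) looks like:
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress matchall \
 *           action mirred egress redirect dev eth1
 */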
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_wrapper.h>

static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);

#define MIRRED_NEST_LIMIT    4

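/* Mirred can nest: a redirected packet may hit another mirred action on
 * the target device.  The per-CPU counter (per-task on PREEMPT_RT) below
 * bounds that recursion at MIRRED_NEST_LIMIT levels.
 */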
#ifndef CONFIG_PREEMPT_RT
static u8 tcf_mirred_nest_level_inc_return(void)
{
	return __this_cpu_inc_return(softnet_data.xmit.sched_mirred_nest);
}

static void tcf_mirred_nest_level_dec(void)
{
	__this_cpu_dec(softnet_data.xmit.sched_mirred_nest);
}

#else
static u8 tcf_mirred_nest_level_inc_return(void)
{
	return current->net_xmit.sched_mirred_nest++;
}

static void tcf_mirred_nest_level_dec(void)
{
	current->net_xmit.sched_mirred_nest--;
}
#endif

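/* Classify the configured mirred sub-action: redirect vs. mirror, and
 * whether the packet should be delivered to the target's ingress or
 * egress path.
 */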
static bool tcf_mirred_is_act_redirect(int action)
{
	return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
}

static bool tcf_mirred_act_wants_ingress(int action)
{
	switch (action) {
	case TCA_EGRESS_REDIR:
	case TCA_EGRESS_MIRROR:
		return false;
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		return true;
	default:
		BUG();
	}
}

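/* The original skb may be handed straight to the target device (no clone)
 * only when the filter verdict already removes it from the current path.
 */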
static bool tcf_mirred_can_reinsert(int action)
{
	switch (action) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		return true;
	}
	return false;
}

static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
{
	return rcu_dereference_protected(m->tcfm_dev,
					 lockdep_is_held(&m->tcf_lock));
}

static void tcf_mirred_release(struct tc_action *a)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	spin_lock(&mirred_list_lock);
	list_del(&m->tcfm_list);
	spin_unlock(&mirred_list_lock);

	/* last reference to action, no need to lock */
	dev = rcu_dereference_protected(m->tcfm_dev, 1);
	netdev_put(dev, &m->tcfm_dev_tracker);
}

static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
	[TCA_MIRRED_PARMS]	= { .len = sizeof(struct tc_mirred) },
	[TCA_MIRRED_BLOCKID]	= NLA_POLICY_MIN(NLA_U32, 1),
};

static struct tc_action_ops act_mirred_ops;

static void tcf_mirred_replace_dev(struct tcf_mirred *m,
				   struct net_device *ndev)
{
	struct net_device *odev;

	odev = rcu_replace_pointer(m->tcfm_dev, ndev,
				   lockdep_is_held(&m->tcf_lock));
	netdev_put(odev, &m->tcfm_dev_tracker);
}

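/* Netlink .init callback: parse the TCA_MIRRED_* attributes and create or
 * update the action.  A target is either a device (ifindex) or a tc block
 * id; the two are mutually exclusive.
 */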
static int tcf_mirred_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   struct tcf_proto *tp,
			   u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_MIRRED_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	bool mac_header_xmit = false;
	struct tc_mirred *parm;
	struct tcf_mirred *m;
	bool exists = false;
	int ret, err;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
		return -EINVAL;
	}
	ret = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
					  mirred_policy, extack);
	if (ret < 0)
		return ret;
	if (!tb[TCA_MIRRED_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required mirred parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_MIRRED_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return ACT_P_BOUND;

	if (tb[TCA_MIRRED_BLOCKID] && parm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot specify Block ID and dev simultaneously");
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);

		return -EINVAL;
	}

	switch (parm->eaction) {
	case TCA_EGRESS_MIRROR:
	case TCA_EGRESS_REDIR:
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
		return -EINVAL;
	}

	if (!exists) {
		if (!parm->ifindex && !tb[TCA_MIRRED_BLOCKID]) {
			tcf_idr_cleanup(tn, index);
			NL_SET_ERR_MSG_MOD(extack,
					   "Must specify device or block");
			return -EINVAL;
		}
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_mirred_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	m = to_mirred(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&m->tcfm_list);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&m->tcf_lock);

	if (parm->ifindex) {
		struct net_device *ndev;

		ndev = dev_get_by_index(net, parm->ifindex);
		if (!ndev) {
			spin_unlock_bh(&m->tcf_lock);
			err = -ENODEV;
			goto put_chain;
		}
		mac_header_xmit = dev_is_mac_header_xmit(ndev);
		tcf_mirred_replace_dev(m, ndev);
		netdev_tracker_alloc(ndev, &m->tcfm_dev_tracker, GFP_ATOMIC);
		m->tcfm_mac_header_xmit = mac_header_xmit;
		m->tcfm_blockid = 0;
	} else if (tb[TCA_MIRRED_BLOCKID]) {
		tcf_mirred_replace_dev(m, NULL);
		m->tcfm_mac_header_xmit = false;
		m->tcfm_blockid = nla_get_u32(tb[TCA_MIRRED_BLOCKID]);
	}
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	m->tcfm_eaction = parm->eaction;
	spin_unlock_bh(&m->tcf_lock);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (ret == ACT_P_CREATED) {
		spin_lock(&mirred_list_lock);
		list_add(&m->tcfm_list, &mirred_list);
		spin_unlock(&mirred_list_lock);
	}

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

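/* Deliver the skb: egress via dev_queue_xmit(), or into the target's
 * ingress path - netif_rx() when coming from egress, netif_receive_skb()
 * when already running at ingress.
 */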
static int
tcf_mirred_forward(bool at_ingress, bool want_ingress, struct sk_buff *skb)
{
	int err;

	if (!want_ingress)
		err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
	else if (!at_ingress)
		err = netif_rx(skb);
	else
		err = netif_receive_skb(skb);

	return err;
}

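/* Mirror or redirect one skb to a single target device: clone unless the
 * original can be reinserted, align skb->data with what the target expects
 * (mac vs. network header), then hand it to tcf_mirred_forward().
 * Returns the TC verdict for the calling filter.
 */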
static int tcf_mirred_to_dev(struct sk_buff *skb, struct tcf_mirred *m,
			     struct net_device *dev,
			     const bool m_mac_header_xmit, int m_eaction,
			     int retval)
{
	struct sk_buff *skb_to_send = skb;
	bool want_ingress;
	bool is_redirect;
	bool expects_nh;
	bool at_ingress;
	bool dont_clone;
	int mac_len;
	bool at_nh;
	int err;

	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
	if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
				       dev->name);
		goto err_cant_do;
	}

	/* we could easily avoid the clone only if called by ingress and clsact;
	 * since we can't easily detect the clsact caller, skip clone only for
	 * ingress - that covers the TC S/W datapath.
	 */
	at_ingress = skb_at_tc_ingress(skb);
	dont_clone = skb_at_tc_ingress(skb) && is_redirect &&
		tcf_mirred_can_reinsert(retval);
	if (!dont_clone) {
		skb_to_send = skb_clone(skb, GFP_ATOMIC);
		if (!skb_to_send)
			goto err_cant_do;
	}

	want_ingress = tcf_mirred_act_wants_ingress(m_eaction);

	/* All mirred/redirected skbs should clear previous ct info */
	nf_reset_ct(skb_to_send);
	if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
		skb_dst_drop(skb_to_send);

	expects_nh = want_ingress || !m_mac_header_xmit;
	at_nh = skb->data == skb_network_header(skb);
	if (at_nh != expects_nh) {
		mac_len = at_ingress ? skb->mac_len :
			  skb_network_offset(skb);
		if (expects_nh) {
			/* target device/action expect data at nh */
			skb_pull_rcsum(skb_to_send, mac_len);
		} else {
			/* target device/action expect data at mac */
			skb_push_rcsum(skb_to_send, mac_len);
		}
	}

	skb_to_send->skb_iif = skb->dev->ifindex;
	skb_to_send->dev = dev;

	if (is_redirect) {
		if (skb == skb_to_send)
			retval = TC_ACT_CONSUMED;

		skb_set_redirected(skb_to_send, skb_to_send->tc_at_ingress);

		err = tcf_mirred_forward(at_ingress, want_ingress, skb_to_send);
	} else {
		err = tcf_mirred_forward(at_ingress, want_ingress, skb_to_send);
	}
	if (err)
		tcf_action_inc_overlimit_qstats(&m->common);

	return retval;

err_cant_do:
	if (is_redirect)
		retval = TC_ACT_SHOT;
	tcf_action_inc_overlimit_qstats(&m->common);
	return retval;
}

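/* Redirect to every port of the block except the one the packet arrived
 * on: all but the last port receive a mirrored copy, the final port gets
 * the actual redirect.
 */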
static int tcf_blockcast_redir(struct sk_buff *skb, struct tcf_mirred *m,
			       struct tcf_block *block, int m_eaction,
			       const u32 exception_ifindex, int retval)
{
	struct net_device *dev_prev = NULL;
	struct net_device *dev = NULL;
	unsigned long index;
	int mirred_eaction;

	mirred_eaction = tcf_mirred_act_wants_ingress(m_eaction) ?
		TCA_INGRESS_MIRROR : TCA_EGRESS_MIRROR;

	xa_for_each(&block->ports, index, dev) {
		if (index == exception_ifindex)
			continue;

		if (!dev_prev)
			goto assign_prev;

		tcf_mirred_to_dev(skb, m, dev_prev,
				  dev_is_mac_header_xmit(dev),
				  mirred_eaction, retval);
assign_prev:
		dev_prev = dev;
	}

	if (dev_prev)
		return tcf_mirred_to_dev(skb, m, dev_prev,
					 dev_is_mac_header_xmit(dev_prev),
					 m_eaction, retval);

	return retval;
}

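/* Mirror to every port of the block except the one the packet arrived on. */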
static int tcf_blockcast_mirror(struct sk_buff *skb, struct tcf_mirred *m,
				struct tcf_block *block, int m_eaction,
				const u32 exception_ifindex, int retval)
{
	struct net_device *dev = NULL;
	unsigned long index;

	xa_for_each(&block->ports, index, dev) {
		if (index == exception_ifindex)
			continue;

		tcf_mirred_to_dev(skb, m, dev,
				  dev_is_mac_header_xmit(dev),
				  m_eaction, retval);
	}

	return retval;
}

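/* Fan a packet out to the ports of the tc block identified by blockid,
 * excluding the incoming device, as either mirror or redirect.
 */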
static int tcf_blockcast(struct sk_buff *skb, struct tcf_mirred *m,
			 const u32 blockid, struct tcf_result *res,
			 int retval)
{
	const u32 exception_ifindex = skb->dev->ifindex;
	struct tcf_block *block;
	bool is_redirect;
	int m_eaction;

	m_eaction = READ_ONCE(m->tcfm_eaction);
	is_redirect = tcf_mirred_is_act_redirect(m_eaction);

	/* we are already under rcu protection, so can call block lookup
	 * directly.
	 */
	block = tcf_block_lookup(dev_net(skb->dev), blockid);
	if (!block || xa_empty(&block->ports)) {
		tcf_action_inc_overlimit_qstats(&m->common);
		return retval;
	}

	if (is_redirect)
		return tcf_blockcast_redir(skb, m, block, m_eaction,
					   exception_ifindex, retval);

	/* If it's not redirect, it is mirror */
	return tcf_blockcast_mirror(skb, m, block, m_eaction, exception_ifindex,
				    retval);
}

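/* Datapath entry point, called for each packet matching a filter that uses
 * this action.  Bumps the recursion counter, updates the counters and then
 * dispatches to the block or single-device path.
 */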
TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
				     const struct tc_action *a,
				     struct tcf_result *res)
{
	struct tcf_mirred *m = to_mirred(a);
	int retval = READ_ONCE(m->tcf_action);
	unsigned int nest_level;
	bool m_mac_header_xmit;
	struct net_device *dev;
	int m_eaction;
	u32 blockid;

	nest_level = tcf_mirred_nest_level_inc_return();
	if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
				     netdev_name(skb->dev));
		retval = TC_ACT_SHOT;
		goto dec_nest_level;
	}

	tcf_lastuse_update(&m->tcf_tm);
	tcf_action_update_bstats(&m->common, skb);

	blockid = READ_ONCE(m->tcfm_blockid);
	if (blockid) {
		retval = tcf_blockcast(skb, m, blockid, res, retval);
		goto dec_nest_level;
	}

	dev = rcu_dereference_bh(m->tcfm_dev);
	if (unlikely(!dev)) {
		pr_notice_once("tc mirred: target device is gone\n");
		tcf_action_inc_overlimit_qstats(&m->common);
		goto dec_nest_level;
	}

	m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
	m_eaction = READ_ONCE(m->tcfm_eaction);

	retval = tcf_mirred_to_dev(skb, m, dev, m_mac_header_xmit, m_eaction,
				   retval);

dec_nest_level:
	tcf_mirred_nest_level_dec();

	return retval;
}

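/* .stats_update callback: fold externally reported counters (e.g. from
 * hardware offload) into the action's stats and refresh the last-use time.
 */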
static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_mirred *m = to_mirred(a);
	struct tcf_t *tm = &m->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

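/* .dump callback: serialize the action's parameters (and optional block id)
 * back to user space over netlink.
 */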
static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			   int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_mirred *m = to_mirred(a);
	struct tc_mirred opt = {
		.index   = m->tcf_index,
		.refcnt  = refcount_read(&m->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
	};
	struct net_device *dev;
	struct tcf_t t;
	u32 blockid;

	spin_lock_bh(&m->tcf_lock);
	opt.action = m->tcf_action;
	opt.eaction = m->tcfm_eaction;
	dev = tcf_mirred_dev_dereference(m);
	if (dev)
		opt.ifindex = dev->ifindex;

	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	blockid = m->tcfm_blockid;
	if (blockid && nla_put_u32(skb, TCA_MIRRED_BLOCKID, blockid))
		goto nla_put_failure;

	tcf_tm_dump(&t, &m->tcf_tm);
	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&m->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&m->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

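/* Netdevice notifier: when a target device unregisters, drop our reference
 * and clear tcfm_dev so the datapath sees the device is gone.
 */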
static int mirred_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tcf_mirred *m;

	ASSERT_RTNL();
	if (event == NETDEV_UNREGISTER) {
		spin_lock(&mirred_list_lock);
		list_for_each_entry(m, &mirred_list, tcfm_list) {
			spin_lock_bh(&m->tcf_lock);
			if (tcf_mirred_dev_dereference(m) == dev) {
				netdev_put(dev, &m->tcfm_dev_tracker);
				/* Note: no RCU grace period is necessary, as
				 * net_device is already RCU protected.
				 */
				RCU_INIT_POINTER(m->tcfm_dev, NULL);
			}
			spin_unlock_bh(&m->tcf_lock);
		}
		spin_unlock(&mirred_list_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block mirred_device_notifier = {
	.notifier_call = mirred_device_event,
};

static void tcf_mirred_dev_put(void *priv)
{
	struct net_device *dev = priv;

	dev_put(dev);
}

static struct net_device *
tcf_mirred_get_dev(const struct tc_action *a,
		   tc_action_priv_destructor *destructor)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(m->tcfm_dev);
	if (dev) {
		dev_hold(dev);
		*destructor = tcf_mirred_dev_put;
	}
	rcu_read_unlock();

	return dev;
}

static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_mirred));
}

static void tcf_offload_mirred_get_dev(struct flow_action_entry *entry,
				       const struct tc_action *act)
{
	entry->dev = act->ops->get_dev(act, &entry->destructor);
	if (!entry->dev)
		return;
	entry->destructor_priv = entry->dev;
}

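/* Translate a mirred action into flow_action entries so drivers can offload
 * it (FLOW_ACTION_REDIRECT/MIRRED and their ingress variants).
 */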
static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data,
					u32 *index_inc, bool bind,
					struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED_INGRESS;
			tcf_offload_mirred_get_dev(entry, act);
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported mirred offload");
			return -EOPNOTSUPP;
		}
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		if (is_tcf_mirred_egress_redirect(act))
			fl_action->id = FLOW_ACTION_REDIRECT;
		else if (is_tcf_mirred_egress_mirror(act))
			fl_action->id = FLOW_ACTION_MIRRED;
		else if (is_tcf_mirred_ingress_redirect(act))
			fl_action->id = FLOW_ACTION_REDIRECT_INGRESS;
		else if (is_tcf_mirred_ingress_mirror(act))
			fl_action->id = FLOW_ACTION_MIRRED_INGRESS;
		else
			return -EOPNOTSUPP;
	}

	return 0;
}

static struct tc_action_ops act_mirred_ops = {
	.kind		=	"mirred",
	.id		=	TCA_ID_MIRRED,
	.owner		=	THIS_MODULE,
	.act		=	tcf_mirred_act,
	.stats_update	=	tcf_stats_update,
	.dump		=	tcf_mirred_dump,
	.cleanup	=	tcf_mirred_release,
	.init		=	tcf_mirred_init,
	.get_fill_size	=	tcf_mirred_get_fill_size,
	.offload_act_setup =	tcf_mirred_offload_act_setup,
	.size		=	sizeof(struct tcf_mirred),
	.get_dev	=	tcf_mirred_get_dev,
};
MODULE_ALIAS_NET_ACT("mirred");

static __net_init int mirred_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);

	return tc_action_net_init(net, tn, &act_mirred_ops);
}

static void __net_exit mirred_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_mirred_ops.net_id);
}

static struct pernet_operations mirred_net_ops = {
	.init = mirred_init_net,
	.exit_batch = mirred_exit_net,
	.id   = &act_mirred_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim (2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

static int __init mirred_init_module(void)
{
	int err = register_netdevice_notifier(&mirred_device_notifier);
	if (err)
		return err;

	pr_info("Mirror/redirect action on\n");
	err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
	if (err)
		unregister_netdevice_notifier(&mirred_device_notifier);

	return err;
}

static void __exit mirred_cleanup_module(void)
{
	tcf_unregister_action(&act_mirred_ops, &mirred_net_ops);
	unregister_netdevice_notifier(&mirred_device_notifier);
}

module_init(mirred_init_module);
module_exit(mirred_cleanup_module);