xref: /linux/net/sched/act_mirred.c (revision f2ec98566775dd4341ec1dcf93aa5859c60de826)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_mirred.c	packet mirroring and redirect actions
 *
 * Authors:	Jamal Hadi Salim (2002-4)
 *
 * TODO: Add socket redirect support
 */
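
/* Example usage (illustrative only, not part of the kernel sources):
 * with iproute2, mirroring all ingress traffic from eth0 to eth1 looks
 * roughly like
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress matchall \
 *		action mirred egress mirror dev eth1
 *
 * Using "redirect" instead of "mirror" steals the packet rather than
 * sending a clone.
 */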

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_wrapper.h>

static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);

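/* Recursion guard: one mirred action can trigger another (e.g. two
 * devices redirecting to each other), so track the nesting depth per
 * CPU and drop the packet once it exceeds MIRRED_NEST_LIMIT.
 */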
#define MIRRED_NEST_LIMIT    4
static DEFINE_PER_CPU(unsigned int, mirred_nest_level);

static bool tcf_mirred_is_act_redirect(int action)
{
	return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
}

static bool tcf_mirred_act_wants_ingress(int action)
{
	switch (action) {
	case TCA_EGRESS_REDIR:
	case TCA_EGRESS_MIRROR:
		return false;
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		return true;
	default:
		BUG();
	}
}

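/* Verdicts after which the caller no longer owns the skb; for these a
 * redirect may reinsert the original skb instead of cloning it.
 */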
static bool tcf_mirred_can_reinsert(int action)
{
	switch (action) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		return true;
	}
	return false;
}

static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
{
	return rcu_dereference_protected(m->tcfm_dev,
					 lockdep_is_held(&m->tcf_lock));
}

static void tcf_mirred_release(struct tc_action *a)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	spin_lock(&mirred_list_lock);
	list_del(&m->tcfm_list);
	spin_unlock(&mirred_list_lock);

	/* last reference to action, no need to lock */
	dev = rcu_dereference_protected(m->tcfm_dev, 1);
	netdev_put(dev, &m->tcfm_dev_tracker);
}

static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
	[TCA_MIRRED_PARMS]	= { .len = sizeof(struct tc_mirred) },
	[TCA_MIRRED_BLOCKID]	= NLA_POLICY_MIN(NLA_U32, 1),
};

static struct tc_action_ops act_mirred_ops;

static void tcf_mirred_replace_dev(struct tcf_mirred *m,
				   struct net_device *ndev)
{
	struct net_device *odev;

	odev = rcu_replace_pointer(m->tcfm_dev, ndev,
				   lockdep_is_held(&m->tcf_lock));
	netdev_put(odev, &m->tcfm_dev_tracker);
}

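/* Netlink .init callback: parse attributes, create or update an action
 * instance, and point it at either a target device or a tc block id
 * (the two are mutually exclusive).
 */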
static int tcf_mirred_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   struct tcf_proto *tp,
			   u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_MIRRED_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	bool mac_header_xmit = false;
	struct tc_mirred *parm;
	struct tcf_mirred *m;
	bool exists = false;
	int ret, err;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
		return -EINVAL;
	}
	ret = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
					  mirred_policy, extack);
	if (ret < 0)
		return ret;
	if (!tb[TCA_MIRRED_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required mirred parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_MIRRED_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return ACT_P_BOUND;

	if (tb[TCA_MIRRED_BLOCKID] && parm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot specify Block ID and dev simultaneously");
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);

		return -EINVAL;
	}

	switch (parm->eaction) {
	case TCA_EGRESS_MIRROR:
	case TCA_EGRESS_REDIR:
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
		return -EINVAL;
	}

	if (!exists) {
		if (!parm->ifindex && !tb[TCA_MIRRED_BLOCKID]) {
			tcf_idr_cleanup(tn, index);
			NL_SET_ERR_MSG_MOD(extack,
					   "Must specify device or block");
			return -EINVAL;
		}
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_mirred_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	m = to_mirred(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&m->tcfm_list);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&m->tcf_lock);

	if (parm->ifindex) {
		struct net_device *ndev;

		ndev = dev_get_by_index(net, parm->ifindex);
		if (!ndev) {
			spin_unlock_bh(&m->tcf_lock);
			err = -ENODEV;
			goto put_chain;
		}
		mac_header_xmit = dev_is_mac_header_xmit(ndev);
		tcf_mirred_replace_dev(m, ndev);
		netdev_tracker_alloc(ndev, &m->tcfm_dev_tracker, GFP_ATOMIC);
		m->tcfm_mac_header_xmit = mac_header_xmit;
		m->tcfm_blockid = 0;
	} else if (tb[TCA_MIRRED_BLOCKID]) {
		tcf_mirred_replace_dev(m, NULL);
		m->tcfm_mac_header_xmit = false;
		m->tcfm_blockid = nla_get_u32(tb[TCA_MIRRED_BLOCKID]);
	}
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	m->tcfm_eaction = parm->eaction;
	spin_unlock_bh(&m->tcf_lock);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (ret == ACT_P_CREATED) {
		spin_lock(&mirred_list_lock);
		list_add(&m->tcfm_list, &mirred_list);
		spin_unlock(&mirred_list_lock);
	}

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}
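
/* Illustrative only, assuming an iproute2 new enough to know mirred's
 * "blockid" option: mirroring every packet arriving on a port of tc
 * block 22 to all other ports of that block could look like
 *
 *	tc qdisc add dev eth0 ingress_block 22 clsact
 *	tc qdisc add dev eth1 ingress_block 22 clsact
 *	tc filter add block 22 matchall \
 *		action mirred ingress mirror blockid 22
 */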

static bool is_mirred_nested(void)
{
	return unlikely(__this_cpu_read(mirred_nest_level) > 1);
}

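/* Hand the skb to the chosen datapath: egress goes through the target
 * device's qdisc via dev_queue_xmit(); ingress re-enters the stack with
 * netif_receive_skb(), or via the backlog (netif_rx()) when we are
 * already nested inside another mirred call, to bound stack depth.
 */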
static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
{
	int err;

	if (!want_ingress)
		err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
	else if (is_mirred_nested())
		err = netif_rx(skb);
	else
		err = netif_receive_skb(skb);

	return err;
}

static int tcf_mirred_to_dev(struct sk_buff *skb, struct tcf_mirred *m,
			     struct net_device *dev,
			     const bool m_mac_header_xmit, int m_eaction,
			     int retval)
{
	struct sk_buff *skb_to_send = skb;
	bool want_ingress;
	bool is_redirect;
	bool expects_nh;
	bool at_ingress;
	bool dont_clone;
	int mac_len;
	bool at_nh;
	int err;

	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
	if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
				       dev->name);
		err = -ENODEV;
		goto out;
	}

	/* we could easily avoid the clone only if called by ingress and clsact;
	 * since we can't easily detect the clsact caller, skip clone only for
	 * ingress - that covers the TC S/W datapath.
	 */
	at_ingress = skb_at_tc_ingress(skb);
	dont_clone = at_ingress && is_redirect &&
		tcf_mirred_can_reinsert(retval);
	if (!dont_clone) {
		skb_to_send = skb_clone(skb, GFP_ATOMIC);
		if (!skb_to_send) {
			err = -ENOMEM;
			goto out;
		}
	}

	want_ingress = tcf_mirred_act_wants_ingress(m_eaction);

	/* All mirred/redirected skbs should clear previous ct info */
	nf_reset_ct(skb_to_send);
	if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
		skb_dst_drop(skb_to_send);

	expects_nh = want_ingress || !m_mac_header_xmit;
	at_nh = skb->data == skb_network_header(skb);
	if (at_nh != expects_nh) {
		mac_len = at_ingress ? skb->mac_len :
			  skb_network_offset(skb);
		if (expects_nh) {
			/* target device/action expect data at nh */
			skb_pull_rcsum(skb_to_send, mac_len);
		} else {
			/* target device/action expect data at mac */
			skb_push_rcsum(skb_to_send, mac_len);
		}
	}

	skb_to_send->skb_iif = skb->dev->ifindex;
	skb_to_send->dev = dev;

	if (is_redirect) {
		/* the original skb is reinserted below, so the caller
		 * must treat it as consumed
		 */
		if (skb == skb_to_send)
			retval = TC_ACT_CONSUMED;

		skb_set_redirected(skb_to_send, skb_to_send->tc_at_ingress);
	}

	err = tcf_mirred_forward(want_ingress, skb_to_send);
	if (err) {
out:
		tcf_action_inc_overlimit_qstats(&m->common);
		if (is_redirect)
			retval = TC_ACT_SHOT;
	}

	return retval;
}

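/* Block forwarding: deliver to every port bound to the tc block except
 * the one the packet arrived on. For a redirect, all ports but the
 * last get a mirrored clone; only the final port sees the original
 * redirect action, so no needless clone of the last skb is made.
 */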
static int tcf_blockcast_redir(struct sk_buff *skb, struct tcf_mirred *m,
			       struct tcf_block *block, int m_eaction,
			       const u32 exception_ifindex, int retval)
{
	struct net_device *dev_prev = NULL;
	struct net_device *dev = NULL;
	unsigned long index;
	int mirred_eaction;

	mirred_eaction = tcf_mirred_act_wants_ingress(m_eaction) ?
		TCA_INGRESS_MIRROR : TCA_EGRESS_MIRROR;

	xa_for_each(&block->ports, index, dev) {
		if (index == exception_ifindex)
			continue;

		if (!dev_prev)
			goto assign_prev;

		tcf_mirred_to_dev(skb, m, dev_prev,
				  dev_is_mac_header_xmit(dev_prev),
				  mirred_eaction, retval);
assign_prev:
		dev_prev = dev;
	}

	if (dev_prev)
		return tcf_mirred_to_dev(skb, m, dev_prev,
					 dev_is_mac_header_xmit(dev_prev),
					 m_eaction, retval);

	return retval;
}

static int tcf_blockcast_mirror(struct sk_buff *skb, struct tcf_mirred *m,
				struct tcf_block *block, int m_eaction,
				const u32 exception_ifindex, int retval)
{
	struct net_device *dev = NULL;
	unsigned long index;

	xa_for_each(&block->ports, index, dev) {
		if (index == exception_ifindex)
			continue;

		tcf_mirred_to_dev(skb, m, dev,
				  dev_is_mac_header_xmit(dev),
				  m_eaction, retval);
	}

	return retval;
}

static int tcf_blockcast(struct sk_buff *skb, struct tcf_mirred *m,
			 const u32 blockid, struct tcf_result *res,
			 int retval)
{
	const u32 exception_ifindex = skb->dev->ifindex;
	struct tcf_block *block;
	bool is_redirect;
	int m_eaction;

	m_eaction = READ_ONCE(m->tcfm_eaction);
	is_redirect = tcf_mirred_is_act_redirect(m_eaction);

	/* we are already under rcu protection, so can call block lookup
	 * directly.
	 */
	block = tcf_block_lookup(dev_net(skb->dev), blockid);
	if (!block || xa_empty(&block->ports)) {
		tcf_action_inc_overlimit_qstats(&m->common);
		return retval;
	}

	if (is_redirect)
		return tcf_blockcast_redir(skb, m, block, m_eaction,
					   exception_ifindex, retval);

	/* If it's not redirect, it is mirror */
	return tcf_blockcast_mirror(skb, m, block, m_eaction, exception_ifindex,
				    retval);
}

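/* Datapath entry point, invoked from the tc classifier path under RCU.
 * The netlink-configured control action is the default return value;
 * it may be overridden, e.g. to TC_ACT_SHOT when the recursion limit
 * is hit or a redirect fails.
 */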
TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
				     const struct tc_action *a,
				     struct tcf_result *res)
{
	struct tcf_mirred *m = to_mirred(a);
	int retval = READ_ONCE(m->tcf_action);
	unsigned int nest_level;
	bool m_mac_header_xmit;
	struct net_device *dev;
	int m_eaction;
	u32 blockid;

	nest_level = __this_cpu_inc_return(mirred_nest_level);
	if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
				     netdev_name(skb->dev));
		retval = TC_ACT_SHOT;
		goto dec_nest_level;
	}

	tcf_lastuse_update(&m->tcf_tm);
	tcf_action_update_bstats(&m->common, skb);

	blockid = READ_ONCE(m->tcfm_blockid);
	if (blockid) {
		retval = tcf_blockcast(skb, m, blockid, res, retval);
		goto dec_nest_level;
	}

	dev = rcu_dereference_bh(m->tcfm_dev);
	if (unlikely(!dev)) {
		pr_notice_once("tc mirred: target device is gone\n");
		tcf_action_inc_overlimit_qstats(&m->common);
		goto dec_nest_level;
	}

	m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
	m_eaction = READ_ONCE(m->tcfm_eaction);

	retval = tcf_mirred_to_dev(skb, m, dev, m_mac_header_xmit, m_eaction,
				   retval);

dec_nest_level:
	__this_cpu_dec(mirred_nest_level);

	return retval;
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_mirred *m = to_mirred(a);
	struct tcf_t *tm = &m->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			   int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_mirred *m = to_mirred(a);
	struct tc_mirred opt = {
		.index   = m->tcf_index,
		.refcnt  = refcount_read(&m->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
	};
	struct net_device *dev;
	struct tcf_t t;
	u32 blockid;

	spin_lock_bh(&m->tcf_lock);
	opt.action = m->tcf_action;
	opt.eaction = m->tcfm_eaction;
	dev = tcf_mirred_dev_dereference(m);
	if (dev)
		opt.ifindex = dev->ifindex;

	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	blockid = m->tcfm_blockid;
	if (blockid && nla_put_u32(skb, TCA_MIRRED_BLOCKID, blockid))
		goto nla_put_failure;

	tcf_tm_dump(&t, &m->tcf_tm);
	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&m->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&m->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int mirred_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tcf_mirred *m;

	ASSERT_RTNL();
	if (event == NETDEV_UNREGISTER) {
		spin_lock(&mirred_list_lock);
		list_for_each_entry(m, &mirred_list, tcfm_list) {
			spin_lock_bh(&m->tcf_lock);
			if (tcf_mirred_dev_dereference(m) == dev) {
				netdev_put(dev, &m->tcfm_dev_tracker);
				/* Note : no rcu grace period necessary, as
				 * net_device are already rcu protected.
				 */
				RCU_INIT_POINTER(m->tcfm_dev, NULL);
			}
			spin_unlock_bh(&m->tcf_lock);
		}
		spin_unlock(&mirred_list_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block mirred_device_notifier = {
	.notifier_call = mirred_device_event,
};

static void tcf_mirred_dev_put(void *priv)
{
	struct net_device *dev = priv;

	dev_put(dev);
}

static struct net_device *
tcf_mirred_get_dev(const struct tc_action *a,
		   tc_action_priv_destructor *destructor)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(m->tcfm_dev);
	if (dev) {
		dev_hold(dev);
		*destructor = tcf_mirred_dev_put;
	}
	rcu_read_unlock();

	return dev;
}

static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_mirred));
}

static void tcf_offload_mirred_get_dev(struct flow_action_entry *entry,
				       const struct tc_action *act)
{
	entry->dev = act->ops->get_dev(act, &entry->destructor);
	if (!entry->dev)
		return;
	entry->destructor_priv = entry->dev;
}

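/* Translate a mirred action into flow_action entries for hardware
 * offload: when binding, fill the flow_action_entry (including a
 * reference to the target device); otherwise only the action id is
 * reported.
 */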
static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data,
					u32 *index_inc, bool bind,
					struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED_INGRESS;
			tcf_offload_mirred_get_dev(entry, act);
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported mirred offload");
			return -EOPNOTSUPP;
		}
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		if (is_tcf_mirred_egress_redirect(act))
			fl_action->id = FLOW_ACTION_REDIRECT;
		else if (is_tcf_mirred_egress_mirror(act))
			fl_action->id = FLOW_ACTION_MIRRED;
		else if (is_tcf_mirred_ingress_redirect(act))
			fl_action->id = FLOW_ACTION_REDIRECT_INGRESS;
		else if (is_tcf_mirred_ingress_mirror(act))
			fl_action->id = FLOW_ACTION_MIRRED_INGRESS;
		else
			return -EOPNOTSUPP;
	}

	return 0;
}

static struct tc_action_ops act_mirred_ops = {
	.kind		=	"mirred",
	.id		=	TCA_ID_MIRRED,
	.owner		=	THIS_MODULE,
	.act		=	tcf_mirred_act,
	.stats_update	=	tcf_stats_update,
	.dump		=	tcf_mirred_dump,
	.cleanup	=	tcf_mirred_release,
	.init		=	tcf_mirred_init,
	.get_fill_size	=	tcf_mirred_get_fill_size,
	.offload_act_setup =	tcf_mirred_offload_act_setup,
	.size		=	sizeof(struct tcf_mirred),
	.get_dev	=	tcf_mirred_get_dev,
};
MODULE_ALIAS_NET_ACT("mirred");

static __net_init int mirred_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);

	return tc_action_net_init(net, tn, &act_mirred_ops);
}

static void __net_exit mirred_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_mirred_ops.net_id);
}

static struct pernet_operations mirred_net_ops = {
	.init = mirred_init_net,
	.exit_batch = mirred_exit_net,
	.id   = &act_mirred_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim (2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

static int __init mirred_init_module(void)
{
	int err = register_netdevice_notifier(&mirred_device_notifier);

	if (err)
		return err;

	pr_info("Mirror/redirect action on\n");
	err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
	if (err)
		unregister_netdevice_notifier(&mirred_device_notifier);

	return err;
}

static void __exit mirred_cleanup_module(void)
{
	tcf_unregister_action(&act_mirred_ops, &mirred_net_ops);
	unregister_netdevice_notifier(&mirred_device_notifier);
}

module_init(mirred_init_module);
module_exit(mirred_cleanup_module);