xref: /linux/net/sched/act_ife.c (revision 22d55f02b8922a097cd4be1e2f131dfa7ef65901)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/act_ife.c	Inter-FE action based on ForCES WG InterFE LFB
4  *
5  *		Refer to:
6  *		draft-ietf-forces-interfelfb-03
7  *		and
8  *		netdev01 paper:
9  *		"Distributing Linux Traffic Control Classifier-Action
10  *		Subsystem"
11  *		Authors: Jamal Hadi Salim and Damascene M. Joachimpillai
12  *
13  * copyright Jamal Hadi Salim (2015)
14 */
15 
16 #include <linux/types.h>
17 #include <linux/kernel.h>
18 #include <linux/string.h>
19 #include <linux/errno.h>
20 #include <linux/skbuff.h>
21 #include <linux/rtnetlink.h>
22 #include <linux/module.h>
23 #include <linux/init.h>
24 #include <net/net_namespace.h>
25 #include <net/netlink.h>
26 #include <net/pkt_sched.h>
27 #include <net/pkt_cls.h>
28 #include <uapi/linux/tc_act/tc_ife.h>
29 #include <net/tc_act/tc_ife.h>
30 #include <linux/etherdevice.h>
31 #include <net/ife.h>
32 
33 static unsigned int ife_net_id;
34 static int max_metacnt = IFE_META_MAX + 1;
35 static struct tc_action_ops act_ife_ops;
36 
37 static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = {
38 	[TCA_IFE_PARMS] = { .len = sizeof(struct tc_ife)},
39 	[TCA_IFE_DMAC] = { .len = ETH_ALEN},
40 	[TCA_IFE_SMAC] = { .len = ETH_ALEN},
41 	[TCA_IFE_TYPE] = { .type = NLA_U16},
42 };
43 
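/* The exported helpers below provide generic u16/u32 encode, check,
 * dump, alloc and validate routines for use by the metadata modules
 * (loaded on demand via the "ife-meta-<name>" module alias).
 */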
44 int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi)
45 {
46 	u16 edata = 0;
47 
48 	if (mi->metaval)
49 		edata = *(u16 *)mi->metaval;
50 	else if (metaval)
51 		edata = metaval;
52 
53 	if (!edata) /* will not encode */
54 		return 0;
55 
56 	edata = htons(edata);
57 	return ife_tlv_meta_encode(skbdata, mi->metaid, 2, &edata);
58 }
59 EXPORT_SYMBOL_GPL(ife_encode_meta_u16);
60 
61 int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi)
62 {
63 	if (mi->metaval)
64 		return nla_put_u32(skb, mi->metaid, *(u32 *)mi->metaval);
65 	else
66 		return nla_put(skb, mi->metaid, 0, NULL);
67 }
68 EXPORT_SYMBOL_GPL(ife_get_meta_u32);
69 
70 int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi)
71 {
72 	if (metaval || mi->metaval)
73 		return 8; /* T+L+V == 2+2+4 */
74 
75 	return 0;
76 }
77 EXPORT_SYMBOL_GPL(ife_check_meta_u32);
78 
79 int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi)
80 {
81 	if (metaval || mi->metaval)
82 		return 8; /* T+L+(V) == 2+2+(2+2bytepad) */
83 
84 	return 0;
85 }
86 EXPORT_SYMBOL_GPL(ife_check_meta_u16);
87 
88 int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi)
89 {
90 	u32 edata = metaval;
91 
92 	if (mi->metaval)
93 		edata = *(u32 *)mi->metaval;
94 	else if (metaval)
95 		edata = metaval;
96 
97 	if (!edata) /* will not encode */
98 		return 0;
99 
100 	edata = htonl(edata);
101 	return ife_tlv_meta_encode(skbdata, mi->metaid, 4, &edata);
102 }
103 EXPORT_SYMBOL_GPL(ife_encode_meta_u32);
104 
105 int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
106 {
107 	if (mi->metaval)
108 		return nla_put_u16(skb, mi->metaid, *(u16 *)mi->metaval);
109 	else
110 		return nla_put(skb, mi->metaid, 0, NULL);
111 }
112 EXPORT_SYMBOL_GPL(ife_get_meta_u16);
113 
114 int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
115 {
116 	mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
117 	if (!mi->metaval)
118 		return -ENOMEM;
119 
120 	return 0;
121 }
122 EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);
123 
124 int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
125 {
126 	mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
127 	if (!mi->metaval)
128 		return -ENOMEM;
129 
130 	return 0;
131 }
132 EXPORT_SYMBOL_GPL(ife_alloc_meta_u16);
133 
134 void ife_release_meta_gen(struct tcf_meta_info *mi)
135 {
136 	kfree(mi->metaval);
137 }
138 EXPORT_SYMBOL_GPL(ife_release_meta_gen);
139 
140 int ife_validate_meta_u32(void *val, int len)
141 {
142 	if (len == sizeof(u32))
143 		return 0;
144 
145 	return -EINVAL;
146 }
147 EXPORT_SYMBOL_GPL(ife_validate_meta_u32);
148 
149 int ife_validate_meta_u16(void *val, int len)
150 {
151 	/* length will not include padding */
152 	if (len == sizeof(u16))
153 		return 0;
154 
155 	return -EINVAL;
156 }
157 EXPORT_SYMBOL_GPL(ife_validate_meta_u16);
158 
159 static LIST_HEAD(ifeoplist);
160 static DEFINE_RWLOCK(ife_mod_lock);
161 
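/* look up the ops registered for @metaid; on success a reference on
 * the owning module is held and the caller must drop it with
 * module_put() when done
 */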
162 static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
163 {
164 	struct tcf_meta_ops *o;
165 
166 	read_lock(&ife_mod_lock);
167 	list_for_each_entry(o, &ifeoplist, list) {
168 		if (o->metaid == metaid) {
169 			if (!try_module_get(o->owner))
170 				o = NULL;
171 			read_unlock(&ife_mod_lock);
172 			return o;
173 		}
174 	}
175 	read_unlock(&ife_mod_lock);
176 
177 	return NULL;
178 }
179 
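/* register a metadatum ops structure: all mandatory callbacks must be
 * present and both the metaid and the name must be unique; a missing
 * release callback defaults to ife_release_meta_gen()
 */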
180 int register_ife_op(struct tcf_meta_ops *mops)
181 {
182 	struct tcf_meta_ops *m;
183 
184 	if (!mops->metaid || !mops->metatype || !mops->name ||
185 	    !mops->check_presence || !mops->encode || !mops->decode ||
186 	    !mops->get || !mops->alloc)
187 		return -EINVAL;
188 
189 	write_lock(&ife_mod_lock);
190 
191 	list_for_each_entry(m, &ifeoplist, list) {
192 		if (m->metaid == mops->metaid ||
193 		    (strcmp(mops->name, m->name) == 0)) {
194 			write_unlock(&ife_mod_lock);
195 			return -EEXIST;
196 		}
197 	}
198 
199 	if (!mops->release)
200 		mops->release = ife_release_meta_gen;
201 
202 	list_add_tail(&mops->list, &ifeoplist);
203 	write_unlock(&ife_mod_lock);
204 	return 0;
205 }
206 EXPORT_SYMBOL_GPL(register_ife_op);
207 
208 int unregister_ife_op(struct tcf_meta_ops *mops)
209 {
210 	struct tcf_meta_ops *m;
211 	int err = -ENOENT;
212 
213 	write_lock(&ife_mod_lock);
214 	list_for_each_entry(m, &ifeoplist, list) {
215 		if (m->metaid == mops->metaid) {
216 			list_del(&mops->list);
217 			err = 0;
218 			break;
219 		}
220 	}
221 	write_unlock(&ife_mod_lock);
222 
223 	return err;
224 }
225 EXPORT_SYMBOL_GPL(unregister_ife_op);
226 
227 static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
228 {
229 	int ret = 0;
230 	/* XXX: unfortunately we can't use nla_policy at this point
231 	 * because a length of 0 is valid in the case of "allow",
232 	 * while "use" semantics do enforce a proper length.
233 	 * nla_policy could have covered the latter, but it is hard
234 	 * to use it just for that.
235 	 */
236 	if (ops->validate)
237 		return ops->validate(val, len);
238 
239 	if (ops->metatype == NLA_U32)
240 		ret = ife_validate_meta_u32(val, len);
241 	else if (ops->metatype == NLA_U16)
242 		ret = ife_validate_meta_u16(val, len);
243 
244 	return ret;
245 }
246 
247 #ifdef CONFIG_MODULES
248 static const char *ife_meta_id2name(u32 metaid)
249 {
250 	switch (metaid) {
251 	case IFE_META_SKBMARK:
252 		return "skbmark";
253 	case IFE_META_PRIO:
254 		return "skbprio";
255 	case IFE_META_TCINDEX:
256 		return "tcindex";
257 	default:
258 		return "unknown";
259 	}
260 }
261 #endif
262 
263 /* called when adding new meta information: load the module that
264  * implements this metaid (if needed) and vet the supplied value */
265 static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held)
266 {
267 	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
268 	int ret = 0;
269 
270 	if (!ops) {
271 		ret = -ENOENT;
272 #ifdef CONFIG_MODULES
273 		if (rtnl_held)
274 			rtnl_unlock();
275 		request_module("ife-meta-%s", ife_meta_id2name(metaid));
276 		if (rtnl_held)
277 			rtnl_lock();
278 		ops = find_ife_oplist(metaid);
279 #endif
280 	}
281 
282 	if (ops) {
283 		ret = 0;
284 		if (len)
285 			ret = ife_validate_metatype(ops, val, len);
286 
287 		module_put(ops->owner);
288 	}
289 
290 	return ret;
291 }
292 
293 /* called when adding new meta information: allocate the metainfo
294  * entry (and its value, if any) and link it into the metalist */
295 static int __add_metainfo(const struct tcf_meta_ops *ops,
296 			  struct tcf_ife_info *ife, u32 metaid, void *metaval,
297 			  int len, bool atomic, bool exists)
298 {
299 	struct tcf_meta_info *mi = NULL;
300 	int ret = 0;
301 
302 	mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
303 	if (!mi)
304 		return -ENOMEM;
305 
306 	mi->metaid = metaid;
307 	mi->ops = ops;
308 	if (len > 0) {
309 		ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
310 		if (ret != 0) {
311 			kfree(mi);
312 			return ret;
313 		}
314 	}
315 
316 	if (exists)
317 		spin_lock_bh(&ife->tcf_lock);
318 	list_add_tail(&mi->metalist, &ife->metalist);
319 	if (exists)
320 		spin_unlock_bh(&ife->tcf_lock);
321 
322 	return ret;
323 }
324 
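/* called with ife_mod_lock held, hence the atomic allocation in
 * __add_metainfo(); on success the reference taken on ops->owner is
 * kept until _tcf_ife_cleanup() releases it
 */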
325 static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
326 				    struct tcf_ife_info *ife, u32 metaid,
327 				    bool exists)
328 {
329 	int ret;
330 
331 	if (!try_module_get(ops->owner))
332 		return -ENOENT;
333 	ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
334 	if (ret)
335 		module_put(ops->owner);
336 	return ret;
337 }
338 
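/* add a single metadatum by id; find_ife_oplist() takes a module
 * reference which is kept on success (released later in
 * _tcf_ife_cleanup()) or put back here on failure
 */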
339 static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
340 			int len, bool exists)
341 {
342 	const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
343 	int ret;
344 
345 	if (!ops)
346 		return -ENOENT;
347 	ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
348 	if (ret)
349 		/* put back the module reference find_ife_oplist() took */
350 		module_put(ops->owner);
351 	return ret;
352 }
353 
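/* "allow all" case: no metadata list was supplied, so install every
 * currently registered metadatum; succeeds if at least one of them
 * could be installed
 */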
354 static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
355 {
356 	struct tcf_meta_ops *o;
357 	int rc = 0;
358 	int installed = 0;
359 
360 	read_lock(&ife_mod_lock);
361 	list_for_each_entry(o, &ifeoplist, list) {
362 		rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
363 		if (rc == 0)
364 			installed += 1;
365 	}
366 	read_unlock(&ife_mod_lock);
367 
368 	if (installed)
369 		return 0;
370 	else
371 		return -EINVAL;
372 }
373 
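/* dump the installed metadata as a TCA_IFE_METALST nest; the nest is
 * trimmed and -1 returned if none of the entries could be encoded
 */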
374 static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
375 {
376 	struct tcf_meta_info *e;
377 	struct nlattr *nest;
378 	unsigned char *b = skb_tail_pointer(skb);
379 	int total_encoded = 0;
380 
381 	/* an empty metalist can only happen on decode */
382 	if (list_empty(&ife->metalist))
383 		return 0;
384 
385 	nest = nla_nest_start_noflag(skb, TCA_IFE_METALST);
386 	if (!nest)
387 		goto out_nlmsg_trim;
388 
389 	list_for_each_entry(e, &ife->metalist, metalist) {
390 		if (!e->ops->get(skb, e))
391 			total_encoded += 1;
392 	}
393 
394 	if (!total_encoded)
395 		goto out_nlmsg_trim;
396 
397 	nla_nest_end(skb, nest);
398 
399 	return 0;
400 
401 out_nlmsg_trim:
402 	nlmsg_trim(skb, b);
403 	return -1;
404 }
405 
406 /* under ife->tcf_lock */
407 static void _tcf_ife_cleanup(struct tc_action *a)
408 {
409 	struct tcf_ife_info *ife = to_ife(a);
410 	struct tcf_meta_info *e, *n;
411 
412 	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
413 		list_del(&e->metalist);
414 		if (e->metaval) {
415 			if (e->ops->release)
416 				e->ops->release(e);
417 			else
418 				kfree(e->metaval);
419 		}
420 		module_put(e->ops->owner);
421 		kfree(e);
422 	}
423 }
424 
425 static void tcf_ife_cleanup(struct tc_action *a)
426 {
427 	struct tcf_ife_info *ife = to_ife(a);
428 	struct tcf_ife_params *p;
429 
430 	spin_lock_bh(&ife->tcf_lock);
431 	_tcf_ife_cleanup(a);
432 	spin_unlock_bh(&ife->tcf_lock);
433 
434 	p = rcu_dereference_protected(ife->params, 1);
435 	if (p)
436 		kfree_rcu(p, rcu);
437 }
438 
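/* walk the per-metaid attributes parsed from TCA_IFE_METALST, vet
 * each value against its ops and add it to the action's metalist
 */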
439 static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
440 			     bool exists, bool rtnl_held)
441 {
442 	int len = 0;
443 	int rc = 0;
444 	int i = 0;
445 	void *val;
446 
447 	for (i = 1; i < max_metacnt; i++) {
448 		if (tb[i]) {
449 			val = nla_data(tb[i]);
450 			len = nla_len(tb[i]);
451 
452 			rc = load_metaops_and_vet(i, val, len, rtnl_held);
453 			if (rc != 0)
454 				return rc;
455 
456 			rc = add_metainfo(ife, i, val, len, exists);
457 			if (rc)
458 				return rc;
459 		}
460 	}
461 
462 	return rc;
463 }
464 
465 static int tcf_ife_init(struct net *net, struct nlattr *nla,
466 			struct nlattr *est, struct tc_action **a,
467 			int ovr, int bind, bool rtnl_held,
468 			struct tcf_proto *tp, struct netlink_ext_ack *extack)
469 {
470 	struct tc_action_net *tn = net_generic(net, ife_net_id);
471 	struct nlattr *tb[TCA_IFE_MAX + 1];
472 	struct nlattr *tb2[IFE_META_MAX + 1];
473 	struct tcf_chain *goto_ch = NULL;
474 	struct tcf_ife_params *p;
475 	struct tcf_ife_info *ife;
476 	u16 ife_type = ETH_P_IFE;
477 	struct tc_ife *parm;
478 	u8 *daddr = NULL;
479 	u8 *saddr = NULL;
480 	bool exists = false;
481 	int ret = 0;
482 	int err;
483 
484 	err = nla_parse_nested_deprecated(tb, TCA_IFE_MAX, nla, ife_policy,
485 					  NULL);
486 	if (err < 0)
487 		return err;
488 
489 	if (!tb[TCA_IFE_PARMS])
490 		return -EINVAL;
491 
492 	parm = nla_data(tb[TCA_IFE_PARMS]);
493 
494 	/* IFE_DECODE is 0 and indicates the opposite of IFE_ENCODE because
495 	 * they cannot run at the same time.  Reject all other flag values,
496 	 * which are not supported right now.
497 	 */
498 	if (parm->flags & ~IFE_ENCODE)
499 		return -EINVAL;
500 
501 	p = kzalloc(sizeof(*p), GFP_KERNEL);
502 	if (!p)
503 		return -ENOMEM;
504 
505 	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
506 	if (err < 0) {
507 		kfree(p);
508 		return err;
509 	}
510 	exists = err;
511 	if (exists && bind) {
512 		kfree(p);
513 		return 0;
514 	}
515 
516 	if (!exists) {
517 		ret = tcf_idr_create(tn, parm->index, est, a, &act_ife_ops,
518 				     bind, true);
519 		if (ret) {
520 			tcf_idr_cleanup(tn, parm->index);
521 			kfree(p);
522 			return ret;
523 		}
524 		ret = ACT_P_CREATED;
525 	} else if (!ovr) {
526 		tcf_idr_release(*a, bind);
527 		kfree(p);
528 		return -EEXIST;
529 	}
530 
531 	ife = to_ife(*a);
532 	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
533 	if (err < 0)
534 		goto release_idr;
535 
536 	p->flags = parm->flags;
537 
538 	if (parm->flags & IFE_ENCODE) {
539 		if (tb[TCA_IFE_TYPE])
540 			ife_type = nla_get_u16(tb[TCA_IFE_TYPE]);
541 		if (tb[TCA_IFE_DMAC])
542 			daddr = nla_data(tb[TCA_IFE_DMAC]);
543 		if (tb[TCA_IFE_SMAC])
544 			saddr = nla_data(tb[TCA_IFE_SMAC]);
545 	}
546 
547 	if (parm->flags & IFE_ENCODE) {
548 		if (daddr)
549 			ether_addr_copy(p->eth_dst, daddr);
550 		else
551 			eth_zero_addr(p->eth_dst);
552 
553 		if (saddr)
554 			ether_addr_copy(p->eth_src, saddr);
555 		else
556 			eth_zero_addr(p->eth_src);
557 
558 		p->eth_type = ife_type;
559 	}
560 
562 	if (ret == ACT_P_CREATED)
563 		INIT_LIST_HEAD(&ife->metalist);
564 
565 	if (tb[TCA_IFE_METALST]) {
566 		err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
567 						  tb[TCA_IFE_METALST], NULL,
568 						  NULL);
569 		if (err)
570 			goto metadata_parse_err;
571 		err = populate_metalist(ife, tb2, exists, rtnl_held);
572 		if (err)
573 			goto metadata_parse_err;
574 
575 	} else {
576 		/* if no metadata allow list was passed, or allow-all was
577 		 * requested, then here we process by adding as many
578 		 * supported metadata as we can.  You had better have at
579 		 * least one, else we are going to bail out.
580 		 */
581 		err = use_all_metadata(ife, exists);
582 		if (err)
583 			goto metadata_parse_err;
584 	}
585 
586 	if (exists)
587 		spin_lock_bh(&ife->tcf_lock);
588 	/* protected by tcf_lock when modifying existing action */
589 	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
590 	rcu_swap_protected(ife->params, p, 1);
591 
592 	if (exists)
593 		spin_unlock_bh(&ife->tcf_lock);
594 	if (goto_ch)
595 		tcf_chain_put_by_act(goto_ch);
596 	if (p)
597 		kfree_rcu(p, rcu);
598 
599 	if (ret == ACT_P_CREATED)
600 		tcf_idr_insert(tn, *a);
601 
602 	return ret;
603 metadata_parse_err:
604 	if (goto_ch)
605 		tcf_chain_put_by_act(goto_ch);
606 release_idr:
607 	kfree(p);
608 	tcf_idr_release(*a, bind);
609 	return err;
610 }
611 
612 static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
613 			int ref)
614 {
615 	unsigned char *b = skb_tail_pointer(skb);
616 	struct tcf_ife_info *ife = to_ife(a);
617 	struct tcf_ife_params *p;
618 	struct tc_ife opt = {
619 		.index = ife->tcf_index,
620 		.refcnt = refcount_read(&ife->tcf_refcnt) - ref,
621 		.bindcnt = atomic_read(&ife->tcf_bindcnt) - bind,
622 	};
623 	struct tcf_t t;
624 
625 	spin_lock_bh(&ife->tcf_lock);
626 	opt.action = ife->tcf_action;
627 	p = rcu_dereference_protected(ife->params,
628 				      lockdep_is_held(&ife->tcf_lock));
629 	opt.flags = p->flags;
630 
631 	if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt))
632 		goto nla_put_failure;
633 
634 	tcf_tm_dump(&t, &ife->tcf_tm);
635 	if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
636 		goto nla_put_failure;
637 
638 	if (!is_zero_ether_addr(p->eth_dst)) {
639 		if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, p->eth_dst))
640 			goto nla_put_failure;
641 	}
642 
643 	if (!is_zero_ether_addr(p->eth_src)) {
644 		if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, p->eth_src))
645 			goto nla_put_failure;
646 	}
647 
648 	if (nla_put(skb, TCA_IFE_TYPE, 2, &p->eth_type))
649 		goto nla_put_failure;
650 
651 	if (dump_metalist(skb, ife)) {
652 		/* ignore failure to dump metalist */
653 		pr_info("Failed to dump metalist\n");
654 	}
655 
656 	spin_unlock_bh(&ife->tcf_lock);
657 	return skb->len;
658 
659 nla_put_failure:
660 	spin_unlock_bh(&ife->tcf_lock);
661 	nlmsg_trim(skb, b);
662 	return -1;
663 }
664 
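/* find the installed metadatum matching @metaid and hand the payload
 * to its decode callback; -ENOENT if it is not configured here
 */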
665 static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
666 			      u16 metaid, u16 mlen, void *mdata)
667 {
668 	struct tcf_meta_info *e;
669 
670 	/* XXX: use a hash table to speed this up */
671 	list_for_each_entry(e, &ife->metalist, metalist) {
672 		if (metaid == e->metaid) {
673 			if (e->ops) {
674 				/* decode presence was already checked at register time */
675 				return e->ops->decode(skb, mdata, mlen);
676 			}
677 		}
678 	}
679 
680 	return -ENOENT;
681 }
682 
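/* decode path: expose the L2 header at ingress, locate the IFE
 * metadata TLVs via ife_decode(), dispatch each metadatum to its
 * decode ops, then reclassify the inner frame via eth_type_trans()
 */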
683 static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
684 			  struct tcf_result *res)
685 {
686 	struct tcf_ife_info *ife = to_ife(a);
687 	int action = ife->tcf_action;
688 	u8 *ifehdr_end;
689 	u8 *tlv_data;
690 	u16 metalen;
691 
692 	bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
693 	tcf_lastuse_update(&ife->tcf_tm);
694 
695 	if (skb_at_tc_ingress(skb))
696 		skb_push(skb, skb->dev->hard_header_len);
697 
698 	tlv_data = ife_decode(skb, &metalen);
699 	if (unlikely(!tlv_data)) {
700 		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
701 		return TC_ACT_SHOT;
702 	}
703 
704 	ifehdr_end = tlv_data + metalen;
705 	for (; tlv_data < ifehdr_end; tlv_data = ife_tlv_meta_next(tlv_data)) {
706 		u8 *curr_data;
707 		u16 mtype;
708 		u16 dlen;
709 
710 		curr_data = ife_tlv_meta_decode(tlv_data, ifehdr_end, &mtype,
711 						&dlen, NULL);
712 		if (!curr_data) {
713 			qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
714 			return TC_ACT_SHOT;
715 		}
716 
717 		if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
718 			/* abuse overlimits to count when we receive metadata
719 			 * but don't have an ops for it
720 			 */
721 			pr_info_ratelimited("Unknown metaid %d dlen %d\n",
722 					    mtype, dlen);
723 			qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
724 		}
725 	}
726 
727 	if (WARN_ON(tlv_data != ifehdr_end)) {
728 		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
729 		return TC_ACT_SHOT;
730 	}
731 
732 	skb->protocol = eth_type_trans(skb, skb->dev);
733 	skb_reset_network_header(skb);
734 
735 	return action;
736 }
737 
738 /* XXX: check if we can do this at install time instead of in the
739  * send data path
740  */
741 static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
742 {
743 	struct tcf_meta_info *e, *n;
744 	int tot_run_sz = 0, run_sz = 0;
745 
746 	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
747 		if (e->ops->check_presence) {
748 			run_sz = e->ops->check_presence(skb, e);
749 			tot_run_sz += run_sz;
750 		}
751 	}
752 
753 	return tot_run_sz;
754 }
755 
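/* encode path: size the IFE header from the metadata present, let
 * each metadatum ops encode its TLV under tcf_lock, then rewrite the
 * outer ethernet header with the configured addresses and ethertype
 */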
756 static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
757 			  struct tcf_result *res, struct tcf_ife_params *p)
758 {
759 	struct tcf_ife_info *ife = to_ife(a);
760 	int action = ife->tcf_action;
761 	struct ethhdr *oethh;	/* outer ether header */
762 	struct tcf_meta_info *e;
763 	/* resulting frame layout:
764 	 * OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
765 	 * where ORIGDATA = the original ethernet header etc.
766 	 */
767 	u16 metalen = ife_get_sz(skb, ife);
768 	int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
769 	unsigned int skboff = 0;
770 	int new_len = skb->len + hdrm;
771 	bool exceed_mtu = false;
772 	void *ife_meta;
773 	int err = 0;
774 
775 	if (!skb_at_tc_ingress(skb)) {
776 		if (new_len > skb->dev->mtu)
777 			exceed_mtu = true;
778 	}
779 
780 	bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
781 	tcf_lastuse_update(&ife->tcf_tm);
782 
783 	if (!metalen) {		/* no metadata to send */
784 		/* abuse overlimits to count when we let a packet
785 		 * through with no metadata
786 		 */
787 		qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
788 		return action;
789 	}
790 	/* could be a bad policy setup or MTU config,
791 	 * so let's be conservative and drop */
792 	if ((action == TC_ACT_SHOT) || exceed_mtu) {
793 		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
794 		return TC_ACT_SHOT;
795 	}
796 
797 	if (skb_at_tc_ingress(skb))
798 		skb_push(skb, skb->dev->hard_header_len);
799 
800 	ife_meta = ife_encode(skb, metalen);
801 
802 	spin_lock(&ife->tcf_lock);
803 
804 	/* XXX: we don't have a clever way of telling encode to
805 	 * not repeat some of the computations that were already
806 	 * done by ops->check_presence...
807 	 */
808 	list_for_each_entry(e, &ife->metalist, metalist) {
809 		if (e->ops->encode) {
810 			err = e->ops->encode(skb, (void *)(ife_meta + skboff),
811 					     e);
812 		}
813 		if (err < 0) {
814 			/* too corrupt to keep around if overwritten */
815 			spin_unlock(&ife->tcf_lock);
816 			qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
817 			return TC_ACT_SHOT;
818 		}
819 		skboff += err;
820 	}
821 	spin_unlock(&ife->tcf_lock);
822 	oethh = (struct ethhdr *)skb->data;
823 
824 	if (!is_zero_ether_addr(p->eth_src))
825 		ether_addr_copy(oethh->h_source, p->eth_src);
826 	if (!is_zero_ether_addr(p->eth_dst))
827 		ether_addr_copy(oethh->h_dest, p->eth_dst);
828 	oethh->h_proto = htons(p->eth_type);
829 
830 	if (skb_at_tc_ingress(skb))
831 		skb_pull(skb, skb->dev->hard_header_len);
832 
833 	return action;
834 }
835 
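/* action entry point: params are read under RCU and the IFE_ENCODE
 * flag selects between the encode and decode paths
 */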
836 static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
837 		       struct tcf_result *res)
838 {
839 	struct tcf_ife_info *ife = to_ife(a);
840 	struct tcf_ife_params *p;
841 	int ret;
842 
843 	p = rcu_dereference_bh(ife->params);
844 	if (p->flags & IFE_ENCODE) {
845 		ret = tcf_ife_encode(skb, a, res, p);
846 		return ret;
847 	}
848 
849 	return tcf_ife_decode(skb, a, res);
850 }
851 
852 static int tcf_ife_walker(struct net *net, struct sk_buff *skb,
853 			  struct netlink_callback *cb, int type,
854 			  const struct tc_action_ops *ops,
855 			  struct netlink_ext_ack *extack)
856 {
857 	struct tc_action_net *tn = net_generic(net, ife_net_id);
858 
859 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
860 }
861 
862 static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index)
863 {
864 	struct tc_action_net *tn = net_generic(net, ife_net_id);
865 
866 	return tcf_idr_search(tn, a, index);
867 }
868 
869 static struct tc_action_ops act_ife_ops = {
870 	.kind = "ife",
871 	.id = TCA_ID_IFE,
872 	.owner = THIS_MODULE,
873 	.act = tcf_ife_act,
874 	.dump = tcf_ife_dump,
875 	.cleanup = tcf_ife_cleanup,
876 	.init = tcf_ife_init,
877 	.walk = tcf_ife_walker,
878 	.lookup = tcf_ife_search,
879 	.size =	sizeof(struct tcf_ife_info),
880 };
881 
882 static __net_init int ife_init_net(struct net *net)
883 {
884 	struct tc_action_net *tn = net_generic(net, ife_net_id);
885 
886 	return tc_action_net_init(tn, &act_ife_ops);
887 }
888 
889 static void __net_exit ife_exit_net(struct list_head *net_list)
890 {
891 	tc_action_net_exit(net_list, ife_net_id);
892 }
893 
894 static struct pernet_operations ife_net_ops = {
895 	.init = ife_init_net,
896 	.exit_batch = ife_exit_net,
897 	.id   = &ife_net_id,
898 	.size = sizeof(struct tc_action_net),
899 };
900 
901 static int __init ife_init_module(void)
902 {
903 	return tcf_register_action(&act_ife_ops, &ife_net_ops);
904 }
905 
906 static void __exit ife_cleanup_module(void)
907 {
908 	tcf_unregister_action(&act_ife_ops, &ife_net_ops);
909 }
910 
911 module_init(ife_init_module);
912 module_exit(ife_cleanup_module);
913 
914 MODULE_AUTHOR("Jamal Hadi Salim (2015)");
915 MODULE_DESCRIPTION("Inter-FE LFB action");
916 MODULE_LICENSE("GPL");
917