/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough.  And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
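
/* Editor's note, not part of the original source: a minimal sketch of how
 * this classifier is typically driven from userspace with iproute2 (the
 * device name and classid below are hypothetical):
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 u32 \
 *		match ip src 10.0.0.0/8 flowid 1:1
 *
 * This creates a single key node (tc_u_knode) whose one 32bit key/mask
 * pair is compared against the word carrying the IPv4 source address.
 */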

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <linux/netdevice.h>
#include <linux/hash.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

struct tc_u_knode {
	struct tc_u_knode __rcu	*next;
	u32			handle;
	struct tc_u_hnode __rcu	*ht_up;
	struct tcf_exts		exts;
#ifdef CONFIG_NET_CLS_IND
	int			ifindex;
#endif
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode __rcu	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt __percpu *pf;
#endif
	u32			flags;
#ifdef CONFIG_CLS_U32_MARK
	u32			val;
	u32			mask;
	u32 __percpu		*pcpu_success;
#endif
	struct tcf_proto	*tp;
	union {
		struct work_struct	work;
		struct rcu_head		rcu;
	};
	/* The 'sel' field MUST be the last field in the structure so that
	 * the tc_u32_keys can be allocated at the end of the structure.
	 */
	struct tc_u32_sel	sel;
};

struct tc_u_hnode {
	struct tc_u_hnode __rcu	*next;
	u32			handle;
	u32			prio;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned int		divisor;
	struct rcu_head		rcu;
	/* The 'ht' field MUST be the last field in the structure so that
	 * more entries can be allocated at the end of the structure.
	 */
	struct tc_u_knode __rcu	*ht[1];
};

struct tc_u_common {
	struct tc_u_hnode __rcu	*hlist;
	struct Qdisc		*q;
	int			refcnt;
	u32			hgenerator;
	struct hlist_node	hnode;
	struct rcu_head		rcu;
};
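
/* Editor's note, not part of the original source: per the TC_U32_* macros
 * in uapi/linux/pkt_cls.h, a u32 handle packs three fields:
 *
 *	hash table id (12 bits) | bucket (8 bits) | node id (12 bits)
 *
 * e.g. for handle 0x80100800, TC_U32_HTID() yields 0x80100000,
 * TC_U32_HASH() yields 0 and TC_U32_NODE() yields 0x800.
 */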

static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}
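
/* Editor's note, not part of the original source (hypothetical values):
 * with sel->hmask == htonl(0x0000ff00), u32_change() below sets
 * fshift = ffs(0x0000ff00) - 1 = 8, so a packet word htonl(0x12345678)
 * folds to ntohl(htonl(0x12345678) & htonl(0x0000ff00)) >> 8 == 0x56;
 * u32_classify() then masks this with ht->divisor to pick the bucket.
 */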

static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int	  off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = rcu_dereference_bh(ht->ht[sel]);

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		__this_cpu_inc(n->pf->rcnt);
		j = 0;
#endif

		if (tc_skip_sw(n->flags)) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mask) != n->val) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		} else {
			__this_cpu_inc(*n->pcpu_success);
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = rcu_dereference_bh(n->next);
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			__this_cpu_inc(n->pf->kcnts[j]);
			j++;
#endif
		}

		ht = rcu_dereference_bh(n->ht_down);
		if (!ht) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {
				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->ifindex)) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				__this_cpu_inc(n->pf->rhit);
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}

				return r;
			}
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = rcu_dereference_bh(n->ht_down);
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = rcu_dereference_bh(n->ht_up);
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}
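
/* Editor's note, not part of the original source: the TC_U32_VAROFFSET
 * arithmetic above implements the classic trick for stepping over a
 * variable-length IPv4 header.  With sel->offoff == 0,
 * sel->offmask == htons(0x0f00) and sel->offshift == 6, the first 16bit
 * word of a header with IHL == 5 (0x45xx) yields
 * ntohs(htons(0x4500) & htons(0x0f00)) >> 6 == 20 bytes; the +3/&~3
 * dance then rounds the accumulated offset up to a 32bit boundary.
 */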

static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next))
		if (ht->handle == handle)
			break;

	return ht;
}

static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = rtnl_dereference(ht->ht[sel]);
	     n;
	     n = rtnl_dereference(n->next))
		if (n->handle == handle)
			break;
out:
	return n;
}

static void *u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return NULL;

	if (TC_U32_KEY(handle) == 0)
		return ht;

	return u32_lookup_key(ht, handle);
}

static u32 gen_new_htid(struct tc_u_common *tp_c)
{
	int i = 0x800;

	/* hgenerator is only used under the rtnl lock, so it is safe to
	 * increment it without read-copy-update semantics.
	 */
	do {
		if (++tp_c->hgenerator == 0x7FF)
			tp_c->hgenerator = 1;
	} while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}
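
/* Editor's note, not part of the original source (worked example): on a
 * fresh tc_u_common the first call above returns (1 | 0x800) << 20 ==
 * 0x80100000, while the first root hash table created in u32_init()
 * uses the fixed id 0x80000000.
 */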

static struct hlist_head *tc_u_common_hash;

#define U32_HASH_SHIFT 10
#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)

static unsigned int tc_u_hash(const struct tcf_proto *tp)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	u32 qhandle = tp->q->handle;
	int ifindex = dev->ifindex;

	return hash_64((u64)ifindex << 32 | qhandle, U32_HASH_SHIFT);
}

static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp)
{
	struct tc_u_common *tc;
	unsigned int h;

	h = tc_u_hash(tp);
	hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) {
		if (tc->q == tp->q)
			return tc;
	}
	return NULL;
}
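
/* Editor's note, not part of the original source: one tc_u_common is
 * shared by every u32 tcf_proto attached to the same qdisc, hence the
 * lookup keys on tp->q; tc_u_hash() merely narrows the candidates to
 * one of U32_HASH_SIZE buckets.
 */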

static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;
	unsigned int h;

	tp_c = tc_u_common_find(tp);

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
	root_ht->prio = tp->prio;

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->q = tp->q;
		INIT_HLIST_NODE(&tp_c->hnode);

		h = tc_u_hash(tp);
		hlist_add_head(&tp_c->hnode, &tc_u_common_hash[h]);
	}

	tp_c->refcnt++;
	RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
	rcu_assign_pointer(tp_c->hlist, root_ht);
	root_ht->tp_c = tp_c;

	rcu_assign_pointer(tp->root, root_ht);
	tp->data = tp_c;
	return 0;
}

static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
			   bool free_pf)
{
	tcf_exts_destroy(&n->exts);
	tcf_exts_put_net(&n->exts);
	if (n->ht_down)
		n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
	if (free_pf)
		free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
	if (free_pf)
		free_percpu(n->pcpu_success);
#endif
	kfree(n);
	return 0;
}

/* u32_delete_key_rcu should be called when freeing a copied
 * version of a tc_u_knode obtained from u32_init_knode(). When
 * copies are obtained from u32_init_knode() the statistics are
 * shared between the old and new copies to allow readers to
 * continue to update the statistics during the copy. To support
 * this the u32_delete_key_rcu variant does not free the percpu
 * statistics.
 */
static void u32_delete_key_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);

	rtnl_lock();
	u32_destroy_key(key->tp, key, false);
	rtnl_unlock();
}

static void u32_delete_key_rcu(struct rcu_head *rcu)
{
	struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);

	INIT_WORK(&key->work, u32_delete_key_work);
	tcf_queue_work(&key->work);
}

/* u32_delete_key_freepf_rcu is the rcu callback variant
 * that frees the entire structure including the statistics
 * percpu variables. Only use this if the key is not a copy
 * returned by u32_init_knode(). See u32_delete_key_rcu()
 * for the variant that should be used with keys returned from
 * u32_init_knode().
 */
static void u32_delete_key_freepf_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);

	rtnl_lock();
	u32_destroy_key(key->tp, key, true);
	rtnl_unlock();
}

static void u32_delete_key_freepf_rcu(struct rcu_head *rcu)
{
	struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);

	INIT_WORK(&key->work, u32_delete_key_freepf_work);
	tcf_queue_work(&key->work);
}
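
/* Editor's note, not part of the original source: both RCU callbacks
 * above only queue the real work, because u32_destroy_key() must run
 * under the RTNL lock, a mutex that cannot be taken from RCU (softirq)
 * callback context; tcf_queue_work() defers the free to process context.
 */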

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_knode __rcu **kp;
	struct tc_u_knode *pkp;
	struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);

	if (ht) {
		kp = &ht->ht[TC_U32_HASH(key->handle)];
		for (pkp = rtnl_dereference(*kp); pkp;
		     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
			if (pkp == key) {
				RCU_INIT_POINTER(*kp, key->next);

				tcf_unbind_filter(tp, &key->res);
				tcf_exts_get_net(&key->exts);
				call_rcu(&key->rcu, u32_delete_key_freepf_rcu);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}

static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_u32_offload cls_u32 = {};

	if (!tc_should_offload(dev, 0))
		return;

	tc_cls_common_offload_init(&cls_u32.common, tp);
	cls_u32.command = TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = handle;

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32);
}

static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
				u32 flags)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	if (!tc_should_offload(dev, flags))
		return tc_skip_sw(flags) ? -EINVAL : 0;

	tc_cls_common_offload_init(&cls_u32.common, tp);
	cls_u32.command = TC_CLSU32_NEW_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32);
	if (tc_skip_sw(flags))
		return err;

	return 0;
}

static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_u32_offload cls_u32 = {};

	if (!tc_should_offload(dev, 0))
		return;

	tc_cls_common_offload_init(&cls_u32.common, tp);
	cls_u32.command = TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32);
}

static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				u32 flags)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	if (!tc_should_offload(dev, flags))
		return tc_skip_sw(flags) ? -EINVAL : 0;

	tc_cls_common_offload_init(&cls_u32.common, tp);
	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
	cls_u32.knode.handle = n->handle;
	cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
	cls_u32.knode.val = n->val;
	cls_u32.knode.mask = n->mask;
#else
	cls_u32.knode.val = 0;
	cls_u32.knode.mask = 0;
#endif
	cls_u32.knode.sel = &n->sel;
	cls_u32.knode.exts = &n->exts;
	if (n->ht_down)
		cls_u32.knode.link_handle = n->ht_down->handle;

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32);

	if (!err)
		n->flags |= TCA_CLS_FLAGS_IN_HW;

	if (tc_skip_sw(flags))
		return err;

	return 0;
}
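
/* Editor's note, not part of the original source: in the two
 * u32_replace_hw_* helpers above, a hardware setup error is only
 * propagated when the user asked for skip_sw; otherwise it is ignored
 * and the software classifier serves as the fallback path.
 */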

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
			RCU_INIT_POINTER(ht->ht[h],
					 rtnl_dereference(n->next));
			tcf_unbind_filter(tp, &n->res);
			u32_remove_hw_knode(tp, n->handle);
			if (tcf_exts_get_net(&n->exts))
				call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
			else
				u32_destroy_key(n->tp, n, true);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode __rcu **hn;
	struct tc_u_hnode *phn;

	WARN_ON(ht->refcnt);

	u32_clear_hnode(tp, ht);

	hn = &tp_c->hlist;
	for (phn = rtnl_dereference(*hn);
	     phn;
	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
		if (phn == ht) {
			u32_clear_hw_hnode(tp, ht);
			RCU_INIT_POINTER(*hn, ht->next);
			kfree_rcu(ht, rcu);
			return 0;
		}
	}

	return -ENOENT;
}

static bool ht_empty(struct tc_u_hnode *ht)
{
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++)
		if (rcu_access_pointer(ht->ht[h]))
			return false;

	return true;
}

static void u32_destroy(struct tcf_proto *tp)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

	WARN_ON(root_ht == NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		hlist_del(&tp_c->hnode);

		for (ht = rtnl_dereference(tp_c->hlist);
		     ht;
		     ht = rtnl_dereference(ht->next)) {
			ht->refcnt--;
			u32_clear_hnode(tp, ht);
		}

		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
			RCU_INIT_POINTER(tp_c->hlist, ht->next);
			kfree_rcu(ht, rcu);
		}

		kfree(tp_c);
	}

	tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct tc_u_hnode *ht = arg;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
	struct tc_u_common *tp_c = tp->data;
	int ret = 0;

	if (ht == NULL)
		goto out;

	if (TC_U32_KEY(ht->handle)) {
		u32_remove_hw_knode(tp, ht->handle);
		ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
		goto out;
	}

	if (root_ht == ht)
		return -EINVAL;

	if (ht->refcnt == 1) {
		ht->refcnt--;
		u32_destroy_hnode(tp, ht);
	} else {
		return -EBUSY;
	}

out:
	*last = true;
	if (root_ht) {
		if (root_ht->refcnt > 1) {
			*last = false;
			goto ret;
		}
		if (root_ht->refcnt == 1) {
			if (!ht_empty(root_ht)) {
				*last = false;
				goto ret;
			}
		}
	}

	if (tp_c->refcnt > 1) {
		*last = false;
		goto ret;
	}

	if (tp_c->refcnt == 1) {
		struct tc_u_hnode *ht;

		for (ht = rtnl_dereference(tp_c->hlist);
		     ht;
		     ht = rtnl_dereference(ht->next))
			if (!ht_empty(ht)) {
				*last = false;
				break;
			}
	}

ret:
	return ret;
}

#define NR_U32_NODE (1<<12)
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned long i;
	unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
					GFP_KERNEL);
	if (!bitmap)
		return handle | 0xFFF;

	for (n = rtnl_dereference(ht->ht[TC_U32_HASH(handle)]);
	     n;
	     n = rtnl_dereference(n->next))
		set_bit(TC_U32_NODE(n->handle), bitmap);

	i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
	if (i >= NR_U32_NODE)
		i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);

	kfree(bitmap);
	return handle | (i >= NR_U32_NODE ? 0xFFF : i);
}
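
/* Editor's note, not part of the original source (worked example): node
 * ids are allocated from 0x800 upwards first, falling back to 1..0x7ff,
 * so the first auto-generated key in bucket 0 of hash table 0x80100000
 * gets handle 0x80100000 | 0x800 == 0x80100800.
 */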

static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
	[TCA_U32_FLAGS]		= { .type = NLA_U32 },
};

static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base, struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, bool ovr)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle))
			return -EINVAL;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				return -EINVAL;
			ht_down->refcnt++;
		}

		ht_old = rtnl_dereference(n->ht_down);
		rcu_assign_pointer(n->ht_down, ht_down);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV]) {
		int ret;

		ret = tcf_change_indev(net, tb[TCA_U32_INDEV]);
		if (ret < 0)
			return -EINVAL;
		n->ifindex = ret;
	}
#endif
	return 0;
}

static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
			      struct tc_u_knode *n)
{
	struct tc_u_knode __rcu **ins;
	struct tc_u_knode *pins;
	struct tc_u_hnode *ht;

	if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

	ins = &ht->ht[TC_U32_HASH(n->handle)];

	/* The node must always exist for it to be replaced; if this is
	 * not the case then something went very wrong elsewhere.
	 */
	for (pins = rtnl_dereference(*ins); ;
	     ins = &pins->next, pins = rtnl_dereference(*ins))
		if (pins->handle == n->handle)
			break;

	RCU_INIT_POINTER(n->next, pins->next);
	rcu_assign_pointer(*ins, n);
}

static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
					 struct tc_u_knode *n)
{
	struct tc_u_knode *new;
	struct tc_u32_sel *s = &n->sel;

	new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
		      GFP_KERNEL);

	if (!new)
		return NULL;

	RCU_INIT_POINTER(new->next, n->next);
	new->handle = n->handle;
	RCU_INIT_POINTER(new->ht_up, n->ht_up);

#ifdef CONFIG_NET_CLS_IND
	new->ifindex = n->ifindex;
#endif
	new->fshift = n->fshift;
	new->res = n->res;
	new->flags = n->flags;
	RCU_INIT_POINTER(new->ht_down, n->ht_down);

	/* bump reference count as long as we hold pointer to structure */
	if (new->ht_down)
		new->ht_down->refcnt++;

#ifdef CONFIG_CLS_U32_PERF
	/* Statistics may be incremented by readers during update
	 * so we must keep them intact. When the node is later destroyed
	 * a special destroy call must be made to not free the pf memory.
	 */
	new->pf = n->pf;
#endif

#ifdef CONFIG_CLS_U32_MARK
	new->val = n->val;
	new->mask = n->mask;
	/* Similarly success statistics must be moved as pointers */
	new->pcpu_success = n->pcpu_success;
#endif
	new->tp = tp;
	memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));

	if (tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE)) {
		kfree(new);
		return NULL;
	}

	return new;
}

static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca, void **arg, bool ovr)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid, flags = 0;
	int err;
#ifdef CONFIG_CLS_U32_PERF
	size_t size;
#endif

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_U32_FLAGS]) {
		flags = nla_get_u32(tb[TCA_U32_FLAGS]);
		if (!tc_flags_valid(flags))
			return -EINVAL;
	}

	n = *arg;
	if (n) {
		struct tc_u_knode *new;

		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		if (n->flags != flags)
			return -EINVAL;

		new = u32_init_knode(tp, n);
		if (!new)
			return -ENOMEM;

		err = u32_set_parms(net, tp, base,
				    rtnl_dereference(n->ht_up), new, tb,
				    tca[TCA_RATE], ovr);

		if (err) {
			u32_destroy_key(tp, new, false);
			return err;
		}

		err = u32_replace_hw_knode(tp, new, flags);
		if (err) {
			u32_destroy_key(tp, new, false);
			return err;
		}

		if (!tc_in_hw(new->flags))
			new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		u32_replace_knode(tp, tp_c, new);
		tcf_unbind_filter(tp, &n->res);
		tcf_exts_get_net(&n->exts);
		call_rcu(&n->rcu, u32_delete_key_rcu);
		return 0;
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		ht->tp_c = tp_c;
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;

		err = u32_replace_hw_hnode(tp, ht, flags);
		if (err) {
			kfree(ht);
			return err;
		}

		RCU_INIT_POINTER(ht->next, tp_c->hlist);
		rcu_assign_pointer(tp_c->hlist, ht);
		*arg = ht;

		return 0;
	}
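
	/* Editor's note, not part of the original source (worked example):
	 * TCA_U32_DIVISOR above is the bucket count as seen by userspace;
	 * "divisor 256" from tc is stored as ht->divisor == 255, which
	 * doubles as the mask applied to u32_hash_fold() in u32_classify().
	 */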

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = rtnl_dereference(tp->root);
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = rtnl_dereference(tp->root);
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL)
		return -EINVAL;

	s = nla_data(tb[TCA_U32_SEL]);

	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;

#ifdef CONFIG_CLS_U32_PERF
	size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
	n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
	if (!n->pf) {
		kfree(n);
		return -ENOBUFS;
	}
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	RCU_INIT_POINTER(n->ht_up, ht);
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
	n->flags = flags;
	n->tp = tp;

	err = tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
	if (err < 0)
		goto errout;

#ifdef CONFIG_CLS_U32_MARK
	n->pcpu_success = alloc_percpu(u32);
	if (!n->pcpu_success) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		n->val = mark->val;
		n->mask = mark->mask;
	}
#endif

	err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr);
	if (err == 0) {
		struct tc_u_knode __rcu **ins;
		struct tc_u_knode *pins;

		err = u32_replace_hw_knode(tp, n, flags);
		if (err)
			goto errhw;

		if (!tc_in_hw(n->flags))
			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		ins = &ht->ht[TC_U32_HASH(handle)];
		for (pins = rtnl_dereference(*ins); pins;
		     ins = &pins->next, pins = rtnl_dereference(*ins))
			if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
				break;

		RCU_INIT_POINTER(n->next, pins);
		rcu_assign_pointer(*ins, n);
		*arg = n;
		return 0;
	}

errhw:
#ifdef CONFIG_CLS_U32_MARK
	free_percpu(n->pcpu_success);
#endif

errout:
	tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
	free_percpu(n->pf);
#endif
	kfree(n);
	return err;
}

static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

static void u32_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct tc_u_knode *n = fh;

	if (n && n->res.classid == classid)
		n->res.class = cl;
}

static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
		    struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = fh;
	struct tc_u_hnode *ht_up, *ht_down;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
#ifdef CONFIG_CLS_U32_PERF
		struct tc_u32_pcnt *gpf;
		int cpu;
#endif

		if (nla_put(skb, TCA_U32_SEL,
			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			    &n->sel))
			goto nla_put_failure;

		ht_up = rtnl_dereference(n->ht_up);
		if (ht_up) {
			u32 htid = n->handle & 0xFFFFF000;

			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;

		ht_down = rtnl_dereference(n->ht_down);
		if (ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
			goto nla_put_failure;

		if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->val || n->mask)) {
			struct tc_u32_mark mark = {.val = n->val,
						   .mask = n->mask,
						   .success = 0};
			int cpum;

			for_each_possible_cpu(cpum) {
				__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);

				mark.success += cnt;
			}

			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
				goto nla_put_failure;
		}
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		if (n->ifindex) {
			struct net_device *dev;

			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#endif
#ifdef CONFIG_CLS_U32_PERF
		gpf = kzalloc(sizeof(struct tc_u32_pcnt) +
			      n->sel.nkeys * sizeof(u64),
			      GFP_KERNEL);
		if (!gpf)
			goto nla_put_failure;

		for_each_possible_cpu(cpu) {
			int i;
			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);

			gpf->rcnt += pf->rcnt;
			gpf->rhit += pf->rhit;
			for (i = 0; i < n->sel.nkeys; i++)
				gpf->kcnts[i] += pf->kcnts[i];
		}

		if (nla_put_64bit(skb, TCA_U32_PCNT,
				  sizeof(struct tc_u32_pcnt) +
				  n->sel.nkeys * sizeof(u64),
				  gpf, TCA_U32_PAD)) {
			kfree(gpf);
			goto nla_put_failure;
		}
		kfree(gpf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.dump		=	u32_dump,
	.bind_class	=	u32_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_u32(void)
{
	int i, ret;

	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	pr_info("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE,
					  sizeof(struct hlist_head),
					  GFP_KERNEL);
	if (!tc_u_common_hash)
		return -ENOMEM;

	for (i = 0; i < U32_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&tc_u_common_hash[i]);

	ret = register_tcf_proto_ops(&cls_u32_ops);
	if (ret)
		kvfree(tc_u_common_hash);
	return ret;
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
	kvfree(tc_u_common_hash);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");