// SPDX-License-Identifier: GPL-2.0-only
/* net/sched/sch_hhf.c		Heavy-Hitter Filter (HHF)
 *
 * Copyright (C) 2013 Terry Lam <vtlam@google.com>
 * Copyright (C) 2013 Nandita Dukkipati <nanditad@google.com>
 */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/siphash.h>
#include <net/pkt_sched.h>
#include <net/sock.h>

/*	Heavy-Hitter Filter (HHF)
 *
 * Principles:
 * Flows are classified into two buckets: non-heavy-hitter and heavy-hitter
 * buckets. Initially, a new flow starts as non-heavy-hitter. Once classified
 * as heavy-hitter, it is immediately switched to the heavy-hitter bucket.
 * The buckets are dequeued by a Weighted Deficit Round Robin (WDRR) scheduler,
 * in which the heavy-hitter bucket is served with less weight.
 * In other words, non-heavy-hitters (e.g., short bursts of critical traffic)
 * are isolated from heavy-hitters (e.g., persistent bulk traffic) and also
 * get a higher share of bandwidth.
 *
 * To capture heavy-hitters, we use the "multi-stage filter" algorithm in the
 * following paper:
 * [EV02] C. Estan and G. Varghese, "New Directions in Traffic Measurement and
 * Accounting", in ACM SIGCOMM, 2002.
 *
 * Conceptually, a multi-stage filter comprises k independent hash functions
 * and k counter arrays. Packets are indexed into k counter arrays by k hash
 * functions, respectively. The counters are then increased by the packet sizes.
 * Therefore,
 *    - For a heavy-hitter flow: *all* of its k array counters must be large.
 *    - For a non-heavy-hitter flow: some of its k array counters can be large
 *      due to hash collision with other small flows; however, with high
 *      probability, not *all* k counters are large.
 *
 * By the design of the multi-stage filter algorithm, the false negative rate
 * (heavy-hitters getting away uncaptured) is zero. However, the algorithm is
 * susceptible to false positives (non-heavy-hitters mistakenly classified as
 * heavy-hitters).
 * Therefore, we also implement the following optimizations to reduce false
 * positives by avoiding unnecessary increments of the counter values:
 *    - Optimization O1: once a heavy-hitter is identified, its bytes are not
 *        accounted in the array counters. This technique is called "shielding"
 *        in Section 3.3.1 of [EV02].
 *    - Optimization O2: conservative update of counters
 *                       (Section 3.3.2 of [EV02]),
 *        New counter value = max {old counter value,
 *                                 smallest counter value + packet bytes}
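 *
 *        As a minimal illustrative sketch (the array/variable names are ours,
 *        not taken from the code below), O2 amounts to:
 *
 *            min = smallest of the k counters indexed by this packet;
 *            min += pkt_len;
 *            for (i = 0; i < k; i++)
 *                    if (cnt[i][pos[i]] < min)
 *                            cnt[i][pos[i]] = min;   // raise, never lower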
 *
 * Finally, we refresh the counters periodically since otherwise the counter
 * values will keep accumulating.
 *
 * Once a flow is classified as heavy-hitter, we also save its per-flow state
 * in an exact-matching flow table so that its subsequent packets can be
 * dispatched to the heavy-hitter bucket accordingly.
 *
 *
 * At a high level, this qdisc works as follows:
 * Given a packet p:
 *   - If the flow-id of p (e.g., TCP 5-tuple) is already in the exact-matching
 *     heavy-hitter flow table, denoted table T, then send p to the heavy-hitter
 *     bucket.
 *   - Otherwise, forward p to the multi-stage filter, denoted filter F
 *        + If F decides that p belongs to a non-heavy-hitter flow, then send p
 *          to the non-heavy-hitter bucket.
 *        + Otherwise, if F decides that p belongs to a new heavy-hitter flow,
 *          then set up a new flow entry for the flow-id of p in the table T and
 *          send p to the heavy-hitter bucket.
 *
 * In this implementation:
 *   - T is a fixed-size hash-table with 1024 entries. Hash collision is
 *     resolved by linked-list chaining.
 *   - F has four counter arrays, each array containing 1024 32-bit counters.
 *     That means 4 * 1024 * 32 bits = 16KB of memory.
 *   - Since each array in F contains 1024 counters, 10 bits are sufficient to
 *     index into each array.
 *     Hence, instead of having four hash functions, we chop the 32-bit
 *     skb-hash into three 10-bit chunks, and the remaining 10-bit chunk is
 *     computed as the XOR sum of those three chunks, folding in the two
 *     leftover hash bits (see the sketch below).
 *   - We need to clear the counter arrays periodically; however, directly
 *     memsetting 16KB of memory can lead to cache eviction and unwanted delay.
 *     So each counter is paired with a valid bit, and we only need to reset
 *     4K valid bits (i.e. 512 bytes) instead of 16KB of memory.
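 *
 *     As an illustrative sketch of the chopping (variable names are ours,
 *     mirroring hhf_classify() below):
 *
 *         tmp = skb_hash; xorsum = 0;
 *         for (i = 0; i < 3; i++) {
 *                 pos[i] = tmp & 0x3FF;
 *                 xorsum ^= pos[i];
 *                 tmp >>= 10;
 *         }
 *         pos[3] = xorsum ^ tmp;   // tmp now holds the 2 leftover top bits
 *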
 *   - The Deficit Round Robin engine is taken from the fq_codel
 *     implementation (net/sched/sch_fq_codel.c). Note that wdrr_bucket
 *     corresponds to fq_codel_flow in the fq_codel implementation.
 *
 */

/* Non-configurable parameters */
#define HH_FLOWS_CNT	 1024  /* number of entries in exact-matching table T */
#define HHF_ARRAYS_CNT	 4     /* number of arrays in multi-stage filter F */
#define HHF_ARRAYS_LEN	 1024  /* number of counters in each array of F */
#define HHF_BIT_MASK_LEN 10    /* masking 10 bits */
#define HHF_BIT_MASK	 0x3FF /* bitmask of 10 bits */

#define WDRR_BUCKET_CNT  2     /* two buckets for Weighted DRR */
enum wdrr_bucket_idx {
	WDRR_BUCKET_FOR_HH	= 0, /* bucket id for heavy-hitters */
	WDRR_BUCKET_FOR_NON_HH	= 1  /* bucket id for non-heavy-hitters */
};

#define hhf_time_before(a, b)	\
	(typecheck(u32, a) && typecheck(u32, b) && ((s32)((a) - (b)) < 0))
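
/* Illustrative note (ours): since both operands are u32 jiffies timestamps,
 * the subtraction wraps and the comparison stays correct across the 32-bit
 * rollover, e.g.:
 *
 *	a = 0xFFFFFFF0, b = 0x10	(b is 0x20 jiffies "after" a)
 *	hhf_time_before(a, b)		is true: (s32)(a - b) == -0x20 < 0
 */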

/* Heavy-hitter per-flow state */
struct hh_flow_state {
	u32		 hash_id;	/* hash of flow-id (e.g. TCP 5-tuple) */
	u32		 hit_timestamp;	/* last time heavy-hitter was seen */
	struct list_head flowchain;	/* chaining under hash collision */
};

/* Weighted Deficit Round Robin (WDRR) scheduler */
struct wdrr_bucket {
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  bucketchain;
	int		  deficit;
};

struct hhf_sched_data {
	struct wdrr_bucket buckets[WDRR_BUCKET_CNT];
	siphash_key_t	   perturbation;   /* hash perturbation */
	u32		   quantum;        /* psched_mtu(qdisc_dev(sch)); */
	u32		   drop_overlimit; /* number of times max qdisc packet
					    * limit was hit
					    */
	struct list_head   *hh_flows;       /* table T (currently active HHs) */
	u32		   hh_flows_limit;            /* max active HH allocs */
	u32		   hh_flows_overlimit; /* num of disallowed HH allocs */
	u32		   hh_flows_total_cnt;          /* total admitted HHs */
	u32		   hh_flows_current_cnt;        /* total current HHs  */
	u32		   *hhf_arrays[HHF_ARRAYS_CNT]; /* HH filter F */
	u32		   hhf_arrays_reset_timestamp;  /* last time hhf_arrays
							 * was reset
							 */
	unsigned long	   *hhf_valid_bits[HHF_ARRAYS_CNT]; /* shadow valid bits
							     * of hhf_arrays
							     */
	/* Similar to the "new_flows" vs. "old_flows" concept in fq_codel DRR */
	struct list_head   new_buckets; /* list of new buckets */
	struct list_head   old_buckets; /* list of old buckets */

	/* Configurable HHF parameters */
	u32		   hhf_reset_timeout; /* interval to reset counter
					       * arrays in filter F
					       * (default 40ms)
					       */
	u32		   hhf_admit_bytes;   /* counter thresh to classify as
					       * HH (default 128KB).
					       * With these default values,
					       * 128KB / 40ms = 25 Mbps
					       * i.e., we expect to capture HHs
					       * sending > 25 Mbps.
					       */
	u32		   hhf_evict_timeout; /* aging threshold to evict idle
					       * HHs out of table T. This should
					       * be large enough to avoid
					       * reordering during HH eviction.
					       * (default 1s)
					       */
	u32		   hhf_non_hh_weight; /* WDRR weight for non-HHs
					       * (default 2,
					       *  i.e., non-HH : HH = 2 : 1)
					       */
};
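
/* Worked numbers (ours): with the defaults, a flow must push more than
 * hhf_admit_bytes = 131072 bytes within one hhf_reset_timeout = 40ms window
 * to be admitted as a heavy-hitter, i.e. 131072 B / 0.040 s * 8 bits
 * = ~26 Mbit/s, the "25 Mbps" rule of thumb quoted above.
 */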

static u32 hhf_time_stamp(void)
{
	return jiffies;
}

/* Looks up a heavy-hitter flow in a chaining list of table T. */
static struct hh_flow_state *seek_list(const u32 hash,
				       struct list_head *head,
				       struct hhf_sched_data *q)
{
	struct hh_flow_state *flow, *next;
	u32 now = hhf_time_stamp();

	if (list_empty(head))
		return NULL;

	list_for_each_entry_safe(flow, next, head, flowchain) {
		u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;

		if (hhf_time_before(prev, now)) {
			/* Delete expired heavy-hitters, but preserve one entry
			 * to avoid a kzalloc() the next time this slot is hit.
			 */
			if (list_is_last(&flow->flowchain, head))
				return NULL;
			list_del(&flow->flowchain);
			kfree(flow);
			WRITE_ONCE(q->hh_flows_current_cnt,
				   q->hh_flows_current_cnt - 1);
		} else if (flow->hash_id == hash) {
			return flow;
		}
	}
	return NULL;
}

/* Returns a flow state entry for a new heavy-hitter.  Either reuses an expired
 * entry or dynamically allocates a new one.
 */
static struct hh_flow_state *alloc_new_hh(struct list_head *head,
					  struct hhf_sched_data *q)
{
	struct hh_flow_state *flow;
	u32 now = hhf_time_stamp();

	if (!list_empty(head)) {
		/* Find an expired heavy-hitter flow entry. */
		list_for_each_entry(flow, head, flowchain) {
			u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;

			if (hhf_time_before(prev, now))
				return flow;
		}
	}

	if (q->hh_flows_current_cnt >= q->hh_flows_limit) {
		WRITE_ONCE(q->hh_flows_overlimit, q->hh_flows_overlimit + 1);
		return NULL;
	}
	/* Create new entry. */
	flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
	if (!flow)
		return NULL;

	WRITE_ONCE(q->hh_flows_current_cnt, q->hh_flows_current_cnt + 1);
	INIT_LIST_HEAD(&flow->flowchain);
	list_add_tail(&flow->flowchain, head);

	return flow;
}

/* Assigns packets to WDRR buckets.  Implements a multi-stage filter to
 * classify heavy-hitters.
 */
static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	u32 tmp_hash, hash;
	u32 xorsum, filter_pos[HHF_ARRAYS_CNT], flow_pos;
	struct hh_flow_state *flow;
	u32 pkt_len, min_hhf_val;
	int i;
	u32 prev;
	u32 now = hhf_time_stamp();

	/* Reset the HHF counter arrays if this is the right time. */
	prev = q->hhf_arrays_reset_timestamp + q->hhf_reset_timeout;
	if (hhf_time_before(prev, now)) {
		for (i = 0; i < HHF_ARRAYS_CNT; i++)
			bitmap_zero(q->hhf_valid_bits[i], HHF_ARRAYS_LEN);
		q->hhf_arrays_reset_timestamp = now;
	}

	/* Get hashed flow-id of the skb. */
	hash = skb_get_hash_perturb(skb, &q->perturbation);

	/* Check if this packet belongs to an already established HH flow. */
	flow_pos = hash & HHF_BIT_MASK;
	flow = seek_list(hash, &q->hh_flows[flow_pos], q);
	if (flow) { /* found its HH flow */
		flow->hit_timestamp = now;
		return WDRR_BUCKET_FOR_HH;
	}

	/* Now pass the packet through the multi-stage filter. */
	tmp_hash = hash;
	xorsum = 0;
	for (i = 0; i < HHF_ARRAYS_CNT - 1; i++) {
		/* Split the skb_hash into three 10-bit chunks. */
		filter_pos[i] = tmp_hash & HHF_BIT_MASK;
		xorsum ^= filter_pos[i];
		tmp_hash >>= HHF_BIT_MASK_LEN;
	}
	/* The last chunk is the XOR sum of the other chunks and the leftover
	 * top bits of the skb_hash.
	 */
	filter_pos[HHF_ARRAYS_CNT - 1] = xorsum ^ tmp_hash;

	pkt_len = qdisc_pkt_len(skb);
	min_hhf_val = ~0U;
	for (i = 0; i < HHF_ARRAYS_CNT; i++) {
		u32 val;

		if (!test_bit(filter_pos[i], q->hhf_valid_bits[i])) {
			q->hhf_arrays[i][filter_pos[i]] = 0;
			__set_bit(filter_pos[i], q->hhf_valid_bits[i]);
		}

		val = q->hhf_arrays[i][filter_pos[i]] + pkt_len;
		if (min_hhf_val > val)
			min_hhf_val = val;
	}

	/* Found a new HH iff all counter values > HH admit threshold. */
	if (min_hhf_val > q->hhf_admit_bytes) {
		/* Just captured a new heavy-hitter. */
		flow = alloc_new_hh(&q->hh_flows[flow_pos], q);
		if (!flow) /* memory alloc problem */
			return WDRR_BUCKET_FOR_NON_HH;
		flow->hash_id = hash;
		flow->hit_timestamp = now;
		WRITE_ONCE(q->hh_flows_total_cnt, q->hh_flows_total_cnt + 1);

		/* By returning without updating counters in q->hhf_arrays,
		 * we implicitly implement "shielding" (see Optimization O1).
		 */
		return WDRR_BUCKET_FOR_HH;
	}

	/* Conservative update of HHF arrays (see Optimization O2). */
	for (i = 0; i < HHF_ARRAYS_CNT; i++) {
		if (q->hhf_arrays[i][filter_pos[i]] < min_hhf_val)
			q->hhf_arrays[i][filter_pos[i]] = min_hhf_val;
	}
	return WDRR_BUCKET_FOR_NON_HH;
}
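
/* For intuition, the filter logic above can be exercised on its own. The
 * following is a standalone userspace sketch (our own illustration, not part
 * of this file; it mirrors the default HHF_ARRAYS_CNT/HHF_ARRAYS_LEN and the
 * 128KB admit threshold, but omits table T, valid bits, and periodic reset):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define K	4
 *	#define LEN	1024
 *	#define ADMIT	131072		// 128KB, as hhf_admit_bytes
 *
 *	static uint32_t cnt[K][LEN];
 *
 *	// Returns 1 iff this packet pushes its flow over the HH threshold.
 *	static int filter_admit(uint32_t hash, uint32_t pkt_len)
 *	{
 *		uint32_t pos[K], tmp = hash, xorsum = 0, min = UINT32_MAX;
 *		int i;
 *
 *		for (i = 0; i < K - 1; i++) {
 *			pos[i] = tmp & 0x3FF;
 *			xorsum ^= pos[i];
 *			tmp >>= 10;
 *		}
 *		pos[K - 1] = xorsum ^ tmp;
 *
 *		for (i = 0; i < K; i++)
 *			if (cnt[i][pos[i]] + pkt_len < min)
 *				min = cnt[i][pos[i]] + pkt_len;
 *		if (min > ADMIT)
 *			return 1;	// shielding: counters untouched
 *		for (i = 0; i < K; i++)	// conservative update
 *			if (cnt[i][pos[i]] < min)
 *				cnt[i][pos[i]] = min;
 *		return 0;
 *	}
 *
 *	int main(void)
 *	{
 *		int i, n = 0;
 *
 *		for (i = 0; i < 140 && !n; i++)
 *			n = filter_admit(0xdeadbeef, 1500);
 *		printf("admitted after %d packets\n", i);
 *		return 0;
 *	}
 *
 * With 1500-byte packets, the flow is admitted on packet 88, the first one
 * whose candidate counter value (88 * 1500 = 132000) exceeds 131072.
 */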

/* Removes one skb from head of bucket. */
static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket)
{
	struct sk_buff *skb = bucket->head;

	bucket->head = skb->next;
	skb_mark_not_on_list(skb);
	return skb;
}

/* Tail-adds skb to bucket. */
static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb)
{
	if (bucket->head == NULL)
		bucket->head = skb;
	else
		bucket->tail->next = skb;
	bucket->tail = skb;
	skb->next = NULL;
}

static unsigned int hhf_drop(struct Qdisc *sch, struct sk_buff **to_free)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct wdrr_bucket *bucket;

	/* Always try to drop from heavy-hitters first. */
	bucket = &q->buckets[WDRR_BUCKET_FOR_HH];
	if (!bucket->head)
		bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH];

	if (bucket->head) {
		struct sk_buff *skb = dequeue_head(bucket);

		sch->q.qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_drop(skb, sch, to_free);
	}

	/* Return id of the bucket from which the packet was dropped. */
	return bucket - q->buckets;
}

static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	enum wdrr_bucket_idx idx;
	struct wdrr_bucket *bucket;
	unsigned int prev_backlog;

	idx = hhf_classify(skb, sch);

	bucket = &q->buckets[idx];
	bucket_add(bucket, skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&bucket->bucketchain)) {
		unsigned int weight;

		/* The logic of new_buckets vs. old_buckets is the same as
		 * new_flows vs. old_flows in the implementation of fq_codel,
		 * i.e., short bursts of non-HHs should have strict priority.
		 */
		if (idx == WDRR_BUCKET_FOR_HH) {
			/* Always move heavy-hitters to old bucket. */
			weight = 1;
			list_add_tail(&bucket->bucketchain, &q->old_buckets);
		} else {
			weight = q->hhf_non_hh_weight;
			list_add_tail(&bucket->bucketchain, &q->new_buckets);
		}
		bucket->deficit = weight * q->quantum;
	}
	if (++sch->q.qlen <= sch->limit)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	WRITE_ONCE(q->drop_overlimit, q->drop_overlimit + 1);
	/* Return Congestion Notification only if we dropped a packet from this
	 * bucket.
	 */
	if (hhf_drop(sch, to_free) == idx)
		return NET_XMIT_CN;

	/* As we dropped a packet, we'd better let the upper stack know. */
	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *hhf_dequeue(struct Qdisc *sch)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = NULL;
	struct wdrr_bucket *bucket;
	struct list_head *head;

begin:
	head = &q->new_buckets;
	if (list_empty(head)) {
		head = &q->old_buckets;
		if (list_empty(head))
			return NULL;
	}
	bucket = list_first_entry(head, struct wdrr_bucket, bucketchain);

	if (bucket->deficit <= 0) {
		int weight = (bucket - q->buckets == WDRR_BUCKET_FOR_HH) ?
			      1 : q->hhf_non_hh_weight;

		bucket->deficit += weight * q->quantum;
		list_move_tail(&bucket->bucketchain, &q->old_buckets);
		goto begin;
	}

	if (bucket->head) {
		skb = dequeue_head(bucket);
		sch->q.qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
	}

	if (!skb) {
		/* Force a pass through old_buckets to prevent starvation. */
		if ((head == &q->new_buckets) && !list_empty(&q->old_buckets))
			list_move_tail(&bucket->bucketchain, &q->old_buckets);
		else
			list_del_init(&bucket->bucketchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	bucket->deficit -= qdisc_pkt_len(skb);

	return skb;
}
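
/* Illustrative note (ours): with the defaults (quantum = MTU, say 1514, and
 * hhf_non_hh_weight = 2), each WDRR round credits the non-HH bucket
 * 2 * 1514 = 3028 bytes and the HH bucket 1514 bytes, so when both buckets
 * stay backlogged with 1514-byte packets:
 *
 *	non-HH: deficit 3028 -> two packets dequeued -> deficit 0
 *	HH:     deficit 1514 -> one packet dequeued  -> deficit 0
 *
 * i.e. roughly a 2:1 bandwidth split in favor of non-heavy-hitters.
 */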

static void hhf_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = hhf_dequeue(sch)) != NULL)
		rtnl_kfree_skbs(skb, skb);
}

static void hhf_destroy(struct Qdisc *sch)
{
	int i;
	struct hhf_sched_data *q = qdisc_priv(sch);

	for (i = 0; i < HHF_ARRAYS_CNT; i++) {
		kvfree(q->hhf_arrays[i]);
		kvfree(q->hhf_valid_bits[i]);
	}

	if (!q->hh_flows)
		return;

	for (i = 0; i < HH_FLOWS_CNT; i++) {
		struct hh_flow_state *flow, *next;
		struct list_head *head = &q->hh_flows[i];

		if (list_empty(head))
			continue;
		list_for_each_entry_safe(flow, next, head, flowchain) {
			list_del(&flow->flowchain);
			kfree(flow);
		}
	}
	kvfree(q->hh_flows);
}

static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = {
	[TCA_HHF_BACKLOG_LIMIT]	 = { .type = NLA_U32 },
	[TCA_HHF_QUANTUM]	 = { .type = NLA_U32 },
	[TCA_HHF_HH_FLOWS_LIMIT] = { .type = NLA_U32 },
	[TCA_HHF_RESET_TIMEOUT]	 = { .type = NLA_U32 },
	[TCA_HHF_ADMIT_BYTES]	 = { .type = NLA_U32 },
	[TCA_HHF_EVICT_TIMEOUT]	 = { .type = NLA_U32 },
	[TCA_HHF_NON_HH_WEIGHT]	 = { .type = NLA_U32 },
};

static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	unsigned int dropped_pkts = 0, dropped_bytes = 0;
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HHF_MAX + 1];
	int err;
	u64 non_hh_quantum;
	u32 new_quantum = q->quantum;
	u32 new_hhf_non_hh_weight = q->hhf_non_hh_weight;

	err = nla_parse_nested_deprecated(tb, TCA_HHF_MAX, opt, hhf_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_HHF_QUANTUM])
		new_quantum = nla_get_u32(tb[TCA_HHF_QUANTUM]);

	if (tb[TCA_HHF_NON_HH_WEIGHT])
		new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);

	non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
	if (non_hh_quantum == 0 || non_hh_quantum > INT_MAX)
		return -EINVAL;

	sch_tree_lock(sch);

	if (tb[TCA_HHF_BACKLOG_LIMIT])
		WRITE_ONCE(sch->limit, nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]));

	WRITE_ONCE(q->quantum, new_quantum);
	WRITE_ONCE(q->hhf_non_hh_weight, new_hhf_non_hh_weight);

	if (tb[TCA_HHF_HH_FLOWS_LIMIT])
		WRITE_ONCE(q->hh_flows_limit,
			   nla_get_u32(tb[TCA_HHF_HH_FLOWS_LIMIT]));

	if (tb[TCA_HHF_RESET_TIMEOUT]) {
		u32 us = nla_get_u32(tb[TCA_HHF_RESET_TIMEOUT]);

		WRITE_ONCE(q->hhf_reset_timeout,
			   usecs_to_jiffies(us));
	}

	if (tb[TCA_HHF_ADMIT_BYTES])
		WRITE_ONCE(q->hhf_admit_bytes,
			   nla_get_u32(tb[TCA_HHF_ADMIT_BYTES]));

	if (tb[TCA_HHF_EVICT_TIMEOUT]) {
		u32 us = nla_get_u32(tb[TCA_HHF_EVICT_TIMEOUT]);

		WRITE_ONCE(q->hhf_evict_timeout,
			   usecs_to_jiffies(us));
	}

	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = qdisc_dequeue_internal(sch, false);

		if (!skb)
			break;

		dropped_pkts++;
		dropped_bytes += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
	}
	qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);

	sch_tree_unlock(sch);
	return 0;
}

static int hhf_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	int i;

	sch->limit = 1000;
	q->quantum = psched_mtu(qdisc_dev(sch));
	get_random_bytes(&q->perturbation, sizeof(q->perturbation));
	INIT_LIST_HEAD(&q->new_buckets);
	INIT_LIST_HEAD(&q->old_buckets);

	/* Configurable HHF parameters */
	q->hhf_reset_timeout = HZ / 25; /* 40  ms */
	q->hhf_admit_bytes = 131072;    /* 128 KB */
	q->hhf_evict_timeout = HZ;      /* 1  sec */
	q->hhf_non_hh_weight = 2;

	if (opt) {
		int err = hhf_change(sch, opt, extack);

		if (err)
			return err;
	}

	if (!q->hh_flows) {
		/* Initialize heavy-hitter flow table. */
		q->hh_flows = kvcalloc(HH_FLOWS_CNT, sizeof(struct list_head),
				       GFP_KERNEL);
		if (!q->hh_flows)
			return -ENOMEM;
		for (i = 0; i < HH_FLOWS_CNT; i++)
			INIT_LIST_HEAD(&q->hh_flows[i]);

		/* Cap the max number of active HHs at twice the length of the
		 * hh_flows table.
		 */
		q->hh_flows_limit = 2 * HH_FLOWS_CNT;
		q->hh_flows_overlimit = 0;
		q->hh_flows_total_cnt = 0;
		q->hh_flows_current_cnt = 0;

		/* Initialize heavy-hitter filter arrays. */
		for (i = 0; i < HHF_ARRAYS_CNT; i++) {
			q->hhf_arrays[i] = kvcalloc(HHF_ARRAYS_LEN,
						    sizeof(u32),
						    GFP_KERNEL);
			if (!q->hhf_arrays[i]) {
				/* Note: hhf_destroy() will be called
				 * by our caller.
				 */
				return -ENOMEM;
			}
		}
		q->hhf_arrays_reset_timestamp = hhf_time_stamp();

		/* Initialize valid bits of heavy-hitter filter arrays. */
		for (i = 0; i < HHF_ARRAYS_CNT; i++) {
			q->hhf_valid_bits[i] = kvzalloc(HHF_ARRAYS_LEN /
							BITS_PER_BYTE, GFP_KERNEL);
			if (!q->hhf_valid_bits[i]) {
				/* Note: hhf_destroy() will be called
				 * by our caller.
				 */
				return -ENOMEM;
			}
		}

		/* Initialize Weighted DRR buckets. */
		for (i = 0; i < WDRR_BUCKET_CNT; i++) {
			struct wdrr_bucket *bucket = q->buckets + i;

			INIT_LIST_HEAD(&bucket->bucketchain);
		}
	}

	return 0;
}

static int hhf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_HHF_BACKLOG_LIMIT, READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_HHF_QUANTUM, READ_ONCE(q->quantum)) ||
	    nla_put_u32(skb, TCA_HHF_HH_FLOWS_LIMIT,
			READ_ONCE(q->hh_flows_limit)) ||
	    nla_put_u32(skb, TCA_HHF_RESET_TIMEOUT,
			jiffies_to_usecs(READ_ONCE(q->hhf_reset_timeout))) ||
	    nla_put_u32(skb, TCA_HHF_ADMIT_BYTES,
			READ_ONCE(q->hhf_admit_bytes)) ||
	    nla_put_u32(skb, TCA_HHF_EVICT_TIMEOUT,
			jiffies_to_usecs(READ_ONCE(q->hhf_evict_timeout))) ||
	    nla_put_u32(skb, TCA_HHF_NON_HH_WEIGHT,
			READ_ONCE(q->hhf_non_hh_weight)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int hhf_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct tc_hhf_xstats st = {
		.drop_overlimit = READ_ONCE(q->drop_overlimit),
		.hh_overlimit	= READ_ONCE(q->hh_flows_overlimit),
		.hh_tot_count	= READ_ONCE(q->hh_flows_total_cnt),
		.hh_cur_count	= READ_ONCE(q->hh_flows_current_cnt),
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops hhf_qdisc_ops __read_mostly = {
	.id		=	"hhf",
	.priv_size	=	sizeof(struct hhf_sched_data),

	.enqueue	=	hhf_enqueue,
	.dequeue	=	hhf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	hhf_init,
	.reset		=	hhf_reset,
	.destroy	=	hhf_destroy,
	.change		=	hhf_change,
	.dump		=	hhf_dump,
	.dump_stats	=	hhf_dump_stats,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("hhf");

static int __init hhf_module_init(void)
{
	return register_qdisc(&hhf_qdisc_ops);
}

static void __exit hhf_module_exit(void)
{
	unregister_qdisc(&hhf_qdisc_ops);
}

module_init(hhf_module_init)
module_exit(hhf_module_exit)
MODULE_AUTHOR("Terry Lam");
MODULE_AUTHOR("Nandita Dukkipati");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Heavy-Hitter Filter (HHF)");