/* net/sched/sch_hhf.c		Heavy-Hitter Filter (HHF)
 *
 * Copyright (C) 2013 Terry Lam <vtlam@google.com>
 * Copyright (C) 2013 Nandita Dukkipati <nanditad@google.com>
 */

#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/sock.h>

/* Heavy-Hitter Filter (HHF)
 *
 * Principles:
 * Flows are classified into two buckets: non-heavy-hitter and heavy-hitter
 * buckets. Initially, a new flow starts as non-heavy-hitter. Once classified
 * as heavy-hitter, it is immediately switched to the heavy-hitter bucket.
 * The buckets are dequeued by a Weighted Deficit Round Robin (WDRR) scheduler,
 * in which the heavy-hitter bucket is served with less weight.
 * In other words, non-heavy-hitters (e.g., short bursts of critical traffic)
 * are isolated from heavy-hitters (e.g., persistent bulk traffic) and also
 * have a higher share of bandwidth.
 *
 * To capture heavy-hitters, we use the "multi-stage filter" algorithm in the
 * following paper:
 * [EV02] C. Estan and G. Varghese, "New Directions in Traffic Measurement and
 * Accounting", in ACM SIGCOMM, 2002.
 *
 * Conceptually, a multi-stage filter comprises k independent hash functions
 * and k counter arrays. Packets are indexed into k counter arrays by k hash
 * functions, respectively. The counters are then increased by the packet
 * sizes. Therefore,
 *    - For a heavy-hitter flow: *all* of its k array counters must be large.
 *    - For a non-heavy-hitter flow: some of its k array counters can be large
 *      due to hash collision with other small flows; however, with high
 *      probability, not *all* k counters are large.
 *
 * By the design of the multi-stage filter algorithm, the false negative rate
 * (heavy-hitters getting away uncaptured) is zero. However, the algorithm is
 * susceptible to false positives (non-heavy-hitters mistakenly classified as
 * heavy-hitters).
 * Therefore, we also implement the following optimizations to reduce false
 * positives by avoiding unnecessary increment of the counter values:
 *    - Optimization O1: once a heavy-hitter is identified, its bytes are not
 *      accounted in the array counters. This technique is called "shielding"
 *      in Section 3.3.1 of [EV02].
 *    - Optimization O2: conservative update of counters
 *      (Section 3.3.2 of [EV02]),
 *      New counter value = max {old counter value,
 *                               smallest counter value + packet bytes}
 *
 * Finally, we refresh the counters periodically since otherwise the counter
 * values will keep accumulating.
 *
 * Once a flow is classified as heavy-hitter, we also save its per-flow state
 * in an exact-matching flow table so that its subsequent packets can be
 * dispatched to the heavy-hitter bucket accordingly.
 *
 *
 * At a high level, this qdisc works as follows:
 * Given a packet p:
 *   - If the flow-id of p (e.g., TCP 5-tuple) is already in the exact-matching
 *     heavy-hitter flow table, denoted table T, then send p to the
 *     heavy-hitter bucket.
 *   - Otherwise, forward p to the multi-stage filter, denoted filter F
 *     + If F decides that p belongs to a non-heavy-hitter flow, then send p
 *       to the non-heavy-hitter bucket.
 *     + Otherwise, if F decides that p belongs to a new heavy-hitter flow,
 *       then set up a new flow entry for the flow-id of p in the table T and
 *       send p to the heavy-hitter bucket.
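 *
 * As a toy illustration of the conservative update (O2) above, suppose a
 * 1500-byte packet indexes counters currently holding {3000, 6000, 8000,
 * 9000} bytes. The smallest counter plus the packet size is 3000 + 1500 =
 * 4500, so only counters below 4500 are raised to 4500, while the larger
 * ones keep their values. (These numbers are made up for illustration and
 * are not taken from [EV02] or from the code below.)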
 *
 * In this implementation:
 * - T is a fixed-size hash-table with 1024 entries. Hash collision is
 *   resolved by linked-list chaining.
 * - F has four counter arrays, each array containing 1024 32-bit counters.
 *   That means 4 * 1024 * 32 bits = 16KB of memory.
 * - Since each array in F contains 1024 counters, 10 bits are sufficient to
 *   index into each array.
 *   Hence, instead of having four hash functions, we chop the 32-bit
 *   skb-hash into three 10-bit chunks, and the remaining 10-bit chunk is
 *   computed as the XOR sum of those three chunks.
 * - We need to clear the counter arrays periodically; however, directly
 *   memsetting 16KB of memory can lead to cache eviction and unwanted delay.
 *   So by representing each counter by a valid bit, we only need to reset
 *   4K bits (i.e. 512 bytes) instead of 16KB of memory.
 * - The Deficit Round Robin engine is taken from the fq_codel implementation
 *   (net/sched/sch_fq_codel.c). Note that wdrr_bucket corresponds to
 *   fq_codel_flow in the fq_codel implementation.
 */

/* Non-configurable parameters */
#define HH_FLOWS_CNT	 1024	/* number of entries in exact-matching table T */
#define HHF_ARRAYS_CNT	 4	/* number of arrays in multi-stage filter F */
#define HHF_ARRAYS_LEN	 1024	/* number of counters in each array of F */
#define HHF_BIT_MASK_LEN 10	/* masking 10 bits */
#define HHF_BIT_MASK	 0x3FF	/* bitmask of 10 bits */

#define WDRR_BUCKET_CNT	 2	/* two buckets for Weighted DRR */
enum wdrr_bucket_idx {
	WDRR_BUCKET_FOR_HH	= 0, /* bucket id for heavy-hitters */
	WDRR_BUCKET_FOR_NON_HH	= 1  /* bucket id for non-heavy-hitters */
};

#define hhf_time_before(a, b)	\
	(typecheck(u32, a) && typecheck(u32, b) && ((s32)((a) - (b)) < 0))

/* Heavy-hitter per-flow state */
struct hh_flow_state {
	u32		 hash_id;	/* hash of flow-id (e.g. TCP 5-tuple) */
	u32		 hit_timestamp;	/* last time heavy-hitter was seen */
	struct list_head flowchain;	/* chaining under hash collision */
};

/* Weighted Deficit Round Robin (WDRR) scheduler */
struct wdrr_bucket {
	struct sk_buff	 *head;
	struct sk_buff	 *tail;
	struct list_head bucketchain;
	int		 deficit;
};

struct hhf_sched_data {
	struct wdrr_bucket buckets[WDRR_BUCKET_CNT];
	u32		   perturbation;   /* hash perturbation */
	u32		   quantum;	   /* psched_mtu(qdisc_dev(sch)); */
	u32		   drop_overlimit; /* number of times max qdisc packet
					    * limit was hit
					    */
	struct list_head   *hh_flows;	   /* table T (currently active HHs) */
	u32		   hh_flows_limit;	/* max active HH allocs */
	u32		   hh_flows_overlimit;	/* num of disallowed HH allocs */
	u32		   hh_flows_total_cnt;	/* total admitted HHs */
	u32		   hh_flows_current_cnt; /* total current HHs */
	u32		   *hhf_arrays[HHF_ARRAYS_CNT];	/* HH filter F */
	u32		   hhf_arrays_reset_timestamp;	/* last time hhf_arrays
							 * was reset
							 */
	unsigned long	   *hhf_valid_bits[HHF_ARRAYS_CNT]; /* shadow valid bits
							     * of hhf_arrays
							     */
	/* Similar to the "new_flows" vs. "old_flows" concept in fq_codel DRR */
	struct list_head   new_buckets; /* list of new buckets */
	struct list_head   old_buckets; /* list of old buckets */

	/* Configurable HHF parameters */
	u32		   hhf_reset_timeout; /* interval to reset counter
					       * arrays in filter F
					       * (default 40ms)
					       */
	u32		   hhf_admit_bytes;   /* counter thresh to classify as
					       * HH (default 128KB).
					       * With these default values,
					       * 128KB / 40ms = 25 Mbps
					       * i.e., we expect to capture HHs
					       * sending > 25 Mbps.
					       */
	u32		   hhf_evict_timeout; /* aging threshold to evict idle
					       * HHs out of table T. This should
					       * be large enough to avoid
					       * reordering during HH eviction.
					       * (default 1s)
					       */
	u32		   hhf_non_hh_weight; /* WDRR weight for non-HHs
					       * (default 2,
					       *  i.e., non-HH : HH = 2 : 1)
					       */
};

static u32 hhf_time_stamp(void)
{
	return jiffies;
}

/* Looks up a heavy-hitter flow in a chaining list of table T. */
static struct hh_flow_state *seek_list(const u32 hash,
				       struct list_head *head,
				       struct hhf_sched_data *q)
{
	struct hh_flow_state *flow, *next;
	u32 now = hhf_time_stamp();

	if (list_empty(head))
		return NULL;

	list_for_each_entry_safe(flow, next, head, flowchain) {
		u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;

		if (hhf_time_before(prev, now)) {
			/* Delete expired heavy-hitters, but preserve one entry
			 * to avoid kzalloc() when next time this slot is hit.
			 */
			if (list_is_last(&flow->flowchain, head))
				return NULL;
			list_del(&flow->flowchain);
			kfree(flow);
			q->hh_flows_current_cnt--;
		} else if (flow->hash_id == hash) {
			return flow;
		}
	}
	return NULL;
}

/* Returns a flow state entry for a new heavy-hitter. Either reuses an expired
 * entry or dynamically allocates a new entry.
 */
static struct hh_flow_state *alloc_new_hh(struct list_head *head,
					  struct hhf_sched_data *q)
{
	struct hh_flow_state *flow;
	u32 now = hhf_time_stamp();

	if (!list_empty(head)) {
		/* Find an expired heavy-hitter flow entry. */
		list_for_each_entry(flow, head, flowchain) {
			u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;

			if (hhf_time_before(prev, now))
				return flow;
		}
	}

	if (q->hh_flows_current_cnt >= q->hh_flows_limit) {
		q->hh_flows_overlimit++;
		return NULL;
	}
	/* Create new entry. */
	flow = kzalloc(sizeof(struct hh_flow_state), GFP_ATOMIC);
	if (!flow)
		return NULL;

	q->hh_flows_current_cnt++;
	INIT_LIST_HEAD(&flow->flowchain);
	list_add_tail(&flow->flowchain, head);

	return flow;
}

/* Assigns packets to WDRR buckets. Implements a multi-stage filter to
 * classify heavy-hitters.
 */
static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	u32 tmp_hash, hash;
	u32 xorsum, filter_pos[HHF_ARRAYS_CNT], flow_pos;
	struct hh_flow_state *flow;
	u32 pkt_len, min_hhf_val;
	int i;
	u32 prev;
	u32 now = hhf_time_stamp();

	/* Reset the HHF counter arrays if this is the right time. */
	prev = q->hhf_arrays_reset_timestamp + q->hhf_reset_timeout;
	if (hhf_time_before(prev, now)) {
		for (i = 0; i < HHF_ARRAYS_CNT; i++)
			bitmap_zero(q->hhf_valid_bits[i], HHF_ARRAYS_LEN);
		q->hhf_arrays_reset_timestamp = now;
	}

	/* Get hashed flow-id of the skb. */
	hash = skb_get_hash_perturb(skb, q->perturbation);

	/* Check if this packet belongs to an already established HH flow. */
	flow_pos = hash & HHF_BIT_MASK;
	flow = seek_list(hash, &q->hh_flows[flow_pos], q);
	if (flow) { /* found its HH flow */
		flow->hit_timestamp = now;
		return WDRR_BUCKET_FOR_HH;
	}

	/* Now pass the packet through the multi-stage filter.
	 */
	tmp_hash = hash;
	xorsum = 0;
	for (i = 0; i < HHF_ARRAYS_CNT - 1; i++) {
		/* Split the skb_hash into three 10-bit chunks. */
		filter_pos[i] = tmp_hash & HHF_BIT_MASK;
		xorsum ^= filter_pos[i];
		tmp_hash >>= HHF_BIT_MASK_LEN;
	}
	/* The last chunk is computed as XOR sum of other chunks. */
	filter_pos[HHF_ARRAYS_CNT - 1] = xorsum ^ tmp_hash;

	pkt_len = qdisc_pkt_len(skb);
	min_hhf_val = ~0U;
	for (i = 0; i < HHF_ARRAYS_CNT; i++) {
		u32 val;

		if (!test_bit(filter_pos[i], q->hhf_valid_bits[i])) {
			q->hhf_arrays[i][filter_pos[i]] = 0;
			__set_bit(filter_pos[i], q->hhf_valid_bits[i]);
		}

		val = q->hhf_arrays[i][filter_pos[i]] + pkt_len;
		if (min_hhf_val > val)
			min_hhf_val = val;
	}

	/* Found a new HH iff all counter values > HH admit threshold. */
	if (min_hhf_val > q->hhf_admit_bytes) {
		/* Just captured a new heavy-hitter. */
		flow = alloc_new_hh(&q->hh_flows[flow_pos], q);
		if (!flow) /* memory alloc problem */
			return WDRR_BUCKET_FOR_NON_HH;
		flow->hash_id = hash;
		flow->hit_timestamp = now;
		q->hh_flows_total_cnt++;

		/* By returning without updating counters in q->hhf_arrays,
		 * we implicitly implement "shielding" (see Optimization O1).
		 */
		return WDRR_BUCKET_FOR_HH;
	}

	/* Conservative update of HHF arrays (see Optimization O2). */
	for (i = 0; i < HHF_ARRAYS_CNT; i++) {
		if (q->hhf_arrays[i][filter_pos[i]] < min_hhf_val)
			q->hhf_arrays[i][filter_pos[i]] = min_hhf_val;
	}
	return WDRR_BUCKET_FOR_NON_HH;
}

/* Removes one skb from head of bucket. */
static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket)
{
	struct sk_buff *skb = bucket->head;

	bucket->head = skb->next;
	skb->next = NULL;
	return skb;
}

/* Tail-adds skb to bucket. */
static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb)
{
	if (bucket->head == NULL)
		bucket->head = skb;
	else
		bucket->tail->next = skb;
	bucket->tail = skb;
	skb->next = NULL;
}

static unsigned int hhf_drop(struct Qdisc *sch)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct wdrr_bucket *bucket;

	/* Always try to drop from heavy-hitters first. */
	bucket = &q->buckets[WDRR_BUCKET_FOR_HH];
	if (!bucket->head)
		bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH];

	if (bucket->head) {
		struct sk_buff *skb = dequeue_head(bucket);

		sch->q.qlen--;
		qdisc_qstats_drop(sch);
		qdisc_qstats_backlog_dec(sch, skb);
		kfree_skb(skb);
	}

	/* Return id of the bucket from which the packet was dropped. */
	return bucket - q->buckets;
}

static unsigned int hhf_qdisc_drop(struct Qdisc *sch)
{
	unsigned int prev_backlog;

	prev_backlog = sch->qstats.backlog;
	hhf_drop(sch);
	return prev_backlog - sch->qstats.backlog;
}

static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	enum wdrr_bucket_idx idx;
	struct wdrr_bucket *bucket;
	unsigned int prev_backlog;

	idx = hhf_classify(skb, sch);

	bucket = &q->buckets[idx];
	bucket_add(bucket, skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&bucket->bucketchain)) {
		unsigned int weight;

		/* The logic of new_buckets vs. old_buckets is the same as
		 * new_flows vs. old_flows in the implementation of fq_codel,
		 * i.e., short bursts of non-HHs should have strict priority.
		 */
		if (idx == WDRR_BUCKET_FOR_HH) {
			/* Always move heavy-hitters to old bucket. */
			weight = 1;
			list_add_tail(&bucket->bucketchain, &q->old_buckets);
		} else {
			weight = q->hhf_non_hh_weight;
			list_add_tail(&bucket->bucketchain, &q->new_buckets);
		}
		bucket->deficit = weight * q->quantum;
	}
	if (++sch->q.qlen <= sch->limit)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	q->drop_overlimit++;
	/* Return Congestion Notification only if we dropped a packet from this
	 * bucket.
	 */
	if (hhf_drop(sch) == idx)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let upper stack know this. */
	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *hhf_dequeue(struct Qdisc *sch)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = NULL;
	struct wdrr_bucket *bucket;
	struct list_head *head;

begin:
	head = &q->new_buckets;
	if (list_empty(head)) {
		head = &q->old_buckets;
		if (list_empty(head))
			return NULL;
	}
	bucket = list_first_entry(head, struct wdrr_bucket, bucketchain);

	if (bucket->deficit <= 0) {
		int weight = (bucket - q->buckets == WDRR_BUCKET_FOR_HH) ?
			      1 : q->hhf_non_hh_weight;

		bucket->deficit += weight * q->quantum;
		list_move_tail(&bucket->bucketchain, &q->old_buckets);
		goto begin;
	}

	if (bucket->head) {
		skb = dequeue_head(bucket);
		sch->q.qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
	}

	if (!skb) {
		/* Force a pass through old_buckets to prevent starvation. */
		if ((head == &q->new_buckets) && !list_empty(&q->old_buckets))
			list_move_tail(&bucket->bucketchain, &q->old_buckets);
		else
			list_del_init(&bucket->bucketchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	bucket->deficit -= qdisc_pkt_len(skb);

	return skb;
}

static void hhf_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = hhf_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

static void *hhf_zalloc(size_t sz)
{
	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

	if (!ptr)
		ptr = vzalloc(sz);

	return ptr;
}

static void hhf_free(void *addr)
{
	kvfree(addr);
}

static void hhf_destroy(struct Qdisc *sch)
{
	int i;
	struct hhf_sched_data *q = qdisc_priv(sch);

	for (i = 0; i < HHF_ARRAYS_CNT; i++) {
		hhf_free(q->hhf_arrays[i]);
		hhf_free(q->hhf_valid_bits[i]);
	}

	for (i = 0; i < HH_FLOWS_CNT; i++) {
		struct hh_flow_state *flow, *next;
		struct list_head *head = &q->hh_flows[i];

		if (list_empty(head))
			continue;
		list_for_each_entry_safe(flow, next, head, flowchain) {
			list_del(&flow->flowchain);
			kfree(flow);
		}
	}
	hhf_free(q->hh_flows);
}

static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = {
	[TCA_HHF_BACKLOG_LIMIT]	 = { .type = NLA_U32 },
	[TCA_HHF_QUANTUM]	 = { .type = NLA_U32 },
	[TCA_HHF_HH_FLOWS_LIMIT] = { .type = NLA_U32 },
	[TCA_HHF_RESET_TIMEOUT]	 = { .type = NLA_U32 },
	[TCA_HHF_ADMIT_BYTES]	 = { .type = NLA_U32 },
	[TCA_HHF_EVICT_TIMEOUT]	 = { .type = NLA_U32 },
	[TCA_HHF_NON_HH_WEIGHT]	 = { .type = NLA_U32 },
};

static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HHF_MAX + 1];
	unsigned int qlen, prev_backlog;
	int err;
	u64 non_hh_quantum;
	u32 new_quantum = q->quantum;
	u32 new_hhf_non_hh_weight = q->hhf_non_hh_weight;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HHF_MAX, opt, hhf_policy);
	if (err < 0)
		return err;

	if (tb[TCA_HHF_QUANTUM])
		new_quantum = nla_get_u32(tb[TCA_HHF_QUANTUM]);

	if (tb[TCA_HHF_NON_HH_WEIGHT])
		new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);

	non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
	if (non_hh_quantum > INT_MAX)
		return -EINVAL;

	sch_tree_lock(sch);

	if (tb[TCA_HHF_BACKLOG_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]);

	q->quantum = new_quantum;
	q->hhf_non_hh_weight = new_hhf_non_hh_weight;

	if (tb[TCA_HHF_HH_FLOWS_LIMIT])
		q->hh_flows_limit = nla_get_u32(tb[TCA_HHF_HH_FLOWS_LIMIT]);

	if (tb[TCA_HHF_RESET_TIMEOUT]) {
		u32 us = nla_get_u32(tb[TCA_HHF_RESET_TIMEOUT]);

		q->hhf_reset_timeout = usecs_to_jiffies(us);
	}

	if (tb[TCA_HHF_ADMIT_BYTES])
		q->hhf_admit_bytes = nla_get_u32(tb[TCA_HHF_ADMIT_BYTES]);

	if (tb[TCA_HHF_EVICT_TIMEOUT]) {
		u32 us = nla_get_u32(tb[TCA_HHF_EVICT_TIMEOUT]);

		q->hhf_evict_timeout = usecs_to_jiffies(us);
	}

	qlen = sch->q.qlen;
	prev_backlog = sch->qstats.backlog;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = hhf_dequeue(sch);

		kfree_skb(skb);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
				  prev_backlog - sch->qstats.backlog);

	sch_tree_unlock(sch);
	return 0;
}

static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	int i;

	sch->limit = 1000;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->perturbation = prandom_u32();
	INIT_LIST_HEAD(&q->new_buckets);
	INIT_LIST_HEAD(&q->old_buckets);

	/* Configurable HHF parameters */
	q->hhf_reset_timeout = HZ / 25; /* 40  ms */
	q->hhf_admit_bytes = 131072;	/* 128 KB */
	q->hhf_evict_timeout = HZ;	/* 1  sec */
	q->hhf_non_hh_weight = 2;

	if (opt) {
		int err = hhf_change(sch, opt);

		if (err)
			return err;
	}

	if (!q->hh_flows) {
		/* Initialize heavy-hitter flow table. */
		q->hh_flows = hhf_zalloc(HH_FLOWS_CNT *
					 sizeof(struct list_head));
		if (!q->hh_flows)
			return -ENOMEM;
		for (i = 0; i < HH_FLOWS_CNT; i++)
			INIT_LIST_HEAD(&q->hh_flows[i]);

		/* Cap max active HHs at twice len of hh_flows table. */
		q->hh_flows_limit = 2 * HH_FLOWS_CNT;
		q->hh_flows_overlimit = 0;
		q->hh_flows_total_cnt = 0;
		q->hh_flows_current_cnt = 0;

		/* Initialize heavy-hitter filter arrays. */
		for (i = 0; i < HHF_ARRAYS_CNT; i++) {
			q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN *
						      sizeof(u32));
			if (!q->hhf_arrays[i]) {
				hhf_destroy(sch);
				return -ENOMEM;
			}
		}
		q->hhf_arrays_reset_timestamp = hhf_time_stamp();

		/* Initialize valid bits of heavy-hitter filter arrays. */
		for (i = 0; i < HHF_ARRAYS_CNT; i++) {
			q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN /
							  BITS_PER_BYTE);
			if (!q->hhf_valid_bits[i]) {
				hhf_destroy(sch);
				return -ENOMEM;
			}
		}

		/* Initialize Weighted DRR buckets. */
		for (i = 0; i < WDRR_BUCKET_CNT; i++) {
			struct wdrr_bucket *bucket = q->buckets + i;

			INIT_LIST_HEAD(&bucket->bucketchain);
		}
	}

	return 0;
}

static int hhf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_HHF_BACKLOG_LIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_HHF_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_HHF_HH_FLOWS_LIMIT, q->hh_flows_limit) ||
	    nla_put_u32(skb, TCA_HHF_RESET_TIMEOUT,
			jiffies_to_usecs(q->hhf_reset_timeout)) ||
	    nla_put_u32(skb, TCA_HHF_ADMIT_BYTES, q->hhf_admit_bytes) ||
	    nla_put_u32(skb, TCA_HHF_EVICT_TIMEOUT,
			jiffies_to_usecs(q->hhf_evict_timeout)) ||
	    nla_put_u32(skb, TCA_HHF_NON_HH_WEIGHT, q->hhf_non_hh_weight))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int hhf_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct tc_hhf_xstats st = {
		.drop_overlimit = q->drop_overlimit,
		.hh_overlimit	= q->hh_flows_overlimit,
		.hh_tot_count	= q->hh_flows_total_cnt,
		.hh_cur_count	= q->hh_flows_current_cnt,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops hhf_qdisc_ops __read_mostly = {
	.id		= "hhf",
	.priv_size	= sizeof(struct hhf_sched_data),

	.enqueue	= hhf_enqueue,
	.dequeue	= hhf_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= hhf_qdisc_drop,
	.init		= hhf_init,
	.reset		= hhf_reset,
	.destroy	= hhf_destroy,
	.change		= hhf_change,
	.dump		= hhf_dump,
	.dump_stats	= hhf_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init hhf_module_init(void)
{
	return register_qdisc(&hhf_qdisc_ops);
}

static void __exit hhf_module_exit(void)
{
	unregister_qdisc(&hhf_qdisc_ops);
}

module_init(hhf_module_init)
module_exit(hhf_module_exit)
MODULE_AUTHOR("Terry Lam");
MODULE_AUTHOR("Nandita Dukkipati");
MODULE_LICENSE("GPL");
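
/*
 * Usage sketch (not part of the original sources): with an iproute2 build
 * that knows about hhf, the qdisc can typically be attached and tuned with
 * commands along the lines of
 *
 *	tc qdisc add dev eth0 root hhf limit 1000 non_hh_weight 4
 *	tc -s qdisc show dev eth0
 *
 * The parameter names mirror the netlink attributes above (limit, quantum,
 * hh_limit, reset_timeout, admit_bytes, evict_timeout, non_hh_weight), but
 * exact spelling and availability depend on the iproute2 version, so treat
 * this as an illustration rather than a reference.
 */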