/*
 * FQ_Codel - The FlowQueue-Codel scheduler/AQM
 *
 * Copyright (C) 2016 Centre for Advanced Internet Architectures,
 *  Swinburne University of Technology, Melbourne, Australia.
 * Portions of this code were made possible in part by a gift from
 *  The Comcast Innovation Fund.
 * Implemented by Rasool Al-Saadi <ralsaadi@swin.edu.au>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef _KERNEL
#include <sys/malloc.h>
#include <sys/socket.h>
//#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <net/if.h>	/* IFNAMSIZ */
#include <netinet/in.h>
#include <netinet/ip_var.h>	/* ipfw_rule_ref */
#include <netinet/ip_fw.h>	/* flow_id */
#include <netinet/ip_dummynet.h>

#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/rwlock.h>

#include <netpfil/ipfw/ip_fw_private.h>
#include <sys/sysctl.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/ip_icmp.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <sys/queue.h>
#include <sys/hash.h>

#include <netpfil/ipfw/dn_heap.h>
#include <netpfil/ipfw/ip_dn_private.h>

#include <netpfil/ipfw/dn_aqm.h>
#include <netpfil/ipfw/dn_aqm_codel.h>
#include <netpfil/ipfw/dn_sched.h>
#include <netpfil/ipfw/dn_sched_fq_codel.h>
#include <netpfil/ipfw/dn_sched_fq_codel_helper.h>

#else
#include <dn_test.h>
#endif

/* NOTE: the fq_codel module reimplements the CoDel AQM functions because
 * fq_codel uses a different flow (sub-queue) structure; dn_queue carries
 * many fields a sub-queue does not need, so the smaller structure avoids
 * that extra overhead (88 bytes vs 208 bytes).
 * The CoDel functions here also manage the statistics of the sub-queues
 * as well as of the main queue.
 */

#define DN_SCHED_FQ_CODEL 6

static struct dn_alg fq_codel_desc;

/* fq_codel default parameters, including CoDel's */
struct dn_sch_fq_codel_parms
fq_codel_sysctl = {{5000 * AQM_TIME_1US, 100000 * AQM_TIME_1US,
    CODEL_ECN_ENABLED}, 1024, 10240, 1514};

static int
fqcodel_sysctl_interval_handler(SYSCTL_HANDLER_ARGS)
{
    int error;
    long value;

    value = fq_codel_sysctl.ccfg.interval;
    value /= AQM_TIME_1US;
    error = sysctl_handle_long(oidp, &value, 0, req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (value < 1 || value > 100 * AQM_TIME_1S)
        return (EINVAL);
    fq_codel_sysctl.ccfg.interval = value * AQM_TIME_1US;

    return (0);
}

static int
fqcodel_sysctl_target_handler(SYSCTL_HANDLER_ARGS)
{
    int error;
    long value;

    value = fq_codel_sysctl.ccfg.target;
    value /= AQM_TIME_1US;
    error = sysctl_handle_long(oidp, &value, 0, req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (value < 1 || value > 5 * AQM_TIME_1S)
        return (EINVAL);
    fq_codel_sysctl.ccfg.target = value * AQM_TIME_1US;

    return (0);
}

SYSBEGIN(f4)

SYSCTL_DECL(_net_inet);
SYSCTL_DECL(_net_inet_ip);
SYSCTL_DECL(_net_inet_ip_dummynet);
static SYSCTL_NODE(_net_inet_ip_dummynet, OID_AUTO, fqcodel,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "FQ_CODEL");

#ifdef SYSCTL_NODE

SYSCTL_PROC(_net_inet_ip_dummynet_fqcodel, OID_AUTO, target,
    CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    NULL, 0, fqcodel_sysctl_target_handler, "L",
    "FQ_CoDel target in microseconds");
SYSCTL_PROC(_net_inet_ip_dummynet_fqcodel, OID_AUTO, interval,
    CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    NULL, 0, fqcodel_sysctl_interval_handler, "L",
    "FQ_CoDel interval in microseconds");

SYSCTL_UINT(_net_inet_ip_dummynet_fqcodel, OID_AUTO, quantum,
    CTLFLAG_RW, &fq_codel_sysctl.quantum, 1514, "FQ_CoDel quantum");
SYSCTL_UINT(_net_inet_ip_dummynet_fqcodel, OID_AUTO, flows,
    CTLFLAG_RW, &fq_codel_sysctl.flows_cnt, 1024,
    "Number of queues for FQ_CoDel");
SYSCTL_UINT(_net_inet_ip_dummynet_fqcodel, OID_AUTO, limit,
    CTLFLAG_RW, &fq_codel_sysctl.limit, 10240, "FQ_CoDel queues size limit");
#endif

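/*
 * Example: the defaults above can be inspected or tuned at runtime via the
 * net.inet.ip.dummynet.fqcodel sysctl tree declared above, e.g.
 *
 *     sysctl net.inet.ip.dummynet.fqcodel.target=5000
 *     sysctl net.inet.ip.dummynet.fqcodel.interval=100000
 *     sysctl net.inet.ip.dummynet.fqcodel.quantum=1514
 *
 * target and interval are expressed in microseconds; the handlers above
 * convert them to and from the internal AQM time units.
 */
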
/* Drop a packet from the head of a CoDel queue */
static void
codel_drop_head(struct fq_codel_flow *q, struct fq_codel_si *si)
{
    struct mbuf *m = q->mq.head;

    if (m == NULL)
        return;
    q->mq.head = m->m_nextpkt;

    fq_update_stats(q, si, -m->m_pkthdr.len, 1);

    if (si->main_q.ni.length == 0) /* queue is now idle */
        si->main_q.q_time = V_dn_cfg.curr_time;

    FREE_PKT(m);
}

/* Enqueue a packet 'm' into queue 'q' and add a timestamp to it.
 * Return 1 when unable to add the timestamp, otherwise return 0.
 */
static int
codel_enqueue(struct fq_codel_flow *q, struct mbuf *m, struct fq_codel_si *si)
{
    uint64_t len;

    len = m->m_pkthdr.len;
    /* track the maximum packet size seen on this sub-queue */
    if (len > q->cst.maxpkt_size)
        q->cst.maxpkt_size = len;

    /* Add timestamp to mbuf as MTAG */
    struct m_tag *mtag;
    mtag = m_tag_locate(m, MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, NULL);
    if (mtag == NULL)
        mtag = m_tag_alloc(MTAG_ABI_COMPAT, DN_AQM_MTAG_TS,
            sizeof(aqm_time_t), M_NOWAIT);
    if (mtag == NULL)
        goto drop;
    *(aqm_time_t *)(mtag + 1) = AQM_UNOW;
    m_tag_prepend(m, mtag);

    if (m->m_pkthdr.rcvif != NULL)
        m_rcvif_serialize(m);

    mq_append(&q->mq, m);
    fq_update_stats(q, si, len, 0);
    return 0;

drop:
    fq_update_stats(q, si, len, 1);
    m_freem(m);
    return 1;
}

/*
 * Classify a packet into a queue number using the Jenkins hash function.
 * Return: queue number.
 * The inputs of the hash are protocol number, perturbation, source IP,
 * destination IP, source port and destination port.
 */
static inline int
fq_codel_classify_flow(struct mbuf *m, uint16_t fcount, struct fq_codel_si *si)
{
    struct ip *ip;
    struct tcphdr *th;
    struct udphdr *uh;
    uint8_t tuple[41];
    uint16_t hash = 0;

    ip = (struct ip *)mtodo(m, dn_tag_get(m)->iphdr_off);
//#ifdef INET6
    struct ip6_hdr *ip6;
    int isip6;
    isip6 = (ip->ip_v == 6);

    if (isip6) {
        ip6 = (struct ip6_hdr *)ip;
        *((uint8_t *) &tuple[0]) = ip6->ip6_nxt;
        *((uint32_t *) &tuple[1]) = si->perturbation;
        memcpy(&tuple[5], ip6->ip6_src.s6_addr, 16);
        memcpy(&tuple[21], ip6->ip6_dst.s6_addr, 16);

        switch (ip6->ip6_nxt) {
        case IPPROTO_TCP:
            th = (struct tcphdr *)(ip6 + 1);
            *((uint16_t *) &tuple[37]) = th->th_dport;
            *((uint16_t *) &tuple[39]) = th->th_sport;
            break;

        case IPPROTO_UDP:
            uh = (struct udphdr *)(ip6 + 1);
            *((uint16_t *) &tuple[37]) = uh->uh_dport;
            *((uint16_t *) &tuple[39]) = uh->uh_sport;
            break;
        default:
            memset(&tuple[37], 0, 4);
        }

        hash = jenkins_hash(tuple, 41, HASHINIT) % fcount;
        return hash;
    }
//#endif

    /* IPv4 */
    *((uint8_t *) &tuple[0]) = ip->ip_p;
    *((uint32_t *) &tuple[1]) = si->perturbation;
    *((uint32_t *) &tuple[5]) = ip->ip_src.s_addr;
    *((uint32_t *) &tuple[9]) = ip->ip_dst.s_addr;

    switch (ip->ip_p) {
    case IPPROTO_TCP:
        th = (struct tcphdr *)(ip + 1);
        *((uint16_t *) &tuple[13]) = th->th_dport;
        *((uint16_t *) &tuple[15]) = th->th_sport;
        break;

    case IPPROTO_UDP:
        uh = (struct udphdr *)(ip + 1);
        *((uint16_t *) &tuple[13]) = uh->uh_dport;
        *((uint16_t *) &tuple[15]) = uh->uh_sport;
        break;
    default:
        memset(&tuple[13], 0, 4);
    }
    hash = jenkins_hash(tuple, 17, HASHINIT) % fcount;

    return hash;
}

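/*
 * Sketch of how the classifier is used (for illustration): the returned
 * index selects one of the flows_cnt sub-queues allocated per scheduler
 * instance, so the enqueue path below effectively does
 *
 *     idx = fq_codel_classify_flow(m, param->flows_cnt, si);
 *     codel_enqueue(&si->flows[idx], m, si);
 *
 * Distinct 5-tuples may hash to the same index, in which case they share
 * a sub-queue and its CoDel state.
 */
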
/*
 * Enqueue a packet into the appropriate queue according to the
 * FQ_CODEL algorithm.
 */
static int
fq_codel_enqueue(struct dn_sch_inst *_si, struct dn_queue *_q,
    struct mbuf *m)
{
    struct fq_codel_si *si;
    struct fq_codel_schk *schk;
    struct dn_sch_fq_codel_parms *param;
    struct dn_queue *mainq;
    int idx, drop, i, maxidx;

    mainq = (struct dn_queue *)(_si + 1);
    si = (struct fq_codel_si *)_si;
    schk = (struct fq_codel_schk *)(si->_si.sched + 1);
    param = &schk->cfg;

    /* classify the packet to a queue number */
    idx = fq_codel_classify_flow(m, param->flows_cnt, si);

    /* enqueue the packet into the appropriate queue using CoDel AQM.
     * Note: codel_enqueue() returns 1 only when it is unable to add a
     * timestamp to the packet (no limit check). */
    drop = codel_enqueue(&si->flows[idx], m, si);

    /* codel was unable to timestamp the packet */
    if (drop)
        return 1;

    /* If the flow (sub-queue) is not active, add it to the tail of the
     * new flows list, then initialize and activate it.
     */
    if (!si->flows[idx].active) {
        STAILQ_INSERT_TAIL(&si->newflows, &si->flows[idx], flowchain);
        si->flows[idx].deficit = param->quantum;
        si->flows[idx].cst.dropping = false;
        si->flows[idx].cst.first_above_time = 0;
        si->flows[idx].active = 1;
        //D("activate %d",idx);
    }

    /* Check the total queue limit; if exceeded, drop a packet from the
     * head of the largest sub-queue.
     */
    if (mainq->ni.length > schk->cfg.limit) {
        D("over limit");
        /* find the first active flow */
        for (maxidx = 0; maxidx < schk->cfg.flows_cnt; maxidx++)
            if (si->flows[maxidx].active)
                break;
        if (maxidx < schk->cfg.flows_cnt) {
            /* find the largest sub-queue */
            for (i = maxidx + 1; i < schk->cfg.flows_cnt; i++)
                if (si->flows[i].active && si->flows[i].stats.length >
                    si->flows[maxidx].stats.length)
                    maxidx = i;
            codel_drop_head(&si->flows[maxidx], si);
            D("maxidx = %d", maxidx);
            drop = 1;
        }
    }

    return drop;
}

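/*
 * Worked example of the deficit round-robin used below (illustrative,
 * with the default quantum of 1514 bytes): a newly activated flow starts
 * with deficit == quantum; every dequeued packet decreases the deficit by
 * its length, and a flow whose deficit has gone negative is replenished
 * by quantum and moved to the old flows list before another flow is
 * tried. A flow sending 1514-byte packets therefore gets roughly one
 * packet per round, while a flow sending 100-byte packets may dequeue
 * around fifteen packets before it is rotated out.
 */
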
/*
 * Dequeue a packet from the appropriate queue according to the
 * FQ_CODEL algorithm.
 */
static struct mbuf *
fq_codel_dequeue(struct dn_sch_inst *_si)
{
    struct fq_codel_si *si;
    struct fq_codel_schk *schk;
    struct dn_sch_fq_codel_parms *param;
    struct fq_codel_flow *f;
    struct mbuf *mbuf;
    struct fq_codel_list *fq_codel_flowlist;

    si = (struct fq_codel_si *)_si;
    schk = (struct fq_codel_schk *)(si->_si.sched + 1);
    param = &schk->cfg;

    do {
        /* select a list to start with */
        if (STAILQ_EMPTY(&si->newflows))
            fq_codel_flowlist = &si->oldflows;
        else
            fq_codel_flowlist = &si->newflows;

        /* Both the new and old flows lists are empty, return NULL */
        if (STAILQ_EMPTY(fq_codel_flowlist))
            return NULL;

        f = STAILQ_FIRST(fq_codel_flowlist);
        while (f != NULL) {
            /* If the flow (sub-queue) has exhausted its deficit,
             * replenish the deficit by quantum, move the flow to the
             * tail of the old flows list and try another flow.
             * Otherwise, use this flow for the dequeue.
             */
            if (f->deficit < 0) {
                f->deficit += param->quantum;
                STAILQ_REMOVE_HEAD(fq_codel_flowlist, flowchain);
                STAILQ_INSERT_TAIL(&si->oldflows, f, flowchain);
            } else
                break;

            f = STAILQ_FIRST(fq_codel_flowlist);
        }

        /* the new flows list is empty, try the old flows list */
        if (STAILQ_EMPTY(fq_codel_flowlist))
            continue;

        /* Dequeue a packet from the selected flow */
        mbuf = fqc_codel_dequeue(f, si);

        /* CoDel did not return a packet */
        if (!mbuf) {
            /* If the selected flow belongs to the new flows list, move
             * it to the tail of the old flows list. Otherwise,
             * deactivate it and remove it from the old flows list.
             */
            if (fq_codel_flowlist == &si->newflows) {
                STAILQ_REMOVE_HEAD(fq_codel_flowlist, flowchain);
                STAILQ_INSERT_TAIL(&si->oldflows, f, flowchain);
            } else {
                f->active = 0;
                STAILQ_REMOVE_HEAD(fq_codel_flowlist, flowchain);
            }
            /* start again */
            continue;
        }

        /* we have a packet to return;
         * update the flow deficit and return the packet */
        f->deficit -= mbuf->m_pkthdr.len;
        return mbuf;

    } while (1);

    /* unreachable point */
    return NULL;
}

/*
 * Initialize an fq_codel scheduler instance.
 * Also allocate memory for the flows array.
 */
static int
fq_codel_new_sched(struct dn_sch_inst *_si)
{
    struct fq_codel_si *si;
    struct dn_queue *q;
    struct fq_codel_schk *schk;
    int i;

    si = (struct fq_codel_si *)_si;
    schk = (struct fq_codel_schk *)(_si->sched + 1);

    if (si->flows) {
        D("si already configured!");
        return 0;
    }

    /* init the main queue */
    q = &si->main_q;
    set_oid(&q->ni.oid, DN_QUEUE, sizeof(*q));
    q->_si = _si;
    q->fs = _si->sched->fs;

    /* allocate memory for the flows array */
    si->flows = mallocarray(schk->cfg.flows_cnt,
        sizeof(struct fq_codel_flow), M_DUMMYNET, M_NOWAIT | M_ZERO);
    if (si->flows == NULL) {
        D("cannot allocate memory for fq_codel configuration parameters");
        return ENOMEM;
    }

    /* init perturbation for this si */
    si->perturbation = random();

    /* init the old and new flows lists */
    STAILQ_INIT(&si->newflows);
    STAILQ_INIT(&si->oldflows);

    /* init the flows (sub-queues) */
    for (i = 0; i < schk->cfg.flows_cnt; i++) {
        /* init codel */
        si->flows[i].cst.maxpkt_size = 500;
    }

    fq_codel_desc.ref_count++;
    return 0;
}

/*
 * Free an fq_codel scheduler instance.
 */
static int
fq_codel_free_sched(struct dn_sch_inst *_si)
{
    struct fq_codel_si *si = (struct fq_codel_si *)_si;

    /* free the flows array */
    free(si->flows, M_DUMMYNET);
    si->flows = NULL;
    fq_codel_desc.ref_count--;

    return 0;
}

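/*
 * In fq_codel_config() below, a negative value in ep->par[] is treated as
 * "use the default": the current net.inet.ip.dummynet.fqcodel sysctl value
 * is taken instead, and the result is then clamped by BOUND_VAR().
 */
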
/*
 * Configure the fq_codel scheduler.
 * The configuration for the scheduler is passed from userland.
 */
static int
fq_codel_config(struct dn_schk *_schk)
{
    struct fq_codel_schk *schk;
    struct dn_extra_parms *ep;
    struct dn_sch_fq_codel_parms *fqc_cfg;

    schk = (struct fq_codel_schk *)(_schk + 1);
    ep = (struct dn_extra_parms *) _schk->cfg;

    /* The par array contains the fq_codel configuration as follows:
     * CoDel: 0- target, 1- interval, 2- flags
     * FQ_CODEL: 3- quantum, 4- limit, 5- flows
     */
    if (ep && ep->oid.len == sizeof(*ep) &&
        ep->oid.subtype == DN_SCH_PARAMS) {
        fqc_cfg = &schk->cfg;
        if (ep->par[0] < 0)
            fqc_cfg->ccfg.target = fq_codel_sysctl.ccfg.target;
        else
            fqc_cfg->ccfg.target = ep->par[0] * AQM_TIME_1US;

        if (ep->par[1] < 0)
            fqc_cfg->ccfg.interval = fq_codel_sysctl.ccfg.interval;
        else
            fqc_cfg->ccfg.interval = ep->par[1] * AQM_TIME_1US;

        if (ep->par[2] < 0)
            fqc_cfg->ccfg.flags = 0;
        else
            fqc_cfg->ccfg.flags = ep->par[2];

        /* FQ configurations */
        if (ep->par[3] < 0)
            fqc_cfg->quantum = fq_codel_sysctl.quantum;
        else
            fqc_cfg->quantum = ep->par[3];

        if (ep->par[4] < 0)
            fqc_cfg->limit = fq_codel_sysctl.limit;
        else
            fqc_cfg->limit = ep->par[4];

        if (ep->par[5] < 0)
            fqc_cfg->flows_cnt = fq_codel_sysctl.flows_cnt;
        else
            fqc_cfg->flows_cnt = ep->par[5];

        /* Bound the configurations */
        fqc_cfg->ccfg.target = BOUND_VAR(fqc_cfg->ccfg.target, 1,
            5 * AQM_TIME_1S);
        fqc_cfg->ccfg.interval = BOUND_VAR(fqc_cfg->ccfg.interval, 1,
            100 * AQM_TIME_1S);

        fqc_cfg->quantum = BOUND_VAR(fqc_cfg->quantum, 1, 9000);
        fqc_cfg->limit = BOUND_VAR(fqc_cfg->limit, 1, 20480);
        fqc_cfg->flows_cnt = BOUND_VAR(fqc_cfg->flows_cnt, 1, 65536);
    } else
        return 1;

    return 0;
}

/*
 * Return the fq_codel scheduler configuration.
 * The configuration for the scheduler is passed to userland.
 */
static int
fq_codel_getconfig(struct dn_schk *_schk, struct dn_extra_parms *ep)
{
    struct fq_codel_schk *schk = (struct fq_codel_schk *)(_schk + 1);
    struct dn_sch_fq_codel_parms *fqc_cfg;

    fqc_cfg = &schk->cfg;

    strcpy(ep->name, fq_codel_desc.name);
    ep->par[0] = fqc_cfg->ccfg.target / AQM_TIME_1US;
    ep->par[1] = fqc_cfg->ccfg.interval / AQM_TIME_1US;
    ep->par[2] = fqc_cfg->ccfg.flags;

    ep->par[3] = fqc_cfg->quantum;
    ep->par[4] = fqc_cfg->limit;
    ep->par[5] = fqc_cfg->flows_cnt;

    return 0;
}

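/*
 * The descriptor below is what dummynet uses to hook in this scheduler:
 * schk_datalen and si_datalen tell the core how many extra bytes to
 * reserve after the generic dn_schk and dn_sch_inst structures, which is
 * why the code above recovers its private state with casts such as
 * (struct fq_codel_schk *)(_schk + 1) and (struct fq_codel_si *)_si.
 */
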
/*
 * fq_codel scheduler descriptor.
 * Contains the type of the scheduler, the name, the size of the extra
 * data structures, and function pointers.
 */
static struct dn_alg fq_codel_desc = {
    _SI( .type = ) DN_SCHED_FQ_CODEL,
    _SI( .name = ) "FQ_CODEL",
    _SI( .flags = ) 0,

    _SI( .schk_datalen = ) sizeof(struct fq_codel_schk),
    _SI( .si_datalen = ) sizeof(struct fq_codel_si) - sizeof(struct dn_sch_inst),
    _SI( .q_datalen = ) 0,

    _SI( .enqueue = ) fq_codel_enqueue,
    _SI( .dequeue = ) fq_codel_dequeue,
    _SI( .config = ) fq_codel_config,	/* new sched, i.e. sched X config ... */
    _SI( .destroy = ) NULL,		/* sched X delete */
    _SI( .new_sched = ) fq_codel_new_sched,	/* new sched instance */
    _SI( .free_sched = ) fq_codel_free_sched,	/* delete sched instance */
    _SI( .new_fsk = ) NULL,
    _SI( .free_fsk = ) NULL,
    _SI( .new_queue = ) NULL,
    _SI( .free_queue = ) NULL,
    _SI( .getconfig = ) fq_codel_getconfig,
    _SI( .ref_count = ) 0
};

DECLARE_DNSCHED_MODULE(dn_fq_codel, &fq_codel_desc);

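/*
 * Example userland configuration (illustrative; see ipfw(8) for the
 * authoritative syntax and keywords):
 *
 *     ipfw pipe 1 config bw 10Mbit/s
 *     ipfw sched 1 config pipe 1 type fq_codel target 5ms interval 100ms \
 *         quantum 1514 limit 10240 flows 1024 ecn
 *     ipfw queue 1 config sched 1
 *     ipfw add queue 1 ip from any to any
 *
 * The scheduler parameters end up in dn_extra_parms.par[] and are
 * consumed by fq_codel_config() above.
 */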