Lines Matching +full:static +full:- +full:beta
2 * FQ_PIE - The FlowQueue-PIE scheduler/AQM
8 * Implemented by Rasool Al-Saadi <ralsaadi@swin.edu.au>
33 * As there is no official document for the FQ-PIE specification, we used
34 * the FQ-CoDel algorithm with some modifications to implement FQ-PIE.
35 * This FQ-PIE implementation is a beta version and has not been tested
36 * extensively. Our FQ-PIE uses stand-alone PIE AQM per sub-queue. By
95 /* flow (sub-queue) stats */
104 /* A flow of packets (sub-queue)*/
122 * sub-queues and the flows array pointer even after the scheduler instance
142 static struct dn_alg fq_pie_desc;
144 /* Default FQ-PIE parameters including PIE */
147 * alpha=0.125, beta=1.25, tupdate=15ms
148 * FQ-
157 static int
163 if (!strcmp(oidp->oid_name,"alpha")) in fqpie_sysctl_alpha_beta_handler()
166 value = fq_pie_sysctl.pcfg.beta; in fqpie_sysctl_alpha_beta_handler()
170 if (error != 0 || req->newptr == NULL) in fqpie_sysctl_alpha_beta_handler()
175 if (!strcmp(oidp->oid_name,"alpha")) in fqpie_sysctl_alpha_beta_handler()
178 fq_pie_sysctl.pcfg.beta = value; in fqpie_sysctl_alpha_beta_handler()
182 static int
188 if (!strcmp(oidp->oid_name,"target")) in fqpie_sysctl_target_tupdate_maxb_handler()
190 else if (!strcmp(oidp->oid_name,"tupdate")) in fqpie_sysctl_target_tupdate_maxb_handler()
197 if (error != 0 || req->newptr == NULL) in fqpie_sysctl_target_tupdate_maxb_handler()
203 if (!strcmp(oidp->oid_name,"target")) in fqpie_sysctl_target_tupdate_maxb_handler()
205 else if (!strcmp(oidp->oid_name,"tupdate")) in fqpie_sysctl_target_tupdate_maxb_handler()
212 static int
221 if (error != 0 || req->newptr == NULL) in fqpie_sysctl_max_ecnth_handler()
230 /* define FQ-PIE sysctl variables */
235 static SYSCTL_NODE(_net_inet_ip_dummynet, OID_AUTO, fqpie,
266 SYSCTL_PROC(_net_inet_ip_dummynet_fqpie, OID_AUTO, beta,
269 "beta scaled by 1000");
279 /* Helper function to update sub-queue, main-queue and scheduler statistics.
280 * negative len & drop -> drop
281 * negative len -> dequeue
282 * positive len -> enqueue
283 * positive len + drop -> drop during enqueue
285 __inline static void
292 inc = -1; in fq_update_stats()
297 si->main_q.ni.drops ++; in fq_update_stats()
298 q->stats.drops ++; in fq_update_stats()
299 si->_si.ni.drops ++; in fq_update_stats()
305 si->main_q.ni.length += inc; in fq_update_stats()
306 si->main_q.ni.len_bytes += len; in fq_update_stats()
308 /*update sub-queue stats */ in fq_update_stats()
309 q->stats.length += inc; in fq_update_stats()
310 q->stats.len_bytes += len; in fq_update_stats()
313 si->_si.ni.length += inc; in fq_update_stats()
314 si->_si.ni.len_bytes += len; in fq_update_stats()
318 si->main_q.ni.tot_bytes += len; in fq_update_stats()
319 si->main_q.ni.tot_pkts ++; in fq_update_stats()
321 q->stats.tot_bytes +=len; in fq_update_stats()
322 q->stats.tot_pkts++; in fq_update_stats()
324 si->_si.ni.tot_bytes +=len; in fq_update_stats()
325 si->_si.ni.tot_pkts ++; in fq_update_stats()
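For orientation, the len/drop convention described above corresponds to call sites such as the following; the dequeue and head-drop calls appear verbatim later in this listing, while the enqueue-side calls are inferred from the convention rather than copied from the file:

	fq_update_stats(q, si, m->m_pkthdr.len, 0);	/* packet enqueued */
	fq_update_stats(q, si, m->m_pkthdr.len, 1);	/* packet dropped during enqueue (inferred) */
	fq_update_stats(q, si, -m->m_pkthdr.len, 0);	/* packet dequeued */
	fq_update_stats(q, si, -m->m_pkthdr.len, 1);	/* packet dropped from the queue head */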
331 * Extract a packet from the head of sub-queue 'q'
335 __inline static struct mbuf *
341 next: m = q->mq.head; in fq_pie_extract_head()
344 q->mq.head = m->m_nextpkt; in fq_pie_extract_head()
346 fq_update_stats(q, si, -m->m_pkthdr.len, 0); in fq_pie_extract_head()
348 if (si->main_q.ni.length == 0) /* queue is now idle */ in fq_pie_extract_head()
349 si->main_q.q_time = V_dn_cfg.curr_time; in fq_pie_extract_head()
363 if (m->m_pkthdr.rcvif != NULL && in fq_pie_extract_head()
373 * This function is called every tupdate ms and takes a pointer to the FQ-PIE
376 static void
380 struct pie_status *pst = &q->pst; in fq_calculate_drop_prob()
385 pprms = pst->parms; in fq_calculate_drop_prob()
386 prob = pst->drop_prob; in fq_calculate_drop_prob()
392 if (pprms->flags & PIE_DEPRATEEST_ENABLED) in fq_calculate_drop_prob()
393 pst->current_qdelay = ((uint64_t)q->stats.len_bytes * pst->avg_dq_time) in fq_calculate_drop_prob()
396 if (!q->stats.len_bytes) in fq_calculate_drop_prob()
397 pst->current_qdelay = 0; in fq_calculate_drop_prob()
400 p = (int64_t)pprms->alpha * in fq_calculate_drop_prob()
401 ((int64_t)pst->current_qdelay - (int64_t)pprms->qdelay_ref); in fq_calculate_drop_prob()
402 p +=(int64_t) pprms->beta * in fq_calculate_drop_prob()
403 ((int64_t)pst->current_qdelay - (int64_t)pst->qdelay_old); in fq_calculate_drop_prob()
408 p = -p; in fq_calculate_drop_prob()
411 /* We shift PIE_MAX_PROB by 12 bits to increase the division precision */ in fq_calculate_drop_prob()
414 /* auto-tune drop probability */ in fq_calculate_drop_prob()
433 prob = prob - p; in fq_calculate_drop_prob()
442 if ((pprms->flags & PIE_CAPDROP_ENABLED) && in fq_calculate_drop_prob()
464 if (pst->current_qdelay == 0 && pst->qdelay_old == 0) { in fq_calculate_drop_prob()
465 /* 0.98 ~= 1 - 1/64 */ in fq_calculate_drop_prob()
466 prob = prob - (prob >> 6); in fq_calculate_drop_prob()
474 pst->drop_prob = prob; in fq_calculate_drop_prob()
477 pst->qdelay_old = pst->current_qdelay; in fq_calculate_drop_prob()
480 if ((pst->sflags & PIE_ACTIVE) && pst->burst_allowance) { in fq_calculate_drop_prob()
481 if (pst->burst_allowance > pprms->tupdate) in fq_calculate_drop_prob()
482 pst->burst_allowance -= pprms->tupdate; in fq_calculate_drop_prob()
484 pst->burst_allowance = 0; in fq_calculate_drop_prob()
487 if (pst->sflags & PIE_ACTIVE) in fq_calculate_drop_prob()
488 callout_reset_sbt(&pst->aqm_pie_callout, in fq_calculate_drop_prob()
489 (uint64_t)pprms->tupdate * SBT_1US, in fq_calculate_drop_prob()
492 mtx_unlock(&pst->lock_mtx); in fq_calculate_drop_prob()
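The core of fq_calculate_drop_prob() is the PIE control law: the adjustment is alpha * (current_qdelay - qdelay_ref) + beta * (current_qdelay - qdelay_old), scaled down while the drop probability is still small, with the probability decaying by 1/64 per update once the queue has drained. A user-space floating-point sketch of that update follows; the auto-tuning thresholds are taken from RFC 8033 as an assumption, not copied from this file, and the kernel code works in fixed point scaled by PIE_SCALE and PIE_MAX_PROB rather than in doubles:

	#include <stdio.h>

	/* Floating-point sketch of the per-tupdate PIE probability update. */
	static double
	pie_update_prob(double prob, double qdelay, double qdelay_old,
	    double target, double alpha, double beta)
	{
		double p;

		p = alpha * (qdelay - target) + beta * (qdelay - qdelay_old);

		/* auto-tune: scale the adjustment down while drop prob is small
		 * (thresholds assumed from RFC 8033) */
		if (prob < 0.000001)
			p /= 2048;
		else if (prob < 0.00001)
			p /= 512;
		else if (prob < 0.0001)
			p /= 128;
		else if (prob < 0.001)
			p /= 32;
		else if (prob < 0.01)
			p /= 8;
		else if (prob < 0.1)
			p /= 2;

		prob += p;

		/* decay the probability when the queue has been idle */
		if (qdelay == 0 && qdelay_old == 0)
			prob -= prob / 64;	/* 0.98 ~= 1 - 1/64 */

		if (prob < 0)
			prob = 0;
		else if (prob > 1)
			prob = 1;
		return (prob);
	}

	int
	main(void)
	{
		/* defaults: target 15 ms, alpha 0.125, beta 1.25 (delays in seconds) */
		double prob = 0.0, qdelay_old = 0.0;
		double samples[] = { 0.020, 0.030, 0.040, 0.030, 0.015 };

		for (int i = 0; i < 5; i++) {
			prob = pie_update_prob(prob, samples[i], qdelay_old,
			    0.015, 0.125, 1.25);
			qdelay_old = samples[i];
			printf("qdelay %.3f s -> drop prob %.6f\n", samples[i], prob);
		}
		return (0);
	}

Note also the burst handling above: with the default max_burst of 150 ms and tupdate of 15 ms, a newly activated sub-queue gets roughly ten update intervals during which burst_allowance stays non-zero and drop_early() is bypassed.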
498 __inline static void
501 struct pie_status *pst = &q->pst; in fq_activate_pie()
504 mtx_lock(&pst->lock_mtx); in fq_activate_pie()
505 pprms = pst->parms; in fq_activate_pie()
507 pprms = pst->parms; in fq_activate_pie()
508 pst->drop_prob = 0; in fq_activate_pie()
509 pst->qdelay_old = 0; in fq_activate_pie()
510 pst->burst_allowance = pprms->max_burst; in fq_activate_pie()
511 pst->accu_prob = 0; in fq_activate_pie()
512 pst->dq_count = 0; in fq_activate_pie()
513 pst->avg_dq_time = 0; in fq_activate_pie()
514 pst->sflags = PIE_INMEASUREMENT | PIE_ACTIVE; in fq_activate_pie()
515 pst->measurement_start = AQM_UNOW; in fq_activate_pie()
517 callout_reset_sbt(&pst->aqm_pie_callout, in fq_activate_pie()
518 (uint64_t)pprms->tupdate * SBT_1US, in fq_activate_pie()
521 mtx_unlock(&pst->lock_mtx); in fq_activate_pie()
527 __inline static void
530 mtx_lock(&pst->lock_mtx); in fq_deactivate_pie()
531 pst->sflags &= ~(PIE_ACTIVE | PIE_INMEASUREMENT); in fq_deactivate_pie()
532 callout_stop(&pst->aqm_pie_callout); in fq_deactivate_pie()
534 mtx_unlock(&pst->lock_mtx); in fq_deactivate_pie()
538 * Initialize PIE for sub-queue 'q'
540 static int
543 struct pie_status *pst=&q->pst; in pie_init()
544 struct dn_aqm_pie_parms *pprms = pst->parms; in pie_init()
551 q->psi_extra->nr_active_q++; in pie_init()
555 pst->one_third_q_size = (fqpie_schk->cfg.limit / in pie_init()
556 fqpie_schk->cfg.flows_cnt) / 3; in pie_init()
558 mtx_init(&pst->lock_mtx, "mtx_pie", NULL, MTX_DEF); in pie_init()
559 callout_init_mtx(&pst->aqm_pie_callout, &pst->lock_mtx, in pie_init()
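A worked example of the activation threshold computed just above, assuming the default limit of 10240 packets and 1024 flows; pie_enqueue() later switches PIE on for a sub-queue once its backlog reaches this value:

	#include <stdio.h>

	/* One third of a sub-queue's fair share of the scheduler limit,
	 * using assumed FQ-PIE defaults (limit 10240, 1024 flows). */
	int
	main(void)
	{
		long limit = 10240, flows_cnt = 1024;
		long one_third_q_size = (limit / flows_cnt) / 3;

		printf("one_third_q_size = %ld\n", one_third_q_size);	/* prints 3 */
		return (0);
	}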
568 * extra memory when number of active sub-queues reaches zero.
571 static void
575 struct pie_status *pst = &q->pst; in fqpie_callout_cleanup()
578 mtx_unlock(&pst->lock_mtx); in fqpie_callout_cleanup()
579 mtx_destroy(&pst->lock_mtx); in fqpie_callout_cleanup()
580 psi_extra = q->psi_extra; in fqpie_callout_cleanup()
583 psi_extra->nr_active_q--; in fqpie_callout_cleanup()
585 /* when all sub-queues are destroyed, free flows fq_pie extra vars memory */ in fqpie_callout_cleanup()
586 if (!psi_extra->nr_active_q) { in fqpie_callout_cleanup()
587 free(psi_extra->flows, M_DUMMYNET); in fqpie_callout_cleanup()
589 fq_pie_desc.ref_count--; in fqpie_callout_cleanup()
595 * Clean up PIE status for sub-queue 'q'
598 static int
601 struct pie_status *pst = &q->pst; in pie_cleanup()
603 mtx_lock(&pst->lock_mtx); in pie_cleanup()
604 callout_reset_sbt(&pst->aqm_pie_callout, in pie_cleanup()
606 mtx_unlock(&pst->lock_mtx); in pie_cleanup()
611 * Dequeue and return a packet from sub-queue 'q' or NULL if 'q' is empty.
614 static struct mbuf *
624 pst = &q->pst; in pie_dequeue()
625 pprms = q->pst.parms; in pie_dequeue()
629 !(pprms->flags & PIE_DEPRATEEST_ENABLED)); in pie_dequeue()
631 if (!m || !(pst->sflags & PIE_ACTIVE)) in pie_dequeue()
635 if (pprms->flags & PIE_DEPRATEEST_ENABLED) { in pie_dequeue()
637 if(pst->sflags & PIE_INMEASUREMENT) { in pie_dequeue()
638 pst->dq_count += m->m_pkthdr.len; in pie_dequeue()
640 if (pst->dq_count >= PIE_DQ_THRESHOLD) { in pie_dequeue()
641 dq_time = now - pst->measurement_start; in pie_dequeue()
647 if(pst->avg_dq_time == 0) in pie_dequeue()
648 pst->avg_dq_time = dq_time; in pie_dequeue()
656 pst->avg_dq_time = (dq_time* w in pie_dequeue()
657 + (pst->avg_dq_time * ((1L << 8) - w))) >> 8; in pie_dequeue()
658 pst->sflags &= ~PIE_INMEASUREMENT; in pie_dequeue()
667 if(!(pst->sflags & PIE_INMEASUREMENT) && in pie_dequeue()
668 q->stats.len_bytes >= PIE_DQ_THRESHOLD) { in pie_dequeue()
669 pst->sflags |= PIE_INMEASUREMENT; in pie_dequeue()
670 pst->measurement_start = now; in pie_dequeue()
671 pst->dq_count = 0; in pie_dequeue()
676 pst->current_qdelay = now - pkt_ts; in pie_dequeue()
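When PIE_DEPRATEEST_ENABLED is set, pie_dequeue() measures how long it takes to drain PIE_DQ_THRESHOLD bytes and folds that into avg_dq_time with the exponential average shown above; fq_calculate_drop_prob() then derives the queueing delay as len_bytes * avg_dq_time / PIE_DQ_THRESHOLD. A small user-space sketch of that arithmetic; the 16 KiB threshold and the weight w are assumptions for illustration, not values copied from this file, and times here are in microseconds:

	#include <stdio.h>
	#include <stdint.h>

	#define PIE_DQ_THRESHOLD	(1UL << 14)	/* assumed: 16 KiB measurement window */

	int
	main(void)
	{
		uint64_t avg_dq_time = 0;
		uint64_t dq_time = 4000;	/* 16 KiB drained in 4 ms */
		const unsigned w = 64;		/* illustrative weight out of 256 */

		/* EWMA update, as in pie_dequeue():
		 * avg = (dq_time * w + avg * (256 - w)) >> 8 */
		if (avg_dq_time == 0)
			avg_dq_time = dq_time;
		else
			avg_dq_time = (dq_time * w +
			    avg_dq_time * ((1UL << 8) - w)) >> 8;

		/* queueing-delay estimate used by fq_calculate_drop_prob() */
		uint64_t len_bytes = 32768;	/* 32 KiB currently queued */
		uint64_t qdelay = len_bytes * avg_dq_time / PIE_DQ_THRESHOLD;

		printf("avg dequeue time for %lu bytes: %ju us\n",
		    (unsigned long)PIE_DQ_THRESHOLD, (uintmax_t)avg_dq_time);
		printf("estimated queueing delay: %ju us\n", (uintmax_t)qdelay);
		return (0);
	}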
682 * Enqueue a packet in q, subject to space and FQ-PIE queue management policy
683 * (whose parameters are in q->fs).
687 static int
695 len = m->m_pkthdr.len; in pie_enqueue()
696 pst = &q->pst; in pie_enqueue()
697 pprms = pst->parms; in pie_enqueue()
701 if (pst->sflags & PIE_ACTIVE && pst->burst_allowance == 0 in pie_enqueue()
702 && drop_early(pst, q->stats.len_bytes) == DROP) { in pie_enqueue()
707 if (pprms->flags & PIE_ECN_ENABLED && pst->drop_prob < in pie_enqueue()
708 (pprms->max_ecnth << (PIE_PROB_BITS - PIE_FIX_POINT_BITS)) in pie_enqueue()
716 if (!(pst->sflags & PIE_ACTIVE) && q->stats.len_bytes >= in pie_enqueue()
717 pst->one_third_q_size) { in pie_enqueue()
722 if (pst->drop_prob == 0 && pst->current_qdelay < (pprms->qdelay_ref >> 1) in pie_enqueue()
723 && pst->qdelay_old < (pprms->qdelay_ref >> 1)) { in pie_enqueue()
725 pst->burst_allowance = pprms->max_burst; in pie_enqueue()
726 if (pprms->flags & PIE_ON_OFF_MODE_ENABLED && q->stats.len_bytes<=0) in pie_enqueue()
731 if (t != DROP && !(pprms->flags & PIE_DEPRATEEST_ENABLED)) { in pie_enqueue()
747 if (m->m_pkthdr.rcvif != NULL) in pie_enqueue()
750 mq_append(&q->mq, m); in pie_enqueue()
755 pst->accu_prob = 0; in pie_enqueue()
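The ECN branch above compares drop_prob (scaled to PIE_MAX_PROB) against max_ecnth (scaled to PIE_SCALE) after shifting the latter into the probability domain. A user-space sketch of that comparison with the default max_ecnth of 0.1; the bit widths are assumptions consistent with dn_aqm_pie.h (13 fixed-point bits, 31 probability bits):

	#include <stdio.h>
	#include <stdint.h>

	/* Assumed widths mirroring dn_aqm_pie.h. */
	#define PIE_FIX_POINT_BITS	13
	#define PIE_SCALE		(1L << PIE_FIX_POINT_BITS)
	#define PIE_PROB_BITS		31
	#define PIE_MAX_PROB		((1ULL << PIE_PROB_BITS) - 1)

	int
	main(void)
	{
		uint64_t max_ecnth = PIE_SCALE / 10;		/* default 0.1 */
		uint64_t drop_prob = PIE_MAX_PROB / 20;		/* current prob = 5% */

		/* same comparison as pie_enqueue() */
		if (drop_prob < (max_ecnth << (PIE_PROB_BITS - PIE_FIX_POINT_BITS)))
			printf("prob %.2f%% < ecn threshold: mark packet (ECN CE)\n",
			    100.0 * drop_prob / PIE_MAX_PROB);
		else
			printf("prob %.2f%% >= ecn threshold: drop packet\n",
			    100.0 * drop_prob / PIE_MAX_PROB);
		return (0);
	}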
763 /* Drop a packet from the head of the FQ-PIE sub-queue */
764 static void
767 struct mbuf *m = q->mq.head; in pie_drop_head()
771 q->mq.head = m->m_nextpkt; in pie_drop_head()
773 fq_update_stats(q, si, -m->m_pkthdr.len, 1); in pie_drop_head()
775 if (si->main_q.ni.length == 0) /* queue is now idle */ in pie_drop_head()
776 si->main_q.q_time = V_dn_cfg.curr_time; in pie_drop_head()
778 q->pst.accu_prob = 0; in pie_drop_head()
789 static inline int
798 ip = (struct ip *)mtodo(m, dn_tag_get(m)->iphdr_off); in fq_pie_classify_flow()
802 isip6 = (ip->ip_v == 6); in fq_pie_classify_flow()
806 *((uint8_t *) &tuple[0]) = ip6->ip6_nxt; in fq_pie_classify_flow()
807 *((uint32_t *) &tuple[1]) = si->perturbation; in fq_pie_classify_flow()
808 memcpy(&tuple[5], ip6->ip6_src.s6_addr, 16); in fq_pie_classify_flow()
809 memcpy(&tuple[21], ip6->ip6_dst.s6_addr, 16); in fq_pie_classify_flow()
811 switch (ip6->ip6_nxt) { in fq_pie_classify_flow()
814 *((uint16_t *) &tuple[37]) = th->th_dport; in fq_pie_classify_flow()
815 *((uint16_t *) &tuple[39]) = th->th_sport; in fq_pie_classify_flow()
820 *((uint16_t *) &tuple[37]) = uh->uh_dport; in fq_pie_classify_flow()
821 *((uint16_t *) &tuple[39]) = uh->uh_sport; in fq_pie_classify_flow()
833 *((uint8_t *) &tuple[0]) = ip->ip_p; in fq_pie_classify_flow()
834 *((uint32_t *) &tuple[1]) = si->perturbation; in fq_pie_classify_flow()
835 *((uint32_t *) &tuple[5]) = ip->ip_src.s_addr; in fq_pie_classify_flow()
836 *((uint32_t *) &tuple[9]) = ip->ip_dst.s_addr; in fq_pie_classify_flow()
838 switch (ip->ip_p) { in fq_pie_classify_flow()
841 *((uint16_t *) &tuple[13]) = th->th_dport; in fq_pie_classify_flow()
842 *((uint16_t *) &tuple[15]) = th->th_sport; in fq_pie_classify_flow()
847 *((uint16_t *) &tuple[13]) = uh->uh_dport; in fq_pie_classify_flow()
848 *((uint16_t *) &tuple[15]) = uh->uh_sport; in fq_pie_classify_flow()
860 * FQ-CoDel algorithm.
862 static int
875 flows = si->si_extra->flows; in fq_pie_enqueue()
876 schk = (struct fq_pie_schk *)(si->_si.sched+1); in fq_pie_enqueue()
877 param = &schk->cfg; in fq_pie_enqueue()
880 idx = fq_pie_classify_flow(m, param->flows_cnt, si); in fq_pie_enqueue()
891 /* If the flow (sub-queue) is not active, then add it to the tail of in fq_pie_enqueue()
895 STAILQ_INSERT_TAIL(&si->newflows, &flows[idx], flowchain); in fq_pie_enqueue()
896 flows[idx].deficit = param->quantum; in fq_pie_enqueue()
904 if (mainq->ni.length > schk->cfg.limit) { in fq_pie_enqueue()
906 for (maxidx = 0; maxidx < schk->cfg.flows_cnt; maxidx++) in fq_pie_enqueue()
909 if (maxidx < schk->cfg.flows_cnt) { in fq_pie_enqueue()
910 /* find the largest sub-queue */ in fq_pie_enqueue()
911 for (i = maxidx + 1; i < schk->cfg.flows_cnt; i++) in fq_pie_enqueue()
925 * FQ-CoDel algorithm.
927 static struct mbuf *
938 schk = (struct fq_pie_schk *)(si->_si.sched+1); in fq_pie_dequeue()
939 param = &schk->cfg; in fq_pie_dequeue()
943 if (STAILQ_EMPTY(&si->newflows)) in fq_pie_dequeue()
944 fq_pie_flowlist = &si->oldflows; in fq_pie_dequeue()
946 fq_pie_flowlist = &si->newflows; in fq_pie_dequeue()
954 /* if the flow (sub-queue) has no deficit left, increase its deficit in fq_pie_dequeue()
959 if (f->deficit < 0) { in fq_pie_dequeue()
960 f->deficit += param->quantum; in fq_pie_dequeue()
962 STAILQ_INSERT_TAIL(&si->oldflows, f, flowchain); in fq_pie_dequeue()
982 if (fq_pie_flowlist == &si->newflows) { in fq_pie_dequeue()
984 STAILQ_INSERT_TAIL(&si->oldflows, f, flowchain); in fq_pie_dequeue()
986 f->active = 0; in fq_pie_dequeue()
987 fq_deactivate_pie(&f->pst); in fq_pie_dequeue()
996 f->deficit -= mbuf->m_pkthdr.len; in fq_pie_dequeue()
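The deficit handling above follows the FQ-CoDel scheduling scheme: a flow is given 'quantum' bytes of deficit whenever it runs out and may otherwise dequeue until its deficit goes negative. A much simplified, single-list user-space sketch of that bookkeeping (the real code additionally keeps separate new and old flow lists and serves new flows first); packet sizes are made up for illustration:

	#include <stdio.h>

	#define QUANTUM 1514		/* default FQ-PIE quantum */

	struct flow {
		const char *name;
		int pkts[4];		/* pending packet sizes, 0 = empty slot */
		int head, deficit;
	};

	static int
	flow_head_len(struct flow *f)
	{
		return (f->head < 4 ? f->pkts[f->head] : 0);
	}

	int
	main(void)
	{
		struct flow flows[2] = {
			{ "flow A", { 1500, 1500, 1500, 0 }, 0, QUANTUM },
			{ "flow B", { 200, 200, 0, 0 }, 0, QUANTUM },
		};

		/* round-robin over the flows until everything is drained */
		for (int drained = 0; drained < 2; ) {
			drained = 0;
			for (int i = 0; i < 2; i++) {
				struct flow *f = &flows[i];

				if (flow_head_len(f) == 0) {
					drained++;
					continue;
				}
				if (f->deficit < 0) {
					/* out of deficit: top it up and move on,
					 * like the old-flows requeue in the file */
					f->deficit += QUANTUM;
					continue;
				}
				int len = f->pkts[f->head++];
				f->deficit -= len;
				printf("dequeue %4d bytes from %s (deficit now %d)\n",
				    len, f->name, f->deficit);
			}
		}
		return (0);
	}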
1009 static int
1019 schk = (struct fq_pie_schk *)(_si->sched+1); in fq_pie_new_sched()
1021 if(si->si_extra) { in fq_pie_new_sched()
1027 q = &si->main_q; in fq_pie_new_sched()
1028 set_oid(&q->ni.oid, DN_QUEUE, sizeof(*q)); in fq_pie_new_sched()
1029 q->_si = _si; in fq_pie_new_sched()
1030 q->fs = _si->sched->fs; in fq_pie_new_sched()
1033 si->si_extra = malloc(sizeof(struct fq_pie_si_extra), in fq_pie_new_sched()
1035 if (si->si_extra == NULL) { in fq_pie_new_sched()
1040 si->si_extra->flows = mallocarray(schk->cfg.flows_cnt, in fq_pie_new_sched()
1042 flows = si->si_extra->flows; in fq_pie_new_sched()
1044 free(si->si_extra, M_DUMMYNET); in fq_pie_new_sched()
1045 si->si_extra = NULL; in fq_pie_new_sched()
1051 si->perturbation = random(); in fq_pie_new_sched()
1052 si->si_extra->nr_active_q = 0; in fq_pie_new_sched()
1055 STAILQ_INIT(&si->newflows); in fq_pie_new_sched()
1056 STAILQ_INIT(&si->oldflows); in fq_pie_new_sched()
1058 /* init the flows (sub-queues) */ in fq_pie_new_sched()
1059 for (i = 0; i < schk->cfg.flows_cnt; i++) { in fq_pie_new_sched()
1060 flows[i].pst.parms = &schk->cfg.pcfg; in fq_pie_new_sched()
1061 flows[i].psi_extra = si->si_extra; in fq_pie_new_sched()
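fq_pie_new_sched() allocates one flow slot per configured sub-queue and points every flow's PIE state at the single parameter block of the scheduler instance. A user-space analogue of that setup; the structure names are simplified stand-ins for the kernel ones:

	#include <stdio.h>
	#include <stdlib.h>

	struct pie_parms { long qdelay_ref, tupdate; };
	struct flow { struct pie_parms *parms; int active; long deficit; };

	int
	main(void)
	{
		struct pie_parms shared = { 15000, 15000 };	/* 15 ms, in us */
		long flows_cnt = 1024;				/* default flow count */
		struct flow *flows;

		flows = calloc(flows_cnt, sizeof(*flows));	/* mallocarray analogue */
		if (flows == NULL)
			return (1);
		for (long i = 0; i < flows_cnt; i++)
			flows[i].parms = &shared;	/* all sub-queues share one config */

		printf("initialized %ld sub-queues sharing one PIE config\n", flows_cnt);
		free(flows);
		return (0);
	}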
1075 static int
1084 schk = (struct fq_pie_schk *)(_si->sched+1); in fq_pie_free_sched()
1085 flows = si->si_extra->flows; in fq_pie_free_sched()
1086 for (i = 0; i < schk->cfg.flows_cnt; i++) { in fq_pie_free_sched()
1089 si->si_extra = NULL; in fq_pie_free_sched()
1094 * Configure FQ-PIE scheduler.
1097 static int
1105 ep = (struct dn_extra_parms *) _schk->cfg; in fq_pie_config()
1108 * PIE: 0- qdelay_ref, 1- tupdate, 2- max_burst in fq_pie_config()
1109 * 3- max_ecnth, 4- alpha, 5- beta, 6- flags in fq_pie_config()
1110 * FQ_PIE: 7- quantum, 8- limit, 9- flows in fq_pie_config()
1112 if (ep && ep->oid.len ==sizeof(*ep) && in fq_pie_config()
1113 ep->oid.subtype == DN_SCH_PARAMS) { in fq_pie_config()
1114 fqp_cfg = &schk->cfg; in fq_pie_config()
1115 if (ep->par[0] < 0) in fq_pie_config()
1116 fqp_cfg->pcfg.qdelay_ref = fq_pie_sysctl.pcfg.qdelay_ref; in fq_pie_config()
1118 fqp_cfg->pcfg.qdelay_ref = ep->par[0]; in fq_pie_config()
1119 if (ep->par[1] < 0) in fq_pie_config()
1120 fqp_cfg->pcfg.tupdate = fq_pie_sysctl.pcfg.tupdate; in fq_pie_config()
1122 fqp_cfg->pcfg.tupdate = ep->par[1]; in fq_pie_config()
1123 if (ep->par[2] < 0) in fq_pie_config()
1124 fqp_cfg->pcfg.max_burst = fq_pie_sysctl.pcfg.max_burst; in fq_pie_config()
1126 fqp_cfg->pcfg.max_burst = ep->par[2]; in fq_pie_config()
1127 if (ep->par[3] < 0) in fq_pie_config()
1128 fqp_cfg->pcfg.max_ecnth = fq_pie_sysctl.pcfg.max_ecnth; in fq_pie_config()
1130 fqp_cfg->pcfg.max_ecnth = ep->par[3]; in fq_pie_config()
1131 if (ep->par[4] < 0) in fq_pie_config()
1132 fqp_cfg->pcfg.alpha = fq_pie_sysctl.pcfg.alpha; in fq_pie_config()
1134 fqp_cfg->pcfg.alpha = ep->par[4]; in fq_pie_config()
1135 if (ep->par[5] < 0) in fq_pie_config()
1136 fqp_cfg->pcfg.beta = fq_pie_sysctl.pcfg.beta; in fq_pie_config()
1138 fqp_cfg->pcfg.beta = ep->par[5]; in fq_pie_config()
1139 if (ep->par[6] < 0) in fq_pie_config()
1140 fqp_cfg->pcfg.flags = 0; in fq_pie_config()
1142 fqp_cfg->pcfg.flags = ep->par[6]; in fq_pie_config()
1145 if (ep->par[7] < 0) in fq_pie_config()
1146 fqp_cfg->quantum = fq_pie_sysctl.quantum; in fq_pie_config()
1148 fqp_cfg->quantum = ep->par[7]; in fq_pie_config()
1149 if (ep->par[8] < 0) in fq_pie_config()
1150 fqp_cfg->limit = fq_pie_sysctl.limit; in fq_pie_config()
1152 fqp_cfg->limit = ep->par[8]; in fq_pie_config()
1153 if (ep->par[9] < 0) in fq_pie_config()
1154 fqp_cfg->flows_cnt = fq_pie_sysctl.flows_cnt; in fq_pie_config()
1156 fqp_cfg->flows_cnt = ep->par[9]; in fq_pie_config()
1159 fqp_cfg->pcfg.qdelay_ref = BOUND_VAR(fqp_cfg->pcfg.qdelay_ref, in fq_pie_config()
1161 fqp_cfg->pcfg.tupdate = BOUND_VAR(fqp_cfg->pcfg.tupdate, in fq_pie_config()
1163 fqp_cfg->pcfg.max_burst = BOUND_VAR(fqp_cfg->pcfg.max_burst, in fq_pie_config()
1165 fqp_cfg->pcfg.max_ecnth = BOUND_VAR(fqp_cfg->pcfg.max_ecnth, in fq_pie_config()
1167 fqp_cfg->pcfg.alpha = BOUND_VAR(fqp_cfg->pcfg.alpha, 0, 7 * PIE_SCALE); in fq_pie_config()
1168 fqp_cfg->pcfg.beta = BOUND_VAR(fqp_cfg->pcfg.beta, 0, 7 * PIE_SCALE); in fq_pie_config()
1170 fqp_cfg->quantum = BOUND_VAR(fqp_cfg->quantum,1,9000); in fq_pie_config()
1171 fqp_cfg->limit= BOUND_VAR(fqp_cfg->limit,1,20480); in fq_pie_config()
1172 fqp_cfg->flows_cnt= BOUND_VAR(fqp_cfg->flows_cnt,1,65536); in fq_pie_config()
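fq_pie_config() treats a negative par[] entry as "use the sysctl default" and clamps everything else with BOUND_VAR, using the bounds visible above. A user-space sketch of that pattern for the three FQ-PIE specific knobs (par[7] quantum, par[8] limit, par[9] flows); the defaults are assumed values (1514 / 10240 / 1024), not read from this listing:

	#include <stdio.h>

	#define BOUND(x, l, h)	((x) < (l) ? (l) : ((x) > (h) ? (h) : (x)))

	struct fqpie_knobs {
		long quantum, limit, flows_cnt;
	};

	int
	main(void)
	{
		/* par[7]=quantum, par[8]=limit, par[9]=flows; -1 means "default" */
		long par[10] = { -1, -1, -1, -1, -1, -1, -1, 3000, -1, 100000 };
		struct fqpie_knobs def = { 1514, 10240, 1024 };	/* assumed defaults */
		struct fqpie_knobs cfg;

		cfg.quantum   = par[7] < 0 ? def.quantum   : par[7];
		cfg.limit     = par[8] < 0 ? def.limit     : par[8];
		cfg.flows_cnt = par[9] < 0 ? def.flows_cnt : par[9];

		cfg.quantum   = BOUND(cfg.quantum, 1, 9000);
		cfg.limit     = BOUND(cfg.limit, 1, 20480);
		cfg.flows_cnt = BOUND(cfg.flows_cnt, 1, 65536);

		printf("quantum=%ld limit=%ld flows=%ld\n",
		    cfg.quantum, cfg.limit, cfg.flows_cnt);	/* 3000 10240 65536 */
		return (0);
	}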
1183 * Return FQ-PIE scheduler configurations
1186 static int
1191 fqp_cfg = &schk->cfg; in fq_pie_getconfig()
1193 strcpy(ep->name, fq_pie_desc.name); in fq_pie_getconfig()
1194 ep->par[0] = fqp_cfg->pcfg.qdelay_ref; in fq_pie_getconfig()
1195 ep->par[1] = fqp_cfg->pcfg.tupdate; in fq_pie_getconfig()
1196 ep->par[2] = fqp_cfg->pcfg.max_burst; in fq_pie_getconfig()
1197 ep->par[3] = fqp_cfg->pcfg.max_ecnth; in fq_pie_getconfig()
1198 ep->par[4] = fqp_cfg->pcfg.alpha; in fq_pie_getconfig()
1199 ep->par[5] = fqp_cfg->pcfg.beta; in fq_pie_getconfig()
1200 ep->par[6] = fqp_cfg->pcfg.flags; in fq_pie_getconfig()
1202 ep->par[7] = fqp_cfg->quantum; in fq_pie_getconfig()
1203 ep->par[8] = fqp_cfg->limit; in fq_pie_getconfig()
1204 ep->par[9] = fqp_cfg->flows_cnt; in fq_pie_getconfig()
1210 * FQ-PIE scheduler descriptor
1214 static struct dn_alg fq_pie_desc = {
1220 _SI( .si_datalen = ) sizeof(struct fq_pie_si) - sizeof(struct dn_sch_inst),