Lines Matching +full:max +full:- +full:burst +full:- +full:len
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
109 if (error != 0 || req->newptr == NULL) in sysctl_hash_size()
129 if (error != 0 || req->newptr == NULL) in sysctl_limits()
185 CTLFLAG_RD | CTLFLAG_VNET, DC(red_max_pkt_size), 0, "RED Max packet size");
196 "Adjusted vs non-adjusted curr_time difference (ticks).");
242 if (mtag != NULL && mtag->m_tag_id == DN_AQM_MTAG_TS) { in dn_tag_get()
249 mtag->m_tag_cookie == MTAG_ABI_COMPAT && in dn_tag_get()
250 mtag->m_tag_id == PACKET_TAG_DUMMYNET, in dn_tag_get()
263 if (m->m_flags & M_STACK) { in mq_append()
268 ofs = m->m_data - m->__m_extbuf; in mq_append()
272 m, m->__m_extbuf, m->__m_extlen, ofs, m_new); in mq_append()
273 p = m_new->__m_extbuf; /* new pointer */ in mq_append()
274 l = m_new->__m_extlen; /* new len */ in mq_append()
275 if (l <= m->__m_extlen) { in mq_append()
280 m_new->m_flags &= ~M_STACK; in mq_append()
281 m_new->__m_extbuf = p; // point to new buffer in mq_append()
282 _pkt_copy(m->__m_extbuf, p, m->__m_extlen); in mq_append()
283 m_new->m_data = p + ofs; in mq_append()
287 if (q->head == NULL) in mq_append()
288 q->head = m; in mq_append()
290 q->tail->m_nextpkt = m; in mq_append()
291 q->count++; in mq_append()
292 q->tail = m; in mq_append()
293 m->m_nextpkt = NULL; in mq_append()
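
The mq_append() hits above show two things: under userspace builds, packets whose data still lives on the caller's stack (M_STACK) are copied into a freshly allocated buffer before queueing, and the queue itself is a plain head/tail singly linked FIFO chained through m_nextpkt. A minimal sketch of that tail-append pattern, using a hypothetical pkt/pktq pair instead of real mbufs:

#include <stddef.h>

struct pkt {
	struct pkt *next;		/* plays the role of m_nextpkt */
};

struct pktq {
	struct pkt *head, *tail;
	unsigned count;
};

/* O(1) tail append, mirroring the head/tail/count updates listed above. */
static void
pktq_append(struct pktq *q, struct pkt *p)
{
	if (q->head == NULL)
		q->head = p;
	else
		q->tail->next = p;
	q->count++;
	q->tail = p;
	p->next = NULL;
}
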
306 mnext = m->m_nextpkt; in dn_free_pkts()
312 red_drops (struct dn_queue *q, int len) in red_drops() argument
317 * RED calculates the average queue size (avg) using a low-pass filter in red_drops()
319 * avg <- (1-w_q) * avg + w_q * q_size in red_drops()
323 * avg <- avg * (1 - w_q)^(idle/s) in red_drops()
324 * where s is the time needed for transmitting a medium-sized packet. in red_drops()
331 struct dn_fsk *fs = q->fs; in red_drops()
335 uint32_t q_size = (fs->fs.flags & DN_QSIZE_BYTES) ? in red_drops()
336 q->ni.len_bytes : q->ni.length; in red_drops()
340 /* Queue is not empty, avg <- avg + (q_size - avg) * w_q */ in red_drops()
341 int diff = SCALE(q_size) - q->avg; in red_drops()
342 int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q); in red_drops()
344 q->avg += (int)v; in red_drops()
349 * (1 - w_q)^(idle_time/s) where s is the time to send a in red_drops()
353 if (q->avg) { in red_drops()
354 u_int t = div64((V_dn_cfg.curr_time - q->q_time), fs->lookup_step); in red_drops()
356 q->avg = (t < fs->lookup_depth) ? in red_drops()
357 SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0; in red_drops()
362 if (q->avg < fs->min_th) { in red_drops()
363 q->count = -1; in red_drops()
366 if (q->avg >= fs->max_th) { /* average queue >= max threshold */ in red_drops()
367 if (fs->fs.flags & DN_IS_ECN) in red_drops()
369 if (fs->fs.flags & DN_IS_GENTLE_RED) { in red_drops()
371 * According to Gentle-RED, if avg is greater than in red_drops()
373 * p_b = c_3 * avg - c_4 in red_drops()
374 * where c_3 = (1 - max_p) / max_th in red_drops()
375 * c_4 = 1 - 2 * max_p in red_drops()
377 p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) - in red_drops()
378 fs->c_4; in red_drops()
380 q->count = -1; in red_drops()
383 } else if (q->avg > fs->min_th) { in red_drops()
384 if (fs->fs.flags & DN_IS_ECN) in red_drops()
388 * p_b = c_1 * avg - c_2 in red_drops()
389 * where c_1 = max_p / (max_th - min_th) in red_drops()
390 * c_2 = max_p * min_th / (max_th - min_th) in red_drops()
392 p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2; in red_drops()
395 if (fs->fs.flags & DN_QSIZE_BYTES) in red_drops()
396 p_b = div64((p_b * len), fs->max_pkt_size); in red_drops()
397 if (++q->count == 0) in red_drops()
398 q->random = random() & 0xffff; in red_drops()
401 * q->count counts packets arrived since last drop, so a greater in red_drops()
402 * value of q->count means a greater packet drop probability. in red_drops()
404 if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) { in red_drops()
405 q->count = 0; in red_drops()
407 q->random = random() & 0xffff; in red_drops()
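
Taken together, the red_drops() lines implement RED entirely in fixed point: avg is a low-pass filtered queue size, an idle period decays it through the w_q_lookup table, and between min_th and max_th the drop probability grows linearly and is further scaled by the number of packets accepted since the last drop (q->count). Below is a self-contained floating-point sketch of the same accept/drop decision; the struct and field names are illustrative, and ECN, gentle RED, byte mode and the idle-decay lookup table are deliberately left out:

#include <stdbool.h>
#include <stdlib.h>

struct red_state {
	double avg;		/* EWMA of the queue length */
	double w_q;		/* filter weight, e.g. 0.002 */
	double min_th, max_th;	/* thresholds, in packets */
	double max_p;		/* drop probability at max_th */
	int count;		/* packets since last drop, -1 = none pending */
};

/* Return true if the arriving packet should be dropped (plain RED). */
static bool
red_should_drop(struct red_state *r, int q_len)
{
	double p_b;

	/* avg <- (1 - w_q) * avg + w_q * q_size */
	r->avg += r->w_q * ((double)q_len - r->avg);

	if (r->avg < r->min_th) {
		r->count = -1;
		return (false);
	}
	if (r->avg >= r->max_th)
		return (true);	/* gentle RED would keep ramping instead */

	/* p_b = c_1 * avg - c_2: linear ramp between min_th and max_th */
	p_b = r->max_p * (r->avg - r->min_th) / (r->max_th - r->min_th);

	/* As in the kernel: drop once p_b * count exceeds a uniform draw. */
	r->count++;
	if (p_b * r->count > (double)rand() / RAND_MAX) {
		r->count = 0;
		return (true);
	}
	return (false);
}
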
427 ip = (struct ip *)mtodo(m, dn_tag_get(m)->iphdr_off); in ecn_mark()
429 switch (ip->ip_v) { in ecn_mark()
434 if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_NOTECT) in ecn_mark()
435 return (0); /* not-ECT */ in ecn_mark()
436 if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_CE) in ecn_mark()
440 * ecn-capable but not marked, in ecn_mark()
444 ip->ip_tos |= IPTOS_ECN_CE; in ecn_mark()
445 ip->ip_sum = cksum_adjust(ip->ip_sum, old, *(uint16_t *)ip); in ecn_mark()
454 flowlabel = ntohl(ip6->ip6_flow); in ecn_mark()
459 return (0); /* not-ECT */ in ecn_mark()
464 * ecn-capable but not marked, mark CE in ecn_mark()
467 ip6->ip6_flow = htonl(flowlabel); in ecn_mark()
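
ecn_mark() rewrites the ECN codepoint to CE and, for IPv4, repairs the header checksum incrementally via cksum_adjust() rather than recomputing it over the whole header. The one's-complement update it relies on is the RFC 1624 rule HC' = ~(~HC + ~m + m'); the helper below is an illustrative stand-alone version, not the kernel's cksum_adjust macro:

#include <stdint.h>

/*
 * RFC 1624 incremental checksum update: old_w is the 16-bit word that was
 * changed in the header, new_w its new value, both in network byte order.
 */
static uint16_t
cksum_adjust_word(uint16_t cksum, uint16_t old_w, uint16_t new_w)
{
	uint32_t sum;

	sum = (uint16_t)~cksum + (uint16_t)~old_w + new_w;
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries back in */
	sum = (sum & 0xffff) + (sum >> 16);
	return ((uint16_t)~sum);
}

In the listed IPv4 path, old is the first 16-bit word of the header captured before IPTOS_ECN_CE is ORed into ip_tos, and the adjusted value is written back to ip_sum.
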
477 * (whose parameters are in q->fs).
486 uint64_t len; in dn_enqueue() local
488 if (q->fs == NULL || q->_si == NULL) { in dn_enqueue()
490 __FUNCTION__, q->fs, q->_si); in dn_enqueue()
494 f = &(q->fs->fs); in dn_enqueue()
495 ni = &q->_si->ni; in dn_enqueue()
496 len = m->m_pkthdr.len; in dn_enqueue()
498 q->ni.tot_bytes += len; in dn_enqueue()
499 q->ni.tot_pkts++; in dn_enqueue()
500 ni->tot_bytes += len; in dn_enqueue()
501 ni->tot_pkts++; in dn_enqueue()
504 if (f->plr[0] || f->plr[1]) { in dn_enqueue()
505 if (__predict_true(f->plr[1] == 0)) { in dn_enqueue()
506 if (random() < f->plr[0]) in dn_enqueue()
509 switch (f->pl_state) { in dn_enqueue()
511 if (random() < f->plr[3]) in dn_enqueue()
512 f->pl_state = PLR_STATE_G; in dn_enqueue()
513 if (random() < f->plr[2]) in dn_enqueue()
518 if (random() < f->plr[1]) in dn_enqueue()
519 f->pl_state = PLR_STATE_B; in dn_enqueue()
520 if (random() < f->plr[0]) in dn_enqueue()
526 if (m->m_pkthdr.rcvif != NULL) in dn_enqueue()
530 if (q->fs->aqmfp) in dn_enqueue()
531 return q->fs->aqmfp->enqueue(q, m); in dn_enqueue()
533 if (f->flags & DN_IS_RED && red_drops(q, m->m_pkthdr.len)) { in dn_enqueue()
534 if (!(f->flags & DN_IS_ECN) || !ecn_mark(m)) in dn_enqueue()
537 if (f->flags & DN_QSIZE_BYTES) { in dn_enqueue()
538 if (q->ni.len_bytes > f->qsize) in dn_enqueue()
540 } else if (q->ni.length >= f->qsize) { in dn_enqueue()
543 mq_append(&q->mq, m); in dn_enqueue()
544 q->ni.length++; in dn_enqueue()
545 q->ni.len_bytes += len; in dn_enqueue()
546 ni->length++; in dn_enqueue()
547 ni->len_bytes += len; in dn_enqueue()
553 q->ni.drops++; in dn_enqueue()
554 ni->drops++; in dn_enqueue()
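
Before queueing, dn_enqueue() applies the configured packet loss rate: with only plr[0] set it is a uniform Bernoulli drop, otherwise the listed switch implements a two-state (Gilbert-style) model. Reading the listed branches, plr[0]/plr[1] are the drop and good-to-bad transition probabilities used in the good state, and plr[2]/plr[3] the drop and bad-to-good probabilities used in the bad state, all scaled to the range returned by random(). A floating-point sketch of that state machine, with illustrative names:

#include <stdbool.h>
#include <stdlib.h>

enum plr_state { PLR_STATE_G, PLR_STATE_B };

struct plr_model {
	enum plr_state state;
	double drop_g, to_bad;	/* good state: drop prob., P(G -> B) */
	double drop_b, to_good;	/* bad state:  drop prob., P(B -> G) */
};

static double
u01(void)
{
	return ((double)rand() / RAND_MAX);
}

/* Two-state loss model: returns true if the packet should be dropped. */
static bool
plr_drop(struct plr_model *p)
{
	if (p->state == PLR_STATE_B) {
		if (u01() < p->to_good)
			p->state = PLR_STATE_G;
		return (u01() < p->drop_b);
	}
	if (u01() < p->to_bad)
		p->state = PLR_STATE_B;
	return (u01() < p->drop_g);
}
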
570 dline->oid.subtype = 0; /* not in heap */ in transmit_event()
571 while ((m = dline->mq.head) != NULL) { in transmit_event()
573 if (!DN_KEY_LEQ(pkt->output_time, now)) in transmit_event()
575 dline->mq.head = m->m_nextpkt; in transmit_event()
576 dline->mq.count--; in transmit_event()
577 if (m->m_pkthdr.rcvif != NULL && in transmit_event()
584 dline->oid.subtype = 1; /* in heap */ in transmit_event()
585 heap_insert(&V_dn_cfg.evheap, pkt->output_time, dline); in transmit_event()
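
transmit_event() drains the delay line: packets whose tagged output_time has been reached are unlinked and handed back for transmission, and if packets remain the delay line is re-inserted into the global event heap keyed by the next deadline. A stripped-down sketch of that drain-and-rearm pattern; the packet type and the deliver callback are placeholders, not the kernel's dn_dline or heap API:

#include <stdint.h>
#include <stddef.h>

struct dpkt {
	struct dpkt *next;
	uint64_t output_time;		/* deadline, in scheduler ticks */
};

/*
 * Deliver every packet whose deadline is <= now, and return the deadline of
 * the first still-pending packet, or 0 if the delay line became empty and
 * does not need to be re-armed in the event heap.
 */
static uint64_t
delay_line_drain(struct dpkt **head, uint64_t now,
    void (*deliver)(struct dpkt *))
{
	struct dpkt *m;

	while ((m = *head) != NULL && m->output_time <= now) {
		*head = m->next;
		m->next = NULL;
		deliver(m);
	}
	return (*head != NULL ? (*head)->output_time : 0);
}
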
599 struct dn_profile *pf = s->profile; in extra_bits()
601 if (!pf || pf->samples_no == 0) in extra_bits()
603 index = random() % pf->samples_no; in extra_bits()
604 bits = div64((uint64_t)pf->samples[index] * s->link.bandwidth, 1000); in extra_bits()
605 if (index >= pf->loss_level) { in extra_bits()
608 dt->dn_dir = DIR_DROP; in extra_bits()
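
extra_bits() charges a per-packet overhead drawn from a user-supplied empirical profile: a random sample (apparently in milliseconds, given the divide by 1000) is converted into the equivalent number of bits at the link bandwidth, and samples at or past loss_level mark the packet for drop. A hedged stand-alone version of that conversion; the sentinel return value is my own convention:

#include <stdint.h>
#include <stdlib.h>

/*
 * Pick one sample from an empirical per-packet delay profile and convert it
 * to extra "bits" to charge against the scheduler's credit.  Returns
 * UINT64_MAX when the sample falls inside the profile's loss region.
 */
static uint64_t
profile_extra_bits(const uint32_t *samples_ms, int samples_no, int loss_level,
    uint64_t bandwidth_bps)
{
	int idx;

	if (samples_no == 0)
		return (0);
	idx = rand() % samples_no;
	if (idx >= loss_level)
		return (UINT64_MAX);	/* caller should drop this packet */
	return ((uint64_t)samples_ms[idx] * bandwidth_bps / 1000);
}
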
621 struct dn_schk *s = si->sched; in serve_sched()
623 int delay_line_idle = (si->dline.mq.head == NULL); in serve_sched()
629 q->head = NULL; in serve_sched()
632 bw = s->link.bandwidth; in serve_sched()
633 si->kflags &= ~DN_ACTIVE; in serve_sched()
636 si->credit += (now - si->sched_time) * bw; in serve_sched()
638 si->credit = 0; in serve_sched()
639 si->sched_time = now; in serve_sched()
641 while (si->credit >= 0 && (m = s->fp->dequeue(si)) != NULL) { in serve_sched()
646 (m->m_pkthdr.len * 8 + extra_bits(m, s)); in serve_sched()
647 si->credit -= len_scaled; in serve_sched()
649 dn_tag_get(m)->output_time = V_dn_cfg.curr_time + s->link.delay; in serve_sched()
650 if (m->m_pkthdr.rcvif != NULL) in serve_sched()
652 mq_append(&si->dline.mq, m); in serve_sched()
660 if (si->credit >= 0) { in serve_sched()
661 si->idle_time = now; in serve_sched()
665 t = div64(bw - 1 - si->credit, bw); in serve_sched()
667 dn_tag_get(m)->output_time += t; in serve_sched()
668 si->kflags |= DN_ACTIVE; in serve_sched()
672 transmit_event(q, &si->dline, now); in serve_sched()
673 return q->head; in serve_sched()
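
serve_sched() is a credit-based (leaky-bucket style) scheduler: credit grows by elapsed ticks times bandwidth, each dequeued packet costs hz * (8 * length + extra_bits) credit units, and when credit goes negative the instance is pushed back into the event heap after roughly ceil(-credit / bw) ticks, which is what the bw - 1 - credit expression computes. A simplified stand-alone sketch of that accounting loop; the packet source is a callback, extra_bits and the delay line are left out, and a bandwidth of 0 is treated as unlimited, as in the listing:

#include <stdint.h>

struct credit_sched {
	int64_t credit;		/* in bits * hz; may go negative */
	uint64_t bw;		/* link bandwidth, bits/s (0 = unlimited) */
	uint64_t sched_time;	/* tick of the previous service round */
};

/*
 * Serve as many packets as the accumulated credit allows and return the
 * number of ticks until the scheduler must run again (0 = link went idle).
 * next_len() returns the next packet's length in bytes, or 0 if none.
 */
static uint64_t
credit_serve(struct credit_sched *s, uint64_t now, int hz,
    int (*next_len)(void *), void *arg)
{
	int len;

	s->credit += (int64_t)((now - s->sched_time) * s->bw);
	s->sched_time = now;

	while (s->credit >= 0 && (len = next_len(arg)) > 0) {
		/* cost of len bytes; an unlimited link costs nothing */
		if (s->bw != 0)
			s->credit -= (int64_t)hz * 8 * len;
		/* ... the real code tags and appends to the delay line ... */
	}
	if (s->credit >= 0)
		return (0);
	/* ceil(-credit / bw): ticks until the credit is non-negative again */
	return ((uint64_t)(((int64_t)s->bw - 1 - s->credit) / (int64_t)s->bw));
}
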
704 V_dn_cfg.tick_lost += pending - 1; in dummynet_task()
708 V_dn_cfg.tick_last = (t.tv_sec - V_dn_cfg.prev_t.tv_sec) * 1000000 + in dummynet_task()
709 (t.tv_usec - V_dn_cfg.prev_t.tv_usec); in dummynet_task()
711 V_dn_cfg.tick_delta = (V_dn_cfg.tick_last * hz - 1000000) / hz; in dummynet_task()
725 if (V_dn_cfg.tick_delta_sum - tick >= 0) { in dummynet_task()
733 V_dn_cfg.curr_time--; in dummynet_task()
734 V_dn_cfg.tick_diff--; in dummynet_task()
744 DN_KEY_LT(V_dn_cfg.curr_time, HEAP_TOP(&V_dn_cfg.evheap)->key)) in dummynet_task()
746 p = HEAP_TOP(&V_dn_cfg.evheap)->object; in dummynet_task()
748 if (p->type == DN_SCH_I) { in dummynet_task()
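
dummynet_task() keeps the virtual clock honest against timer drift: tick_last is the measured length of the previous tick in microseconds, tick_delta is its signed error against the nominal tick, (tick_last * hz - 1000000) / hz, and the errors accumulate in tick_delta_sum until they amount to a whole tick, at which point curr_time is nudged by one and tick_diff records the adjustment. As a worked example under assumed values, with hz = 1000 (a 1000 us nominal tick) a tick that really took 1017 us yields tick_delta = (1017 * 1000 - 1000000) / 1000 = 17 us, and after 59 such ticks the accumulated 1003 us pushes curr_time forward by one extra tick, leaving 3 us of residual error. A compact sketch of the accumulate-and-nudge step, with illustrative parameter names:

#include <stdint.h>

/*
 * Accumulate the per-tick timing error (microseconds) and nudge the virtual
 * clock by one tick whenever the accumulated error exceeds a whole tick.
 */
static void
tick_adjust(int64_t *delta_sum, uint64_t *curr_time, int64_t *tick_diff,
    int64_t tick_delta, int64_t tick_us)
{
	*delta_sum += tick_delta;
	if (*delta_sum >= tick_us) {		/* real time ran ahead */
		*delta_sum -= tick_us;
		(*curr_time)++;
		(*tick_diff)++;
	} else if (*delta_sum <= -tick_us) {	/* real time ran behind */
		*delta_sum += tick_us;
		(*curr_time)--;
		(*tick_diff)--;
	}
}
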
788 n = m->m_nextpkt; in dummynet_send()
789 m->m_nextpkt = NULL; in dummynet_send()
798 ifp = ifnet_byindexgen(pkt->if_index, pkt->if_idxgen); in dummynet_send()
799 if (((pkt->dn_dir == (DIR_OUT | PROTO_LAYER2)) || in dummynet_send()
800 (pkt->dn_dir == (DIR_OUT | PROTO_LAYER2 | PROTO_IPV6))) && in dummynet_send()
804 dst = pkt->dn_dir; in dummynet_send()
805 tag->m_tag_cookie = MTAG_IPFW_RULE; in dummynet_send()
806 tag->m_tag_id = 0; in dummynet_send()
844 if (m->m_len < ETHER_HDR_LEN && in dummynet_send()
850 ether_demux(m->m_pkthdr.rcvif, m); in dummynet_send()
884 dt->rule = fwa->rule; in tag_mbuf()
886 dt->rule.info &= (IPFW_ONEPASS | IPFW_IS_DUMMYNET); in tag_mbuf()
887 dt->dn_dir = dir; in tag_mbuf()
888 if (fwa->flags & IPFW_ARGS_OUT && fwa->ifp != NULL) { in tag_mbuf()
890 dt->if_index = fwa->ifp->if_index; in tag_mbuf()
891 dt->if_idxgen = fwa->ifp->if_idxgen; in tag_mbuf()
893 /* dt->output_time is updated as we move through */ in tag_mbuf()
894 dt->output_time = V_dn_cfg.curr_time; in tag_mbuf()
895 dt->iphdr_off = (dir & PROTO_LAYER2) ? ETHER_HDR_LEN : 0; in tag_mbuf()
914 fs_id = (fwa->rule.info & IPFW_INFO_MASK) + in dummynet_io()
915 ((fwa->rule.info & IPFW_IS_PIPE) ? 2*DN_MAX_ID : 0); in dummynet_io()
917 if (fwa->flags & IPFW_ARGS_IN) in dummynet_io()
921 if (fwa->flags & IPFW_ARGS_ETHER) in dummynet_io()
923 else if (fwa->flags & IPFW_ARGS_IP6) in dummynet_io()
934 if (fs->sched == NULL) /* should not happen */ in dummynet_io()
937 si = ipdn_si_find(fs->sched, &(fwa->f_id)); in dummynet_io()
944 if (fs->sched->fp->flags & DN_MULTIQUEUE) { in dummynet_io()
945 q = ipdn_q_find(fs, si, &(fwa->f_id)); in dummynet_io()
949 if (fs->sched->fp->enqueue(si, q, m)) { in dummynet_io()
954 V_dn_cfg.io_pkt_drop--; in dummynet_io()
959 if (si->kflags & DN_ACTIVE) { in dummynet_io()
965 if (si->idle_time < V_dn_cfg.curr_time) { in dummynet_io()
967 struct dn_link *p = &fs->sched->link; in dummynet_io()
969 si->sched_time = V_dn_cfg.curr_time; in dummynet_io()
970 si->credit = V_dn_cfg.io_fast ? p->bandwidth : 0; in dummynet_io()
971 if (p->burst) { in dummynet_io()
972 uint64_t burst = (V_dn_cfg.curr_time - si->idle_time) * p->bandwidth; in dummynet_io() local
973 if (burst > p->burst) in dummynet_io()
974 burst = p->burst; in dummynet_io()
975 si->credit += burst; in dummynet_io()
981 /* optimization -- pass it back to ipfw for immediate send */ in dummynet_io()
990 tag->m_tag_cookie = MTAG_IPFW_RULE; in dummynet_io()
991 tag->m_tag_id = 0; in dummynet_io()
993 if (m->m_nextpkt != NULL) { in dummynet_io()
995 m->m_nextpkt = NULL; in dummynet_io()
1014 return (fs && (fs->fs.flags & DN_NOERROR)) ? 0 : ENOBUFS; in dummynet_io()
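
When a scheduler instance wakes up after being idle, dummynet_io() grants it a burst of credit proportional to the idle interval, capped by the pipe's configured burst, so a quiet link may send a bounded backlog at line rate before normal pacing resumes; with io_fast enabled the first packet can even bypass the queue and go straight back to ipfw. As a rough worked example under assumed values, reading credit in the same units as serve_sched() above (ticks times bits per second): with hz = 1000, a 10 Mbit/s pipe idle for 50 ticks (50 ms) earns 50 * 10^7 = 5 * 10^8 credit units, which at a cost of hz * 8 bits per byte covers about 62.5 kB of traffic, unless the configured burst is smaller. A trivial stand-alone version of the clamp:

#include <stdint.h>

/*
 * Credit granted when a pipe becomes active again: proportional to how long
 * it sat idle, but never more than the configured maximum burst.
 */
static uint64_t
wakeup_burst(uint64_t curr_time, uint64_t idle_time, uint64_t bandwidth,
    uint64_t max_burst)
{
	uint64_t burst;

	burst = (curr_time - idle_time) * bandwidth;
	if (burst > max_burst)
		burst = max_burst;
	return (burst);
}
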