Lines Matching +full:avg +full:- +full:samples

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
105 if (error != 0 || req->newptr == NULL) in sysctl_hash_size()
125 if (error != 0 || req->newptr == NULL) in sysctl_limits()
192 "Adjusted vs non-adjusted curr_time difference (ticks).");
238 if (mtag != NULL && mtag->m_tag_id == DN_AQM_MTAG_TS) { in dn_tag_get()
245 mtag->m_tag_cookie == MTAG_ABI_COMPAT && in dn_tag_get()
246 mtag->m_tag_id == PACKET_TAG_DUMMYNET, in dn_tag_get()
259 if (m->m_flags & M_STACK) { in mq_append()
264 ofs = m->m_data - m->__m_extbuf; in mq_append()
268 m, m->__m_extbuf, m->__m_extlen, ofs, m_new); in mq_append()
269 p = m_new->__m_extbuf; /* new pointer */ in mq_append()
270 l = m_new->__m_extlen; /* new len */ in mq_append()
271 if (l <= m->__m_extlen) { in mq_append()
276 m_new->m_flags &= ~M_STACK; in mq_append()
277 m_new->__m_extbuf = p; // point to new buffer in mq_append()
278 _pkt_copy(m->__m_extbuf, p, m->__m_extlen); in mq_append()
279 m_new->m_data = p + ofs; in mq_append()
283 if (q->head == NULL) in mq_append()
284 q->head = m; in mq_append()
286 q->tail->m_nextpkt = m; in mq_append()
287 q->count++; in mq_append()
288 q->tail = m; in mq_append()
289 m->m_nextpkt = NULL; in mq_append()
302 mnext = m->m_nextpkt; in dn_free_pkts()
313 * RED calculates the average queue size (avg) using a low-pass filter in red_drops()
315 * avg <- (1-w_q) * avg + w_q * q_size in red_drops()
319 * avg <- avg * (1 - w_q)^(idle/s) in red_drops()
320 * where s is the time needed for transmitting a medium-sized packet. in red_drops()
322 * Now, if avg < min_th the packet is enqueued. in red_drops()
323 * If avg > max_th the packet is dropped. Otherwise, the packet is in red_drops()
324 * dropped with probability P function of avg. in red_drops()
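
A worked illustration of the estimator described above, as a plain
floating-point userspace sketch (the kernel uses fixed-point SCALE()/
SCALE_MUL() arithmetic and a precomputed w_q_lookup[] table for the idle
case; the function below only shows the math):

    #include <math.h>

    /*
     * RED average queue estimate.  avg and q_size are in the same unit
     * (bytes or packets); idle_time and s share a time unit.
     */
    static double
    red_update_avg(double avg, double w_q, double q_size,
        double idle_time, double s)
    {
        if (q_size > 0.0) {
            /* Non-empty queue: avg <- (1 - w_q) * avg + w_q * q_size */
            avg = (1.0 - w_q) * avg + w_q * q_size;
        } else {
            /*
             * Queue went idle: decay the estimate as if idle_time/s
             * empty samples had been averaged in:
             *     avg <- avg * (1 - w_q)^(idle_time / s)
             */
            avg *= pow(1.0 - w_q, idle_time / s);
        }
        return (avg);
    }
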
327 struct dn_fsk *fs = q->fs; in red_drops()
331 uint32_t q_size = (fs->fs.flags & DN_QSIZE_BYTES) ? in red_drops()
332 q->ni.len_bytes : q->ni.length; in red_drops()
336 /* Queue is not empty, avg <- avg + (q_size - avg) * w_q */ in red_drops()
337 int diff = SCALE(q_size) - q->avg; in red_drops()
338 int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q); in red_drops()
340 q->avg += (int)v; in red_drops()
345 * (1 - w_q)^(idle_time/s) where s is the time to send a in red_drops()
349 if (q->avg) { in red_drops()
350 u_int t = div64((V_dn_cfg.curr_time - q->q_time), fs->lookup_step); in red_drops()
352 q->avg = (t < fs->lookup_depth) ? in red_drops()
353 SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0; in red_drops()
358 if (q->avg < fs->min_th) { in red_drops()
359 q->count = -1; in red_drops()
362 if (q->avg >= fs->max_th) { /* average queue >= max threshold */ in red_drops()
363 if (fs->fs.flags & DN_IS_ECN) in red_drops()
365 if (fs->fs.flags & DN_IS_GENTLE_RED) { in red_drops()
367 * According to Gentle-RED, if avg is greater than in red_drops()
369 * p_b = c_3 * avg - c_4 in red_drops()
370 * where c_3 = (1 - max_p) / max_th in red_drops()
371 * c_4 = 1 - 2 * max_p in red_drops()
373 p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) - in red_drops()
374 fs->c_4; in red_drops()
376 q->count = -1; in red_drops()
379 } else if (q->avg > fs->min_th) { in red_drops()
380 if (fs->fs.flags & DN_IS_ECN) in red_drops()
384 * p_b = c_1 * avg - c_2 in red_drops()
385 * where c_1 = max_p / (max_th - min_th) in red_drops()
386 * c_2 = max_p * min_th / (max_th - min_th) in red_drops()
388 p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2; in red_drops()
391 if (fs->fs.flags & DN_QSIZE_BYTES) in red_drops()
392 p_b = div64((p_b * len) , fs->max_pkt_size); in red_drops()
393 if (++q->count == 0) in red_drops()
394 q->random = random() & 0xffff; in red_drops()
397 * q->count counts packets arrived since last drop, so a greater in red_drops()
398 * value of q->count means a greater packet drop probability. in red_drops()
400 if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) { in red_drops()
401 q->count = 0; in red_drops()
403 q->random = random() & 0xffff; in red_drops()
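
The drop-probability formulas and the count mechanism above can be
condensed into a floating-point sketch (again a simplification of ours,
not the kernel's scaled-integer code): between min_th and max_th the
probability ramps linearly up to max_p, gentle RED extends the ramp from
max_p to 1 between max_th and 2*max_th, and the final decision scales p_b
by the number of packets accepted since the last drop so that long runs
without a drop become increasingly unlikely.

    #include <stdbool.h>
    #include <stdlib.h>

    /*
     * Drop decision for avg >= min_th.  The c_1..c_4 constants from the
     * comments above are recomputed inline from max_p and the thresholds.
     */
    static bool
    red_drop_decision(double avg, double min_th, double max_th,
        double max_p, bool gentle, int *count)
    {
        double p_b;

        if (avg >= max_th) {
            if (!gentle)
                return (true);          /* plain RED: drop unconditionally */
            /*
             * Gentle RED: p_b = c_3 * avg - c_4, rising from max_p at
             * max_th to 1 at 2 * max_th, with
             *     c_3 = (1 - max_p) / max_th, c_4 = 1 - 2 * max_p.
             */
            p_b = (1.0 - max_p) / max_th * avg - (1.0 - 2.0 * max_p);
        } else {
            /*
             * p_b = c_1 * avg - c_2, with c_1 = max_p / (max_th - min_th)
             * and c_2 = max_p * min_th / (max_th - min_th).
             */
            p_b = max_p * (avg - min_th) / (max_th - min_th);
        }

        /* Scale by the packets accepted since the last drop. */
        (*count)++;
        if ((double)random() / 2147483647.0 < p_b * (double)*count) {
            *count = 0;                 /* drop and restart the run */
            return (true);
        }
        return (false);
    }
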
423 ip = (struct ip *)mtodo(m, dn_tag_get(m)->iphdr_off); in ecn_mark()
425 switch (ip->ip_v) { in ecn_mark()
430 if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_NOTECT) in ecn_mark()
431 return (0); /* not-ECT */ in ecn_mark()
432 if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_CE) in ecn_mark()
436 * ecn-capable but not marked, in ecn_mark()
440 ip->ip_tos |= IPTOS_ECN_CE; in ecn_mark()
441 ip->ip_sum = cksum_adjust(ip->ip_sum, old, *(uint16_t *)ip); in ecn_mark()
450 flowlabel = ntohl(ip6->ip6_flow); in ecn_mark()
455 return (0); /* not-ECT */ in ecn_mark()
460 * ecn-capable but not marked, mark CE in ecn_mark()
463 ip6->ip6_flow = htonl(flowlabel); in ecn_mark()
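
ecn_mark() above looks at the two ECN bits: not-ECT packets cannot be
marked (the caller then falls back to dropping), packets already carrying
CE need no change, and ECT(0)/ECT(1) packets get CE set; for IPv4 the
header checksum is patched incrementally instead of being recomputed.  A
self-contained sketch of the IPv4 case using an RFC 1624 style update
(helper and field names below are ours, not FreeBSD's):

    #include <stdbool.h>
    #include <stdint.h>

    #define ECN_MASK    0x03            /* low two TOS bits */
    #define ECN_NOTECT  0x00
    #define ECN_CE      0x03

    /* RFC 1624, eqn. 3: HC' = ~(~HC + ~m + m'), folded to 16 bits. */
    static uint16_t
    cksum_update16(uint16_t cksum, uint16_t old_word, uint16_t new_word)
    {
        uint32_t sum = (uint16_t)~cksum + (uint16_t)~old_word + new_word;

        sum = (sum & 0xffff) + (sum >> 16);     /* fold the carries */
        sum = (sum & 0xffff) + (sum >> 16);
        return ((uint16_t)~sum);
    }

    struct ipv4_hdr_prefix {            /* first two bytes of an IPv4 header */
        uint8_t ver_ihl;
        uint8_t tos;                    /* DSCP plus the two ECN bits */
    };

    /*
     * Set CE on an ECN-capable packet and patch the checksum over the
     * 16-bit word that contains the TOS byte.  The checksum must use the
     * same byte-order convention as old/new below (i.e. ntohs()-converted
     * on little-endian hosts).
     */
    static bool
    ipv4_mark_ce(struct ipv4_hdr_prefix *ip, uint16_t *ip_sum)
    {
        uint16_t old, new;

        if ((ip->tos & ECN_MASK) == ECN_NOTECT)
            return (false);             /* not ECN-capable: caller must drop */
        if ((ip->tos & ECN_MASK) == ECN_CE)
            return (true);              /* already marked */

        old = ((uint16_t)ip->ver_ihl << 8) | ip->tos;
        ip->tos |= ECN_CE;
        new = ((uint16_t)ip->ver_ihl << 8) | ip->tos;
        *ip_sum = cksum_update16(*ip_sum, old, new);
        return (true);
    }
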
473 * (whose parameters are in q->fs).
484 if (q->fs == NULL || q->_si == NULL) { in dn_enqueue()
486 __FUNCTION__, q->fs, q->_si); in dn_enqueue()
490 f = &(q->fs->fs); in dn_enqueue()
491 ni = &q->_si->ni; in dn_enqueue()
492 len = m->m_pkthdr.len; in dn_enqueue()
494 q->ni.tot_bytes += len; in dn_enqueue()
495 q->ni.tot_pkts++; in dn_enqueue()
496 ni->tot_bytes += len; in dn_enqueue()
497 ni->tot_pkts++; in dn_enqueue()
500 if (f->plr[0] || f->plr[1]) { in dn_enqueue()
501 if (__predict_true(f->plr[1] == 0)) { in dn_enqueue()
502 if (random() < f->plr[0]) in dn_enqueue()
505 switch (f->pl_state) { in dn_enqueue()
507 if (random() < f->plr[3]) in dn_enqueue()
508 f->pl_state = PLR_STATE_G; in dn_enqueue()
509 if (random() < f->plr[2]) in dn_enqueue()
514 if (random() < f->plr[1]) in dn_enqueue()
515 f->pl_state = PLR_STATE_B; in dn_enqueue()
516 if (random() < f->plr[0]) in dn_enqueue()
522 if (m->m_pkthdr.rcvif != NULL) in dn_enqueue()
526 if (q->fs->aqmfp) in dn_enqueue()
527 return q->fs->aqmfp->enqueue(q ,m); in dn_enqueue()
529 if (f->flags & DN_IS_RED && red_drops(q, m->m_pkthdr.len)) { in dn_enqueue()
530 if (!(f->flags & DN_IS_ECN) || !ecn_mark(m)) in dn_enqueue()
533 if (f->flags & DN_QSIZE_BYTES) { in dn_enqueue()
534 if (q->ni.len_bytes > f->qsize) in dn_enqueue()
536 } else if (q->ni.length >= f->qsize) { in dn_enqueue()
539 mq_append(&q->mq, m); in dn_enqueue()
540 q->ni.length++; in dn_enqueue()
541 q->ni.len_bytes += len; in dn_enqueue()
542 ni->length++; in dn_enqueue()
543 ni->len_bytes += len; in dn_enqueue()
548 q->ni.drops++; in dn_enqueue()
549 ni->drops++; in dn_enqueue()
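
The plr[] handling above implements random packet loss: with only plr[0]
set, each packet appears to be dropped independently with that
probability, while a nonzero plr[1] switches to a two-state model
(PLR_STATE_G / PLR_STATE_B) with separate transition and drop
probabilities per state.  The exact mapping of plr[0..3] onto those
probabilities is not fully visible in the matched lines, so the sketch
below is a generic two-state (Gilbert-style) loss model rather than a
transcription of dummynet's:

    #include <stdbool.h>
    #include <stdlib.h>

    enum loss_state { STATE_GOOD, STATE_BAD };

    struct loss_model {
        enum loss_state state;
        double p_drop_good;     /* drop probability while in GOOD */
        double p_drop_bad;      /* drop probability while in BAD */
        double p_g2b;           /* GOOD -> BAD transition probability */
        double p_b2g;           /* BAD -> GOOD transition probability */
    };

    static double
    frand(void)
    {
        return ((double)random() / 2147483647.0);
    }

    /* Advance the state machine once per packet; report whether to drop. */
    static bool
    loss_model_drop(struct loss_model *lm)
    {
        if (lm->state == STATE_GOOD) {
            if (frand() < lm->p_g2b)
                lm->state = STATE_BAD;
        } else {
            if (frand() < lm->p_b2g)
                lm->state = STATE_GOOD;
        }
        return (frand() < (lm->state == STATE_GOOD ?
            lm->p_drop_good : lm->p_drop_bad));
    }
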
565 dline->oid.subtype = 0; /* not in heap */ in transmit_event()
566 while ((m = dline->mq.head) != NULL) { in transmit_event()
568 if (!DN_KEY_LEQ(pkt->output_time, now)) in transmit_event()
570 dline->mq.head = m->m_nextpkt; in transmit_event()
571 dline->mq.count--; in transmit_event()
572 if (m->m_pkthdr.rcvif != NULL && in transmit_event()
579 dline->oid.subtype = 1; /* in heap */ in transmit_event()
580 heap_insert(&V_dn_cfg.evheap, pkt->output_time, dline); in transmit_event()
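
transmit_event() above drains the delay line: packets whose output_time
has been reached are handed on (ultimately to dummynet_send()), and as
soon as one packet is still in the future the delay line is put back into
the event heap keyed on that packet's output_time, with oid.subtype
recording whether the object currently sits in the heap.  A small sketch
of that drain-then-rearm pattern, with our own names and with the send
and heap-insert operations left as hypothetical hooks:

    #include <stddef.h>
    #include <stdint.h>

    struct dpkt {
        struct dpkt *next;
        uint64_t output_time;           /* absolute due time, in ticks */
    };

    struct delay_line {
        struct dpkt *head;
        int in_heap;                    /* plays the role of oid.subtype */
    };

    /* Hypothetical hooks standing in for dummynet_send()/heap_insert(). */
    void deliver_pkt(struct dpkt *p);
    void heap_insert_at(uint64_t key, struct delay_line *d);

    static void
    delay_line_transmit(struct delay_line *d, uint64_t now)
    {
        struct dpkt *p;

        d->in_heap = 0;                 /* just taken out of the heap */
        while ((p = d->head) != NULL) {
            if (p->output_time > now)
                break;                  /* not due yet */
            d->head = p->next;
            p->next = NULL;
            deliver_pkt(p);
        }
        if (d->head != NULL) {          /* re-arm on the next deadline */
            d->in_heap = 1;
            heap_insert_at(d->head->output_time, d);
        }
    }
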
586 * number of bits for the given data rate. The samples are
594 struct dn_profile *pf = s->profile; in extra_bits()
596 if (!pf || pf->samples_no == 0) in extra_bits()
598 index = random() % pf->samples_no; in extra_bits()
599 bits = div64((uint64_t)pf->samples[index] * s->link.bandwidth, 1000); in extra_bits()
600 if (index >= pf->loss_level) { in extra_bits()
603 dt->dn_dir = DIR_DROP; in extra_bits()
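
extra_bits() above charges each packet with extra transmission time drawn
at random from the configured link profile: a random index selects one
sample, the sample is converted to bits at the link bandwidth (the
div64(..., 1000) suggests the samples are stored in milliseconds), and an
index at or beyond loss_level means the packet is lost outright.  A
userspace sketch of that lookup, under those assumptions:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct link_profile {
        uint32_t samples_no;    /* number of entries in samples[] */
        uint32_t loss_level;    /* indices >= this mean "packet lost" */
        uint32_t *samples;      /* extra air time per packet, assumed ms */
    };

    /*
     * Return the extra bits to charge for one packet, or set *lost when
     * the drawn sample falls into the loss region of the profile.
     */
    static uint64_t
    profile_extra_bits(const struct link_profile *pf, uint64_t bw_bps,
        bool *lost)
    {
        uint32_t idx;

        *lost = false;
        if (pf == NULL || pf->samples_no == 0)
            return (0);
        idx = (uint32_t)random() % pf->samples_no;
        if (idx >= pf->loss_level) {
            *lost = true;
            return (0);
        }
        /* ms * bits/s / 1000 = bits of extra transmission time */
        return ((uint64_t)pf->samples[idx] * bw_bps / 1000);
    }
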
616 struct dn_schk *s = si->sched; in serve_sched()
618 int delay_line_idle = (si->dline.mq.head == NULL); in serve_sched()
624 q->head = NULL; in serve_sched()
627 bw = s->link.bandwidth; in serve_sched()
628 si->kflags &= ~DN_ACTIVE; in serve_sched()
631 si->credit += (now - si->sched_time) * bw; in serve_sched()
633 si->credit = 0; in serve_sched()
634 si->sched_time = now; in serve_sched()
636 while (si->credit >= 0 && (m = s->fp->dequeue(si)) != NULL) { in serve_sched()
641 (m->m_pkthdr.len * 8 + extra_bits(m, s)); in serve_sched()
642 si->credit -= len_scaled; in serve_sched()
644 dn_tag_get(m)->output_time = V_dn_cfg.curr_time + s->link.delay ; in serve_sched()
645 if (m->m_pkthdr.rcvif != NULL) in serve_sched()
647 mq_append(&si->dline.mq, m); in serve_sched()
655 if (si->credit >= 0) { in serve_sched()
656 si->idle_time = now; in serve_sched()
660 t = div64(bw - 1 - si->credit, bw); in serve_sched()
662 dn_tag_get(m)->output_time += t; in serve_sched()
663 si->kflags |= DN_ACTIVE; in serve_sched()
667 transmit_event(q, &si->dline, now); in serve_sched()
668 return q->head; in serve_sched()
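
serve_sched() above is a credit-based shaper: the scheduler instance
earns credit in proportion to the time elapsed since it was last served
times the link bandwidth, spends it per dequeued packet (payload bits
plus the profile's extra bits), and once the credit goes negative it
computes how many ticks must pass before the next packet may leave, as a
rounded-up deficit/bandwidth, and pushes that packet's output_time
forward accordingly.  A simplified sketch of the credit loop, with the
scheduler's dequeue operation left as a hypothetical hook:

    #include <stddef.h>
    #include <stdint.h>

    struct spkt {
        struct spkt *next;
        int64_t bits;           /* cost of sending this packet */
    };

    struct shaper {
        int64_t credit;         /* bits we are currently allowed to send */
        int64_t bandwidth;      /* bits earned per tick */
        int64_t sched_time;     /* last tick the credit was refreshed */
    };

    /* Hypothetical hook standing in for s->fp->dequeue(si). */
    struct spkt *shaper_dequeue(struct shaper *sh);

    /*
     * Serve packets at tick 'now'; return how many ticks until the
     * shaper needs service again, or 0 if the queue drained.
     */
    static int64_t
    shaper_serve(struct shaper *sh, int64_t now)
    {
        struct spkt *p;

        sh->credit += (now - sh->sched_time) * sh->bandwidth;
        sh->sched_time = now;

        while (sh->credit >= 0 && (p = shaper_dequeue(sh)) != NULL)
            sh->credit -= p->bits;      /* ...and hand p to the delay line */

        if (sh->credit >= 0)
            return (0);                 /* queue drained: go idle */
        /* Ticks until the deficit is repaid, rounded up. */
        return ((sh->bandwidth - 1 - sh->credit) / sh->bandwidth);
    }
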
699 V_dn_cfg.tick_lost += pending - 1; in dummynet_task()
703 V_dn_cfg.tick_last = (t.tv_sec - V_dn_cfg.prev_t.tv_sec) * 1000000 + in dummynet_task()
704 (t.tv_usec - V_dn_cfg.prev_t.tv_usec); in dummynet_task()
706 V_dn_cfg.tick_delta = (V_dn_cfg.tick_last * hz - 1000000) / hz; in dummynet_task()
720 if (V_dn_cfg.tick_delta_sum - tick >= 0) { in dummynet_task()
728 V_dn_cfg.curr_time--; in dummynet_task()
729 V_dn_cfg.tick_diff--; in dummynet_task()
739 DN_KEY_LT(V_dn_cfg.curr_time, HEAP_TOP(&V_dn_cfg.evheap)->key)) in dummynet_task()
741 p = HEAP_TOP(&V_dn_cfg.evheap)->object; in dummynet_task()
743 if (p->type == DN_SCH_I) { in dummynet_task()
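
dummynet_task() above measures how long each tick really lasted from the
timeval deltas, compares it with the nominal 1000000/hz microseconds, and
accumulates the difference in tick_delta_sum; once a whole tick of drift
has built up in either direction, curr_time is nudged by one tick and
tick_diff records the net adjustment, matching the sysctl description
"Adjusted vs non-adjusted curr_time difference (ticks)".  A standalone
sketch of that accumulate-and-correct scheme:

    #include <stdint.h>

    struct tick_clock {
        uint64_t curr_time;     /* ticks, drift-corrected */
        int64_t delta_sum;      /* accumulated drift, microseconds */
        int64_t diff;           /* net ticks added/removed so far */
        int hz;                 /* nominal ticks per second */
    };

    /*
     * Advance the clock by one nominal tick that actually lasted
     * 'last_us' microseconds, correcting curr_time whenever a full
     * tick of drift has accumulated.
     */
    static void
    tick_clock_advance(struct tick_clock *tc, int64_t last_us)
    {
        int64_t tick_us = 1000000 / tc->hz;     /* nominal tick length */

        tc->curr_time++;
        tc->delta_sum += last_us - tick_us;
        if (tc->delta_sum >= tick_us) {         /* running slow: catch up */
            tc->delta_sum -= tick_us;
            tc->curr_time++;
            tc->diff++;
        } else if (tc->delta_sum <= -tick_us) { /* running fast: hold back */
            tc->delta_sum += tick_us;
            tc->curr_time--;
            tc->diff--;
        }
    }
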
783 n = m->m_nextpkt; in dummynet_send()
784 m->m_nextpkt = NULL; in dummynet_send()
793 ifp = ifnet_byindexgen(pkt->if_index, pkt->if_idxgen); in dummynet_send()
794 if (((pkt->dn_dir == (DIR_OUT | PROTO_LAYER2)) || in dummynet_send()
795 (pkt->dn_dir == (DIR_OUT | PROTO_LAYER2 | PROTO_IPV6))) && in dummynet_send()
799 dst = pkt->dn_dir; in dummynet_send()
800 tag->m_tag_cookie = MTAG_IPFW_RULE; in dummynet_send()
801 tag->m_tag_id = 0; in dummynet_send()
839 if (m->m_len < ETHER_HDR_LEN && in dummynet_send()
845 ether_demux(m->m_pkthdr.rcvif, m); in dummynet_send()
879 dt->rule = fwa->rule; in tag_mbuf()
881 dt->rule.info &= (IPFW_ONEPASS | IPFW_IS_DUMMYNET); in tag_mbuf()
882 dt->dn_dir = dir; in tag_mbuf()
883 if (fwa->flags & IPFW_ARGS_OUT && fwa->ifp != NULL) { in tag_mbuf()
885 dt->if_index = fwa->ifp->if_index; in tag_mbuf()
886 dt->if_idxgen = fwa->ifp->if_idxgen; in tag_mbuf()
888 /* dt->output_time is updated as we move through */ in tag_mbuf()
889 dt->output_time = V_dn_cfg.curr_time; in tag_mbuf()
890 dt->iphdr_off = (dir & PROTO_LAYER2) ? ETHER_HDR_LEN : 0; in tag_mbuf()
909 fs_id = (fwa->rule.info & IPFW_INFO_MASK) + in dummynet_io()
910 ((fwa->rule.info & IPFW_IS_PIPE) ? 2*DN_MAX_ID : 0); in dummynet_io()
912 if (fwa->flags & IPFW_ARGS_IN) in dummynet_io()
916 if (fwa->flags & IPFW_ARGS_ETHER) in dummynet_io()
918 else if (fwa->flags & IPFW_ARGS_IP6) in dummynet_io()
929 if (fs->sched == NULL) /* should not happen */ in dummynet_io()
932 si = ipdn_si_find(fs->sched, &(fwa->f_id)); in dummynet_io()
939 if (fs->sched->fp->flags & DN_MULTIQUEUE) { in dummynet_io()
940 q = ipdn_q_find(fs, si, &(fwa->f_id)); in dummynet_io()
944 if (fs->sched->fp->enqueue(si, q, m)) { in dummynet_io()
949 V_dn_cfg.io_pkt_drop--; in dummynet_io()
954 if (si->kflags & DN_ACTIVE) { in dummynet_io()
960 if (si->idle_time < V_dn_cfg.curr_time) { in dummynet_io()
962 struct dn_link *p = &fs->sched->link; in dummynet_io()
964 si->sched_time = V_dn_cfg.curr_time; in dummynet_io()
965 si->credit = V_dn_cfg.io_fast ? p->bandwidth : 0; in dummynet_io()
966 if (p->burst) { in dummynet_io()
967 uint64_t burst = (V_dn_cfg.curr_time - si->idle_time) * p->bandwidth; in dummynet_io()
968 if (burst > p->burst) in dummynet_io()
969 burst = p->burst; in dummynet_io()
970 si->credit += burst; in dummynet_io()
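
When a scheduler instance wakes up after being idle, the lines above give
it a starting credit: p->bandwidth when the io_fast optimization is
enabled (so the first packet can be handed straight back to ipfw), plus a
burst allowance proportional to how long the link sat idle, capped at the
configured p->burst.  A small sketch of that computation, assuming
bandwidth is expressed in bits per tick and idle time in ticks:

    #include <stdint.h>

    /*
     * Credit to grant when a link wakes up.
     *   io_fast:    start with one bandwidth quantum for immediate send.
     *   idle_ticks: how long the link has been idle.
     *   bw:         bits earned per tick.
     *   max_burst:  configured cap on the accumulated burst, in bits.
     */
    static int64_t
    wakeup_credit(int io_fast, uint64_t idle_ticks, uint64_t bw,
        uint64_t max_burst)
    {
        int64_t credit = io_fast ? (int64_t)bw : 0;

        if (max_burst > 0) {
            uint64_t burst = idle_ticks * bw;

            if (burst > max_burst)
                burst = max_burst;
            credit += (int64_t)burst;
        }
        return (credit);
    }
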
976 /* optimization -- pass it back to ipfw for immediate send */ in dummynet_io()
985 tag->m_tag_cookie = MTAG_IPFW_RULE; in dummynet_io()
986 tag->m_tag_id = 0; in dummynet_io()
988 if (m->m_nextpkt != NULL) { in dummynet_io()
990 m->m_nextpkt = NULL; in dummynet_io()
1008 return (fs && (fs->fs.flags & DN_NOERROR)) ? 0 : ENOBUFS; in dummynet_io()