Lines matching refs:CAKE_QUEUES

80 #define CAKE_QUEUES (1024)  macro
151 struct cake_flow flows[CAKE_QUEUES];
152 u32 backlogs[CAKE_QUEUES];
153 u32 tags[CAKE_QUEUES]; /* for set association */
154 u16 overflow_idx[CAKE_QUEUES];
155 struct cake_host hosts[CAKE_QUEUES]; /* for triple isolation */
205 struct cake_heap_entry overflow_heap[CAKE_QUEUES * CAKE_MAX_TINS];
300 static u16 quantum_div[CAKE_QUEUES + 1] = {0};
647 q->hosts[flow->srchost].srchost_bulk_flow_count < CAKE_QUEUES)) in cake_inc_srchost_bulk_flow_count()
665 q->hosts[flow->dsthost].dsthost_bulk_flow_count < CAKE_QUEUES)) in cake_inc_dsthost_bulk_flow_count()
782 reduced_hash = flow_hash % CAKE_QUEUES; in cake_hash()
845 srchost_idx = srchost_hash % CAKE_QUEUES; in cake_hash()
869 dsthost_idx = dsthost_hash % CAKE_QUEUES; in cake_hash()
1467 static const u32 a = CAKE_MAX_TINS * CAKE_QUEUES; in cake_heapify()
1504 while (i > 0 && i < CAKE_MAX_TINS * CAKE_QUEUES) { in cake_heapify_up()
1564 for (i = CAKE_MAX_TINS * CAKE_QUEUES / 2 - 1; i >= 0; i--) in cake_drop()
1731 if (TC_H_MIN(res.classid) <= CAKE_QUEUES) in cake_classify()
1733 if (TC_H_MAJ(res.classid) <= (CAKE_QUEUES << 16)) in cake_classify()
1971 for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++) in cake_clear_tin()
2749 for (i = 1; i <= CAKE_QUEUES; i++) in cake_init()
2767 for (j = 0; j < CAKE_QUEUES; j++) { in cake_init()
3010 if (idx < CAKE_QUEUES * q->tin_cnt) { in cake_dump_class_stats()
3012 &q->tins[q->tin_order[idx / CAKE_QUEUES]]; in cake_dump_class_stats()
3015 flow = &b->flows[idx % CAKE_QUEUES]; in cake_dump_class_stats()
3026 qs.backlog = b->backlogs[idx % CAKE_QUEUES]; in cake_dump_class_stats()
3086 for (j = 0; j < CAKE_QUEUES; j++) { in cake_walk()
3091 if (!tc_qdisc_stats_dump(sch, i * CAKE_QUEUES + j + 1, in cake_walk()