/*
 * Copyright (c) 2016 Qualcomm Atheros, Inc
 *
 * GPL v2
 *
 * Based on net/sched/sch_fq_codel.c
 */
#ifndef __NET_SCHED_FQ_IMPL_H
#define __NET_SCHED_FQ_IMPL_H

#include <net/fq.h>

/* functions that are embedded into the includer */

static void fq_adjust_removal(struct fq *fq,
			      struct fq_flow *flow,
			      struct sk_buff *skb)
{
	struct fq_tin *tin = flow->tin;

	tin->backlog_bytes -= skb->len;
	tin->backlog_packets--;
	flow->backlog -= skb->len;
	fq->backlog--;
	fq->memory_usage -= skb->truesize;
}

/* keep fq->backlogs sorted by descending backlog after a removal */
static void fq_rejigger_backlog(struct fq *fq, struct fq_flow *flow)
{
	struct fq_flow *i;

	if (flow->backlog == 0) {
		list_del_init(&flow->backlogchain);
	} else {
		i = flow;

		list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
			if (i->backlog < flow->backlog)
				break;

		list_move_tail(&flow->backlogchain,
			       &i->backlogchain);
	}
}

static struct sk_buff *fq_flow_dequeue(struct fq *fq,
				       struct fq_flow *flow)
{
	struct sk_buff *skb;

	lockdep_assert_held(&fq->lock);

	skb = __skb_dequeue(&flow->queue);
	if (!skb)
		return NULL;

	fq_adjust_removal(fq, flow, skb);
	fq_rejigger_backlog(fq, flow);

	return skb;
}

/* deficit round-robin dequeue across the tin's new and old flow lists */
static struct sk_buff *fq_tin_dequeue(struct fq *fq,
				      struct fq_tin *tin,
				      fq_tin_dequeue_t dequeue_func)
{
	struct fq_flow *flow;
	struct list_head *head;
	struct sk_buff *skb;

	lockdep_assert_held(&fq->lock);

begin:
	head = &tin->new_flows;
	if (list_empty(head)) {
		head = &tin->old_flows;
		if (list_empty(head))
			return NULL;
	}

	flow = list_first_entry(head, struct fq_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += fq->quantum;
		list_move_tail(&flow->flowchain,
			       &tin->old_flows);
		goto begin;
	}

	skb = dequeue_func(fq, tin, flow);
	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &tin->new_flows) &&
		    !list_empty(&tin->old_flows)) {
			list_move_tail(&flow->flowchain, &tin->old_flows);
		} else {
			list_del_init(&flow->flowchain);
			flow->tin = NULL;
		}
		goto begin;
	}

	flow->deficit -= skb->len;
	tin->tx_bytes += skb->len;
	tin->tx_packets++;

	return skb;
}

static struct fq_flow *fq_flow_classify(struct fq *fq,
					struct fq_tin *tin,
					struct sk_buff *skb,
					fq_flow_get_default_t get_default_func)
{
	struct fq_flow *flow;
	u32 hash;
	u32 idx;

	lockdep_assert_held(&fq->lock);

	hash = skb_get_hash_perturb(skb, fq->perturbation);
	idx = reciprocal_scale(hash, fq->flows_cnt);
	flow = &fq->flows[idx];

	if (flow->tin && flow->tin != tin) {
		flow = get_default_func(fq, tin, idx, skb);
		tin->collisions++;
		fq->collisions++;
	}

	if (!flow->tin)
		tin->flows++;

	return flow;
}

/* keep the flow positioned in fq->backlogs, sorted largest backlog first */
static void fq_recalc_backlog(struct fq *fq,
			      struct fq_tin *tin,
			      struct fq_flow *flow)
{
	struct fq_flow *i;

	if (list_empty(&flow->backlogchain))
		list_add_tail(&flow->backlogchain, &fq->backlogs);

	i = flow;
	list_for_each_entry_continue_reverse(i, &fq->backlogs,
					     backlogchain)
		if (i->backlog > flow->backlog)
			break;

	list_move(&flow->backlogchain, &i->backlogchain);
}

static void fq_tin_enqueue(struct fq *fq,
			   struct fq_tin *tin,
			   struct sk_buff *skb,
			   fq_skb_free_t free_func,
			   fq_flow_get_default_t get_default_func)
{
	struct fq_flow *flow;

	lockdep_assert_held(&fq->lock);

	flow = fq_flow_classify(fq, tin, skb, get_default_func);

	flow->tin = tin;
	flow->backlog += skb->len;
	tin->backlog_bytes += skb->len;
	tin->backlog_packets++;
	fq->memory_usage += skb->truesize;
	fq->backlog++;

	fq_recalc_backlog(fq, tin, flow);

	if (list_empty(&flow->flowchain)) {
		flow->deficit = fq->quantum;
		list_add_tail(&flow->flowchain,
			      &tin->new_flows);
	}

	__skb_queue_tail(&flow->queue, skb);

	if (fq->backlog > fq->limit || fq->memory_usage > fq->memory_limit) {
		/* over limit: drop from the flow with the largest backlog */
		flow = list_first_entry_or_null(&fq->backlogs,
						struct fq_flow,
						backlogchain);
		if (!flow)
			return;

		skb = fq_flow_dequeue(fq, flow);
		if (!skb)
			return;

		free_func(fq, flow->tin, flow, skb);

		flow->tin->overlimit++;
		fq->overlimit++;
		if (fq->memory_usage > fq->memory_limit)
			fq->overmemory++;
	}
}

static void fq_flow_filter(struct fq *fq,
			   struct fq_flow *flow,
			   fq_skb_filter_t filter_func,
			   void *filter_data,
			   fq_skb_free_t free_func)
{
	struct fq_tin *tin = flow->tin;
	struct sk_buff *skb, *tmp;

	lockdep_assert_held(&fq->lock);

	skb_queue_walk_safe(&flow->queue, skb, tmp) {
		if (!filter_func(fq, tin, flow, skb, filter_data))
			continue;

		__skb_unlink(skb, &flow->queue);
		fq_adjust_removal(fq, flow, skb);
		free_func(fq, tin, flow, skb);
	}

	fq_rejigger_backlog(fq, flow);
}

static void fq_tin_filter(struct fq *fq,
			  struct fq_tin *tin,
			  fq_skb_filter_t filter_func,
			  void *filter_data,
			  fq_skb_free_t free_func)
{
	struct fq_flow *flow;

	lockdep_assert_held(&fq->lock);

	list_for_each_entry(flow, &tin->new_flows, flowchain)
		fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
	list_for_each_entry(flow, &tin->old_flows, flowchain)
		fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
}

static void fq_flow_reset(struct fq *fq,
			  struct fq_flow *flow,
			  fq_skb_free_t free_func)
{
	struct sk_buff *skb;

	while ((skb = fq_flow_dequeue(fq, flow)))
		free_func(fq, flow->tin, flow, skb);

	if (!list_empty(&flow->flowchain))
		list_del_init(&flow->flowchain);

	if (!list_empty(&flow->backlogchain))
		list_del_init(&flow->backlogchain);

	flow->tin = NULL;

	WARN_ON_ONCE(flow->backlog);
}

static void fq_tin_reset(struct fq *fq,
			 struct fq_tin *tin,
			 fq_skb_free_t free_func)
{
	struct list_head *head;
	struct fq_flow *flow;

	for (;;) {
		head = &tin->new_flows;
		if (list_empty(head)) {
			head = &tin->old_flows;
			if (list_empty(head))
				break;
		}

		flow = list_first_entry(head, struct fq_flow, flowchain);
		fq_flow_reset(fq, flow, free_func);
	}

	WARN_ON_ONCE(tin->backlog_bytes);
	WARN_ON_ONCE(tin->backlog_packets);
}

static void fq_flow_init(struct fq_flow *flow)
{
	INIT_LIST_HEAD(&flow->flowchain);
	INIT_LIST_HEAD(&flow->backlogchain);
	__skb_queue_head_init(&flow->queue);
}

static void fq_tin_init(struct fq_tin *tin)
{
	INIT_LIST_HEAD(&tin->new_flows);
	INIT_LIST_HEAD(&tin->old_flows);
}

static int fq_init(struct fq *fq, int flows_cnt)
{
	int i;

	memset(fq, 0, sizeof(fq[0]));
	INIT_LIST_HEAD(&fq->backlogs);
	spin_lock_init(&fq->lock);
	fq->flows_cnt = max_t(u32, flows_cnt, 1);
	fq->perturbation = prandom_u32();
	fq->quantum = 300;
	fq->limit = 8192;
	fq->memory_limit = 16 << 20; /* 16 MBytes */

	fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
	if (!fq->flows)
		return -ENOMEM;

	for (i = 0; i < fq->flows_cnt; i++)
		fq_flow_init(&fq->flows[i]);

	return 0;
}

static void fq_reset(struct fq *fq,
		     fq_skb_free_t free_func)
{
	int i;

	for (i = 0; i < fq->flows_cnt; i++)
		fq_flow_reset(fq, &fq->flows[i], free_func);

	kfree(fq->flows);
	fq->flows = NULL;
}

#endif
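
/*
 * Illustrative sketch (not part of this header, kept under #if 0): one way
 * an includer might embed the helpers above.  The callback typedefs
 * (fq_tin_dequeue_t, fq_skb_free_t, fq_flow_get_default_t) come from
 * <net/fq.h>; everything named example_* below is hypothetical, and the
 * locking and skb-freeing choices are assumptions of this sketch, not
 * requirements imposed by this file.
 */
#if 0
struct example_queue {
	struct fq fq;
	struct fq_tin tin;
	struct fq_flow default_flow;	/* fallback slot for hash collisions */
};

static struct sk_buff *example_dequeue(struct fq *fq, struct fq_tin *tin,
				       struct fq_flow *flow)
{
	/* an includer could run AQM on the flow here before returning */
	return fq_flow_dequeue(fq, flow);
}

static void example_free(struct fq *fq, struct fq_tin *tin,
			 struct fq_flow *flow, struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}

static struct fq_flow *example_get_default(struct fq *fq, struct fq_tin *tin,
					   int idx, struct sk_buff *skb)
{
	struct example_queue *q = container_of(fq, struct example_queue, fq);

	/* hand back a flow this tin owns when the hashed slot collides */
	return &q->default_flow;
}

static int example_setup(struct example_queue *q)
{
	int err = fq_init(&q->fq, 1024);

	if (err)
		return err;

	fq_tin_init(&q->tin);
	fq_flow_init(&q->default_flow);
	return 0;
}

static void example_xmit(struct example_queue *q, struct sk_buff *skb)
{
	spin_lock_bh(&q->fq.lock);
	fq_tin_enqueue(&q->fq, &q->tin, skb,
		       example_free, example_get_default);
	skb = fq_tin_dequeue(&q->fq, &q->tin, example_dequeue);
	spin_unlock_bh(&q->fq.lock);

	if (skb) {
		/* ... hand skb to the driver/hardware ... */
	}
}
#endif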