// SPDX-License-Identifier: GPL-2.0-only
/*
 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 */

#include <net/mac80211.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include "queue.h"
#include "cw1200.h"
#include "debug.h"

/* private */ struct cw1200_queue_item
{
	struct list_head head;
	struct sk_buff *skb;
	u32 packet_id;
	unsigned long queue_timestamp;
	unsigned long xmit_timestamp;
	struct cw1200_txpriv txpriv;
	u8 generation;
};

static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;

	if (queue->tx_locked_cnt++ == 0) {
		pr_debug("[TX] Queue %d is locked.\n", queue->queue_id);
		ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
	}
}

static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;

	BUG_ON(!queue->tx_locked_cnt);
	if (--queue->tx_locked_cnt == 0) {
		pr_debug("[TX] Queue %d is unlocked.\n", queue->queue_id);
		ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
	}
}

static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation,
					 u8 *queue_id, u8 *item_generation,
					 u8 *item_id)
{
	*item_id = (packet_id >> 0) & 0xFF;
	*item_generation = (packet_id >> 8) & 0xFF;
	*queue_id = (packet_id >> 16) & 0xFF;
	*queue_generation = (packet_id >> 24) & 0xFF;
}

static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id,
					    u8 item_generation, u8 item_id)
{
	return ((u32)item_id << 0) |
	       ((u32)item_generation << 8) |
	       ((u32)queue_id << 16) |
	       ((u32)queue_generation << 24);
}
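/*
 * A packet_id is an opaque 32-bit cookie passed down to the firmware
 * and parsed back out of TX confirmations:
 *
 *   31          24 23          16 15           8 7            0
 *  +--------------+--------------+--------------+--------------+
 *  |  queue gen   |   queue id   |   item gen   |   item id    |
 *  +--------------+--------------+--------------+--------------+
 *
 * The two generation fields let cw1200_queue_requeue(), _remove() and
 * _get_skb() detect stale ids that refer to a recycled pool item or to
 * a queue that has been cleared since the frame was queued.
 */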
static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats,
				 struct list_head *gc_list)
{
	struct cw1200_queue_item *item, *tmp;

	list_for_each_entry_safe(item, tmp, gc_list, head) {
		list_del(&item->head);
		stats->skb_dtor(stats->priv, item->skb, &item->txpriv);
		kfree(item);
	}
}

static void cw1200_queue_register_post_gc(struct list_head *gc_list,
					  struct cw1200_queue_item *item)
{
	struct cw1200_queue_item *gc_item;

	gc_item = kmemdup(item, sizeof(struct cw1200_queue_item),
			  GFP_ATOMIC);
	BUG_ON(!gc_item);
	list_add_tail(&gc_item->head, gc_list);
}
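/*
 * TTL garbage collection is split in two phases: __cw1200_queue_gc()
 * walks the queue under queue->lock, returns expired items to the free
 * pool and registers a copy of each on a private gc_list; the skb
 * destructors are then run over that list by cw1200_queue_post_gc()
 * only after the lock has been dropped, so skb_dtor is never invoked
 * with the queue spinlock held.
 */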
static void __cw1200_queue_gc(struct cw1200_queue *queue,
			      struct list_head *head,
			      bool unlock)
{
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item = NULL, *iter, *tmp;
	bool wakeup_stats = false;

	list_for_each_entry_safe(iter, tmp, &queue->queue, head) {
		if (time_is_after_jiffies(iter->queue_timestamp + queue->ttl)) {
			/* First unexpired item; everything behind it is newer. */
			item = iter;
			break;
		}
		--queue->num_queued;
		--queue->link_map_cache[iter->txpriv.link_id];
		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[iter->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
		cw1200_debug_tx_ttl(stats->priv);
		cw1200_queue_register_post_gc(head, iter);
		iter->skb = NULL;
		list_move_tail(&iter->head, &queue->free_pool);
	}

	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);

	if (queue->overfull) {
		if (queue->num_queued <= (queue->capacity >> 1)) {
			queue->overfull = false;
			if (unlock)
				__cw1200_queue_unlock(queue);
		} else if (item) {
			/* Re-arm the timer for the next expiry. */
			unsigned long tmo = item->queue_timestamp + queue->ttl;

			mod_timer(&queue->gc, tmo);
			cw1200_pm_stay_awake(&stats->priv->pm_state,
					     tmo - jiffies);
		}
	}
}

static void cw1200_queue_gc(struct timer_list *t)
{
	LIST_HEAD(list);
	struct cw1200_queue *queue = from_timer(queue, t, gc);

	spin_lock_bh(&queue->lock);
	__cw1200_queue_gc(queue, &list, true);
	spin_unlock_bh(&queue->lock);
	cw1200_queue_post_gc(queue->stats, &list);
}

int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
			    size_t map_capacity,
			    cw1200_queue_skb_dtor_t skb_dtor,
			    struct cw1200_common *priv)
{
	memset(stats, 0, sizeof(*stats));
	stats->map_capacity = map_capacity;
	stats->skb_dtor = skb_dtor;
	stats->priv = priv;
	spin_lock_init(&stats->lock);
	init_waitqueue_head(&stats->wait_link_id_empty);

	stats->link_map_cache = kcalloc(map_capacity, sizeof(int),
					GFP_KERNEL);
	if (!stats->link_map_cache)
		return -ENOMEM;

	return 0;
}

int cw1200_queue_init(struct cw1200_queue *queue,
		      struct cw1200_queue_stats *stats,
		      u8 queue_id,
		      size_t capacity,
		      unsigned long ttl)
{
	size_t i;

	memset(queue, 0, sizeof(*queue));
	queue->stats = stats;
	queue->capacity = capacity;
	queue->queue_id = queue_id;
	queue->ttl = ttl;
	INIT_LIST_HEAD(&queue->queue);
	INIT_LIST_HEAD(&queue->pending);
	INIT_LIST_HEAD(&queue->free_pool);
	spin_lock_init(&queue->lock);
	timer_setup(&queue->gc, cw1200_queue_gc, 0);

	queue->pool = kcalloc(capacity, sizeof(struct cw1200_queue_item),
			      GFP_KERNEL);
	if (!queue->pool)
		return -ENOMEM;

	queue->link_map_cache = kcalloc(stats->map_capacity, sizeof(int),
					GFP_KERNEL);
	if (!queue->link_map_cache) {
		kfree(queue->pool);
		queue->pool = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < capacity; ++i)
		list_add_tail(&queue->pool[i].head, &queue->free_pool);

	return 0;
}

int cw1200_queue_clear(struct cw1200_queue *queue)
{
	int i;
	LIST_HEAD(gc_list);
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item, *tmp;

	spin_lock_bh(&queue->lock);
	/* Bump the generation: all outstanding packet ids become stale. */
	queue->generation++;
	list_splice_tail_init(&queue->queue, &queue->pending);
	list_for_each_entry_safe(item, tmp, &queue->pending, head) {
		WARN_ON(!item->skb);
		cw1200_queue_register_post_gc(&gc_list, item);
		item->skb = NULL;
		list_move_tail(&item->head, &queue->free_pool);
	}
	queue->num_queued = 0;
	queue->num_pending = 0;

	spin_lock_bh(&stats->lock);
	for (i = 0; i < stats->map_capacity; ++i) {
		stats->num_queued -= queue->link_map_cache[i];
		stats->link_map_cache[i] -= queue->link_map_cache[i];
		queue->link_map_cache[i] = 0;
	}
	spin_unlock_bh(&stats->lock);
	if (queue->overfull) {
		queue->overfull = false;
		__cw1200_queue_unlock(queue);
	}
	spin_unlock_bh(&queue->lock);
	wake_up(&stats->wait_link_id_empty);
	cw1200_queue_post_gc(stats, &gc_list);
	return 0;
}

void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats)
{
	kfree(stats->link_map_cache);
	stats->link_map_cache = NULL;
}

void cw1200_queue_deinit(struct cw1200_queue *queue)
{
	cw1200_queue_clear(queue);
	del_timer_sync(&queue->gc);
	INIT_LIST_HEAD(&queue->free_pool);
	kfree(queue->pool);
	kfree(queue->link_map_cache);
	queue->pool = NULL;
	queue->link_map_cache = NULL;
	queue->capacity = 0;
}

size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
				   u32 link_id_map)
{
	size_t ret;
	int i, bit;
	size_t map_capacity = queue->stats->map_capacity;

	if (!link_id_map)
		return 0;

	spin_lock_bh(&queue->lock);
	if (link_id_map == (u32)-1) {
		/* All links: frames queued but not yet taken for TX. */
		ret = queue->num_queued - queue->num_pending;
	} else {
		ret = 0;
		for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) {
			if (link_id_map & bit)
				ret += queue->link_map_cache[i];
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}
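/*
 * Flow control uses two watermarks: cw1200_queue_put() marks the queue
 * overfull and stops the matching mac80211 queue once fewer than
 * num_present_cpus() free slots remain (TX may be running on the other
 * CPUs in parallel), and the queue is woken again only after it has
 * drained to half of its capacity, either in cw1200_queue_remove(), in
 * the GC above, or on a queue clear.
 */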
int cw1200_queue_put(struct cw1200_queue *queue,
		     struct sk_buff *skb,
		     struct cw1200_txpriv *txpriv)
{
	int ret = 0;
	struct cw1200_queue_stats *stats = queue->stats;

	if (txpriv->link_id >= queue->stats->map_capacity)
		return -EINVAL;

	spin_lock_bh(&queue->lock);
	if (!WARN_ON(list_empty(&queue->free_pool))) {
		struct cw1200_queue_item *item = list_first_entry(
			&queue->free_pool, struct cw1200_queue_item, head);
		BUG_ON(item->skb);

		list_move_tail(&item->head, &queue->queue);
		item->skb = skb;
		item->txpriv = *txpriv;
		item->generation = 0;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		item->queue_timestamp = jiffies;

		++queue->num_queued;
		++queue->link_map_cache[txpriv->link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[txpriv->link_id];
		spin_unlock_bh(&stats->lock);

		/* TX may happen in parallel sometimes.
		 * Leave extra queue slots so we don't overflow.
		 */
		if (!queue->overfull &&
		    queue->num_queued >=
		    (queue->capacity - (num_present_cpus() - 1))) {
			queue->overfull = true;
			__cw1200_queue_lock(queue);
			mod_timer(&queue->gc, jiffies);
		}
	} else {
		ret = -ENOENT;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

int cw1200_queue_get(struct cw1200_queue *queue,
		     u32 link_id_map,
		     struct wsm_tx **tx,
		     struct ieee80211_tx_info **tx_info,
		     const struct cw1200_txpriv **txpriv)
{
	int ret = -ENOENT;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	bool wakeup_stats = false;

	spin_lock_bh(&queue->lock);
	list_for_each_entry(item, &queue->queue, head) {
		if (link_id_map & BIT(item->txpriv.link_id)) {
			ret = 0;
			break;
		}
	}

	if (!WARN_ON(ret)) {
		*tx = (struct wsm_tx *)item->skb->data;
		*tx_info = IEEE80211_SKB_CB(item->skb);
		*txpriv = &item->txpriv;
		(*tx)->packet_id = item->packet_id;
		list_move_tail(&item->head, &queue->pending);
		++queue->num_pending;
		--queue->link_map_cache[item->txpriv.link_id];
		item->xmit_timestamp = jiffies;

		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[item->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
	}
	spin_unlock_bh(&queue->lock);
	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);
	return ret;
}

int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		/* Bump the item generation so the old packet_id is
		 * invalidated.
		 */
		item->generation = ++item_generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue_generation,
							    queue_id,
							    item_generation,
							    item_id);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	struct sk_buff *gc_skb = NULL;
	struct cw1200_txpriv gc_txpriv;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		gc_txpriv = item->txpriv;
		gc_skb = item->skb;
		item->skb = NULL;
		--queue->num_pending;
		--queue->num_queued;
		++queue->num_sent;
		++item->generation;
		/* Do not use list_move_tail here, but list_move:
		 * try to utilize cache row.
		 */
		list_move(&item->head, &queue->free_pool);

		if (queue->overfull &&
		    (queue->num_queued <= (queue->capacity >> 1))) {
			queue->overfull = false;
			__cw1200_queue_unlock(queue);
		}
	}
	spin_unlock_bh(&queue->lock);

	/* Run the skb destructor outside of queue->lock. */
	if (gc_skb)
		stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv);

	return ret;
}

int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
			 struct sk_buff **skb,
			 const struct cw1200_txpriv **txpriv)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		*skb = item->skb;
		*txpriv = &item->txpriv;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

void cw1200_queue_lock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_lock(queue);
	spin_unlock_bh(&queue->lock);
}

void cw1200_queue_unlock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_unlock(queue);
	spin_unlock_bh(&queue->lock);
}
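/*
 * Report the oldest xmit_timestamp among pending frames, skipping the
 * frame identified by pending_frame_id (presumably the one the caller
 * is currently processing). *timestamp is only ever moved backwards in
 * time; the return value tells whether anything is pending at all.
 */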
bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
				     unsigned long *timestamp,
				     u32 pending_frame_id)
{
	struct cw1200_queue_item *item;
	bool ret;

	spin_lock_bh(&queue->lock);
	ret = !list_empty(&queue->pending);
	if (ret) {
		list_for_each_entry(item, &queue->pending, head) {
			if (item->packet_id != pending_frame_id &&
			    time_before(item->xmit_timestamp, *timestamp))
				*timestamp = item->xmit_timestamp;
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
				 u32 link_id_map)
{
	bool empty = true;

	spin_lock_bh(&stats->lock);
	if (link_id_map == (u32)-1) {
		empty = stats->num_queued == 0;
	} else {
		int i;

		for (i = 0; i < stats->map_capacity; ++i) {
			if (link_id_map & BIT(i)) {
				if (stats->link_map_cache[i]) {
					empty = false;
					break;
				}
			}
		}
	}
	spin_unlock_bh(&stats->lock);

	return empty;
}