// SPDX-License-Identifier: GPL-2.0-only
/*
 * Light-weight single-linked queue.
 *
 * Entries are enqueued to the head of an llist, with no blocking.
 * This can happen in any context.
 *
 * Entries are dequeued using a spinlock to protect against multiple
 * access.  The llist contents are staged, in reverse (i.e. FIFO)
 * order, on a "ready" list, which is refreshed from the llist
 * whenever it is exhausted.
 *
 * This is particularly suitable when work items are queued in BH or
 * IRQ context, and where work items are handled one at a time by
 * dedicated threads.
 */
#include <linux/rcupdate.h>
#include <linux/lwq.h>

struct llist_node *__lwq_dequeue(struct lwq *q)
{
	struct llist_node *this;

	if (lwq_empty(q))
		return NULL;
	spin_lock(&q->lock);
	this = q->ready;
	if (!this && !llist_empty(&q->new)) {
		/* ensure queue doesn't appear transiently lwq_empty */
		smp_store_release(&q->ready, (void *)1);
		this = llist_reverse_order(llist_del_all(&q->new));
		if (!this)
			q->ready = NULL;
	}
	if (this)
		q->ready = llist_next(this);
	spin_unlock(&q->lock);
	return this;
}
EXPORT_SYMBOL_GPL(__lwq_dequeue);

/**
 * lwq_dequeue_all - dequeue all currently enqueued objects
 * @q: the queue to dequeue from
 *
 * Remove and return a linked list of llist_nodes of all the objects that were
 * in the queue.  The first on the list will be the object that was least
 * recently enqueued.
 */
struct llist_node *lwq_dequeue_all(struct lwq *q)
{
	struct llist_node *r, *t, **ep;

	if (lwq_empty(q))
		return NULL;

	spin_lock(&q->lock);
	r = q->ready;
	q->ready = NULL;
	t = llist_del_all(&q->new);
	spin_unlock(&q->lock);
	/* append newly-arrived entries, oldest first, after the ready list */
	ep = &r;
	while (*ep)
		ep = &(*ep)->next;
	*ep = llist_reverse_order(t);
	return r;
}
EXPORT_SYMBOL_GPL(lwq_dequeue_all);

#if IS_ENABLED(CONFIG_LWQ_TEST)

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait_bit.h>
#include <linux/kthread.h>
#include <linux/delay.h>

struct tnode {
	struct lwq_node n;
	int i;
	int c;
};

/* Worker thread: repeatedly dequeue an entry, count the handling, re-enqueue. */
static int lwq_exercise(void *qv)
{
	struct lwq *q = qv;
	int cnt;
	struct tnode *t;

	for (cnt = 0; cnt < 10000; cnt++) {
		wait_var_event(q, (t = lwq_dequeue(q, struct tnode, n)) != NULL);
		t->c++;
		if (lwq_enqueue(&t->n, q))
			wake_up_var(q);
	}
	while (!kthread_should_stop())
		schedule_timeout_idle(1);
	return 0;
}

static int lwq_test(void)
{
	int i;
	struct lwq q;
	struct llist_node *l, **t1, *t2;
	struct tnode *t;
	struct task_struct *threads[8];

	printk(KERN_INFO "testing lwq....\n");
	lwq_init(&q);
	printk(KERN_INFO " lwq: run some threads\n");
	for (i = 0; i < ARRAY_SIZE(threads); i++)
		threads[i] = kthread_run(lwq_exercise, &q, "lwq-test-%d", i);
	for (i = 0; i < 100; i++) {
		t = kmalloc(sizeof(*t), GFP_KERNEL);
		if (!t)
			break;
		t->i = i;
		t->c = 0;
		if (lwq_enqueue(&t->n, &q))
			wake_up_var(&q);
	}
	/* wait for threads to exit */
	for (i = 0; i < ARRAY_SIZE(threads); i++)
		if (!IS_ERR_OR_NULL(threads[i]))
			kthread_stop(threads[i]);
	printk(KERN_INFO " lwq: dequeue first 50:");
	for (i = 0; i < 50; i++) {
		if (i && (i % 10) == 0) {
			printk(KERN_CONT "\n");
			printk(KERN_INFO " lwq: ... ");
		}
		t = lwq_dequeue(&q, struct tnode, n);
		if (t)
			printk(KERN_CONT " %d(%d)", t->i, t->c);
		kfree(t);
	}
	printk(KERN_CONT "\n");
	l = lwq_dequeue_all(&q);
	printk(KERN_INFO " lwq: delete the multiples of 3 (test lwq_for_each_safe())\n");
	lwq_for_each_safe(t, t1, t2, &l, n) {
		if ((t->i % 3) == 0) {
			t->i = -1;
			kfree(t);
			/* signal the iterator that this node was removed */
			t = NULL;
		}
	}
	if (l)
		lwq_enqueue_batch(l, &q);
	printk(KERN_INFO " lwq: dequeue remaining:");
	while ((t = lwq_dequeue(&q, struct tnode, n)) != NULL) {
		printk(KERN_CONT " %d", t->i);
		kfree(t);
	}
	printk(KERN_CONT "\n");
	return 0;
}

module_init(lwq_test);
#endif /* CONFIG_LWQ_TEST */