// SPDX-License-Identifier: GPL-2.0
/*
 * Asynchronous refcounty things
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include <linux/closure.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/sched/debug.h>

/*
 * Common tail of closure_put()/closure_sub(): @flags is the value of
 * cl->remaining *after* the atomic decrement.  If the refcount portion
 * (CLOSURE_REMAINING_MASK) hit zero, either requeue the closure to run
 * its continuation, or run the destructor and release the parent ref.
 */
static inline void closure_put_after_sub(struct closure *cl, int flags)
{
	int r = flags & CLOSURE_REMAINING_MASK;

	/* Guard bits set => the refcount field over/underflowed */
	if (WARN(flags & CLOSURE_GUARD_MASK,
		 "closure has guard bits set: %x (%u)",
		 flags & CLOSURE_GUARD_MASK, (unsigned) __fls(r)))
		r &= ~CLOSURE_GUARD_MASK;

	if (!r) {
		/*
		 * Upgrade the control dependency on the zero test to an
		 * acquire; pairs with the _release ordering of the atomic
		 * decrement in closure_put()/closure_sub(), so all prior
		 * work on @cl is visible before we tear it down.
		 */
		smp_acquire__after_ctrl_dep();

		/* At ref 0, only CLOSURE_DESTRUCTOR may legitimately be set */
		WARN(flags & ~CLOSURE_DESTRUCTOR,
		     "closure ref hit 0 with incorrect flags set: %x (%u)",
		     flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags));

		cl->closure_get_happened = false;

		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
			/* cl->fn is a continuation: re-arm the ref and requeue */
			atomic_set(&cl->remaining,
				   CLOSURE_REMAINING_INITIALIZER);
			closure_queue(cl);
		} else {
			/*
			 * Snapshot parent/fn first: the destructor may free
			 * @cl, so @cl must not be touched after it runs.
			 */
			struct closure *parent = cl->parent;
			closure_fn *destructor = cl->fn;

			closure_debug_destroy(cl);

			if (destructor)
				destructor(&cl->work);

			/* Parent ref was taken on our behalf; drop it last */
			if (parent)
				closure_put(parent);
		}
	}
}

/* For clearing flags with the same atomic op as a put */
void closure_sub(struct closure *cl, int v)
{
	closure_put_after_sub(cl, atomic_sub_return_release(v, &cl->remaining));
}
EXPORT_SYMBOL(closure_sub);

/*
 * closure_put - decrement a closure's refcount
 */
void closure_put(struct closure *cl)
{
	closure_put_after_sub(cl, atomic_dec_return_release(&cl->remaining));
}
EXPORT_SYMBOL(closure_put);

/*
 * closure_wake_up - wake up all closures on a wait list, without memory barrier
 */
void __closure_wake_up(struct closure_waitlist *wait_list)
{
	struct llist_node *list;
	struct closure *cl, *t;
	struct llist_node *reverse = NULL;

	/* Atomically grab the whole waitlist; new waiters go on a fresh list */
	list = llist_del_all(&wait_list->list);

	/* We first reverse the list to preserve FIFO ordering and fairness */
	reverse = llist_reverse_order(list);

	/* Then do the wakeups */
	llist_for_each_entry_safe(cl, t, reverse, list) {
		closure_set_waiting(cl, 0);
		/*
		 * Drop CLOSURE_WAITING and the ref closure_wait() took, in
		 * one atomic op (mirrors the single atomic_add there).
		 */
		closure_sub(cl, CLOSURE_WAITING + 1);
	}
}
EXPORT_SYMBOL(__closure_wake_up);

/**
 * closure_wait - add a closure to a waitlist
 * @waitlist: will own a ref on @cl, which will be released when
 * closure_wake_up() is called on @waitlist.
 * @cl: closure pointer.
 *
 * Returns: true if @cl was added, false if it was already on a waitlist
 * (CLOSURE_WAITING already set in cl->remaining).
 */
bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
{
	if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
		return false;

	cl->closure_get_happened = true;
	closure_set_waiting(cl, _RET_IP_);
	/* Set the WAITING flag and take the waitlist's ref in one atomic op */
	atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
	llist_add(&cl->list, &waitlist->list);

	return true;
}
EXPORT_SYMBOL(closure_wait);

/* Per-waiter state for __closure_sync()/__closure_sync_timeout() */
struct closure_syncer {
	struct task_struct	*task;	/* task sleeping in __closure_sync*() */
	int			done;	/* set by closure_sync_fn() when complete */
};

/*
 * Continuation installed by __closure_sync*(): mark the syncer done and
 * wake the sleeping task.  s->task is read under rcu_read_lock() before
 * s->done is set, because once done is observed the waiter may return
 * and @s (on its stack) may cease to exist.
 * NOTE(review): the RCU section presumably keeps the task_struct valid
 * across wake_up_process() — confirm against task-lifetime rules.
 */
static CLOSURE_CALLBACK(closure_sync_fn)
{
	struct closure *cl = container_of(ws, struct closure, work);
	struct closure_syncer *s = cl->s;
	struct task_struct *p;

	rcu_read_lock();
	p = READ_ONCE(s->task);
	s->done = 1;
	wake_up_process(p);
	rcu_read_unlock();
}

/*
 * Sleep (uninterruptibly) until @cl's outstanding refs are dropped and
 * closure_sync_fn() has run.
 */
void __sched __closure_sync(struct closure *cl)
{
	struct closure_syncer s = { .task = current };

	cl->s = &s;
	continue_at(cl, closure_sync_fn, NULL);

	while (1) {
		/* State must be set before the s.done check to avoid a lost wakeup */
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (s.done)
			break;
		schedule();
	}

	__set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL(__closure_sync);

/*
 * Like __closure_sync(), but gives up after @timeout (jiffies).
 * Returns 0 on completion, -ETIME if the timeout expired first.
 */
int __sched __closure_sync_timeout(struct closure *cl, unsigned long timeout)
{
	struct closure_syncer s = { .task = current };
	int ret = 0;

	cl->s = &s;
	continue_at(cl, closure_sync_fn, NULL);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (s.done)
			break;
		if (!timeout) {
			/*
			 * Carefully undo the continue_at() - but only if it
			 * hasn't completed, i.e. the final closure_put() hasn't
			 * happened yet:
			 */
			unsigned old, new, v = atomic_read(&cl->remaining);
			do {
				old = v;
				/*
				 * Ref already 0 or closure already running:
				 * closure_sync_fn() will fire (or has); treat
				 * as completed rather than timing out.
				 */
				if (!old || (old & CLOSURE_RUNNING))
					goto success;

				new = old + CLOSURE_REMAINING_INITIALIZER;
			} while ((v = atomic_cmpxchg(&cl->remaining, old, new)) != old);
			ret = -ETIME;
		}

		timeout = schedule_timeout(timeout);
	}
success:
	__set_current_state(TASK_RUNNING);
	return ret;
}
EXPORT_SYMBOL(__closure_sync_timeout);

#ifdef CONFIG_DEBUG_CLOSURES

/* All live closures (created, not yet destroyed), for the debugfs dump */
static LIST_HEAD(closure_list);
static DEFINE_SPINLOCK(closure_list_lock);

/* Register @cl as live; BUGs on double-initialization */
void closure_debug_create(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_ALIVE;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_add(&cl->all, &closure_list);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_create);

/* Unregister @cl; BUGs on double-destroy or destroying a dead closure */
void closure_debug_destroy(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_DEAD;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_del(&cl->all);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_destroy);

/*
 * debugfs "closures" file: one line per live closure with its refcount,
 * queued ("Q") / running ("R") flags, and wait location if waiting.
 */
static int debug_show(struct seq_file *f, void *data)
{
	struct closure *cl;

	spin_lock_irq(&closure_list_lock);

	list_for_each_entry(cl, &closure_list, all) {
		int r = atomic_read(&cl->remaining);

		seq_printf(f, "%p: %pS -> %pS p %p r %i ",
			   cl, (void *) cl->ip, cl->fn, cl->parent,
			   r & CLOSURE_REMAINING_MASK);

		seq_printf(f, "%s%s\n",
			   test_bit(WORK_STRUCT_PENDING_BIT,
				    work_data_bits(&cl->work)) ? "Q" : "",
			   r & CLOSURE_RUNNING ? "R" : "");

		if (r & CLOSURE_WAITING)
			seq_printf(f, " W %pS\n",
				   (void *) cl->waiting_on);

		seq_puts(f, "\n");
	}

	spin_unlock_irq(&closure_list_lock);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(debug);

static int __init closure_debug_init(void)
{
	/* Read-only for root; contents are kernel pointers/symbols */
	debugfs_create_file("closures", 0400, NULL, NULL, &debug_fops);
	return 0;
}
late_initcall(closure_debug_init)

#endif