// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include "percpu_freelist.h"

int pcpu_freelist_init(struct pcpu_freelist *s)
{
	int cpu;

	s->freelist = alloc_percpu(struct pcpu_freelist_head);
	if (!s->freelist)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);

		raw_spin_lock_init(&head->lock);
		head->first = NULL;
	}
	raw_spin_lock_init(&s->extralist.lock);
	s->extralist.first = NULL;
	return 0;
}

void pcpu_freelist_destroy(struct pcpu_freelist *s)
{
	free_percpu(s->freelist);
}

static inline void pcpu_freelist_push_node(struct pcpu_freelist_head *head,
					   struct pcpu_freelist_node *node)
{
	node->next = head->first;
	WRITE_ONCE(head->first, node);
}

static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
					 struct pcpu_freelist_node *node)
{
	raw_spin_lock(&head->lock);
	pcpu_freelist_push_node(head, node);
	raw_spin_unlock(&head->lock);
}

static inline bool pcpu_freelist_try_push_extra(struct pcpu_freelist *s,
						struct pcpu_freelist_node *node)
{
	if (!raw_spin_trylock(&s->extralist.lock))
		return false;

	pcpu_freelist_push_node(&s->extralist, node);
	raw_spin_unlock(&s->extralist.lock);
	return true;
}

static inline void ___pcpu_freelist_push_nmi(struct pcpu_freelist *s,
					     struct pcpu_freelist_node *node)
{
	int cpu, orig_cpu;

	orig_cpu = raw_smp_processor_id();
	while (1) {
		for_each_cpu_wrap(cpu, cpu_possible_mask, orig_cpu) {
			struct pcpu_freelist_head *head;

			head = per_cpu_ptr(s->freelist, cpu);
			if (raw_spin_trylock(&head->lock)) {
				pcpu_freelist_push_node(head, node);
				raw_spin_unlock(&head->lock);
				return;
			}
		}

		/* cannot lock any per cpu lock, try extralist */
		if (pcpu_freelist_try_push_extra(s, node))
			return;
	}
}

void __pcpu_freelist_push(struct pcpu_freelist *s,
			  struct pcpu_freelist_node *node)
{
	if (in_nmi())
		___pcpu_freelist_push_nmi(s, node);
	else
		___pcpu_freelist_push(this_cpu_ptr(s->freelist), node);
}

void pcpu_freelist_push(struct pcpu_freelist *s,
			struct pcpu_freelist_node *node)
{
	unsigned long flags;

	local_irq_save(flags);
	__pcpu_freelist_push(s, node);
	local_irq_restore(flags);
}

void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
			    u32 nr_elems)
{
	struct pcpu_freelist_head *head;
	int i, cpu, pcpu_entries;

	pcpu_entries = nr_elems / num_possible_cpus() + 1;
	i = 0;

	for_each_possible_cpu(cpu) {
again:
		head = per_cpu_ptr(s->freelist, cpu);
		/* No locking required as this is not visible yet. */
		pcpu_freelist_push_node(head, buf);
		i++;
		buf += elem_size;
		if (i == nr_elems)
			break;
		if (i % pcpu_entries)
			goto again;
	}
}

static struct pcpu_freelist_node *___pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_head *head;
	struct pcpu_freelist_node *node;
	int cpu;

	for_each_cpu_wrap(cpu, cpu_possible_mask, raw_smp_processor_id()) {
		head = per_cpu_ptr(s->freelist, cpu);
		if (!READ_ONCE(head->first))
			continue;
		raw_spin_lock(&head->lock);
		node = head->first;
		if (node) {
			WRITE_ONCE(head->first, node->next);
			raw_spin_unlock(&head->lock);
			return node;
		}
		raw_spin_unlock(&head->lock);
	}

	/* per cpu lists are all empty, try extralist */
	if (!READ_ONCE(s->extralist.first))
		return NULL;
	raw_spin_lock(&s->extralist.lock);
	node = s->extralist.first;
	if (node)
		WRITE_ONCE(s->extralist.first, node->next);
	raw_spin_unlock(&s->extralist.lock);
	return node;
}

static struct pcpu_freelist_node *
___pcpu_freelist_pop_nmi(struct pcpu_freelist *s)
{
	struct pcpu_freelist_head *head;
	struct pcpu_freelist_node *node;
	int cpu;

	for_each_cpu_wrap(cpu, cpu_possible_mask, raw_smp_processor_id()) {
		head = per_cpu_ptr(s->freelist, cpu);
		if (!READ_ONCE(head->first))
			continue;
		if (raw_spin_trylock(&head->lock)) {
			node = head->first;
			if (node) {
				WRITE_ONCE(head->first, node->next);
				raw_spin_unlock(&head->lock);
				return node;
			}
			raw_spin_unlock(&head->lock);
		}
	}

	/* cannot pop from per cpu lists, try extralist */
	if (!READ_ONCE(s->extralist.first) || !raw_spin_trylock(&s->extralist.lock))
		return NULL;
	node = s->extralist.first;
	if (node)
		WRITE_ONCE(s->extralist.first, node->next);
	raw_spin_unlock(&s->extralist.lock);
	return node;
}

struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
{
	if (in_nmi())
		return ___pcpu_freelist_pop_nmi(s);
	return ___pcpu_freelist_pop(s);
}

struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_node *ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = __pcpu_freelist_pop(s);
	local_irq_restore(flags);
	return ret;
}