1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2018 Mellanox Technologies */
3
4 #include <linux/jhash.h>
5 #include <linux/slab.h>
6 #include <linux/xarray.h>
7 #include <linux/hashtable.h>
8 #include <linux/refcount.h>
9 #include <linux/mlx5/driver.h>
10
11 #include "mapping.h"
12
/* Msecs a removed entry stays parked on the pending list before it is
 * actually freed (only when the ctx was created with delayed_removal).
 */
#define MAPPING_GRACE_PERIOD 2000

/* Registry of contexts created via mapping_create_for_id() so callers
 * presenting the same (id, type) key share one context; both the list
 * and membership changes are guarded by shared_ctx_lock.
 */
static LIST_HEAD(shared_ctx_list);
static DEFINE_MUTEX(shared_ctx_lock);
17
/* A map between fixed-size data blobs and allocated u32 IDs.
 * mapping_find() runs under RCU only; add/remove take @lock.
 */
struct mapping_ctx {
	struct xarray xarray;			/* id -> mapping_item */
	DECLARE_HASHTABLE(ht, 8);		/* data hash -> mapping_item, de-dups adds */
	struct mutex lock; /* Guards hashtable and xarray */
	unsigned long max_id;			/* upper bound handed to xa_alloc() */
	size_t data_size;			/* fixed byte size of every mapped datum */
	bool delayed_removal;			/* park freed items for a grace period */
	struct delayed_work dwork;		/* reaper for timed-out pending items */
	struct list_head pending_list;		/* items waiting out their grace period */
	spinlock_t pending_list_lock; /* Guards pending list */
	u8 id[MLX5_SW_IMAGE_GUID_MAX_BYTES];	/* shared-ctx match key (see mapping_create_for_id) */
	u8 id_len;				/* number of valid bytes in @id */
	u8 type;				/* shared-ctx match key, caller-defined */
	struct list_head list;			/* entry in shared_ctx_list */
	refcount_t refcount;			/* users sharing this context */
};
34
/* One datum -> ID mapping; linked into both ctx->ht and ctx->xarray. */
struct mapping_item {
	struct rcu_head rcu;	/* for kfree_rcu() on removal */
	struct list_head list;	/* entry in ctx->pending_list while delayed */
	unsigned long timeout;	/* jiffies stamp after which it may be freed */
	struct hlist_node node;	/* entry in ctx->ht */
	int cnt;		/* add/remove refcount, protected by ctx->lock */
	u32 id;			/* ID allocated from ctx->xarray */
	char data[];		/* ctx->data_size bytes of the mapped datum */
};
44
/*
 * Map @data (ctx->data_size bytes) to an ID, returned in @id.
 *
 * If an identical datum is already mapped, its refcount is bumped and the
 * existing ID is reused; otherwise a new item is allocated and a fresh ID
 * is taken from the xarray.  Returns 0 on success or a negative errno.
 */
int mapping_add(struct mapping_ctx *ctx, void *data, u32 *id)
{
	struct mapping_item *mi;
	u32 hash_key;
	int err;

	mutex_lock(&ctx->lock);

	/* De-dup: look for an existing item holding the same bytes. */
	hash_key = jhash(data, ctx->data_size, 0);
	hash_for_each_possible(ctx->ht, mi, node, hash_key) {
		if (memcmp(data, mi->data, ctx->data_size) == 0)
			goto found;
	}

	err = -ENOMEM;
	mi = kzalloc(sizeof(*mi) + ctx->data_size, GFP_KERNEL);
	if (!mi)
		goto out_unlock;

	memcpy(mi->data, data, ctx->data_size);
	hash_add(ctx->ht, &mi->node, hash_key);

	/* IDs start at 1; 0 stays invalid for callers. */
	err = xa_alloc(&ctx->xarray, &mi->id, mi, XA_LIMIT(1, ctx->max_id),
		       GFP_KERNEL);
	if (err) {
		hash_del(&mi->node);
		kfree(mi);
		goto out_unlock;
	}

found:
	mi->cnt++;
	*id = mi->id;
	mutex_unlock(&ctx->lock);

	return 0;

out_unlock:
	mutex_unlock(&ctx->lock);

	return err;
}
86
/* Erase @mi's ID and free it after an RCU grace period, so lockless
 * readers in mapping_find() can never touch freed memory.
 */
static void mapping_remove_and_free(struct mapping_ctx *ctx,
				    struct mapping_item *mi)
{
	xa_erase(&ctx->xarray, mi->id);
	kfree_rcu(mi, rcu);
}
93
mapping_free_item(struct mapping_ctx * ctx,struct mapping_item * mi)94 static void mapping_free_item(struct mapping_ctx *ctx,
95 struct mapping_item *mi)
96 {
97 if (!ctx->delayed_removal) {
98 mapping_remove_and_free(ctx, mi);
99 return;
100 }
101
102 mi->timeout = jiffies + msecs_to_jiffies(MAPPING_GRACE_PERIOD);
103
104 spin_lock(&ctx->pending_list_lock);
105 list_add_tail(&mi->list, &ctx->pending_list);
106 spin_unlock(&ctx->pending_list_lock);
107
108 schedule_delayed_work(&ctx->dwork, MAPPING_GRACE_PERIOD);
109 }
110
/*
 * Drop one reference to the mapping identified by @id.
 *
 * The item is unlinked and freed (per the ctx removal policy) only when the
 * last reference goes away.  Returns 0 on success, -ENOENT if @id is not
 * currently mapped.
 */
int mapping_remove(struct mapping_ctx *ctx, u32 id)
{
	struct mapping_item *mi;
	int err = 0;

	mutex_lock(&ctx->lock);

	mi = xa_load(&ctx->xarray, (unsigned long)id);
	if (!mi) {
		err = -ENOENT;
	} else if (--mi->cnt <= 0) {
		/* Last user gone: unlink from the hash and free. */
		hash_del(&mi->node);
		mapping_free_item(ctx, mi);
	}

	mutex_unlock(&ctx->lock);

	return err;
}
133
/*
 * Look up @id and copy the associated datum (ctx->data_size bytes) into
 * @data.  Lockless: runs entirely under RCU, relying on kfree_rcu() in the
 * removal path.  Returns 0 on success, -ENOENT if @id is not mapped.
 */
int mapping_find(struct mapping_ctx *ctx, u32 id, void *data)
{
	struct mapping_item *mi;
	int err = 0;

	rcu_read_lock();
	mi = xa_load(&ctx->xarray, (unsigned long)id);
	if (mi)
		memcpy(data, mi->data, ctx->data_size);
	else
		err = -ENOENT;
	rcu_read_unlock();

	return err;
}
152
153 static void
mapping_remove_and_free_list(struct mapping_ctx * ctx,struct list_head * list)154 mapping_remove_and_free_list(struct mapping_ctx *ctx, struct list_head *list)
155 {
156 struct mapping_item *mi;
157
158 list_for_each_entry(mi, list, list)
159 mapping_remove_and_free(ctx, mi);
160 }
161
/*
 * Delayed-work reaper: free every pending item whose grace period has
 * elapsed, then re-arm the work for the earliest timeout still pending.
 */
static void mapping_work_handler(struct work_struct *work)
{
	unsigned long min_timeout = 0, now = jiffies;
	struct mapping_item *mi, *next;
	LIST_HEAD(pending_items);
	struct mapping_ctx *ctx;

	ctx = container_of(work, struct mapping_ctx, dwork.work);

	/* Move expired items onto a private list so they can be freed
	 * after the spinlock is dropped; remember the soonest timeout
	 * among the items that have to stay queued.
	 */
	spin_lock(&ctx->pending_list_lock);
	list_for_each_entry_safe(mi, next, &ctx->pending_list, list) {
		if (time_after(now, mi->timeout))
			list_move(&mi->list, &pending_items);
		else if (!min_timeout ||
			 time_before(mi->timeout, min_timeout))
			min_timeout = mi->timeout;
	}
	spin_unlock(&ctx->pending_list_lock);

	mapping_remove_and_free_list(ctx, &pending_items);

	/* Re-arm only when something is still pending.  min_timeout == 0
	 * doubles as the "nothing pending" sentinel; a stamp that is
	 * exactly 0 would be skipped here - NOTE(review): benign but
	 * worth confirming.
	 */
	if (min_timeout)
		schedule_delayed_work(&ctx->dwork, abs(min_timeout - now));
}
186
mapping_flush_work(struct mapping_ctx * ctx)187 static void mapping_flush_work(struct mapping_ctx *ctx)
188 {
189 if (!ctx->delayed_removal)
190 return;
191
192 cancel_delayed_work_sync(&ctx->dwork);
193 mapping_remove_and_free_list(ctx, &ctx->pending_list);
194 }
195
196 struct mapping_ctx *
mapping_create(size_t data_size,u32 max_id,bool delayed_removal)197 mapping_create(size_t data_size, u32 max_id, bool delayed_removal)
198 {
199 struct mapping_ctx *ctx;
200
201 ctx = kzalloc_obj(*ctx);
202 if (!ctx)
203 return ERR_PTR(-ENOMEM);
204
205 ctx->max_id = max_id ? max_id : UINT_MAX;
206 ctx->data_size = data_size;
207
208 if (delayed_removal) {
209 INIT_DELAYED_WORK(&ctx->dwork, mapping_work_handler);
210 INIT_LIST_HEAD(&ctx->pending_list);
211 spin_lock_init(&ctx->pending_list_lock);
212 ctx->delayed_removal = true;
213 }
214
215 mutex_init(&ctx->lock);
216 xa_init_flags(&ctx->xarray, XA_FLAGS_ALLOC1);
217
218 refcount_set(&ctx->refcount, 1);
219 INIT_LIST_HEAD(&ctx->list);
220
221 return ctx;
222 }
223
224 struct mapping_ctx *
mapping_create_for_id(u8 * id,u8 id_len,u8 type,size_t data_size,u32 max_id,bool delayed_removal)225 mapping_create_for_id(u8 *id, u8 id_len, u8 type, size_t data_size, u32 max_id,
226 bool delayed_removal)
227 {
228 struct mapping_ctx *ctx;
229
230 mutex_lock(&shared_ctx_lock);
231 list_for_each_entry(ctx, &shared_ctx_list, list) {
232 if (ctx->type == type && ctx->id_len == id_len &&
233 !memcmp(id, ctx->id, id_len)) {
234 if (refcount_inc_not_zero(&ctx->refcount))
235 goto unlock;
236 break;
237 }
238 }
239
240 ctx = mapping_create(data_size, max_id, delayed_removal);
241 if (IS_ERR(ctx))
242 goto unlock;
243
244 memcpy(ctx->id, id, id_len);
245 ctx->id_len = id_len;
246 ctx->type = type;
247 list_add(&ctx->list, &shared_ctx_list);
248
249 unlock:
250 mutex_unlock(&shared_ctx_lock);
251 return ctx;
252 }
253
mapping_destroy(struct mapping_ctx * ctx)254 void mapping_destroy(struct mapping_ctx *ctx)
255 {
256 if (!refcount_dec_and_test(&ctx->refcount))
257 return;
258
259 mutex_lock(&shared_ctx_lock);
260 list_del(&ctx->list);
261 mutex_unlock(&shared_ctx_lock);
262
263 mapping_flush_work(ctx);
264 xa_destroy(&ctx->xarray);
265 mutex_destroy(&ctx->lock);
266
267 kfree(ctx);
268 }
269