1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2018 Mellanox Technologies */
3
4 #include <linux/mlx5/vport.h>
5 #include <linux/list.h>
6 #include "lib/devcom.h"
7 #include "lib/mlx5.h"
8 #include "mlx5_core.h"
9
10 static LIST_HEAD(devcom_dev_list);
11 static LIST_HEAD(devcom_comp_list);
12 /* protect device list */
13 static DEFINE_MUTEX(dev_list_lock);
14 /* protect component list */
15 static DEFINE_MUTEX(comp_list_lock);
16
17 #define devcom_for_each_component(iter) \
18 list_for_each_entry(iter, &devcom_comp_list, comp_list)
19
/* A core device registered with devcom; one entry per device on the
 * global devcom_dev_list. Refcounted because each component device
 * (struct mlx5_devcom_comp_dev) holds a reference to it.
 */
struct mlx5_devcom_dev {
	struct list_head list;		/* entry on devcom_dev_list, under dev_list_lock */
	struct mlx5_core_dev *dev;
	struct kref ref;
};
25
/* Match key deciding which devices share a component (see
 * devcom_component_equal()).
 */
struct mlx5_devcom_key {
	u32 flags;			/* MLX5_DEVCOM_MATCH_FLAGS_* */
	union mlx5_devcom_match_key key;
	possible_net_t net;		/* only valid if MLX5_DEVCOM_MATCH_FLAGS_NS is set */
};
31
/* A shared component: groups the devices whose (id, key) match, and
 * dispatches events between them through @handler.
 */
struct mlx5_devcom_comp {
	struct list_head comp_list;	/* entry on devcom_comp_list, under comp_list_lock */
	enum mlx5_devcom_component id;
	struct list_head comp_dev_list_head; /* member devices, under @sem */
	struct mlx5_devcom_key key;
	mlx5_devcom_event_handler_t handler;
	struct kref ref;		/* one reference per member comp_dev */
	bool ready;			/* accessed with READ_ONCE/WRITE_ONCE */
	struct rw_semaphore sem;	/* protects the device list and event delivery */
	struct lock_class_key lock_key;	/* per-component lockdep class for @sem */
};
43
/* Membership of one device in one component: links a registered device
 * (@devc) into @comp's device list together with its private payload.
 */
struct mlx5_devcom_comp_dev {
	struct list_head list;		/* entry on comp->comp_dev_list_head */
	struct mlx5_devcom_comp *comp;
	struct mlx5_devcom_dev *devc;
	void __rcu *data;		/* read under RCU or with comp->sem held */
};
50
devcom_dev_exists(struct mlx5_core_dev * dev)51 static bool devcom_dev_exists(struct mlx5_core_dev *dev)
52 {
53 struct mlx5_devcom_dev *iter;
54
55 list_for_each_entry(iter, &devcom_dev_list, list)
56 if (iter->dev == dev)
57 return true;
58
59 return false;
60 }
61
62 static struct mlx5_devcom_dev *
mlx5_devcom_dev_alloc(struct mlx5_core_dev * dev)63 mlx5_devcom_dev_alloc(struct mlx5_core_dev *dev)
64 {
65 struct mlx5_devcom_dev *devc;
66
67 devc = kzalloc(sizeof(*devc), GFP_KERNEL);
68 if (!devc)
69 return NULL;
70
71 devc->dev = dev;
72 kref_init(&devc->ref);
73 return devc;
74 }
75
76 struct mlx5_devcom_dev *
mlx5_devcom_register_device(struct mlx5_core_dev * dev)77 mlx5_devcom_register_device(struct mlx5_core_dev *dev)
78 {
79 struct mlx5_devcom_dev *devc = NULL;
80
81 mutex_lock(&dev_list_lock);
82
83 if (devcom_dev_exists(dev)) {
84 mlx5_core_err(dev, "devcom device already exists");
85 goto out;
86 }
87
88 devc = mlx5_devcom_dev_alloc(dev);
89 if (!devc)
90 goto out;
91
92 list_add_tail(&devc->list, &devcom_dev_list);
93 out:
94 mutex_unlock(&dev_list_lock);
95 return devc;
96 }
97
/* kref release callback: unlink @devc from the global device list and
 * free it. Runs when the last reference (registration or component
 * membership) is dropped.
 */
static void
mlx5_devcom_dev_release(struct kref *ref)
{
	struct mlx5_devcom_dev *devc = container_of(ref, struct mlx5_devcom_dev, ref);

	mutex_lock(&dev_list_lock);
	list_del(&devc->list);
	mutex_unlock(&dev_list_lock);
	kfree(devc);
}
108
mlx5_devcom_unregister_device(struct mlx5_devcom_dev * devc)109 void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc)
110 {
111 if (!devc)
112 return;
113
114 kref_put(&devc->ref, mlx5_devcom_dev_release);
115 }
116
117 static struct mlx5_devcom_comp *
mlx5_devcom_comp_alloc(u64 id,const struct mlx5_devcom_match_attr * attr,mlx5_devcom_event_handler_t handler)118 mlx5_devcom_comp_alloc(u64 id, const struct mlx5_devcom_match_attr *attr,
119 mlx5_devcom_event_handler_t handler)
120 {
121 struct mlx5_devcom_comp *comp;
122
123 comp = kzalloc(sizeof(*comp), GFP_KERNEL);
124 if (!comp)
125 return NULL;
126
127 comp->id = id;
128 comp->key.key = attr->key;
129 comp->key.flags = attr->flags;
130 if (attr->flags & MLX5_DEVCOM_MATCH_FLAGS_NS)
131 write_pnet(&comp->key.net, attr->net);
132 comp->handler = handler;
133 init_rwsem(&comp->sem);
134 lockdep_register_key(&comp->lock_key);
135 lockdep_set_class(&comp->sem, &comp->lock_key);
136 kref_init(&comp->ref);
137 INIT_LIST_HEAD(&comp->comp_dev_list_head);
138
139 return comp;
140 }
141
/* kref release callback: remove the component from the global component
 * list, unregister its lockdep key and free it. Runs when the last
 * member device drops its reference.
 */
static void
mlx5_devcom_comp_release(struct kref *ref)
{
	struct mlx5_devcom_comp *comp = container_of(ref, struct mlx5_devcom_comp, ref);

	mutex_lock(&comp_list_lock);
	list_del(&comp->comp_list);
	mutex_unlock(&comp_list_lock);
	lockdep_unregister_key(&comp->lock_key);
	kfree(comp);
}
153
/* Attach @devc to @comp with private payload @data: takes a reference
 * on @devc (dropped in devcom_free_comp_dev()) and inserts the new
 * entry at the tail of the component's device list under the component
 * write lock. Returns the new entry, or NULL on allocation failure.
 */
static struct mlx5_devcom_comp_dev *
devcom_alloc_comp_dev(struct mlx5_devcom_dev *devc,
		      struct mlx5_devcom_comp *comp,
		      void *data)
{
	struct mlx5_devcom_comp_dev *devcom;

	devcom = kzalloc(sizeof(*devcom), GFP_KERNEL);
	if (!devcom)
		return NULL;

	kref_get(&devc->ref);
	devcom->devc = devc;
	devcom->comp = comp;
	rcu_assign_pointer(devcom->data, data);

	down_write(&comp->sem);
	list_add_tail(&devcom->list, &comp->comp_dev_list_head);
	up_write(&comp->sem);

	return devcom;
}
176
/* Undo devcom_alloc_comp_dev(): unlink @devcom from its component under
 * the write lock, then drop the device and component references taken
 * at registration time. Either put may free the underlying object.
 */
static void
devcom_free_comp_dev(struct mlx5_devcom_comp_dev *devcom)
{
	struct mlx5_devcom_comp *comp = devcom->comp;

	down_write(&comp->sem);
	list_del(&devcom->list);
	up_write(&comp->sem);

	kref_put(&devcom->devc->ref, mlx5_devcom_dev_release);
	kfree(devcom);
	kref_put(&comp->ref, mlx5_devcom_comp_release);
}
190
191 static bool
devcom_component_equal(struct mlx5_devcom_comp * devcom,enum mlx5_devcom_component id,const struct mlx5_devcom_match_attr * attr)192 devcom_component_equal(struct mlx5_devcom_comp *devcom,
193 enum mlx5_devcom_component id,
194 const struct mlx5_devcom_match_attr *attr)
195 {
196 if (devcom->id != id)
197 return false;
198
199 if (devcom->key.flags != attr->flags)
200 return false;
201
202 if (memcmp(&devcom->key.key, &attr->key, sizeof(devcom->key.key)))
203 return false;
204
205 if (devcom->key.flags & MLX5_DEVCOM_MATCH_FLAGS_NS &&
206 !net_eq(read_pnet(&devcom->key.net), attr->net))
207 return false;
208
209 return true;
210 }
211
212 static struct mlx5_devcom_comp *
devcom_component_get(struct mlx5_devcom_dev * devc,enum mlx5_devcom_component id,const struct mlx5_devcom_match_attr * attr,mlx5_devcom_event_handler_t handler)213 devcom_component_get(struct mlx5_devcom_dev *devc,
214 enum mlx5_devcom_component id,
215 const struct mlx5_devcom_match_attr *attr,
216 mlx5_devcom_event_handler_t handler)
217 {
218 struct mlx5_devcom_comp *comp;
219
220 devcom_for_each_component(comp) {
221 if (devcom_component_equal(comp, id, attr)) {
222 if (handler == comp->handler) {
223 kref_get(&comp->ref);
224 return comp;
225 }
226
227 mlx5_core_err(devc->dev,
228 "Cannot register existing devcom component with different handler\n");
229 return ERR_PTR(-EINVAL);
230 }
231 }
232
233 return NULL;
234 }
235
/* Attach @devc to the component identified by (@id, @attr, @handler),
 * creating the component if it does not exist yet. @data is the
 * per-device payload that peers retrieve through the iterator helpers.
 *
 * Returns the new component device, or NULL on failure (!@devc,
 * allocation failure, or a matching component with a different
 * handler).
 */
struct mlx5_devcom_comp_dev *
mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
			       enum mlx5_devcom_component id,
			       const struct mlx5_devcom_match_attr *attr,
			       mlx5_devcom_event_handler_t handler,
			       void *data)
{
	struct mlx5_devcom_comp_dev *devcom = NULL;
	struct mlx5_devcom_comp *comp;

	if (!devc)
		return NULL;

	mutex_lock(&comp_list_lock);
	/* Reuse a matching component (takes a reference) ... */
	comp = devcom_component_get(devc, id, attr, handler);
	if (IS_ERR(comp))
		goto out_unlock;

	if (!comp) {
		/* ... or create one (kref_init() supplies the reference) */
		comp = mlx5_devcom_comp_alloc(id, attr, handler);
		if (!comp)
			goto out_unlock;

		list_add_tail(&comp->comp_list, &devcom_comp_list);
	}
	mutex_unlock(&comp_list_lock);

	devcom = devcom_alloc_comp_dev(devc, comp, data);
	if (!devcom)
		/* Drop the reference taken above; frees the component if unused */
		kref_put(&comp->ref, mlx5_devcom_comp_release);

	return devcom;

out_unlock:
	mutex_unlock(&comp_list_lock);
	return devcom;
}
273
/* Detach @devcom from its component and release the references taken at
 * registration time. NULL is tolerated.
 */
void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom)
{
	if (devcom)
		devcom_free_comp_dev(devcom);
}
281
mlx5_devcom_comp_get_size(struct mlx5_devcom_comp_dev * devcom)282 int mlx5_devcom_comp_get_size(struct mlx5_devcom_comp_dev *devcom)
283 {
284 struct mlx5_devcom_comp *comp = devcom->comp;
285
286 return kref_read(&comp->ref);
287 }
288
/* Deliver @event to every other device in @devcom's component, under
 * the component write lock. If a handler fails, peers already notified
 * are walked in reverse order and sent @rollback_event, and the first
 * error is returned. Returns 0 on success, -ENODEV for !@devcom.
 */
int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
			   int event, int rollback_event,
			   void *event_data)
{
	struct mlx5_devcom_comp_dev *pos;
	struct mlx5_devcom_comp *comp;
	int err = 0;
	void *data;

	if (!devcom)
		return -ENODEV;

	comp = devcom->comp;
	down_write(&comp->sem);
	list_for_each_entry(pos, &comp->comp_dev_list_head, list) {
		data = rcu_dereference_protected(pos->data, lockdep_is_held(&comp->sem));

		/* Skip the sender itself and entries without data */
		if (pos != devcom && data) {
			err = comp->handler(event, data, event_data);
			if (err)
				goto rollback;
		}
	}

	up_write(&comp->sem);
	return 0;

rollback:
	/* Defensive: pos comes from the loop above and is never the head */
	if (list_entry_is_head(pos, &comp->comp_dev_list_head, list))
		goto out;
	/* Re-notify, in reverse, everything before the failing entry */
	pos = list_prev_entry(pos, list);
	list_for_each_entry_from_reverse(pos, &comp->comp_dev_list_head, list) {
		data = rcu_dereference_protected(pos->data, lockdep_is_held(&comp->sem));

		if (pos != devcom && data)
			comp->handler(rollback_event, data, event_data);
	}
out:
	up_write(&comp->sem);
	return err;
}
330
/* Mark the component ready (or not) for peer iteration.
 * Must be called with the component semaphore held.
 */
void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready)
{
	struct mlx5_devcom_comp *comp = devcom->comp;

	WARN_ON(!rwsem_is_locked(&comp->sem));

	WRITE_ONCE(comp->ready, ready);
}
337
mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev * devcom)338 bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom)
339 {
340 if (!devcom)
341 return false;
342
343 return READ_ONCE(devcom->comp->ready);
344 }
345
mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev * devcom)346 bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev *devcom)
347 {
348 struct mlx5_devcom_comp *comp;
349
350 if (!devcom)
351 return false;
352
353 comp = devcom->comp;
354 down_read(&comp->sem);
355 if (!READ_ONCE(comp->ready)) {
356 up_read(&comp->sem);
357 return false;
358 }
359
360 return true;
361 }
362
mlx5_devcom_for_each_peer_end(struct mlx5_devcom_comp_dev * devcom)363 void mlx5_devcom_for_each_peer_end(struct mlx5_devcom_comp_dev *devcom)
364 {
365 up_read(&devcom->comp->sem);
366 }
367
/* Advance *@pos to the next peer (any entry other than @devcom itself)
 * whose data is non-NULL and return that data; NULL when the list is
 * exhausted. Caller must hold comp->sem (see
 * mlx5_devcom_for_each_peer_begin()).
 */
void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom_comp_dev *devcom,
				     struct mlx5_devcom_comp_dev **pos)
{
	struct mlx5_devcom_comp *comp = devcom->comp;
	struct mlx5_devcom_comp_dev *tmp;
	void *data;

	/* Resume from *pos if set, otherwise start at the list head */
	tmp = list_prepare_entry(*pos, &comp->comp_dev_list_head, list);

	list_for_each_entry_continue(tmp, &comp->comp_dev_list_head, list) {
		if (tmp != devcom) {
			data = rcu_dereference_protected(tmp->data, lockdep_is_held(&comp->sem));
			if (data)
				break;
		}
	}

	/* Reached the head: no further peer with data */
	if (list_entry_is_head(tmp, &comp->comp_dev_list_head, list))
		return NULL;

	*pos = tmp;
	return data;
}
391
/* RCU variant of mlx5_devcom_get_next_peer_data(): advance *@pos to the
 * next peer with non-NULL data and return that data, bailing out with
 * NULL as soon as the component is not ready. Caller must be inside an
 * RCU read-side critical section rather than holding comp->sem.
 */
void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
					 struct mlx5_devcom_comp_dev **pos)
{
	struct mlx5_devcom_comp *comp = devcom->comp;
	struct mlx5_devcom_comp_dev *tmp;
	void *data;

	/* Resume from *pos if set, otherwise start at the list head */
	tmp = list_prepare_entry(*pos, &comp->comp_dev_list_head, list);

	list_for_each_entry_continue(tmp, &comp->comp_dev_list_head, list) {
		if (tmp != devcom) {
			/* This can change concurrently, however 'data' pointer will remain
			 * valid for the duration of RCU read section.
			 */
			if (!READ_ONCE(comp->ready))
				return NULL;
			data = rcu_dereference(tmp->data);
			if (data)
				break;
		}
	}

	/* Reached the head: no further peer with data */
	if (list_entry_is_head(tmp, &comp->comp_dev_list_head, list))
		return NULL;

	*pos = tmp;
	return data;
}
420
mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev * devcom)421 void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom)
422 {
423 if (!devcom)
424 return;
425 down_write(&devcom->comp->sem);
426 }
427
mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev * devcom)428 void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom)
429 {
430 if (!devcom)
431 return;
432 up_write(&devcom->comp->sem);
433 }
434
mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev * devcom)435 int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom)
436 {
437 if (!devcom)
438 return 0;
439 return down_write_trylock(&devcom->comp->sem);
440 }
441