1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5 */
6
7 #include <net/genetlink.h>
8 #define CREATE_TRACE_POINTS
9 #include <trace/events/devlink.h>
10
11 #include "devl_internal.h"
12
/* Export tracepoints so drivers can emit devlink trace events. */
EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwmsg);
EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwerr);
EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_trap_report);

/* All devlink instances, indexed by devlink->index. Registered instances
 * additionally carry the DEVLINK_REGISTERED xarray mark (set/cleared in
 * devl_register()/devl_unregister()).
 */
DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC);
18
devlinks_xa_get(unsigned long index)19 static struct devlink *devlinks_xa_get(unsigned long index)
20 {
21 struct devlink *devlink;
22
23 rcu_read_lock();
24 devlink = xa_find(&devlinks, &index, index, DEVLINK_REGISTERED);
25 if (!devlink || !devlink_try_get(devlink))
26 devlink = NULL;
27 rcu_read_unlock();
28 return devlink;
29 }
30
/* devlink_rels xarray contains 1:1 relationships between
 * devlink object and related nested devlink instance.
 * The xarray index is used to get the nested object from
 * the nested-in object code.
 */
static DEFINE_XARRAY_FLAGS(devlink_rels, XA_FLAGS_ALLOC1);

/* Set while the relationship is alive; cleared (devlink_rel_put(),
 * devlink_rel_nested_in_clear()) so the nested-in side can run its
 * cleanup callback from the deferred notify work.
 */
#define DEVLINK_REL_IN_USE XA_MARK_0

struct devlink_rel {
	u32 index;		/* this entry's index in devlink_rels */
	refcount_t refcount;	/* freed via devlink_rel_free() on last put */
	u32 devlink_index;	/* index of the nested devlink instance */
	struct {
		u32 devlink_index;	/* instance this one is nested in */
		u32 obj_index;		/* object within that instance */
		devlink_rel_notify_cb_t *notify_cb;
		devlink_rel_cleanup_cb_t *cleanup_cb;
		/* deferred notification toward the nested-in instance,
		 * see devlink_rel_nested_in_notify_work()
		 */
		struct delayed_work notify_work;
	} nested_in;
};
52
/* Remove the relationship from the xarray and release its memory; called
 * once the last reference is dropped (see __devlink_rel_put()).
 */
static void devlink_rel_free(struct devlink_rel *rel)
{
	xa_erase(&devlink_rels, rel->index);
	kfree(rel);
}
58
/* Take an additional reference on @rel. */
static void __devlink_rel_get(struct devlink_rel *rel)
{
	refcount_inc(&rel->refcount);
}
63
/* Drop a reference on @rel, freeing it when this was the last one. */
static void __devlink_rel_put(struct devlink_rel *rel)
{
	if (refcount_dec_and_test(&rel->refcount))
		devlink_rel_free(rel);
}
69
/* Deferred notification toward the devlink instance this relationship is
 * nested in. Runs from a delayed work so the nested-in instance lock can be
 * taken here; if that lock is currently contended the work reschedules
 * itself instead of blocking. The scheduler of this work took a reference
 * on @rel (see devlink_rel_nested_in_notify_work_schedule()) which is
 * dropped on completion.
 */
static void devlink_rel_nested_in_notify_work(struct work_struct *work)
{
	struct devlink_rel *rel = container_of(work, struct devlink_rel,
					       nested_in.notify_work.work);
	struct devlink *devlink;

	devlink = devlinks_xa_get(rel->nested_in.devlink_index);
	if (!devlink)
		goto rel_put;
	if (!devl_trylock(devlink)) {
		/* Lock busy - retry shortly, keeping the rel reference. */
		devlink_put(devlink);
		goto reschedule_work;
	}
	if (!devl_is_registered(devlink)) {
		devl_unlock(devlink);
		devlink_put(devlink);
		goto rel_put;
	}
	/* Relationship no longer in use - let the nested-in side clean up
	 * its record of this rel index before it is notified.
	 */
	if (!xa_get_mark(&devlink_rels, rel->index, DEVLINK_REL_IN_USE))
		rel->nested_in.cleanup_cb(devlink, rel->nested_in.obj_index, rel->index);
	rel->nested_in.notify_cb(devlink, rel->nested_in.obj_index);
	devl_unlock(devlink);
	devlink_put(devlink);

rel_put:
	__devlink_rel_put(rel);
	return;

reschedule_work:
	schedule_delayed_work(&rel->nested_in.notify_work, 1);
}
101
/* Take a reference on @rel for the work and kick it off immediately; the
 * work drops the reference when it finishes (or keeps it across reschedules).
 * The reference must be taken before scheduling so the rel cannot be freed
 * while the work is pending.
 */
static void devlink_rel_nested_in_notify_work_schedule(struct devlink_rel *rel)
{
	__devlink_rel_get(rel);
	schedule_delayed_work(&rel->nested_in.notify_work, 0);
}
107
devlink_rel_alloc(void)108 static struct devlink_rel *devlink_rel_alloc(void)
109 {
110 struct devlink_rel *rel;
111 static u32 next;
112 int err;
113
114 rel = kzalloc_obj(*rel);
115 if (!rel)
116 return ERR_PTR(-ENOMEM);
117
118 err = xa_alloc_cyclic(&devlink_rels, &rel->index, rel,
119 xa_limit_32b, &next, GFP_KERNEL);
120 if (err < 0) {
121 kfree(rel);
122 return ERR_PTR(err);
123 }
124
125 refcount_set(&rel->refcount, 1);
126 INIT_DELAYED_WORK(&rel->nested_in.notify_work,
127 &devlink_rel_nested_in_notify_work);
128 return rel;
129 }
130
/* Detach the relationship from @devlink on unregistration: clear the
 * IN_USE mark, schedule the deferred notification toward the nested-in
 * side and drop this side's reference. No-op when @devlink has no rel.
 */
static void devlink_rel_put(struct devlink *devlink)
{
	struct devlink_rel *rel = devlink->rel;

	if (!rel)
		return;
	/* Clear the mark before scheduling so the notify work sees the
	 * relationship as unused and invokes cleanup_cb.
	 */
	xa_clear_mark(&devlink_rels, rel->index, DEVLINK_REL_IN_USE);
	devlink_rel_nested_in_notify_work_schedule(rel);
	__devlink_rel_put(rel);
	devlink->rel = NULL;
}
142
/* Mark the relationship at @rel_index as no longer in use, so subsequent
 * lookups via devlink_rel_find() miss it and the notify work runs cleanup.
 */
void devlink_rel_nested_in_clear(u32 rel_index)
{
	xa_clear_mark(&devlink_rels, rel_index, DEVLINK_REL_IN_USE);
}
147
/* Create a relationship tying @devlink (the nested, not-yet-registered
 * instance) to object @obj_index of the instance @devlink_index it is
 * nested in. On success the new rel index is stored in @rel_index, the
 * entry is marked in use and attached to @devlink.
 * Returns 0 or a negative errno from allocation.
 */
int devlink_rel_nested_in_add(u32 *rel_index, u32 devlink_index,
			      u32 obj_index, devlink_rel_notify_cb_t *notify_cb,
			      devlink_rel_cleanup_cb_t *cleanup_cb,
			      struct devlink *devlink)
{
	struct devlink_rel *rel = devlink_rel_alloc();

	ASSERT_DEVLINK_NOT_REGISTERED(devlink);

	if (IS_ERR(rel))
		return PTR_ERR(rel);

	rel->devlink_index = devlink->index;
	rel->nested_in.devlink_index = devlink_index;
	rel->nested_in.obj_index = obj_index;
	rel->nested_in.notify_cb = notify_cb;
	rel->nested_in.cleanup_cb = cleanup_cb;
	*rel_index = rel->index;
	xa_set_mark(&devlink_rels, rel->index, DEVLINK_REL_IN_USE);
	devlink->rel = rel;
	return 0;
}
170
171 /**
172 * devlink_rel_nested_in_notify - Notify the object this devlink
173 * instance is nested in.
174 * @devlink: devlink
175 *
176 * This is called upon network namespace change of devlink instance.
177 * In case this devlink instance is nested in another devlink object,
178 * a notification of a change of this object should be sent
179 * over netlink. The parent devlink instance lock needs to be
180 * taken during the notification preparation.
181 * Since the parent may or may not be locked, 'work' is utilized.
182 */
devlink_rel_nested_in_notify(struct devlink * devlink)183 void devlink_rel_nested_in_notify(struct devlink *devlink)
184 {
185 struct devlink_rel *rel = devlink->rel;
186
187 if (!rel)
188 return;
189 devlink_rel_nested_in_notify_work_schedule(rel);
190 }
191
/* Return the rel at @rel_index only while it is still marked in use,
 * NULL otherwise. The caller (devlink_rel_devlink_get()) holds
 * xa_lock(&devlink_rels) across the lookup to keep the entry stable.
 */
static struct devlink_rel *devlink_rel_find(unsigned long rel_index)
{
	return xa_find(&devlink_rels, &rel_index, rel_index,
		       DEVLINK_REL_IN_USE);
}
197
/* Resolve @rel_index to the nested devlink instance it refers to and take
 * a reference on that instance. Returns NULL for index 0, for a stale or
 * unused rel index, or when the instance is gone. Only the devlink index is
 * read under the xarray lock; the instance lookup happens outside it.
 */
static struct devlink *devlink_rel_devlink_get(u32 rel_index)
{
	struct devlink_rel *rel;
	u32 index = 0;
	bool found = false;

	if (!rel_index)
		return NULL;

	xa_lock(&devlink_rels);
	rel = devlink_rel_find(rel_index);
	if (rel) {
		index = rel->devlink_index;
		found = true;
	}
	xa_unlock(&devlink_rels);

	return found ? devlinks_xa_get(index) : NULL;
}
214
/* Emit a nested-handle attribute of type @attrtype for the instance that
 * @rel_index points at, into @msg. Quietly succeeds when the rel index does
 * not resolve. On successful emission, *@msg_updated (when non-NULL) is set
 * to true. Returns 0 or the error from devlink_nl_put_nested_handle().
 */
int devlink_rel_devlink_handle_put(struct sk_buff *msg, struct devlink *devlink,
				   u32 rel_index, int attrtype,
				   bool *msg_updated)
{
	struct devlink *rel_devlink;
	int err;

	rel_devlink = devlink_rel_devlink_get(rel_index);
	if (!rel_devlink)
		return 0;

	err = devlink_nl_put_nested_handle(msg, devlink_net(devlink),
					   rel_devlink, attrtype);
	devlink_put(rel_devlink);
	if (err)
		return err;

	if (msg_updated)
		*msg_updated = true;
	return 0;
}
232
/* Return the driver-private area allocated alongside the devlink instance
 * (the trailing priv member sized by priv_size in devlink_alloc_ns()).
 */
void *devlink_priv(struct devlink *devlink)
{
	return &devlink->priv;
}
EXPORT_SYMBOL_GPL(devlink_priv);
238
/* Inverse of devlink_priv(): map a driver-private pointer back to its
 * containing devlink instance.
 */
struct devlink *priv_to_devlink(void *priv)
{
	return container_of(priv, struct devlink, priv);
}
EXPORT_SYMBOL_GPL(priv_to_devlink);
244
/* Return the parent device passed to devlink_alloc_ns(). */
struct device *devlink_to_dev(const struct devlink *devlink)
{
	return devlink->dev;
}
EXPORT_SYMBOL_GPL(devlink_to_dev);
250
/* Return the network namespace this instance currently belongs to. */
struct net *devlink_net(const struct devlink *devlink)
{
	return read_pnet(&devlink->_net);
}
EXPORT_SYMBOL_GPL(devlink_net);
256
/* Lockdep-assert that the devlink instance lock is held by the caller. */
void devl_assert_locked(struct devlink *devlink)
{
	lockdep_assert_held(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_assert_locked);
262
#ifdef CONFIG_LOCKDEP
/* For use in conjunction with LOCKDEP only e.g. rcu_dereference_protected().
 * Reports whether lockdep believes the instance lock is held.
 */
bool devl_lock_is_held(struct devlink *devlink)
{
	return lockdep_is_held(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_lock_is_held);
#endif
271
/* Take the devlink instance lock. */
void devl_lock(struct devlink *devlink)
{
	mutex_lock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_lock);
277
/* Try to take the instance lock without blocking; nonzero on success. */
int devl_trylock(struct devlink *devlink)
{
	return mutex_trylock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_trylock);
283
/* Release the devlink instance lock. */
void devl_unlock(struct devlink *devlink)
{
	mutex_unlock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_unlock);
289
290 /**
291 * devlink_try_get() - try to obtain a reference on a devlink instance
292 * @devlink: instance to reference
293 *
294 * Obtain a reference on a devlink instance. A reference on a devlink instance
295 * only implies that it's safe to take the instance lock. It does not imply
296 * that the instance is registered, use devl_is_registered() after taking
297 * the instance lock to check registration status.
298 */
devlink_try_get(struct devlink * devlink)299 struct devlink *__must_check devlink_try_get(struct devlink *devlink)
300 {
301 if (refcount_inc_not_zero(&devlink->refcount))
302 return devlink;
303 return NULL;
304 }
305
/* Final destruction of a devlink instance, run from the RCU-deferred work
 * queued by devlink_put() once the refcount hit zero. Tears down the lock
 * and its lockdep key, drops the parent device reference taken in
 * devlink_alloc_ns() and frees the instance memory.
 */
static void devlink_release(struct work_struct *work)
{
	struct devlink *devlink;

	devlink = container_of(to_rcu_work(work), struct devlink, rwork);

	mutex_destroy(&devlink->lock);
	lockdep_unregister_key(&devlink->lock_key);
	put_device(devlink->dev);
	kvfree(devlink);
}
317
/* Drop a reference. The last put defers destruction to devlink_release()
 * via queue_rcu_work(), i.e. past an RCU grace period, which covers the
 * RCU-protected lookups in devlinks_xa_get()/devlinks_xa_find_get().
 */
void devlink_put(struct devlink *devlink)
{
	if (refcount_dec_and_test(&devlink->refcount))
		queue_rcu_work(system_percpu_wq, &devlink->rwork);
}
323
devlinks_xa_find_get(struct net * net,unsigned long * indexp)324 struct devlink *devlinks_xa_find_get(struct net *net, unsigned long *indexp)
325 {
326 struct devlink *devlink = NULL;
327
328 rcu_read_lock();
329 retry:
330 devlink = xa_find(&devlinks, indexp, ULONG_MAX, DEVLINK_REGISTERED);
331 if (!devlink)
332 goto unlock;
333
334 if (!devlink_try_get(devlink))
335 goto next;
336 if (!net_eq(devlink_net(devlink), net)) {
337 devlink_put(devlink);
338 goto next;
339 }
340 unlock:
341 rcu_read_unlock();
342 return devlink;
343
344 next:
345 (*indexp)++;
346 goto retry;
347 }
348
/**
 * devl_register - Register devlink instance
 * @devlink: devlink
 *
 * Caller must hold the instance lock. Always returns 0.
 */
int devl_register(struct devlink *devlink)
{
	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
	devl_assert_locked(devlink);

	/* The REGISTERED mark is what makes the instance visible to
	 * lookups such as devlinks_xa_get()/devlinks_xa_find_get().
	 */
	xa_set_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
	devlink_notify_register(devlink);
	/* If this instance is nested in another devlink object, tell the
	 * nested-in side about the change.
	 */
	devlink_rel_nested_in_notify(devlink);

	return 0;
}
EXPORT_SYMBOL_GPL(devl_register);
365
/* Locked wrapper around devl_register() for callers that do not already
 * hold the instance lock.
 */
void devlink_register(struct devlink *devlink)
{
	devl_lock(devlink);
	devl_register(devlink);
	devl_unlock(devlink);
}
EXPORT_SYMBOL_GPL(devlink_register);
373
/**
 * devl_unregister - Unregister devlink instance
 * @devlink: devlink
 *
 * Caller must hold the instance lock. Mirrors devl_register():
 * notifications go out first, then the instance is hidden from lookups
 * and any nesting relationship is detached.
 */
void devl_unregister(struct devlink *devlink)
{
	ASSERT_DEVLINK_REGISTERED(devlink);
	devl_assert_locked(devlink);

	devlink_notify_unregister(devlink);
	xa_clear_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
	devlink_rel_put(devlink);
}
EXPORT_SYMBOL_GPL(devl_unregister);
388
/* Locked wrapper around devl_unregister() for callers that do not already
 * hold the instance lock.
 */
void devlink_unregister(struct devlink *devlink)
{
	devl_lock(devlink);
	devl_unregister(devlink);
	devl_unlock(devlink);
}
EXPORT_SYMBOL_GPL(devlink_unregister);
396
/**
 * devlink_alloc_ns - Allocate new devlink instance resources
 * in specific namespace
 *
 * @ops: ops
 * @priv_size: size of user private data
 * @net: net namespace
 * @dev: parent device
 *
 * Allocate new devlink instance resources, including devlink index
 * and name. Returns NULL on allocation failure or invalid reload ops.
 * The returned instance starts with one reference held; it becomes
 * visible to lookups only after devl_register().
 */
struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
				 size_t priv_size, struct net *net,
				 struct device *dev)
{
	struct devlink *devlink;
	static u32 last_id;
	int ret;

	WARN_ON(!ops || !dev);
	if (!devlink_reload_actions_valid(ops))
		return NULL;

	devlink = kvzalloc_flex(*devlink, priv, priv_size);
	if (!devlink)
		return NULL;

	/* Cyclic index allocation avoids quick reuse of a freed index. */
	ret = xa_alloc_cyclic(&devlinks, &devlink->index, devlink, xa_limit_31b,
			      &last_id, GFP_KERNEL);
	if (ret < 0)
		goto err_xa_alloc;

	/* Hold the parent device until devlink_release(). */
	devlink->dev = get_device(dev);
	devlink->ops = ops;
	xa_init_flags(&devlink->ports, XA_FLAGS_ALLOC);
	xa_init_flags(&devlink->params, XA_FLAGS_ALLOC);
	xa_init_flags(&devlink->snapshot_ids, XA_FLAGS_ALLOC);
	xa_init_flags(&devlink->nested_rels, XA_FLAGS_ALLOC);
	write_pnet(&devlink->_net, net);
	INIT_LIST_HEAD(&devlink->rate_list);
	INIT_LIST_HEAD(&devlink->linecard_list);
	INIT_LIST_HEAD(&devlink->sb_list);
	INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
	INIT_LIST_HEAD(&devlink->resource_list);
	INIT_LIST_HEAD(&devlink->region_list);
	INIT_LIST_HEAD(&devlink->reporter_list);
	INIT_LIST_HEAD(&devlink->trap_list);
	INIT_LIST_HEAD(&devlink->trap_group_list);
	INIT_LIST_HEAD(&devlink->trap_policer_list);
	INIT_RCU_WORK(&devlink->rwork, devlink_release);
	/* Per-instance lockdep key so nested instances can be locked. */
	lockdep_register_key(&devlink->lock_key);
	mutex_init(&devlink->lock);
	lockdep_set_class(&devlink->lock, &devlink->lock_key);
	refcount_set(&devlink->refcount, 1);

	return devlink;

err_xa_alloc:
	kvfree(devlink);
	return NULL;
}
EXPORT_SYMBOL_GPL(devlink_alloc_ns);
460
/**
 * devlink_free - Free devlink instance resources
 *
 * @devlink: devlink
 *
 * Must be called on an unregistered instance. Warns if the driver left
 * any sub-objects behind, removes the instance from the global xarray and
 * drops the initial reference taken in devlink_alloc_ns(); the actual
 * memory is released later by devlink_release().
 */
void devlink_free(struct devlink *devlink)
{
	ASSERT_DEVLINK_NOT_REGISTERED(devlink);

	/* All sub-objects should have been removed by the driver by now. */
	WARN_ON(!list_empty(&devlink->trap_policer_list));
	WARN_ON(!list_empty(&devlink->trap_group_list));
	WARN_ON(!list_empty(&devlink->trap_list));
	WARN_ON(!list_empty(&devlink->reporter_list));
	WARN_ON(!list_empty(&devlink->region_list));
	WARN_ON(!list_empty(&devlink->resource_list));
	WARN_ON(!list_empty(&devlink->dpipe_table_list));
	WARN_ON(!list_empty(&devlink->sb_list));
	WARN_ON(devlink_rates_check(devlink, NULL, NULL));
	WARN_ON(!list_empty(&devlink->linecard_list));
	WARN_ON(!xa_empty(&devlink->ports));

	xa_destroy(&devlink->nested_rels);
	xa_destroy(&devlink->snapshot_ids);
	xa_destroy(&devlink->params);
	xa_destroy(&devlink->ports);

	xa_erase(&devlinks, devlink->index);

	/* Drop the reference set in devlink_alloc_ns(). */
	devlink_put(devlink);
}
EXPORT_SYMBOL_GPL(devlink_free);
492
/* Pernet pre_exit hook: when a network namespace is being destroyed,
 * reload every registered devlink instance from that namespace into
 * init_net so no instance is left in a dying namespace.
 */
static void __net_exit devlink_pernet_pre_exit(struct net *net)
{
	struct devlink *devlink;
	u32 actions_performed;
	unsigned long index;
	int err;

	/* In case network namespace is getting destroyed, reload
	 * all devlink instances from this namespace into init_net.
	 */
	devlinks_xa_for_each_registered_get(net, index, devlink) {
		devl_dev_lock(devlink, true);
		err = 0;
		/* Re-check under the lock; the instance may have been
		 * unregistered since the lookup.
		 */
		if (devl_is_registered(devlink))
			err = devlink_reload(devlink, &init_net,
					     DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
					     DEVLINK_RELOAD_LIMIT_UNSPEC,
					     &actions_performed, NULL);
		devl_dev_unlock(devlink, true);
		devlink_put(devlink);
		/* -EOPNOTSUPP means the driver simply has no reload op. */
		if (err && err != -EOPNOTSUPP)
			pr_warn("Failed to reload devlink instance into init_net\n");
	}
}
517
/* Evacuates devlink instances from dying network namespaces. */
static struct pernet_operations devlink_pernet_ops __net_initdata = {
	.pre_exit = devlink_pernet_pre_exit,
};

/* Netdevice notifier, handled by devlink_port_netdevice_event(). */
static struct notifier_block devlink_port_netdevice_nb = {
	.notifier_call = devlink_port_netdevice_event,
};
525
devlink_init(void)526 static int __init devlink_init(void)
527 {
528 int err;
529
530 err = register_pernet_subsys(&devlink_pernet_ops);
531 if (err)
532 goto out;
533 err = genl_register_family(&devlink_nl_family);
534 if (err)
535 goto out_unreg_pernet_subsys;
536 err = register_netdevice_notifier(&devlink_port_netdevice_nb);
537 if (!err)
538 return 0;
539
540 genl_unregister_family(&devlink_nl_family);
541
542 out_unreg_pernet_subsys:
543 unregister_pernet_subsys(&devlink_pernet_ops);
544 out:
545 WARN_ON(err);
546 return err;
547 }
548
549 subsys_initcall(devlink_init);
550