xref: /linux/net/devlink/core.c (revision a4c6d53e5fd61829f707b7a723dd2937ed67c803)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5  */
6 
7 #include <net/genetlink.h>
8 #define CREATE_TRACE_POINTS
9 #include <trace/events/devlink.h>
10 
11 #include "devl_internal.h"
12 
/* Tracepoints defined via CREATE_TRACE_POINTS above; exported so drivers
 * built as modules can emit them.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwmsg);
EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwerr);
EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_trap_report);

/* All devlink instances, indexed by devlink->index. Entries of registered
 * instances carry the DEVLINK_REGISTERED xarray mark (set in devl_register(),
 * cleared in devl_unregister()).
 */
DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC);
18 
19 static struct devlink *devlinks_xa_get(unsigned long index)
20 {
21 	struct devlink *devlink;
22 
23 	rcu_read_lock();
24 	devlink = xa_find(&devlinks, &index, index, DEVLINK_REGISTERED);
25 	if (!devlink || !devlink_try_get(devlink))
26 		devlink = NULL;
27 	rcu_read_unlock();
28 	return devlink;
29 }
30 
/* devlink_rels xarray contains 1:1 relationships between
 * devlink object and related nested devlink instance.
 * The xarray index is used to get the nested object from
 * the nested-in object code.
 */
static DEFINE_XARRAY_FLAGS(devlink_rels, XA_FLAGS_ALLOC1);

/* Set on a devlink_rels entry while the relationship is live; cleared by
 * devlink_rel_put() and devlink_rel_nested_in_clear(). devlink_rel_find()
 * only returns marked entries.
 */
#define DEVLINK_REL_IN_USE XA_MARK_0

struct devlink_rel {
	u32 index;		/* this rel's index in devlink_rels */
	refcount_t refcount;	/* zero -> devlink_rel_free() */
	u32 devlink_index;	/* index of the nested devlink instance */
	struct {
		u32 devlink_index;	/* instance this one is nested in */
		u32 obj_index;		/* object inside the nested-in instance */
		/* Both callbacks are invoked with the nested-in instance
		 * locked, from the notify work.
		 */
		devlink_rel_notify_cb_t *notify_cb;
		devlink_rel_cleanup_cb_t *cleanup_cb;
		struct delayed_work notify_work;	/* deferred notification */
	} nested_in;
};
52 
/* Remove @rel from the devlink_rels xarray and free it. Called only from
 * __devlink_rel_put() when the last reference is dropped.
 */
static void devlink_rel_free(struct devlink_rel *rel)
{
	xa_erase(&devlink_rels, rel->index);
	kfree(rel);
}
58 
/* Take an additional reference on @rel; paired with __devlink_rel_put(). */
static void __devlink_rel_get(struct devlink_rel *rel)
{
	refcount_inc(&rel->refcount);
}
63 
/* Drop a reference on @rel, freeing it when the count hits zero. */
static void __devlink_rel_put(struct devlink_rel *rel)
{
	if (refcount_dec_and_test(&rel->refcount))
		devlink_rel_free(rel);
}
69 
/* Deferred delivery of a "nested instance changed" notification to the
 * nested-in (parent) devlink instance.
 *
 * The parent lock is taken with devl_trylock(); when it cannot be
 * acquired the work re-arms itself with a one-jiffy delay instead of
 * blocking, avoiding lock ordering problems between the two instances.
 * The rel reference taken by devlink_rel_nested_in_notify_work_schedule()
 * is dropped on every terminating path and retained across a reschedule.
 */
static void devlink_rel_nested_in_notify_work(struct work_struct *work)
{
	struct devlink_rel *rel = container_of(work, struct devlink_rel,
					       nested_in.notify_work.work);
	struct devlink *devlink;

	devlink = devlinks_xa_get(rel->nested_in.devlink_index);
	if (!devlink)
		goto rel_put;
	if (!devl_trylock(devlink)) {
		devlink_put(devlink);
		goto reschedule_work;
	}
	if (!devl_is_registered(devlink)) {
		devl_unlock(devlink);
		devlink_put(devlink);
		goto rel_put;
	}
	/* Mark cleared means the relationship was severed (devlink_rel_put());
	 * give the parent a chance to clean up before notifying.
	 */
	if (!xa_get_mark(&devlink_rels, rel->index, DEVLINK_REL_IN_USE))
		rel->nested_in.cleanup_cb(devlink, rel->nested_in.obj_index, rel->index);
	rel->nested_in.notify_cb(devlink, rel->nested_in.obj_index);
	devl_unlock(devlink);
	devlink_put(devlink);

rel_put:
	__devlink_rel_put(rel);
	return;

reschedule_work:
	schedule_delayed_work(&rel->nested_in.notify_work, 1);
}
101 
/* Schedule the notify work, taking a rel reference on its behalf; the
 * work drops the reference when it completes (or skips).
 */
static void devlink_rel_nested_in_notify_work_schedule(struct devlink_rel *rel)
{
	__devlink_rel_get(rel);
	schedule_delayed_work(&rel->nested_in.notify_work, 0);
}
107 
108 static struct devlink_rel *devlink_rel_alloc(void)
109 {
110 	struct devlink_rel *rel;
111 	static u32 next;
112 	int err;
113 
114 	rel = kzalloc_obj(*rel);
115 	if (!rel)
116 		return ERR_PTR(-ENOMEM);
117 
118 	err = xa_alloc_cyclic(&devlink_rels, &rel->index, rel,
119 			      xa_limit_32b, &next, GFP_KERNEL);
120 	if (err < 0) {
121 		kfree(rel);
122 		return ERR_PTR(err);
123 	}
124 
125 	refcount_set(&rel->refcount, 1);
126 	INIT_DELAYED_WORK(&rel->nested_in.notify_work,
127 			  &devlink_rel_nested_in_notify_work);
128 	return rel;
129 }
130 
/* Sever the relationship owned by @devlink (called from devl_unregister()).
 * Clearing DEVLINK_REL_IN_USE hides the rel from devlink_rel_find() and
 * makes the scheduled notify work run the cleanup callback on the
 * nested-in side; then the devlink's own rel reference is dropped.
 */
static void devlink_rel_put(struct devlink *devlink)
{
	struct devlink_rel *rel = devlink->rel;

	if (!rel)
		return;
	xa_clear_mark(&devlink_rels, rel->index, DEVLINK_REL_IN_USE);
	devlink_rel_nested_in_notify_work_schedule(rel);
	__devlink_rel_put(rel);
	devlink->rel = NULL;
}
142 
/* Clear the in-use mark for @rel_index; devlink_rel_find() will no longer
 * return this rel afterwards.
 */
void devlink_rel_nested_in_clear(u32 rel_index)
{
	xa_clear_mark(&devlink_rels, rel_index, DEVLINK_REL_IN_USE);
}
147 
148 int devlink_rel_nested_in_add(u32 *rel_index, u32 devlink_index,
149 			      u32 obj_index, devlink_rel_notify_cb_t *notify_cb,
150 			      devlink_rel_cleanup_cb_t *cleanup_cb,
151 			      struct devlink *devlink)
152 {
153 	struct devlink_rel *rel = devlink_rel_alloc();
154 
155 	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
156 
157 	if (IS_ERR(rel))
158 		return PTR_ERR(rel);
159 
160 	rel->devlink_index = devlink->index;
161 	rel->nested_in.devlink_index = devlink_index;
162 	rel->nested_in.obj_index = obj_index;
163 	rel->nested_in.notify_cb = notify_cb;
164 	rel->nested_in.cleanup_cb = cleanup_cb;
165 	*rel_index = rel->index;
166 	xa_set_mark(&devlink_rels, rel->index, DEVLINK_REL_IN_USE);
167 	devlink->rel = rel;
168 	return 0;
169 }
170 
171 /**
172  * devlink_rel_nested_in_notify - Notify the object this devlink
173  *				  instance is nested in.
174  * @devlink: devlink
175  *
176  * This is called upon network namespace change of devlink instance.
177  * In case this devlink instance is nested in another devlink object,
178  * a notification of a change of this object should be sent
179  * over netlink. The parent devlink instance lock needs to be
180  * taken during the notification preparation.
181  * Since the parent may or may not be locked, 'work' is utilized.
182  */
183 void devlink_rel_nested_in_notify(struct devlink *devlink)
184 {
185 	struct devlink_rel *rel = devlink->rel;
186 
187 	if (!rel)
188 		return;
189 	devlink_rel_nested_in_notify_work_schedule(rel);
190 }
191 
/* Look up a live (DEVLINK_REL_IN_USE) rel by index. The caller holds the
 * devlink_rels xa_lock to keep the returned pointer stable (see
 * devlink_rel_devlink_get()).
 */
static struct devlink_rel *devlink_rel_find(unsigned long rel_index)
{
	return xa_find(&devlink_rels, &rel_index, rel_index,
		       DEVLINK_REL_IN_USE);
}
197 
198 static struct devlink *devlink_rel_devlink_get(u32 rel_index)
199 {
200 	struct devlink_rel *rel;
201 	u32 devlink_index;
202 
203 	if (!rel_index)
204 		return NULL;
205 	xa_lock(&devlink_rels);
206 	rel = devlink_rel_find(rel_index);
207 	if (rel)
208 		devlink_index = rel->devlink_index;
209 	xa_unlock(&devlink_rels);
210 	if (!rel)
211 		return NULL;
212 	return devlinks_xa_get(devlink_index);
213 }
214 
215 int devlink_rel_devlink_handle_put(struct sk_buff *msg, struct devlink *devlink,
216 				   u32 rel_index, int attrtype,
217 				   bool *msg_updated)
218 {
219 	struct net *net = devlink_net(devlink);
220 	struct devlink *rel_devlink;
221 	int err;
222 
223 	rel_devlink = devlink_rel_devlink_get(rel_index);
224 	if (!rel_devlink)
225 		return 0;
226 	err = devlink_nl_put_nested_handle(msg, net, rel_devlink, attrtype);
227 	devlink_put(rel_devlink);
228 	if (!err && msg_updated)
229 		*msg_updated = true;
230 	return err;
231 }
232 
/* Return the driver private area appended to the devlink instance
 * (allocated by __devlink_alloc() with the requested priv_size).
 */
void *devlink_priv(struct devlink *devlink)
{
	return &devlink->priv;
}
EXPORT_SYMBOL_GPL(devlink_priv);
238 
/* Inverse of devlink_priv(): map a driver private pointer back to its
 * containing devlink instance.
 */
struct devlink *priv_to_devlink(void *priv)
{
	return container_of(priv, struct devlink, priv);
}
EXPORT_SYMBOL_GPL(priv_to_devlink);
244 
/* Return the parent device of the instance; may be NULL for instances
 * allocated without a device (see __devlink_alloc()).
 */
struct device *devlink_to_dev(const struct devlink *devlink)
{
	return devlink->dev;
}
EXPORT_SYMBOL_GPL(devlink_to_dev);
250 
251 const char *devlink_bus_name(const struct devlink *devlink)
252 {
253 	return devlink->dev ? devlink->dev->bus->name : DEVLINK_INDEX_BUS_NAME;
254 }
255 EXPORT_SYMBOL_GPL(devlink_bus_name);
256 
257 const char *devlink_dev_name(const struct devlink *devlink)
258 {
259 	return devlink->dev ? dev_name(devlink->dev) : devlink->dev_name_index;
260 }
261 EXPORT_SYMBOL_GPL(devlink_dev_name);
262 
/* Name of the driver that allocated this instance. */
const char *devlink_dev_driver_name(const struct devlink *devlink)
{
	return devlink->dev_driver->name;
}
EXPORT_SYMBOL_GPL(devlink_dev_driver_name);
268 
/* Network namespace this instance currently lives in. */
struct net *devlink_net(const struct devlink *devlink)
{
	return read_pnet(&devlink->_net);
}
EXPORT_SYMBOL_GPL(devlink_net);
274 
/* Lockdep assertion that the per-instance lock is held by the caller. */
void devl_assert_locked(struct devlink *devlink)
{
	lockdep_assert_held(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_assert_locked);
280 
#ifdef CONFIG_LOCKDEP
/* For use in conjunction with LOCKDEP only e.g. rcu_dereference_protected() */
bool devl_lock_is_held(struct devlink *devlink)
{
	return lockdep_is_held(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_lock_is_held);
#endif
289 
/* Take the per-instance lock (sleeps; see devl_trylock() for the
 * non-blocking variant).
 */
void devl_lock(struct devlink *devlink)
{
	mutex_lock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_lock);
295 
/* Try to take the per-instance lock without blocking; returns non-zero
 * on success (mutex_trylock() semantics).
 */
int devl_trylock(struct devlink *devlink)
{
	return mutex_trylock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_trylock);
301 
/* Release the per-instance lock. */
void devl_unlock(struct devlink *devlink)
{
	mutex_unlock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_unlock);
307 
308 /**
309  * devlink_try_get() - try to obtain a reference on a devlink instance
310  * @devlink: instance to reference
311  *
312  * Obtain a reference on a devlink instance. A reference on a devlink instance
313  * only implies that it's safe to take the instance lock. It does not imply
314  * that the instance is registered, use devl_is_registered() after taking
315  * the instance lock to check registration status.
316  */
317 struct devlink *__must_check devlink_try_get(struct devlink *devlink)
318 {
319 	if (refcount_inc_not_zero(&devlink->refcount))
320 		return devlink;
321 	return NULL;
322 }
323 
/* Final teardown, run from the RCU-deferred work queued by devlink_put()
 * after the last reference is gone.
 */
static void devlink_release(struct work_struct *work)
{
	struct devlink *devlink;

	devlink = container_of(to_rcu_work(work), struct devlink, rwork);

	mutex_destroy(&devlink->lock);
	lockdep_unregister_key(&devlink->lock_key);
	/* __devlink_alloc() either took a device reference or allocated a
	 * fallback name string; release whichever exists.
	 */
	if (devlink->dev)
		put_device(devlink->dev);
	else
		kfree(devlink->dev_name_index);
	kvfree(devlink);
}
338 
/* Drop a reference. The final put defers destruction to devlink_release()
 * through queue_rcu_work(), so RCU readers racing with the lookup in
 * devlinks_xa_get()/__devlinks_xa_find_get() finish before the object is
 * freed.
 */
void devlink_put(struct devlink *devlink)
{
	if (refcount_dec_and_test(&devlink->refcount))
		queue_rcu_work(system_percpu_wq, &devlink->rwork);
}
344 
/* Find the first registered devlink instance in @net at index >= *indexp
 * (searching up to @end), take a reference on it and return it; *indexp
 * is left at the index of the match. Instances whose refcount already
 * dropped to zero, or that live in a different netns, are skipped by
 * advancing the index and retrying under the same RCU read section.
 * Returns NULL when nothing matches.
 */
static struct devlink *__devlinks_xa_find_get(struct net *net,
					      unsigned long *indexp,
					      unsigned long end)
{
	struct devlink *devlink = NULL;

	rcu_read_lock();
retry:
	devlink = xa_find(&devlinks, indexp, end, DEVLINK_REGISTERED);
	if (!devlink)
		goto unlock;

	/* Refcount hit zero: instance is on its way out, skip it. */
	if (!devlink_try_get(devlink))
		goto next;
	if (!net_eq(devlink_net(devlink), net)) {
		devlink_put(devlink);
		goto next;
	}
unlock:
	rcu_read_unlock();
	return devlink;

next:
	(*indexp)++;
	goto retry;
}
371 
/* Iterator helper: first registered instance in @net at index >= *indexp. */
struct devlink *devlinks_xa_find_get(struct net *net, unsigned long *indexp)
{
	return __devlinks_xa_find_get(net, indexp, ULONG_MAX);
}
376 
/* Exact-index lookup within @net (search range limited to @index alone). */
struct devlink *devlinks_xa_lookup_get(struct net *net, unsigned long index)
{
	return __devlinks_xa_find_get(net, &index, index);
}
381 
/**
 * devl_register - Register devlink instance
 * @devlink: devlink
 *
 * Caller must hold the instance lock. Always returns 0.
 */
int devl_register(struct devlink *devlink)
{
	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
	devl_assert_locked(devlink);

	/* The mark makes the instance visible to devlinks_xa_get() and the
	 * registered-instance iterators.
	 */
	xa_set_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
	devlink_notify_register(devlink);
	/* If nested inside another instance, notify that instance. */
	devlink_rel_nested_in_notify(devlink);

	return 0;
}
EXPORT_SYMBOL_GPL(devl_register);
398 
/* Locked wrapper around devl_register() for callers that do not already
 * hold the instance lock.
 */
void devlink_register(struct devlink *devlink)
{
	devl_lock(devlink);
	devl_register(devlink);
	devl_unlock(devlink);
}
EXPORT_SYMBOL_GPL(devlink_register);
406 
/**
 * devl_unregister - Unregister devlink instance
 * @devlink: devlink
 *
 * Caller must hold the instance lock.
 */
void devl_unregister(struct devlink *devlink)
{
	ASSERT_DEVLINK_REGISTERED(devlink);
	devl_assert_locked(devlink);

	devlink_notify_unregister(devlink);
	/* Hide the instance from lookups/iterators before tearing down the
	 * nested-in relationship.
	 */
	xa_clear_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
	devlink_rel_put(devlink);
}
EXPORT_SYMBOL_GPL(devl_unregister);
421 
/* Locked wrapper around devl_unregister() for callers that do not already
 * hold the instance lock.
 */
void devlink_unregister(struct devlink *devlink)
{
	devl_lock(devlink);
	devl_unregister(devlink);
	devl_unlock(devlink);
}
EXPORT_SYMBOL_GPL(devlink_unregister);
429 
/* Allocate a devlink instance with @priv_size bytes of driver private area
 * appended. An index is allocated from the global devlinks xarray, but the
 * instance is NOT yet marked registered (see devl_register()). When @dev
 * is NULL, a numeric name derived from the index is allocated instead of
 * taking a device reference. Returns NULL on any failure.
 */
struct devlink *__devlink_alloc(const struct devlink_ops *ops, size_t priv_size,
				struct net *net, struct device *dev,
				const struct device_driver *dev_driver)
{
	struct devlink *devlink;
	static u32 last_id;
	int ret;

	WARN_ON(!ops || !dev_driver);
	if (!devlink_reload_actions_valid(ops))
		return NULL;

	devlink = kvzalloc_flex(*devlink, priv, priv_size);
	if (!devlink)
		return NULL;

	/* Cyclic allocation so indices are not immediately reused. */
	ret = xa_alloc_cyclic(&devlinks, &devlink->index, devlink, xa_limit_31b,
			      &last_id, GFP_KERNEL);
	if (ret < 0)
		goto err_xa_alloc;

	if (dev) {
		devlink->dev = get_device(dev);	/* dropped in devlink_release() */
	} else {
		/* No backing device: use the index string as the name. */
		devlink->dev_name_index = kasprintf(GFP_KERNEL, "%u", devlink->index);
		if (!devlink->dev_name_index)
			goto err_kasprintf;
	}

	devlink->ops = ops;
	devlink->dev_driver = dev_driver;
	xa_init_flags(&devlink->ports, XA_FLAGS_ALLOC);
	xa_init_flags(&devlink->params, XA_FLAGS_ALLOC);
	xa_init_flags(&devlink->snapshot_ids, XA_FLAGS_ALLOC);
	xa_init_flags(&devlink->nested_rels, XA_FLAGS_ALLOC);
	write_pnet(&devlink->_net, net);
	INIT_LIST_HEAD(&devlink->rate_list);
	INIT_LIST_HEAD(&devlink->linecard_list);
	INIT_LIST_HEAD(&devlink->sb_list);
	INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
	INIT_LIST_HEAD(&devlink->resource_list);
	INIT_LIST_HEAD(&devlink->region_list);
	INIT_LIST_HEAD(&devlink->reporter_list);
	INIT_LIST_HEAD(&devlink->trap_list);
	INIT_LIST_HEAD(&devlink->trap_group_list);
	INIT_LIST_HEAD(&devlink->trap_policer_list);
	INIT_RCU_WORK(&devlink->rwork, devlink_release);
	/* Per-instance lockdep key so nested instances can be locked
	 * without false lock-order warnings.
	 */
	lockdep_register_key(&devlink->lock_key);
	mutex_init(&devlink->lock);
	lockdep_set_class(&devlink->lock, &devlink->lock_key);
	/* Initial reference is dropped by devlink_free(). */
	refcount_set(&devlink->refcount, 1);

	return devlink;

err_kasprintf:
	xa_erase(&devlinks, devlink->index);
err_xa_alloc:
	kvfree(devlink);
	return NULL;
}
490 
/**
 *	devlink_alloc_ns - Allocate new devlink instance resources
 *	in specific namespace
 *
 *	@ops: ops
 *	@priv_size: size of user private data
 *	@net: net namespace
 *	@dev: parent device (must not be NULL; driver is taken from it)
 *
 *	Allocate new devlink instance resources, including devlink index
 *	and name.
 */
struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
				 size_t priv_size, struct net *net,
				 struct device *dev)
{
	WARN_ON(!dev);
	return __devlink_alloc(ops, priv_size, net, dev, dev->driver);
}
EXPORT_SYMBOL_GPL(devlink_alloc_ns);
511 
/**
 *	devlink_free - Free devlink instance resources
 *
 *	@devlink: devlink
 *
 *	The instance must already be unregistered. Warns if the driver left
 *	any sub-objects behind, removes the instance from the global xarray
 *	and drops the initial reference taken by __devlink_alloc(); actual
 *	memory release happens in devlink_release() once all references are
 *	gone.
 */
void devlink_free(struct devlink *devlink)
{
	ASSERT_DEVLINK_NOT_REGISTERED(devlink);

	/* Leak checks: the driver is responsible for tearing these down
	 * before freeing the instance.
	 */
	WARN_ON(!list_empty(&devlink->trap_policer_list));
	WARN_ON(!list_empty(&devlink->trap_group_list));
	WARN_ON(!list_empty(&devlink->trap_list));
	WARN_ON(!list_empty(&devlink->reporter_list));
	WARN_ON(!list_empty(&devlink->region_list));
	WARN_ON(!list_empty(&devlink->resource_list));
	WARN_ON(!list_empty(&devlink->dpipe_table_list));
	WARN_ON(!list_empty(&devlink->sb_list));
	WARN_ON(devlink_rates_check(devlink, NULL, NULL));
	WARN_ON(!list_empty(&devlink->linecard_list));
	WARN_ON(!xa_empty(&devlink->ports));

	xa_destroy(&devlink->nested_rels);
	xa_destroy(&devlink->snapshot_ids);
	xa_destroy(&devlink->params);
	xa_destroy(&devlink->ports);

	xa_erase(&devlinks, devlink->index);

	devlink_put(devlink);
}
EXPORT_SYMBOL_GPL(devlink_free);
543 
/* pernet pre_exit hook: move instances out of a dying netns. */
static void __net_exit devlink_pernet_pre_exit(struct net *net)
{
	struct devlink *devlink;
	u32 actions_performed;
	unsigned long index;
	int err;

	/* In case network namespace is getting destroyed, reload
	 * all devlink instances from this namespace into init_net.
	 */
	devlinks_xa_for_each_registered_get(net, index, devlink) {
		devl_dev_lock(devlink, true);
		err = 0;
		/* Registration state may have changed before the lock was
		 * taken; only reload instances that are still registered.
		 */
		if (devl_is_registered(devlink))
			err = devlink_reload(devlink, &init_net,
					     DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
					     DEVLINK_RELOAD_LIMIT_UNSPEC,
					     &actions_performed, NULL);
		devl_dev_unlock(devlink, true);
		devlink_put(devlink);
		/* -EOPNOTSUPP just means the driver has no reload support. */
		if (err && err != -EOPNOTSUPP)
			pr_warn("Failed to reload devlink instance into init_net\n");
	}
}
568 
/* Per-netns hooks: evacuate instances from a netns before it dies. */
static struct pernet_operations devlink_pernet_ops __net_initdata = {
	.pre_exit = devlink_pernet_pre_exit,
};

/* Netdevice notifier used by the devlink port code. */
static struct notifier_block devlink_port_netdevice_nb = {
	.notifier_call = devlink_port_netdevice_event,
};
576 
577 static int __init devlink_init(void)
578 {
579 	int err;
580 
581 	err = register_pernet_subsys(&devlink_pernet_ops);
582 	if (err)
583 		goto out;
584 	err = genl_register_family(&devlink_nl_family);
585 	if (err)
586 		goto out_unreg_pernet_subsys;
587 	err = register_netdevice_notifier(&devlink_port_netdevice_nb);
588 	if (!err)
589 		return 0;
590 
591 	genl_unregister_family(&devlink_nl_family);
592 
593 out_unreg_pernet_subsys:
594 	unregister_pernet_subsys(&devlink_pernet_ops);
595 out:
596 	WARN_ON(err);
597 	return err;
598 }
599 
600 subsys_initcall(devlink_init);
601