xref: /linux/net/devlink/core.c (revision 7681a4f58fb9c338d6dfe1181607f84c793d77de)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 */

#include <net/genetlink.h>

#include "devl_internal.h"

DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC);

void *devlink_priv(struct devlink *devlink)
{
	return &devlink->priv;
}
EXPORT_SYMBOL_GPL(devlink_priv);

struct devlink *priv_to_devlink(void *priv)
{
	return container_of(priv, struct devlink, priv);
}
EXPORT_SYMBOL_GPL(priv_to_devlink);

struct device *devlink_to_dev(const struct devlink *devlink)
{
	return devlink->dev;
}
EXPORT_SYMBOL_GPL(devlink_to_dev);

struct net *devlink_net(const struct devlink *devlink)
{
	return read_pnet(&devlink->_net);
}
EXPORT_SYMBOL_GPL(devlink_net);

void devl_assert_locked(struct devlink *devlink)
{
	lockdep_assert_held(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_assert_locked);

#ifdef CONFIG_LOCKDEP
/* For use in conjunction with LOCKDEP only e.g. rcu_dereference_protected() */
bool devl_lock_is_held(struct devlink *devlink)
{
	return lockdep_is_held(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_lock_is_held);
#endif

void devl_lock(struct devlink *devlink)
{
	mutex_lock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_lock);

int devl_trylock(struct devlink *devlink)
{
	return mutex_trylock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_trylock);

void devl_unlock(struct devlink *devlink)
{
	mutex_unlock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_unlock);
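
/* Editor's note: a minimal usage sketch, not part of the original source.
 * Code that does not yet hold the instance lock takes it around calls to the
 * devl_* flavour of the API; devlink_register() below follows this pattern,
 * and callees may check the lock with devl_assert_locked(). The names
 * my_locked_setup() and my_devl_call() are hypothetical.
 *
 *	static void my_locked_setup(struct devlink *devlink)
 *	{
 *		devl_lock(devlink);
 *		my_devl_call(devlink);
 *		devl_unlock(devlink);
 *	}
 */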

/**
 * devlink_try_get() - try to obtain a reference on a devlink instance
 * @devlink: instance to reference
 *
 * Obtain a reference on a devlink instance. A reference on a devlink instance
 * only implies that it's safe to take the instance lock. It does not imply
 * that the instance is registered; use devl_is_registered() after taking
 * the instance lock to check registration status.
 */
struct devlink *__must_check devlink_try_get(struct devlink *devlink)
{
	if (refcount_inc_not_zero(&devlink->refcount))
		return devlink;
	return NULL;
}

static void devlink_release(struct work_struct *work)
{
	struct devlink *devlink;

	devlink = container_of(to_rcu_work(work), struct devlink, rwork);

	mutex_destroy(&devlink->lock);
	lockdep_unregister_key(&devlink->lock_key);
	kfree(devlink);
}

void devlink_put(struct devlink *devlink)
{
	if (refcount_dec_and_test(&devlink->refcount))
		queue_rcu_work(system_wq, &devlink->rwork);
}
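
/* Editor's note: a hedged sketch of the reference pattern described in the
 * devlink_try_get() kernel-doc above; devlink_pernet_pre_exit() at the end of
 * this file follows the same shape. my_visit() and my_do_work() are
 * hypothetical names.
 *
 *	static void my_visit(struct devlink *devlink)
 *	{
 *		if (!devlink_try_get(devlink))
 *			return;
 *		devl_lock(devlink);
 *		if (devl_is_registered(devlink))
 *			my_do_work(devlink);
 *		devl_unlock(devlink);
 *		devlink_put(devlink);
 *	}
 */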

/* Find the next devlink instance in the devlinks xarray that is marked
 * DEVLINK_REGISTERED, starting at index *indexp, belongs to the network
 * namespace @net and can still be referenced; take a reference and return
 * it, or return NULL if no such instance exists.
 */
struct devlink *devlinks_xa_find_get(struct net *net, unsigned long *indexp)
{
	struct devlink *devlink = NULL;

	rcu_read_lock();
retry:
	devlink = xa_find(&devlinks, indexp, ULONG_MAX, DEVLINK_REGISTERED);
	if (!devlink)
		goto unlock;

	if (!devlink_try_get(devlink))
		goto next;
	if (!net_eq(devlink_net(devlink), net)) {
		devlink_put(devlink);
		goto next;
	}
unlock:
	rcu_read_unlock();
	return devlink;

next:
	(*indexp)++;
	goto retry;
}

/**
 *	devlink_set_features - Set devlink supported features
 *
 *	@devlink: devlink
 *	@features: devlink supported features
 *
 *	This interface allows reload support to be advertised separately from
 *	devlink_alloc().
 */
void devlink_set_features(struct devlink *devlink, u64 features)
{
	WARN_ON(features & DEVLINK_F_RELOAD &&
		!devlink_reload_supported(devlink->ops));
	devlink->features = features;
}
EXPORT_SYMBOL_GPL(devlink_set_features);
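
/* Editor's note: a hedged usage sketch, not part of the original source.
 * A driver whose ops implement reload typically advertises that between
 * allocation and registration:
 *
 *	devlink_set_features(devlink, DEVLINK_F_RELOAD);
 *	devlink_register(devlink);
 */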

/**
 * devl_register - Register devlink instance
 * @devlink: devlink
 */
int devl_register(struct devlink *devlink)
{
	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
	devl_assert_locked(devlink);

	xa_set_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
	devlink_notify_register(devlink);

	return 0;
}
EXPORT_SYMBOL_GPL(devl_register);

void devlink_register(struct devlink *devlink)
{
	devl_lock(devlink);
	devl_register(devlink);
	devl_unlock(devlink);
}
EXPORT_SYMBOL_GPL(devlink_register);

/**
 * devl_unregister - Unregister devlink instance
 * @devlink: devlink
 */
void devl_unregister(struct devlink *devlink)
{
	ASSERT_DEVLINK_REGISTERED(devlink);
	devl_assert_locked(devlink);

	devlink_notify_unregister(devlink);
	xa_clear_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
}
EXPORT_SYMBOL_GPL(devl_unregister);

void devlink_unregister(struct devlink *devlink)
{
	devl_lock(devlink);
	devl_unregister(devlink);
	devl_unlock(devlink);
}
EXPORT_SYMBOL_GPL(devlink_unregister);

/**
 *	devlink_alloc_ns - Allocate new devlink instance resources
 *	in a specific namespace
 *
 *	@ops: devlink ops
 *	@priv_size: size of user private data
 *	@net: net namespace
 *	@dev: parent device
 *
 *	Allocate new devlink instance resources, including devlink index
 *	and name.
 */
struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
				 size_t priv_size, struct net *net,
				 struct device *dev)
{
	struct devlink *devlink;
	static u32 last_id;
	int ret;

	WARN_ON(!ops || !dev);
	if (!devlink_reload_actions_valid(ops))
		return NULL;

	devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
	if (!devlink)
		return NULL;

	ret = xa_alloc_cyclic(&devlinks, &devlink->index, devlink, xa_limit_31b,
			      &last_id, GFP_KERNEL);
	if (ret < 0)
		goto err_xa_alloc;

	devlink->netdevice_nb.notifier_call = devlink_port_netdevice_event;
	ret = register_netdevice_notifier_net(net, &devlink->netdevice_nb);
	if (ret)
		goto err_register_netdevice_notifier;

	devlink->dev = dev;
	devlink->ops = ops;
	xa_init_flags(&devlink->ports, XA_FLAGS_ALLOC);
	xa_init_flags(&devlink->snapshot_ids, XA_FLAGS_ALLOC);
	write_pnet(&devlink->_net, net);
	INIT_LIST_HEAD(&devlink->rate_list);
	INIT_LIST_HEAD(&devlink->linecard_list);
	INIT_LIST_HEAD(&devlink->sb_list);
	INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
	INIT_LIST_HEAD(&devlink->resource_list);
	INIT_LIST_HEAD(&devlink->param_list);
	INIT_LIST_HEAD(&devlink->region_list);
	INIT_LIST_HEAD(&devlink->reporter_list);
	INIT_LIST_HEAD(&devlink->trap_list);
	INIT_LIST_HEAD(&devlink->trap_group_list);
	INIT_LIST_HEAD(&devlink->trap_policer_list);
	INIT_RCU_WORK(&devlink->rwork, devlink_release);
	lockdep_register_key(&devlink->lock_key);
	mutex_init(&devlink->lock);
	lockdep_set_class(&devlink->lock, &devlink->lock_key);
	refcount_set(&devlink->refcount, 1);

	return devlink;

err_register_netdevice_notifier:
	xa_erase(&devlinks, devlink->index);
err_xa_alloc:
	kfree(devlink);
	return NULL;
}
EXPORT_SYMBOL_GPL(devlink_alloc_ns);
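
/* Editor's note: a hedged sketch of a typical allocation path, not part of
 * the original source. Most drivers use the devlink_alloc() wrapper from
 * include/net/devlink.h, which calls devlink_alloc_ns() with &init_net.
 * my_probe(), my_ops and struct my_priv are hypothetical names.
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		struct devlink *devlink;
 *		struct my_priv *priv;
 *
 *		devlink = devlink_alloc(&my_ops, sizeof(*priv), dev);
 *		if (!devlink)
 *			return -ENOMEM;
 *		priv = devlink_priv(devlink);
 *		devlink_register(devlink);
 *		return 0;
 *	}
 */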

/**
 *	devlink_free - Free devlink instance resources
 *
 *	@devlink: devlink
 */
void devlink_free(struct devlink *devlink)
{
	ASSERT_DEVLINK_NOT_REGISTERED(devlink);

	WARN_ON(!list_empty(&devlink->trap_policer_list));
	WARN_ON(!list_empty(&devlink->trap_group_list));
	WARN_ON(!list_empty(&devlink->trap_list));
	WARN_ON(!list_empty(&devlink->reporter_list));
	WARN_ON(!list_empty(&devlink->region_list));
	WARN_ON(!list_empty(&devlink->param_list));
	WARN_ON(!list_empty(&devlink->resource_list));
	WARN_ON(!list_empty(&devlink->dpipe_table_list));
	WARN_ON(!list_empty(&devlink->sb_list));
	WARN_ON(!list_empty(&devlink->rate_list));
	WARN_ON(!list_empty(&devlink->linecard_list));
	WARN_ON(!xa_empty(&devlink->ports));

	xa_destroy(&devlink->snapshot_ids);
	xa_destroy(&devlink->ports);

	WARN_ON_ONCE(unregister_netdevice_notifier_net(devlink_net(devlink),
						       &devlink->netdevice_nb));

	xa_erase(&devlinks, devlink->index);

	devlink_put(devlink);
}
EXPORT_SYMBOL_GPL(devlink_free);
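
/* Editor's note: a hedged sketch of the matching teardown order, not part of
 * the original source: unregister first, then free, since devlink_free()
 * asserts the instance is no longer registered. my_remove() is a hypothetical
 * name.
 *
 *	static void my_remove(struct devlink *devlink)
 *	{
 *		devlink_unregister(devlink);
 *		devlink_free(devlink);
 *	}
 */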

static void __net_exit devlink_pernet_pre_exit(struct net *net)
{
	struct devlink *devlink;
	u32 actions_performed;
	unsigned long index;
	int err;

	/* In case a network namespace is being destroyed, reload
	 * all devlink instances from this namespace into init_net.
	 */
	devlinks_xa_for_each_registered_get(net, index, devlink) {
		WARN_ON(!(devlink->features & DEVLINK_F_RELOAD));
		devl_lock(devlink);
		err = 0;
		if (devl_is_registered(devlink))
			err = devlink_reload(devlink, &init_net,
					     DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
					     DEVLINK_RELOAD_LIMIT_UNSPEC,
					     &actions_performed, NULL);
		devl_unlock(devlink);
		devlink_put(devlink);

		if (err && err != -EOPNOTSUPP)
			pr_warn("Failed to reload devlink instance into init_net\n");
	}
}

static struct pernet_operations devlink_pernet_ops __net_initdata = {
	.pre_exit = devlink_pernet_pre_exit,
};

static int __init devlink_init(void)
{
	int err;

	err = genl_register_family(&devlink_nl_family);
	if (err)
		goto out;
	err = register_pernet_subsys(&devlink_pernet_ops);

out:
	WARN_ON(err);
	return err;
}

subsys_initcall(devlink_init);