// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 */

#include <net/genetlink.h>
#define CREATE_TRACE_POINTS
#include <trace/events/devlink.h>

#include "devl_internal.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwmsg);
EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwerr);
EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_trap_report);

DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC);

static struct devlink *devlinks_xa_get(unsigned long index)
{
	struct devlink *devlink;

	rcu_read_lock();
	devlink = xa_find(&devlinks, &index, index, DEVLINK_REGISTERED);
	if (!devlink || !devlink_try_get(devlink))
		devlink = NULL;
	rcu_read_unlock();
	return devlink;
}

/* devlink_rels xarray contains 1:1 relationships between
 * devlink object and related nested devlink instance.
 * The xarray index is used to get the nested object from
 * the nested-in object code.
 */
static DEFINE_XARRAY_FLAGS(devlink_rels, XA_FLAGS_ALLOC1);

#define DEVLINK_REL_IN_USE XA_MARK_0

struct devlink_rel {
	u32 index;
	refcount_t refcount;
	u32 devlink_index;
	struct {
		u32 devlink_index;
		u32 obj_index;
		devlink_rel_notify_cb_t *notify_cb;
		devlink_rel_cleanup_cb_t *cleanup_cb;
		struct work_struct notify_work;
	} nested_in;
};

static void devlink_rel_free(struct devlink_rel *rel)
{
	xa_erase(&devlink_rels, rel->index);
	kfree(rel);
}

static void __devlink_rel_get(struct devlink_rel *rel)
{
	refcount_inc(&rel->refcount);
}

static void __devlink_rel_put(struct devlink_rel *rel)
{
	if (refcount_dec_and_test(&rel->refcount))
		devlink_rel_free(rel);
}

static void devlink_rel_nested_in_notify_work(struct work_struct *work)
{
	struct devlink_rel *rel = container_of(work, struct devlink_rel,
					       nested_in.notify_work);
	struct devlink *devlink;

	devlink = devlinks_xa_get(rel->nested_in.devlink_index);
	if (!devlink)
		goto rel_put;
	if (!devl_trylock(devlink)) {
		devlink_put(devlink);
		goto reschedule_work;
	}
	if (!devl_is_registered(devlink)) {
		devl_unlock(devlink);
		devlink_put(devlink);
		goto rel_put;
	}
	if (!xa_get_mark(&devlink_rels, rel->index, DEVLINK_REL_IN_USE))
		rel->nested_in.cleanup_cb(devlink, rel->nested_in.obj_index, rel->index);
	rel->nested_in.notify_cb(devlink, rel->nested_in.obj_index);
	devl_unlock(devlink);
	devlink_put(devlink);

rel_put:
	__devlink_rel_put(rel);
	return;

reschedule_work:
	schedule_work(&rel->nested_in.notify_work);
}

static void devlink_rel_nested_in_notify_work_schedule(struct devlink_rel *rel)
{
	__devlink_rel_get(rel);
	schedule_work(&rel->nested_in.notify_work);
}

static struct devlink_rel *devlink_rel_alloc(void)
{
	struct devlink_rel *rel;
	static u32 next;
	int err;

	rel = kzalloc(sizeof(*rel), GFP_KERNEL);
	if (!rel)
		return ERR_PTR(-ENOMEM);

	err = xa_alloc_cyclic(&devlink_rels, &rel->index, rel,
			      xa_limit_32b, &next, GFP_KERNEL);
	if (err) {
		kfree(rel);
		return ERR_PTR(err);
	}

	refcount_set(&rel->refcount, 1);
	INIT_WORK(&rel->nested_in.notify_work,
		  &devlink_rel_nested_in_notify_work);
	return rel;
}

static void devlink_rel_put(struct devlink *devlink)
{
	struct devlink_rel *rel = devlink->rel;

	if (!rel)
		return;
	xa_clear_mark(&devlink_rels, rel->index, DEVLINK_REL_IN_USE);
	devlink_rel_nested_in_notify_work_schedule(rel);
	__devlink_rel_put(rel);
	devlink->rel = NULL;
}

void devlink_rel_nested_in_clear(u32 rel_index)
{
	xa_clear_mark(&devlink_rels, rel_index, DEVLINK_REL_IN_USE);
}

int devlink_rel_nested_in_add(u32 *rel_index, u32 devlink_index,
			      u32 obj_index, devlink_rel_notify_cb_t *notify_cb,
			      devlink_rel_cleanup_cb_t *cleanup_cb,
			      struct devlink *devlink)
{
	struct devlink_rel *rel = devlink_rel_alloc();

	ASSERT_DEVLINK_NOT_REGISTERED(devlink);

	if (IS_ERR(rel))
		return PTR_ERR(rel);

	rel->devlink_index = devlink->index;
	rel->nested_in.devlink_index = devlink_index;
	rel->nested_in.obj_index = obj_index;
	rel->nested_in.notify_cb = notify_cb;
	rel->nested_in.cleanup_cb = cleanup_cb;
	*rel_index = rel->index;
	xa_set_mark(&devlink_rels, rel->index, DEVLINK_REL_IN_USE);
	devlink->rel = rel;
	return 0;
}

void devlink_rel_nested_in_notify(struct devlink *devlink)
{
	struct devlink_rel *rel = devlink->rel;

	if (!rel)
		return;
	devlink_rel_nested_in_notify_work_schedule(rel);
}

static struct devlink_rel *devlink_rel_find(unsigned long rel_index)
{
	return xa_find(&devlink_rels, &rel_index, rel_index,
		       DEVLINK_REL_IN_USE);
}

static struct devlink *devlink_rel_devlink_get_lock(u32 rel_index)
{
	struct devlink *devlink;
	struct devlink_rel *rel;
	u32 devlink_index;

	if (!rel_index)
		return NULL;
	xa_lock(&devlink_rels);
	rel = devlink_rel_find(rel_index);
	if (rel)
		devlink_index = rel->devlink_index;
	xa_unlock(&devlink_rels);
	if (!rel)
		return NULL;
	devlink = devlinks_xa_get(devlink_index);
	if (!devlink)
		return NULL;
	devl_lock(devlink);
	if (!devl_is_registered(devlink)) {
		devl_unlock(devlink);
		devlink_put(devlink);
		return NULL;
	}
	return devlink;
}

int devlink_rel_devlink_handle_put(struct sk_buff *msg, struct devlink *devlink,
				   u32 rel_index, int attrtype,
				   bool *msg_updated)
{
	struct net *net = devlink_net(devlink);
	struct devlink *rel_devlink;
	int err;

	rel_devlink = devlink_rel_devlink_get_lock(rel_index);
	if (!rel_devlink)
		return 0;
	err = devlink_nl_put_nested_handle(msg, net, rel_devlink, attrtype);
	devl_unlock(rel_devlink);
	devlink_put(rel_devlink);
	if (!err && msg_updated)
		*msg_updated = true;
	return err;
}

void *devlink_priv(struct devlink *devlink)
{
	return &devlink->priv;
}
EXPORT_SYMBOL_GPL(devlink_priv);

struct devlink *priv_to_devlink(void *priv)
{
	return container_of(priv, struct devlink, priv);
}
EXPORT_SYMBOL_GPL(priv_to_devlink);

struct device *devlink_to_dev(const struct devlink *devlink)
{
	return devlink->dev;
}
EXPORT_SYMBOL_GPL(devlink_to_dev);

struct net *devlink_net(const struct devlink *devlink)
{
	return read_pnet(&devlink->_net);
}
EXPORT_SYMBOL_GPL(devlink_net);

void devl_assert_locked(struct devlink *devlink)
{
	lockdep_assert_held(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_assert_locked);

#ifdef CONFIG_LOCKDEP
/* For use in conjunction with LOCKDEP only e.g. rcu_dereference_protected() */
bool devl_lock_is_held(struct devlink *devlink)
{
	return lockdep_is_held(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_lock_is_held);
#endif

void devl_lock(struct devlink *devlink)
{
	mutex_lock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_lock);

int devl_trylock(struct devlink *devlink)
{
	return mutex_trylock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_trylock);

void devl_unlock(struct devlink *devlink)
{
	mutex_unlock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_unlock);

/**
 * devlink_try_get() - try to obtain a reference on a devlink instance
 * @devlink: instance to reference
 *
 * Obtain a reference on a devlink instance. A reference on a devlink instance
 * only implies that it's safe to take the instance lock. It does not imply
 * that the instance is registered, use devl_is_registered() after taking
 * the instance lock to check registration status.
 */
struct devlink *__must_check devlink_try_get(struct devlink *devlink)
{
	if (refcount_inc_not_zero(&devlink->refcount))
		return devlink;
	return NULL;
}

static void devlink_release(struct work_struct *work)
{
	struct devlink *devlink;

	devlink = container_of(to_rcu_work(work), struct devlink, rwork);

	mutex_destroy(&devlink->lock);
	lockdep_unregister_key(&devlink->lock_key);
	kfree(devlink);
}

void devlink_put(struct devlink *devlink)
{
	if (refcount_dec_and_test(&devlink->refcount))
		queue_rcu_work(system_wq, &devlink->rwork);
}

struct devlink *devlinks_xa_find_get(struct net *net, unsigned long *indexp)
{
	struct devlink *devlink = NULL;

	rcu_read_lock();
retry:
	devlink = xa_find(&devlinks, indexp, ULONG_MAX, DEVLINK_REGISTERED);
	if (!devlink)
		goto unlock;

	if (!devlink_try_get(devlink))
		goto next;
	if (!net_eq(devlink_net(devlink), net)) {
		devlink_put(devlink);
		goto next;
	}
unlock:
	rcu_read_unlock();
	return devlink;

next:
	(*indexp)++;
	goto retry;
}

/**
 * devl_register - Register devlink instance
 * @devlink: devlink
 */
int devl_register(struct devlink *devlink)
{
	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
	devl_assert_locked(devlink);

	xa_set_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
	devlink_notify_register(devlink);
	devlink_rel_nested_in_notify(devlink);

	return 0;
}
EXPORT_SYMBOL_GPL(devl_register);

void devlink_register(struct devlink *devlink)
{
	devl_lock(devlink);
	devl_register(devlink);
	devl_unlock(devlink);
}
EXPORT_SYMBOL_GPL(devlink_register);

/**
 * devl_unregister - Unregister devlink instance
 * @devlink: devlink
 */
void devl_unregister(struct devlink *devlink)
{
	ASSERT_DEVLINK_REGISTERED(devlink);
	devl_assert_locked(devlink);

	devlink_notify_unregister(devlink);
	xa_clear_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
	devlink_rel_put(devlink);
}
EXPORT_SYMBOL_GPL(devl_unregister);

void devlink_unregister(struct devlink *devlink)
{
	devl_lock(devlink);
	devl_unregister(devlink);
	devl_unlock(devlink);
}
EXPORT_SYMBOL_GPL(devlink_unregister);

/**
 * devlink_alloc_ns - Allocate new devlink instance resources
 * in specific namespace
 *
 * @ops: ops
 * @priv_size: size of user private data
 * @net: net namespace
 * @dev: parent device
 *
 * Allocate new devlink instance resources, including devlink index
 * and name.
 */
struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
				 size_t priv_size, struct net *net,
				 struct device *dev)
{
	struct devlink *devlink;
	static u32 last_id;
	int ret;

	WARN_ON(!ops || !dev);
	if (!devlink_reload_actions_valid(ops))
		return NULL;

	devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
	if (!devlink)
		return NULL;

	ret = xa_alloc_cyclic(&devlinks, &devlink->index, devlink, xa_limit_31b,
			      &last_id, GFP_KERNEL);
	if (ret < 0)
		goto err_xa_alloc;

	devlink->dev = dev;
	devlink->ops = ops;
	xa_init_flags(&devlink->ports, XA_FLAGS_ALLOC);
	xa_init_flags(&devlink->params, XA_FLAGS_ALLOC);
	xa_init_flags(&devlink->snapshot_ids, XA_FLAGS_ALLOC);
	xa_init_flags(&devlink->nested_rels, XA_FLAGS_ALLOC);
	write_pnet(&devlink->_net, net);
	INIT_LIST_HEAD(&devlink->rate_list);
	INIT_LIST_HEAD(&devlink->linecard_list);
	INIT_LIST_HEAD(&devlink->sb_list);
	INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
	INIT_LIST_HEAD(&devlink->resource_list);
	INIT_LIST_HEAD(&devlink->region_list);
	INIT_LIST_HEAD(&devlink->reporter_list);
	INIT_LIST_HEAD(&devlink->trap_list);
	INIT_LIST_HEAD(&devlink->trap_group_list);
	INIT_LIST_HEAD(&devlink->trap_policer_list);
	INIT_RCU_WORK(&devlink->rwork, devlink_release);
	lockdep_register_key(&devlink->lock_key);
	mutex_init(&devlink->lock);
	lockdep_set_class(&devlink->lock, &devlink->lock_key);
	refcount_set(&devlink->refcount, 1);

	return devlink;

err_xa_alloc:
	kfree(devlink);
	return NULL;
}
EXPORT_SYMBOL_GPL(devlink_alloc_ns);

/**
 * devlink_free - Free devlink instance resources
 *
 * @devlink: devlink
 */
void devlink_free(struct devlink *devlink)
{
	ASSERT_DEVLINK_NOT_REGISTERED(devlink);

	WARN_ON(!list_empty(&devlink->trap_policer_list));
	WARN_ON(!list_empty(&devlink->trap_group_list));
	WARN_ON(!list_empty(&devlink->trap_list));
	WARN_ON(!list_empty(&devlink->reporter_list));
	WARN_ON(!list_empty(&devlink->region_list));
	WARN_ON(!list_empty(&devlink->resource_list));
	WARN_ON(!list_empty(&devlink->dpipe_table_list));
	WARN_ON(!list_empty(&devlink->sb_list));
	WARN_ON(!list_empty(&devlink->rate_list));
	WARN_ON(!list_empty(&devlink->linecard_list));
	WARN_ON(!xa_empty(&devlink->ports));

	xa_destroy(&devlink->nested_rels);
	xa_destroy(&devlink->snapshot_ids);
	xa_destroy(&devlink->params);
	xa_destroy(&devlink->ports);

	xa_erase(&devlinks, devlink->index);

	devlink_put(devlink);
}
EXPORT_SYMBOL_GPL(devlink_free);

static void __net_exit devlink_pernet_pre_exit(struct net *net)
{
	struct devlink *devlink;
	u32 actions_performed;
	unsigned long index;
	int err;

	/* In case network namespace is getting destroyed, reload
	 * all devlink instances from this namespace into init_net.
	 */
	devlinks_xa_for_each_registered_get(net, index, devlink) {
		devl_lock(devlink);
		err = 0;
		if (devl_is_registered(devlink))
			err = devlink_reload(devlink, &init_net,
					     DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
					     DEVLINK_RELOAD_LIMIT_UNSPEC,
					     &actions_performed, NULL);
		devl_unlock(devlink);
		devlink_put(devlink);
		if (err && err != -EOPNOTSUPP)
			pr_warn("Failed to reload devlink instance into init_net\n");
	}
}

static struct pernet_operations devlink_pernet_ops __net_initdata = {
	.pre_exit = devlink_pernet_pre_exit,
};

static struct notifier_block devlink_port_netdevice_nb = {
	.notifier_call = devlink_port_netdevice_event,
};

static int __init devlink_init(void)
{
	int err;

	err = genl_register_family(&devlink_nl_family);
	if (err)
		goto out;
	err = register_pernet_subsys(&devlink_pernet_ops);
	if (err)
		goto out;
	err = register_netdevice_notifier(&devlink_port_netdevice_nb);

out:
	WARN_ON(err);
	return err;
}

subsys_initcall(devlink_init);