// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include <asm/sections.h>

#include "base.h"
#include "trace.h"

/*
 * A generic managed allocation: a devres_node header followed by the
 * caller's payload in data[].  The payload pointer (dr->data) is what
 * devres_alloc() callers see; container_of() converts back.
 */
struct devres {
	struct devres_node node;
	dr_release_t release;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_DMA_MINALIGN for data[] which will force the same
	 * alignment for struct devres when allocated by kmalloc().
	 */
	u8 __aligned(ARCH_DMA_MINALIGN) data[];
};

/*
 * A group is a pair of marker nodes (node[0] = open, node[1] = close)
 * inserted into the same devres list as regular entries.  @color is
 * scratch state used only by remove_nodes() during teardown.
 */
struct devres_group {
	struct devres_node node[2];
	void *id;
	int color;
	/* -- 8 pointers */
};

/*
 * Initialize @node so it can later be linked into a device's devres
 * list.  @release is invoked on teardown, @free_node frees the
 * containing object (see free_node() below).
 * Not exported to modules; presumably declared in base.h for use
 * elsewhere in the driver core.
 */
void devres_node_init(struct devres_node *node,
		      dr_node_release_t release,
		      dr_node_free_t free_node)
{
	INIT_LIST_HEAD(&node->entry);
	node->release = release;
	node->free_node = free_node;
}

/* Free the object containing @node via the free callback it was initialized with. */
static inline void free_node(struct devres_node *node)
{
	node->free_node(node);
}

/* Attach a human-readable name and payload size to @node for logging/tracing. */
void devres_set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

#ifdef CONFIG_DEBUG_DEVRES
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

/* Optional dmesg logging of devres operations, gated by the "log" module param. */
static void devres_dbg(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_err(dev, "DEVRES %3s %p %s (%zu bytes)\n",
			op, node, node->name, node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define devres_dbg(dev, node, op)	do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */

/* Emit a tracepoint (and optionally a debug message) for operation @op on @node. */
static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	trace_devres_log(dev, op, node, node->name, node->size);
	devres_dbg(dev, node, op);
}

/*
 * Release functions for devres group. These callbacks are used only
 * for identification.
 */
static void group_open_release(struct device *dev, struct devres_node *node)
{
	/* noop */
}

static void group_close_release(struct device *dev, struct devres_node *node)
{
	/* noop */
}

/*
 * If @node is a group marker, return its enclosing devres_group;
 * otherwise return NULL.  Identification is done by comparing the
 * release callback against the two marker functions above.
 */
static struct devres_group *node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}

/*
 * Compute the total allocation size (header + payload) for a devres of
 * @size payload bytes into *@tot_size.  Returns false on overflow.
 */
static bool check_dr_size(size_t size, size_t *tot_size)
{
	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(struct devres),
					size, tot_size)))
		return false;

	/* Actually allocate the full kmalloc bucket size. */
	*tot_size = kmalloc_size_roundup(*tot_size);

	return true;
}

/* Node release callback for plain devres entries: forward to the payload release. */
static void dr_node_release(struct device *dev, struct devres_node *node)
{
	struct devres *dr = container_of(node, struct devres, node);

	dr->release(dev, dr->data);
}

/* Node free callback for plain devres entries: the whole devres is one kmalloc. */
static void dr_node_free(struct devres_node *node)
{
	struct devres *dr = container_of(node, struct devres, node);

	kfree(dr);
}

/*
 * Allocate a struct devres with @size payload bytes on node @nid.
 * The header is always zeroed; the payload is zeroed only when @gfp
 * includes __GFP_ZERO.  __always_inline keeps the kmalloc caller
 * tracking pointing at the real caller.
 */
static __always_inline struct devres *alloc_dr(dr_release_t release,
					       size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct devres *dr;

	if (!check_dr_size(size, &tot_size))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	/* No need to clear memory twice */
	if (!(gfp & __GFP_ZERO))
		memset(dr, 0, offsetof(struct devres, data));

	devres_node_init(&dr->node, dr_node_release, dr_node_free);
	dr->release = release;
	return dr;
}

/* Link @node at the tail of @dev's devres list.  Caller must hold devres_lock. */
static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}

/*
 * Replace @old with @new in place, preserving list position (used by
 * devm_krealloc()).  Caller must hold devres_lock.
 */
static void replace_dr(struct device *dev,
		       struct devres_node *old, struct devres_node *new)
{
	devres_log(dev, old, "REPLACE");
	BUG_ON(!list_empty(&new->entry));
	list_replace(&old->entry, &new->entry);
}

/**
 * __devres_alloc_node - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 * @name: Name of the resource
 *
 * Allocate devres of @size bytes.  The allocated area is zeroed, then
 * associated with @release.  The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
			  const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	devres_set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc_node);

/**
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource.
 * @data: Data for @fn, the 3rd parameter of @fn
 *
 * Call @fn for each devres of @dev which is associated with @release
 * and for which @match returns 1.
 *
 * Iteration runs with devres_lock held and IRQs disabled, so @fn must
 * not sleep or re-enter devres APIs that take the lock.
 *
 * RETURNS:
 * void
 */
void devres_for_each_res(struct device *dev, dr_release_t release,
			 dr_match_t match, void *match_data,
			 void (*fn)(struct device *, void *, void *),
			 void *data)
{
	struct devres_node *node;
	struct devres_node *tmp;

	if (!fn)
		return;

	guard(spinlock_irqsave)(&dev->devres_lock);
	list_for_each_entry_safe_reverse(node, tmp,
					 &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		/* skip group markers and non-matching entries */
		if (node->release != dr_node_release)
			continue;
		if (dr->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		fn(dev, dr->data, data);
	}
}
EXPORT_SYMBOL_GPL(devres_for_each_res);

/* Free an unlinked devres via its node free callback. */
static inline void free_dr(struct devres *dr)
{
	free_node(&dr->node);
}

/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc().  @res must not be linked
 * into any device's devres list (BUG otherwise).
 */
void devres_free(void *res)
{
	if (res) {
		struct devres *dr = container_of(res, struct devres, data);

		BUG_ON(!list_empty(&dr->node.entry));
		free_dr(dr);
	}
}
EXPORT_SYMBOL_GPL(devres_free);

/* Add @node to @dev's devres list, taking devres_lock internally. */
void devres_node_add(struct device *dev, struct devres_node *node)
{
	guard(spinlock_irqsave)(&dev->devres_lock);

	add_dr(dev, node);
}

/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev.  @res should have been allocated
 * using devres_alloc().  On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);

	devres_node_add(dev, &dr->node);
}
EXPORT_SYMBOL_GPL(devres_add);

/*
 * Find the latest devres matching @release (and @match, if given).
 * Walks the list in reverse so the most recently added entry wins.
 * Caller must hold devres_lock.
 */
static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != dr_node_release)
			continue;
		if (dr->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}

/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1.  If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */
void *devres_find(struct device *dev, dr_release_t release,
		  dr_match_t match, void *match_data)
{
	struct devres *dr;

	guard(spinlock_irqsave)(&dev->devres_lock);
	dr = find_dr(dev, release, match, match_data);
	if (dr)
		return dr->data;

	return NULL;
}
EXPORT_SYMBOL_GPL(devres_find);

/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match return 1.  If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */
void *devres_get(struct device *dev, void *new_res,
		 dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	/*
	 * Explicit lock/unlock (rather than guard()) because the losing
	 * devres_free() must run after the lock has been dropped.
	 */
	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->release, match, match_data);
	if (!dr) {
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_res = NULL;	/* consumed - don't free below */
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	devres_free(new_res);

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);

/*
 * Unlink @node from @dev's devres list if it is actually on it.
 * Returns true if found and removed, false otherwise.
 */
bool devres_node_remove(struct device *dev, struct devres_node *node)
{
	struct devres_node *__node;

	guard(spinlock_irqsave)(&dev->devres_lock);
	list_for_each_entry_reverse(__node, &dev->devres_head, entry) {
		if (__node == node) {
			list_del_init(&node->entry);
			devres_log(dev, node, "REM");
			return true;
		}
	}

	return false;
}

/**
 * devres_remove - Find a device resource and remove it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and
 * returned.
 *
 * RETURNS:
 * Pointer to removed devres on success, NULL if not found.
 */
void *devres_remove(struct device *dev, dr_release_t release,
		    dr_match_t match, void *match_data)
{
	struct devres *dr;

	guard(spinlock_irqsave)(&dev->devres_lock);
	dr = find_dr(dev, release, match, match_data);
	if (dr) {
		list_del_init(&dr->node.entry);
		devres_log(dev, &dr->node, "REM");
		return dr->data;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(devres_remove);

/**
 * devres_destroy - Find a device resource and destroy it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and freed.
 *
 * Note that the release function for the resource will not be called,
 * only the devres-allocated data will be freed.  The caller becomes
 * responsible for freeing any other data.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_destroy(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_destroy);


/**
 * devres_release - Find a device resource and destroy it, calling release
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.
 * If found, the resource is removed atomically, the
 * release function called and the resource freed.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_release(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	/* release is invoked outside devres_lock (already dropped by devres_remove) */
	(*release)(dev, res);
	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_release);

/*
 * Move all nodes in [@first, @end) that should be torn down onto @todo.
 * Regular entries always move; a group moves iff it is wholly contained
 * in the range (see the second-pass comment).  Caller must hold
 * devres_lock.  Returns the number of non-group entries moved.
 */
static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	struct devres_node *node, *n;
	int cnt = 0, nr_groups = 0;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	node = list_entry(first, struct devres_node, entry);
	list_for_each_entry_safe_from(node, n, end, entry) {
		struct devres_group *grp;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them.  A group gets
	 * color value of two iff the group is wholly contained in
	 * [current node, end).  That is, for a closed group, both opening
	 * and closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	node = list_entry(first, struct devres_node, entry);
	list_for_each_entry_safe_from(node, n, end, entry) {
		struct devres_group *grp;

		/* only group markers remain after the first pass */
		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update current node or end.  The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}

/*
 * Invoke release callbacks and free every node on @todo, in reverse
 * (LIFO) order.  Runs without devres_lock - the nodes are already off
 * the device's list.
 */
static void release_nodes(struct device *dev, struct list_head *todo)
{
	struct devres_node *node, *tmp;

	list_for_each_entry_safe_reverse(node, tmp, todo, entry) {
		devres_log(dev, node, "REL");
		node->release(dev, node);
		free_node(node);
	}
}

/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev.  This function is
 * called on driver detach.
 *
 * RETURNS:
 * Number of released non-group resources on success, 0 if there was
 * nothing to release, -ENODEV if @dev looks uninitialized.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;
	LIST_HEAD(todo);
	int cnt;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;

	/* Nothing to release if list is empty */
	if (list_empty(&dev->devres_head))
		return 0;

	/* detach everything under the lock, release after dropping it */
	spin_lock_irqsave(&dev->devres_lock, flags);
	cnt = remove_nodes(dev, dev->devres_head.next, &dev->devres_head, &todo);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	release_nodes(dev, &todo);
	return cnt;
}

/*
 * Node free callback for the open marker: frees the whole group.
 * The close marker (node[1]) has no free callback - the group is one
 * allocation owned by node[0].
 */
static void devres_group_free(struct devres_node *node)
{
	struct devres_group *grp = container_of(node, struct devres_group, node[0]);

	kfree(grp);
}

/**
 * devres_open_group - Open a new devres group
 * @dev: Device to open devres group for
 * @id: Separator ID
 * @gfp: Allocation flags
 *
 * Open a new devres group for @dev with @id.  For @id, using a
 * pointer to an object which won't be used for another group is
 * recommended.  If @id is NULL, address-wise unique ID is created.
 *
 * RETURNS:
 * ID of the new group, NULL on failure.
 */
void *devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
	struct devres_group *grp;

	grp = kmalloc_obj(*grp, gfp);
	if (unlikely(!grp))
		return NULL;

	devres_node_init(&grp->node[0], &group_open_release, devres_group_free);
	devres_node_init(&grp->node[1], &group_close_release, NULL);
	devres_set_node_dbginfo(&grp->node[0], "grp<", 0);
	devres_set_node_dbginfo(&grp->node[1], "grp>", 0);
	/* default to the group's own address as unique ID */
	grp->id = grp;
	if (id)
		grp->id = id;
	grp->color = 0;

	/* only the open marker goes on the list now; the close marker
	 * is added by devres_close_group()
	 */
	devres_node_add(dev, &grp->node[0]);
	return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);

/*
 * Find devres group with ID @id.  If @id is NULL, look for the latest open
 * group.  Caller must hold devres_lock.
 */
static struct devres_group *find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_group *grp;

		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;	/* latest still-open group */
	}

	return NULL;
}

/**
 * devres_close_group - Close a devres group
 * @dev: Device to close devres group for
 * @id: ID of target group, can be NULL
 *
 * Close the group identified by @id.  If @id is NULL, the latest open
 * group is selected.
 */
void devres_close_group(struct device *dev, void *id)
{
	struct devres_group *grp;

	guard(spinlock_irqsave)(&dev->devres_lock);
	grp = find_group(dev, id);
	if (grp)
		add_dr(dev, &grp->node[1]);
	else
		WARN_ON(1);
}
EXPORT_SYMBOL_GPL(devres_close_group);

/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id.  If @id is NULL, the latest
 * open group is selected.  Note that removing a group doesn't affect
 * any other resources.
 */
void devres_remove_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		list_del_init(&grp->node[0].entry);
		list_del_init(&grp->node[1].entry);
		devres_log(dev, &grp->node[0], "REM");
	} else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* kfree(NULL) is a no-op on the WARN path */
	kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);

/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id.  If @id is
 * NULL, the latest open group is selected.  The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	LIST_HEAD(todo);
	int cnt = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);
	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		/* closed group: stop right after the close marker */
		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		cnt = remove_nodes(dev, first, end, &todo);
	} else if (list_empty(&dev->devres_head)) {
		/*
		 * dev is probably dying via devres_release_all(): groups
		 * have already been removed and are on the process of
		 * being released - don't touch and don't warn.
		 */
	} else {
		WARN_ON(1);
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	release_nodes(dev, &todo);

	return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);

/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */

/* The user-supplied callback and its argument. */
struct action_devres {
	void *data;
	void (*action)(void *);
};

/* A devres list node wrapping one custom action. */
struct devres_action {
	struct devres_node node;
	struct action_devres action;
};

/* Both the callback pointer and its data must match for a hit. */
static int devm_action_match(struct devres_action *devres, struct action_devres *target)
{
	return devres->action.action == target->action &&
	       devres->action.data == target->data;
}

/* Node release callback: run the user's action. */
static void devm_action_release(struct device *dev, struct devres_node *node)
{
	struct devres_action *devres = container_of(node, struct devres_action, node);

	devres->action.action(devres->action.data);
}

/* Node free callback: the devres_action is a single allocation. */
static void devm_action_free(struct devres_node *node)
{
	struct devres_action *action = container_of(node, struct devres_action, node);

	kfree(action);
}

/**
 * __devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 * @name: Name of the resource (for debugging purposes)
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 *
 * RETURNS:
 * 0 on success, -ENOMEM if the tracking structure cannot be allocated.
 */
int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name)
{
	struct devres_action *devres;

	devres = kzalloc_obj(*devres);
	if (!devres)
		return -ENOMEM;

	devres_node_init(&devres->node, devm_action_release, devm_action_free);
	devres_set_node_dbginfo(&devres->node, name, sizeof(*devres));

	devres->action.data = data;
	devres->action.action = action;

	devres_node_add(dev, &devres->node);
	return 0;
}
EXPORT_SYMBOL_GPL(__devm_add_action);

/*
 * Find the latest action entry matching @action/@data on @dev's list.
 * Caller must hold devres_lock.
 */
static struct devres_action *devres_action_find(struct device *dev,
						void (*action)(void *),
						void *data)
{
	struct devres_node *node;
	struct action_devres target = {
		.data = data,
		.action = action,
	};

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_action *dr = container_of(node, struct devres_action, node);

		if (node->release != devm_action_release)
			continue;
		if (devm_action_match(dr, &target))
			return dr;
	}

	return NULL;
}

/* Return true if a matching action is currently registered on @dev. */
bool devm_is_action_added(struct device *dev, void (*action)(void *), void *data)
{
	guard(spinlock_irqsave)(&dev->devres_lock);

	return !!devres_action_find(dev, action, data);
}
EXPORT_SYMBOL_GPL(devm_is_action_added);

/*
 * Find and unlink a matching action entry.  Returns the unlinked entry
 * (not yet freed, action not yet run) or ERR_PTR(-ENOENT).
 */
static struct devres_action *remove_action(struct device *dev,
					   void (*action)(void *),
					   void *data)
{
	struct devres_action *dr;

	guard(spinlock_irqsave)(&dev->devres_lock);

	dr = devres_action_find(dev, action, data);
	if (!dr)
		return ERR_PTR(-ENOENT);

	list_del_init(&dr->node.entry);
	devres_log(dev, &dr->node, "REM");

	return dr;
}

/**
 * devm_remove_action_nowarn() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 *
 * In contrast to devm_remove_action(), this function does not WARN() if no
 * entry could have been found.
 *
 * This should only be used if the action is contained in an object with
 * independent lifetime management, e.g. the Devres rust abstraction.
 *
 * Causing the warning from regular driver code most likely indicates an abuse
 * of the devres API.
 *
 * Returns: 0 on success, -ENOENT if no entry could have been found.
 */
int devm_remove_action_nowarn(struct device *dev,
			      void (*action)(void *),
			      void *data)
{
	struct devres_action *dr;

	dr = remove_action(dev, action, data);
	if (IS_ERR(dr))
		return PTR_ERR(dr);

	/* removed without running the action - caller owns the teardown */
	kfree(dr);

	return 0;
}
EXPORT_SYMBOL_GPL(devm_remove_action_nowarn);

/**
 * devm_release_action() - release previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Releases and removes instance of @action previously added by
 * devm_add_action().  Both action and data should match one of the
 * existing entries.
 */
void devm_release_action(struct device *dev, void (*action)(void *), void *data)
{
	struct devres_action *dr;

	dr = remove_action(dev, action, data);
	if (WARN_ON(IS_ERR(dr)))
		return;

	/* run the action now, outside devres_lock, then drop the entry */
	dr->action.action(dr->action.data);

	kfree(dr);
}
EXPORT_SYMBOL_GPL(devm_release_action);

/*
 * Managed kmalloc/kfree
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop - the devres free path releases the memory itself */
}

/* Match a devm_kmalloc() chunk by its payload address. */
static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
	return res == data;
}

/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc.  Memory allocated with this function is
 * automatically freed on driver detach.  Like all other devres
 * resources, guaranteed alignment is unsigned long long.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	if (unlikely(!size))
		return ZERO_SIZE_PTR;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
	if (unlikely(!dr))
		return NULL;

	/*
	 * This is named devm_kzalloc_release for historical reasons
	 * The initial implementation did not support kmalloc, only kzalloc
	 */
	devres_set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	devres_add(dev, dr->data);
	return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);

/**
 * devm_krealloc - Resource-managed krealloc()
 * @dev: Device to re-allocate memory for
 * @ptr: Pointer to the memory chunk to re-allocate
 * @new_size: New allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed krealloc().  Resizes the memory chunk allocated with devm_kmalloc().
 * Behaves similarly to regular krealloc(): if @ptr is NULL or ZERO_SIZE_PTR,
 * it's the equivalent of devm_kmalloc().  If new_size is zero, it frees the
 * previously allocated memory and returns ZERO_SIZE_PTR.  This function doesn't
 * change the order in which the release callback for the re-alloc'ed devres
 * will be called (except when falling back to devm_kmalloc() or when freeing
 * resources when new_size is zero).  The contents of the memory are preserved
 * up to the lesser of new and old sizes.
 */
void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
{
	size_t total_new_size, total_old_size;
	struct devres *old_dr, *new_dr;
	unsigned long flags;

	if (unlikely(!new_size)) {
		devm_kfree(dev, ptr);
		return ZERO_SIZE_PTR;
	}

	if (unlikely(ZERO_OR_NULL_PTR(ptr)))
		return devm_kmalloc(dev, new_size, gfp);

	if (WARN_ON(is_kernel_rodata((unsigned long)ptr)))
		/*
		 * We cannot reliably realloc a const string returned by
		 * devm_kstrdup_const().
		 */
		return NULL;

	if (!check_dr_size(new_size, &total_new_size))
		return NULL;

	total_old_size = ksize(container_of(ptr, struct devres, data));
	if (total_old_size == 0) {
		WARN(1, "Pointer doesn't point to dynamically allocated memory.");
		return NULL;
	}

	/*
	 * If new size is smaller or equal to the actual number of bytes
	 * allocated previously - just return the same pointer.
	 */
	if (total_new_size <= total_old_size)
		return ptr;

	/*
	 * Otherwise: allocate new, larger chunk.  We need to allocate before
	 * taking the lock as most probably the caller uses GFP_KERNEL.
	 * alloc_dr() will call check_dr_size() to reserve extra memory
	 * for struct devres automatically, so size @new_size user request
	 * is delivered to it directly as devm_kmalloc() does.
	 */
	new_dr = alloc_dr(devm_kmalloc_release,
			  new_size, gfp, dev_to_node(dev));
	if (!new_dr)
		return NULL;

	devres_set_node_dbginfo(&new_dr->node, "devm_krealloc_release", new_size);

	/*
	 * The spinlock protects the linked list against concurrent
	 * modifications but not the resource itself.
	 */
	spin_lock_irqsave(&dev->devres_lock, flags);

	old_dr = find_dr(dev, devm_kmalloc_release, devm_kmalloc_match, ptr);
	if (!old_dr) {
		spin_unlock_irqrestore(&dev->devres_lock, flags);
		free_dr(new_dr);
		WARN(1, "Memory chunk not managed or managed by a different device.");
		return NULL;
	}

	/* swap in the new chunk at the old one's list position */
	replace_dr(dev, &old_dr->node, &new_dr->node);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/*
	 * We can copy the memory contents after releasing the lock as we're
	 * no longer modifying the list links.
	 */
	memcpy(new_dr->data, old_dr->data,
	       total_old_size - offsetof(struct devres, data));
	/*
	 * Same for releasing the old devres - it's now been removed from the
	 * list.  This is also the reason why we must not use devm_kfree() - the
	 * links are no longer valid.
	 */
	free_dr(old_dr);

	return new_dr->data;
}
EXPORT_SYMBOL_GPL(devm_krealloc);

/**
 * devm_kstrdup - Allocate resource managed space and
 *                copy an existing string into that.
 * @dev: Device to allocate memory for
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
	if (!s)
		return NULL;

	/* +1 for the NUL terminator */
	return devm_kmemdup(dev, s, strlen(s) + 1, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup);

/**
 * devm_kstrdup_const - resource managed conditional string duplication
 * @dev: device for which to duplicate the string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Strings allocated by devm_kstrdup_const will be automatically freed when
 * the associated device is detached.
 *
 * RETURNS:
 * Source string if it is in .rodata section otherwise it falls back to
 * devm_kstrdup.
 */
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
{
	/* .rodata strings are immortal - no copy or tracking needed */
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return devm_kstrdup(dev, s, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup_const);

/**
 * devm_kvasprintf - Allocate resource managed space and format a string
 *		     into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @ap: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
		      va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	/* first pass: measure; @ap must stay untouched for the real print */
	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = devm_kmalloc(dev, len+1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len+1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(devm_kvasprintf);

/**
 * devm_kasprintf - Allocate resource managed space and format a string
 *		    into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @...: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kasprintf);

/**
 * devm_kfree - Resource-managed kfree
 * @dev: Device this memory belongs to
 * @p: Memory to free
 *
 * Free memory allocated with devm_kmalloc().
 */
void devm_kfree(struct device *dev, const void *p)
{
	int rc;

	/*
	 * Special cases: pointer to a string in .rodata returned by
	 * devm_kstrdup_const() or NULL/ZERO ptr.
	 */
	if (unlikely(is_kernel_rodata((unsigned long)p) || ZERO_OR_NULL_PTR(p)))
		return;

	rc = devres_destroy(dev, devm_kmalloc_release,
			    devm_kmalloc_match, (void *)p);
	/* warn on attempts to free memory this device doesn't manage */
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);

/**
 * devm_kmemdup - Resource-managed kmemdup
 * @dev: Device this memory belongs to
 * @src: Memory region to duplicate
 * @len: Memory region length
 * @gfp: GFP mask to use
 *
 * Duplicate region of a memory using resource managed kmalloc
 */
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = devm_kmalloc(dev, len, gfp);
	if (p)
		memcpy(p, src, len);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kmemdup);

/**
 * devm_kmemdup_const - conditionally duplicate and manage a region of memory
 *
 * @dev: Device this memory belongs to
 * @src: memory region to duplicate
 * @len: memory region length
@gfp: GFP mask to use 1216 * 1217 * Return: source address if it is in .rodata or the return value of kmemdup() 1218 * to which the function falls back otherwise. 1219 */ 1220 const void * 1221 devm_kmemdup_const(struct device *dev, const void *src, size_t len, gfp_t gfp) 1222 { 1223 if (is_kernel_rodata((unsigned long)src)) 1224 return src; 1225 1226 return devm_kmemdup(dev, src, len, gfp); 1227 } 1228 EXPORT_SYMBOL_GPL(devm_kmemdup_const); 1229 1230 struct pages_devres { 1231 unsigned long addr; 1232 unsigned int order; 1233 }; 1234 1235 static int devm_pages_match(struct device *dev, void *res, void *p) 1236 { 1237 struct pages_devres *devres = res; 1238 struct pages_devres *target = p; 1239 1240 return devres->addr == target->addr; 1241 } 1242 1243 static void devm_pages_release(struct device *dev, void *res) 1244 { 1245 struct pages_devres *devres = res; 1246 1247 free_pages(devres->addr, devres->order); 1248 } 1249 1250 /** 1251 * devm_get_free_pages - Resource-managed __get_free_pages 1252 * @dev: Device to allocate memory for 1253 * @gfp_mask: Allocation gfp flags 1254 * @order: Allocation size is (1 << order) pages 1255 * 1256 * Managed get_free_pages. Memory allocated with this function is 1257 * automatically freed on driver detach. 1258 * 1259 * RETURNS: 1260 * Address of allocated memory on success, 0 on failure. 
1261 */ 1262 1263 unsigned long devm_get_free_pages(struct device *dev, 1264 gfp_t gfp_mask, unsigned int order) 1265 { 1266 struct pages_devres *devres; 1267 unsigned long addr; 1268 1269 addr = __get_free_pages(gfp_mask, order); 1270 1271 if (unlikely(!addr)) 1272 return 0; 1273 1274 devres = devres_alloc(devm_pages_release, 1275 sizeof(struct pages_devres), GFP_KERNEL); 1276 if (unlikely(!devres)) { 1277 free_pages(addr, order); 1278 return 0; 1279 } 1280 1281 devres->addr = addr; 1282 devres->order = order; 1283 1284 devres_add(dev, devres); 1285 return addr; 1286 } 1287 EXPORT_SYMBOL_GPL(devm_get_free_pages); 1288 1289 /** 1290 * devm_free_pages - Resource-managed free_pages 1291 * @dev: Device this memory belongs to 1292 * @addr: Memory to free 1293 * 1294 * Free memory allocated with devm_get_free_pages(). Unlike free_pages, 1295 * there is no need to supply the @order. 1296 */ 1297 void devm_free_pages(struct device *dev, unsigned long addr) 1298 { 1299 struct pages_devres devres = { .addr = addr }; 1300 1301 WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match, 1302 &devres)); 1303 } 1304 EXPORT_SYMBOL_GPL(devm_free_pages); 1305 1306 static void devm_percpu_release(struct device *dev, void *pdata) 1307 { 1308 void __percpu *p; 1309 1310 p = *(void __percpu **)pdata; 1311 free_percpu(p); 1312 } 1313 1314 /** 1315 * __devm_alloc_percpu - Resource-managed alloc_percpu 1316 * @dev: Device to allocate per-cpu memory for 1317 * @size: Size of per-cpu memory to allocate 1318 * @align: Alignment of per-cpu memory to allocate 1319 * 1320 * Managed alloc_percpu. Per-cpu memory allocated with this function is 1321 * automatically freed on driver detach. 1322 * 1323 * RETURNS: 1324 * Pointer to allocated memory on success, NULL on failure. 
1325 */ 1326 void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, 1327 size_t align) 1328 { 1329 void *p; 1330 void __percpu *pcpu; 1331 1332 pcpu = __alloc_percpu(size, align); 1333 if (!pcpu) 1334 return NULL; 1335 1336 p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL); 1337 if (!p) { 1338 free_percpu(pcpu); 1339 return NULL; 1340 } 1341 1342 *(void __percpu **)p = pcpu; 1343 1344 devres_add(dev, p); 1345 1346 return pcpu; 1347 } 1348 EXPORT_SYMBOL_GPL(__devm_alloc_percpu); 1349