// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include <asm/sections.h>

#include "base.h"
#include "trace.h"

struct devres_node {
	struct list_head		entry;
	dr_release_t			release;
	const char			*name;
	size_t				size;
};

struct devres {
	struct devres_node		node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
	 * buffer alignment as if it was allocated by plain kmalloc().
	 */
	u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};

struct devres_group {
	struct devres_node		node[2];
	void				*id;
	int				color;
	/* -- 8 pointers */
};

static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

#ifdef CONFIG_DEBUG_DEVRES
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

static void devres_dbg(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_err(dev, "DEVRES %3s %p %s (%zu bytes)\n",
			op, node, node->name, node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define devres_dbg(dev, node, op)	do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */

static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	trace_devres_log(dev, op, node, node->name, node->size);
	devres_dbg(dev, node, op);
}

/*
 * Release functions for devres group.  These callbacks are used only
 * for identification.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}

static struct devres_group * node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}

static bool check_dr_size(size_t size, size_t *tot_size)
{
	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(struct devres),
					size, tot_size)))
		return false;

	return true;
}

static __always_inline struct devres * alloc_dr(dr_release_t release,
						size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct devres *dr;

	if (!check_dr_size(size, &tot_size))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	/* No need to clear memory twice */
	if (!(gfp & __GFP_ZERO))
		memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}

static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}

static void replace_dr(struct device *dev,
		       struct devres_node *old, struct devres_node *new)
{
	devres_log(dev, old, "REPLACE");
	BUG_ON(!list_empty(&new->entry));
	list_replace(&old->entry, &new->entry);
}

/**
 * __devres_alloc_node - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 * @name: Name of the resource
 *
 * Allocate devres of @size bytes.  The allocated area is zeroed, then
 * associated with @release.  The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
			  const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc_node);

/**
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource.
 * @data: Data for @fn, the 3rd parameter of @fn
 *
 * Call @fn for each devres of @dev which is associated with @release
 * and for which @match returns 1.
 *
 * RETURNS:
 *	void
 */
void devres_for_each_res(struct device *dev, dr_release_t release,
			 dr_match_t match, void *match_data,
			 void (*fn)(struct device *, void *, void *),
			 void *data)
{
	struct devres_node *node;
	struct devres_node *tmp;
	unsigned long flags;

	if (!fn)
		return;

	spin_lock_irqsave(&dev->devres_lock, flags);
	list_for_each_entry_safe_reverse(node, tmp,
					 &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		fn(dev, dr->data, data);
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);

/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc().
 */
void devres_free(void *res)
{
	if (res) {
		struct devres *dr = container_of(res, struct devres, data);

		BUG_ON(!list_empty(&dr->node.entry));
		kfree(dr);
	}
}
EXPORT_SYMBOL_GPL(devres_free);

/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev.  @res should have been allocated
 * using devres_alloc().  On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);

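/*
 * Example (illustrative sketch, not part of this file): the canonical
 * devres_alloc()/devres_add() pairing used by drivers to make a custom
 * resource automatically released on detach.  foo_devres, foo_release
 * and foo_setup are hypothetical names.
 *
 *	struct foo_devres {
 *		void __iomem *base;
 *	};
 *
 *	static void foo_release(struct device *dev, void *res)
 *	{
 *		struct foo_devres *this = res;
 *
 *		iounmap(this->base);
 *	}
 *
 *	static int foo_setup(struct device *dev, resource_size_t start)
 *	{
 *		struct foo_devres *dr;
 *
 *		dr = devres_alloc(foo_release, sizeof(*dr), GFP_KERNEL);
 *		if (!dr)
 *			return -ENOMEM;
 *
 *		dr->base = ioremap(start, SZ_4K);
 *		if (!dr->base) {
 *			devres_free(dr);	// not added yet, free directly
 *			return -ENOMEM;
 *		}
 *
 *		devres_add(dev, dr);		// foo_release() runs on detach
 *		return 0;
 *	}
 */
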
static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}

/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1.  If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */
void * devres_find(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_find);

/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match returns 1.  If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */
void * devres_get(struct device *dev, void *new_res,
		  dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->node.release, match, match_data);
	if (!dr) {
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_res = NULL;
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	devres_free(new_res);

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);

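/*
 * Example (hypothetical sketch): devres_get() as a "find or create"
 * primitive.  Two callers racing to set up shared per-device state both
 * end up with the same instance; the loser's freshly allocated copy is
 * freed.  foo_state and foo_state_release are made-up names.
 *
 *	static struct foo_state *foo_get_state(struct device *dev)
 *	{
 *		struct foo_state *new;
 *
 *		new = devres_alloc(foo_state_release, sizeof(*new),
 *				   GFP_KERNEL);
 *		if (!new)
 *			return NULL;
 *
 *		// Returns the already-registered instance if one exists,
 *		// otherwise registers and returns @new.
 *		return devres_get(dev, new, NULL, NULL);
 *	}
 */
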
/**
 * devres_remove - Find a device resource and remove it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and
 * returned.
 *
 * RETURNS:
 * Pointer to removed devres on success, NULL if not found.
 */
void * devres_remove(struct device *dev, dr_release_t release,
		     dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	if (dr) {
		list_del_init(&dr->node.entry);
		devres_log(dev, &dr->node, "REM");
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_remove);

/**
 * devres_destroy - Find a device resource and destroy it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and freed.
 *
 * Note that the release function for the resource will not be called,
 * only the devres-allocated data will be freed.  The caller becomes
 * responsible for freeing any other data.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_destroy(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_destroy);

/**
 * devres_release - Find a device resource and destroy it, calling release
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically, the
 * release function called and the resource freed.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_release(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	(*release)(dev, res);
	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_release);

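/*
 * Example (hypothetical): tearing down one managed resource before
 * detach.  devres_release() runs the release callback and then frees
 * the bookkeeping; devres_destroy() would skip the callback and leave
 * the underlying object to the caller.  foo_release and foo_match are
 * made-up names.
 *
 *	static void foo_shutdown_early(struct device *dev, void *which)
 *	{
 *		// Invokes foo_release() on the matching entry, then frees
 *		// the devres itself.  -ENOENT means nothing matched.
 *		int rc = devres_release(dev, foo_release, foo_match, which);
 *
 *		WARN_ON(rc);
 *	}
 */
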
static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	struct devres_node *node, *n;
	int cnt = 0, nr_groups = 0;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	node = list_entry(first, struct devres_node, entry);
	list_for_each_entry_safe_from(node, n, end, entry) {
		struct devres_group *grp;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them.  A group gets
	 * color value of two iff the group is wholly contained in
	 * [current node, end).  That is, for a closed group, both opening
	 * and closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	node = list_entry(first, struct devres_node, entry);
	list_for_each_entry_safe_from(node, n, end, entry) {
		struct devres_group *grp;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update current node or end.  The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}

static void release_nodes(struct device *dev, struct list_head *todo)
{
	struct devres *dr, *tmp;

	/* Release.  Note that both devres and devres_group are
	 * handled as devres in the following loop.  This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, todo, node.entry) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}
}

/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev.  This function is
 * called on driver detach.
 *
 * RETURNS:
 * The number of released non-group resources, or -ENODEV if the
 * devres list looks uninitialized.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;
	LIST_HEAD(todo);
	int cnt;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;

	/* Nothing to release if list is empty */
	if (list_empty(&dev->devres_head))
		return 0;

	spin_lock_irqsave(&dev->devres_lock, flags);
	cnt = remove_nodes(dev, dev->devres_head.next, &dev->devres_head, &todo);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	release_nodes(dev, &todo);
	return cnt;
}

/**
 * devres_open_group - Open a new devres group
 * @dev: Device to open devres group for
 * @id: Separator ID
 * @gfp: Allocation flags
 *
 * Open a new devres group for @dev with @id.  For @id, using a
 * pointer to an object which won't be used for another group is
 * recommended.  If @id is NULL, an address-wise unique ID is created.
 *
 * RETURNS:
 * ID of the new group, NULL on failure.
 */
void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
	struct devres_group *grp;
	unsigned long flags;

	grp = kmalloc(sizeof(*grp), gfp);
	if (unlikely(!grp))
		return NULL;

	grp->node[0].release = &group_open_release;
	grp->node[1].release = &group_close_release;
	INIT_LIST_HEAD(&grp->node[0].entry);
	INIT_LIST_HEAD(&grp->node[1].entry);
	set_node_dbginfo(&grp->node[0], "grp<", 0);
	set_node_dbginfo(&grp->node[1], "grp>", 0);
	grp->id = grp;
	if (id)
		grp->id = id;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &grp->node[0]);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);

/* Find devres group with ID @id.  If @id is NULL, look for the latest. */
static struct devres_group * find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_group *grp;

		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;
	}

	return NULL;
}

/**
 * devres_close_group - Close a devres group
 * @dev: Device to close devres group for
 * @id: ID of target group, can be NULL
 *
 * Close the group identified by @id.  If @id is NULL, the latest open
 * group is selected.
 */
void devres_close_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp)
		add_dr(dev, &grp->node[1]);
	else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);

/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id.  If @id is NULL, the latest
 * open group is selected.  Note that removing a group doesn't affect
 * any other resources.
 */
void devres_remove_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		list_del_init(&grp->node[0].entry);
		list_del_init(&grp->node[1].entry);
		devres_log(dev, &grp->node[0], "REM");
	} else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);

/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id.  If @id is
 * NULL, the latest open group is selected.  The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	LIST_HEAD(todo);
	int cnt = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		cnt = remove_nodes(dev, first, end, &todo);
		spin_unlock_irqrestore(&dev->devres_lock, flags);

		release_nodes(dev, &todo);
	} else {
		WARN_ON(1);
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	}

	return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);

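/*
 * Example (hypothetical): using a devres group to roll back a multi-step
 * initialization as one unit.  foo_init_hw() is a made-up helper that
 * allocates several managed resources internally.
 *
 *	static int foo_probe_stage(struct device *dev)
 *	{
 *		void *group;
 *		int rc;
 *
 *		group = devres_open_group(dev, NULL, GFP_KERNEL);
 *		if (!group)
 *			return -ENOMEM;
 *
 *		rc = foo_init_hw(dev);	// allocates devm resources
 *		if (rc) {
 *			// Releases everything acquired since the group
 *			// was opened, in reverse order.
 *			devres_release_group(dev, group);
 *			return rc;
 *		}
 *
 *		devres_close_group(dev, group);
 *		return 0;
 *	}
 */
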
/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */

struct action_devres {
	void *data;
	void (*action)(void *);
};

static int devm_action_match(struct device *dev, void *res, void *p)
{
	struct action_devres *devres = res;
	struct action_devres *target = p;

	return devres->action == target->action &&
	       devres->data == target->data;
}

static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}

/**
 * devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 */
int devm_add_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres *devres;

	devres = devres_alloc(devm_action_release,
			      sizeof(struct action_devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_add_action);

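/*
 * Example (hypothetical): scheduling a one-shot cleanup with
 * devm_add_action().  foo_disable_clocks() and struct foo_priv are
 * made-up names.
 *
 *	static void foo_disable_clocks(void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		clk_disable_unprepare(priv->clk);
 *	}
 *
 *	static int foo_start(struct device *dev, struct foo_priv *priv)
 *	{
 *		int rc;
 *
 *		rc = clk_prepare_enable(priv->clk);
 *		if (rc)
 *			return rc;
 *
 *		// On failure the action is NOT registered, so undo by
 *		// hand; the devm_add_action_or_reset() helper automates
 *		// exactly this pattern.
 *		rc = devm_add_action(dev, foo_disable_clocks, priv);
 *		if (rc)
 *			foo_disable_clocks(priv);
 *		return rc;
 *	}
 */
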
/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_remove_action);

/**
 * devm_release_action() - release previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Releases and removes instance of @action previously added by
 * devm_add_action().  Both action and data should match one of the
 * existing entries.
 */
void devm_release_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_release_action);

/*
 * Managed kmalloc/kfree
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}

static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
	return res == data;
}

/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc.  Memory allocated with this function is
 * automatically freed on driver detach.  Like all other devres
 * resources, guaranteed alignment is unsigned long long.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	if (unlikely(!size))
		return ZERO_SIZE_PTR;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
	if (unlikely(!dr))
		return NULL;

	/*
	 * This is named devm_kzalloc_release for historical reasons.
	 * The initial implementation did not support kmalloc, only kzalloc.
	 */
	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	devres_add(dev, dr->data);
	return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);

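/*
 * Example (hypothetical): the common probe() pattern.  The allocation
 * lives exactly as long as the device/driver binding; no kfree() in
 * error paths or in remove().  struct foo_priv is a made-up type;
 * devm_kzalloc() is the zeroing wrapper provided by <linux/device.h>.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_priv *priv;
 *
 *		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *
 *		platform_set_drvdata(pdev, priv);
 *		return 0;
 *	}
 */
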
/**
 * devm_krealloc - Resource-managed krealloc()
 * @dev: Device to re-allocate memory for
 * @ptr: Pointer to the memory chunk to re-allocate
 * @new_size: New allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed krealloc().  Resizes the memory chunk allocated with devm_kmalloc().
 * Behaves similarly to regular krealloc(): if @ptr is NULL or ZERO_SIZE_PTR,
 * it's the equivalent of devm_kmalloc().  If new_size is zero, it frees the
 * previously allocated memory and returns ZERO_SIZE_PTR.  This function doesn't
 * change the order in which the release callback for the re-alloc'ed devres
 * will be called (except when falling back to devm_kmalloc() or when freeing
 * resources when new_size is zero).  The contents of the memory are preserved
 * up to the lesser of new and old sizes.
 */
void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
{
	size_t total_new_size, total_old_size;
	struct devres *old_dr, *new_dr;
	unsigned long flags;

	if (unlikely(!new_size)) {
		devm_kfree(dev, ptr);
		return ZERO_SIZE_PTR;
	}

	if (unlikely(ZERO_OR_NULL_PTR(ptr)))
		return devm_kmalloc(dev, new_size, gfp);

	if (WARN_ON(is_kernel_rodata((unsigned long)ptr)))
		/*
		 * We cannot reliably realloc a const string returned by
		 * devm_kstrdup_const().
		 */
		return NULL;

	if (!check_dr_size(new_size, &total_new_size))
		return NULL;

	total_old_size = ksize(container_of(ptr, struct devres, data));
	if (total_old_size == 0) {
		WARN(1, "Pointer doesn't point to dynamically allocated memory.");
		return NULL;
	}

	/*
	 * If new size is smaller or equal to the actual number of bytes
	 * allocated previously - just return the same pointer.
	 */
	if (total_new_size <= total_old_size)
		return ptr;

	/*
	 * Otherwise: allocate new, larger chunk.  We need to allocate before
	 * taking the lock as most probably the caller uses GFP_KERNEL.
	 */
	new_dr = alloc_dr(devm_kmalloc_release,
			  total_new_size, gfp, dev_to_node(dev));
	if (!new_dr)
		return NULL;

	/*
	 * The spinlock protects the linked list against concurrent
	 * modifications but not the resource itself.
	 */
	spin_lock_irqsave(&dev->devres_lock, flags);

	old_dr = find_dr(dev, devm_kmalloc_release, devm_kmalloc_match, ptr);
	if (!old_dr) {
		spin_unlock_irqrestore(&dev->devres_lock, flags);
		kfree(new_dr);
		WARN(1, "Memory chunk not managed or managed by a different device.");
		return NULL;
	}

	replace_dr(dev, &old_dr->node, &new_dr->node);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/*
	 * We can copy the memory contents after releasing the lock as we're
	 * no longer modifying the list links.
	 */
	memcpy(new_dr->data, old_dr->data,
	       total_old_size - offsetof(struct devres, data));
	/*
	 * Same for releasing the old devres - it's now been removed from the
	 * list.  This is also the reason why we must not use devm_kfree() - the
	 * links are no longer valid.
	 */
	kfree(old_dr);

	return new_dr->data;
}
EXPORT_SYMBOL_GPL(devm_krealloc);

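/*
 * Example (hypothetical): growing a managed buffer.  As with plain
 * krealloc(), always assign through a temporary so the old pointer is
 * not lost on failure.
 *
 *	static int foo_grow(struct device *dev, u8 **buf, size_t n)
 *	{
 *		u8 *tmp;
 *
 *		tmp = devm_krealloc(dev, *buf, n, GFP_KERNEL);
 *		if (!tmp)
 *			return -ENOMEM;	// *buf is still valid and managed
 *
 *		*buf = tmp;
 *		return 0;
 *	}
 */
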
/**
 * devm_kstrdup - Allocate resource managed space and
 *                copy an existing string into that.
 * @dev: Device to allocate memory for
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = devm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);
	return buf;
}
EXPORT_SYMBOL_GPL(devm_kstrdup);

/**
 * devm_kstrdup_const - resource managed conditional string duplication
 * @dev: device for which to duplicate the string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Strings allocated by devm_kstrdup_const will be automatically freed when
 * the associated device is detached.
 *
 * RETURNS:
 * Source string if it is located in .rodata section, otherwise it falls back
 * to devm_kstrdup.
 */
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return devm_kstrdup(dev, s, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup_const);

/**
 * devm_kvasprintf - Allocate resource managed space and format a string
 *		     into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @ap: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
		      va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = devm_kmalloc(dev, len+1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len+1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(devm_kvasprintf);

/**
 * devm_kasprintf - Allocate resource managed space and format a string
 *		    into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @...: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kasprintf);

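/*
 * Example (hypothetical): building a managed name string.  The result
 * is freed automatically on detach, so it can safely be handed to
 * structures that outlive the local scope.
 *
 *	static const char *foo_make_name(struct device *dev, int index)
 *	{
 *		return devm_kasprintf(dev, GFP_KERNEL, "%s-port%d",
 *				      dev_name(dev), index);
 *	}
 */
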
/**
 * devm_kfree - Resource-managed kfree
 * @dev: Device this memory belongs to
 * @p: Memory to free
 *
 * Free memory allocated with devm_kmalloc().
 */
void devm_kfree(struct device *dev, const void *p)
{
	int rc;

	/*
	 * Special cases: pointer to a string in .rodata returned by
	 * devm_kstrdup_const() or NULL/ZERO ptr.
	 */
	if (unlikely(is_kernel_rodata((unsigned long)p) || ZERO_OR_NULL_PTR(p)))
		return;

	rc = devres_destroy(dev, devm_kmalloc_release,
			    devm_kmalloc_match, (void *)p);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);

/**
 * devm_kmemdup - Resource-managed kmemdup
 * @dev: Device this memory belongs to
 * @src: Memory region to duplicate
 * @len: Memory region length
 * @gfp: GFP mask to use
 *
 * Duplicate a region of memory using resource-managed kmalloc.
 */
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = devm_kmalloc(dev, len, gfp);
	if (p)
		memcpy(p, src, len);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kmemdup);

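/*
 * Example (hypothetical): taking a managed private copy of caller data,
 * e.g. configuration that must outlive probe().  struct foo_pdata and
 * foo_copy_pdata are made-up names.
 *
 *	static int foo_copy_pdata(struct device *dev, struct foo_priv *priv,
 *				  const struct foo_pdata *src)
 *	{
 *		priv->pdata = devm_kmemdup(dev, src, sizeof(*src),
 *					   GFP_KERNEL);
 *		if (!priv->pdata)
 *			return -ENOMEM;
 *
 *		return 0;
 *	}
 */
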
struct pages_devres {
	unsigned long addr;
	unsigned int order;
};

static int devm_pages_match(struct device *dev, void *res, void *p)
{
	struct pages_devres *devres = res;
	struct pages_devres *target = p;

	return devres->addr == target->addr;
}

static void devm_pages_release(struct device *dev, void *res)
{
	struct pages_devres *devres = res;

	free_pages(devres->addr, devres->order);
}

/**
 * devm_get_free_pages - Resource-managed __get_free_pages
 * @dev: Device to allocate memory for
 * @gfp_mask: Allocation gfp flags
 * @order: Allocation size is (1 << order) pages
 *
 * Managed get_free_pages.  Memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Address of allocated memory on success, 0 on failure.
 */
unsigned long devm_get_free_pages(struct device *dev,
				  gfp_t gfp_mask, unsigned int order)
{
	struct pages_devres *devres;
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);

	if (unlikely(!addr))
		return 0;

	devres = devres_alloc(devm_pages_release,
			      sizeof(struct pages_devres), GFP_KERNEL);
	if (unlikely(!devres)) {
		free_pages(addr, order);
		return 0;
	}

	devres->addr = addr;
	devres->order = order;

	devres_add(dev, devres);
	return addr;
}
EXPORT_SYMBOL_GPL(devm_get_free_pages);

/**
 * devm_free_pages - Resource-managed free_pages
 * @dev: Device this memory belongs to
 * @addr: Memory to free
 *
 * Free memory allocated with devm_get_free_pages().  Unlike free_pages,
 * there is no need to supply the @order.
 */
void devm_free_pages(struct device *dev, unsigned long addr)
{
	struct pages_devres devres = { .addr = addr };

	WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);

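/*
 * Example (hypothetical): a managed page-order allocation for a scratch
 * area, released early once it is no longer needed.
 *
 *	static int foo_use_scratch(struct device *dev)
 *	{
 *		unsigned long buf;
 *
 *		buf = devm_get_free_pages(dev, GFP_KERNEL | __GFP_ZERO, 1);
 *		if (!buf)
 *			return -ENOMEM;
 *
 *		// ... use the two zeroed pages at (void *)buf ...
 *
 *		devm_free_pages(dev, buf);	// note: no order argument
 *		return 0;
 *	}
 */
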
static void devm_percpu_release(struct device *dev, void *pdata)
{
	void __percpu *p;

	p = *(void __percpu **)pdata;
	free_percpu(p);
}

static int devm_percpu_match(struct device *dev, void *data, void *p)
{
	struct devres *devr = container_of(data, struct devres, data);

	return *(void **)devr->data == p;
}

/**
 * __devm_alloc_percpu - Resource-managed alloc_percpu
 * @dev: Device to allocate per-cpu memory for
 * @size: Size of per-cpu memory to allocate
 * @align: Alignment of per-cpu memory to allocate
 *
 * Managed alloc_percpu.  Per-cpu memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
				   size_t align)
{
	void *p;
	void __percpu *pcpu;

	pcpu = __alloc_percpu(size, align);
	if (!pcpu)
		return NULL;

	p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
	if (!p) {
		free_percpu(pcpu);
		return NULL;
	}

	*(void __percpu **)p = pcpu;

	devres_add(dev, p);

	return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);

/**
 * devm_free_percpu - Resource-managed free_percpu
 * @dev: Device this memory belongs to
 * @pdata: Per-cpu memory to free
 *
 * Free memory allocated with devm_alloc_percpu().
 */
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
	WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
			       (__force void *)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);

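/*
 * Example (hypothetical): per-cpu counters with a managed lifetime.
 * devm_alloc_percpu() is the typed wrapper around __devm_alloc_percpu()
 * provided by <linux/device.h>; struct foo_priv is a made-up type.
 *
 *	static int foo_init_stats(struct device *dev, struct foo_priv *priv)
 *	{
 *		priv->rx_count = devm_alloc_percpu(dev, u64);
 *		if (!priv->rx_count)
 *			return -ENOMEM;
 *
 *		this_cpu_inc(*priv->rx_count);	// hot-path update
 *		return 0;
 *	}
 */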