// SPDX-License-Identifier: GPL-2.0
/*
 * Hardware spinlock framework
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include "hwspinlock_internal.h"

/* retry delay used in atomic context */
#define HWSPINLOCK_RETRY_DELAY_US	100

/* radix tree tags */
#define HWSPINLOCK_UNUSED	(0) /* tags an hwspinlock as unused */

/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key id,
 * and provides easy-to-use API which makes the hwspinlock core code simple
 * and easy to read.
 *
 * Radix trees are quick on lookups, and reasonably efficient in terms of
 * storage, especially with high density usages such as this framework
 * requires (a continuous range of integer keys, beginning with zero, is
 * used as the ID's of the hwspinlock instances).
 *
 * The radix tree API supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
 * tree, looking for an unused hwspinlock instance, is now reduced to a
 * single radix tree API call.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

/*
 * Synchronization of access to the tree is achieved using this mutex,
 * as the radix-tree API requires that users provide all synchronisation.
 * A mutex is needed because we're using non-atomic radix tree allocations.
 */
static DEFINE_MUTEX(hwspinlock_tree_lock);


/**
 * __hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer where the caller's interrupt state will be saved at (if
 *         requested)
 *
 * This function attempts to lock an hwspinlock, and will immediately
 * fail if the hwspinlock is already taken.
 *
 * Caution: If the mode is HWLOCK_RAW, the caller must protect the routine
 * that takes the hardware lock with a mutex or spinlock. In some scenarios,
 * the caller needs to perform time-consuming or sleepable operations under
 * the hardware lock, and therefore needs a sleepable lock (like a mutex) to
 * protect those operations.
 *
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock as
 * soon as possible. This is required in order to minimize remote cores polling
 * on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether their previous state should be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_trylock, spin_trylock_irq and
 * spin_trylock_irqsave.
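 *
 * As an illustrative sketch (not part of this file), callers normally go
 * through the static inline wrappers declared in <linux/hwspinlock.h> rather
 * than call this function directly; the shared_buf pointer below is a
 * hypothetical piece of memory shared with a remote core:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = hwspin_trylock_irqsave(hwlock, &flags);
 *	if (ret)
 *		return ret;	// -EBUSY: lock held elsewhere, retry later
 *
 *	// critical section: touch memory shared with the remote core
 *	shared_buf->owner_seq++;
 *
 *	hwspin_unlock_irqrestore(hwlock, &flags);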
 *
 * Returns: %0 if we successfully locked the hwspinlock or -EBUSY if
 * the hwspinlock was already taken.
 *
 * This function will never sleep.
 */
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	int ret;

	if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
		return -EINVAL;

	/*
	 * This spin_lock{_irq, _irqsave} serves three purposes:
	 *
	 * 1. Disable preemption, in order to minimize the period of time
	 *    in which the hwspinlock is taken. This is important in order
	 *    to minimize the possible polling on the hardware interconnect
	 *    by a remote user of this lock.
	 * 2. Make the hwspinlock SMP-safe (so we can take it from
	 *    additional contexts on the local host).
	 * 3. Ensure that in_atomic/might_sleep checks catch potential
	 *    problems with hwspinlock usage (e.g. scheduler checks like
	 *    'scheduling while atomic' etc.)
	 */
	switch (mode) {
	case HWLOCK_IRQSTATE:
		ret = spin_trylock_irqsave(&hwlock->lock, *flags);
		break;
	case HWLOCK_IRQ:
		ret = spin_trylock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
	case HWLOCK_IN_ATOMIC:
		ret = 1;
		break;
	default:
		ret = spin_trylock(&hwlock->lock);
		break;
	}

	/* is lock already taken by another context on the local cpu ? */
	if (!ret)
		return -EBUSY;

	/* try to take the hwspinlock device */
	ret = hwlock->bank->ops->trylock(hwlock);

	/* if hwlock is already taken, undo spin_trylock_* and exit */
	if (!ret) {
		switch (mode) {
		case HWLOCK_IRQSTATE:
			spin_unlock_irqrestore(&hwlock->lock, *flags);
			break;
		case HWLOCK_IRQ:
			spin_unlock_irq(&hwlock->lock);
			break;
		case HWLOCK_RAW:
		case HWLOCK_IN_ATOMIC:
			/* Nothing to do */
			break;
		default:
			spin_unlock(&hwlock->lock);
			break;
		}

		return -EBUSY;
	}

	/*
	 * We can be sure the other core's memory operations
	 * are observable to us only _after_ we successfully take
	 * the hwspinlock, and we must make sure that subsequent memory
	 * operations (both reads and writes) will not be reordered before
	 * we actually took the hwspinlock.
	 *
	 * Note: the implicit memory barrier of the spinlock above is too
	 * early, so we need this additional explicit memory barrier.
	 */
	mb();

	return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);

/**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @mode: mode which controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved at (if
 *         requested)
 *
 * This function locks the given @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up after @to msecs have elapsed.
 *
 * Caution: If the mode is HWLOCK_RAW, the caller must protect the routine
 * that takes the hardware lock with a mutex or spinlock. In some scenarios,
 * the caller needs to perform time-consuming or sleepable operations under
 * the hardware lock, and therefore needs a sleepable lock (like a mutex) to
 * protect those operations.
 *
 * If the mode is HWLOCK_IN_ATOMIC (called from an atomic context) the timeout
 * is handled with busy-waiting delays, hence it shall not exceed a few msecs.
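 *
 * As a minimal illustrative sketch (not part of this file), an atomic-context
 * caller would use the _in_atomic wrappers from <linux/hwspinlock.h> with a
 * small timeout budget; the 5 msecs below is an arbitrary example value:
 *
 *	ret = hwspin_lock_timeout_in_atomic(hwlock, 5);
 *	if (ret)
 *		return ret;	// -ETIMEDOUT: remote owner held it too long
 *	// short, non-sleeping critical section
 *	hwspin_unlock_in_atomic(hwlock);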
 *
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock as
 * soon as possible. This is required in order to minimize remote cores polling
 * on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether their previous state should be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
 *
 * Returns: %0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs).
 *
 * The function will never sleep.
 */
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
					int mode, unsigned long *flags)
{
	int ret;
	unsigned long expire, atomic_delay = 0;

	expire = msecs_to_jiffies(to) + jiffies;

	for (;;) {
		/* Try to take the hwspinlock */
		ret = __hwspin_trylock(hwlock, mode, flags);
		if (ret != -EBUSY)
			break;

		/*
		 * The lock is already taken, let's check if the user wants
		 * us to try again
		 */
		if (mode == HWLOCK_IN_ATOMIC) {
			udelay(HWSPINLOCK_RETRY_DELAY_US);
			atomic_delay += HWSPINLOCK_RETRY_DELAY_US;
			if (atomic_delay > to * 1000)
				return -ETIMEDOUT;
		} else {
			if (time_is_before_eq_jiffies(expire))
				return -ETIMEDOUT;
		}

		/*
		 * Allow platform-specific relax handlers to prevent
		 * hogging the interconnect (no sleeping, though)
		 */
		if (hwlock->bank->ops->relax)
			hwlock->bank->ops->relax(hwlock);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);

/**
 * __hwspin_unlock() - unlock a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
 * @flags: previous caller's interrupt state to restore (if requested)
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * (possibly) enable interrupts or restore their previous state.
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 *
 * The user decides whether local interrupts should be enabled or not, and
 * if yes, whether their previous state should be restored. It is up
 * to the user to choose the appropriate @mode of operation, exactly the
 * same way users decide between spin_unlock, spin_unlock_irq and
 * spin_unlock_irqrestore.
 *
 * The function will never sleep.
 */
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
		return;

	/*
	 * We must make sure that memory operations (both reads and writes),
	 * done before unlocking the hwspinlock, will not be reordered
	 * after the lock is released.
	 *
	 * That's the purpose of this explicit memory barrier.
	 *
	 * Note: the memory barrier induced by the spin_unlock below is too
	 * late; the other core is going to access memory soon after it will
	 * take the hwspinlock, and by then we want to be sure our memory
	 * operations are already observable.
	 */
	mb();

	hwlock->bank->ops->unlock(hwlock);

	/* Undo the spin_trylock{_irq, _irqsave} called while locking */
	switch (mode) {
	case HWLOCK_IRQSTATE:
		spin_unlock_irqrestore(&hwlock->lock, *flags);
		break;
	case HWLOCK_IRQ:
		spin_unlock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
	case HWLOCK_IN_ATOMIC:
		/* Nothing to do */
		break;
	default:
		spin_unlock(&hwlock->lock);
		break;
	}
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);

/**
 * hwspin_lock_bust() - bust a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to bust
 * @id: identifier of the remote lock holder, if applicable
 *
 * This function will bust a hwspinlock that was previously acquired as
 * long as the current owner of the lock matches the id given by the caller.
 *
 * Context: Process context.
 *
 * Returns: 0 on success, or -EINVAL if the hwspinlock does not exist, or
 * the bust operation fails, and -EOPNOTSUPP if the bust operation is not
 * defined for the hwspinlock.
 */
int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
{
	if (WARN_ON(!hwlock))
		return -EINVAL;

	if (!hwlock->bank->ops->bust) {
		pr_err("bust operation not defined\n");
		return -EOPNOTSUPP;
	}

	return hwlock->bank->ops->bust(hwlock, id);
}
EXPORT_SYMBOL_GPL(hwspin_lock_bust);

/**
 * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
 * @hwlock_spec: hwlock specifier as found in the device tree
 *
 * This is a simple translation function, suitable for hwspinlock platform
 * drivers that only have a lock specifier length of 1.
 *
 * Returns: a relative index of the lock within a specified bank on success,
 * or -EINVAL on invalid specifier cell count.
 */
static inline int
of_hwspin_lock_simple_xlate(const struct of_phandle_args *hwlock_spec)
{
	if (WARN_ON(hwlock_spec->args_count != 1))
		return -EINVAL;

	return hwlock_spec->args[0];
}

/**
 * of_hwspin_lock_get_id() - get lock id for an OF phandle-based specific lock
 * @np: device node from which to request the specific hwlock
 * @index: index of the hwlock in the list of values
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the phandle of the
 * hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns: the global lock id number on success, -EPROBE_DEFER if the
 * hwspinlock device is not yet registered, -EINVAL on invalid args
 * specifier value or an appropriate error as returned from the OF parsing
 * of the DT client node.
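 *
 * As an illustrative sketch (not part of this file), a hypothetical client
 * node could reference the third lock of a provider and resolve it at probe
 * time; the node, label and variable names below are made up:
 *
 *	// DT fragment
 *	client@0 {
 *		hwlocks = <&hwlock_dev 2>;
 *	};
 *
 *	// driver code
 *	id = of_hwspin_lock_get_id(dev->of_node, 0);
 *	if (id < 0)
 *		return id;	// may be -EPROBE_DEFER
 *	hwlock = hwspin_lock_request_specific(id);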
 */
int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	struct of_phandle_args args;
	struct hwspinlock *hwlock;
	struct radix_tree_iter iter;
	void **slot;
	int id;
	int ret;

	ret = of_parse_phandle_with_args(np, "hwlocks", "#hwlock-cells", index,
					 &args);
	if (ret)
		return ret;

	if (!of_device_is_available(args.np)) {
		ret = -ENOENT;
		goto out;
	}

	/* Find the hwspinlock device: we need its base_id */
	ret = -EPROBE_DEFER;
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) {
		hwlock = radix_tree_deref_slot(slot);
		if (unlikely(!hwlock))
			continue;
		if (radix_tree_deref_retry(hwlock)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (device_match_of_node(hwlock->bank->dev, args.np)) {
			ret = 0;
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out;

	id = of_hwspin_lock_simple_xlate(&args);
	if (id < 0 || id >= hwlock->bank->num_locks) {
		ret = -EINVAL;
		goto out;
	}
	id += hwlock->bank->base_id;

out:
	of_node_put(args.np);
	return ret ? ret : id;
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);

/**
 * of_hwspin_lock_get_id_byname() - get lock id for a specified hwlock name
 * @np: device node from which to request the specific hwlock
 * @name: hwlock name
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the specified name of
 * the hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns: the global lock id number on success, -EPROBE_DEFER if the
 * hwspinlock device is not yet registered, -EINVAL on invalid args
 * specifier value or an appropriate error as returned from the OF parsing
 * of the DT client node.
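 *
 * As an illustrative sketch (not part of this file), assuming the client node
 * also carries a hypothetical hwlock-names = "tx_lock" property matching its
 * hwlocks entry, the same id could be resolved by name:
 *
 *	id = of_hwspin_lock_get_id_byname(dev->of_node, "tx_lock");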
 */
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
	int index;

	if (!name)
		return -EINVAL;

	index = of_property_match_string(np, "hwlock-names", name);
	if (index < 0)
		return index;

	return of_hwspin_lock_get_id(np, index);
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id_byname);

static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
	struct hwspinlock *tmp;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
	if (ret) {
		if (ret == -EEXIST)
			pr_err("hwspinlock id %d already exists!\n", id);
		goto out;
	}

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);

	/* self-sanity check which should never fail */
	WARN_ON(tmp != hwlock);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}

static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
	struct hwspinlock *hwlock = NULL;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is not in use (tag is set) */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_err("hwspinlock %d still in use (or not present)\n", id);
		goto out;
	}

	hwlock = radix_tree_delete(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_err("failed to delete hwspinlock %d\n", id);
		goto out;
	}

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}

/**
 * hwspin_lock_register() - register a new hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @dev: the backing device
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks)
{
	struct hwspinlock *hwlock;
	int ret = 0, i;

	if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
							!ops->unlock) {
		pr_err("invalid parameters\n");
		return -EINVAL;
	}

	bank->dev = dev;
	bank->ops = ops;
	bank->base_id = base_id;
	bank->num_locks = num_locks;

	for (i = 0; i < num_locks; i++) {
		hwlock = &bank->lock[i];

		spin_lock_init(&hwlock->lock);
		hwlock->bank = bank;

		ret = hwspin_lock_register_single(hwlock, base_id + i);
		if (ret)
			goto reg_failed;
	}

	return 0;

reg_failed:
	while (--i >= 0)
		hwspin_lock_unregister_single(base_id + i);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);

/**
 * hwspin_lock_unregister() - unregister an hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
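 *
 * As an illustrative sketch (not part of this file), a hypothetical provider
 * driver would pair this with hwspin_lock_register() in its probe and remove
 * callbacks; priv, my_hwlock_ops, base_id and num_locks are made-up names:
 *
 *	// probe: the bank embeds num_locks struct hwspinlock entries
 *	ret = hwspin_lock_register(&priv->bank, dev, &my_hwlock_ops,
 *				   base_id, num_locks);
 *	if (ret)
 *		return ret;
 *
 *	// remove: fails with -EBUSY if any lock is still requested
 *	ret = hwspin_lock_unregister(&priv->bank);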
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
	struct hwspinlock *hwlock, *tmp;
	int i;

	for (i = 0; i < bank->num_locks; i++) {
		hwlock = &bank->lock[i];

		tmp = hwspin_lock_unregister_single(bank->base_id + i);
		if (!tmp)
			return -EBUSY;

		/* self-sanity check that should never fail */
		WARN_ON(tmp != hwlock);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);

static void devm_hwspin_lock_unreg(struct device *dev, void *res)
{
	hwspin_lock_unregister(*(struct hwspinlock_device **)res);
}

static int devm_hwspin_lock_device_match(struct device *dev, void *res,
					 void *data)
{
	struct hwspinlock_device **bank = res;

	if (WARN_ON(!bank || !*bank))
		return 0;

	return *bank == data;
}

/**
 * devm_hwspin_lock_unregister() - unregister an hw spinlock device for
 *				   a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_unregister(struct device *dev,
				struct hwspinlock_device *bank)
{
	int ret;

	ret = devres_release(dev, devm_hwspin_lock_unreg,
			     devm_hwspin_lock_device_match, bank);
	WARN_ON(ret);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_unregister);

/**
 * devm_hwspin_lock_register() - register a new hw spinlock device for
 *				 a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_register(struct device *dev,
			      struct hwspinlock_device *bank,
			      const struct hwspinlock_ops *ops,
			      int base_id, int num_locks)
{
	struct hwspinlock_device **ptr;
	int ret;

	ptr = devres_alloc(devm_hwspin_lock_unreg, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = hwspin_lock_register(bank, dev, ops, base_id, num_locks);
	if (!ret) {
		*ptr = bank;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_register);

/**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 * @hwlock: the target hwspinlock
 *
 * This is an internal function that prepares an hwspinlock instance
 * before it is given to the user. The function assumes that
 * hwspinlock_tree_lock is taken.
 *
 * Returns: %0 or positive to indicate success, and a negative value to
 * indicate an error (with the appropriate error code)
 */
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
	struct device *dev = hwlock->bank->dev;
	struct hwspinlock *tmp;
	int ret;

	/* prevent underlying implementation from being removed */
	if (!try_module_get(dev->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		return -EINVAL;
	}

	/* notify PM core that power is now needed */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) {
		dev_err(dev, "%s: can't power on device\n", __func__);
		pm_runtime_put_noidle(dev);
		module_put(dev->driver->owner);
		return ret;
	}

	ret = 0;

	/* mark hwspinlock as used, should not fail */
	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
				   HWSPINLOCK_UNUSED);

	/* self-sanity check that should never fail */
	WARN_ON(tmp != hwlock);

	return ret;
}

/**
 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns: the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
 */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	return hwlock_to_id(hwlock);
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);

/**
 * hwspin_lock_request() - request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: the address of the assigned hwspinlock, or %NULL on error
 */
struct hwspinlock *hwspin_lock_request(void)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* look for an unused lock */
	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
					 0, 1, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("a free hwspinlock is not available\n");
		hwlock = NULL;
		goto out;
	}

	/* sanity check that should never fail */
	WARN_ON(ret > 1);

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);

/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
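 *
 * As an illustrative sketch (not part of this file), a DT-based consumer
 * could combine this with of_hwspin_lock_get_id(); how the id is shared with
 * the remote core is platform-specific and only hinted at here:
 *
 *	id = of_hwspin_lock_get_id(dev->of_node, 0);
 *	if (id < 0)
 *		return id;
 *	hwlock = hwspin_lock_request_specific(id);
 *	if (!hwlock)
 *		return -EBUSY;
 *	// announce the lock id to the remote core, e.g. via shared
 *	// memory or a mailbox message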
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: the address of the assigned hwspinlock, or %NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure this hwspinlock exists */
	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_warn("hwspinlock %u does not exist\n", id);
		goto out;
	}

	/* sanity check (this shouldn't happen) */
	WARN_ON(hwlock_to_id(hwlock) != id);

	/* make sure this hwspinlock is unused */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("hwspinlock %u is already in use\n", id);
		hwlock = NULL;
		goto out;
	}

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);

/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int hwspin_lock_free(struct hwspinlock *hwlock)
{
	struct device *dev;
	struct hwspinlock *tmp;
	int ret;

	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	dev = hwlock->bank->dev;
	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is used */
	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
				 HWSPINLOCK_UNUSED);
	if (ret == 1) {
		dev_err(dev, "%s: hwlock is already free\n", __func__);
		dump_stack();
		ret = -EINVAL;
		goto out;
	}

	/* notify the underlying device that power is not needed */
	pm_runtime_put(dev);

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
				 HWSPINLOCK_UNUSED);

	/* sanity check (this shouldn't happen) */
	WARN_ON(tmp != hwlock);

	module_put(dev->driver->owner);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);

static int devm_hwspin_lock_match(struct device *dev, void *res, void *data)
{
	struct hwspinlock **hwlock = res;

	if (WARN_ON(!hwlock || !*hwlock))
		return 0;

	return *hwlock == data;
}

static void devm_hwspin_lock_release(struct device *dev, void *res)
{
	hwspin_lock_free(*(struct hwspinlock **)res);
}

/**
 * devm_hwspin_lock_free() - free a specific hwspinlock for a managed device
 * @dev: the device to free the specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
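 *
 * As an illustrative sketch (not part of this file), a consumer that used the
 * devm_* request variants normally never calls this directly: the lock is
 * released automatically on driver detach, and an explicit call is only
 * needed to give the lock back early:
 *
 *	hwlock = devm_hwspin_lock_request_specific(dev, id);
 *	if (!hwlock)
 *		return -EBUSY;
 *
 *	// ... use the lock ...
 *
 *	// optional early release, before the device is unbound
 *	devm_hwspin_lock_free(dev, hwlock);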
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
	int ret;

	ret = devres_release(dev, devm_hwspin_lock_release,
			     devm_hwspin_lock_match, hwlock);
	WARN_ON(ret);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_free);

/**
 * devm_hwspin_lock_request() - request an hwspinlock for a managed device
 * @dev: the device to request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: the address of the assigned hwspinlock, or %NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
	struct hwspinlock **ptr, *hwlock;

	ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	hwlock = hwspin_lock_request();
	if (hwlock) {
		*ptr = hwlock;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request);

/**
 * devm_hwspin_lock_request_specific() - request for a specific hwspinlock for
 *					 a managed device
 * @dev: the device to request the specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: the address of the assigned hwspinlock, or %NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id)
{
	struct hwspinlock **ptr, *hwlock;

	ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	hwlock = hwspin_lock_request_specific(id);
	if (hwlock) {
		*ptr = hwlock;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request_specific);

MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");