// SPDX-License-Identifier: GPL-2.0
/*
 * Hardware spinlock framework
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#define pr_fmt(fmt)    "%s: " fmt, __func__

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include "hwspinlock_internal.h"

/* retry delay used in atomic context */
#define HWSPINLOCK_RETRY_DELAY_US	100

/* radix tree tags */
#define HWSPINLOCK_UNUSED	(0) /* tags an hwspinlock as unused */

/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key id,
 * and provides an easy-to-use API which makes the hwspinlock core code
 * simple and easy to read.
 *
 * Radix trees are quick on lookups, and reasonably efficient in terms of
 * storage, especially with high density usages such as this framework
 * requires (a continuous range of integer keys, beginning with zero, is
 * used as the IDs of the hwspinlock instances).
 *
 * The radix tree API supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
 * tree, looking for an unused hwspinlock instance, is now reduced to a
 * single radix tree API call.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

/*
 * Synchronization of access to the tree is achieved using this mutex,
 * as the radix-tree API requires that users provide all synchronization.
 * A mutex is needed because we're using non-atomic radix tree allocations.
 */
static DEFINE_MUTEX(hwspinlock_tree_lock);


/**
 * __hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved (if
 *         requested)
 *
 * This function attempts to lock an hwspinlock, and will immediately
 * fail if the hwspinlock is already taken.
 *
 * Caution: If the mode is HWLOCK_RAW, the caller must protect the routine
 * that acquires the hardware lock with a mutex or spinlock. In some
 * scenarios, users need to perform time-consuming or sleepable operations
 * under the hardware lock, so they need a sleepable lock (such as a mutex)
 * to protect those operations.
 *
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock as
 * soon as possible. This is required in order to minimize remote cores polling
 * on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether they want their previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_trylock, spin_trylock_irq and
 * spin_trylock_irqsave.
 *
 * Returns: %0 if we successfully locked the hwspinlock or -EBUSY if
 * the hwspinlock was already taken.
 *
 * This function will never sleep.
 */
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	int ret;

	if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
		return -EINVAL;

	/*
	 * This spin_lock{_irq, _irqsave} serves three purposes:
	 *
	 * 1. Disable preemption, in order to minimize the period of time
	 *    in which the hwspinlock is taken. This is important in order
	 *    to minimize the possible polling on the hardware interconnect
	 *    by a remote user of this lock.
	 * 2. Make the hwspinlock SMP-safe (so we can take it from
	 *    additional contexts on the local host).
	 * 3. Ensure that in_atomic/might_sleep checks catch potential
	 *    problems with hwspinlock usage (e.g. scheduler checks like
	 *    'scheduling while atomic' etc.)
	 */
	switch (mode) {
	case HWLOCK_IRQSTATE:
		ret = spin_trylock_irqsave(&hwlock->lock, *flags);
		break;
	case HWLOCK_IRQ:
		ret = spin_trylock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
	case HWLOCK_IN_ATOMIC:
		ret = 1;
		break;
	default:
		ret = spin_trylock(&hwlock->lock);
		break;
	}

	/* is lock already taken by another context on the local cpu ? */
	if (!ret)
		return -EBUSY;

	/* try to take the hwspinlock device */
	ret = hwlock->bank->ops->trylock(hwlock);

	/* if hwlock is already taken, undo spin_trylock_* and exit */
	if (!ret) {
		switch (mode) {
		case HWLOCK_IRQSTATE:
			spin_unlock_irqrestore(&hwlock->lock, *flags);
			break;
		case HWLOCK_IRQ:
			spin_unlock_irq(&hwlock->lock);
			break;
		case HWLOCK_RAW:
		case HWLOCK_IN_ATOMIC:
			/* Nothing to do */
			break;
		default:
			spin_unlock(&hwlock->lock);
			break;
		}

		return -EBUSY;
	}

	/*
	 * We can be sure the other core's memory operations
	 * are observable to us only _after_ we successfully take
	 * the hwspinlock, and we must make sure that subsequent memory
	 * operations (both reads and writes) will not be reordered before
	 * we actually took the hwspinlock.
	 *
	 * Note: the implicit memory barrier of the spinlock above is too
	 * early, so we need this additional explicit memory barrier.
	 */
	mb();

	return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);
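
/*
 * Illustrative sketch (not part of this file): consumers normally call the
 * mode-specific wrappers from <linux/hwspinlock.h> instead of this function
 * directly. The hwlock pointer and shared counter below are hypothetical.
 *
 *	int update_shared_counter(struct hwspinlock *hwlock, u32 *counter)
 *	{
 *		unsigned long flags;
 *		int ret;
 *
 *		// expands to __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, &flags)
 *		ret = hwspin_trylock_irqsave(hwlock, flags);
 *		if (ret)
 *			return ret;	// -EBUSY if the lock is taken
 *
 *		(*counter)++;		// keep this window short
 *
 *		hwspin_unlock_irqrestore(hwlock, &flags);
 *		return 0;
 *	}
 */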

/**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @mode: mode which controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved (if
 *         requested)
 *
 * This function locks the given @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up after @to msecs have elapsed.
 *
 * Caution: If the mode is HWLOCK_RAW, the caller must protect the routine
 * that acquires the hardware lock with a mutex or spinlock. In some
 * scenarios, users need to perform time-consuming or sleepable operations
 * under the hardware lock, so they need a sleepable lock (such as a mutex)
 * to protect those operations.
 *
 * If the mode is HWLOCK_IN_ATOMIC (called from an atomic context) the timeout
 * is handled with busy-waiting delays, hence it shall not exceed a few msecs.
 *
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock as
 * soon as possible. This is required in order to minimize remote cores polling
 * on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether they want their previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
 *
 * Returns: %0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs).
 *
 * The function will never sleep.
 */
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
					int mode, unsigned long *flags)
{
	int ret;
	unsigned long expire, atomic_delay = 0;

	expire = msecs_to_jiffies(to) + jiffies;

	for (;;) {
		/* Try to take the hwspinlock */
		ret = __hwspin_trylock(hwlock, mode, flags);
		if (ret != -EBUSY)
			break;

		/*
		 * The lock is already taken, let's check if the user wants
		 * us to try again
		 */
		if (mode == HWLOCK_IN_ATOMIC) {
			udelay(HWSPINLOCK_RETRY_DELAY_US);
			atomic_delay += HWSPINLOCK_RETRY_DELAY_US;
			if (atomic_delay > to * 1000)
				return -ETIMEDOUT;
		} else {
			if (time_is_before_eq_jiffies(expire))
				return -ETIMEDOUT;
		}

		/*
		 * Allow platform-specific relax handlers to prevent
		 * hogging the interconnect (no sleeping, though)
		 */
		if (hwlock->bank->ops->relax)
			hwlock->bank->ops->relax(hwlock);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
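
/*
 * Illustrative sketch (not part of this file): waiting up to 100 msecs for
 * a lock shared with a remote core. The hwlock pointer and write_message()
 * are hypothetical.
 *
 *	int send_to_remote(struct hwspinlock *hwlock)
 *	{
 *		unsigned long flags;
 *		int ret;
 *
 *		// expands to __hwspin_lock_timeout(hwlock, 100, HWLOCK_IRQSTATE, &flags)
 *		ret = hwspin_lock_timeout_irqsave(hwlock, 100, flags);
 *		if (ret)
 *			return ret;	// -ETIMEDOUT if still busy after 100 msecs
 *
 *		write_message();	// critical section, keep it short
 *
 *		hwspin_unlock_irqrestore(hwlock, &flags);
 *		return 0;
 *	}
 */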

/**
 * __hwspin_unlock() - unlock a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
 * @flags: previous caller's interrupt state to restore (if requested)
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * (possibly) enable interrupts or restore their previous state.
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 *
 * The user decides whether local interrupts should be enabled or not, and
 * if yes, whether they want their previous state to be restored. It is up
 * to the user to choose the appropriate @mode of operation, exactly the
 * same way users decide between spin_unlock, spin_unlock_irq and
 * spin_unlock_irqrestore.
 *
 * The function will never sleep.
 */
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
		return;

	/*
	 * We must make sure that memory operations (both reads and writes),
	 * done before unlocking the hwspinlock, will not be reordered
	 * after the lock is released.
	 *
	 * That's the purpose of this explicit memory barrier.
	 *
	 * Note: the memory barrier induced by the spin_unlock below is too
	 * late; the other core is going to access memory soon after it will
	 * take the hwspinlock, and by then we want to be sure our memory
	 * operations are already observable.
	 */
	mb();

	hwlock->bank->ops->unlock(hwlock);

	/* Undo the spin_trylock{_irq, _irqsave} called while locking */
	switch (mode) {
	case HWLOCK_IRQSTATE:
		spin_unlock_irqrestore(&hwlock->lock, *flags);
		break;
	case HWLOCK_IRQ:
		spin_unlock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
	case HWLOCK_IN_ATOMIC:
		/* Nothing to do */
		break;
	default:
		spin_unlock(&hwlock->lock);
		break;
	}
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);
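
/*
 * Illustrative sketch (not part of this file) of the HWLOCK_RAW pattern that
 * the "Caution" notes above describe: in raw mode the core never touches
 * hwlock->lock, so the caller serializes local access itself. A sleepable
 * guard (here a hypothetical mutex) permits sleeping under the hardware lock.
 *
 *	static DEFINE_MUTEX(my_guard);	// hypothetical local guard
 *
 *	int do_slow_update(struct hwspinlock *hwlock)
 *	{
 *		int ret;
 *
 *		mutex_lock(&my_guard);
 *		ret = hwspin_lock_timeout_raw(hwlock, 10);
 *		if (!ret) {
 *			msleep(20);	// sleepable work under the hw lock
 *			hwspin_unlock_raw(hwlock);
 *		}
 *		mutex_unlock(&my_guard);
 *		return ret;
 *	}
 */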

/**
 * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
 * @hwlock_spec: hwlock specifier as found in the device tree
 *
 * This is a simple translation function, suitable for hwspinlock platform
 * drivers that only have a lock specifier length of 1.
 *
 * Returns: a relative index of the lock within a specified bank on success,
 * or -EINVAL on invalid specifier cell count.
 */
static inline int
of_hwspin_lock_simple_xlate(const struct of_phandle_args *hwlock_spec)
{
	if (WARN_ON(hwlock_spec->args_count != 1))
		return -EINVAL;

	return hwlock_spec->args[0];
}
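
/*
 * Illustrative device tree fragment (hypothetical nodes) matching this
 * one-cell specifier: the client references lock 2 of the bank, and may
 * name it so that of_hwspin_lock_get_id_byname() can look it up.
 *
 *	hwlock: hwspinlock@4a0f6000 {
 *		#hwlock-cells = <1>;
 *	};
 *
 *	client {
 *		hwlocks = <&hwlock 2>;
 *		hwlock-names = "tx";
 *	};
 */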

/**
 * of_hwspin_lock_get_id() - get lock id for an OF phandle-based specific lock
 * @np: device node from which to request the specific hwlock
 * @index: index of the hwlock in the list of values
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the phandle of the
 * hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns: the global lock id number on success, -EPROBE_DEFER if the
 * hwspinlock device is not yet registered, -EINVAL on invalid args
 * specifier value or an appropriate error as returned from the OF parsing
 * of the DT client node.
 */
int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	struct of_phandle_args args;
	struct hwspinlock *hwlock;
	struct radix_tree_iter iter;
	void **slot;
	int id;
	int ret;

	ret = of_parse_phandle_with_args(np, "hwlocks", "#hwlock-cells", index,
					 &args);
	if (ret)
		return ret;

	if (!of_device_is_available(args.np)) {
		ret = -ENOENT;
		goto out;
	}

	/* Find the hwspinlock device: we need its base_id */
	ret = -EPROBE_DEFER;
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) {
		hwlock = radix_tree_deref_slot(slot);
		if (unlikely(!hwlock))
			continue;
		if (radix_tree_deref_retry(hwlock)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (device_match_of_node(hwlock->bank->dev, args.np)) {
			ret = 0;
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out;

	id = of_hwspin_lock_simple_xlate(&args);
	if (id < 0 || id >= hwlock->bank->num_locks) {
		ret = -EINVAL;
		goto out;
	}
	id += hwlock->bank->base_id;

out:
	of_node_put(args.np);
	return ret ? ret : id;
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);
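
/*
 * Illustrative sketch (not part of this file): a client driver resolving its
 * first "hwlocks" entry to a global id at probe time. The pdev pointer and
 * surrounding error-handling context are hypothetical.
 *
 *	int id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
 *
 *	if (id < 0)
 *		return id;	// may be -EPROBE_DEFER; propagate it
 *
 *	hwlock = hwspin_lock_request_specific(id);
 *	if (!hwlock)
 *		return -EBUSY;
 */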

/**
 * of_hwspin_lock_get_id_byname() - get lock id for a specified hwlock name
 * @np: device node from which to request the specific hwlock
 * @name: hwlock name
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the specified name of
 * the hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns: the global lock id number on success, -EPROBE_DEFER if the
 * hwspinlock device is not yet registered, -EINVAL on invalid args
 * specifier value or an appropriate error as returned from the OF parsing
 * of the DT client node.
 */
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
	int index;

	if (!name)
		return -EINVAL;

	index = of_property_match_string(np, "hwlock-names", name);
	if (index < 0)
		return index;

	return of_hwspin_lock_get_id(np, index);
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id_byname);

static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
	struct hwspinlock *tmp;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
	if (ret) {
		if (ret == -EEXIST)
			pr_err("hwspinlock id %d already exists!\n", id);
		goto out;
	}

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);

	/* self-sanity check which should never fail */
	WARN_ON(tmp != hwlock);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}

static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
	struct hwspinlock *hwlock = NULL;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is not in use (tag is set) */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_err("hwspinlock %d still in use (or not present)\n", id);
		goto out;
	}

	hwlock = radix_tree_delete(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_err("failed to delete hwspinlock %d\n", id);
		goto out;
	}

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}

/**
 * hwspin_lock_register() - register a new hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @dev: the backing device
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks)
{
	struct hwspinlock *hwlock;
	int ret = 0, i;

	if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
							!ops->unlock) {
		pr_err("invalid parameters\n");
		return -EINVAL;
	}

	bank->dev = dev;
	bank->ops = ops;
	bank->base_id = base_id;
	bank->num_locks = num_locks;

	for (i = 0; i < num_locks; i++) {
		hwlock = &bank->lock[i];

		spin_lock_init(&hwlock->lock);
		hwlock->bank = bank;

		ret = hwspin_lock_register_single(hwlock, base_id + i);
		if (ret)
			goto reg_failed;
	}

	return 0;

reg_failed:
	while (--i >= 0)
		hwspin_lock_unregister_single(base_id + i);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);

/**
 * hwspin_lock_unregister() - unregister an hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
	struct hwspinlock *hwlock, *tmp;
	int i;

	for (i = 0; i < bank->num_locks; i++) {
		hwlock = &bank->lock[i];

		tmp = hwspin_lock_unregister_single(bank->base_id + i);
		if (!tmp)
			return -EBUSY;

		/* self-sanity check that should never fail */
		WARN_ON(tmp != hwlock);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);

static void devm_hwspin_lock_unreg(struct device *dev, void *res)
{
	hwspin_lock_unregister(*(struct hwspinlock_device **)res);
}

static int devm_hwspin_lock_device_match(struct device *dev, void *res,
					 void *data)
{
	struct hwspinlock_device **bank = res;

	if (WARN_ON(!bank || !*bank))
		return 0;

	return *bank == data;
}

/**
 * devm_hwspin_lock_unregister() - unregister an hw spinlock device for
 *				   a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_unregister(struct device *dev,
				struct hwspinlock_device *bank)
{
	int ret;

	ret = devres_release(dev, devm_hwspin_lock_unreg,
			     devm_hwspin_lock_device_match, bank);
	WARN_ON(ret);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_unregister);

/**
 * devm_hwspin_lock_register() - register a new hw spinlock device for
 *				 a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_register(struct device *dev,
			      struct hwspinlock_device *bank,
			      const struct hwspinlock_ops *ops,
			      int base_id, int num_locks)
{
	struct hwspinlock_device **ptr;
	int ret;

	ptr = devres_alloc(devm_hwspin_lock_unreg, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = hwspin_lock_register(bank, dev, ops, base_id, num_locks);
	if (!ret) {
		*ptr = bank;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_register);
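
/*
 * Illustrative provider sketch (not part of this file), loosely modeled on
 * existing bank drivers: the register layout, the my_* names and the
 * read-to-lock semantics are hypothetical assumptions.
 *
 *	static int my_hwspinlock_trylock(struct hwspinlock *lock)
 *	{
 *		void __iomem *addr = lock->priv;
 *
 *		// reading 0 means the lock was free and is now ours
 *		return readl(addr) == 0;
 *	}
 *
 *	static void my_hwspinlock_unlock(struct hwspinlock *lock)
 *	{
 *		writel(0, lock->priv);
 *	}
 *
 *	static const struct hwspinlock_ops my_hwspinlock_ops = {
 *		.trylock = my_hwspinlock_trylock,
 *		.unlock = my_hwspinlock_unlock,
 *	};
 *
 *	// in probe(): allocate the bank with its flexible lock[] array,
 *	// point each bank->lock[i].priv at its register, then call
 *	// devm_hwspin_lock_register(dev, bank, &my_hwspinlock_ops, 0, num);
 */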

/**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 * @hwlock: the target hwspinlock
 *
 * This is an internal function that prepares an hwspinlock instance
 * before it is given to the user. The function assumes that
 * hwspinlock_tree_lock is taken.
 *
 * Returns: %0 or positive to indicate success, and a negative value to
 * indicate an error (with the appropriate error code)
 */
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
	struct device *dev = hwlock->bank->dev;
	struct hwspinlock *tmp;
	int ret;

	/* prevent underlying implementation from being removed */
	if (!try_module_get(dev->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		return -EINVAL;
	}

	/* notify PM core that power is now needed */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) {
		dev_err(dev, "%s: can't power on device\n", __func__);
		pm_runtime_put_noidle(dev);
		module_put(dev->driver->owner);
		return ret;
	}

	ret = 0;

	/* mark hwspinlock as used, should not fail */
	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* self-sanity check that should never fail */
	WARN_ON(tmp != hwlock);

	return ret;
}

/**
 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns: the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
 */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	return hwlock_to_id(hwlock);
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);

/**
 * hwspin_lock_request() - request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: the address of the assigned hwspinlock, or %NULL on error
 */
struct hwspinlock *hwspin_lock_request(void)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* look for an unused lock */
	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
						0, 1, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("a free hwspinlock is not available\n");
		hwlock = NULL;
		goto out;
	}

	/* sanity check that should never fail */
	WARN_ON(ret > 1);

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);
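
/*
 * Illustrative sketch (not part of this file): with dynamic allocation the
 * id must be shared with the remote core out of band; send_id_to_remote()
 * stands in for a hypothetical mailbox/IPC call.
 *
 *	hwlock = hwspin_lock_request();
 *	if (!hwlock)
 *		return -EBUSY;
 *
 *	send_id_to_remote(hwspin_lock_get_id(hwlock));
 *
 *	// later, when the lock is no longer needed:
 *	hwspin_lock_free(hwlock);
 */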

/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: the address of the assigned hwspinlock, or %NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure this hwspinlock exists */
	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_warn("hwspinlock %u does not exist\n", id);
		goto out;
	}

	/* sanity check (this shouldn't happen) */
	WARN_ON(hwlock_to_id(hwlock) != id);

	/* make sure this hwspinlock is unused */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("hwspinlock %u is already in use\n", id);
		hwlock = NULL;
		goto out;
	}

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);

/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int hwspin_lock_free(struct hwspinlock *hwlock)
{
	struct device *dev;
	struct hwspinlock *tmp;
	int ret;

	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	dev = hwlock->bank->dev;
	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is used */
	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);
	if (ret == 1) {
		dev_err(dev, "%s: hwlock is already free\n", __func__);
		dump_stack();
		ret = -EINVAL;
		goto out;
	}

	/* notify the underlying device that power is not needed */
	pm_runtime_put(dev);

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* sanity check (this shouldn't happen) */
	WARN_ON(tmp != hwlock);

	module_put(dev->driver->owner);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);

static int devm_hwspin_lock_match(struct device *dev, void *res, void *data)
{
	struct hwspinlock **hwlock = res;

	if (WARN_ON(!hwlock || !*hwlock))
		return 0;

	return *hwlock == data;
}

static void devm_hwspin_lock_release(struct device *dev, void *res)
{
	hwspin_lock_free(*(struct hwspinlock **)res);
}

/**
 * devm_hwspin_lock_free() - free a specific hwspinlock for a managed device
 * @dev: the device to free the specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
	int ret;

	ret = devres_release(dev, devm_hwspin_lock_release,
			     devm_hwspin_lock_match, hwlock);
	WARN_ON(ret);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_free);

/**
 * devm_hwspin_lock_request() - request an hwspinlock for a managed device
 * @dev: the device to request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: the address of the assigned hwspinlock, or %NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
	struct hwspinlock **ptr, *hwlock;

	ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	hwlock = hwspin_lock_request();
	if (hwlock) {
		*ptr = hwlock;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request);

/**
 * devm_hwspin_lock_request_specific() - request for a specific hwspinlock for
 *					 a managed device
 * @dev: the device to request the specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: the address of the assigned hwspinlock, or %NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id)
{
	struct hwspinlock **ptr, *hwlock;

	ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	hwlock = hwspin_lock_request_specific(id);
	if (hwlock) {
		*ptr = hwlock;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request_specific);
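
/*
 * Illustrative sketch (not part of this file): the devres variants tie the
 * lock's lifetime to a client device, so probe() error paths and remove()
 * need no explicit hwspin_lock_free(). The id lookup is hypothetical.
 *
 *	static int my_client_probe(struct platform_device *pdev)
 *	{
 *		struct hwspinlock *hwlock;
 *		int id;
 *
 *		id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
 *		if (id < 0)
 *			return id;
 *
 *		hwlock = devm_hwspin_lock_request_specific(&pdev->dev, id);
 *		if (!hwlock)
 *			return -EBUSY;
 *
 *		// hwlock is released automatically on driver detach
 *		return 0;
 *	}
 */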

MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
959