xref: /linux/drivers/base/power/qos.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value: register requests
 * Watchers of QoS value: get notified when the target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register a per-device notification callback using the
 * dev_pm_qos_*_notifier API. The notification chain data is stored in the
 * per-device constraint data struct.
 *
 * Notes about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored into the device
 *   dev_pm_info.
 * . To minimize the data usage by the per-device constraints, the data struct
 *   is only allocated at the first call to dev_pm_qos_add_request.
 * . The data is later freed when the device is removed from the system.
 * . A global mutex protects the constraints users from the data being
 *   allocated and freed.
 */
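
/*
 * Illustrative sketch of the two roles described above (not part of this
 * file; "dev", my_req, my_nb and my_notify are hypothetical).  A dependent
 * limits the device's resume latency, a watcher asks to be notified when
 * the aggregate value changes:
 *
 *	static struct dev_pm_qos_request my_req;
 *	static struct notifier_block my_nb = { .notifier_call = my_notify };
 *
 *	dev_pm_qos_add_request(dev, &my_req, DEV_PM_QOS_RESUME_LATENCY, 100);
 *	dev_pm_qos_add_notifier(dev, &my_nb);
 */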

#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
	struct dev_pm_qos *qos = dev->power.qos;
	struct pm_qos_flags *pqf;
	s32 val;

	lockdep_assert_held(&dev->power.lock);

	if (IS_ERR_OR_NULL(qos))
		return PM_QOS_FLAGS_UNDEFINED;

	pqf = &qos->flags;
	if (list_empty(&pqf->list))
		return PM_QOS_FLAGS_UNDEFINED;

	val = pqf->effective_flags & mask;
	if (val)
		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

	return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
	unsigned long irqflags;
	enum pm_qos_flags_status ret;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	ret = __dev_pm_qos_flags(dev, mask);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
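
/*
 * A hedged consumer sketch: a power domain, before cutting power, may
 * check whether any request asks it not to (the surrounding policy is
 * hypothetical, PM_QOS_FLAG_NO_POWER_OFF is real):
 *
 *	if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF)
 *			> PM_QOS_FLAGS_NONE)
 *		return -EBUSY;	(at least one request forbids power-off)
 */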

/**
 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_read_value(struct device *dev)
{
	lockdep_assert_held(&dev->power.lock);

	return dev_pm_qos_raw_read_value(dev);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 */
s32 dev_pm_qos_read_value(struct device *dev)
{
	unsigned long flags;
	s32 ret;

	spin_lock_irqsave(&dev->power.lock, flags);
	ret = __dev_pm_qos_read_value(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return ret;
}
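
/*
 * A minimal consumer sketch, in the spirit of the runtime PM and power
 * domain governors: compare the aggregate resume latency constraint with
 * the device's worst-case resume latency before powering it down.
 * my_resume_latency_us() is hypothetical, and interpreting the special
 * "no constraint" value is up to the caller:
 *
 *	s32 constraint_us = dev_pm_qos_read_value(dev);
 *
 *	if (my_resume_latency_us(dev) > constraint_us)
 *		(powering down would violate the constraint)
 */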

/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = pm_qos_update_target(&qos->resume_latency,
					   &req->data.pnode, action, value);
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->power.set_latency_tolerance(req->dev, value);
		}
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation.
 * Must be called with the dev_pm_qos_mtx mutex held.
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->resume_latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->type = PM_QOS_MIN;
	c->notifiers = n;

	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;

	INIT_LIST_HEAD(&qos->flags.list);

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}

static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	/*
	 * If the device's PM QoS resume latency limit or PM QoS flags have been
	 * exposed to user space, they have to be hidden at this point.
	 */
	pm_qos_sysfs_remove_resume_latency(dev);
	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device. */
	c = &qos->resume_latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update constraints list and call the notification
		 * callbacks if needed.
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	c = &qos->latency_tolerance;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	kfree(qos->resume_latency.notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}

static bool dev_pm_qos_invalid_request(struct device *dev,
				       struct dev_pm_qos_request *req)
{
	return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE
			&& !dev->power.set_latency_tolerance);
}

static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || dev_pm_qos_invalid_request(dev, req))
		return -EINVAL;

	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (!ret) {
		req->dev = dev;
		req->type = type;
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
	}
	return ret;
}

/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle.  Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been removed
 * from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_add_request(dev, req, type, value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
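
/*
 * A hedged end-to-end sketch of the request life cycle and the return
 * convention above (1: aggregate value changed, 0: unchanged, negative:
 * error); "dev" and the latency values are hypothetical:
 *
 *	static struct dev_pm_qos_request req;
 *
 *	ret = dev_pm_qos_add_request(dev, &req,
 *				     DEV_PM_QOS_RESUME_LATENCY, 100);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	ret = dev_pm_qos_update_request(&req, 50);
 *	...
 *	dev_pm_qos_remove_request(&req);
 */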

/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req: PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req) /* guard against callers passing in NULL */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
					new_value);
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req: handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_update_request(req, new_value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);

static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	if (!req) /* guard against callers passing in NULL */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
					PM_QOS_DEFAULT_VALUE);
	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	memset(req, 0, sizeof(*req));
	return ret;
}

/**
 * dev_pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_remove_request(req);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);

/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is
 * called, it will be created (or an error code will be returned if that
 * fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	if (!ret)
		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
						       notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
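
/*
 * Sketch of a watcher's callback; as in the PM QoS core at this revision,
 * the chain is assumed to be invoked with the new aggregate value as the
 * "action" argument (my_dev_qos_notify and my_nb are hypothetical):
 *
 *	static int my_dev_qos_notify(struct notifier_block *nb,
 *				     unsigned long value, void *data)
 *	{
 *		(react to the new aggregate resume latency "value")
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_dev_qos_notify,
 *	};
 *
 *	dev_pm_qos_add_notifier(dev, &my_nb);
 */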

/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier)
{
	int retval = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (!IS_ERR_OR_NULL(dev->power.qos))
		retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
							    notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return retval;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);

/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @type: Type of the request.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	struct device *ancestor = dev->parent;
	int ret = -ENODEV;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		while (ancestor && !ancestor->power.ignore_children)
			ancestor = ancestor->parent;

		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		while (ancestor && !ancestor->power.set_latency_tolerance)
			ancestor = ancestor->parent;

		break;
	default:
		ancestor = NULL;
	}
	if (ancestor)
		ret = dev_pm_qos_add_request(ancestor, req, type, value);

	if (ret < 0)
		req->dev = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
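
/*
 * A hedged usage sketch: a driver whose controller runtime-suspends
 * independently of its children (power.ignore_children set) can place
 * the constraint on that ancestor instead of on the device itself
 * (anc_req and the value are hypothetical):
 *
 *	static struct dev_pm_qos_request anc_req;
 *
 *	ret = dev_pm_qos_add_ancestor_request(dev, &anc_req,
 *					      DEV_PM_QOS_RESUME_LATENCY, 20);
 *	(ret is -ENODEV if no suitable ancestor was found)
 */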

static void __dev_pm_qos_drop_user_request(struct device *dev,
					   enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos_request *req = NULL;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		req = dev->power.qos->resume_latency_req;
		dev->power.qos->resume_latency_req = NULL;
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		req = dev->power.qos->latency_tolerance_req;
		dev->power.qos->latency_tolerance_req = NULL;
		break;
	case DEV_PM_QOS_FLAGS:
		req = dev->power.qos->flags_req;
		dev->power.qos->flags_req = NULL;
		break;
	}
	__dev_pm_qos_remove_request(req);
	kfree(req);
}

static void dev_pm_qos_drop_user_request(struct device *dev,
					 enum dev_pm_qos_req_type type)
{
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_drop_user_request(dev, type);
	mutex_unlock(&dev_pm_qos_mtx);
}

/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->resume_latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->resume_latency_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_resume_latency(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
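
/*
 * Sketch of a driver calling this from its probe path; on success user
 * space can read and write the device's power/pm_qos_resume_latency_us
 * attribute (the initial value here is arbitrary):
 *
 *	ret = dev_pm_qos_expose_latency_limit(dev, 100);
 */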

static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}

/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_resume_latency(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_latency_limit(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);

/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
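
/*
 * Sketch of a hypothetical caller: exposing the flags with "no power off"
 * initially set creates the device's power/pm_qos_no_power_off attribute
 * in sysfs:
 *
 *	ret = dev_pm_qos_expose_flags(dev, PM_QOS_FLAG_NO_POWER_OFF);
 */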

static void __dev_pm_qos_hide_flags(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}

/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
	s32 value;
	int ret;

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
		ret = -EINVAL;
		goto out;
	}

	value = dev_pm_qos_requested_flags(dev);
	if (set)
		value |= mask;
	else
		value &= ~mask;

	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}
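
/*
 * A minimal sketch of a caller (such as the sysfs store callbacks) setting
 * and later clearing one flag in the user space request:
 *
 *	ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, true);
 *	...
 *	ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, false);
 */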

/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
	s32 ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = IS_ERR_OR_NULL(dev->power.qos)
		|| !dev->power.qos->latency_tolerance_req ?
			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
			dev->power.qos->latency_tolerance_req->data.pnode.prio;
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}

/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos)
	    || !dev->power.qos->latency_tolerance_req) {
		struct dev_pm_qos_request *req;

		if (val < 0) {
			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
				ret = 0;
			else
				ret = -EINVAL;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
		if (ret < 0) {
			kfree(req);
			goto out;
		}
		dev->power.qos->latency_tolerance_req = req;
	} else {
		if (val < 0) {
			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
			ret = 0;
		} else {
			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
		}
	}

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);

/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
 * @dev: Device whose latency tolerance to expose
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
	int ret;

	if (!dev->power.set_latency_tolerance)
		return -EINVAL;

	mutex_lock(&dev_pm_qos_sysfs_mtx);
	ret = pm_qos_sysfs_add_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);
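
/*
 * Hedged sketch of a driver wiring this up: it must provide the
 * set_latency_tolerance() callback before exposing, after which user
 * space can use the device's power/pm_qos_latency_tolerance_us attribute
 * (my_set_latency_tolerance is hypothetical):
 *
 *	static void my_set_latency_tolerance(struct device *dev, s32 val)
 *	{
 *		(program the hardware; negative val means no constraint)
 *	}
 *
 *	dev->power.set_latency_tolerance = my_set_latency_tolerance;
 *	ret = dev_pm_qos_expose_latency_tolerance(dev);
 */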

/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
 * @dev: Device whose latency tolerance to hide
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);
	pm_qos_sysfs_remove_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	/* Remove the request from user space now */
	pm_runtime_get_sync(dev);
	dev_pm_qos_update_user_latency_tolerance(dev,
		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);
884