xref: /linux/drivers/base/power/qos.c (revision 06d07429858317ded2db7986113a9e0129cd599b)
// SPDX-License-Identifier: GPL-2.0
/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value : register requests
 * Watchers of QoS value : get notified when target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register a per-device notification callback using the
 * dev_pm_qos_*_notifier API. The notification chain data is stored in the
 * per-device constraint data struct.
 *
 * Note about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored into the device
 *   dev_pm_info.
 * . To minimize the data usage by the per-device constraints, the data struct
 *   is only allocated at the first call to dev_pm_qos_add_request.
 * . The data is later freed when the device is removed from the system.
 * . A global mutex protects the constraints users from the data being
 *   allocated and freed.
 */

#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
	struct dev_pm_qos *qos = dev->power.qos;
	struct pm_qos_flags *pqf;
	s32 val;

	lockdep_assert_held(&dev->power.lock);

	if (IS_ERR_OR_NULL(qos))
		return PM_QOS_FLAGS_UNDEFINED;

	pqf = &qos->flags;
	if (list_empty(&pqf->list))
		return PM_QOS_FLAGS_UNDEFINED;

	val = pqf->effective_flags & mask;
	if (val)
		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

	return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
	unsigned long irqflags;
	enum pm_qos_flags_status ret;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	ret = __dev_pm_qos_flags(dev, mask);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
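
/*
 * Usage sketch (not part of the original file): a hypothetical caller
 * checking whether any PM QoS request asked the device to keep power on
 * before powering down its domain. PM_QOS_FLAG_NO_POWER_OFF is the flag
 * defined in <linux/pm_qos.h>; the function and its policy are
 * illustrative assumptions only.
 */
static bool example_may_power_off(struct device *dev)
{
	/* PM_QOS_FLAGS_NONE means no request asked us to keep power on. */
	return dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF) ==
			PM_QOS_FLAGS_NONE;
}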

/**
 * __dev_pm_qos_resume_latency - Get resume latency constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_resume_latency(struct device *dev)
{
	lockdep_assert_held(&dev->power.lock);

	return dev_pm_qos_raw_resume_latency(dev);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 * @type: QoS request type.
 */
s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos *qos = dev->power.qos;
	unsigned long flags;
	s32 ret;

	spin_lock_irqsave(&dev->power.lock, flags);

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
			: pm_qos_read_value(&qos->resume_latency);
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
			: freq_qos_read_value(&qos->freq, FREQ_QOS_MIN);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
			: freq_qos_read_value(&qos->freq, FREQ_QOS_MAX);
		break;
	default:
		WARN_ON(1);
		ret = 0;
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return ret;
}
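
/*
 * Usage sketch (assumption, not part of the original file): a governor-style
 * check comparing the aggregated resume-latency constraint against the
 * latency a low-power state would incur. The state_latency_us parameter
 * and the function itself are illustrative.
 */
static bool example_state_allowed(struct device *dev, s32 state_latency_us)
{
	s32 limit = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);

	/* PM_QOS_RESUME_LATENCY_NO_CONSTRAINT means any latency is fine. */
	return limit == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT ||
	       state_latency_us <= limit;
}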

/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
			value = 0;

		ret = pm_qos_update_target(&qos->resume_latency,
					   &req->data.pnode, action, value);
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->power.set_latency_tolerance(req->dev, value);
		}
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = freq_qos_apply(&req->data.freq, action, value);
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation.
 * Must be called with the dev_pm_qos_mtx mutex held.
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	n = kcalloc(3, sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}

	c = &qos->resume_latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;
	c->notifiers = n;
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;

	freq_constraints_init(&qos->freq);

	INIT_LIST_HEAD(&qos->flags.list);

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}

static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	/*
	 * If the device's PM QoS resume latency limit or PM QoS flags have been
	 * exposed to user space, they have to be hidden at this point.
	 */
	pm_qos_sysfs_remove_resume_latency(dev);
	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device. */
	c = &qos->resume_latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update the constraints list and call the notification
		 * callbacks if needed.
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->latency_tolerance;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->freq.min_freq;
	plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ,
				 PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->freq.max_freq;
	plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ,
				 PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	kfree(qos->resume_latency.notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}

static bool dev_pm_qos_invalid_req_type(struct device *dev,
					enum dev_pm_qos_req_type type)
{
	return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
	       !dev->power.set_latency_tolerance;
}

static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
		return -EINVAL;

	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (ret)
		return ret;

	req->dev = dev;
	req->type = type;
	if (req->type == DEV_PM_QOS_MIN_FREQUENCY)
		ret = freq_qos_add_request(&dev->power.qos->freq,
					   &req->data.freq,
					   FREQ_QOS_MIN, value);
	else if (req->type == DEV_PM_QOS_MAX_FREQUENCY)
		ret = freq_qos_add_request(&dev->power.qos->freq,
					   &req->data.freq,
					   FREQ_QOS_MAX, value);
	else
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);

	return ret;
}

/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle.  Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been removed
 * from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_add_request(dev, req, type, value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
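
/*
 * Usage sketch (assumption, not part of the original file): a hypothetical
 * driver keeping the request handle in its private data and adding a resume
 * latency constraint at probe time. struct example_priv and the 100 us
 * value are illustrative.
 */
struct example_priv {
	struct dev_pm_qos_request qos_req;
};

static int example_probe_add_qos(struct device *dev, struct example_priv *priv)
{
	/* Ask that resuming this device never take longer than 100 us. */
	int ret = dev_pm_qos_add_request(dev, &priv->qos_req,
					 DEV_PM_QOS_RESUME_LATENCY, 100);

	/* A negative return is an error; 0 and 1 both mean success. */
	return ret < 0 ? ret : 0;
}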

/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req : PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
	case DEV_PM_QOS_MAX_FREQUENCY:
		curr_value = req->data.freq.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
					new_value);
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req : handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_update_request(req, new_value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);

static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
					PM_QOS_DEFAULT_VALUE);
	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	memset(req, 0, sizeof(*req));
	return ret;
}

/**
 * dev_pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove the pm qos request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_remove_request(req);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
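
/*
 * Usage sketch (assumption, not part of the original file): tightening an
 * already-added request while latency-critical work is in flight and
 * dropping it at teardown. The handle must be one initialized by
 * dev_pm_qos_add_request(); the values are illustrative.
 */
static void example_transfer_window(struct dev_pm_qos_request *req)
{
	/* Demand a tighter 20 us resume latency for the duration... */
	dev_pm_qos_update_request(req, 20);

	/* ... do the latency-sensitive work here ... */

	/* ...then relax the constraint back to 100 us. */
	dev_pm_qos_update_request(req, 100);
}

static void example_teardown(struct dev_pm_qos_request *req)
{
	/* Drop the request entirely; the handle is cleared by the core. */
	dev_pm_qos_remove_request(req);
}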

/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 * @type: request type.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is called,
 * it will be created (or an error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
			    enum dev_pm_qos_req_type type)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	if (ret)
		goto unlock;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
						       notifier);
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = freq_qos_add_notifier(&dev->power.qos->freq,
					    FREQ_QOS_MIN, notifier);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = freq_qos_add_notifier(&dev->power.qos->freq,
					    FREQ_QOS_MAX, notifier);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
	}

unlock:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
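
/*
 * Usage sketch (assumption, not part of the original file): a watcher of
 * the aggregated resume-latency target. Per the convention used by
 * pm_qos_update_target(), the new target value is passed to the callback
 * as @action. The callback body and message are illustrative.
 */
static int example_qos_notify(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	pr_info("resume latency target is now %lu us\n", action);
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_qos_notify,
};

/* Registered with: dev_pm_qos_add_notifier(dev, &example_nb, DEV_PM_QOS_RESUME_LATENCY); */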

/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 * @type: request type.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier,
			       enum dev_pm_qos_req_type type)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (IS_ERR_OR_NULL(dev->power.qos))
		goto unlock;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
							 notifier);
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = freq_qos_remove_notifier(&dev->power.qos->freq,
					       FREQ_QOS_MIN, notifier);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = freq_qos_remove_notifier(&dev->power.qos->freq,
					       FREQ_QOS_MAX, notifier);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
	}

unlock:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);

/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @type: Type of the request.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	struct device *ancestor = dev->parent;
	int ret = -ENODEV;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		while (ancestor && !ancestor->power.ignore_children)
			ancestor = ancestor->parent;

		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		while (ancestor && !ancestor->power.set_latency_tolerance)
			ancestor = ancestor->parent;

		break;
	default:
		ancestor = NULL;
	}
	if (ancestor)
		ret = dev_pm_qos_add_request(ancestor, req, type, value);

	if (ret < 0)
		req->dev = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
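
/*
 * Usage sketch (assumption, not part of the original file): a child device,
 * e.g. one behind a controller, constraining the first ancestor that has
 * power.ignore_children set, since that ancestor's runtime PM state gates
 * the child's wakeup path. The 500 us value is illustrative.
 */
static int example_constrain_ancestor(struct device *dev,
				      struct dev_pm_qos_request *req)
{
	/* Walks up dev->parent and adds a 500 us resume-latency request. */
	return dev_pm_qos_add_ancestor_request(dev, req,
					       DEV_PM_QOS_RESUME_LATENCY, 500);
}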

static void __dev_pm_qos_drop_user_request(struct device *dev,
					   enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos_request *req = NULL;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		req = dev->power.qos->resume_latency_req;
		dev->power.qos->resume_latency_req = NULL;
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		req = dev->power.qos->latency_tolerance_req;
		dev->power.qos->latency_tolerance_req = NULL;
		break;
	case DEV_PM_QOS_FLAGS:
		req = dev->power.qos->flags_req;
		dev->power.qos->flags_req = NULL;
		break;
	default:
		WARN_ON(1);
		return;
	}
	__dev_pm_qos_remove_request(req);
	kfree(req);
}

static void dev_pm_qos_drop_user_request(struct device *dev,
					 enum dev_pm_qos_req_type type)
{
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_drop_user_request(dev, type);
	mutex_unlock(&dev_pm_qos_mtx);
}

/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->resume_latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->resume_latency_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_resume_latency(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
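
/*
 * Usage sketch (assumption, not part of the original file): a bus or driver
 * exposing the resume-latency knob to user space at device registration
 * time, so the corresponding attribute appears under the device's power/
 * directory in sysfs. The initial 1000 us value is illustrative.
 */
static void example_expose_latency(struct device *dev)
{
	/* Success returns 0; any nonzero value here indicates failure. */
	if (dev_pm_qos_expose_latency_limit(dev, 1000))
		dev_warn(dev, "failed to expose PM QoS latency limit\n");
}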

static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}

/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_resume_latency(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_latency_limit(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);

/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);

static void __dev_pm_qos_hide_flags(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}

/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
	s32 value;
	int ret;

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
		ret = -EINVAL;
		goto out;
	}

	value = dev_pm_qos_requested_flags(dev);
	if (set)
		value |= mask;
	else
		value &= ~mask;

	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}
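
/*
 * Usage sketch (assumption, not part of the original file): toggling the
 * "no power off" flag in the user-space-owned flags request, e.g. in
 * response to a sysfs write. Only valid after dev_pm_qos_expose_flags()
 * has created the request; the wrapper itself is illustrative.
 */
static int example_keep_power_on(struct device *dev, bool keep)
{
	/* Returns -EINVAL if the flags request was never exposed. */
	return dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, keep);
}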

/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
	s32 ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = IS_ERR_OR_NULL(dev->power.qos)
		|| !dev->power.qos->latency_tolerance_req ?
			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
			dev->power.qos->latency_tolerance_req->data.pnode.prio;
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}

/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos)
	    || !dev->power.qos->latency_tolerance_req) {
		struct dev_pm_qos_request *req;

		if (val < 0) {
			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
				ret = 0;
			else
				ret = -EINVAL;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
		if (ret < 0) {
			kfree(req);
			goto out;
		}
		dev->power.qos->latency_tolerance_req = req;
	} else {
		if (val < 0) {
			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
			ret = 0;
		} else {
			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
		}
	}

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
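
/*
 * Usage sketch (assumption, not part of the original file): how a store
 * handler might map a written value onto this call. Passing the negative
 * PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT drops the user-space request,
 * while any value >= 0 creates or updates it. The helper is illustrative.
 */
static int example_set_tolerance(struct device *dev, s32 usecs)
{
	return dev_pm_qos_update_user_latency_tolerance(dev,
			usecs < 0 ? PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT
				  : usecs);
}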

/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
 * @dev: Device whose latency tolerance to expose
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
	int ret;

	if (!dev->power.set_latency_tolerance)
		return -EINVAL;

	mutex_lock(&dev_pm_qos_sysfs_mtx);
	ret = pm_qos_sysfs_add_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);

/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
 * @dev: Device whose latency tolerance to hide
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);
	pm_qos_sysfs_remove_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	/* Remove the request from user space now */
	pm_runtime_get_sync(dev);
	dev_pm_qos_update_user_latency_tolerance(dev,
		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);
983