xref: /linux/kernel/power/qos.c (revision de19ca6fd72c7dd45ad82403e7b3fe9c74ef6767)
1 /*
2  * This module exposes the interface to kernel space for specifying
3  * QoS dependencies.  It provides infrastructure for registration of:
4  *
5  * Dependents on a QoS value : register requests
6  * Watchers of QoS value : get notified when target QoS value changes
7  *
8  * This QoS design is best effort based.  Dependents register their QoS needs.
9  * Watchers register to keep track of the current QoS needs of the system.
10  *
11  * There are 3 basic classes of QoS parameter: latency, timeout, throughput;
12  * each has defined units:
13  * latency: usec
14  * timeout: usec <-- currently not used.
15  * throughput: kbs (kilo byte / sec)
16  *
17  * There are lists of pm_qos_objects, each one wrapping requests and notifiers.
18  *
19  * User mode requests on a QoS parameter register themselves with the
20  * subsystem by opening the device node /dev/... and writing their request to
21  * the node.  As long as the process holds the file handle open, the
22  * client continues to be accounted for.  Upon file release the user mode
23  * request is removed and a new qos target is computed.  This way, when the
24  * application closes the file descriptor or exits, its request is cleaned
25  * up and the pm_qos_object gets an opportunity to recompute the target.
26  *
27  * Mark Gross <mgross@linux.intel.com>
28  */
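
/*
 * Illustrative sketch (editor's addition, not part of this file): a minimal
 * user space client of the misc device interface described above, assuming
 * the cpu_dma_latency class registered below.  The variable names are
 * hypothetical.  The request stays in effect for as long as the file
 * descriptor is held open.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int32_t target_us = 20;		// ask for <= 20 usec wakeup latency
 *		int fd = open("/dev/cpu_dma_latency", O_RDWR);
 *
 *		write(fd, &target_us, sizeof(target_us));	// binary s32 form
 *		// ... latency-sensitive work runs here ...
 *		close(fd);		// request is dropped on file release
 *		return 0;
 *	}
 */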
29 
30 /*#define DEBUG*/
31 
32 #include <linux/pm_qos.h>
33 #include <linux/sched.h>
34 #include <linux/spinlock.h>
35 #include <linux/slab.h>
36 #include <linux/time.h>
37 #include <linux/fs.h>
38 #include <linux/device.h>
39 #include <linux/miscdevice.h>
40 #include <linux/string.h>
41 #include <linux/platform_device.h>
42 #include <linux/init.h>
43 #include <linux/kernel.h>
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 
47 #include <linux/uaccess.h>
48 #include <linux/export.h>
49 #include <trace/events/power.h>
50 
51 /*
52  * locking rule: all changes to constraints or notifiers lists,
53  * or to the pm_qos_object list and pm_qos_objects, need to happen with
54  * pm_qos_lock held, taken with _irqsave.  One lock to rule them all.
55  */
56 struct pm_qos_object {
57 	struct pm_qos_constraints *constraints;
58 	struct miscdevice pm_qos_power_miscdev;
59 	char *name;
60 };
61 
62 static DEFINE_SPINLOCK(pm_qos_lock);
63 
64 static struct pm_qos_object null_pm_qos;
65 
66 static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
67 static struct pm_qos_constraints cpu_dma_constraints = {
68 	.list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
69 	.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
70 	.default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
71 	.no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
72 	.type = PM_QOS_MIN,
73 	.notifiers = &cpu_dma_lat_notifier,
74 };
75 static struct pm_qos_object cpu_dma_pm_qos = {
76 	.constraints = &cpu_dma_constraints,
77 	.name = "cpu_dma_latency",
78 };
79 
80 static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
81 static struct pm_qos_constraints network_lat_constraints = {
82 	.list = PLIST_HEAD_INIT(network_lat_constraints.list),
83 	.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
84 	.default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
85 	.no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
86 	.type = PM_QOS_MIN,
87 	.notifiers = &network_lat_notifier,
88 };
89 static struct pm_qos_object network_lat_pm_qos = {
90 	.constraints = &network_lat_constraints,
91 	.name = "network_latency",
92 };
93 
94 
95 static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
96 static struct pm_qos_constraints network_tput_constraints = {
97 	.list = PLIST_HEAD_INIT(network_tput_constraints.list),
98 	.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
99 	.default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
100 	.no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
101 	.type = PM_QOS_MAX,
102 	.notifiers = &network_throughput_notifier,
103 };
104 static struct pm_qos_object network_throughput_pm_qos = {
105 	.constraints = &network_tput_constraints,
106 	.name = "network_throughput",
107 };
108 
109 
110 static BLOCKING_NOTIFIER_HEAD(memory_bandwidth_notifier);
111 static struct pm_qos_constraints memory_bw_constraints = {
112 	.list = PLIST_HEAD_INIT(memory_bw_constraints.list),
113 	.target_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
114 	.default_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
115 	.no_constraint_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
116 	.type = PM_QOS_SUM,
117 	.notifiers = &memory_bandwidth_notifier,
118 };
119 static struct pm_qos_object memory_bandwidth_pm_qos = {
120 	.constraints = &memory_bw_constraints,
121 	.name = "memory_bandwidth",
122 };
123 
124 
125 static struct pm_qos_object *pm_qos_array[] = {
126 	&null_pm_qos,
127 	&cpu_dma_pm_qos,
128 	&network_lat_pm_qos,
129 	&network_throughput_pm_qos,
130 	&memory_bandwidth_pm_qos,
131 };
132 
133 static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
134 		size_t count, loff_t *f_pos);
135 static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
136 		size_t count, loff_t *f_pos);
137 static int pm_qos_power_open(struct inode *inode, struct file *filp);
138 static int pm_qos_power_release(struct inode *inode, struct file *filp);
139 
140 static const struct file_operations pm_qos_power_fops = {
141 	.write = pm_qos_power_write,
142 	.read = pm_qos_power_read,
143 	.open = pm_qos_power_open,
144 	.release = pm_qos_power_release,
145 	.llseek = noop_llseek,
146 };
147 
148 /* unlocked internal variant */
149 static inline int pm_qos_get_value(struct pm_qos_constraints *c)
150 {
151 	struct plist_node *node;
152 	int total_value = 0;
153 
154 	if (plist_head_empty(&c->list))
155 		return c->no_constraint_value;
156 
157 	switch (c->type) {
158 	case PM_QOS_MIN:
159 		return plist_first(&c->list)->prio;
160 
161 	case PM_QOS_MAX:
162 		return plist_last(&c->list)->prio;
163 
164 	case PM_QOS_SUM:
165 		plist_for_each(node, &c->list)
166 			total_value += node->prio;
167 
168 		return total_value;
169 
170 	default:
171 		/* runtime check for a constraint type outside the enum */
172 		BUG();
173 		return PM_QOS_DEFAULT_VALUE;
174 	}
175 }
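
/*
 * Worked example (editor's illustration; the request values are made up):
 * with two requests of 100 and 300 on the list, the aggregate computed above
 * is 100 for a PM_QOS_MIN class (the strictest latency request wins), 300 for
 * PM_QOS_MAX, and 400 for PM_QOS_SUM.
 */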
176 
177 s32 pm_qos_read_value(struct pm_qos_constraints *c)
178 {
179 	return c->target_value;
180 }
181 
182 static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
183 {
184 	c->target_value = value;
185 }
186 
187
188 static int pm_qos_dbg_show_requests(struct seq_file *s, void *unused)
189 {
190 	struct pm_qos_object *qos = (struct pm_qos_object *)s->private;
191 	struct pm_qos_constraints *c;
192 	struct pm_qos_request *req;
193 	char *type;
194 	unsigned long flags;
195 	int tot_reqs = 0;
196 	int active_reqs = 0;
197 
198 	if (IS_ERR_OR_NULL(qos)) {
199 		pr_err("%s: bad qos param!\n", __func__);
200 		return -EINVAL;
201 	}
202 	c = qos->constraints;
203 	if (IS_ERR_OR_NULL(c)) {
204 		pr_err("%s: Bad constraints on qos?\n", __func__);
205 		return -EINVAL;
206 	}
207 
208 	/* Lock to ensure we have a snapshot */
209 	spin_lock_irqsave(&pm_qos_lock, flags);
210 	if (plist_head_empty(&c->list)) {
211 		seq_puts(s, "Empty!\n");
212 		goto out;
213 	}
214 
215 	switch (c->type) {
216 	case PM_QOS_MIN:
217 		type = "Minimum";
218 		break;
219 	case PM_QOS_MAX:
220 		type = "Maximum";
221 		break;
222 	case PM_QOS_SUM:
223 		type = "Sum";
224 		break;
225 	default:
226 		type = "Unknown";
227 	}
228 
229 	plist_for_each_entry(req, &c->list, node) {
230 		char *state = "Default";
231 
232 		if ((req->node).prio != c->default_value) {
233 			active_reqs++;
234 			state = "Active";
235 		}
236 		tot_reqs++;
237 		seq_printf(s, "%d: %d: %s\n", tot_reqs,
238 			   (req->node).prio, state);
239 	}
240 
241 	seq_printf(s, "Type=%s, Value=%d, Requests: active=%d / total=%d\n",
242 		   type, pm_qos_get_value(c), active_reqs, tot_reqs);
243 
244 out:
245 	spin_unlock_irqrestore(&pm_qos_lock, flags);
246 	return 0;
247 }
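
/*
 * Example debugfs output (editor's illustration; the request values are made
 * up) as produced by the seq_printf() calls above, e.g. when reading
 * /sys/kernel/debug/pm_qos/cpu_dma_latency with one active and one
 * default-valued request:
 *
 *	1: 20: Active
 *	2: 2000000000: Default
 *	Type=Minimum, Value=20, Requests: active=1 / total=2
 */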
248 
249 static int pm_qos_dbg_open(struct inode *inode, struct file *file)
250 {
251 	return single_open(file, pm_qos_dbg_show_requests,
252 			   inode->i_private);
253 }
254 
255 static const struct file_operations pm_qos_debug_fops = {
256 	.open           = pm_qos_dbg_open,
257 	.read           = seq_read,
258 	.llseek         = seq_lseek,
259 	.release        = single_release,
260 };
261 
262 /**
263  * pm_qos_update_target - manages the constraints list and calls the notifiers
264  *  if needed
265  * @c: constraints data struct
266  * @node: request to add to the list, to update or to remove
267  * @action: action to take on the constraints list
268  * @value: value of the request to add or update
269  *
270  * This function returns 1 if the aggregated constraint value has changed, 0
271  *  otherwise.
272  */
273 int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
274 			 enum pm_qos_req_action action, int value)
275 {
276 	unsigned long flags;
277 	int prev_value, curr_value, new_value;
278 	int ret;
279 
280 	spin_lock_irqsave(&pm_qos_lock, flags);
281 	prev_value = pm_qos_get_value(c);
282 	if (value == PM_QOS_DEFAULT_VALUE)
283 		new_value = c->default_value;
284 	else
285 		new_value = value;
286 
287 	switch (action) {
288 	case PM_QOS_REMOVE_REQ:
289 		plist_del(node, &c->list);
290 		break;
291 	case PM_QOS_UPDATE_REQ:
292 		/*
293 		 * to change the list, we atomically remove, reinit
294 		 * with new value and add, then see if the extremal
295 		 * changed
296 		 */
297 		plist_del(node, &c->list);
298 		/* fall through */
299 	case PM_QOS_ADD_REQ:
300 		plist_node_init(node, new_value);
301 		plist_add(node, &c->list);
302 		break;
303 	default:
304 		/* no action */
305 		;
306 	}
307 
308 	curr_value = pm_qos_get_value(c);
309 	pm_qos_set_value(c, curr_value);
310 
311 	spin_unlock_irqrestore(&pm_qos_lock, flags);
312 
313 	trace_pm_qos_update_target(action, prev_value, curr_value);
314 	if (prev_value != curr_value) {
315 		ret = 1;
316 		if (c->notifiers)
317 			blocking_notifier_call_chain(c->notifiers,
318 						     (unsigned long)curr_value,
319 						     NULL);
320 	} else {
321 		ret = 0;
322 	}
323 	return ret;
324 }
325 
326 /**
327  * pm_qos_flags_remove_req - Remove device PM QoS flags request.
328  * @pqf: Device PM QoS flags set to remove the request from.
329  * @req: Request to remove from the set.
330  */
331 static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf,
332 				    struct pm_qos_flags_request *req)
333 {
334 	s32 val = 0;
335 
336 	list_del(&req->node);
337 	list_for_each_entry(req, &pqf->list, node)
338 		val |= req->flags;
339 
340 	pqf->effective_flags = val;
341 }
342 
343 /**
344  * pm_qos_update_flags - Update a set of PM QoS flags.
345  * @pqf: Set of flags to update.
346  * @req: Request to add to the set, to modify, or to remove from the set.
347  * @action: Action to take on the set.
348  * @val: Value of the request to add or modify.
349  *
350  * Update the given set of PM QoS flags and call notifiers if the aggregate
351  * value has changed.  Returns true if the aggregate constraint value has
352  * changed, false otherwise.
353  */
354 bool pm_qos_update_flags(struct pm_qos_flags *pqf,
355 			 struct pm_qos_flags_request *req,
356 			 enum pm_qos_req_action action, s32 val)
357 {
358 	unsigned long irqflags;
359 	s32 prev_value, curr_value;
360 
361 	spin_lock_irqsave(&pm_qos_lock, irqflags);
362 
363 	prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
364 
365 	switch (action) {
366 	case PM_QOS_REMOVE_REQ:
367 		pm_qos_flags_remove_req(pqf, req);
368 		break;
369 	case PM_QOS_UPDATE_REQ:
370 		pm_qos_flags_remove_req(pqf, req);
371 		/* fall through */
372 	case PM_QOS_ADD_REQ:
373 		req->flags = val;
374 		INIT_LIST_HEAD(&req->node);
375 		list_add_tail(&req->node, &pqf->list);
376 		pqf->effective_flags |= val;
377 		break;
378 	default:
379 		/* no action */
380 		;
381 	}
382 
383 	curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
384 
385 	spin_unlock_irqrestore(&pm_qos_lock, irqflags);
386 
387 	trace_pm_qos_update_flags(action, prev_value, curr_value);
388 	return prev_value != curr_value;
389 }
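
/*
 * Worked example (editor's illustration): effective_flags is the bitwise OR
 * of all requests in the set.  With two requests carrying flags 0x1 and 0x2
 * the aggregate is 0x3; removing the 0x2 request makes
 * pm_qos_flags_remove_req() recompute it to 0x1, and this function then
 * returns true because the aggregate changed.
 */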
390 
391 /**
392  * pm_qos_request - returns current system wide qos expectation
393  * @pm_qos_class: identification of which qos value is requested
394  *
395  * This function returns the current target value.
396  */
397 int pm_qos_request(int pm_qos_class)
398 {
399 	return pm_qos_read_value(pm_qos_array[pm_qos_class]->constraints);
400 }
401 EXPORT_SYMBOL_GPL(pm_qos_request);
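
/*
 * Usage sketch (editor's addition; the variable name is hypothetical): an
 * idle governor or driver can poll the current aggregate target, e.g.
 *
 *	s32 latency_us = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
 */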
402 
403 int pm_qos_request_active(struct pm_qos_request *req)
404 {
405 	return req->pm_qos_class != 0;
406 }
407 EXPORT_SYMBOL_GPL(pm_qos_request_active);
408 
409 static void __pm_qos_update_request(struct pm_qos_request *req,
410 			   s32 new_value)
411 {
412 	trace_pm_qos_update_request(req->pm_qos_class, new_value);
413 
414 	if (new_value != req->node.prio)
415 		pm_qos_update_target(
416 			pm_qos_array[req->pm_qos_class]->constraints,
417 			&req->node, PM_QOS_UPDATE_REQ, new_value);
418 }
419 
420 /**
421  * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
422  * @work: work struct for the delayed work (timeout)
423  *
424  * When the timeout expires, this resets the request to the class default value.
425  */
426 static void pm_qos_work_fn(struct work_struct *work)
427 {
428 	struct pm_qos_request *req = container_of(to_delayed_work(work),
429 						  struct pm_qos_request,
430 						  work);
431 
432 	__pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
433 }
434 
435 /**
436  * pm_qos_add_request - inserts new qos request into the list
437  * @req: pointer to a preallocated handle
438  * @pm_qos_class: identifies which list of qos request to use
439  * @value: defines the qos request
440  *
441  * This function inserts a new entry in the pm_qos_class list of requested qos
442  * performance characteristics.  It recomputes the aggregate QoS expectations
443  * for the pm_qos_class of parameters and initializes the pm_qos_request
444  * handle.  Caller needs to save this handle for later use in updates and
445  * removal.
446  */
448 void pm_qos_add_request(struct pm_qos_request *req,
449 			int pm_qos_class, s32 value)
450 {
451 	if (!req) /*guard against callers passing in null */
452 		return;
453 
454 	if (pm_qos_request_active(req)) {
455 		WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
456 		return;
457 	}
458 	req->pm_qos_class = pm_qos_class;
459 	INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
460 	trace_pm_qos_add_request(pm_qos_class, value);
461 	pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
462 			     &req->node, PM_QOS_ADD_REQ, value);
463 }
464 EXPORT_SYMBOL_GPL(pm_qos_add_request);
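
/*
 * Kernel-side usage sketch (editor's addition; the my_qos_req name is
 * hypothetical).  A caller keeps the handle passed to pm_qos_add_request()
 * and reuses it for later updates and removal:
 *
 *	static struct pm_qos_request my_qos_req;
 *
 *	// probe/open path: request a CPU/DMA latency of at most 50 usec
 *	pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY, 50);
 *
 *	// workload change: tighten the request
 *	pm_qos_update_request(&my_qos_req, 20);
 *
 *	// teardown path: drop the constraint again
 *	pm_qos_remove_request(&my_qos_req);
 */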
465 
466 /**
467  * pm_qos_update_request - modifies an existing qos request
468  * @req : handle to list element holding a pm_qos request to use
469  * @value: defines the qos request
470  *
471  * Updates an existing qos request for the pm_qos_class of parameters along
472  * with updating the target pm_qos_class value.
473  *
474  * Attempts are made to make this code callable on hot code paths.
475  */
476 void pm_qos_update_request(struct pm_qos_request *req,
477 			   s32 new_value)
478 {
479 	if (!req) /*guard against callers passing in null */
480 		return;
481 
482 	if (!pm_qos_request_active(req)) {
483 		WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
484 		return;
485 	}
486 
487 	cancel_delayed_work_sync(&req->work);
488 	__pm_qos_update_request(req, new_value);
489 }
490 EXPORT_SYMBOL_GPL(pm_qos_update_request);
491 
492 /**
493  * pm_qos_update_request_timeout - modifies an existing qos request temporarily.
494  * @req : handle to list element holding a pm_qos request to use
495  * @new_value: defines the temporary qos request value
496  * @timeout_us: the effective duration of this qos request in usecs.
497  *
498  * After timeout_us, this qos request is cancelled automatically.
499  */
500 void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
501 				   unsigned long timeout_us)
502 {
503 	if (!req)
504 		return;
505 	if (WARN(!pm_qos_request_active(req),
506 		 "%s called for unknown object.", __func__))
507 		return;
508 
509 	cancel_delayed_work_sync(&req->work);
510 
511 	trace_pm_qos_update_request_timeout(req->pm_qos_class,
512 					    new_value, timeout_us);
513 	if (new_value != req->node.prio)
514 		pm_qos_update_target(
515 			pm_qos_array[req->pm_qos_class]->constraints,
516 			&req->node, PM_QOS_UPDATE_REQ, new_value);
517 
518 	schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
519 }
520 
521 /**
522  * pm_qos_remove_request - removes an existing qos request
523  * @req: handle to request list element
524  *
525  * Will remove pm qos request from the list of constraints and
526  * recompute the current target value for the pm_qos_class.  Call this
527  * on slow code paths.
528  */
529 void pm_qos_remove_request(struct pm_qos_request *req)
530 {
531 	if (!req) /*guard against callers passing in null */
532 		return;
533 		/* silent return to keep pcm code cleaner */
534 
535 	if (!pm_qos_request_active(req)) {
536 		WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
537 		return;
538 	}
539 
540 	cancel_delayed_work_sync(&req->work);
541 
542 	trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
543 	pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
544 			     &req->node, PM_QOS_REMOVE_REQ,
545 			     PM_QOS_DEFAULT_VALUE);
546 	memset(req, 0, sizeof(*req));
547 }
548 EXPORT_SYMBOL_GPL(pm_qos_remove_request);
549 
550 /**
551  * pm_qos_add_notifier - sets notification entry for changes to target value
552  * @pm_qos_class: identifies which qos target changes should be notified.
553  * @notifier: notifier block managed by caller.
554  *
555  * will register the notifier into a notification chain that gets called
556  * upon changes to the pm_qos_class target value.
557  */
558 int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
559 {
560 	int retval;
561 
562 	retval = blocking_notifier_chain_register(
563 			pm_qos_array[pm_qos_class]->constraints->notifiers,
564 			notifier);
565 
566 	return retval;
567 }
568 EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
569 
570 /**
571  * pm_qos_remove_notifier - deletes notification entry from chain.
572  * @pm_qos_class: identifies which qos target changes are notified.
573  * @notifier: notifier block to be removed.
574  *
575  * will remove the notifier from the notification chain that gets called
576  * upon changes to the pm_qos_class target value.
577  */
578 int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
579 {
580 	int retval;
581 
582 	retval = blocking_notifier_chain_unregister(
583 			pm_qos_array[pm_qos_class]->constraints->notifiers,
584 			notifier);
585 
586 	return retval;
587 }
588 EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
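
/*
 * Notifier usage sketch (editor's addition; the my_qos_* names are
 * hypothetical).  The chain is invoked from pm_qos_update_target() with the
 * new aggregate target value as the 'action' argument:
 *
 *	static int my_qos_notify(struct notifier_block *nb,
 *				 unsigned long value, void *data)
 *	{
 *		// react to the new target, e.g. reprogram hardware
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_qos_nb = {
 *		.notifier_call = my_qos_notify,
 *	};
 *
 *	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, &my_qos_nb);
 *	...
 *	pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY, &my_qos_nb);
 */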
589 
590 /* User space interface to PM QoS classes via misc devices */
591 static int register_pm_qos_misc(struct pm_qos_object *qos, struct dentry *d)
592 {
593 	qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
594 	qos->pm_qos_power_miscdev.name = qos->name;
595 	qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
596 
597 	if (d) {
598 		(void)debugfs_create_file(qos->name, S_IRUGO, d,
599 					  (void *)qos, &pm_qos_debug_fops);
600 	}
601 
602 	return misc_register(&qos->pm_qos_power_miscdev);
603 }
604 
605 static int find_pm_qos_object_by_minor(int minor)
606 {
607 	int pm_qos_class;
608 
609 	for (pm_qos_class = PM_QOS_CPU_DMA_LATENCY;
610 		pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
611 		if (minor ==
612 			pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
613 			return pm_qos_class;
614 	}
615 	return -1;
616 }
617 
618 static int pm_qos_power_open(struct inode *inode, struct file *filp)
619 {
620 	long pm_qos_class;
621 
622 	pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
623 	if (pm_qos_class >= PM_QOS_CPU_DMA_LATENCY) {
624 		struct pm_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL);
625 		if (!req)
626 			return -ENOMEM;
627 
628 		pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
629 		filp->private_data = req;
630 
631 		return 0;
632 	}
633 	return -EPERM;
634 }
635 
636 static int pm_qos_power_release(struct inode *inode, struct file *filp)
637 {
638 	struct pm_qos_request *req;
639 
640 	req = filp->private_data;
641 	pm_qos_remove_request(req);
642 	kfree(req);
643 
644 	return 0;
645 }
646 
647 
648 static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
649 		size_t count, loff_t *f_pos)
650 {
651 	s32 value;
652 	unsigned long flags;
653 	struct pm_qos_request *req = filp->private_data;
654 
655 	if (!req)
656 		return -EINVAL;
657 	if (!pm_qos_request_active(req))
658 		return -EINVAL;
659 
660 	spin_lock_irqsave(&pm_qos_lock, flags);
661 	value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints);
662 	spin_unlock_irqrestore(&pm_qos_lock, flags);
663 
664 	return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
665 }
666 
667 static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
668 		size_t count, loff_t *f_pos)
669 {
670 	s32 value;
671 	struct pm_qos_request *req;
672 
673 	if (count == sizeof(s32)) {
674 		if (copy_from_user(&value, buf, sizeof(s32)))
675 			return -EFAULT;
676 	} else {
677 		int ret;
678 
679 		ret = kstrtos32_from_user(buf, count, 16, &value);
680 		if (ret)
681 			return ret;
682 	}
683 
684 	req = filp->private_data;
685 	pm_qos_update_request(req, value);
686 
687 	return count;
688 }
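
/*
 * Write format sketch (editor's addition; fd is a hypothetical descriptor
 * open on one of the /dev nodes registered below).  As the code above shows,
 * a client may either write a raw s32 of exactly sizeof(s32) bytes, or an
 * ASCII string that kstrtos32_from_user() parses as hexadecimal:
 *
 *	s32 val = 20;
 *	write(fd, &val, sizeof(val));	// binary form
 *	write(fd, "14", 2);		// ASCII hex form, 0x14 == 20
 */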
689 
690 
691 static int __init pm_qos_power_init(void)
692 {
693 	int ret = 0;
694 	int i;
695 	struct dentry *d;
696 
697 	BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
698 
699 	d = debugfs_create_dir("pm_qos", NULL);
700 	if (IS_ERR_OR_NULL(d))
701 		d = NULL;
702 
703 	for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) {
704 		ret = register_pm_qos_misc(pm_qos_array[i], d);
705 		if (ret < 0) {
706 			pr_err("%s: %s setup failed\n",
707 			       __func__, pm_qos_array[i]->name);
708 			return ret;
709 		}
710 	}
711 
712 	return ret;
713 }
714 
715 late_initcall(pm_qos_power_init);
716