/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value : register requests
 * Watchers of QoS value : get notified when target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register different types of notification callbacks:
 * . a per-device notification callback using the dev_pm_qos_*_notifier API.
 *   The notification chain data is stored in the per-device constraint
 *   data struct.
 * . a system-wide notification callback using the dev_pm_qos_*_global_notifier
 *   API. The notification chain data is stored in a static variable.
 *
 * Notes about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored into the device
 *   dev_pm_info.
 * . To minimize the data usage by the per-device constraints, the data struct
 *   is only allocated at the first call to dev_pm_qos_add_request.
 * . The data is later freed when the device is removed from the system.
 * . A global mutex protects the constraints users from the data being
 *   allocated and freed.
 */

#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);

/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
	struct dev_pm_qos *qos = dev->power.qos;
	struct pm_qos_flags *pqf;
	s32 val;

	lockdep_assert_held(&dev->power.lock);

	if (IS_ERR_OR_NULL(qos))
		return PM_QOS_FLAGS_UNDEFINED;

	pqf = &qos->flags;
	if (list_empty(&pqf->list))
		return PM_QOS_FLAGS_UNDEFINED;

	val = pqf->effective_flags & mask;
	if (val)
		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

	return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
	unsigned long irqflags;
	enum pm_qos_flags_status ret;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	ret = __dev_pm_qos_flags(dev, mask);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);

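/*
 * Illustrative sketch (not part of this file's code): a PM domain that must
 * not power off a device while any active flags request forbids it could
 * check the aggregate like this. The surrounding logic is hypothetical;
 * PM_QOS_FLAG_NO_POWER_OFF comes from <linux/pm_qos.h>:
 *
 *	if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF)
 *			!= PM_QOS_FLAGS_NONE)
 *		return -EBUSY;
 */
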
/**
 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_read_value(struct device *dev)
{
	lockdep_assert_held(&dev->power.lock);

	return IS_ERR_OR_NULL(dev->power.qos) ?
		0 : pm_qos_read_value(&dev->power.qos->resume_latency);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 */
s32 dev_pm_qos_read_value(struct device *dev)
{
	unsigned long flags;
	s32 ret;

	spin_lock_irqsave(&dev->power.lock, flags);
	ret = __dev_pm_qos_read_value(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return ret;
}

/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply.
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and, if needed, call the per-device and the global notification
 * callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = pm_qos_update_target(&qos->resume_latency,
					   &req->data.pnode, action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->resume_latency);
			blocking_notifier_call_chain(&dev_pm_notifiers,
						     (unsigned long)value,
						     req);
		}
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->power.set_latency_tolerance(req->dev, value);
		}
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation.
 * Must be called with the dev_pm_qos_mtx mutex held.
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->resume_latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->type = PM_QOS_MIN;
	c->notifiers = n;

	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;

	INIT_LIST_HEAD(&qos->flags.list);

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}

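/*
 * Worked example of the PM_QOS_MIN aggregation set up above (the values are
 * hypothetical): with two active resume-latency requests of 100 us and
 * 250 us, pm_qos_read_value() on the resume_latency constraint set yields
 * 100, because with PM_QOS_MIN the most restrictive (smallest) bound wins.
 * Once the last request is removed, the value falls back to
 * PM_QOS_RESUME_LATENCY_DEFAULT_VALUE.
 */
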
static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	/*
	 * If the device's PM QoS resume latency limit or PM QoS flags have
	 * been exposed to user space, they have to be hidden at this point.
	 */
	pm_qos_sysfs_remove_resume_latency(dev);
	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device. */
	c = &qos->resume_latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update constraints list and call the notification
		 * callbacks if needed.
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	c = &qos->latency_tolerance;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	/*
	 * Only the resume_latency constraint set has a notifier head
	 * allocated for it, so free that one explicitly rather than
	 * whatever "c" happens to point at after the loops above.
	 */
	kfree(qos->resume_latency.notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}

static bool dev_pm_qos_invalid_request(struct device *dev,
				       struct dev_pm_qos_request *req)
{
	return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE
			&& !dev->power.set_latency_tolerance);
}

static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || dev_pm_qos_invalid_request(dev, req))
		return -EINVAL;

	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (!ret) {
		req->dev = dev;
		req->type = type;
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
	}
	return ret;
}

/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle. Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been removed
 * from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_add_request(dev, req, type, value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);

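/*
 * Illustrative sketch of the request life cycle (the foo_* name and the
 * microsecond values are hypothetical, not taken from this file):
 *
 *	static struct dev_pm_qos_request foo_req;
 *
 *	ret = dev_pm_qos_add_request(dev, &foo_req,
 *				     DEV_PM_QOS_RESUME_LATENCY, 100);
 *	...
 *	dev_pm_qos_update_request(&foo_req, 250);
 *	...
 *	dev_pm_qos_remove_request(&foo_req);
 *
 * The handle must stay allocated (and untouched by the caller) between add
 * and remove, since it is linked into the device's constraints list.
 */
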
/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req: PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
					new_value);
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req: handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_update_request(req, new_value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);

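/*
 * Illustrative note on the return values above (example values are
 * hypothetical): if the aggregate resume latency is currently 100 us because
 * of this request alone, dev_pm_qos_update_request(&foo_req, 250) returns 1
 * (the aggregate rose to 250 us), while updating a request that is not the
 * most restrictive one typically returns 0, since the aggregate target is
 * unchanged.
 */
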
static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
					PM_QOS_DEFAULT_VALUE);
	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	memset(req, 0, sizeof(*req));
	return ret;
}

/**
 * dev_pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_remove_request(req);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);

/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is
 * called, it will be created (or an error code will be returned if that
 * fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	if (!ret)
		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
						       notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);

/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier)
{
	int retval = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (!IS_ERR_OR_NULL(dev->power.qos))
		retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
							    notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return retval;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);

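/*
 * Illustrative sketch of a watcher (the foo_* names are hypothetical): the
 * callback runs on the device's resume-latency notifier chain whenever the
 * aggregate target value changes, receiving the new value as "val".
 *
 *	static int foo_latency_notify(struct notifier_block *nb,
 *				      unsigned long val, void *ptr)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_latency_notify,
 *	};
 *
 *	ret = dev_pm_qos_add_notifier(dev, &foo_nb);
 */
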
/**
 * dev_pm_qos_add_global_notifier - sets notification entry for changes to
 * target value of the PM QoS constraints for any device
 *
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for any device.
 */
int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
{
	return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);

/**
 * dev_pm_qos_remove_global_notifier - deletes notification for changes to
 * target value of PM QoS constraints for any device
 *
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value for any device.
 */
int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
{
	return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);

/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @type: Type of the request.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	struct device *ancestor = dev->parent;
	int ret = -ENODEV;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		while (ancestor && !ancestor->power.ignore_children)
			ancestor = ancestor->parent;

		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		while (ancestor && !ancestor->power.set_latency_tolerance)
			ancestor = ancestor->parent;

		break;
	default:
		ancestor = NULL;
	}
	if (ancestor)
		ret = dev_pm_qos_add_request(ancestor, req, type, value);

	if (ret < 0)
		req->dev = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);

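/*
 * Illustrative sketch (the caller and values are hypothetical): a device
 * whose bus controller has power.ignore_children set can bound the
 * controller's resume latency rather than its own:
 *
 *	ret = dev_pm_qos_add_ancestor_request(dev, &foo_req,
 *					      DEV_PM_QOS_RESUME_LATENCY, 100);
 *
 * The request is added for the closest ancestor matching the request type
 * and is removed later with the usual dev_pm_qos_remove_request().
 */
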
static void __dev_pm_qos_drop_user_request(struct device *dev,
					   enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos_request *req = NULL;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		req = dev->power.qos->resume_latency_req;
		dev->power.qos->resume_latency_req = NULL;
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		req = dev->power.qos->latency_tolerance_req;
		dev->power.qos->latency_tolerance_req = NULL;
		break;
	case DEV_PM_QOS_FLAGS:
		req = dev->power.qos->flags_req;
		dev->power.qos->flags_req = NULL;
		break;
	}
	__dev_pm_qos_remove_request(req);
	kfree(req);
}

static void dev_pm_qos_drop_user_request(struct device *dev,
					 enum dev_pm_qos_req_type type)
{
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_drop_user_request(dev, type);
	mutex_unlock(&dev_pm_qos_mtx);
}

/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->resume_latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->resume_latency_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_resume_latency(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);

static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}

/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_resume_latency(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_latency_limit(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);

/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);

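/*
 * Illustrative sketch (hypothetical driver code, values included): exposing
 * both knobs from a probe path creates the corresponding attributes under
 * the device's power/ directory in sysfs (e.g. pm_qos_resume_latency_us and
 * pm_qos_no_power_off in kernels of this vintage):
 *
 *	dev_pm_qos_expose_latency_limit(dev, 100);
 *	dev_pm_qos_expose_flags(dev, 0);
 */
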
static void __dev_pm_qos_hide_flags(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}

/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
	s32 value;
	int ret;

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
		ret = -EINVAL;
		goto out;
	}

	value = dev_pm_qos_requested_flags(dev);
	if (set)
		value |= mask;
	else
		value &= ~mask;

	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}

/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
	s32 ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = IS_ERR_OR_NULL(dev->power.qos)
		|| !dev->power.qos->latency_tolerance_req ?
			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
			dev->power.qos->latency_tolerance_req->data.pnode.prio;
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}

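/*
 * Illustrative example of the set/clear semantics of
 * dev_pm_qos_update_flags() above (the flag choice is hypothetical): with
 * PM_QOS_FLAG_NO_POWER_OFF currently clear,
 *
 *	dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, true);
 *
 * ORs the flag into the user space request, and passing false afterwards
 * clears it again; -EINVAL is returned if the flags have not been exposed
 * with dev_pm_qos_expose_flags() first.
 */
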
/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos)
	    || !dev->power.qos->latency_tolerance_req) {
		struct dev_pm_qos_request *req;

		if (val < 0) {
			ret = -EINVAL;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
		if (ret < 0) {
			kfree(req);
			goto out;
		}
		dev->power.qos->latency_tolerance_req = req;
	} else {
		if (val < 0) {
			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
			ret = 0;
		} else {
			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
		}
	}

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}

/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
 * @dev: Device whose latency tolerance to expose
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
	int ret;

	if (!dev->power.set_latency_tolerance)
		return -EINVAL;

	mutex_lock(&dev_pm_qos_sysfs_mtx);
	ret = pm_qos_sysfs_add_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);

/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
 * @dev: Device whose latency tolerance to hide
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);
	pm_qos_sysfs_remove_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	/* Remove the request from user space now. */
	pm_runtime_get_sync(dev);
	dev_pm_qos_update_user_latency_tolerance(dev,
		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);
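
/*
 * Illustrative summary of the user space latency tolerance life cycle (the
 * value is hypothetical): a first call such as
 *
 *	dev_pm_qos_update_user_latency_tolerance(dev, 50);
 *
 * allocates and adds the request, later non-negative calls update it in
 * place, and a negative value (e.g. PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
 * drops it again, as dev_pm_qos_hide_latency_tolerance() does above.
 */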