/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value : register requests
 * Watchers of QoS value : get notified when target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register different types of notification callbacks:
 * . a per-device notification callback using the dev_pm_qos_*_notifier API.
 *   The notification chain data is stored in the per-device constraint
 *   data struct.
 * . a system-wide notification callback using the dev_pm_qos_*_global_notifier
 *   API. The notification chain data is stored in a static variable.
 *
 * Note about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored into the device
 *   dev_pm_info.
 * . To minimize the data usage by the per-device constraints, the data struct
 *   is only allocated at the first call to dev_pm_qos_add_request.
 * . The data is later free'd when the device is removed from the system.
 * . A global mutex protects the constraints users from the data being
 *   allocated and free'd.
 */

#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>

#include "power.h"

/* Serializes all additions/updates/removals of device PM QoS requests. */
static DEFINE_MUTEX(dev_pm_qos_mtx);

/* System-wide chain, called when any device's aggregate latency changes. */
static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);

/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 *
 * Return: PM_QOS_FLAGS_UNDEFINED if no flags requests exist for @dev,
 * PM_QOS_FLAGS_ALL if every bit in @mask is set in the effective flags,
 * PM_QOS_FLAGS_SOME if only some of them are, PM_QOS_FLAGS_NONE if none.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
	struct dev_pm_qos *qos = dev->power.qos;
	struct pm_qos_flags *pqf;
	s32 val;

	/* qos is NULL before the first request, ERR_PTR after removal. */
	if (IS_ERR_OR_NULL(qos))
		return PM_QOS_FLAGS_UNDEFINED;

	pqf = &qos->flags;
	if (list_empty(&pqf->list))
		return PM_QOS_FLAGS_UNDEFINED;

	val = pqf->effective_flags & mask;
	if (val)
		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

	return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * Same as __dev_pm_qos_flags(), but takes dev->power.lock itself.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
	unsigned long irqflags;
	enum pm_qos_flags_status ret;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	ret = __dev_pm_qos_flags(dev, mask);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);

/**
 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 *
 * Return: the aggregate latency constraint, or 0 if the constraints
 * object is absent (not allocated yet or already destroyed).
 */
s32 __dev_pm_qos_read_value(struct device *dev)
{
	return IS_ERR_OR_NULL(dev->power.qos) ?
		0 : pm_qos_read_value(&dev->power.qos->latency);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 */
s32 dev_pm_qos_read_value(struct device *dev)
{
	unsigned long flags;
	s32 ret;

	spin_lock_irqsave(&dev->power.lock, flags);
	ret = __dev_pm_qos_read_value(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return ret;
}

/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device and the global notification
 * callbacks.
 *
 * Must be called under dev_pm_qos_mtx with req->dev->power.qos valid.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch(req->type) {
	case DEV_PM_QOS_LATENCY:
		ret = pm_qos_update_target(&qos->latency, &req->data.pnode,
					   action, value);
		if (ret) {
			/*
			 * The aggregate latency changed: notify the global
			 * chain with the new value (per-device notifiers are
			 * run by pm_qos_update_target() itself).
			 */
			value = pm_qos_read_value(&qos->latency);
			blocking_notifier_call_chain(&dev_pm_notifiers,
						     (unsigned long)value,
						     req);
		}
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation.
 * Must be called with the dev_pm_qos_mtx mutex held.
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	/* Per-device notifier head, freed in dev_pm_qos_constraints_destroy. */
	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
	c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
	c->type = PM_QOS_MIN;
	c->notifiers = n;

	INIT_LIST_HEAD(&qos->flags.list);

	/* Publish the fully initialized object under dev->power.lock. */
	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}

static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_mtx);

	/*
	 * If the device's PM QoS resume latency limit or PM QoS flags have been
	 * exposed to user space, they have to be hidden at this point.
	 */
	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device.
	 */
	c = &qos->latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update constraints list and call the notification
		 * callbacks if needed.
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	/*
	 * Mark the object as invalid (rather than NULL) so late callers get
	 * -ENODEV instead of triggering a fresh allocation.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	kfree(c->notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
}

/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle. Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been removed
 * from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || !req) /* guard against callers passing in null */
		return -EINVAL;

	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	mutex_lock(&dev_pm_qos_mtx);

	/* ERR_PTR means the device was removed; NULL means first request. */
	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	if (!ret) {
		req->dev = dev;
		req->type = type;
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
	}

	mutex_unlock(&dev_pm_qos_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);

/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req : PM QoS request to modify.
 * @new_value: New value to request.
 *
 * Must be called under dev_pm_qos_mtx.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	switch(req->type) {
	case DEV_PM_QOS_LATENCY:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	/* Skip the list update entirely if the request is unchanged. */
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req : handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_update_request(req, new_value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);

/* Remove @req from its device's constraints list; dev_pm_qos_mtx held. */
static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	/* Clear the handle so dev_pm_qos_request_active() reports inactive. */
	memset(req, 0, sizeof(*req));
	return ret;
}

/**
 * dev_pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_remove_request(req);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);

/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is called,
 * it will be created (or error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	if (!ret)
		ret = blocking_notifier_chain_register(
				dev->power.qos->latency.notifiers, notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);

/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier)
{
	int retval = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (!IS_ERR_OR_NULL(dev->power.qos))
		retval = blocking_notifier_chain_unregister(
				dev->power.qos->latency.notifiers,
				notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return retval;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);

/**
 * dev_pm_qos_add_global_notifier - sets notification entry for changes to
 * target value of the PM QoS constraints for any device
 *
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for any device.
 */
int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
{
	return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);

/**
 * dev_pm_qos_remove_global_notifier - deletes notification for changes to
 * target value of PM QoS constraints for any device
 *
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value for any device.
 */
int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
{
	return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);

/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @value: Constraint latency value.
 *
 * The request is added for the closest ancestor of @dev that does not
 * ignore its children for power management purposes; returns -ENODEV if
 * no such ancestor exists.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req, s32 value)
{
	struct device *ancestor = dev->parent;
	int ret = -ENODEV;

	/* Walk up until an ancestor with ignore_children cleared is found. */
	while (ancestor && !ancestor->power.ignore_children)
		ancestor = ancestor->parent;

	if (ancestor)
		ret = dev_pm_qos_add_request(ancestor, req,
					     DEV_PM_QOS_LATENCY, value);

	if (ret < 0)
		req->dev = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);

#ifdef CONFIG_PM_RUNTIME
/*
 * Drop and free the user-space-owned request of the given type.
 * Must be called under dev_pm_qos_mtx with dev->power.qos valid.
 */
static void __dev_pm_qos_drop_user_request(struct device *dev,
					   enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos_request *req = NULL;

	switch(type) {
	case DEV_PM_QOS_LATENCY:
		req = dev->power.qos->latency_req;
		dev->power.qos->latency_req = NULL;
		break;
	case DEV_PM_QOS_FLAGS:
		req = dev->power.qos->flags_req;
		dev->power.qos->flags_req = NULL;
		break;
	}
	__dev_pm_qos_remove_request(req);
	/* The request was allocated by the expose routine, so free it here. */
	kfree(req);
}

/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	mutex_lock(&dev_pm_qos_mtx);

	/* Re-check: the device may have been removed since add_request. */
	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		goto out;
	}

	dev->power.qos->latency_req = req;
	ret = pm_qos_sysfs_add_latency(dev);
	if (ret)
		/* sysfs creation failed: drop and free the request again. */
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);

/* Unlocked helper; also called from dev_pm_qos_constraints_destroy(). */
static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
		pm_qos_sysfs_remove_latency(dev);
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
	}
}

/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_latency_limit(dev);
	mutex_unlock(&dev_pm_qos_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);

/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	/* Keep the device resumed while manipulating its flags request. */
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		goto out;
	}

	dev->power.qos->flags_req = req;
	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		/* sysfs creation failed: drop and free the request again. */
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);

/* Unlocked helper; also called from dev_pm_qos_constraints_destroy(). */
static void __dev_pm_qos_hide_flags(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
		pm_qos_sysfs_remove_flags(dev);
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
	}
}

/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	/* Resume the device before touching its PM QoS flags request. */
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
	s32 value;
	int ret;

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	/* Only valid if the flags have been exposed to user space. */
	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
		ret = -EINVAL;
		goto out;
	}

	value = dev_pm_qos_requested_flags(dev);
	if (set)
		value |= mask;
	else
		value &= ~mask;

	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}
#else /* !CONFIG_PM_RUNTIME */
static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
static void __dev_pm_qos_hide_flags(struct device *dev) {}
#endif /* CONFIG_PM_RUNTIME */