// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019, Linaro Ltd
 */
#include <linux/clk-provider.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>
#include <linux/slab.h>
#include <linux/string_choices.h>
#include <linux/soc/qcom/qcom_aoss.h>

#define CREATE_TRACE_POINTS
#include "trace-aoss.h"

#define QMP_DESC_MAGIC			0x0
#define QMP_DESC_VERSION		0x4
#define QMP_DESC_FEATURES		0x8

/* AOP-side offsets */
#define QMP_DESC_UCORE_LINK_STATE	0xc
#define QMP_DESC_UCORE_LINK_STATE_ACK	0x10
#define QMP_DESC_UCORE_CH_STATE		0x14
#define QMP_DESC_UCORE_CH_STATE_ACK	0x18
#define QMP_DESC_UCORE_MBOX_SIZE	0x1c
#define QMP_DESC_UCORE_MBOX_OFFSET	0x20

/* Linux-side offsets */
#define QMP_DESC_MCORE_LINK_STATE	0x24
#define QMP_DESC_MCORE_LINK_STATE_ACK	0x28
#define QMP_DESC_MCORE_CH_STATE		0x2c
#define QMP_DESC_MCORE_CH_STATE_ACK	0x30
#define QMP_DESC_MCORE_MBOX_SIZE	0x34
#define QMP_DESC_MCORE_MBOX_OFFSET	0x38

#define QMP_STATE_UP			GENMASK(15, 0)
#define QMP_STATE_DOWN			GENMASK(31, 16)

#define QMP_MAGIC			0x4d41494c /* mail */
#define QMP_VERSION			1

/* 64 bytes is enough to store the requests and provides padding to 4 bytes */
#define QMP_MSG_LEN			64

#define QMP_NUM_COOLING_RESOURCES	2

#define QMP_DEBUGFS_FILES		4

static bool qmp_cdev_max_state = 1;

struct qmp_cooling_device {
	struct thermal_cooling_device *cdev;
	struct qmp *qmp;
	char *name;
	bool state;
};

/**
 * struct qmp - driver state for QMP implementation
 * @msgram: iomem referencing the message RAM used for communication
 * @dev: reference to QMP device
 * @mbox_client: mailbox client used to ring the doorbell on transmit
 * @mbox_chan: mailbox channel used to ring the doorbell on transmit
 * @offset: offset within @msgram where messages should be written
 * @size: maximum size of the messages to be transmitted
 * @event: wait_queue for synchronization with the IRQ
 * @tx_lock: provides synchronization between multiple callers of qmp_send()
 * @qdss_clk: QDSS clock hw struct
 * @cooling_devs: thermal cooling devices
 * @debugfs_root: directory for the developer/tester interface
 * @debugfs_files: array of individual debugfs entries under debugfs_root
 */
struct qmp {
	void __iomem *msgram;
	struct device *dev;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	size_t offset;
	size_t size;

	wait_queue_head_t event;

	struct mutex tx_lock;

	struct clk_hw qdss_clk;
	struct qmp_cooling_device *cooling_devs;
	struct dentry *debugfs_root;
	struct dentry *debugfs_files[QMP_DEBUGFS_FILES];
};

static void qmp_kick(struct qmp *qmp)
{
	mbox_send_message(qmp->mbox_chan, NULL);
	mbox_client_txdone(qmp->mbox_chan, 0);
}

static bool qmp_magic_valid(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_MAGIC) == QMP_MAGIC;
}

static bool qmp_link_acked(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_MCORE_LINK_STATE_ACK) == QMP_STATE_UP;
}

static bool qmp_mcore_channel_acked(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_MCORE_CH_STATE_ACK) == QMP_STATE_UP;
}

static bool qmp_ucore_channel_up(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_UCORE_CH_STATE) == QMP_STATE_UP;
}
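/*
 * Opening the channel is a staged handshake against the descriptor fields
 * above: the local (mcore) side acks the remote link state and brings its own
 * link state up, waits for the remote (ucore) ack, then brings its channel
 * state up, waits for the remote channel to come up and acks it. Every state
 * change is signalled to the remote side by ringing the doorbell via
 * qmp_kick(), and the remote side's writes are signalled back through the
 * interrupt handled by qmp_intr().
 */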
static int qmp_open(struct qmp *qmp)
{
	int ret;
	u32 val;

	if (!qmp_magic_valid(qmp)) {
		dev_err(qmp->dev, "QMP magic doesn't match\n");
		return -EINVAL;
	}

	val = readl(qmp->msgram + QMP_DESC_VERSION);
	if (val != QMP_VERSION) {
		dev_err(qmp->dev, "unsupported QMP version %d\n", val);
		return -EINVAL;
	}

	qmp->offset = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_OFFSET);
	qmp->size = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_SIZE);
	if (!qmp->size) {
		dev_err(qmp->dev, "invalid mailbox size\n");
		return -EINVAL;
	}

	/* Ack remote core's link state */
	val = readl(qmp->msgram + QMP_DESC_UCORE_LINK_STATE);
	writel(val, qmp->msgram + QMP_DESC_UCORE_LINK_STATE_ACK);

	/* Set local core's link state to up */
	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);

	qmp_kick(qmp);

	ret = wait_event_timeout(qmp->event, qmp_link_acked(qmp), HZ);
	if (!ret) {
		dev_err(qmp->dev, "ucore didn't ack link\n");
		goto timeout_close_link;
	}

	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_CH_STATE);

	qmp_kick(qmp);

	ret = wait_event_timeout(qmp->event, qmp_ucore_channel_up(qmp), HZ);
	if (!ret) {
		dev_err(qmp->dev, "ucore didn't open channel\n");
		goto timeout_close_channel;
	}

	/* Ack remote core's channel state */
	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_UCORE_CH_STATE_ACK);

	qmp_kick(qmp);

	ret = wait_event_timeout(qmp->event, qmp_mcore_channel_acked(qmp), HZ);
	if (!ret) {
		dev_err(qmp->dev, "ucore didn't ack channel\n");
		goto timeout_close_channel;
	}

	return 0;

timeout_close_channel:
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);

timeout_close_link:
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
	qmp_kick(qmp);

	return -ETIMEDOUT;
}

static void qmp_close(struct qmp *qmp)
{
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
	qmp_kick(qmp);
}

static irqreturn_t qmp_intr(int irq, void *data)
{
	struct qmp *qmp = data;

	wake_up_all(&qmp->event);

	return IRQ_HANDLED;
}

static bool qmp_message_empty(struct qmp *qmp)
{
	return readl(qmp->msgram + qmp->offset) == 0;
}
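/*
 * Outgoing messages occupy the mcore mailbox region of the message RAM: the
 * 32-bit word at qmp->offset holds the message length and the NUL-padded
 * payload follows at qmp->offset + 4, written as 32-bit accesses. The remote
 * side signals consumption by clearing the length word back to zero, which is
 * what qmp_message_empty() checks for.
 */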
/**
 * qmp_send() - send a message to the AOSS
 * @qmp: qmp context
 * @fmt: format string for message to be sent
 * @...: arguments for the format string
 *
 * Transmit message to AOSS and wait for the AOSS to acknowledge the message.
 * data must not be longer than the mailbox size. Access is synchronized by
 * this implementation.
 *
 * Return: 0 on success, negative errno on failure
 */
int __printf(2, 3) qmp_send(struct qmp *qmp, const char *fmt, ...)
{
	char buf[QMP_MSG_LEN];
	long time_left;
	va_list args;
	int len;
	int ret;

	if (WARN_ON(IS_ERR_OR_NULL(qmp) || !fmt))
		return -EINVAL;

	memset(buf, 0, sizeof(buf));
	va_start(args, fmt);
	len = vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (WARN_ON(len >= sizeof(buf)))
		return -EINVAL;

	mutex_lock(&qmp->tx_lock);

	trace_aoss_send(buf);

	/* The message RAM only implements 32-bit accesses */
	__iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
			 buf, sizeof(buf) / sizeof(u32));
	writel(sizeof(buf), qmp->msgram + qmp->offset);

	/* Read back length to confirm data written in message RAM */
	readl(qmp->msgram + qmp->offset);
	qmp_kick(qmp);

	time_left = wait_event_interruptible_timeout(qmp->event,
						     qmp_message_empty(qmp), HZ);
	if (!time_left) {
		dev_err(qmp->dev, "ucore did not ack channel\n");
		ret = -ETIMEDOUT;

		/* Clear message from buffer */
		writel(0, qmp->msgram + qmp->offset);
	} else {
		ret = 0;
	}

	trace_aoss_send_done(buf, ret);

	mutex_unlock(&qmp->tx_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(qmp_send);
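/*
 * Illustrative use of the exported client API (a sketch only, not part of
 * this driver): a hypothetical consumer whose device-tree node carries a
 * "qcom,qmp" phandle and which includes <linux/soc/qcom/qcom_aoss.h> could
 * obtain a handle and send a request roughly as follows. example_probe() and
 * the reuse of the QDSS clock request string are placeholders for
 * illustration.
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct qmp *qmp;
 *		int ret;
 *
 *		qmp = qmp_get(&pdev->dev);
 *		if (IS_ERR(qmp))
 *			return PTR_ERR(qmp);
 *
 *		ret = qmp_send(qmp, "{class: clock, res: qdss, val: %d}", 1);
 *
 *		qmp_put(qmp);
 *		return ret;
 *	}
 */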
static int qmp_qdss_clk_prepare(struct clk_hw *hw)
{
	static const char *buf = "{class: clock, res: qdss, val: 1}";
	struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);

	return qmp_send(qmp, buf);
}

static void qmp_qdss_clk_unprepare(struct clk_hw *hw)
{
	static const char *buf = "{class: clock, res: qdss, val: 0}";
	struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);

	qmp_send(qmp, buf);
}

static const struct clk_ops qmp_qdss_clk_ops = {
	.prepare = qmp_qdss_clk_prepare,
	.unprepare = qmp_qdss_clk_unprepare,
};

static int qmp_qdss_clk_add(struct qmp *qmp)
{
	static const struct clk_init_data qdss_init = {
		.ops = &qmp_qdss_clk_ops,
		.name = "qdss",
	};
	int ret;

	qmp->qdss_clk.init = &qdss_init;
	ret = clk_hw_register(qmp->dev, &qmp->qdss_clk);
	if (ret < 0) {
		dev_err(qmp->dev, "failed to register qdss clock\n");
		return ret;
	}

	ret = of_clk_add_hw_provider(qmp->dev->of_node, of_clk_hw_simple_get,
				     &qmp->qdss_clk);
	if (ret < 0) {
		dev_err(qmp->dev, "unable to register of clk hw provider\n");
		clk_hw_unregister(&qmp->qdss_clk);
	}

	return ret;
}

static void qmp_qdss_clk_remove(struct qmp *qmp)
{
	of_clk_del_provider(qmp->dev->of_node);
	clk_hw_unregister(&qmp->qdss_clk);
}

static int qmp_cdev_get_max_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	*state = qmp_cdev_max_state;
	return 0;
}

static int qmp_cdev_get_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	struct qmp_cooling_device *qmp_cdev = cdev->devdata;

	*state = qmp_cdev->state;
	return 0;
}

static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long state)
{
	struct qmp_cooling_device *qmp_cdev = cdev->devdata;
	bool cdev_state;
	int ret;

	/* Normalize state */
	cdev_state = !!state;

	if (qmp_cdev->state == state)
		return 0;

	ret = qmp_send(qmp_cdev->qmp, "{class: volt_flr, event:zero_temp, res:%s, value:%s}",
		       qmp_cdev->name, str_on_off(cdev_state));
	if (!ret)
		qmp_cdev->state = cdev_state;

	return ret;
}

static const struct thermal_cooling_device_ops qmp_cooling_device_ops = {
	.get_max_state = qmp_cdev_get_max_state,
	.get_cur_state = qmp_cdev_get_cur_state,
	.set_cur_state = qmp_cdev_set_cur_state,
};

static int qmp_cooling_device_add(struct qmp *qmp,
				  struct qmp_cooling_device *qmp_cdev,
				  struct device_node *node)
{
	char *cdev_name = (char *)node->name;

	qmp_cdev->qmp = qmp;
	qmp_cdev->state = !qmp_cdev_max_state;
	qmp_cdev->name = cdev_name;
	qmp_cdev->cdev = devm_thermal_of_cooling_device_register
				(qmp->dev, node,
				cdev_name,
				qmp_cdev, &qmp_cooling_device_ops);

	if (IS_ERR(qmp_cdev->cdev))
		dev_err(qmp->dev, "unable to register %s cooling device\n",
			cdev_name);

	return PTR_ERR_OR_ZERO(qmp_cdev->cdev);
}

static int qmp_cooling_devices_register(struct qmp *qmp)
{
	struct device_node *np;
	int count = 0;
	int ret;

	np = qmp->dev->of_node;

	qmp->cooling_devs = devm_kcalloc(qmp->dev, QMP_NUM_COOLING_RESOURCES,
					 sizeof(*qmp->cooling_devs),
					 GFP_KERNEL);

	if (!qmp->cooling_devs)
		return -ENOMEM;

	for_each_available_child_of_node_scoped(np, child) {
		if (!of_property_present(child, "#cooling-cells"))
			continue;
		ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++],
					     child);
		if (ret)
			goto unroll;
	}

	if (!count)
		devm_kfree(qmp->dev, qmp->cooling_devs);

	return 0;

unroll:
	while (--count >= 0)
		thermal_cooling_device_unregister
			(qmp->cooling_devs[count].cdev);
	devm_kfree(qmp->dev, qmp->cooling_devs);

	return ret;
}

static void qmp_cooling_devices_remove(struct qmp *qmp)
{
	int i;

	for (i = 0; i < QMP_NUM_COOLING_RESOURCES; i++)
		thermal_cooling_device_unregister(qmp->cooling_devs[i].cdev);
}

/**
 * qmp_get() - get a qmp handle from a device
 * @dev: client device pointer
 *
 * Return: handle to qmp device on success, ERR_PTR() on failure
 */
struct qmp *qmp_get(struct device *dev)
{
	struct platform_device *pdev;
	struct device_node *np;
	struct qmp *qmp;

	if (!dev || !dev->of_node)
		return ERR_PTR(-EINVAL);

	np = of_parse_phandle(dev->of_node, "qcom,qmp", 0);
	if (!np)
		return ERR_PTR(-ENODEV);

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev)
		return ERR_PTR(-EINVAL);

	qmp = platform_get_drvdata(pdev);

	if (!qmp) {
		put_device(&pdev->dev);
		return ERR_PTR(-EPROBE_DEFER);
	}
	return qmp;
}
EXPORT_SYMBOL_GPL(qmp_get);

/**
 * qmp_put() - release a qmp handle
 * @qmp: qmp handle obtained from qmp_get()
 */
void qmp_put(struct qmp *qmp)
{
	/*
	 * Match get_device() inside of_find_device_by_node() in
	 * qmp_get()
	 */
	if (!IS_ERR_OR_NULL(qmp))
		put_device(qmp->dev);
}
EXPORT_SYMBOL_GPL(qmp_put);

struct qmp_debugfs_entry {
	const char *name;
	const char *fmt;
	bool is_bool;
	const char *true_val;
	const char *false_val;
};

static const struct qmp_debugfs_entry qmp_debugfs_entries[QMP_DEBUGFS_FILES] = {
	{ "ddr_frequency_mhz", "{class: ddr, res: fixed, val: %u}", false },
	{ "prevent_aoss_sleep", "{class: aoss_slp, res: sleep: %s}", true, "enable", "disable" },
	{ "prevent_cx_collapse", "{class: cx_mol, res: cx, val: %s}", true, "mol", "off" },
	{ "prevent_ddr_collapse", "{class: ddr_mol, res: ddr, val: %s}", true, "mol", "off" },
};
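/*
 * The debugfs files below form a developer/tester interface created under
 * the "qcom_aoss" directory. Each file is write-only (mode 0200); a write is
 * parsed, formatted with the matching entry from qmp_debugfs_entries[] and
 * sent via qmp_send(). Illustrative shell usage, assuming debugfs is mounted
 * at /sys/kernel/debug and the frequency value is just an example:
 *
 *	echo 1 > /sys/kernel/debug/qcom_aoss/prevent_cx_collapse
 *	echo 1555 > /sys/kernel/debug/qcom_aoss/ddr_frequency_mhz
 */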
static ssize_t qmp_debugfs_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *pos)
{
	const struct qmp_debugfs_entry *entry = NULL;
	struct qmp *qmp = file->private_data;
	char buf[QMP_MSG_LEN];
	unsigned int uint_val;
	const char *str_val;
	bool bool_val;
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(qmp->debugfs_files); i++) {
		if (qmp->debugfs_files[i] == file->f_path.dentry) {
			entry = &qmp_debugfs_entries[i];
			break;
		}
	}
	if (WARN_ON(!entry))
		return -EFAULT;

	if (entry->is_bool) {
		ret = kstrtobool_from_user(user_buf, count, &bool_val);
		if (ret)
			return ret;

		str_val = bool_val ? entry->true_val : entry->false_val;

		ret = snprintf(buf, sizeof(buf), entry->fmt, str_val);
		if (ret >= sizeof(buf))
			return -EINVAL;
	} else {
		ret = kstrtou32_from_user(user_buf, count, 0, &uint_val);
		if (ret)
			return ret;

		ret = snprintf(buf, sizeof(buf), entry->fmt, uint_val);
		if (ret >= sizeof(buf))
			return -EINVAL;
	}

	ret = qmp_send(qmp, buf);
	if (ret < 0)
		return ret;

	return count;
}

static const struct file_operations qmp_debugfs_fops = {
	.open = simple_open,
	.write = qmp_debugfs_write,
};

static void qmp_debugfs_create(struct qmp *qmp)
{
	const struct qmp_debugfs_entry *entry;
	int i;

	qmp->debugfs_root = debugfs_create_dir("qcom_aoss", NULL);

	for (i = 0; i < ARRAY_SIZE(qmp->debugfs_files); i++) {
		entry = &qmp_debugfs_entries[i];

		qmp->debugfs_files[i] = debugfs_create_file(entry->name, 0200,
							    qmp->debugfs_root,
							    qmp,
							    &qmp_debugfs_fops);
	}
}

static int qmp_probe(struct platform_device *pdev)
{
	struct qmp *qmp;
	int irq;
	int ret;

	qmp = devm_kzalloc(&pdev->dev, sizeof(*qmp), GFP_KERNEL);
	if (!qmp)
		return -ENOMEM;

	qmp->dev = &pdev->dev;
	init_waitqueue_head(&qmp->event);
	mutex_init(&qmp->tx_lock);

	qmp->msgram = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qmp->msgram))
		return PTR_ERR(qmp->msgram);

	qmp->mbox_client.dev = &pdev->dev;
	qmp->mbox_client.knows_txdone = true;
	qmp->mbox_chan = mbox_request_channel(&qmp->mbox_client, 0);
	if (IS_ERR(qmp->mbox_chan)) {
		dev_err(&pdev->dev, "failed to acquire ipc mailbox\n");
		return PTR_ERR(qmp->mbox_chan);
	}

	irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(&pdev->dev, irq, qmp_intr, 0,
			       "aoss-qmp", qmp);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request interrupt\n");
		goto err_free_mbox;
	}

	ret = qmp_open(qmp);
	if (ret < 0)
		goto err_free_mbox;

	ret = qmp_qdss_clk_add(qmp);
	if (ret)
		goto err_close_qmp;

	ret = qmp_cooling_devices_register(qmp);
	if (ret)
		dev_err(&pdev->dev, "failed to register aoss cooling devices\n");

	platform_set_drvdata(pdev, qmp);

	qmp_debugfs_create(qmp);

	return 0;

err_close_qmp:
	qmp_close(qmp);
err_free_mbox:
	mbox_free_channel(qmp->mbox_chan);

	return ret;
}

static void qmp_remove(struct platform_device *pdev)
{
	struct qmp *qmp = platform_get_drvdata(pdev);

	debugfs_remove_recursive(qmp->debugfs_root);

	qmp_qdss_clk_remove(qmp);
	qmp_cooling_devices_remove(qmp);

	qmp_close(qmp);
	mbox_free_channel(qmp->mbox_chan);
}
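/*
 * Illustrative device-tree node for this driver (a sketch only; the
 * authoritative format is the qcom,aoss-qmp binding, and the node name,
 * register range, interrupt and mailbox values below are placeholders). The
 * driver consumes one register region (the message RAM), one interrupt, one
 * mailbox channel, exposes a clock provider and registers a cooling device
 * for each child node with #cooling-cells:
 *
 *	aoss_qmp: power-management@c300000 {
 *		compatible = "qcom,sdm845-aoss-qmp";
 *		reg = <0x0c300000 0x100000>;
 *		interrupts = <GIC_SPI 389 IRQ_TYPE_EDGE_RISING>;
 *		mboxes = <&apss_shared 0>;
 *
 *		#clock-cells = <0>;
 *
 *		cx_cdev: cx {
 *			#cooling-cells = <2>;
 *		};
 *	};
 */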
"qcom,sc7180-aoss-qmp", }, 651 { .compatible = "qcom,sc7280-aoss-qmp", }, 652 { .compatible = "qcom,sdm845-aoss-qmp", }, 653 { .compatible = "qcom,sm8150-aoss-qmp", }, 654 { .compatible = "qcom,sm8250-aoss-qmp", }, 655 { .compatible = "qcom,sm8350-aoss-qmp", }, 656 { .compatible = "qcom,aoss-qmp", }, 657 {} 658 }; 659 MODULE_DEVICE_TABLE(of, qmp_dt_match); 660 661 static struct platform_driver qmp_driver = { 662 .driver = { 663 .name = "qcom_aoss_qmp", 664 .of_match_table = qmp_dt_match, 665 .suppress_bind_attrs = true, 666 }, 667 .probe = qmp_probe, 668 .remove = qmp_remove, 669 }; 670 module_platform_driver(qmp_driver); 671 672 MODULE_DESCRIPTION("Qualcomm AOSS QMP driver"); 673 MODULE_LICENSE("GPL v2"); 674