1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2019, Linaro Ltd 4 */ 5 #include <linux/clk-provider.h> 6 #include <linux/debugfs.h> 7 #include <linux/interrupt.h> 8 #include <linux/io.h> 9 #include <linux/mailbox_client.h> 10 #include <linux/module.h> 11 #include <linux/of_platform.h> 12 #include <linux/platform_device.h> 13 #include <linux/thermal.h> 14 #include <linux/slab.h> 15 #include <linux/soc/qcom/qcom_aoss.h> 16 17 #define CREATE_TRACE_POINTS 18 #include "trace-aoss.h" 19 20 #define QMP_DESC_MAGIC 0x0 21 #define QMP_DESC_VERSION 0x4 22 #define QMP_DESC_FEATURES 0x8 23 24 /* AOP-side offsets */ 25 #define QMP_DESC_UCORE_LINK_STATE 0xc 26 #define QMP_DESC_UCORE_LINK_STATE_ACK 0x10 27 #define QMP_DESC_UCORE_CH_STATE 0x14 28 #define QMP_DESC_UCORE_CH_STATE_ACK 0x18 29 #define QMP_DESC_UCORE_MBOX_SIZE 0x1c 30 #define QMP_DESC_UCORE_MBOX_OFFSET 0x20 31 32 /* Linux-side offsets */ 33 #define QMP_DESC_MCORE_LINK_STATE 0x24 34 #define QMP_DESC_MCORE_LINK_STATE_ACK 0x28 35 #define QMP_DESC_MCORE_CH_STATE 0x2c 36 #define QMP_DESC_MCORE_CH_STATE_ACK 0x30 37 #define QMP_DESC_MCORE_MBOX_SIZE 0x34 38 #define QMP_DESC_MCORE_MBOX_OFFSET 0x38 39 40 #define QMP_STATE_UP GENMASK(15, 0) 41 #define QMP_STATE_DOWN GENMASK(31, 16) 42 43 #define QMP_MAGIC 0x4d41494c /* mail */ 44 #define QMP_VERSION 1 45 46 /* 64 bytes is enough to store the requests and provides padding to 4 bytes */ 47 #define QMP_MSG_LEN 64 48 49 #define QMP_NUM_COOLING_RESOURCES 2 50 51 #define QMP_DEBUGFS_FILES 4 52 53 static bool qmp_cdev_max_state = 1; 54 55 struct qmp_cooling_device { 56 struct thermal_cooling_device *cdev; 57 struct qmp *qmp; 58 char *name; 59 bool state; 60 }; 61 62 /** 63 * struct qmp - driver state for QMP implementation 64 * @msgram: iomem referencing the message RAM used for communication 65 * @dev: reference to QMP device 66 * @mbox_client: mailbox client used to ring the doorbell on transmit 67 * @mbox_chan: mailbox channel used to ring the doorbell on transmit 68 * @offset: offset within @msgram where messages should be written 69 * @size: maximum size of the messages to be transmitted 70 * @event: wait_queue for synchronization with the IRQ 71 * @tx_lock: provides synchronization between multiple callers of qmp_send() 72 * @qdss_clk: QDSS clock hw struct 73 * @cooling_devs: thermal cooling devices 74 * @debugfs_root: directory for the developer/tester interface 75 * @debugfs_files: array of individual debugfs entries under debugfs_root 76 */ 77 struct qmp { 78 void __iomem *msgram; 79 struct device *dev; 80 81 struct mbox_client mbox_client; 82 struct mbox_chan *mbox_chan; 83 84 size_t offset; 85 size_t size; 86 87 wait_queue_head_t event; 88 89 struct mutex tx_lock; 90 91 struct clk_hw qdss_clk; 92 struct qmp_cooling_device *cooling_devs; 93 struct dentry *debugfs_root; 94 struct dentry *debugfs_files[QMP_DEBUGFS_FILES]; 95 }; 96 97 static void qmp_kick(struct qmp *qmp) 98 { 99 mbox_send_message(qmp->mbox_chan, NULL); 100 mbox_client_txdone(qmp->mbox_chan, 0); 101 } 102 103 static bool qmp_magic_valid(struct qmp *qmp) 104 { 105 return readl(qmp->msgram + QMP_DESC_MAGIC) == QMP_MAGIC; 106 } 107 108 static bool qmp_link_acked(struct qmp *qmp) 109 { 110 return readl(qmp->msgram + QMP_DESC_MCORE_LINK_STATE_ACK) == QMP_STATE_UP; 111 } 112 113 static bool qmp_mcore_channel_acked(struct qmp *qmp) 114 { 115 return readl(qmp->msgram + QMP_DESC_MCORE_CH_STATE_ACK) == QMP_STATE_UP; 116 } 117 118 static bool qmp_ucore_channel_up(struct qmp *qmp) 119 { 120 return readl(qmp->msgram + QMP_DESC_UCORE_CH_STATE) == QMP_STATE_UP; 121 } 122 123 static int qmp_open(struct qmp *qmp) 124 { 125 int ret; 126 u32 val; 127 128 if (!qmp_magic_valid(qmp)) { 129 dev_err(qmp->dev, "QMP magic doesn't match\n"); 130 return -EINVAL; 131 } 132 133 val = readl(qmp->msgram + QMP_DESC_VERSION); 134 if (val != QMP_VERSION) { 135 dev_err(qmp->dev, "unsupported QMP version %d\n", val); 136 return -EINVAL; 137 } 138 139 qmp->offset = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_OFFSET); 140 qmp->size = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_SIZE); 141 if (!qmp->size) { 142 dev_err(qmp->dev, "invalid mailbox size\n"); 143 return -EINVAL; 144 } 145 146 /* Ack remote core's link state */ 147 val = readl(qmp->msgram + QMP_DESC_UCORE_LINK_STATE); 148 writel(val, qmp->msgram + QMP_DESC_UCORE_LINK_STATE_ACK); 149 150 /* Set local core's link state to up */ 151 writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_LINK_STATE); 152 153 qmp_kick(qmp); 154 155 ret = wait_event_timeout(qmp->event, qmp_link_acked(qmp), HZ); 156 if (!ret) { 157 dev_err(qmp->dev, "ucore didn't ack link\n"); 158 goto timeout_close_link; 159 } 160 161 writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_CH_STATE); 162 163 qmp_kick(qmp); 164 165 ret = wait_event_timeout(qmp->event, qmp_ucore_channel_up(qmp), HZ); 166 if (!ret) { 167 dev_err(qmp->dev, "ucore didn't open channel\n"); 168 goto timeout_close_channel; 169 } 170 171 /* Ack remote core's channel state */ 172 writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_UCORE_CH_STATE_ACK); 173 174 qmp_kick(qmp); 175 176 ret = wait_event_timeout(qmp->event, qmp_mcore_channel_acked(qmp), HZ); 177 if (!ret) { 178 dev_err(qmp->dev, "ucore didn't ack channel\n"); 179 goto timeout_close_channel; 180 } 181 182 return 0; 183 184 timeout_close_channel: 185 writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE); 186 187 timeout_close_link: 188 writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE); 189 qmp_kick(qmp); 190 191 return -ETIMEDOUT; 192 } 193 194 static void qmp_close(struct qmp *qmp) 195 { 196 writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE); 197 writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE); 198 qmp_kick(qmp); 199 } 200 201 static irqreturn_t qmp_intr(int irq, void *data) 202 { 203 struct qmp *qmp = data; 204 205 wake_up_all(&qmp->event); 206 207 return IRQ_HANDLED; 208 } 209 210 static bool qmp_message_empty(struct qmp *qmp) 211 { 212 return readl(qmp->msgram + qmp->offset) == 0; 213 } 214 215 /** 216 * qmp_send() - send a message to the AOSS 217 * @qmp: qmp context 218 * @fmt: format string for message to be sent 219 * @...: arguments for the format string 220 * 221 * Transmit message to AOSS and wait for the AOSS to acknowledge the message. 222 * data must not be longer than the mailbox size. Access is synchronized by 223 * this implementation. 224 * 225 * Return: 0 on success, negative errno on failure 226 */ 227 int __printf(2, 3) qmp_send(struct qmp *qmp, const char *fmt, ...) 228 { 229 char buf[QMP_MSG_LEN]; 230 long time_left; 231 va_list args; 232 int len; 233 int ret; 234 235 if (WARN_ON(IS_ERR_OR_NULL(qmp) || !fmt)) 236 return -EINVAL; 237 238 memset(buf, 0, sizeof(buf)); 239 va_start(args, fmt); 240 len = vsnprintf(buf, sizeof(buf), fmt, args); 241 va_end(args); 242 243 if (WARN_ON(len >= sizeof(buf))) 244 return -EINVAL; 245 246 mutex_lock(&qmp->tx_lock); 247 248 trace_aoss_send(buf); 249 250 /* The message RAM only implements 32-bit accesses */ 251 __iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32), 252 buf, sizeof(buf) / sizeof(u32)); 253 writel(sizeof(buf), qmp->msgram + qmp->offset); 254 255 /* Read back length to confirm data written in message RAM */ 256 readl(qmp->msgram + qmp->offset); 257 qmp_kick(qmp); 258 259 time_left = wait_event_interruptible_timeout(qmp->event, 260 qmp_message_empty(qmp), HZ); 261 if (!time_left) { 262 dev_err(qmp->dev, "ucore did not ack channel\n"); 263 ret = -ETIMEDOUT; 264 265 /* Clear message from buffer */ 266 writel(0, qmp->msgram + qmp->offset); 267 } else { 268 ret = 0; 269 } 270 271 trace_aoss_send_done(buf, ret); 272 273 mutex_unlock(&qmp->tx_lock); 274 275 return ret; 276 } 277 EXPORT_SYMBOL_GPL(qmp_send); 278 279 static int qmp_qdss_clk_prepare(struct clk_hw *hw) 280 { 281 static const char *buf = "{class: clock, res: qdss, val: 1}"; 282 struct qmp *qmp = container_of(hw, struct qmp, qdss_clk); 283 284 return qmp_send(qmp, buf); 285 } 286 287 static void qmp_qdss_clk_unprepare(struct clk_hw *hw) 288 { 289 static const char *buf = "{class: clock, res: qdss, val: 0}"; 290 struct qmp *qmp = container_of(hw, struct qmp, qdss_clk); 291 292 qmp_send(qmp, buf); 293 } 294 295 static const struct clk_ops qmp_qdss_clk_ops = { 296 .prepare = qmp_qdss_clk_prepare, 297 .unprepare = qmp_qdss_clk_unprepare, 298 }; 299 300 static int qmp_qdss_clk_add(struct qmp *qmp) 301 { 302 static const struct clk_init_data qdss_init = { 303 .ops = &qmp_qdss_clk_ops, 304 .name = "qdss", 305 }; 306 int ret; 307 308 qmp->qdss_clk.init = &qdss_init; 309 ret = clk_hw_register(qmp->dev, &qmp->qdss_clk); 310 if (ret < 0) { 311 dev_err(qmp->dev, "failed to register qdss clock\n"); 312 return ret; 313 } 314 315 ret = of_clk_add_hw_provider(qmp->dev->of_node, of_clk_hw_simple_get, 316 &qmp->qdss_clk); 317 if (ret < 0) { 318 dev_err(qmp->dev, "unable to register of clk hw provider\n"); 319 clk_hw_unregister(&qmp->qdss_clk); 320 } 321 322 return ret; 323 } 324 325 static void qmp_qdss_clk_remove(struct qmp *qmp) 326 { 327 of_clk_del_provider(qmp->dev->of_node); 328 clk_hw_unregister(&qmp->qdss_clk); 329 } 330 331 static int qmp_cdev_get_max_state(struct thermal_cooling_device *cdev, 332 unsigned long *state) 333 { 334 *state = qmp_cdev_max_state; 335 return 0; 336 } 337 338 static int qmp_cdev_get_cur_state(struct thermal_cooling_device *cdev, 339 unsigned long *state) 340 { 341 struct qmp_cooling_device *qmp_cdev = cdev->devdata; 342 343 *state = qmp_cdev->state; 344 return 0; 345 } 346 347 static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev, 348 unsigned long state) 349 { 350 struct qmp_cooling_device *qmp_cdev = cdev->devdata; 351 bool cdev_state; 352 int ret; 353 354 /* Normalize state */ 355 cdev_state = !!state; 356 357 if (qmp_cdev->state == state) 358 return 0; 359 360 ret = qmp_send(qmp_cdev->qmp, "{class: volt_flr, event:zero_temp, res:%s, value:%s}", 361 qmp_cdev->name, cdev_state ? "on" : "off"); 362 if (!ret) 363 qmp_cdev->state = cdev_state; 364 365 return ret; 366 } 367 368 static const struct thermal_cooling_device_ops qmp_cooling_device_ops = { 369 .get_max_state = qmp_cdev_get_max_state, 370 .get_cur_state = qmp_cdev_get_cur_state, 371 .set_cur_state = qmp_cdev_set_cur_state, 372 }; 373 374 static int qmp_cooling_device_add(struct qmp *qmp, 375 struct qmp_cooling_device *qmp_cdev, 376 struct device_node *node) 377 { 378 char *cdev_name = (char *)node->name; 379 380 qmp_cdev->qmp = qmp; 381 qmp_cdev->state = !qmp_cdev_max_state; 382 qmp_cdev->name = cdev_name; 383 qmp_cdev->cdev = devm_thermal_of_cooling_device_register 384 (qmp->dev, node, 385 cdev_name, 386 qmp_cdev, &qmp_cooling_device_ops); 387 388 if (IS_ERR(qmp_cdev->cdev)) 389 dev_err(qmp->dev, "unable to register %s cooling device\n", 390 cdev_name); 391 392 return PTR_ERR_OR_ZERO(qmp_cdev->cdev); 393 } 394 395 static int qmp_cooling_devices_register(struct qmp *qmp) 396 { 397 struct device_node *np; 398 int count = 0; 399 int ret; 400 401 np = qmp->dev->of_node; 402 403 qmp->cooling_devs = devm_kcalloc(qmp->dev, QMP_NUM_COOLING_RESOURCES, 404 sizeof(*qmp->cooling_devs), 405 GFP_KERNEL); 406 407 if (!qmp->cooling_devs) 408 return -ENOMEM; 409 410 for_each_available_child_of_node_scoped(np, child) { 411 if (!of_property_present(child, "#cooling-cells")) 412 continue; 413 ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++], 414 child); 415 if (ret) 416 goto unroll; 417 } 418 419 if (!count) 420 devm_kfree(qmp->dev, qmp->cooling_devs); 421 422 return 0; 423 424 unroll: 425 while (--count >= 0) 426 thermal_cooling_device_unregister 427 (qmp->cooling_devs[count].cdev); 428 devm_kfree(qmp->dev, qmp->cooling_devs); 429 430 return ret; 431 } 432 433 static void qmp_cooling_devices_remove(struct qmp *qmp) 434 { 435 int i; 436 437 for (i = 0; i < QMP_NUM_COOLING_RESOURCES; i++) 438 thermal_cooling_device_unregister(qmp->cooling_devs[i].cdev); 439 } 440 441 /** 442 * qmp_get() - get a qmp handle from a device 443 * @dev: client device pointer 444 * 445 * Return: handle to qmp device on success, ERR_PTR() on failure 446 */ 447 struct qmp *qmp_get(struct device *dev) 448 { 449 struct platform_device *pdev; 450 struct device_node *np; 451 struct qmp *qmp; 452 453 if (!dev || !dev->of_node) 454 return ERR_PTR(-EINVAL); 455 456 np = of_parse_phandle(dev->of_node, "qcom,qmp", 0); 457 if (!np) 458 return ERR_PTR(-ENODEV); 459 460 pdev = of_find_device_by_node(np); 461 of_node_put(np); 462 if (!pdev) 463 return ERR_PTR(-EINVAL); 464 465 qmp = platform_get_drvdata(pdev); 466 467 if (!qmp) { 468 put_device(&pdev->dev); 469 return ERR_PTR(-EPROBE_DEFER); 470 } 471 return qmp; 472 } 473 EXPORT_SYMBOL_GPL(qmp_get); 474 475 /** 476 * qmp_put() - release a qmp handle 477 * @qmp: qmp handle obtained from qmp_get() 478 */ 479 void qmp_put(struct qmp *qmp) 480 { 481 /* 482 * Match get_device() inside of_find_device_by_node() in 483 * qmp_get() 484 */ 485 if (!IS_ERR_OR_NULL(qmp)) 486 put_device(qmp->dev); 487 } 488 EXPORT_SYMBOL_GPL(qmp_put); 489 490 struct qmp_debugfs_entry { 491 const char *name; 492 const char *fmt; 493 bool is_bool; 494 const char *true_val; 495 const char *false_val; 496 }; 497 498 static const struct qmp_debugfs_entry qmp_debugfs_entries[QMP_DEBUGFS_FILES] = { 499 { "ddr_frequency_mhz", "{class: ddr, res: fixed, val: %u}", false }, 500 { "prevent_aoss_sleep", "{class: aoss_slp, res: sleep: %s}", true, "enable", "disable" }, 501 { "prevent_cx_collapse", "{class: cx_mol, res: cx, val: %s}", true, "mol", "off" }, 502 { "prevent_ddr_collapse", "{class: ddr_mol, res: ddr, val: %s}", true, "mol", "off" }, 503 }; 504 505 static ssize_t qmp_debugfs_write(struct file *file, const char __user *user_buf, 506 size_t count, loff_t *pos) 507 { 508 const struct qmp_debugfs_entry *entry = NULL; 509 struct qmp *qmp = file->private_data; 510 char buf[QMP_MSG_LEN]; 511 unsigned int uint_val; 512 const char *str_val; 513 bool bool_val; 514 int ret; 515 int i; 516 517 for (i = 0; i < ARRAY_SIZE(qmp->debugfs_files); i++) { 518 if (qmp->debugfs_files[i] == file->f_path.dentry) { 519 entry = &qmp_debugfs_entries[i]; 520 break; 521 } 522 } 523 if (WARN_ON(!entry)) 524 return -EFAULT; 525 526 if (entry->is_bool) { 527 ret = kstrtobool_from_user(user_buf, count, &bool_val); 528 if (ret) 529 return ret; 530 531 str_val = bool_val ? entry->true_val : entry->false_val; 532 533 ret = snprintf(buf, sizeof(buf), entry->fmt, str_val); 534 if (ret >= sizeof(buf)) 535 return -EINVAL; 536 } else { 537 ret = kstrtou32_from_user(user_buf, count, 0, &uint_val); 538 if (ret) 539 return ret; 540 541 ret = snprintf(buf, sizeof(buf), entry->fmt, uint_val); 542 if (ret >= sizeof(buf)) 543 return -EINVAL; 544 } 545 546 ret = qmp_send(qmp, buf); 547 if (ret < 0) 548 return ret; 549 550 return count; 551 } 552 553 static const struct file_operations qmp_debugfs_fops = { 554 .open = simple_open, 555 .write = qmp_debugfs_write, 556 }; 557 558 static void qmp_debugfs_create(struct qmp *qmp) 559 { 560 const struct qmp_debugfs_entry *entry; 561 int i; 562 563 qmp->debugfs_root = debugfs_create_dir("qcom_aoss", NULL); 564 565 for (i = 0; i < ARRAY_SIZE(qmp->debugfs_files); i++) { 566 entry = &qmp_debugfs_entries[i]; 567 568 qmp->debugfs_files[i] = debugfs_create_file(entry->name, 0200, 569 qmp->debugfs_root, 570 qmp, 571 &qmp_debugfs_fops); 572 } 573 } 574 575 static int qmp_probe(struct platform_device *pdev) 576 { 577 struct qmp *qmp; 578 int irq; 579 int ret; 580 581 qmp = devm_kzalloc(&pdev->dev, sizeof(*qmp), GFP_KERNEL); 582 if (!qmp) 583 return -ENOMEM; 584 585 qmp->dev = &pdev->dev; 586 init_waitqueue_head(&qmp->event); 587 mutex_init(&qmp->tx_lock); 588 589 qmp->msgram = devm_platform_ioremap_resource(pdev, 0); 590 if (IS_ERR(qmp->msgram)) 591 return PTR_ERR(qmp->msgram); 592 593 qmp->mbox_client.dev = &pdev->dev; 594 qmp->mbox_client.knows_txdone = true; 595 qmp->mbox_chan = mbox_request_channel(&qmp->mbox_client, 0); 596 if (IS_ERR(qmp->mbox_chan)) { 597 dev_err(&pdev->dev, "failed to acquire ipc mailbox\n"); 598 return PTR_ERR(qmp->mbox_chan); 599 } 600 601 irq = platform_get_irq(pdev, 0); 602 ret = devm_request_irq(&pdev->dev, irq, qmp_intr, 0, 603 "aoss-qmp", qmp); 604 if (ret < 0) { 605 dev_err(&pdev->dev, "failed to request interrupt\n"); 606 goto err_free_mbox; 607 } 608 609 ret = qmp_open(qmp); 610 if (ret < 0) 611 goto err_free_mbox; 612 613 ret = qmp_qdss_clk_add(qmp); 614 if (ret) 615 goto err_close_qmp; 616 617 ret = qmp_cooling_devices_register(qmp); 618 if (ret) 619 dev_err(&pdev->dev, "failed to register aoss cooling devices\n"); 620 621 platform_set_drvdata(pdev, qmp); 622 623 qmp_debugfs_create(qmp); 624 625 return 0; 626 627 err_close_qmp: 628 qmp_close(qmp); 629 err_free_mbox: 630 mbox_free_channel(qmp->mbox_chan); 631 632 return ret; 633 } 634 635 static void qmp_remove(struct platform_device *pdev) 636 { 637 struct qmp *qmp = platform_get_drvdata(pdev); 638 639 debugfs_remove_recursive(qmp->debugfs_root); 640 641 qmp_qdss_clk_remove(qmp); 642 qmp_cooling_devices_remove(qmp); 643 644 qmp_close(qmp); 645 mbox_free_channel(qmp->mbox_chan); 646 } 647 648 static const struct of_device_id qmp_dt_match[] = { 649 { .compatible = "qcom,sc7180-aoss-qmp", }, 650 { .compatible = "qcom,sc7280-aoss-qmp", }, 651 { .compatible = "qcom,sdm845-aoss-qmp", }, 652 { .compatible = "qcom,sm8150-aoss-qmp", }, 653 { .compatible = "qcom,sm8250-aoss-qmp", }, 654 { .compatible = "qcom,sm8350-aoss-qmp", }, 655 { .compatible = "qcom,aoss-qmp", }, 656 {} 657 }; 658 MODULE_DEVICE_TABLE(of, qmp_dt_match); 659 660 static struct platform_driver qmp_driver = { 661 .driver = { 662 .name = "qcom_aoss_qmp", 663 .of_match_table = qmp_dt_match, 664 .suppress_bind_attrs = true, 665 }, 666 .probe = qmp_probe, 667 .remove = qmp_remove, 668 }; 669 module_platform_driver(qmp_driver); 670 671 MODULE_DESCRIPTION("Qualcomm AOSS QMP driver"); 672 MODULE_LICENSE("GPL v2"); 673