// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/spinlock.h>

/*
 * The Shared Memory Point to Point (SMP2P) protocol facilitates communication
 * of a single 32-bit value between two processors. Each value has a single
 * writer (the local side) and a single reader (the remote side). Values are
 * uniquely identified in the system by the directed edge (local processor ID
 * to remote processor ID) and a string identifier.
 *
 * Each processor is responsible for creating the outgoing SMEM items and each
 * item is writable by the local processor and readable by the remote
 * processor. By using two separate SMEM items that are single-reader and
 * single-writer, SMP2P does not require any remote locking mechanisms.
 *
 * The driver uses the Linux interrupt framework to expose a virtual interrupt
 * controller for each inbound entry and the qcom_smem_state framework to
 * expose each outbound entry.
 */

#define SMP2P_MAX_ENTRY 16
#define SMP2P_MAX_ENTRY_NAME 16

#define SMP2P_FEATURE_SSR_ACK 0x1
#define SMP2P_FLAGS_RESTART_DONE_BIT 0
#define SMP2P_FLAGS_RESTART_ACK_BIT 1

#define SMP2P_MAGIC 0x504d5324
#define SMP2P_ALL_FEATURES SMP2P_FEATURE_SSR_ACK

/**
 * struct smp2p_smem_item - in memory communication structure
 * @magic:		magic number
 * @version:		version - must be 1
 * @features:		feature flags, negotiated with the remote side
 * @local_pid:		processor id of sending end
 * @remote_pid:		processor id of receiving end
 * @total_entries:	number of entries - always SMP2P_MAX_ENTRY
 * @valid_entries:	number of allocated entries
 * @flags:		restart done/ack bits, used by the SSR_ACK feature
 * @entries:		individual communication entries
 * @entries.name:	name of the entry
 * @entries.value:	content of the entry
 */
struct smp2p_smem_item {
	u32 magic;
	u8 version;
	unsigned features:24;
	u16 local_pid;
	u16 remote_pid;
	u16 total_entries;
	u16 valid_entries;
	u32 flags;

	struct {
		u8 name[SMP2P_MAX_ENTRY_NAME];
		u32 value;
	} entries[SMP2P_MAX_ENTRY];
} __packed;

/**
 * struct smp2p_entry - driver context matching one entry
 * @node:		list entry to keep track of allocated entries
 * @smp2p:		reference to the device driver context
 * @name:		name of the entry, to match against smp2p_smem_item
 * @value:		pointer to smp2p_smem_item entry value
 * @last_value:		last handled value
 * @domain:		irq_domain for inbound entries
 * @irq_enabled:	bitmap to track enabled irq bits
 * @irq_rising:		bitmap to mark irq bits for rising detection
 * @irq_falling:	bitmap to mark irq bits for falling detection
 * @state:		smem state handle
 * @lock:		spinlock to protect read-modify-write of the value
 */
struct smp2p_entry {
	struct list_head node;
	struct qcom_smp2p *smp2p;

	const char *name;
	u32 *value;
	u32 last_value;

	struct irq_domain *domain;
	DECLARE_BITMAP(irq_enabled, 32);
	DECLARE_BITMAP(irq_rising, 32);
	DECLARE_BITMAP(irq_falling, 32);

	struct qcom_smem_state *state;

	spinlock_t lock;
};

#define SMP2P_INBOUND 0
#define SMP2P_OUTBOUND 1

/**
 * struct qcom_smp2p - device driver context
 * @dev:		device driver handle
 * @in:			pointer to the inbound smem item
 * @out:		pointer to the outbound smem item
 * @smem_items:		ids of the two smem items
 * @valid_entries:	already scanned inbound entries
 * @ssr_ack_enabled:	SMP2P_FEATURE_SSR_ACK feature is supported and was enabled
 * @ssr_ack:		current cached state of the local ack bit
 * @negotiation_done:	whether negotiating finished
 * @local_pid:		processor id of the inbound edge
 * @remote_pid:		processor id of the outbound edge
 * @ipc_regmap:		regmap for the outbound ipc
 * @ipc_offset:		offset within the regmap
 * @ipc_bit:		bit in regmap@offset to kick to signal remote processor
 * @mbox_client:	mailbox client handle
 * @mbox_chan:		apcs ipc mailbox channel handle
 * @inbound:		list of inbound entries
 * @outbound:		list of outbound entries
 */
struct qcom_smp2p {
	struct device *dev;

	struct smp2p_smem_item *in;
	struct smp2p_smem_item *out;

	unsigned smem_items[SMP2P_OUTBOUND + 1];

	unsigned valid_entries;

	bool ssr_ack_enabled;
	bool ssr_ack;
	bool negotiation_done;

	unsigned local_pid;
	unsigned remote_pid;

	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	struct list_head inbound;
	struct list_head outbound;
};

static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
{
	/* Make sure any updated data is written before the kick */
	wmb();

	if (smp2p->mbox_chan) {
		mbox_send_message(smp2p->mbox_chan, NULL);
		mbox_client_txdone(smp2p->mbox_chan, 0);
	} else {
		regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset, BIT(smp2p->ipc_bit));
	}
}

static bool qcom_smp2p_check_ssr(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *in = smp2p->in;
	bool restart;

	if (!smp2p->ssr_ack_enabled)
		return false;

	restart = in->flags & BIT(SMP2P_FLAGS_RESTART_DONE_BIT);

	return restart != smp2p->ssr_ack;
}

static void qcom_smp2p_do_ssr_ack(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out = smp2p->out;
	u32 val;

	smp2p->ssr_ack = !smp2p->ssr_ack;

	val = out->flags & ~BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
	if (smp2p->ssr_ack)
		val |= BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
	out->flags = val;

	qcom_smp2p_kick(smp2p);
}

static void qcom_smp2p_negotiate(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out = smp2p->out;
	struct smp2p_smem_item *in = smp2p->in;

	if (in->version == out->version) {
		out->features &= in->features;

		if (out->features & SMP2P_FEATURE_SSR_ACK)
			smp2p->ssr_ack_enabled = true;

		smp2p->negotiation_done = true;
	}
}

static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *in;
	struct smp2p_entry *entry;
	int irq_pin;
	u32 status;
	char buf[SMP2P_MAX_ENTRY_NAME];
	u32 val;
	int i;

	in = smp2p->in;

	/* Match newly created entries */
	for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
		list_for_each_entry(entry, &smp2p->inbound, node) {
			memcpy(buf, in->entries[i].name, sizeof(buf));
			if (!strcmp(buf, entry->name)) {
				entry->value = &in->entries[i].value;
				break;
			}
		}
	}
	smp2p->valid_entries = i;

	/* Fire interrupts based on any value changes */
	list_for_each_entry(entry, &smp2p->inbound, node) {
		/* Ignore entries not yet allocated by the remote side */
		if (!entry->value)
			continue;

		val = readl(entry->value);

		status = val ^ entry->last_value;
		entry->last_value = val;

		/* No changes of this entry? */
		if (!status)
			continue;

		for_each_set_bit(i, entry->irq_enabled, 32) {
			if (!(status & BIT(i)))
				continue;

			if ((val & BIT(i) && test_bit(i, entry->irq_rising)) ||
			    (!(val & BIT(i)) && test_bit(i, entry->irq_falling))) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		}
	}
}

/**
 * qcom_smp2p_intr() - interrupt handler for incoming notifications
 * @irq:	unused
 * @data:	smp2p driver context
 *
 * Handle notifications from the remote side to handle newly allocated entries
 * or any changes to the state bits of existing entries.
 *
 * Return: %IRQ_HANDLED
 */
static irqreturn_t qcom_smp2p_intr(int irq, void *data)
{
	struct smp2p_smem_item *in;
	struct qcom_smp2p *smp2p = data;
	unsigned int smem_id = smp2p->smem_items[SMP2P_INBOUND];
	unsigned int pid = smp2p->remote_pid;
	bool ack_restart;
	size_t size;

	in = smp2p->in;

	/* Acquire smem item, if not already found */
	if (!in) {
		in = qcom_smem_get(pid, smem_id, &size);
		if (IS_ERR(in)) {
			dev_err(smp2p->dev,
				"Unable to acquire remote smp2p item\n");
			goto out;
		}

		smp2p->in = in;
	}

	if (!smp2p->negotiation_done)
		qcom_smp2p_negotiate(smp2p);

	if (smp2p->negotiation_done) {
		ack_restart = qcom_smp2p_check_ssr(smp2p);
		qcom_smp2p_notify_in(smp2p);

		if (ack_restart)
			qcom_smp2p_do_ssr_ack(smp2p);
	}

out:
	return IRQ_HANDLED;
}

static void smp2p_mask_irq(struct irq_data *irqd)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	clear_bit(irq, entry->irq_enabled);
}

static void smp2p_unmask_irq(struct irq_data *irqd)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	set_bit(irq, entry->irq_enabled);
}

static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	if (!(type & IRQ_TYPE_EDGE_BOTH))
		return -EINVAL;

	if (type & IRQ_TYPE_EDGE_RISING)
		set_bit(irq, entry->irq_rising);
	else
		clear_bit(irq, entry->irq_rising);

	if (type & IRQ_TYPE_EDGE_FALLING)
		set_bit(irq, entry->irq_falling);
	else
		clear_bit(irq, entry->irq_falling);

	return 0;
}

static struct irq_chip smp2p_irq_chip = {
	.name           = "smp2p",
	.irq_mask       = smp2p_mask_irq,
	.irq_unmask     = smp2p_unmask_irq,
	.irq_set_type   = smp2p_set_irq_type,
};

static int smp2p_irq_map(struct irq_domain *d,
			 unsigned int irq,
			 irq_hw_number_t hw)
{
	struct smp2p_entry *entry = d->host_data;

	irq_set_chip_and_handler(irq, &smp2p_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, entry);
	irq_set_nested_thread(irq, 1);
	irq_set_noprobe(irq);

	return 0;
}
static const struct irq_domain_ops smp2p_irq_ops = {
	.map = smp2p_irq_map,
	.xlate = irq_domain_xlate_twocell,
};

static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
				    struct smp2p_entry *entry,
				    struct device_node *node)
{
	entry->domain = irq_domain_add_linear(node, 32, &smp2p_irq_ops, entry);
	if (!entry->domain) {
		dev_err(smp2p->dev, "failed to add irq_domain\n");
		return -ENOMEM;
	}

	return 0;
}

static int smp2p_update_bits(void *data, u32 mask, u32 value)
{
	struct smp2p_entry *entry = data;
	unsigned long flags;
	u32 orig;
	u32 val;

	spin_lock_irqsave(&entry->lock, flags);
	val = orig = readl(entry->value);
	val &= ~mask;
	val |= value;
	writel(val, entry->value);
	spin_unlock_irqrestore(&entry->lock, flags);

	if (val != orig)
		qcom_smp2p_kick(entry->smp2p);

	return 0;
}

static const struct qcom_smem_state_ops smp2p_state_ops = {
	.update_bits = smp2p_update_bits,
};

static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
				     struct smp2p_entry *entry,
				     struct device_node *node)
{
	struct smp2p_smem_item *out = smp2p->out;
	char buf[SMP2P_MAX_ENTRY_NAME] = {};

	/* Allocate an entry from the smem item */
	strscpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME);
	memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);

	/* Make the logical entry reference the physical value */
	entry->value = &out->entries[out->valid_entries].value;

	out->valid_entries++;

	entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry);
	if (IS_ERR(entry->state)) {
		dev_err(smp2p->dev, "failed to register qcom_smem_state\n");
		return PTR_ERR(entry->state);
	}

	return 0;
}

static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out;
	unsigned smem_id = smp2p->smem_items[SMP2P_OUTBOUND];
	unsigned pid = smp2p->remote_pid;
	int ret;

	ret = qcom_smem_alloc(pid, smem_id, sizeof(*out));
	if (ret < 0 && ret != -EEXIST) {
		if (ret != -EPROBE_DEFER)
			dev_err(smp2p->dev,
				"unable to allocate local smp2p item\n");
		return ret;
	}

	out = qcom_smem_get(pid, smem_id, NULL);
	if (IS_ERR(out)) {
		dev_err(smp2p->dev, "Unable to acquire local smp2p item\n");
		return PTR_ERR(out);
	}

	memset(out, 0, sizeof(*out));
	out->magic = SMP2P_MAGIC;
	out->local_pid = smp2p->local_pid;
	out->remote_pid = smp2p->remote_pid;
	out->total_entries = SMP2P_MAX_ENTRY;
	out->valid_entries = 0;
	out->features = SMP2P_ALL_FEATURES;

	/*
	 * Make sure the rest of the header is written before we validate the
	 * item by writing a valid version number.
	 */
	wmb();
	out->version = 1;

	qcom_smp2p_kick(smp2p);

	smp2p->out = out;

	return 0;
}

static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
{
	struct device_node *syscon;
	struct device *dev = smp2p->dev;
	const char *key;
	int ret;

	syscon = of_parse_phandle(dev->of_node, "qcom,ipc", 0);
	if (!syscon) {
		dev_err(dev, "no qcom,ipc node\n");
		return -ENODEV;
	}

	smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
	of_node_put(syscon);
	if (IS_ERR(smp2p->ipc_regmap))
		return PTR_ERR(smp2p->ipc_regmap);

	key = "qcom,ipc";
	ret = of_property_read_u32_index(dev->of_node, key, 1, &smp2p->ipc_offset);
	if (ret < 0) {
		dev_err(dev, "no offset in %s\n", key);
		return -EINVAL;
	}

	ret = of_property_read_u32_index(dev->of_node, key, 2, &smp2p->ipc_bit);
	if (ret < 0) {
		dev_err(dev, "no bit in %s\n", key);
		return -EINVAL;
	}

	return 0;
}

static int qcom_smp2p_probe(struct platform_device *pdev)
{
	struct smp2p_entry *entry;
	struct device_node *node;
	struct qcom_smp2p *smp2p;
	const char *key;
	int irq;
	int ret;

	smp2p = devm_kzalloc(&pdev->dev, sizeof(*smp2p), GFP_KERNEL);
	if (!smp2p)
		return -ENOMEM;

	smp2p->dev = &pdev->dev;
	INIT_LIST_HEAD(&smp2p->inbound);
	INIT_LIST_HEAD(&smp2p->outbound);

	platform_set_drvdata(pdev, smp2p);

	key = "qcom,smem";
	ret = of_property_read_u32_array(pdev->dev.of_node, key,
					 smp2p->smem_items, 2);
	if (ret)
		return ret;

	key = "qcom,local-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->local_pid);
	if (ret)
		goto report_read_failure;

	key = "qcom,remote-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->remote_pid);
	if (ret)
		goto report_read_failure;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	smp2p->mbox_client.dev = &pdev->dev;
	smp2p->mbox_client.knows_txdone = true;
	smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0);
	if (IS_ERR(smp2p->mbox_chan)) {
		if (PTR_ERR(smp2p->mbox_chan) != -ENODEV)
			return PTR_ERR(smp2p->mbox_chan);

		smp2p->mbox_chan = NULL;

		ret = smp2p_parse_ipc(smp2p);
		if (ret)
			return ret;
	}

	ret = qcom_smp2p_alloc_outbound_item(smp2p);
	if (ret < 0)
		goto release_mbox;

	for_each_available_child_of_node(pdev->dev.of_node, node) {
		entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			ret = -ENOMEM;
			of_node_put(node);
			goto unwind_interfaces;
		}

		entry->smp2p = smp2p;
		spin_lock_init(&entry->lock);

		ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
		if (ret < 0) {
			of_node_put(node);
			goto unwind_interfaces;
		}

		if (of_property_read_bool(node, "interrupt-controller")) {
			ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
			if (ret < 0) {
				of_node_put(node);
				goto unwind_interfaces;
			}

			list_add(&entry->node, &smp2p->inbound);
		} else {
			ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
			if (ret < 0) {
				of_node_put(node);
				goto unwind_interfaces;
			}

			list_add(&entry->node, &smp2p->outbound);
		}
	}

	/* Kick the outgoing edge after allocating entries */
	qcom_smp2p_kick(smp2p);

	ret = devm_request_threaded_irq(&pdev->dev, irq,
					NULL, qcom_smp2p_intr,
					IRQF_ONESHOT,
"smp2p", (void *)smp2p); 621 if (ret) { 622 dev_err(&pdev->dev, "failed to request interrupt\n"); 623 goto unwind_interfaces; 624 } 625 626 /* 627 * Treat smp2p interrupt as wakeup source, but keep it disabled 628 * by default. User space can decide enabling it depending on its 629 * use cases. For example if remoteproc crashes and device wants 630 * to handle it immediatedly (e.g. to not miss phone calls) it can 631 * enable wakeup source from user space, while other devices which 632 * do not have proper autosleep feature may want to handle it with 633 * other wakeup events (e.g. Power button) instead waking up immediately. 634 */ 635 device_set_wakeup_capable(&pdev->dev, true); 636 637 ret = dev_pm_set_wake_irq(&pdev->dev, irq); 638 if (ret) 639 goto set_wake_irq_fail; 640 641 return 0; 642 643 set_wake_irq_fail: 644 dev_pm_clear_wake_irq(&pdev->dev); 645 646 unwind_interfaces: 647 list_for_each_entry(entry, &smp2p->inbound, node) 648 irq_domain_remove(entry->domain); 649 650 list_for_each_entry(entry, &smp2p->outbound, node) 651 qcom_smem_state_unregister(entry->state); 652 653 smp2p->out->valid_entries = 0; 654 655 release_mbox: 656 mbox_free_channel(smp2p->mbox_chan); 657 658 return ret; 659 660 report_read_failure: 661 dev_err(&pdev->dev, "failed to read %s\n", key); 662 return -EINVAL; 663 } 664 665 static void qcom_smp2p_remove(struct platform_device *pdev) 666 { 667 struct qcom_smp2p *smp2p = platform_get_drvdata(pdev); 668 struct smp2p_entry *entry; 669 670 dev_pm_clear_wake_irq(&pdev->dev); 671 672 list_for_each_entry(entry, &smp2p->inbound, node) 673 irq_domain_remove(entry->domain); 674 675 list_for_each_entry(entry, &smp2p->outbound, node) 676 qcom_smem_state_unregister(entry->state); 677 678 mbox_free_channel(smp2p->mbox_chan); 679 680 smp2p->out->valid_entries = 0; 681 } 682 683 static const struct of_device_id qcom_smp2p_of_match[] = { 684 { .compatible = "qcom,smp2p" }, 685 {} 686 }; 687 MODULE_DEVICE_TABLE(of, qcom_smp2p_of_match); 688 689 static struct platform_driver qcom_smp2p_driver = { 690 .probe = qcom_smp2p_probe, 691 .remove_new = qcom_smp2p_remove, 692 .driver = { 693 .name = "qcom_smp2p", 694 .of_match_table = qcom_smp2p_of_match, 695 }, 696 }; 697 module_platform_driver(qcom_smp2p_driver); 698 699 MODULE_DESCRIPTION("Qualcomm Shared Memory Point to Point driver"); 700 MODULE_LICENSE("GPL v2"); 701