xref: /linux/drivers/soc/qcom/smp2p.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2015, Sony Mobile Communications AB.
4  * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
5  */
6 
7 #include <linux/interrupt.h>
8 #include <linux/list.h>
9 #include <linux/io.h>
10 #include <linux/of.h>
11 #include <linux/irq.h>
12 #include <linux/irqdomain.h>
13 #include <linux/mailbox_client.h>
14 #include <linux/mfd/syscon.h>
15 #include <linux/module.h>
16 #include <linux/platform_device.h>
17 #include <linux/pm_wakeirq.h>
18 #include <linux/regmap.h>
19 #include <linux/seq_file.h>
20 #include <linux/soc/qcom/smem.h>
21 #include <linux/soc/qcom/smem_state.h>
22 #include <linux/spinlock.h>
23 
24 /*
25  * The Shared Memory Point to Point (SMP2P) protocol facilitates communication
26  * of a single 32-bit value between two processors.  Each value has a single
27  * writer (the local side) and a single reader (the remote side). Values are
28  * uniquely identified in the system by the directed edge (local processor ID
29  * to remote processor ID) and a string identifier.
30  *
31  * Each processor is responsible for creating the outgoing SMEM items and each
32  * item is writable by the local processor and readable by the remote
33  * processor.  By using two separate SMEM items that are single-reader and
34  * single-writer, SMP2P does not require any remote locking mechanisms.
35  *
36  * The driver uses the Linux GPIO and interrupt framework to expose a virtual
37  * GPIO for each outbound entry and a virtual interrupt controller for each
38  * inbound entry.
39  *
40  * V2 of SMP2P allows remote processors to write to outbound smp2p items before
41  * the full smp2p connection is negotiated. This is important for processors
42  * started before linux runs.
43  */
44 
#define SMP2P_MAX_ENTRY 16
#define SMP2P_MAX_ENTRY_NAME 16

/* Feature bits (smp2p_smem_item.features) and flag bits (.flags) */
#define SMP2P_FEATURE_SSR_ACK 0x1
#define SMP2P_FLAGS_RESTART_DONE_BIT 0
#define SMP2P_FLAGS_RESTART_ACK_BIT 1

/* "$SMP" in little-endian, marks a valid smp2p smem item */
#define SMP2P_MAGIC 0x504d5324
#define SMP2P_ALL_FEATURES	SMP2P_FEATURE_SSR_ACK
#define MAX_VERSION 2
55 
/**
 * struct smp2p_smem_item - in memory communication structure
 * @magic:		magic number
 * @version:		version
 * @features:		SMP2P_FEATURE_* bits supported by this end; intersected
 *			with the remote's during negotiation
 * @local_pid:		processor id of sending end
 * @remote_pid:		processor id of receiving end
 * @total_entries:	number of entries - always SMP2P_MAX_ENTRY
 * @valid_entries:	number of allocated entries
 * @flags:		SMP2P_FLAGS_* bits, used for the restart done/ack
 *			handshake
 * @entries:		individual communication entries
 * @entries.name:	name of the entry
 * @entries.value:	content of the entry
 */
struct smp2p_smem_item {
	u32 magic;
	u8 version;
	unsigned features:24;
	u16 local_pid;
	u16 remote_pid;
	u16 total_entries;
	u16 valid_entries;
	u32 flags;

	struct {
		u8 name[SMP2P_MAX_ENTRY_NAME];
		u32 value;
	} entries[SMP2P_MAX_ENTRY];
} __packed;
85 
/**
 * struct smp2p_entry - driver context matching one entry
 * @node:	list entry to keep track of allocated entries
 * @smp2p:	reference to the device driver context
 * @name:	name of the entry, to match against smp2p_smem_item
 * @value:	pointer to smp2p_smem_item entry value
 * @last_value:	last handled value
 * @domain:	irq_domain for inbound entries
 * @irq_enabled:bitmap to track enabled irq bits
 * @irq_rising:	bitmap to mark irq bits for rising detection
 * @irq_falling:bitmap to mark irq bits for falling detection
 * @state:	smem state handle
 * @lock:	spinlock to protect read-modify-write of the value
 */
struct smp2p_entry {
	struct list_head node;
	struct qcom_smp2p *smp2p;

	const char *name;
	u32 *value;	/* NULL until the entry is matched in the smem item */
	u32 last_value;

	struct irq_domain *domain;
	DECLARE_BITMAP(irq_enabled, 32);
	DECLARE_BITMAP(irq_rising, 32);
	DECLARE_BITMAP(irq_falling, 32);

	struct qcom_smem_state *state;

	spinlock_t lock;
};
117 
/* Indices into qcom_smp2p::smem_items */
#define SMP2P_INBOUND	0
#define SMP2P_OUTBOUND	1
120 
/**
 * struct qcom_smp2p - device driver context
 * @dev:	device driver handle
 * @in:		pointer to the inbound smem item
 * @out:	pointer to the outbound smem item
 * @smem_items:	ids of the two smem items
 * @valid_entries: already scanned inbound entries
 * @ssr_ack_enabled: SMP2P_FEATURE_SSR_ACK feature is supported and was enabled
 * @ssr_ack: current cached state of the local ack bit
 * @negotiation_done: whether negotiating finished
 * @local_pid:	processor id of the inbound edge
 * @remote_pid:	processor id of the outbound edge
 * @ipc_regmap:	regmap for the outbound ipc
 * @ipc_offset:	offset within the regmap
 * @ipc_bit:	bit in regmap@offset to kick to signal remote processor
 * @mbox_client: mailbox client handle
 * @mbox_chan:	apcs ipc mailbox channel handle
 * @inbound:	list of inbound entries
 * @outbound:	list of outbound entries
 */
struct qcom_smp2p {
	struct device *dev;

	struct smp2p_smem_item *in;	/* may be NULL until remote publishes */
	struct smp2p_smem_item *out;

	/* indexed by SMP2P_INBOUND / SMP2P_OUTBOUND */
	unsigned smem_items[SMP2P_OUTBOUND + 1];

	unsigned valid_entries;

	bool ssr_ack_enabled;
	bool ssr_ack;
	bool negotiation_done;

	unsigned local_pid;
	unsigned remote_pid;

	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;	/* NULL when falling back to ipc_regmap */

	struct list_head inbound;
	struct list_head outbound;
};
168 
169 #define CREATE_TRACE_POINTS
170 #include "trace-smp2p.h"
171 
172 static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
173 {
174 	/* Make sure any updated data is written before the kick */
175 	wmb();
176 
177 	if (smp2p->mbox_chan) {
178 		mbox_send_message(smp2p->mbox_chan, NULL);
179 		mbox_client_txdone(smp2p->mbox_chan, 0);
180 	} else {
181 		regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset, BIT(smp2p->ipc_bit));
182 	}
183 }
184 
185 static bool qcom_smp2p_check_ssr(struct qcom_smp2p *smp2p)
186 {
187 	struct smp2p_smem_item *in = smp2p->in;
188 	struct smp2p_entry *entry;
189 	bool restart_done;
190 	bool restart;
191 
192 	if (!smp2p->ssr_ack_enabled)
193 		return false;
194 
195 	restart_done = in->flags & BIT(SMP2P_FLAGS_RESTART_DONE_BIT);
196 	restart = restart_done != smp2p->ssr_ack;
197 	list_for_each_entry(entry, &smp2p->inbound, node) {
198 		if (!entry->value)
199 			continue;
200 		entry->last_value = 0;
201 	}
202 
203 	return restart;
204 }
205 
206 static void qcom_smp2p_do_ssr_ack(struct qcom_smp2p *smp2p)
207 {
208 	struct smp2p_smem_item *out = smp2p->out;
209 	u32 val;
210 
211 	trace_smp2p_ssr_ack(smp2p->dev);
212 	smp2p->ssr_ack = !smp2p->ssr_ack;
213 
214 	val = out->flags & ~BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
215 	if (smp2p->ssr_ack)
216 		val |= BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
217 	out->flags = val;
218 
219 	qcom_smp2p_kick(smp2p);
220 }
221 
/*
 * Negotiate the protocol version and feature set with the remote side.
 *
 * When both ends publish the same version, the feature sets are intersected
 * and negotiation completes.  When the remote advertises a lower, non-zero
 * version, lower our outbound version to match and kick the remote so it can
 * re-evaluate; negotiation then finishes on a subsequent interrupt.
 */
static void qcom_smp2p_negotiate(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out = smp2p->out;
	struct smp2p_smem_item *in = smp2p->in;

	if (in->version == out->version) {
		/* Keep only features supported by both ends */
		out->features &= in->features;

		if (out->features & SMP2P_FEATURE_SSR_ACK)
			smp2p->ssr_ack_enabled = true;

		smp2p->negotiation_done = true;
		trace_smp2p_negotiate(smp2p->dev, out->features);
	} else if (in->version && in->version < out->version) {
		out->version = in->version;
		qcom_smp2p_kick(smp2p);
	}
}
240 
241 static int qcom_smp2p_in_version(struct qcom_smp2p *smp2p)
242 {
243 	unsigned int smem_id = smp2p->smem_items[SMP2P_INBOUND];
244 	unsigned int pid = smp2p->remote_pid;
245 	struct smp2p_smem_item *in;
246 	size_t size;
247 
248 	in = qcom_smem_get(pid, smem_id, &size);
249 	if (IS_ERR(in))
250 		return 0;
251 
252 	return in->version;
253 }
254 
/*
 * Scan the inbound smem item for entries the remote may already have
 * published (e.g. a processor started before Linux, SMP2P v2) and bind them
 * to local inbound entries by name.
 */
static void qcom_smp2p_start_in(struct qcom_smp2p *smp2p)
{
	unsigned int smem_id = smp2p->smem_items[SMP2P_INBOUND];
	unsigned int pid = smp2p->remote_pid;
	char buf[SMP2P_MAX_ENTRY_NAME];
	struct smp2p_smem_item *in;
	struct smp2p_entry *entry;
	size_t size;
	int i;

	/* Nothing to do if the remote hasn't created its item yet */
	in = qcom_smem_get(pid, smem_id, &size);
	if (IS_ERR(in))
		return;

	smp2p->in = in;

	/* Check if version is initialized by the remote. */
	if (in->version == 0)
		return;

	/* Match entries added since the last scan against our inbound list */
	for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
		list_for_each_entry(entry, &smp2p->inbound, node) {
			/* copy the name out of shared memory before comparing */
			memcpy(buf, in->entries[i].name, sizeof(buf));
			if (!strcmp(buf, entry->name)) {
				entry->value = &in->entries[i].value;
				/* seed last_value so current state fires no edges */
				entry->last_value = readl(entry->value);
				break;
			}
		}
	}
	smp2p->valid_entries = i;
}
287 
/*
 * Match newly allocated remote entries by name, then deliver nested
 * interrupts for every enabled bit that toggled since the last notification,
 * honoring each bit's configured edge type.
 */
static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *in;
	struct smp2p_entry *entry;
	int irq_pin;
	u32 status;
	char buf[SMP2P_MAX_ENTRY_NAME];
	u32 val;
	int i;

	in = smp2p->in;

	/* Match newly created entries */
	for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
		list_for_each_entry(entry, &smp2p->inbound, node) {
			/* copy the name out of shared memory before comparing */
			memcpy(buf, in->entries[i].name, sizeof(buf));
			if (!strcmp(buf, entry->name)) {
				entry->value = &in->entries[i].value;
				break;
			}
		}
	}
	smp2p->valid_entries = i;

	/* Fire interrupts based on any value changes */
	list_for_each_entry(entry, &smp2p->inbound, node) {
		/* Ignore entries not yet allocated by the remote side */
		if (!entry->value)
			continue;

		val = readl(entry->value);

		/* XOR yields the bits that toggled since the last run */
		status = val ^ entry->last_value;
		entry->last_value = val;

		trace_smp2p_notify_in(entry, status, val);

		/* No changes of this entry? */
		if (!status)
			continue;

		for_each_set_bit(i, entry->irq_enabled, 32) {
			if (!(status & BIT(i)))
				continue;

			/* Deliver only edges the consumer asked for */
			if ((val & BIT(i) && test_bit(i, entry->irq_rising)) ||
			    (!(val & BIT(i)) && test_bit(i, entry->irq_falling))) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		}
	}
}
341 
/**
 * qcom_smp2p_intr() - interrupt handler for incoming notifications
 * @irq:	unused
 * @data:	smp2p driver context
 *
 * Handle notifications from the remote side to handle newly allocated entries
 * or any changes to the state bits of existing entries.
 *
 * Return: %IRQ_HANDLED
 */
static irqreturn_t qcom_smp2p_intr(int irq, void *data)
{
	struct smp2p_smem_item *in;
	struct qcom_smp2p *smp2p = data;
	unsigned int smem_id = smp2p->smem_items[SMP2P_INBOUND];
	unsigned int pid = smp2p->remote_pid;
	bool ack_restart;
	size_t size;

	in = smp2p->in;

	/* Acquire smem item, if not already found */
	if (!in) {
		in = qcom_smem_get(pid, smem_id, &size);
		if (IS_ERR(in)) {
			dev_err(smp2p->dev,
				"Unable to acquire remote smp2p item\n");
			goto out;
		}

		smp2p->in = in;
	}

	/* Negotiation may take several kicks back and forth to settle */
	if (!smp2p->negotiation_done)
		qcom_smp2p_negotiate(smp2p);

	if (smp2p->negotiation_done) {
		/* Detect a remote restart before consuming the entry values */
		ack_restart = qcom_smp2p_check_ssr(smp2p);
		qcom_smp2p_notify_in(smp2p);

		/* Ack only after all pending state has been delivered */
		if (ack_restart)
			qcom_smp2p_do_ssr_ack(smp2p);
	}

out:
	return IRQ_HANDLED;
}
389 
390 static void smp2p_mask_irq(struct irq_data *irqd)
391 {
392 	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
393 	irq_hw_number_t irq = irqd_to_hwirq(irqd);
394 
395 	clear_bit(irq, entry->irq_enabled);
396 }
397 
398 static void smp2p_unmask_irq(struct irq_data *irqd)
399 {
400 	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
401 	irq_hw_number_t irq = irqd_to_hwirq(irqd);
402 
403 	set_bit(irq, entry->irq_enabled);
404 }
405 
406 static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type)
407 {
408 	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
409 	irq_hw_number_t irq = irqd_to_hwirq(irqd);
410 
411 	if (!(type & IRQ_TYPE_EDGE_BOTH))
412 		return -EINVAL;
413 
414 	if (type & IRQ_TYPE_EDGE_RISING)
415 		set_bit(irq, entry->irq_rising);
416 	else
417 		clear_bit(irq, entry->irq_rising);
418 
419 	if (type & IRQ_TYPE_EDGE_FALLING)
420 		set_bit(irq, entry->irq_falling);
421 	else
422 		clear_bit(irq, entry->irq_falling);
423 
424 	return 0;
425 }
426 
/* Show the owning smp2p device name in /proc/interrupts */
static void smp2p_irq_print_chip(struct irq_data *irqd, struct seq_file *p)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);

	seq_printf(p, "%8s", dev_name(entry->smp2p->dev));
}
433 
434 static int smp2p_irq_get_irqchip_state(struct irq_data *irqd, enum irqchip_irq_state which,
435 				       bool *state)
436 {
437 	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
438 	u32 val;
439 
440 	if (which != IRQCHIP_STATE_LINE_LEVEL)
441 		return -EINVAL;
442 
443 	if (!entry->value)
444 		return -ENODEV;
445 
446 	val = readl(entry->value);
447 	*state = !!(val & BIT(irqd_to_hwirq(irqd)));
448 
449 	return 0;
450 }
451 
/* irqchip backing the virtual inbound interrupts, one hwirq per entry bit */
static struct irq_chip smp2p_irq_chip = {
	.name           = "smp2p",
	.irq_mask       = smp2p_mask_irq,
	.irq_unmask     = smp2p_unmask_irq,
	.irq_set_type	= smp2p_set_irq_type,
	.irq_print_chip = smp2p_irq_print_chip,
	.irq_get_irqchip_state = smp2p_irq_get_irqchip_state,
};
460 
461 static int smp2p_irq_map(struct irq_domain *d,
462 			 unsigned int irq,
463 			 irq_hw_number_t hw)
464 {
465 	struct smp2p_entry *entry = d->host_data;
466 
467 	irq_set_chip_and_handler(irq, &smp2p_irq_chip, handle_level_irq);
468 	irq_set_chip_data(irq, entry);
469 	irq_set_nested_thread(irq, 1);
470 	irq_set_noprobe(irq);
471 
472 	return 0;
473 }
474 
/* Two-cell DT specifier: bit number and interrupt flags */
static const struct irq_domain_ops smp2p_irq_ops = {
	.map = smp2p_irq_map,
	.xlate = irq_domain_xlate_twocell,
};
479 
480 static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
481 				    struct smp2p_entry *entry,
482 				    struct device_node *node)
483 {
484 	entry->domain = irq_domain_create_linear(of_fwnode_handle(node), 32, &smp2p_irq_ops, entry);
485 	if (!entry->domain) {
486 		dev_err(smp2p->dev, "failed to add irq_domain\n");
487 		return -ENOMEM;
488 	}
489 
490 	return 0;
491 }
492 
493 static int smp2p_update_bits(void *data, u32 mask, u32 value)
494 {
495 	struct smp2p_entry *entry = data;
496 	unsigned long flags;
497 	u32 orig;
498 	u32 val;
499 
500 	spin_lock_irqsave(&entry->lock, flags);
501 	val = orig = readl(entry->value);
502 	val &= ~mask;
503 	val |= value;
504 	writel(val, entry->value);
505 	spin_unlock_irqrestore(&entry->lock, flags);
506 
507 	trace_smp2p_update_bits(entry, orig, val);
508 
509 	if (val != orig)
510 		qcom_smp2p_kick(entry->smp2p);
511 
512 	return 0;
513 }
514 
/* smem_state interface for outbound entries */
static const struct qcom_smem_state_ops smp2p_state_ops = {
	.update_bits = smp2p_update_bits,
};
518 
519 static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
520 				     struct smp2p_entry *entry,
521 				     struct device_node *node)
522 {
523 	struct smp2p_smem_item *out = smp2p->out;
524 	char buf[SMP2P_MAX_ENTRY_NAME] = {};
525 
526 	/* Allocate an entry from the smem item */
527 	strscpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME);
528 	memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);
529 
530 	/* Make the logical entry reference the physical value */
531 	entry->value = &out->entries[out->valid_entries].value;
532 
533 	out->valid_entries++;
534 
535 	entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry);
536 	if (IS_ERR(entry->state)) {
537 		dev_err(smp2p->dev, "failed to register qcom_smem_state\n");
538 		return PTR_ERR(entry->state);
539 	}
540 
541 	return 0;
542 }
543 
544 static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p)
545 {
546 	struct smp2p_smem_item *out;
547 	unsigned smem_id = smp2p->smem_items[SMP2P_OUTBOUND];
548 	unsigned pid = smp2p->remote_pid;
549 	u8 in_version;
550 	int ret;
551 
552 	ret = qcom_smem_alloc(pid, smem_id, sizeof(*out));
553 	if (ret < 0 && ret != -EEXIST)
554 		return dev_err_probe(smp2p->dev, ret,
555 				     "unable to allocate local smp2p item\n");
556 
557 	out = qcom_smem_get(pid, smem_id, NULL);
558 	if (IS_ERR(out)) {
559 		dev_err(smp2p->dev, "Unable to acquire local smp2p item\n");
560 		return PTR_ERR(out);
561 	}
562 
563 	memset(out, 0, sizeof(*out));
564 	out->magic = SMP2P_MAGIC;
565 	out->local_pid = smp2p->local_pid;
566 	out->remote_pid = smp2p->remote_pid;
567 	out->total_entries = SMP2P_MAX_ENTRY;
568 	out->valid_entries = 0;
569 	out->features = SMP2P_ALL_FEATURES;
570 
571 	in_version = qcom_smp2p_in_version(smp2p);
572 	if (in_version > MAX_VERSION) {
573 		dev_err(smp2p->dev, "Unsupported smp2p version %d\n", in_version);
574 		return -EINVAL;
575 	}
576 
577 	/*
578 	 * Make sure the rest of the header is written before we validate the
579 	 * item by writing a valid version number.
580 	 */
581 	wmb();
582 	if (in_version && in_version <= 2)
583 		out->version = in_version;
584 	else
585 		out->version = 2;
586 
587 	qcom_smp2p_kick(smp2p);
588 
589 	smp2p->out = out;
590 
591 	return 0;
592 }
593 
594 static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
595 {
596 	struct device_node *syscon;
597 	struct device *dev = smp2p->dev;
598 	const char *key;
599 	int ret;
600 
601 	syscon = of_parse_phandle(dev->of_node, "qcom,ipc", 0);
602 	if (!syscon) {
603 		dev_err(dev, "no qcom,ipc node\n");
604 		return -ENODEV;
605 	}
606 
607 	smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
608 	of_node_put(syscon);
609 	if (IS_ERR(smp2p->ipc_regmap))
610 		return PTR_ERR(smp2p->ipc_regmap);
611 
612 	key = "qcom,ipc";
613 	ret = of_property_read_u32_index(dev->of_node, key, 1, &smp2p->ipc_offset);
614 	if (ret < 0) {
615 		dev_err(dev, "no offset in %s\n", key);
616 		return -EINVAL;
617 	}
618 
619 	ret = of_property_read_u32_index(dev->of_node, key, 2, &smp2p->ipc_bit);
620 	if (ret < 0) {
621 		dev_err(dev, "no bit in %s\n", key);
622 		return -EINVAL;
623 	}
624 
625 	return 0;
626 }
627 
/*
 * Probe: read the edge configuration from DT, set up the outgoing kick path
 * (mailbox or qcom,ipc regmap), allocate the outbound smem item, create one
 * inbound or outbound interface per child node, then arm the incoming
 * interrupt.
 */
static int qcom_smp2p_probe(struct platform_device *pdev)
{
	struct smp2p_entry *entry;
	struct qcom_smp2p *smp2p;
	const char *key;
	int irq;
	int ret;

	smp2p = devm_kzalloc(&pdev->dev, sizeof(*smp2p), GFP_KERNEL);
	if (!smp2p)
		return -ENOMEM;

	smp2p->dev = &pdev->dev;
	INIT_LIST_HEAD(&smp2p->inbound);
	INIT_LIST_HEAD(&smp2p->outbound);

	platform_set_drvdata(pdev, smp2p);

	/* Two smem item ids: inbound then outbound */
	key = "qcom,smem";
	ret = of_property_read_u32_array(pdev->dev.of_node, key,
					 smp2p->smem_items, 2);
	if (ret)
		return ret;

	key = "qcom,local-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->local_pid);
	if (ret)
		goto report_read_failure;

	key = "qcom,remote-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->remote_pid);
	if (ret)
		goto report_read_failure;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* Prefer a mailbox for kicks; fall back to qcom,ipc on -ENOENT */
	smp2p->mbox_client.dev = &pdev->dev;
	smp2p->mbox_client.knows_txdone = true;
	smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0);
	if (IS_ERR(smp2p->mbox_chan)) {
		if (PTR_ERR(smp2p->mbox_chan) != -ENOENT)
			return PTR_ERR(smp2p->mbox_chan);

		smp2p->mbox_chan = NULL;

		ret = smp2p_parse_ipc(smp2p);
		if (ret)
			return ret;
	}

	ret = qcom_smp2p_alloc_outbound_item(smp2p);
	if (ret < 0)
		goto release_mbox;

	/* One child node per entry; "interrupt-controller" marks inbound */
	for_each_available_child_of_node_scoped(pdev->dev.of_node, node) {
		entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			ret = -ENOMEM;
			goto unwind_interfaces;
		}

		entry->smp2p = smp2p;
		spin_lock_init(&entry->lock);

		ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
		if (ret < 0)
			goto unwind_interfaces;

		if (of_property_read_bool(node, "interrupt-controller")) {
			ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
			if (ret < 0)
				goto unwind_interfaces;

			list_add(&entry->node, &smp2p->inbound);
		} else  {
			ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
			if (ret < 0)
				goto unwind_interfaces;

			list_add(&entry->node, &smp2p->outbound);
		}
	}

	/* Check inbound entries in the case of early boot processor */
	qcom_smp2p_start_in(smp2p);

	/* Kick the outgoing edge after allocating entries */
	qcom_smp2p_kick(smp2p);

	ret = devm_request_threaded_irq(&pdev->dev, irq,
					NULL, qcom_smp2p_intr,
					IRQF_ONESHOT,
					NULL, (void *)smp2p);
	if (ret) {
		dev_err(&pdev->dev, "failed to request interrupt\n");
		goto unwind_interfaces;
	}

	/*
	 * Treat smp2p interrupt as wakeup source, but keep it disabled
	 * by default. User space can decide enabling it depending on its
	 * use cases. For example if remoteproc crashes and device wants
	 * to handle it immediately (e.g. to not miss phone calls) it can
	 * enable wakeup source from user space, while other devices which
	 * do not have proper autosleep feature may want to handle it with
	 * other wakeup events (e.g. Power button) instead of waking up
	 * immediately.
	 */
	device_set_wakeup_capable(&pdev->dev, true);

	ret = dev_pm_set_wake_irq(&pdev->dev, irq);
	if (ret)
		goto set_wake_irq_fail;

	return 0;

set_wake_irq_fail:
	dev_pm_clear_wake_irq(&pdev->dev);

unwind_interfaces:
	/* Tear down everything registered by the child-node loop */
	list_for_each_entry(entry, &smp2p->inbound, node)
		irq_domain_remove(entry->domain);

	list_for_each_entry(entry, &smp2p->outbound, node)
		qcom_smem_state_unregister(entry->state);

	/* Invalidate our outbound entries towards the remote */
	smp2p->out->valid_entries = 0;

release_mbox:
	mbox_free_channel(smp2p->mbox_chan);

	return ret;

report_read_failure:
	dev_err(&pdev->dev, "failed to read %s\n", key);
	return -EINVAL;
}
766 
/*
 * Remove: undo probe in reverse order.  The smem items themselves cannot be
 * freed, so the outbound item is left in place with zero valid entries.
 */
static void qcom_smp2p_remove(struct platform_device *pdev)
{
	struct qcom_smp2p *smp2p = platform_get_drvdata(pdev);
	struct smp2p_entry *entry;

	dev_pm_clear_wake_irq(&pdev->dev);

	list_for_each_entry(entry, &smp2p->inbound, node)
		irq_domain_remove(entry->domain);

	list_for_each_entry(entry, &smp2p->outbound, node)
		qcom_smem_state_unregister(entry->state);

	mbox_free_channel(smp2p->mbox_chan);

	/* Tell the remote that none of our entries are valid any more */
	smp2p->out->valid_entries = 0;
}
784 
/* Driver registration boilerplate */
static const struct of_device_id qcom_smp2p_of_match[] = {
	{ .compatible = "qcom,smp2p" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smp2p_of_match);

static struct platform_driver qcom_smp2p_driver = {
	.probe = qcom_smp2p_probe,
	.remove = qcom_smp2p_remove,
	.driver  = {
		.name  = "qcom_smp2p",
		.of_match_table = qcom_smp2p_of_match,
	},
};
module_platform_driver(qcom_smp2p_driver);

MODULE_DESCRIPTION("Qualcomm Shared Memory Point to Point driver");
MODULE_LICENSE("GPL v2");
803