// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/seq_file.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/spinlock.h>

/*
 * The Shared Memory Point to Point (SMP2P) protocol facilitates communication
 * of a single 32-bit value between two processors.  Each value has a single
 * writer (the local side) and a single reader (the remote side). Values are
 * uniquely identified in the system by the directed edge (local processor ID
 * to remote processor ID) and a string identifier.
 *
 * Each processor is responsible for creating the outgoing SMEM items and each
 * item is writable by the local processor and readable by the remote
 * processor.  By using two separate SMEM items that are single-reader and
 * single-writer, SMP2P does not require any remote locking mechanisms.
 *
 * The driver uses the Linux interrupt framework to expose a virtual interrupt
 * controller for each inbound entry and registers a qcom_smem_state handle
 * for each outbound entry.
 */
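
/*
 * Consumer-side usage sketch (illustrative, not part of this driver): an
 * outbound entry is driven through the qcom_smem_state API, while an inbound
 * entry is consumed as an ordinary interrupt described in the device tree.
 * The "stop" connection name and the dev pointer below are hypothetical
 * placeholders.
 *
 *	struct qcom_smem_state *state;
 *	unsigned int stop_bit;
 *
 *	state = qcom_smem_state_get(dev, "stop", &stop_bit);
 *	if (IS_ERR(state))
 *		return PTR_ERR(state);
 *
 *	qcom_smem_state_update_bits(state, BIT(stop_bit), BIT(stop_bit));
 *
 * Inbound entries are typically requested as regular (threaded) interrupts
 * wired up through the device tree.
 */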

#define SMP2P_MAX_ENTRY 16
#define SMP2P_MAX_ENTRY_NAME 16

#define SMP2P_FEATURE_SSR_ACK 0x1
#define SMP2P_FLAGS_RESTART_DONE_BIT 0
#define SMP2P_FLAGS_RESTART_ACK_BIT 1

#define SMP2P_MAGIC 0x504d5324
#define SMP2P_ALL_FEATURES	SMP2P_FEATURE_SSR_ACK

/**
 * struct smp2p_smem_item - in memory communication structure
 * @magic:		magic number
 * @version:		version - must be 1
 * @features:		negotiated feature flags (e.g. SMP2P_FEATURE_SSR_ACK)
 * @local_pid:		processor id of sending end
 * @remote_pid:		processor id of receiving end
 * @total_entries:	number of entries - always SMP2P_MAX_ENTRY
 * @valid_entries:	number of allocated entries
 * @flags:		restart done and restart ack flag bits
 * @entries:		individual communication entries
 * @entries.name:	name of the entry
 * @entries.value:	content of the entry
 */
struct smp2p_smem_item {
	u32 magic;
	u8 version;
	unsigned features:24;
	u16 local_pid;
	u16 remote_pid;
	u16 total_entries;
	u16 valid_entries;
	u32 flags;

	struct {
		u8 name[SMP2P_MAX_ENTRY_NAME];
		u32 value;
	} entries[SMP2P_MAX_ENTRY];
} __packed;

/**
 * struct smp2p_entry - driver context matching one entry
 * @node:	list entry to keep track of allocated entries
 * @smp2p:	reference to the device driver context
 * @name:	name of the entry, to match against smp2p_smem_item
 * @value:	pointer to smp2p_smem_item entry value
 * @last_value:	last handled value
 * @domain:	irq_domain for inbound entries
 * @irq_enabled:bitmap to track enabled irq bits
 * @irq_rising:	bitmap to mark irq bits for rising detection
 * @irq_falling:bitmap to mark irq bits for falling detection
 * @state:	smem state handle
 * @lock:	spinlock to protect read-modify-write of the value
 */
struct smp2p_entry {
	struct list_head node;
	struct qcom_smp2p *smp2p;

	const char *name;
	u32 *value;
	u32 last_value;

	struct irq_domain *domain;
	DECLARE_BITMAP(irq_enabled, 32);
	DECLARE_BITMAP(irq_rising, 32);
	DECLARE_BITMAP(irq_falling, 32);

	struct qcom_smem_state *state;

	spinlock_t lock;
};

#define SMP2P_INBOUND	0
#define SMP2P_OUTBOUND	1

/**
 * struct qcom_smp2p - device driver context
 * @dev:	device driver handle
 * @in:		pointer to the inbound smem item
 * @out:	pointer to the outbound smem item
 * @smem_items:	ids of the two smem items
 * @valid_entries: already scanned inbound entries
 * @ssr_ack_enabled: SMP2P_FEATURE_SSR_ACK feature is supported and was enabled
 * @ssr_ack: current cached state of the local ack bit
 * @negotiation_done: whether negotiating finished
 * @local_pid:	processor id of the inbound edge
 * @remote_pid:	processor id of the outbound edge
 * @ipc_regmap:	regmap for the outbound ipc
 * @ipc_offset:	offset within the regmap
 * @ipc_bit:	bit in regmap@offset to kick to signal remote processor
 * @mbox_client: mailbox client handle
 * @mbox_chan:	apcs ipc mailbox channel handle
 * @inbound:	list of inbound entries
 * @outbound:	list of outbound entries
 */
struct qcom_smp2p {
	struct device *dev;

	struct smp2p_smem_item *in;
	struct smp2p_smem_item *out;

	unsigned smem_items[SMP2P_OUTBOUND + 1];

	unsigned valid_entries;

	bool ssr_ack_enabled;
	bool ssr_ack;
	bool negotiation_done;

	unsigned local_pid;
	unsigned remote_pid;

	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	struct list_head inbound;
	struct list_head outbound;
};

#define CREATE_TRACE_POINTS
#include "trace-smp2p.h"

static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
{
	/* Make sure any updated data is written before the kick */
	wmb();

	if (smp2p->mbox_chan) {
		mbox_send_message(smp2p->mbox_chan, NULL);
		mbox_client_txdone(smp2p->mbox_chan, 0);
	} else {
		regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset, BIT(smp2p->ipc_bit));
	}
}

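/*
 * SSR (subsystem restart) acknowledgement, when negotiated via
 * SMP2P_FEATURE_SSR_ACK: the remote side toggles the RESTART_DONE bit in its
 * outbound flags once it has restarted, and the local side acknowledges by
 * mirroring that value into the RESTART_ACK bit of its own outbound flags.
 */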
static bool qcom_smp2p_check_ssr(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *in = smp2p->in;
	bool restart;

	if (!smp2p->ssr_ack_enabled)
		return false;

	restart = in->flags & BIT(SMP2P_FLAGS_RESTART_DONE_BIT);

	return restart != smp2p->ssr_ack;
}

static void qcom_smp2p_do_ssr_ack(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out = smp2p->out;
	u32 val;

	trace_smp2p_ssr_ack(smp2p->dev);
	smp2p->ssr_ack = !smp2p->ssr_ack;

	val = out->flags & ~BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
	if (smp2p->ssr_ack)
		val |= BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
	out->flags = val;

	qcom_smp2p_kick(smp2p);
}

static void qcom_smp2p_negotiate(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out = smp2p->out;
	struct smp2p_smem_item *in = smp2p->in;

	if (in->version == out->version) {
		out->features &= in->features;

		if (out->features & SMP2P_FEATURE_SSR_ACK)
			smp2p->ssr_ack_enabled = true;

		smp2p->negotiation_done = true;
		trace_smp2p_negotiate(smp2p->dev, out->features);
	}
}

static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *in;
	struct smp2p_entry *entry;
	int irq_pin;
	u32 status;
	char buf[SMP2P_MAX_ENTRY_NAME];
	u32 val;
	int i;

	in = smp2p->in;

	/* Match newly created entries */
	for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
		list_for_each_entry(entry, &smp2p->inbound, node) {
			memcpy(buf, in->entries[i].name, sizeof(buf));
			if (!strcmp(buf, entry->name)) {
				entry->value = &in->entries[i].value;
				break;
			}
		}
	}
	smp2p->valid_entries = i;

	/* Fire interrupts based on any value changes */
	list_for_each_entry(entry, &smp2p->inbound, node) {
		/* Ignore entries not yet allocated by the remote side */
		if (!entry->value)
			continue;

		val = readl(entry->value);

		status = val ^ entry->last_value;
		entry->last_value = val;

		trace_smp2p_notify_in(entry, status, val);

		/* No changes of this entry? */
		if (!status)
			continue;

		for_each_set_bit(i, entry->irq_enabled, 32) {
			if (!(status & BIT(i)))
				continue;

			if ((val & BIT(i) && test_bit(i, entry->irq_rising)) ||
			    (!(val & BIT(i)) && test_bit(i, entry->irq_falling))) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		}
	}
}

/**
 * qcom_smp2p_intr() - interrupt handler for incoming notifications
 * @irq:	unused
 * @data:	smp2p driver context
 *
 * Handle notifications from the remote side about newly allocated entries
 * or changes to the state bits of existing entries.
 *
 * Return: %IRQ_HANDLED
 */
static irqreturn_t qcom_smp2p_intr(int irq, void *data)
{
	struct smp2p_smem_item *in;
	struct qcom_smp2p *smp2p = data;
	unsigned int smem_id = smp2p->smem_items[SMP2P_INBOUND];
	unsigned int pid = smp2p->remote_pid;
	bool ack_restart;
	size_t size;

	in = smp2p->in;

	/* Acquire smem item, if not already found */
	if (!in) {
		in = qcom_smem_get(pid, smem_id, &size);
		if (IS_ERR(in)) {
			dev_err(smp2p->dev,
				"Unable to acquire remote smp2p item\n");
			goto out;
		}

		smp2p->in = in;
	}

	if (!smp2p->negotiation_done)
		qcom_smp2p_negotiate(smp2p);

	if (smp2p->negotiation_done) {
		ack_restart = qcom_smp2p_check_ssr(smp2p);
		qcom_smp2p_notify_in(smp2p);

		if (ack_restart)
			qcom_smp2p_do_ssr_ack(smp2p);
	}

out:
	return IRQ_HANDLED;
}

static void smp2p_mask_irq(struct irq_data *irqd)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	clear_bit(irq, entry->irq_enabled);
}

static void smp2p_unmask_irq(struct irq_data *irqd)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	set_bit(irq, entry->irq_enabled);
}

static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	if (!(type & IRQ_TYPE_EDGE_BOTH))
		return -EINVAL;

	if (type & IRQ_TYPE_EDGE_RISING)
		set_bit(irq, entry->irq_rising);
	else
		clear_bit(irq, entry->irq_rising);

	if (type & IRQ_TYPE_EDGE_FALLING)
		set_bit(irq, entry->irq_falling);
	else
		clear_bit(irq, entry->irq_falling);

	return 0;
}

static void smp2p_irq_print_chip(struct irq_data *irqd, struct seq_file *p)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);

	seq_printf(p, " %8s", dev_name(entry->smp2p->dev));
}

static struct irq_chip smp2p_irq_chip = {
	.name           = "smp2p",
	.irq_mask       = smp2p_mask_irq,
	.irq_unmask     = smp2p_unmask_irq,
	.irq_set_type	= smp2p_set_irq_type,
	.irq_print_chip = smp2p_irq_print_chip,
};

static int smp2p_irq_map(struct irq_domain *d,
			 unsigned int irq,
			 irq_hw_number_t hw)
{
	struct smp2p_entry *entry = d->host_data;

	irq_set_chip_and_handler(irq, &smp2p_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, entry);
	irq_set_nested_thread(irq, 1);
	irq_set_noprobe(irq);

	return 0;
}

static const struct irq_domain_ops smp2p_irq_ops = {
	.map = smp2p_irq_map,
	.xlate = irq_domain_xlate_twocell,
};

static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
				    struct smp2p_entry *entry,
				    struct device_node *node)
{
	entry->domain = irq_domain_add_linear(node, 32, &smp2p_irq_ops, entry);
	if (!entry->domain) {
		dev_err(smp2p->dev, "failed to add irq_domain\n");
		return -ENOMEM;
	}

	return 0;
}

static int smp2p_update_bits(void *data, u32 mask, u32 value)
{
	struct smp2p_entry *entry = data;
	unsigned long flags;
	u32 orig;
	u32 val;

	spin_lock_irqsave(&entry->lock, flags);
	val = orig = readl(entry->value);
	val &= ~mask;
	val |= value;
	writel(val, entry->value);
	spin_unlock_irqrestore(&entry->lock, flags);

	trace_smp2p_update_bits(entry, orig, val);

	if (val != orig)
		qcom_smp2p_kick(entry->smp2p);

	return 0;
}

static const struct qcom_smem_state_ops smp2p_state_ops = {
	.update_bits = smp2p_update_bits,
};

static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
				     struct smp2p_entry *entry,
				     struct device_node *node)
{
	struct smp2p_smem_item *out = smp2p->out;
	char buf[SMP2P_MAX_ENTRY_NAME] = {};

	/* Allocate an entry from the smem item */
	strscpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME);
	memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);

	/* Make the logical entry reference the physical value */
	entry->value = &out->entries[out->valid_entries].value;

	out->valid_entries++;

	entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry);
	if (IS_ERR(entry->state)) {
		dev_err(smp2p->dev, "failed to register qcom_smem_state\n");
		return PTR_ERR(entry->state);
	}

	return 0;
}

static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out;
	unsigned smem_id = smp2p->smem_items[SMP2P_OUTBOUND];
	unsigned pid = smp2p->remote_pid;
	int ret;

	ret = qcom_smem_alloc(pid, smem_id, sizeof(*out));
	if (ret < 0 && ret != -EEXIST) {
		if (ret != -EPROBE_DEFER)
			dev_err(smp2p->dev,
				"unable to allocate local smp2p item\n");
		return ret;
	}

	out = qcom_smem_get(pid, smem_id, NULL);
	if (IS_ERR(out)) {
		dev_err(smp2p->dev, "Unable to acquire local smp2p item\n");
		return PTR_ERR(out);
	}

	memset(out, 0, sizeof(*out));
	out->magic = SMP2P_MAGIC;
	out->local_pid = smp2p->local_pid;
	out->remote_pid = smp2p->remote_pid;
	out->total_entries = SMP2P_MAX_ENTRY;
	out->valid_entries = 0;
	out->features = SMP2P_ALL_FEATURES;

	/*
	 * Make sure the rest of the header is written before we validate the
	 * item by writing a valid version number.
	 */
	wmb();
	out->version = 1;

	qcom_smp2p_kick(smp2p);

	smp2p->out = out;

	return 0;
}

static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
{
	struct device_node *syscon;
	struct device *dev = smp2p->dev;
	const char *key;
	int ret;

	syscon = of_parse_phandle(dev->of_node, "qcom,ipc", 0);
	if (!syscon) {
		dev_err(dev, "no qcom,ipc node\n");
		return -ENODEV;
	}

	smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
	of_node_put(syscon);
	if (IS_ERR(smp2p->ipc_regmap))
		return PTR_ERR(smp2p->ipc_regmap);

	key = "qcom,ipc";
	ret = of_property_read_u32_index(dev->of_node, key, 1, &smp2p->ipc_offset);
	if (ret < 0) {
		dev_err(dev, "no offset in %s\n", key);
		return -EINVAL;
	}

	ret = of_property_read_u32_index(dev->of_node, key, 2, &smp2p->ipc_bit);
	if (ret < 0) {
		dev_err(dev, "no bit in %s\n", key);
		return -EINVAL;
	}

	return 0;
}

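/*
 * Illustrative devicetree fragment matching the properties parsed in
 * qcom_smp2p_probe() and smp2p_parse_ipc() below.  All numeric values,
 * labels and the mailbox phandle are placeholders rather than data from a
 * particular platform; real nodes use either "mboxes" or the legacy
 * "qcom,ipc" property, not both.
 *
 *	smp2p-modem {
 *		compatible = "qcom,smp2p";
 *		qcom,smem = <435>, <428>;
 *		interrupts = <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>;
 *		mboxes = <&apcs 14>;
 *		qcom,local-pid = <0>;
 *		qcom,remote-pid = <1>;
 *
 *		modem_smp2p_out: master-kernel {
 *			qcom,entry-name = "master-kernel";
 *			#qcom,smem-state-cells = <1>;
 *		};
 *
 *		modem_smp2p_in: slave-kernel {
 *			qcom,entry-name = "slave-kernel";
 *			interrupt-controller;
 *			#interrupt-cells = <2>;
 *		};
 *	};
 */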
static int qcom_smp2p_probe(struct platform_device *pdev)
{
	struct smp2p_entry *entry;
	struct qcom_smp2p *smp2p;
	const char *key;
	int irq;
	int ret;

	smp2p = devm_kzalloc(&pdev->dev, sizeof(*smp2p), GFP_KERNEL);
	if (!smp2p)
		return -ENOMEM;

	smp2p->dev = &pdev->dev;
	INIT_LIST_HEAD(&smp2p->inbound);
	INIT_LIST_HEAD(&smp2p->outbound);

	platform_set_drvdata(pdev, smp2p);

	key = "qcom,smem";
	ret = of_property_read_u32_array(pdev->dev.of_node, key,
					 smp2p->smem_items, 2);
	if (ret)
		return ret;

	key = "qcom,local-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->local_pid);
	if (ret)
		goto report_read_failure;

	key = "qcom,remote-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->remote_pid);
	if (ret)
		goto report_read_failure;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	smp2p->mbox_client.dev = &pdev->dev;
	smp2p->mbox_client.knows_txdone = true;
	smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0);
	if (IS_ERR(smp2p->mbox_chan)) {
		if (PTR_ERR(smp2p->mbox_chan) != -ENODEV)
			return PTR_ERR(smp2p->mbox_chan);

		smp2p->mbox_chan = NULL;

		ret = smp2p_parse_ipc(smp2p);
		if (ret)
			return ret;
	}

	ret = qcom_smp2p_alloc_outbound_item(smp2p);
	if (ret < 0)
		goto release_mbox;

	for_each_available_child_of_node_scoped(pdev->dev.of_node, node) {
		entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			ret = -ENOMEM;
			goto unwind_interfaces;
		}

		entry->smp2p = smp2p;
		spin_lock_init(&entry->lock);

		ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
		if (ret < 0)
			goto unwind_interfaces;

		if (of_property_read_bool(node, "interrupt-controller")) {
			ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
			if (ret < 0)
				goto unwind_interfaces;

			list_add(&entry->node, &smp2p->inbound);
		} else {
			ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
			if (ret < 0)
				goto unwind_interfaces;

			list_add(&entry->node, &smp2p->outbound);
		}
	}

	/* Kick the outgoing edge after allocating entries */
	qcom_smp2p_kick(smp2p);

	ret = devm_request_threaded_irq(&pdev->dev, irq,
					NULL, qcom_smp2p_intr,
					IRQF_ONESHOT,
					NULL, (void *)smp2p);
	if (ret) {
		dev_err(&pdev->dev, "failed to request interrupt\n");
		goto unwind_interfaces;
	}

	/*
	 * Treat the smp2p interrupt as a wakeup source, but keep it disabled
	 * by default. User space can decide to enable it depending on its
	 * use cases. For example, if the remoteproc crashes and the device
	 * wants to handle it immediately (e.g. to not miss phone calls), it
	 * can enable the wakeup source from user space, while other devices
	 * which do not have a proper autosleep feature may want to handle it
	 * together with other wakeup events (e.g. the power button) instead
	 * of waking up immediately.
	 */
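	/*
	 * For instance, user space can flip the standard per-device wakeup
	 * control (the path below is illustrative):
	 *
	 *	echo enabled > /sys/devices/platform/.../power/wakeup
	 */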
	device_set_wakeup_capable(&pdev->dev, true);

	ret = dev_pm_set_wake_irq(&pdev->dev, irq);
	if (ret)
		goto set_wake_irq_fail;

	return 0;

set_wake_irq_fail:
	dev_pm_clear_wake_irq(&pdev->dev);

unwind_interfaces:
	list_for_each_entry(entry, &smp2p->inbound, node)
		irq_domain_remove(entry->domain);

	list_for_each_entry(entry, &smp2p->outbound, node)
		qcom_smem_state_unregister(entry->state);

	smp2p->out->valid_entries = 0;

release_mbox:
	mbox_free_channel(smp2p->mbox_chan);

	return ret;

report_read_failure:
	dev_err(&pdev->dev, "failed to read %s\n", key);
	return -EINVAL;
}

static void qcom_smp2p_remove(struct platform_device *pdev)
{
	struct qcom_smp2p *smp2p = platform_get_drvdata(pdev);
	struct smp2p_entry *entry;

	dev_pm_clear_wake_irq(&pdev->dev);

	list_for_each_entry(entry, &smp2p->inbound, node)
		irq_domain_remove(entry->domain);

	list_for_each_entry(entry, &smp2p->outbound, node)
		qcom_smem_state_unregister(entry->state);

	mbox_free_channel(smp2p->mbox_chan);

	smp2p->out->valid_entries = 0;
}

static const struct of_device_id qcom_smp2p_of_match[] = {
	{ .compatible = "qcom,smp2p" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smp2p_of_match);

static struct platform_driver qcom_smp2p_driver = {
	.probe = qcom_smp2p_probe,
	.remove_new = qcom_smp2p_remove,
	.driver  = {
		.name  = "qcom_smp2p",
		.of_match_table = qcom_smp2p_of_match,
	},
};
module_platform_driver(qcom_smp2p_driver);

MODULE_DESCRIPTION("Qualcomm Shared Memory Point to Point driver");
MODULE_LICENSE("GPL v2");