// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications Inc.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/interrupt.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>

/*
 * This driver implements the Qualcomm Shared Memory State Machine, a mechanism
 * for communicating single bit state information to remote processors.
 *
 * The implementation is based on two sections of shared memory; the first
 * holding the state bits and the second holding a matrix of subscription bits.
 *
 * The state bits are structured in entries of 32 bits, each belonging to one
 * system in the SoC. The entry belonging to the local system is considered
 * read-write, while the rest should be considered read-only.
 *
 * The subscription matrix consists of N bitmaps per entry, denoting interest
 * in updates of the entry for each of the N hosts. Upon updating a state bit,
 * each host's subscription bitmap should be queried and the remote system
 * should be interrupted if it has subscribed to any of the changed bits.
 *
 * The subscription matrix is laid out in entry-major order:
 * entry0: [host0 ... hostN]
 *	.
 *	.
 * entryM: [host0 ... hostN]
 *
 * A third, optional, shared memory region might contain information regarding
 * the number of entries in the state bitmap as well as number of columns in
 * the subscription matrix.
 */
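
/*
 * Layout sketch (illustrative only, not used by the code): with the two SMEM
 * items mapped as arrays of u32, the state word of entry E and the
 * subscription word of host H in that entry are located at
 *
 *	u32 *state        = states + E;
 *	u32 *subscription = intr_mask + E * num_hosts + H;
 *
 * where "states" and "intr_mask" are the pointers returned by qcom_smem_get()
 * in qcom_smsm_probe(). This mirrors the pointer arithmetic used throughout
 * the driver below.
 */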

/*
 * Shared memory identifiers, used to acquire handles to respective memory
 * region.
 */
#define SMEM_SMSM_SHARED_STATE		85
#define SMEM_SMSM_CPU_INTR_MASK		333
#define SMEM_SMSM_SIZE_INFO		419

/*
 * Default sizes, in case SMEM_SMSM_SIZE_INFO is not found.
 */
#define SMSM_DEFAULT_NUM_ENTRIES	8
#define SMSM_DEFAULT_NUM_HOSTS		3

struct smsm_entry;
struct smsm_host;

/**
 * struct qcom_smsm - smsm driver context
 * @dev:	smsm device pointer
 * @local_host:	column in the subscription matrix representing this system
 * @num_hosts:	number of columns in the subscription matrix
 * @num_entries: number of entries in the state map and rows in the subscription
 *		matrix
 * @local_state: pointer to the local processor's state bits
 * @subscription: pointer to local processor's row in subscription matrix
 * @state:	smem state handle
 * @lock:	spinlock for read-modify-write of the outgoing state
 * @entries:	context for each of the entries
 * @hosts:	context for each of the hosts
 * @mbox_client: mailbox client handle
 */
struct qcom_smsm {
	struct device *dev;

	u32 local_host;

	u32 num_hosts;
	u32 num_entries;

	u32 *local_state;
	u32 *subscription;
	struct qcom_smem_state *state;

	spinlock_t lock;

	struct smsm_entry *entries;
	struct smsm_host *hosts;

	struct mbox_client mbox_client;
};

/**
 * struct smsm_entry - per remote processor entry context
 * @smsm:	back-reference to driver context
 * @domain:	IRQ domain for this entry, if representing a remote system
 * @irq_enabled: bitmap of which state bits IRQs are enabled
 * @irq_rising:	bitmap tracking if rising bits should be propagated
 * @irq_falling: bitmap tracking if falling bits should be propagated
 * @last_value:	snapshot of state bits last time the interrupts were propagated
 * @remote_state: pointer to this entry's state bits
 * @subscription: pointer to a row in the subscription matrix representing this
 *		entry
 */
struct smsm_entry {
	struct qcom_smsm *smsm;

	struct irq_domain *domain;
	DECLARE_BITMAP(irq_enabled, 32);
	DECLARE_BITMAP(irq_rising, 32);
	DECLARE_BITMAP(irq_falling, 32);
	unsigned long last_value;

	u32 *remote_state;
	u32 *subscription;
};

/**
 * struct smsm_host - representation of a remote host
 * @ipc_regmap:	regmap for outgoing interrupt
 * @ipc_offset:	offset in @ipc_regmap for outgoing interrupt
 * @ipc_bit:	bit in @ipc_regmap + @ipc_offset for outgoing interrupt
 * @mbox_chan:	apcs ipc mailbox channel handle
 */
struct smsm_host {
	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;

	struct mbox_chan *mbox_chan;
};

/**
 * smsm_update_bits() - change bit in outgoing entry and inform subscribers
 * @data:	smsm context pointer
 * @mask:	value mask
 * @value:	new value
 *
 * Used to set and clear the bits in the outgoing/local entry and inform
 * subscribers about the change.
 */
static int smsm_update_bits(void *data, u32 mask, u32 value)
{
	struct qcom_smsm *smsm = data;
	struct smsm_host *hostp;
	unsigned long flags;
	u32 changes;
	u32 host;
	u32 orig;
	u32 val;

	spin_lock_irqsave(&smsm->lock, flags);

	/* Update the entry */
	val = orig = readl(smsm->local_state);
	val &= ~mask;
	val |= value;

	/* Don't signal if we didn't change the value */
	changes = val ^ orig;
	if (!changes) {
		spin_unlock_irqrestore(&smsm->lock, flags);
		goto done;
	}

	/* Write out the new value */
	writel(val, smsm->local_state);
	spin_unlock_irqrestore(&smsm->lock, flags);

	/* Make sure the value update is ordered before any kicks */
	wmb();

	/* Iterate over all hosts to check who wants a kick */
	for (host = 0; host < smsm->num_hosts; host++) {
		hostp = &smsm->hosts[host];

		val = readl(smsm->subscription + host);
		if (!(val & changes))
			continue;

		if (hostp->mbox_chan) {
			mbox_send_message(hostp->mbox_chan, NULL);
			mbox_client_txdone(hostp->mbox_chan, 0);
		} else if (hostp->ipc_regmap) {
			regmap_write(hostp->ipc_regmap,
				     hostp->ipc_offset,
				     BIT(hostp->ipc_bit));
		}
	}

done:
	return 0;
}

static const struct qcom_smem_state_ops smsm_state_ops = {
	.update_bits = smsm_update_bits,
};
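
/*
 * Consumer-side sketch (illustrative only, not part of this driver): a driver
 * that owns one of the outgoing state bits reaches smsm_update_bits() through
 * the generic smem_state API. The connection name "example-state" is
 * hypothetical and comes from that consumer's own binding.
 *
 *	struct qcom_smem_state *state;
 *	unsigned int bit;
 *
 *	state = devm_qcom_smem_state_get(dev, "example-state", &bit);
 *	if (IS_ERR(state))
 *		return PTR_ERR(state);
 *
 *	qcom_smem_state_update_bits(state, BIT(bit), BIT(bit));	(assert)
 *	qcom_smem_state_update_bits(state, BIT(bit), 0);		(deassert)
 */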

/**
 * smsm_intr() - cascading IRQ handler for SMSM
 * @irq:	unused
 * @data:	entry related to this IRQ
 *
 * This function cascades an incoming interrupt from a remote system, based on
 * the state bits and configuration.
 */
static irqreturn_t smsm_intr(int irq, void *data)
{
	struct smsm_entry *entry = data;
	unsigned i;
	int irq_pin;
	u32 changed;
	u32 val;

	val = readl(entry->remote_state);
	changed = val ^ xchg(&entry->last_value, val);

	for_each_set_bit(i, entry->irq_enabled, 32) {
		if (!(changed & BIT(i)))
			continue;

		if (val & BIT(i)) {
			if (test_bit(i, entry->irq_rising)) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		} else {
			if (test_bit(i, entry->irq_falling)) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		}
	}

	return IRQ_HANDLED;
}

/**
 * smsm_mask_irq() - un-subscribe from cascades of IRQs of a certain status bit
 * @irqd:	IRQ handle to be masked
 *
 * This un-subscribes the local CPU from interrupts upon changes to the defined
 * status bit. The bit is also cleared from cascading.
 */
static void smsm_mask_irq(struct irq_data *irqd)
{
	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);
	struct qcom_smsm *smsm = entry->smsm;
	u32 val;

	if (entry->subscription) {
		val = readl(entry->subscription + smsm->local_host);
		val &= ~BIT(irq);
		writel(val, entry->subscription + smsm->local_host);
	}

	clear_bit(irq, entry->irq_enabled);
}

/**
 * smsm_unmask_irq() - subscribe to cascades of IRQs of a certain status bit
 * @irqd:	IRQ handle to be unmasked
 *
 * This subscribes the local CPU to interrupts upon changes to the defined
 * status bit. The bit is also marked for cascading.
 */
static void smsm_unmask_irq(struct irq_data *irqd)
{
	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);
	struct qcom_smsm *smsm = entry->smsm;
	u32 val;

	/* Make sure our last cached state is up-to-date */
	if (readl(entry->remote_state) & BIT(irq))
		set_bit(irq, &entry->last_value);
	else
		clear_bit(irq, &entry->last_value);

	set_bit(irq, entry->irq_enabled);

	if (entry->subscription) {
		val = readl(entry->subscription + smsm->local_host);
		val |= BIT(irq);
		writel(val, entry->subscription + smsm->local_host);
	}
}

/**
 * smsm_set_irq_type() - updates the requested IRQ type for the cascading
 * @irqd:	consumer interrupt handle
 * @type:	requested flags
 */
static int smsm_set_irq_type(struct irq_data *irqd, unsigned int type)
{
	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	if (!(type & IRQ_TYPE_EDGE_BOTH))
		return -EINVAL;

	if (type & IRQ_TYPE_EDGE_RISING)
		set_bit(irq, entry->irq_rising);
	else
		clear_bit(irq, entry->irq_rising);

	if (type & IRQ_TYPE_EDGE_FALLING)
		set_bit(irq, entry->irq_falling);
	else
		clear_bit(irq, entry->irq_falling);

	return 0;
}

static int smsm_get_irqchip_state(struct irq_data *irqd,
				  enum irqchip_irq_state which, bool *state)
{
	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);
	u32 val;

	if (which != IRQCHIP_STATE_LINE_LEVEL)
		return -EINVAL;

	val = readl(entry->remote_state);
	*state = !!(val & BIT(irq));

	return 0;
}

static struct irq_chip smsm_irq_chip = {
	.name           = "smsm",
	.irq_mask       = smsm_mask_irq,
	.irq_unmask     = smsm_unmask_irq,
	.irq_set_type	= smsm_set_irq_type,
	.irq_get_irqchip_state = smsm_get_irqchip_state,
};

/**
 * smsm_irq_map() - sets up a mapping for a cascaded IRQ
 * @d:		IRQ domain representing an entry
 * @irq:	IRQ to set up
 * @hw:		unused
 */
static int smsm_irq_map(struct irq_domain *d,
			unsigned int irq,
			irq_hw_number_t hw)
{
	struct smsm_entry *entry = d->host_data;

	irq_set_chip_and_handler(irq, &smsm_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, entry);
	irq_set_nested_thread(irq, 1);

	return 0;
}

static const struct irq_domain_ops smsm_irq_ops = {
	.map = smsm_irq_map,
	.xlate = irq_domain_xlate_twocell,
};
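
/*
 * Note for consumers: smsm_irq_map() registers these interrupts as nested
 * threaded IRQs and smsm_intr() delivers them via handle_nested_irq(), so a
 * consumer must install a threaded handler. Minimal sketch with hypothetical
 * names:
 *
 *	irq = of_irq_get(np, 0);
 *	ret = devm_request_threaded_irq(dev, irq, NULL, example_handler,
 *					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
 *					"example-smsm-bit", priv);
 */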

/**
 * smsm_parse_mbox() - requests an mbox channel
 * @smsm:	smsm driver context
 * @host_id:	index of the remote host to be resolved
 *
 * Requests the desired channel using the mbox interface which is needed for
 * sending the outgoing interrupts to a remote host - identified by @host_id.
 */
static int smsm_parse_mbox(struct qcom_smsm *smsm, unsigned int host_id)
{
	struct smsm_host *host = &smsm->hosts[host_id];
	int ret = 0;

	host->mbox_chan = mbox_request_channel(&smsm->mbox_client, host_id);
	if (IS_ERR(host->mbox_chan)) {
		ret = PTR_ERR(host->mbox_chan);
		host->mbox_chan = NULL;
	}

	return ret;
}
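
/*
 * The channel index passed to mbox_request_channel() above is the remote host
 * id, i.e. the "mboxes" list in the device tree is indexed by host. Hedged
 * sketch (phandle and signal numbers are hypothetical):
 *
 *	mboxes = <0>, <&apcs 13>, <0>, <&apcs 19>;
 *
 * with empty (<0>) slots for hosts that are kicked through the legacy
 * qcom,ipc-%d syscon properties, or not at all.
 */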

/**
 * smsm_parse_ipc() - parses a qcom,ipc-%d device tree property
 * @smsm:	smsm driver context
 * @host_id:	index of the remote host to be resolved
 *
 * Parses device tree to acquire the information needed for sending the
 * outgoing interrupts to a remote host - identified by @host_id.
 */
static int smsm_parse_ipc(struct qcom_smsm *smsm, unsigned host_id)
{
	struct device_node *syscon;
	struct device_node *node = smsm->dev->of_node;
	struct smsm_host *host = &smsm->hosts[host_id];
	char key[16];
	int ret;

	snprintf(key, sizeof(key), "qcom,ipc-%d", host_id);
	syscon = of_parse_phandle(node, key, 0);
	if (!syscon)
		return 0;

	host->ipc_regmap = syscon_node_to_regmap(syscon);
	of_node_put(syscon);
	if (IS_ERR(host->ipc_regmap))
		return PTR_ERR(host->ipc_regmap);

	ret = of_property_read_u32_index(node, key, 1, &host->ipc_offset);
	if (ret < 0) {
		dev_err(smsm->dev, "no offset in %s\n", key);
		return -EINVAL;
	}

	ret = of_property_read_u32_index(node, key, 2, &host->ipc_bit);
	if (ret < 0) {
		dev_err(smsm->dev, "no bit in %s\n", key);
		return -EINVAL;
	}

	return 0;
}
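
/*
 * The property parsed above has the following shape (hedged example; the
 * phandle, offset and bit are platform specific):
 *
 *	qcom,ipc-3 = <&apcs 8 19>;
 *
 * i.e. a syscon phandle, a register offset within that syscon and the bit to
 * write in order to ring the doorbell of remote host 3.
 */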

/**
 * smsm_inbound_entry() - parse DT and set up an entry representing a remote system
 * @smsm:	smsm driver context
 * @entry:	entry context to be set up
 * @node:	dt node containing the entry's properties
 */
static int smsm_inbound_entry(struct qcom_smsm *smsm,
			      struct smsm_entry *entry,
			      struct device_node *node)
{
	int ret;
	int irq;

	irq = irq_of_parse_and_map(node, 0);
	if (!irq) {
		dev_err(smsm->dev, "failed to parse smsm interrupt\n");
		return -EINVAL;
	}

	ret = devm_request_threaded_irq(smsm->dev, irq,
					NULL, smsm_intr,
					IRQF_ONESHOT,
					"smsm", (void *)entry);
	if (ret) {
		dev_err(smsm->dev, "failed to request interrupt\n");
		return ret;
	}

	entry->domain = irq_domain_add_linear(node, 32, &smsm_irq_ops, entry);
	if (!entry->domain) {
		dev_err(smsm->dev, "failed to add irq_domain\n");
		return -ENOMEM;
	}

	return 0;
}
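
/*
 * Hedged device tree sketch of such a remote entry and of a consumer picking
 * up one of its bits (labels, hwirq and bit numbers are examples only):
 *
 *	wcnss_smsm: wcnss@7 {
 *		reg = <7>;
 *		interrupts = <GIC_SPI 144 IRQ_TYPE_EDGE_RISING>;
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *	};
 *
 * A consumer then selects a state bit and an edge with the two-cell specifier
 * accepted by irq_domain_xlate_twocell():
 *
 *	interrupts-extended = <&wcnss_smsm 6 IRQ_TYPE_EDGE_RISING>;
 */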

/**
 * smsm_get_size_info() - parse the optional memory segment for sizes
 * @smsm:	smsm driver context
 *
 * Attempt to acquire the number of hosts and entries from the optional shared
 * memory location. Not being able to find this segment should indicate that
 * we're on an older system where these values were hard-coded to
 * SMSM_DEFAULT_NUM_ENTRIES and SMSM_DEFAULT_NUM_HOSTS.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int smsm_get_size_info(struct qcom_smsm *smsm)
{
	size_t size;
	struct {
		u32 num_hosts;
		u32 num_entries;
		u32 reserved0;
		u32 reserved1;
	} *info;

	info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SIZE_INFO, &size);
	if (IS_ERR(info) && PTR_ERR(info) != -ENOENT)
		return dev_err_probe(smsm->dev, PTR_ERR(info),
				     "unable to retrieve smsm size info\n");
	else if (IS_ERR(info) || size != sizeof(*info)) {
		dev_warn(smsm->dev, "no smsm size info, using defaults\n");
		smsm->num_entries = SMSM_DEFAULT_NUM_ENTRIES;
		smsm->num_hosts = SMSM_DEFAULT_NUM_HOSTS;
		return 0;
	}

	smsm->num_entries = info->num_entries;
	smsm->num_hosts = info->num_hosts;

	dev_dbg(smsm->dev,
		"found custom size of smsm: %d entries %d hosts\n",
		smsm->num_entries, smsm->num_hosts);

	return 0;
}

static int qcom_smsm_probe(struct platform_device *pdev)
{
	struct device_node *local_node;
	struct device_node *node;
	struct smsm_entry *entry;
	struct qcom_smsm *smsm;
	u32 *intr_mask;
	size_t size;
	u32 *states;
	u32 id;
	int ret;

	smsm = devm_kzalloc(&pdev->dev, sizeof(*smsm), GFP_KERNEL);
	if (!smsm)
		return -ENOMEM;
	smsm->dev = &pdev->dev;
	spin_lock_init(&smsm->lock);

	ret = smsm_get_size_info(smsm);
	if (ret)
		return ret;

	smsm->entries = devm_kcalloc(&pdev->dev,
				     smsm->num_entries,
				     sizeof(struct smsm_entry),
				     GFP_KERNEL);
	if (!smsm->entries)
		return -ENOMEM;

	smsm->hosts = devm_kcalloc(&pdev->dev,
				   smsm->num_hosts,
				   sizeof(struct smsm_host),
				   GFP_KERNEL);
	if (!smsm->hosts)
		return -ENOMEM;

	for_each_child_of_node(pdev->dev.of_node, local_node) {
		if (of_property_present(local_node, "#qcom,smem-state-cells"))
			break;
	}
	if (!local_node) {
		dev_err(&pdev->dev, "no state entry\n");
		return -EINVAL;
	}

	of_property_read_u32(pdev->dev.of_node,
			     "qcom,local-host",
			     &smsm->local_host);

	smsm->mbox_client.dev = &pdev->dev;
	smsm->mbox_client.knows_txdone = true;

	/* Parse the host properties */
	for (id = 0; id < smsm->num_hosts; id++) {
		/* Try using mbox interface first, otherwise fall back to syscon */
		ret = smsm_parse_mbox(smsm, id);
		if (!ret)
			continue;

		ret = smsm_parse_ipc(smsm, id);
		if (ret < 0)
			goto out_put;
	}

	/* Acquire the main SMSM state vector */
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE,
			      smsm->num_entries * sizeof(u32));
	if (ret < 0 && ret != -EEXIST) {
		dev_err(&pdev->dev, "unable to allocate shared state entry\n");
		goto out_put;
	}

	states = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE, NULL);
	if (IS_ERR(states)) {
		dev_err(&pdev->dev, "Unable to acquire shared state entry\n");
		ret = PTR_ERR(states);
		goto out_put;
	}

	/* Acquire the list of interrupt mask vectors */
	size = smsm->num_entries * smsm->num_hosts * sizeof(u32);
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, size);
	if (ret < 0 && ret != -EEXIST) {
		dev_err(&pdev->dev, "unable to allocate smsm interrupt mask\n");
		goto out_put;
	}

	intr_mask = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, NULL);
	if (IS_ERR(intr_mask)) {
		dev_err(&pdev->dev, "unable to acquire shared memory interrupt mask\n");
		ret = PTR_ERR(intr_mask);
		goto out_put;
	}

	/* Setup the reference to the local state bits */
	smsm->local_state = states + smsm->local_host;
	smsm->subscription = intr_mask + smsm->local_host * smsm->num_hosts;

	/* Register the outgoing state */
	smsm->state = qcom_smem_state_register(local_node, &smsm_state_ops, smsm);
	if (IS_ERR(smsm->state)) {
		dev_err(smsm->dev, "failed to register qcom_smem_state\n");
		ret = PTR_ERR(smsm->state);
		goto out_put;
	}

	/* Register handlers for remote processor entries of interest. */
	for_each_available_child_of_node(pdev->dev.of_node, node) {
		if (!of_property_read_bool(node, "interrupt-controller"))
			continue;

		ret = of_property_read_u32(node, "reg", &id);
		if (ret || id >= smsm->num_entries) {
			dev_err(&pdev->dev, "invalid reg of entry\n");
			if (!ret)
				ret = -EINVAL;
			goto unwind_interfaces;
		}
		entry = &smsm->entries[id];

		entry->smsm = smsm;
		entry->remote_state = states + id;

		/* Setup subscription pointers and unsubscribe from any kicks */
		entry->subscription = intr_mask + id * smsm->num_hosts;
		writel(0, entry->subscription + smsm->local_host);

		ret = smsm_inbound_entry(smsm, entry, node);
		if (ret < 0)
			goto unwind_interfaces;
	}

	platform_set_drvdata(pdev, smsm);
	of_node_put(local_node);

	return 0;

unwind_interfaces:
	of_node_put(node);
	for (id = 0; id < smsm->num_entries; id++)
		if (smsm->entries[id].domain)
			irq_domain_remove(smsm->entries[id].domain);

	qcom_smem_state_unregister(smsm->state);
out_put:
	for (id = 0; id < smsm->num_hosts; id++)
		mbox_free_channel(smsm->hosts[id].mbox_chan);

	of_node_put(local_node);
	return ret;
}

static void qcom_smsm_remove(struct platform_device *pdev)
{
	struct qcom_smsm *smsm = platform_get_drvdata(pdev);
	unsigned id;

	for (id = 0; id < smsm->num_entries; id++)
		if (smsm->entries[id].domain)
			irq_domain_remove(smsm->entries[id].domain);

	for (id = 0; id < smsm->num_hosts; id++)
		mbox_free_channel(smsm->hosts[id].mbox_chan);

	qcom_smem_state_unregister(smsm->state);
}

static const struct of_device_id qcom_smsm_of_match[] = {
	{ .compatible = "qcom,smsm" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smsm_of_match);

static struct platform_driver qcom_smsm_driver = {
	.probe = qcom_smsm_probe,
	.remove_new = qcom_smsm_remove,
	.driver  = {
		.name  = "qcom-smsm",
		.of_match_table = qcom_smsm_of_match,
	},
};
module_platform_driver(qcom_smsm_driver);

MODULE_DESCRIPTION("Qualcomm Shared Memory State Machine driver");
MODULE_LICENSE("GPL v2");