/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>

/*
 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
 * in the SoC.
 *
 * All systems contain a global heap, accessible by all processors in the SoC,
 * with a table of contents data structure (@smem_header) at the beginning of
 * the main shared memory block.
 *
 * The global header contains metadata for allocations as well as a fixed list
 * of 512 entries (@smem_global_entry) that can be initialized to reference
 * parts of the shared memory space.
 *
 * In addition to this global heap a set of "private" heaps can be set up at
 * boot time with access restrictions so that only certain processor pairs can
 * access the data.
 *
 * These partitions are referenced from an optional partition table
 * (@smem_ptable), that is found 4kB from the end of the main smem region. The
 * partition table entries (@smem_ptable_entry) list the involved processors
 * (or hosts) and their location in the main shared memory region.
 *
 * Each partition starts with a header (@smem_partition_header) that identifies
 * the partition and holds properties for the two internal memory regions. The
 * two regions are cached and non-cached memory respectively. Each region
 * contains a linked list of allocation headers (@smem_private_entry) followed
 * by their data.
 *
 * Items in the non-cached region are allocated from the start of the partition
 * while items in the cached region are allocated from the end. The free area
 * is hence the region between the cached and non-cached offsets.
 *
 * To synchronize allocations in the shared memory heaps a remote spinlock must
 * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
 * platforms.
 */
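
/*
 * An illustrative sketch of a private partition, as described above (sizes
 * not to scale):
 *
 *	+---------------------------+
 *	| smem_partition_header     |
 *	+---------------------------+
 *	| uncached item hdrs + data |  allocated from the start,
 *	| ...                       |  growing towards the end
 *	+---------------------------+ <- offset_free_uncached
 *	|                           |
 *	|         free space        |
 *	|                           |
 *	+---------------------------+ <- offset_free_cached
 *	| ...                       |  allocated from the end,
 *	| cached item hdrs + data   |  growing towards the start
 *	+---------------------------+
 */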

/*
 * Item 3 of the global heap contains an array of versions for the various
 * software components in the SoC. We verify that the boot loader version
 * matches the expected version (SMEM_EXPECTED_VERSION) as a sanity check.
 */
#define SMEM_ITEM_VERSION	3
#define  SMEM_MASTER_SBL_VERSION_INDEX	7
#define  SMEM_EXPECTED_VERSION		11

/*
 * The first 8 items are only to be allocated by the boot loader while
 * initializing the heap.
 */
#define SMEM_ITEM_LAST_FIXED	8

/* Highest accepted item number, for both global and private heaps */
#define SMEM_ITEM_COUNT		512

/* Processor/host identifier for the application processor */
#define SMEM_HOST_APPS		0

/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT		9

/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command:	current command to be executed
 * @status:	status of the currently requested command
 * @params:	parameters to the command
 */
struct smem_proc_comm {
	__le32 command;
	__le32 status;
	__le32 params[2];
};

/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated:	boolean to indicate if this entry is used
 * @offset:	offset to the allocated space
 * @size:	size of the allocated space, 8 byte aligned
 * @aux_base:	base address for the memory region used by this unit, or 0 for
 *		the default region. bits 0,1 are reserved
 */
struct smem_global_entry {
	__le32 allocated;
	__le32 offset;
	__le32 size;
	__le32 aux_base; /* bits 1:0 reserved */
};
#define AUX_BASE_MASK		0xfffffffc

/**
 * struct smem_header - header found in beginning of primary smem region
 * @proc_comm:		proc_comm communication interface (legacy)
 * @version:		array of versions for the various subsystems
 * @initialized:	boolean to indicate that smem is initialized
 * @free_offset:	index of the first unallocated byte in smem
 * @available:		number of bytes available for allocation
 * @reserved:		reserved field, must be 0
 * @toc:		array of references to items
 */
struct smem_header {
	struct smem_proc_comm proc_comm[4];
	__le32 version[32];
	__le32 initialized;
	__le32 free_offset;
	__le32 available;
	__le32 reserved;
	struct smem_global_entry toc[SMEM_ITEM_COUNT];
};

/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset:	offset, within the main shared memory region, of the partition
 * @size:	size of the partition
 * @flags:	flags for the partition (currently unused)
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @reserved:	reserved entries for later use
 */
struct smem_ptable_entry {
	__le32 offset;
	__le32 size;
	__le32 flags;
	__le16 host0;
	__le16 host1;
	__le32 reserved[8];
};

/**
 * struct smem_ptable - partition table for the private partitions
 * @magic:	magic number, must be SMEM_PTABLE_MAGIC
 * @version:	version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved:	for now reserved entries
 * @entry:	list of @smem_ptable_entry for the @num_entries partitions
 */
struct smem_ptable {
	u8 magic[4];
	__le32 version;
	__le32 num_entries;
	__le32 reserved[5];
	struct smem_ptable_entry entry[];
};

static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */

/**
 * struct smem_partition_header - header of the partitions
 * @magic:	magic number, must be SMEM_PART_MAGIC
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @size:	size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *		this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *		partition
 * @reserved:	for now reserved entries
 */
struct smem_partition_header {
	u8 magic[4];
	__le16 host0;
	__le16 host1;
	__le32 size;
	__le32 offset_free_uncached;
	__le32 offset_free_cached;
	__le32 reserved[3];
};

static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; /* "$PRT" */

/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary:	magic number, must be SMEM_PRIVATE_CANARY
 * @item:	identifying number of the smem item
 * @size:	size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved:	for now reserved entry
 */
struct smem_private_entry {
	u16 canary; /* bytes are the same so no swapping needed */
	__le16 item;
	__le32 size; /* includes padding bytes */
	__le16 padding_data;
	__le16 padding_hdr;
	__le32 reserved;
};
#define SMEM_PRIVATE_CANARY	0xa5a5
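
/*
 * An illustrative view of how one private item sits on the heap, based on the
 * fields above: the header is followed by optional header padding, then the
 * data and its padding, so that @size covers everything after the padded
 * header:
 *
 *	+--------------------+
 *	| smem_private_entry |
 *	+--------------------+
 *	| padding_hdr bytes  |
 *	+--------------------+ --+
 *	| item data          |   |
 *	+--------------------+   +-- size bytes
 *	| padding_data bytes |   |
 *	+--------------------+ --+
 */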

/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base:	identifier of aux_mem base
 * @virt_base:	virtual base address of memory with this aux_mem identifier
 * @size:	size of the memory region
 */
struct smem_region {
	u32 aux_base;
	void __iomem *virt_base;
	size_t size;
};

/**
 * struct qcom_smem - device data for the smem device
 * @dev:	device pointer
 * @hwlock:	reference to a hwspinlock
 * @partitions:	list of pointers to partitions affecting the current
 *		processor/host
 * @num_regions: number of @regions
 * @regions:	list of the memory regions defining the shared memory
 */
struct qcom_smem {
	struct device *dev;

	struct hwspinlock *hwlock;

	struct smem_partition_header *partitions[SMEM_HOST_COUNT];

	unsigned num_regions;
	struct smem_region regions[0];
};

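/*
 * Helpers for walking a partition: entries in the uncached region start right
 * after the partition header and end at offset_free_uncached, so "last" below
 * is really the first byte past the last valid entry.
 */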
static struct smem_private_entry *
phdr_to_last_private_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_uncached);
}

static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_cached);
}

static struct smem_private_entry *
phdr_to_first_private_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + sizeof(*phdr);
}

static struct smem_private_entry *
private_entry_next(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
	       le32_to_cpu(e->size);
}

static void *entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
}

/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;

/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT	1000

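/*
 * Allocate an item in the private partition shared with @host: walk the
 * uncached entry list to make sure @item does not already exist, then append
 * a new entry header just before the free area. Called with the remote
 * spinlock held.
 */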
static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   unsigned host,
				   unsigned item,
				   size_t size)
{
	struct smem_partition_header *phdr;
	struct smem_private_entry *hdr, *end;
	size_t alloc_size;
	void *cached;

	phdr = smem->partitions[host];
	hdr = phdr_to_first_private_entry(phdr);
	end = phdr_to_last_private_entry(phdr);
	cached = phdr_to_first_cached_entry(phdr);

	while (hdr < end) {
		if (hdr->canary != SMEM_PRIVATE_CANARY) {
			dev_err(smem->dev,
				"Found invalid canary in host %d partition\n",
				host);
			return -EINVAL;
		}

		if (le16_to_cpu(hdr->item) == item)
			return -EEXIST;

		hdr = private_entry_next(hdr);
	}

	/* Check that we don't grow into the cached region */
	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
	if ((void *)hdr + alloc_size >= cached) {
		dev_err(smem->dev, "Out of memory\n");
		return -ENOSPC;
	}

	hdr->canary = SMEM_PRIVATE_CANARY;
	hdr->item = cpu_to_le16(item);
	hdr->size = cpu_to_le32(ALIGN(size, 8));
	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
	hdr->padding_hdr = 0;

	/*
	 * Ensure the header is written before we advance the free offset, so
	 * that remote processors that do not take the remote spinlock still
	 * get a consistent view of the linked list.
	 */
	wmb();
	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);

	return 0;
}

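/*
 * Allocate an item in the global heap: claim the TOC entry for @item and
 * carve the space out of the free area behind free_offset. Called with the
 * remote spinlock held.
 */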
static int qcom_smem_alloc_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t size)
{
	struct smem_header *header;
	struct smem_global_entry *entry;

	if (WARN_ON(item >= SMEM_ITEM_COUNT))
		return -EINVAL;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)
		return -EEXIST;

	size = ALIGN(size, 8);
	if (WARN_ON(size > le32_to_cpu(header->available)))
		return -ENOMEM;

	entry->offset = header->free_offset;
	entry->size = cpu_to_le32(size);

	/*
	 * Ensure the header is consistent before we mark the item allocated,
	 * so that remote processors will get a consistent view of the item
	 * even though they do not take the spinlock on read.
	 */
	wmb();
	entry->allocated = cpu_to_le32(1);

	le32_add_cpu(&header->free_offset, size);
	le32_add_cpu(&header->available, -size);

	return 0;
}

/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host:	remote processor id, or -1
 * @item:	smem item handle
 * @size:	number of bytes to be allocated
 *
 * Allocate space for a given smem item of size @size, given that the item is
 * not yet allocated.
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
	unsigned long flags;
	int ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (item < SMEM_ITEM_LAST_FIXED) {
		dev_err(__smem->dev,
			"Rejecting allocation of static entry %d\n", item);
		return -EINVAL;
	}

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ret;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host])
		ret = qcom_smem_alloc_private(__smem, host, item, size);
	else
		ret = qcom_smem_alloc_global(__smem, item, size);

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ret;
}
EXPORT_SYMBOL(qcom_smem_alloc);

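/*
 * Look up @item in the global heap: resolve its TOC entry and translate the
 * aux_base/offset pair into a virtual address within the matching memory
 * region.
 */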
static void *qcom_smem_get_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t *size)
{
	struct smem_header *header;
	struct smem_region *area;
	struct smem_global_entry *entry;
	u32 aux_base;
	unsigned i;

	if (WARN_ON(item >= SMEM_ITEM_COUNT))
		return ERR_PTR(-EINVAL);

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (!entry->allocated)
		return ERR_PTR(-ENXIO);

	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;

	for (i = 0; i < smem->num_regions; i++) {
		area = &smem->regions[i];

		if (area->aux_base == aux_base || !aux_base) {
			if (size != NULL)
				*size = le32_to_cpu(entry->size);
			return area->virt_base + le32_to_cpu(entry->offset);
		}
	}

	return ERR_PTR(-ENOENT);
}

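/*
 * Look up @item in the private partition shared with @host by walking the
 * uncached entry list, validating the canary of each entry on the way.
 */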
static void *qcom_smem_get_private(struct qcom_smem *smem,
				   unsigned host,
				   unsigned item,
				   size_t *size)
{
	struct smem_partition_header *phdr;
	struct smem_private_entry *e, *end;

	phdr = smem->partitions[host];
	e = phdr_to_first_private_entry(phdr);
	end = phdr_to_last_private_entry(phdr);

	while (e < end) {
		if (e->canary != SMEM_PRIVATE_CANARY) {
			dev_err(smem->dev,
				"Found invalid canary in host %d partition\n",
				host);
			return ERR_PTR(-EINVAL);
		}

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return entry_to_item(e);
		}

		e = private_entry_next(e);
	}

	return ERR_PTR(-ENOENT);
}

/**
 * qcom_smem_get() - resolve the pointer and size of a smem item
 * @host:	the remote processor, or -1
 * @item:	smem item handle
 * @size:	pointer to be filled out with size of the item
 *
 * Looks up the smem item and returns a pointer to it. The size of the smem
 * item is returned in @size.
 */
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
	unsigned long flags;
	int ret;
	void *ptr = ERR_PTR(-EPROBE_DEFER);

	if (!__smem)
		return ptr;

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ERR_PTR(ret);

	if (host < SMEM_HOST_COUNT && __smem->partitions[host])
		ptr = qcom_smem_get_private(__smem, host, item, size);
	else
		ptr = qcom_smem_get_global(__smem, item, size);

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ptr;
}
EXPORT_SYMBOL(qcom_smem_get);
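
/*
 * A minimal usage sketch for the two exported calls above; the item id and
 * struct are hypothetical, chosen only for illustration:
 *
 *	struct my_shared_state *state;
 *	size_t size;
 *	int ret;
 *
 *	ret = qcom_smem_alloc(remote_host, MY_SMEM_ITEM, sizeof(*state));
 *	if (ret < 0 && ret != -EEXIST)
 *		return ret;
 *
 *	state = qcom_smem_get(remote_host, MY_SMEM_ITEM, &size);
 *	if (IS_ERR(state))
 *		return PTR_ERR(state);
 */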

/**
 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 * @host:	the remote processor identifying a partition, or -1
 *
 * To be used by smem clients as a quick way to determine if any new
 * allocations have been made.
 */
int qcom_smem_get_free_space(unsigned host)
{
	struct smem_partition_header *phdr;
	struct smem_header *header;
	unsigned ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);
	} else {
		header = __smem->regions[0].virt_base;
		ret = le32_to_cpu(header->available);
	}

	return ret;
}
EXPORT_SYMBOL(qcom_smem_get_free_space);

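/*
 * Read the boot loader (SBL) version out of the global version item, so that
 * probe can verify it against SMEM_EXPECTED_VERSION.
 */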
static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
	__le32 *versions;
	size_t size;

	versions = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, &size);
	if (IS_ERR(versions)) {
		dev_err(smem->dev, "Unable to read the version item\n");
		return -ENOENT;
	}

	if (size < sizeof(unsigned) * SMEM_MASTER_SBL_VERSION_INDEX) {
		dev_err(smem->dev, "Version item is too small\n");
		return -EINVAL;
	}

	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
}

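/*
 * Scan the partition table at the end of the main region and record, for each
 * partition that involves @local_host, a pointer to its (validated) header in
 * smem->partitions, indexed by the remote host.
 */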
static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
					  unsigned local_host)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	unsigned remote_host;
	u32 version, host0, host1;
	int i;

	ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
		return 0;

	version = le32_to_cpu(ptable->version);
	if (version != 1) {
		dev_err(smem->dev,
			"Unsupported partition header version %d\n", version);
		return -EINVAL;
	}

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);

		if (host0 != local_host && host1 != local_host)
			continue;

		if (!le32_to_cpu(entry->offset))
			continue;

		if (!le32_to_cpu(entry->size))
			continue;

		if (host0 == local_host)
			remote_host = host1;
		else
			remote_host = host0;

		if (remote_host >= SMEM_HOST_COUNT) {
			dev_err(smem->dev,
				"Invalid remote host %d\n",
				remote_host);
			return -EINVAL;
		}

		if (smem->partitions[remote_host]) {
			dev_err(smem->dev,
				"Already found a partition for host %d\n",
				remote_host);
			return -EINVAL;
		}

		header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
		host0 = le16_to_cpu(header->host0);
		host1 = le16_to_cpu(header->host1);

		if (memcmp(header->magic, SMEM_PART_MAGIC,
			    sizeof(header->magic))) {
			dev_err(smem->dev,
				"Partition %d has invalid magic\n", i);
			return -EINVAL;
		}

		if (host0 != local_host && host1 != local_host) {
			dev_err(smem->dev,
				"Partition %d hosts are invalid\n", i);
			return -EINVAL;
		}

		if (host0 != remote_host && host1 != remote_host) {
			dev_err(smem->dev,
				"Partition %d hosts are invalid\n", i);
			return -EINVAL;
		}

		if (header->size != entry->size) {
			dev_err(smem->dev,
				"Partition %d has invalid size\n", i);
			return -EINVAL;
		}

		if (le32_to_cpu(header->offset_free_uncached) > le32_to_cpu(header->size)) {
			dev_err(smem->dev,
				"Partition %d has invalid free pointer\n", i);
			return -EINVAL;
		}

		smem->partitions[remote_host] = header;
	}

	return 0;
}

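/*
 * Resolve the memory region behind the phandle @name and ioremap it as
 * region @i, recording its physical base as the aux_base identifier.
 */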
static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
				const char *name, int i)
{
	struct device_node *np;
	struct resource r;
	int ret;

	np = of_parse_phandle(dev->of_node, name, 0);
	if (!np) {
		dev_err(dev, "No %s specified\n", name);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	of_node_put(np);
	if (ret)
		return ret;

	smem->regions[i].aux_base = (u32)r.start;
	smem->regions[i].size = resource_size(&r);
	smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, resource_size(&r));
	if (!smem->regions[i].virt_base)
		return -ENOMEM;

	return 0;
}

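/*
 * Probe: map the main (and optional RPM message RAM) region, verify that SBL
 * has initialized the heap and that it is of the expected version, enumerate
 * the private partitions for the application processor and claim the remote
 * spinlock used to serialize allocations.
 */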
static int qcom_smem_probe(struct platform_device *pdev)
{
	struct smem_header *header;
	struct qcom_smem *smem;
	size_t array_size;
	int num_regions;
	int hwlock_id;
	u32 version;
	int ret;

	num_regions = 1;
	if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
		num_regions++;

	array_size = num_regions * sizeof(struct smem_region);
	smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
	if (!smem)
		return -ENOMEM;

	smem->dev = &pdev->dev;
	smem->num_regions = num_regions;

	ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
	if (ret)
		return ret;

	if (num_regions > 1) {
		ret = qcom_smem_map_memory(smem, &pdev->dev,
					   "qcom,rpm-msg-ram", 1);
		if (ret)
			return ret;
	}

	header = smem->regions[0].virt_base;
	if (le32_to_cpu(header->initialized) != 1 ||
	    le32_to_cpu(header->reserved)) {
		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
		return -EINVAL;
	}

	version = qcom_smem_get_sbl_version(smem);
	if (version >> 16 != SMEM_EXPECTED_VERSION) {
		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
		return -EINVAL;
	}

	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
	if (ret < 0)
		return ret;

	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
	if (hwlock_id < 0) {
		if (hwlock_id != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to retrieve hwlock\n");
		return hwlock_id;
	}

	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
	if (!smem->hwlock)
		return -ENXIO;

	__smem = smem;

	return 0;
}

static int qcom_smem_remove(struct platform_device *pdev)
{
	hwspin_lock_free(__smem->hwlock);
	__smem = NULL;

	return 0;
}

static const struct of_device_id qcom_smem_of_match[] = {
	{ .compatible = "qcom,smem" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smem_of_match);

static struct platform_driver qcom_smem_driver = {
	.probe = qcom_smem_probe,
	.remove = qcom_smem_remove,
	.driver  = {
		.name = "qcom-smem",
		.of_match_table = qcom_smem_of_match,
		.suppress_bind_attrs = true,
	},
};

static int __init qcom_smem_init(void)
{
	return platform_driver_register(&qcom_smem_driver);
}
arch_initcall(qcom_smem_init);

static void __exit qcom_smem_exit(void)
{
	platform_driver_unregister(&qcom_smem_driver);
}
module_exit(qcom_smem_exit);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
MODULE_LICENSE("GPL v2");