xref: /linux/drivers/soc/qcom/smem.c (revision c7e1e3ccfbd153c890240a391f258efaedfa94d0)
1 /*
2  * Copyright (c) 2015, Sony Mobile Communications AB.
3  * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 and
7  * only version 2 as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  */
14 
15 #include <linux/hwspinlock.h>
16 #include <linux/io.h>
17 #include <linux/module.h>
18 #include <linux/of.h>
19 #include <linux/of_address.h>
20 #include <linux/platform_device.h>
21 #include <linux/slab.h>
22 #include <linux/soc/qcom/smem.h>
23 
24 /*
 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
27  * in the SoC.
28  *
 * All systems contain a global heap, accessible by all processors in the SoC,
30  * with a table of contents data structure (@smem_header) at the beginning of
31  * the main shared memory block.
32  *
33  * The global header contains meta data for allocations as well as a fixed list
34  * of 512 entries (@smem_global_entry) that can be initialized to reference
35  * parts of the shared memory space.
36  *
37  *
38  * In addition to this global heap a set of "private" heaps can be set up at
39  * boot time with access restrictions so that only certain processor pairs can
40  * access the data.
41  *
42  * These partitions are referenced from an optional partition table
43  * (@smem_ptable), that is found 4kB from the end of the main smem region. The
44  * partition table entries (@smem_ptable_entry) lists the involved processors
45  * (or hosts) and their location in the main shared memory region.
46  *
47  * Each partition starts with a header (@smem_partition_header) that identifies
48  * the partition and holds properties for the two internal memory regions. The
49  * two regions are cached and non-cached memory respectively. Each region
50  * contain a link list of allocation headers (@smem_private_entry) followed by
51  * their data.
52  *
53  * Items in the non-cached region are allocated from the start of the partition
54  * while items in the cached region are allocated from the end. The free area
55  * is hence the region between the cached and non-cached offsets.
56  *
57  *
58  * To synchronize allocations in the shared memory heaps a remote spinlock must
59  * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
60  * platforms.
61  *
62  */
63 
64 /*
65  * Item 3 of the global heap contains an array of versions for the various
 * software components in the SoC. We verify that the boot loader version
 * matches the expected version (SMEM_EXPECTED_VERSION) as a sanity check.
68  */
69 #define SMEM_ITEM_VERSION	3
70 #define  SMEM_MASTER_SBL_VERSION_INDEX	7
71 #define  SMEM_EXPECTED_VERSION		11
72 
73 /*
74  * The first 8 items are only to be allocated by the boot loader while
75  * initializing the heap.
76  */
77 #define SMEM_ITEM_LAST_FIXED	8
78 
79 /* Highest accepted item number, for both global and private heaps */
80 #define SMEM_ITEM_COUNT		512
81 
82 /* Processor/host identifier for the application processor */
83 #define SMEM_HOST_APPS		0
84 
85 /* Max number of processors/hosts in a system */
86 #define SMEM_HOST_COUNT		9
87 
/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command:	current command to be executed
 * @status:	status of the currently requested command
 * @params:	parameters to the command
 *
 * Layout is shared with remote processors; do not reorder or resize fields.
 */
struct smem_proc_comm {
	u32 command;
	u32 status;
	u32 params[2];
};
99 
/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated:	boolean to indicate if this entry is used
 * @offset:	offset to the allocated space
 * @size:	size of the allocated space, 8 byte aligned
 * @aux_base:	base address for the memory region used by this unit, or 0 for
 *		the default region. bits 0,1 are reserved
 *
 * Layout is shared with remote processors; do not reorder or resize fields.
 */
struct smem_global_entry {
	u32 allocated;
	u32 offset;
	u32 size;
	u32 aux_base; /* bits 1:0 reserved */
};
/* Strips the two reserved low bits to recover the region base address */
#define AUX_BASE_MASK		0xfffffffc
115 
/**
 * struct smem_header - header found in beginning of primary smem region
 * @proc_comm:		proc_comm communication interface (legacy)
 * @version:		array of versions for the various subsystems
 * @initialized:	boolean to indicate that smem is initialized
 * @free_offset:	index of the first unallocated byte in smem
 * @available:		number of bytes available for allocation
 * @reserved:		reserved field, must be 0
 * @toc:		array of references to items
 *
 * Written by the boot loader; layout is shared with remote processors.
 */
struct smem_header {
	struct smem_proc_comm proc_comm[4];
	u32 version[32];
	u32 initialized;
	u32 free_offset;
	u32 available;
	u32 reserved;
	struct smem_global_entry toc[SMEM_ITEM_COUNT];
};
135 
/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset:	offset, within the main shared memory region, of the partition
 * @size:	size of the partition
 * @flags:	flags for the partition (currently unused)
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @reserved:	reserved entries for later use
 *
 * Layout is defined by the firmware-written partition table; keep in sync.
 */
struct smem_ptable_entry {
	u32 offset;
	u32 size;
	u32 flags;
	u16 host0;
	u16 host1;
	u32 reserved[8];
};
153 
/**
 * struct smem_ptable - partition table for the private partitions
 * @magic:	magic number, must be SMEM_PTABLE_MAGIC
 * @version:	version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved:	for now reserved entries
 * @entry:	list of @smem_ptable_entry for the @num_entries partitions
 *
 * Found 4kB from the end of the main smem region; written by firmware.
 */
struct smem_ptable {
	u32 magic;
	u32 version;
	u32 num_entries;
	u32 reserved[5];
	struct smem_ptable_entry entry[];
};
#define SMEM_PTABLE_MAGIC	0x434f5424 /* "$TOC" */
170 
/**
 * struct smem_partition_header - header of the partitions
 * @magic:	magic number, must be SMEM_PART_MAGIC
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @size:	size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *		this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *		partition
 * @reserved:	for now reserved entries
 *
 * Uncached items grow up from the header, cached items grow down from the
 * end; the free area is between @offset_free_uncached and
 * @offset_free_cached.  Layout is shared with remote processors.
 */
struct smem_partition_header {
	u32 magic;
	u16 host0;
	u16 host1;
	u32 size;
	u32 offset_free_uncached;
	u32 offset_free_cached;
	u32 reserved[3];
};
#define SMEM_PART_MAGIC		0x54525024 /* "$PRT" */
193 
/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary:	magic number, must be SMEM_PRIVATE_CANARY
 * @item:	identifying number of the smem item
 * @size:	size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved:	for now reserved entry
 *
 * Entries form an implicit list: the next header sits at
 * sizeof(header) + @padding_hdr + @size past the current one.
 */
struct smem_private_entry {
	u16 canary;
	u16 item;
	u32 size; /* includes padding bytes */
	u16 padding_data;
	u16 padding_hdr;
	u32 reserved;
};
#define SMEM_PRIVATE_CANARY	0xa5a5
212 
/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base:	identifier of aux_mem base (physical base address, used to
 *		match @smem_global_entry.aux_base references)
 * @virt_base:	virtual base address of memory with this aux_mem identifier
 * @size:	size of the memory region
 */
struct smem_region {
	u32 aux_base;
	void __iomem *virt_base;
	size_t size;
};
224 
225 /**
226  * struct qcom_smem - device data for the smem device
227  * @dev:	device pointer
228  * @hwlock:	reference to a hwspinlock
229  * @partitions:	list of pointers to partitions affecting the current
230  *		processor/host
231  * @num_regions: number of @regions
232  * @regions:	list of the memory regions defining the shared memory
233  */
234 struct qcom_smem {
235 	struct device *dev;
236 
237 	struct hwspinlock *hwlock;
238 
239 	struct smem_partition_header *partitions[SMEM_HOST_COUNT];
240 
241 	unsigned num_regions;
242 	struct smem_region regions[0];
243 };
244 
245 /* Pointer to the one and only smem handle */
246 static struct qcom_smem *__smem;
247 
248 /* Timeout (ms) for the trylock of remote spinlocks */
249 #define HWSPINLOCK_TIMEOUT	1000
250 
/*
 * qcom_smem_alloc_private() - allocate an item in a host-private partition
 * @smem:	smem device data
 * @host:	remote processor/host sharing the partition
 * @item:	item number to allocate
 * @size:	number of bytes requested
 *
 * Caller must hold the remote spinlock.  Returns -ENOENT when no partition
 * exists for @host (the caller then falls back to the global heap), -EEXIST
 * if @item is already allocated, -EINVAL on a corrupted entry chain and
 * -ENOSPC when the uncached area would collide with the cached area.
 */
static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   unsigned host,
				   unsigned item,
				   size_t size)
{
	struct smem_partition_header *phdr;
	struct smem_private_entry *hdr;
	size_t alloc_size;
	void *p;

	/* We're not going to find it if there's no matching partition */
	if (host >= SMEM_HOST_COUNT || !smem->partitions[host])
		return -ENOENT;

	phdr = smem->partitions[host];

	/*
	 * Walk the uncached area's chain of allocation headers to make sure
	 * @item is not already present; @p ends up at the first free byte.
	 */
	p = (void *)phdr + sizeof(*phdr);
	while (p < (void *)phdr + phdr->offset_free_uncached) {
		hdr = p;

		if (hdr->canary != SMEM_PRIVATE_CANARY) {
			dev_err(smem->dev,
				"Found invalid canary in host %d partition\n",
				host);
			return -EINVAL;
		}

		if (hdr->item == item)
			return -EEXIST;

		p += sizeof(*hdr) + hdr->padding_hdr + hdr->size;
	}

	/* Check that we don't grow into the cached region */
	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
	if (p + alloc_size >= (void *)phdr + phdr->offset_free_cached) {
		dev_err(smem->dev, "Out of memory\n");
		return -ENOSPC;
	}

	hdr = p;
	hdr->canary = SMEM_PRIVATE_CANARY;
	hdr->item = item;
	hdr->size = ALIGN(size, 8); /* data size rounded up to 8 bytes */
	hdr->padding_data = hdr->size - size;
	hdr->padding_hdr = 0;

	/*
	 * Ensure the header is written before we advance the free offset, so
	 * that remote processors that do not take the remote spinlock still
	 * get a consistent view of the linked list.
	 */
	wmb();
	phdr->offset_free_uncached += alloc_size;

	return 0;
}
308 
/*
 * qcom_smem_alloc_global() - allocate an item on the global heap
 * @smem:	smem device data
 * @item:	item number to allocate (index into the global toc)
 * @size:	number of bytes requested (rounded up to 8 bytes)
 *
 * Caller must hold the remote spinlock.  Returns -EEXIST if the toc entry
 * is already in use, -EINVAL for an out-of-range item and -ENOMEM when the
 * heap has insufficient space.
 */
static int qcom_smem_alloc_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t size)
{
	struct smem_header *header;
	struct smem_global_entry *entry;

	if (WARN_ON(item >= SMEM_ITEM_COUNT))
		return -EINVAL;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)
		return -EEXIST;

	size = ALIGN(size, 8);
	if (WARN_ON(size > header->available))
		return -ENOMEM;

	/* Carve the item out of the free area at the current free offset */
	entry->offset = header->free_offset;
	entry->size = size;

	/*
	 * Ensure the header is consistent before we mark the item allocated,
	 * so that remote processors will get a consistent view of the item
	 * even though they do not take the spinlock on read.
	 */
	wmb();
	entry->allocated = 1;

	header->free_offset += size;
	header->available -= size;

	return 0;
}
344 
345 /**
346  * qcom_smem_alloc() - allocate space for a smem item
347  * @host:	remote processor id, or -1
348  * @item:	smem item handle
349  * @size:	number of bytes to be allocated
350  *
351  * Allocate space for a given smem item of size @size, given that the item is
352  * not yet allocated.
353  */
354 int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
355 {
356 	unsigned long flags;
357 	int ret;
358 
359 	if (!__smem)
360 		return -EPROBE_DEFER;
361 
362 	if (item < SMEM_ITEM_LAST_FIXED) {
363 		dev_err(__smem->dev,
364 			"Rejecting allocation of static entry %d\n", item);
365 		return -EINVAL;
366 	}
367 
368 	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
369 					  HWSPINLOCK_TIMEOUT,
370 					  &flags);
371 	if (ret)
372 		return ret;
373 
374 	ret = qcom_smem_alloc_private(__smem, host, item, size);
375 	if (ret == -ENOENT)
376 		ret = qcom_smem_alloc_global(__smem, item, size);
377 
378 	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
379 
380 	return ret;
381 }
382 EXPORT_SYMBOL(qcom_smem_alloc);
383 
384 static int qcom_smem_get_global(struct qcom_smem *smem,
385 				unsigned item,
386 				void **ptr,
387 				size_t *size)
388 {
389 	struct smem_header *header;
390 	struct smem_region *area;
391 	struct smem_global_entry *entry;
392 	u32 aux_base;
393 	unsigned i;
394 
395 	if (WARN_ON(item >= SMEM_ITEM_COUNT))
396 		return -EINVAL;
397 
398 	header = smem->regions[0].virt_base;
399 	entry = &header->toc[item];
400 	if (!entry->allocated)
401 		return -ENXIO;
402 
403 	if (ptr != NULL) {
404 		aux_base = entry->aux_base & AUX_BASE_MASK;
405 
406 		for (i = 0; i < smem->num_regions; i++) {
407 			area = &smem->regions[i];
408 
409 			if (area->aux_base == aux_base || !aux_base) {
410 				*ptr = area->virt_base + entry->offset;
411 				break;
412 			}
413 		}
414 	}
415 	if (size != NULL)
416 		*size = entry->size;
417 
418 	return 0;
419 }
420 
/*
 * qcom_smem_get_private() - look up an item in a host-private partition
 * @smem:	smem device data
 * @host:	remote processor/host sharing the partition
 * @item:	item number to resolve
 * @ptr:	if non-NULL, filled with the item's virtual address
 * @size:	if non-NULL, filled with the item's data size (excluding padding)
 *
 * Returns -ENOENT when no partition exists for @host or the item is not
 * found (the caller then falls back to the global heap), -EINVAL on a
 * corrupted entry chain.
 */
static int qcom_smem_get_private(struct qcom_smem *smem,
				 unsigned host,
				 unsigned item,
				 void **ptr,
				 size_t *size)
{
	struct smem_partition_header *phdr;
	struct smem_private_entry *hdr;
	void *p;

	/* We're not going to find it if there's no matching partition */
	if (host >= SMEM_HOST_COUNT || !smem->partitions[host])
		return -ENOENT;

	phdr = smem->partitions[host];

	/* Walk the uncached area's chain of allocation headers */
	p = (void *)phdr + sizeof(*phdr);
	while (p < (void *)phdr + phdr->offset_free_uncached) {
		hdr = p;

		if (hdr->canary != SMEM_PRIVATE_CANARY) {
			dev_err(smem->dev,
				"Found invalid canary in host %d partition\n",
				host);
			return -EINVAL;
		}

		if (hdr->item == item) {
			/* Data follows the header and its optional padding */
			if (ptr != NULL)
				*ptr = p + sizeof(*hdr) + hdr->padding_hdr;

			if (size != NULL)
				*size = hdr->size - hdr->padding_data;

			return 0;
		}

		p += sizeof(*hdr) + hdr->padding_hdr + hdr->size;
	}

	return -ENOENT;
}
463 
464 /**
465  * qcom_smem_get() - resolve ptr of size of a smem item
466  * @host:	the remote processor, or -1
467  * @item:	smem item handle
468  * @ptr:	pointer to be filled out with address of the item
469  * @size:	pointer to be filled out with size of the item
470  *
471  * Looks up pointer and size of a smem item.
472  */
473 int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size)
474 {
475 	unsigned long flags;
476 	int ret;
477 
478 	if (!__smem)
479 		return -EPROBE_DEFER;
480 
481 	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
482 					  HWSPINLOCK_TIMEOUT,
483 					  &flags);
484 	if (ret)
485 		return ret;
486 
487 	ret = qcom_smem_get_private(__smem, host, item, ptr, size);
488 	if (ret == -ENOENT)
489 		ret = qcom_smem_get_global(__smem, item, ptr, size);
490 
491 	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
492 	return ret;
493 
494 }
495 EXPORT_SYMBOL(qcom_smem_get);
496 
497 /**
498  * qcom_smem_get_free_space() - retrieve amount of free space in a partition
499  * @host:	the remote processor identifying a partition, or -1
500  *
501  * To be used by smem clients as a quick way to determine if any new
502  * allocations has been made.
503  */
504 int qcom_smem_get_free_space(unsigned host)
505 {
506 	struct smem_partition_header *phdr;
507 	struct smem_header *header;
508 	unsigned ret;
509 
510 	if (!__smem)
511 		return -EPROBE_DEFER;
512 
513 	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
514 		phdr = __smem->partitions[host];
515 		ret = phdr->offset_free_cached - phdr->offset_free_uncached;
516 	} else {
517 		header = __smem->regions[0].virt_base;
518 		ret = header->available;
519 	}
520 
521 	return ret;
522 }
523 EXPORT_SYMBOL(qcom_smem_get_free_space);
524 
525 static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
526 {
527 	unsigned *versions;
528 	size_t size;
529 	int ret;
530 
531 	ret = qcom_smem_get_global(smem, SMEM_ITEM_VERSION,
532 				   (void **)&versions, &size);
533 	if (ret < 0) {
534 		dev_err(smem->dev, "Unable to read the version item\n");
535 		return -ENOENT;
536 	}
537 
538 	if (size < sizeof(unsigned) * SMEM_MASTER_SBL_VERSION_INDEX) {
539 		dev_err(smem->dev, "Version item is too small\n");
540 		return -EINVAL;
541 	}
542 
543 	return versions[SMEM_MASTER_SBL_VERSION_INDEX];
544 }
545 
/*
 * qcom_smem_enumerate_partitions() - discover partitions involving local_host
 * @smem:	smem device data
 * @local_host:	processor/host id of the local processor
 *
 * Parses the optional partition table at the end of the first region and
 * records, in smem->partitions[], a pointer to the header of every valid
 * partition that involves @local_host, indexed by the remote host id.
 *
 * Returns 0 on success (including when no partition table is present) and
 * -EINVAL when the table or a matching partition fails validation.
 */
static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
					  unsigned local_host)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	unsigned remote_host;
	int i;

	/* The partition table lives 4kB from the end of the main region */
	ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
	if (ptable->magic != SMEM_PTABLE_MAGIC)
		return 0; /* no table present; not an error */

	if (ptable->version != 1) {
		dev_err(smem->dev,
			"Unsupported partition header version %d\n",
			ptable->version);
		return -EINVAL;
	}

	for (i = 0; i < ptable->num_entries; i++) {
		entry = &ptable->entry[i];

		/* Skip partitions that do not involve the local host */
		if (entry->host0 != local_host && entry->host1 != local_host)
			continue;

		/* Skip unpopulated entries */
		if (!entry->offset)
			continue;

		if (!entry->size)
			continue;

		/* The other end of the pair is the remote host */
		if (entry->host0 == local_host)
			remote_host = entry->host1;
		else
			remote_host = entry->host0;

		if (remote_host >= SMEM_HOST_COUNT) {
			dev_err(smem->dev,
				"Invalid remote host %d\n",
				remote_host);
			return -EINVAL;
		}

		/* Only one partition per local/remote pair is allowed */
		if (smem->partitions[remote_host]) {
			dev_err(smem->dev,
				"Already found a partition for host %d\n",
				remote_host);
			return -EINVAL;
		}

		header = smem->regions[0].virt_base + entry->offset;

		/* Validate the partition header against the table entry */
		if (header->magic != SMEM_PART_MAGIC) {
			dev_err(smem->dev,
				"Partition %d has invalid magic\n", i);
			return -EINVAL;
		}

		if (header->host0 != local_host && header->host1 != local_host) {
			dev_err(smem->dev,
				"Partition %d hosts are invalid\n", i);
			return -EINVAL;
		}

		if (header->host0 != remote_host && header->host1 != remote_host) {
			dev_err(smem->dev,
				"Partition %d hosts are invalid\n", i);
			return -EINVAL;
		}

		if (header->size != entry->size) {
			dev_err(smem->dev,
				"Partition %d has invalid size\n", i);
			return -EINVAL;
		}

		if (header->offset_free_uncached > header->size) {
			dev_err(smem->dev,
				"Partition %d has invalid free pointer\n", i);
			return -EINVAL;
		}

		smem->partitions[remote_host] = header;
	}

	return 0;
}
634 
635 static int qcom_smem_count_mem_regions(struct platform_device *pdev)
636 {
637 	struct resource *res;
638 	int num_regions = 0;
639 	int i;
640 
641 	for (i = 0; i < pdev->num_resources; i++) {
642 		res = &pdev->resource[i];
643 
644 		if (resource_type(res) == IORESOURCE_MEM)
645 			num_regions++;
646 	}
647 
648 	return num_regions;
649 }
650 
/*
 * qcom_smem_probe() - map the smem regions and set up the global handle
 * @pdev:	the smem platform device
 *
 * Maps the main "memory-region" plus any additional IORESOURCE_MEM
 * resources, validates the SBL-initialized header and version, enumerates
 * private partitions for the local host and acquires the remote spinlock.
 * On success publishes the handle in __smem for the exported API.
 */
static int qcom_smem_probe(struct platform_device *pdev)
{
	struct smem_header *header;
	struct device_node *np;
	struct qcom_smem *smem;
	struct resource *res;
	struct resource r;
	size_t array_size;
	int num_regions = 0;
	int hwlock_id;
	u32 version;
	int ret;
	int i;

	/* One slot for the main region plus one per aux memory resource */
	num_regions = qcom_smem_count_mem_regions(pdev) + 1;

	array_size = num_regions * sizeof(struct smem_region);
	smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
	if (!smem)
		return -ENOMEM;

	smem->dev = &pdev->dev;
	smem->num_regions = num_regions;

	/* The main region is described by the "memory-region" phandle */
	np = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
	if (!np) {
		dev_err(&pdev->dev, "No memory-region specified\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	of_node_put(np);
	if (ret)
		return ret;

	smem->regions[0].aux_base = (u32)r.start;
	smem->regions[0].size = resource_size(&r);
	smem->regions[0].virt_base = devm_ioremap_nocache(&pdev->dev,
							  r.start,
							  resource_size(&r));
	if (!smem->regions[0].virt_base)
		return -ENOMEM;

	/* Map the remaining (aux) regions from the platform resources */
	for (i = 1; i < num_regions; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i - 1);

		smem->regions[i].aux_base = (u32)res->start;
		smem->regions[i].size = resource_size(res);
		smem->regions[i].virt_base = devm_ioremap_nocache(&pdev->dev,
								  res->start,
								  resource_size(res));
		if (!smem->regions[i].virt_base)
			return -ENOMEM;
	}

	/* The boot loader must have initialized the heap before we run */
	header = smem->regions[0].virt_base;
	if (header->initialized != 1 || header->reserved) {
		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
		return -EINVAL;
	}

	/* Major version lives in the upper 16 bits of the version entry */
	version = qcom_smem_get_sbl_version(smem);
	if (version >> 16 != SMEM_EXPECTED_VERSION) {
		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
		return -EINVAL;
	}

	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
	if (ret < 0)
		return ret;

	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
	if (hwlock_id < 0) {
		dev_err(&pdev->dev, "failed to retrieve hwlock\n");
		return hwlock_id;
	}

	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
	if (!smem->hwlock)
		return -ENXIO;

	/* Publish the handle last, once the device is fully functional */
	__smem = smem;

	return 0;
}
736 
737 static int qcom_smem_remove(struct platform_device *pdev)
738 {
739 	__smem = NULL;
740 	hwspin_lock_free(__smem->hwlock);
741 
742 	return 0;
743 }
744 
/* Device tree compatibles handled by this driver */
static const struct of_device_id qcom_smem_of_match[] = {
	{ .compatible = "qcom,smem" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smem_of_match);
750 
/* Platform driver; bind attrs suppressed since smem must not be unbound */
static struct platform_driver qcom_smem_driver = {
	.probe = qcom_smem_probe,
	.remove = qcom_smem_remove,
	.driver  = {
		.name = "qcom-smem",
		.of_match_table = qcom_smem_of_match,
		.suppress_bind_attrs = true,
	},
};
760 
/* Registered at arch_initcall time so dependent drivers can probe later */
static int __init qcom_smem_init(void)
{
	return platform_driver_register(&qcom_smem_driver);
}
arch_initcall(qcom_smem_init);
766 
/* Module unload hook; unregisters the platform driver */
static void __exit qcom_smem_exit(void)
{
	platform_driver_unregister(&qcom_smem_driver);
}
module_exit(qcom_smem_exit)
772 
773 MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
774 MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
775 MODULE_LICENSE("GPL v2");
776