// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>

/*
 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
 * in the SoC.
 *
 * All systems contain a global heap, accessible by all processors in the SoC,
 * with a table of contents data structure (@smem_header) at the beginning of
 * the main shared memory block.
 *
 * The global header contains metadata for allocations as well as a fixed list
 * of 512 entries (@smem_global_entry) that can be initialized to reference
 * parts of the shared memory space.
 *
 * In addition to this global heap a set of "private" heaps can be set up at
 * boot time with access restrictions so that only certain processor pairs can
 * access the data.
 *
 * These partitions are referenced from an optional partition table
 * (@smem_ptable), which is found 4kB from the end of the main smem region. The
 * partition table entries (@smem_ptable_entry) list the involved processors
 * (or hosts) and their location in the main shared memory region.
 *
 * Each partition starts with a header (@smem_partition_header) that identifies
 * the partition and holds properties for the two internal memory regions. The
 * two regions are cached and non-cached memory respectively. Each region
 * contains a linked list of allocation headers (@smem_private_entry) followed
 * by their data.
 *
 * Items in the non-cached region are allocated from the start of the partition
 * while items in the cached region are allocated from the end. The free area
 * is hence the region between the cached and non-cached offsets. The header of
 * cached items comes after the data.
 *
 * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
 * for the global heap. A new global partition is created from the global heap
 * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
 * set by the bootloader.
 *
 * To synchronize allocations in the shared memory heaps a remote spinlock must
 * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
 * platforms.
 */

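/*
 * Illustrative layout of a private partition, per the description above
 * (field sizes not to scale; actual offsets vary by platform):
 *
 *	+----------------------------------------------+ 0
 *	| struct smem_partition_header                 |
 *	+----------------------------------------------+
 *	| hdr | data | hdr | data | ... (uncached)     |
 *	+----------------------------------------------+ offset_free_uncached
 *	|                                              |
 *	|                  free space                  |
 *	|                                              |
 *	+----------------------------------------------+ offset_free_cached
 *	| ... | data | hdr | data | hdr    (cached)    |
 *	+----------------------------------------------+ size
 *
 * Uncached items grow towards higher offsets, cached items grow towards
 * lower offsets, and each cached item's header is placed after its data.
 */
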
/*
 * The version member of the smem header contains an array of versions for the
 * various software components in the SoC. We verify that the boot loader
 * version is a valid version as a sanity check.
 */
#define SMEM_MASTER_SBL_VERSION_INDEX	7
#define SMEM_GLOBAL_HEAP_VERSION	11
#define SMEM_GLOBAL_PART_VERSION	12

/*
 * The first 8 items are only to be allocated by the boot loader while
 * initializing the heap.
 */
#define SMEM_ITEM_LAST_FIXED	8

/* Highest accepted item number, for both global and private heaps */
#define SMEM_ITEM_COUNT		512

/* Processor/host identifier for the application processor */
#define SMEM_HOST_APPS		0

/* Processor/host identifier for the global partition */
#define SMEM_GLOBAL_HOST	0xfffe

/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT		14

/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command:	current command to be executed
 * @status:	status of the currently requested command
 * @params:	parameters to the command
 */
struct smem_proc_comm {
	__le32 command;
	__le32 status;
	__le32 params[2];
};

/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated:	boolean to indicate if this entry is used
 * @offset:	offset to the allocated space
 * @size:	size of the allocated space, 8 byte aligned
 * @aux_base:	base address for the memory region used by this unit, or 0 for
 *		the default region. bits 0,1 are reserved
 */
struct smem_global_entry {
	__le32 allocated;
	__le32 offset;
	__le32 size;
	__le32 aux_base; /* bits 1:0 reserved */
};
#define AUX_BASE_MASK		0xfffffffc

/**
 * struct smem_header - header found in beginning of primary smem region
 * @proc_comm:		proc_comm communication interface (legacy)
 * @version:		array of versions for the various subsystems
 * @initialized:	boolean to indicate that smem is initialized
 * @free_offset:	index of the first unallocated byte in smem
 * @available:		number of bytes available for allocation
 * @reserved:		reserved field, must be 0
 * @toc:		array of references to items
 */
struct smem_header {
	struct smem_proc_comm proc_comm[4];
	__le32 version[32];
	__le32 initialized;
	__le32 free_offset;
	__le32 available;
	__le32 reserved;
	struct smem_global_entry toc[SMEM_ITEM_COUNT];
};

/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset:	offset, within the main shared memory region, of the partition
 * @size:	size of the partition
 * @flags:	flags for the partition (currently unused)
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @cacheline:	alignment for "cached" entries
 * @reserved:	reserved entries for later use
 */
struct smem_ptable_entry {
	__le32 offset;
	__le32 size;
	__le32 flags;
	__le16 host0;
	__le16 host1;
	__le32 cacheline;
	__le32 reserved[7];
};

/**
 * struct smem_ptable - partition table for the private partitions
 * @magic:	magic number, must be SMEM_PTABLE_MAGIC
 * @version:	version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved:	for now reserved entries
 * @entry:	list of @smem_ptable_entry for the @num_entries partitions
 */
struct smem_ptable {
	u8 magic[4];
	__le32 version;
	__le32 num_entries;
	__le32 reserved[5];
	struct smem_ptable_entry entry[];
};

static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */

/**
 * struct smem_partition_header - header of the partitions
 * @magic:	magic number, must be SMEM_PART_MAGIC
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @size:	size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *		this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *		partition
 * @reserved:	for now reserved entries
 */
struct smem_partition_header {
	u8 magic[4];
	__le16 host0;
	__le16 host1;
	__le32 size;
	__le32 offset_free_uncached;
	__le32 offset_free_cached;
	__le32 reserved[3];
};

static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; /* "$PRT" */

/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary:	magic number, must be SMEM_PRIVATE_CANARY
 * @item:	identifying number of the smem item
 * @size:	size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved:	for now reserved entry
 */
struct smem_private_entry {
	u16 canary; /* bytes are the same so no swapping needed */
	__le16 item;
	__le32 size; /* includes padding bytes */
	__le16 padding_data;
	__le16 padding_hdr;
	__le32 reserved;
};
#define SMEM_PRIVATE_CANARY	0xa5a5
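
/*
 * Byte layout of a single entry, for illustration. In the uncached region
 * the header precedes its item data:
 *
 *	| struct smem_private_entry | padding_hdr bytes | data (size bytes) |
 *
 * In the cached region the header is placed after its item data, so that
 * list is walked backwards from the end of the partition:
 *
 *	| data (size bytes) | struct smem_private_entry |
 *
 * In both cases the last padding_data bytes of the data are padding, added
 * to keep the item size 8-byte aligned.
 */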

/**
 * struct smem_info - smem region info located after the table of contents
 * @magic:	magic number, must be SMEM_INFO_MAGIC
 * @size:	size of the smem region
 * @base_addr:	base address of the smem region
 * @reserved:	for now reserved entry
 * @num_items:	highest accepted item number
 */
struct smem_info {
	u8 magic[4];
	__le32 size;
	__le32 base_addr;
	__le32 reserved;
	__le16 num_items;
};

static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* "SIII" */

/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base:	identifier of aux_mem base
 * @virt_base:	virtual base address of memory with this aux_mem identifier
 * @size:	size of the memory region
 */
struct smem_region {
	phys_addr_t aux_base;
	void __iomem *virt_base;
	size_t size;
};

/**
 * struct qcom_smem - device data for the smem device
 * @dev:	device pointer
 * @hwlock:	reference to a hwspinlock
 * @global_partition:	pointer to global partition when in use
 * @global_cacheline:	cacheline size for global partition
 * @partitions:	list of pointers to partitions affecting the current
 *		processor/host
 * @cacheline:	list of cacheline sizes for each host
 * @item_count: max accepted item number
 * @socinfo:	platform device pointer
 * @num_regions: number of @regions
 * @regions:	list of the memory regions defining the shared memory
 */
struct qcom_smem {
	struct device *dev;

	struct hwspinlock *hwlock;

	struct smem_partition_header *global_partition;
	size_t global_cacheline;
	struct smem_partition_header *partitions[SMEM_HOST_COUNT];
	size_t cacheline[SMEM_HOST_COUNT];
	u32 item_count;
	struct platform_device *socinfo;

	unsigned num_regions;
	struct smem_region regions[];
};

static void *
phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_uncached);
}

static struct smem_private_entry *
phdr_to_first_cached_entry(struct smem_partition_header *phdr,
					size_t cacheline)
{
	void *p = phdr;
	struct smem_private_entry *e;

	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
}

static void *
phdr_to_last_cached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_cached);
}

static struct smem_private_entry *
phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + sizeof(*phdr);
}

static struct smem_private_entry *
uncached_entry_next(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
	       le32_to_cpu(e->size);
}

static struct smem_private_entry *
cached_entry_next(struct smem_private_entry *e, size_t cacheline)
{
	void *p = e;

	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
}

static void *uncached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
}

static void *cached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p - le32_to_cpu(e->size);
}

/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;

/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT	1000

static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   struct smem_partition_header *phdr,
				   unsigned item,
				   size_t size)
{
	struct smem_private_entry *hdr, *end;
	size_t alloc_size;
	void *cached;

	hdr = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);
	cached = phdr_to_last_cached_entry(phdr);

	while (hdr < end) {
		if (hdr->canary != SMEM_PRIVATE_CANARY)
			goto bad_canary;
		if (le16_to_cpu(hdr->item) == item)
			return -EEXIST;

		hdr = uncached_entry_next(hdr);
	}

	/* Check that we don't grow into the cached region */
	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
	if ((void *)hdr + alloc_size > cached) {
		dev_err(smem->dev, "Out of memory\n");
		return -ENOSPC;
	}

	hdr->canary = SMEM_PRIVATE_CANARY;
	hdr->item = cpu_to_le16(item);
	hdr->size = cpu_to_le32(ALIGN(size, 8));
	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
	hdr->padding_hdr = 0;

	/*
	 * Ensure the header is written before we advance the free offset, so
	 * that remote processors that do not take the remote spinlock still
	 * get a consistent view of the linked list.
	 */
	wmb();
	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);

	return 0;
bad_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return -EINVAL;
}

static int qcom_smem_alloc_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t size)
{
	struct smem_global_entry *entry;
	struct smem_header *header;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)
		return -EEXIST;

	size = ALIGN(size, 8);
	if (WARN_ON(size > le32_to_cpu(header->available)))
		return -ENOMEM;

	entry->offset = header->free_offset;
	entry->size = cpu_to_le32(size);

	/*
	 * Ensure the header is consistent before we mark the item allocated,
	 * so that remote processors will get a consistent view of the item
	 * even though they do not take the spinlock on read.
	 */
	wmb();
	entry->allocated = cpu_to_le32(1);

	le32_add_cpu(&header->free_offset, size);
	le32_add_cpu(&header->available, -size);

	return 0;
}

/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host:	remote processor id, or -1
 * @item:	smem item handle
 * @size:	number of bytes to be allocated
 *
 * Allocate space for a given smem item of size @size, given that the item is
 * not yet allocated.
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
	struct smem_partition_header *phdr;
	unsigned long flags;
	int ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (item < SMEM_ITEM_LAST_FIXED) {
		dev_err(__smem->dev,
			"Rejecting allocation of static entry %d\n", item);
		return -EINVAL;
	}

	if (WARN_ON(item >= __smem->item_count))
		return -EINVAL;

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ret;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
	} else {
		ret = qcom_smem_alloc_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ret;
}
EXPORT_SYMBOL(qcom_smem_alloc);
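
/*
 * Example use (a sketch; MY_SMEM_ITEM and remote_pid are hypothetical and
 * would come from the client's protocol definition):
 *
 *	ret = qcom_smem_alloc(remote_pid, MY_SMEM_ITEM, 32);
 *	if (ret < 0 && ret != -EEXIST)
 *		return ret;
 *
 * -EEXIST is not necessarily an error here, as the item may already have
 * been allocated by either side; -EPROBE_DEFER is returned if smem itself
 * has not probed yet.
 */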

static void *qcom_smem_get_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t *size)
{
	struct smem_header *header;
	struct smem_region *region;
	struct smem_global_entry *entry;
	u32 aux_base;
	unsigned i;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (!entry->allocated)
		return ERR_PTR(-ENXIO);

	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;

	for (i = 0; i < smem->num_regions; i++) {
		region = &smem->regions[i];

		if ((u32)region->aux_base == aux_base || !aux_base) {
			if (size != NULL)
				*size = le32_to_cpu(entry->size);
			return region->virt_base + le32_to_cpu(entry->offset);
		}
	}

	return ERR_PTR(-ENOENT);
}

static void *qcom_smem_get_private(struct qcom_smem *smem,
				   struct smem_partition_header *phdr,
				   size_t cacheline,
				   unsigned item,
				   size_t *size)
{
	struct smem_private_entry *e, *end;

	e = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);

	while (e < end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return uncached_entry_to_item(e);
		}

		e = uncached_entry_next(e);
	}

	/* Item was not found in the uncached list, search the cached list */

	e = phdr_to_first_cached_entry(phdr, cacheline);
	end = phdr_to_last_cached_entry(phdr);

	while (e > end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return cached_entry_to_item(e);
		}

		e = cached_entry_next(e, cacheline);
	}

	return ERR_PTR(-ENOENT);

invalid_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
			le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return ERR_PTR(-EINVAL);
}

/**
 * qcom_smem_get() - resolve the pointer and size of a smem item
 * @host:	the remote processor, or -1
 * @item:	smem item handle
 * @size:	pointer to be filled out with size of the item
 *
 * Looks up the smem item and returns a pointer to it. The size of the smem
 * item is returned in @size.
 */
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
	struct smem_partition_header *phdr;
	unsigned long flags;
	size_t cacheln;
	int ret;
	void *ptr = ERR_PTR(-EPROBE_DEFER);

	if (!__smem)
		return ptr;

	if (WARN_ON(item >= __smem->item_count))
		return ERR_PTR(-EINVAL);

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ERR_PTR(ret);

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		cacheln = __smem->cacheline[host];
		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		cacheln = __smem->global_cacheline;
		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
	} else {
		ptr = qcom_smem_get_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ptr;
}
EXPORT_SYMBOL(qcom_smem_get);
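
/*
 * Example use, continuing the hypothetical allocation sketch above. The
 * returned pointer references the shared region directly, so the item is
 * read and written in place:
 *
 *	size_t size;
 *	void *ptr;
 *
 *	ptr = qcom_smem_get(remote_pid, MY_SMEM_ITEM, &size);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 */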

/**
 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 * @host:	the remote processor identifying a partition, or -1
 *
 * To be used by smem clients as a quick way to determine if any new
 * allocations have been made.
 */
int qcom_smem_get_free_space(unsigned host)
{
	struct smem_partition_header *phdr;
	struct smem_header *header;
	unsigned ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);
	} else {
		header = __smem->regions[0].virt_base;
		ret = le32_to_cpu(header->available);
	}

	return ret;
}
EXPORT_SYMBOL(qcom_smem_get_free_space);
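
/*
 * For example (sketch only), a client can snapshot the free space and
 * compare later, as any new allocation shrinks the returned value:
 *
 *	int before = qcom_smem_get_free_space(remote_pid);
 *	...
 *	if (qcom_smem_get_free_space(remote_pid) != before)
 *		handle_new_allocations();
 *
 * handle_new_allocations() is a hypothetical client callback.
 */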

/**
 * qcom_smem_virt_to_phys() - return the physical address associated
 * with an smem item pointer (previously returned by qcom_smem_get())
 * @p:	the virtual address to convert
 *
 * Returns 0 if the pointer provided is not within any smem region.
 */
phys_addr_t qcom_smem_virt_to_phys(void *p)
{
	unsigned i;

	for (i = 0; i < __smem->num_regions; i++) {
		struct smem_region *region = &__smem->regions[i];

		if (p < region->virt_base)
			continue;
		if (p < region->virt_base + region->size) {
			u64 offset = p - region->virt_base;

			return region->aux_base + offset;
		}
	}

	return 0;
}
EXPORT_SYMBOL(qcom_smem_virt_to_phys);
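
/*
 * Example use (sketch, building on the qcom_smem_get() example above):
 *
 *	phys_addr_t phys = qcom_smem_virt_to_phys(ptr);
 *	if (!phys)
 *		return -EINVAL;
 *
 * A zero return means the pointer did not fall inside any smem region.
 */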

static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
	struct smem_header *header;
	__le32 *versions;

	header = smem->regions[0].virt_base;
	versions = header->version;

	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
}

static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	u32 version;

	ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
		return ERR_PTR(-ENOENT);

	version = le32_to_cpu(ptable->version);
	if (version != 1) {
		dev_err(smem->dev,
			"Unsupported partition header version %u\n", version);
		return ERR_PTR(-EINVAL);
	}
	return ptable;
}

static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	struct smem_info *info;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR_OR_NULL(ptable))
		return SMEM_ITEM_COUNT;

	info = (struct smem_info *)&ptable->entry[le32_to_cpu(ptable->num_entries)];
	if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
		return SMEM_ITEM_COUNT;

	return le16_to_cpu(info->num_items);
}

/*
 * Validate the partition header for a partition whose partition
 * table entry is supplied.  Returns a pointer to its header if
 * valid, or a null pointer otherwise.
 */
static struct smem_partition_header *
qcom_smem_partition_header(struct qcom_smem *smem,
		struct smem_ptable_entry *entry, u16 host0, u16 host1)
{
	struct smem_partition_header *header;
	u32 size;

	header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);

	if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
		dev_err(smem->dev, "bad partition magic %4ph\n", header->magic);
		return NULL;
	}

	if (host0 != le16_to_cpu(header->host0)) {
		dev_err(smem->dev, "bad host0 (%hu != %hu)\n",
				host0, le16_to_cpu(header->host0));
		return NULL;
	}
	if (host1 != le16_to_cpu(header->host1)) {
		dev_err(smem->dev, "bad host1 (%hu != %hu)\n",
				host1, le16_to_cpu(header->host1));
		return NULL;
	}

	size = le32_to_cpu(header->size);
	if (size != le32_to_cpu(entry->size)) {
		dev_err(smem->dev, "bad partition size (%u != %u)\n",
			size, le32_to_cpu(entry->size));
		return NULL;
	}

	if (le32_to_cpu(header->offset_free_uncached) > size) {
		dev_err(smem->dev, "bad partition free uncached (%u > %u)\n",
			le32_to_cpu(header->offset_free_uncached), size);
		return NULL;
	}

	return header;
}

static int qcom_smem_set_global_partition(struct qcom_smem *smem)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	bool found = false;
	int i;

	if (smem->global_partition) {
		dev_err(smem->dev, "Already found the global partition\n");
		return -EINVAL;
	}

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST)
			continue;

		if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_err(smem->dev, "Missing entry for global partition\n");
		return -EINVAL;
	}

	header = qcom_smem_partition_header(smem, entry,
				SMEM_GLOBAL_HOST, SMEM_GLOBAL_HOST);
	if (!header)
		return -EINVAL;

	smem->global_partition = header;
	smem->global_cacheline = le32_to_cpu(entry->cacheline);

	return 0;
}

static int
qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	unsigned int remote_host;
	u16 host0, host1;
	int i;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);
		if (host0 == local_host)
			remote_host = host1;
		else if (host1 == local_host)
			remote_host = host0;
		else
			continue;

		if (remote_host >= SMEM_HOST_COUNT) {
			dev_err(smem->dev, "bad host %u\n", remote_host);
			return -EINVAL;
		}

		if (smem->partitions[remote_host]) {
			dev_err(smem->dev, "duplicate host %u\n", remote_host);
			return -EINVAL;
		}

		header = qcom_smem_partition_header(smem, entry, host0, host1);
		if (!header)
			return -EINVAL;

		smem->partitions[remote_host] = header;
		smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
	}

	return 0;
}

static int qcom_smem_resolve_mem(struct qcom_smem *smem, const char *name,
				 struct smem_region *region)
{
	struct device *dev = smem->dev;
	struct device_node *np;
	struct resource r;
	int ret;

	np = of_parse_phandle(dev->of_node, name, 0);
	if (!np) {
		dev_err(dev, "No %s specified\n", name);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	of_node_put(np);
	if (ret)
		return ret;

	region->aux_base = r.start;
	region->size = resource_size(&r);

	return 0;
}

static int qcom_smem_probe(struct platform_device *pdev)
{
	struct smem_header *header;
	struct reserved_mem *rmem;
	struct qcom_smem *smem;
	size_t array_size;
	int num_regions;
	int hwlock_id;
	u32 version;
	int ret;
	int i;

	num_regions = 1;
	if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
		num_regions++;

	array_size = num_regions * sizeof(struct smem_region);
	smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
	if (!smem)
		return -ENOMEM;

	smem->dev = &pdev->dev;
	smem->num_regions = num_regions;

	rmem = of_reserved_mem_lookup(pdev->dev.of_node);
	if (rmem) {
		smem->regions[0].aux_base = rmem->base;
		smem->regions[0].size = rmem->size;
	} else {
		/*
		 * Fall back to the memory-region reference, if we're not a
		 * reserved-memory node.
		 */
		ret = qcom_smem_resolve_mem(smem, "memory-region", &smem->regions[0]);
		if (ret)
			return ret;
	}

	if (num_regions > 1) {
		ret = qcom_smem_resolve_mem(smem, "qcom,rpm-msg-ram", &smem->regions[1]);
		if (ret)
			return ret;
	}

	for (i = 0; i < num_regions; i++) {
		smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev,
							     smem->regions[i].aux_base,
							     smem->regions[i].size);
		if (!smem->regions[i].virt_base) {
			dev_err(&pdev->dev, "failed to remap %pa\n", &smem->regions[i].aux_base);
			return -ENOMEM;
		}
	}

	header = smem->regions[0].virt_base;
	if (le32_to_cpu(header->initialized) != 1 ||
	    le32_to_cpu(header->reserved)) {
		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
		return -EINVAL;
	}

	version = qcom_smem_get_sbl_version(smem);
	switch (version >> 16) {
	case SMEM_GLOBAL_PART_VERSION:
		ret = qcom_smem_set_global_partition(smem);
		if (ret < 0)
			return ret;
		smem->item_count = qcom_smem_get_item_count(smem);
		break;
	case SMEM_GLOBAL_HEAP_VERSION:
		smem->item_count = SMEM_ITEM_COUNT;
		break;
	default:
		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
		return -EINVAL;
	}

	BUILD_BUG_ON(SMEM_HOST_APPS >= SMEM_HOST_COUNT);
	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
	if (ret < 0 && ret != -ENOENT)
		return ret;

	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
	if (hwlock_id < 0) {
		if (hwlock_id != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to retrieve hwlock\n");
		return hwlock_id;
	}

	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
	if (!smem->hwlock)
		return -ENXIO;

	__smem = smem;

	smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
						      PLATFORM_DEVID_NONE, NULL,
						      0);
	if (IS_ERR(smem->socinfo))
		dev_dbg(&pdev->dev, "failed to register socinfo device\n");

	return 0;
}

static int qcom_smem_remove(struct platform_device *pdev)
{
	platform_device_unregister(__smem->socinfo);

	hwspin_lock_free(__smem->hwlock);
	__smem = NULL;

	return 0;
}

static const struct of_device_id qcom_smem_of_match[] = {
	{ .compatible = "qcom,smem" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smem_of_match);

static struct platform_driver qcom_smem_driver = {
	.probe = qcom_smem_probe,
	.remove = qcom_smem_remove,
	.driver  = {
		.name = "qcom-smem",
		.of_match_table = qcom_smem_of_match,
		.suppress_bind_attrs = true,
	},
};

static int __init qcom_smem_init(void)
{
	return platform_driver_register(&qcom_smem_driver);
}
arch_initcall(qcom_smem_init);

static void __exit qcom_smem_exit(void)
{
	platform_driver_unregister(&qcom_smem_driver);
}
module_exit(qcom_smem_exit)

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
MODULE_LICENSE("GPL v2");