// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/socinfo.h>

/*
 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
 * in the SoC.
 *
 * All systems contain a global heap, accessible by all processors in the SoC,
 * with a table of contents data structure (@smem_header) at the beginning of
 * the main shared memory block.
 *
 * The global header contains metadata for allocations as well as a fixed list
 * of 512 entries (@smem_global_entry) that can be initialized to reference
 * parts of the shared memory space.
 *
 * In addition to this global heap a set of "private" heaps can be set up at
 * boot time with access restrictions so that only certain processor pairs can
 * access the data.
 *
 * These partitions are referenced from an optional partition table
 * (@smem_ptable), which is found 4kB from the end of the main smem region. The
 * partition table entries (@smem_ptable_entry) list the involved processors
 * (or hosts) and their location in the main shared memory region.
 *
 * Each partition starts with a header (@smem_partition_header) that identifies
 * the partition and holds properties for the two internal memory regions. The
 * two regions are cached and non-cached memory respectively. Each region
 * contains a linked list of allocation headers (@smem_private_entry) followed
 * by their data.
 *
 * Items in the non-cached region are allocated from the start of the partition
 * while items in the cached region are allocated from the end. The free area
 * is hence the region between the cached and non-cached offsets. The header of
 * cached items comes after the data.
 *
 * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
 * for the global heap. A new global partition is created from the global heap
 * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
 * set by the bootloader.
 *
 * To synchronize allocations in the shared memory heaps a remote spinlock must
 * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
 * platforms.
 */
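
/*
 * A rough sketch of a private partition, per the description above (not to
 * scale; offsets are relative to the partition base):
 *
 *	+---------------------------------+ 0
 *	| smem_partition_header           |
 *	+---------------------------------+
 *	| hdr | data | hdr | data | ...   |  non-cached items, grow upwards
 *	+---------------------------------+ offset_free_uncached
 *	|            free space           |
 *	+---------------------------------+ offset_free_cached
 *	| ...  | data | hdr | data | hdr  |  cached items, grow downwards
 *	+---------------------------------+ size
 */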

/*
 * The version member of the smem header contains an array of versions for the
 * various software components in the SoC. We verify that the boot loader
 * version is a valid version as a sanity check.
 */
#define SMEM_MASTER_SBL_VERSION_INDEX	7
#define SMEM_GLOBAL_HEAP_VERSION	11
#define SMEM_GLOBAL_PART_VERSION	12

/*
 * The first 8 items are only to be allocated by the boot loader while
 * initializing the heap.
 */
#define SMEM_ITEM_LAST_FIXED	8

/* Highest accepted item number, for both global and private heaps */
#define SMEM_ITEM_COUNT		512

/* Processor/host identifier for the application processor */
#define SMEM_HOST_APPS		0

/* Processor/host identifier for the global partition */
#define SMEM_GLOBAL_HOST	0xfffe

/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT		25

/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command: current command to be executed
 * @status: status of the currently requested command
 * @params: parameters to the command
 */
struct smem_proc_comm {
	__le32 command;
	__le32 status;
	__le32 params[2];
};

/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated: boolean to indicate if this entry is used
 * @offset: offset to the allocated space
 * @size: size of the allocated space, 8 byte aligned
 * @aux_base: base address for the memory region used by this unit, or 0 for
 *	the default region. bits 0,1 are reserved
 */
struct smem_global_entry {
	__le32 allocated;
	__le32 offset;
	__le32 size;
	__le32 aux_base; /* bits 1:0 reserved */
};
#define AUX_BASE_MASK		0xfffffffc

/**
 * struct smem_header - header found in beginning of primary smem region
 * @proc_comm: proc_comm communication interface (legacy)
 * @version: array of versions for the various subsystems
 * @initialized: boolean to indicate that smem is initialized
 * @free_offset: index of the first unallocated byte in smem
 * @available: number of bytes available for allocation
 * @reserved: reserved field, must be 0
 * @toc: array of references to items
 */
struct smem_header {
	struct smem_proc_comm proc_comm[4];
	__le32 version[32];
	__le32 initialized;
	__le32 free_offset;
	__le32 available;
	__le32 reserved;
	struct smem_global_entry toc[SMEM_ITEM_COUNT];
};

/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset: offset, within the main shared memory region, of the partition
 * @size: size of the partition
 * @flags: flags for the partition (currently unused)
 * @host0: first processor/host with access to this partition
 * @host1: second processor/host with access to this partition
 * @cacheline: alignment for "cached" entries
 * @reserved: reserved entries for later use
 */
struct smem_ptable_entry {
	__le32 offset;
	__le32 size;
	__le32 flags;
	__le16 host0;
	__le16 host1;
	__le32 cacheline;
	__le32 reserved[7];
};

/**
 * struct smem_ptable - partition table for the private partitions
 * @magic: magic number, must be SMEM_PTABLE_MAGIC
 * @version: version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved: for now reserved entries
 * @entry: list of @smem_ptable_entry for the @num_entries partitions
 */
struct smem_ptable {
	u8 magic[4];
	__le32 version;
	__le32 num_entries;
	__le32 reserved[5];
	struct smem_ptable_entry entry[];
};

static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */

/**
 * struct smem_partition_header - header of the partitions
 * @magic: magic number, must be SMEM_PART_MAGIC
 * @host0: first processor/host with access to this partition
 * @host1: second processor/host with access to this partition
 * @size: size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *	this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *	partition
 * @reserved: for now reserved entries
 */
struct smem_partition_header {
	u8 magic[4];
	__le16 host0;
	__le16 host1;
	__le32 size;
	__le32 offset_free_uncached;
	__le32 offset_free_cached;
	__le32 reserved[3];
};

/**
 * struct smem_partition - describes smem partition
 * @virt_base: starting virtual address of partition
 * @phys_base: starting physical address of partition
 * @cacheline: alignment for "cached" entries
 * @size: size of partition
 */
struct smem_partition {
	void __iomem *virt_base;
	phys_addr_t phys_base;
	size_t cacheline;
	size_t size;
};

static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; /* "$PRT" */

/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary: magic number, must be SMEM_PRIVATE_CANARY
 * @item: identifying number of the smem item
 * @size: size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved: for now reserved entry
 */
struct smem_private_entry {
	u16 canary; /* bytes are the same so no swapping needed */
	__le16 item;
	__le32 size; /* includes padding bytes */
	__le16 padding_data;
	__le16 padding_hdr;
	__le32 reserved;
};
#define SMEM_PRIVATE_CANARY	0xa5a5

/**
 * struct smem_info - smem region info located after the table of contents
 * @magic: magic number, must be SMEM_INFO_MAGIC
 * @size: size of the smem region
 * @base_addr: base address of the smem region
 * @reserved: for now reserved entry
 * @num_items: highest accepted item number
 */
struct smem_info {
	u8 magic[4];
	__le32 size;
	__le32 base_addr;
	__le32 reserved;
	__le16 num_items;
};

static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* SIII */

/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base: identifier of aux_mem base
 * @virt_base: virtual base address of memory with this aux_mem identifier
 * @size: size of the memory region
 */
struct smem_region {
	phys_addr_t aux_base;
	void __iomem *virt_base;
	size_t size;
};

/**
 * struct qcom_smem - device data for the smem device
 * @dev: device pointer
 * @hwlock: reference to a hwspinlock
 * @ptable: virtual base of partition table
 * @global_partition: describes the global partition, when in use
 * @partitions: list of partitions of current processor/host
 * @item_count: max accepted item number
 * @socinfo: platform device pointer
 * @num_regions: number of @regions
 * @regions: list of the memory regions defining the shared memory
 */
struct qcom_smem {
	struct device *dev;

	struct hwspinlock *hwlock;

	u32 item_count;
	struct platform_device *socinfo;
	struct smem_ptable *ptable;
	struct smem_partition global_partition;
	struct smem_partition partitions[SMEM_HOST_COUNT];

	unsigned num_regions;
	struct smem_region regions[] __counted_by(num_regions);
};

static void *
phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_uncached);
}

static struct smem_private_entry *
phdr_to_first_cached_entry(struct smem_partition_header *phdr,
			   size_t cacheline)
{
	void *p = phdr;
	struct smem_private_entry *e;

	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
}

static void *
phdr_to_last_cached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_cached);
}

static struct smem_private_entry *
phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + sizeof(*phdr);
}

static struct smem_private_entry *
uncached_entry_next(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
	       le32_to_cpu(e->size);
}

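/*
 * Cached items are laid out back to front: each entry's header sits above
 * its own data, so walking the list steps towards lower addresses, from the
 * end of the partition down to @offset_free_cached.
 */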
static struct smem_private_entry *
cached_entry_next(struct smem_private_entry *e, size_t cacheline)
{
	void *p = e;

	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
}

static void *uncached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
}

static void *cached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p - le32_to_cpu(e->size);
}

/*
 * Pointer to the one and only smem handle.
 * Init to -EPROBE_DEFER to signal SMEM still has to be probed.
 * Can be set to -ENODEV if SMEM is not initialized by SBL.
 */
static struct qcom_smem *__smem = INIT_ERR_PTR(-EPROBE_DEFER);

/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT	1000

/* The qcom hwspinlock id is always plus one from the smem host id */
#define SMEM_HOST_ID_TO_HWSPINLOCK_ID(__x) ((__x) + 1)

/**
 * qcom_smem_bust_hwspin_lock_by_host() - bust the smem hwspinlock for a host
 * @host: remote processor id
 *
 * Busts the hwspin_lock for the given smem host id. This helper is intended
 * for remoteproc drivers that manage remoteprocs with an equivalent smem
 * driver instance in the remote firmware. Drivers can force a release of the
 * smem hwspin_lock if the rproc unexpectedly goes into a bad state.
 *
 * Context: Process context.
 *
 * Returns: 0 on success, otherwise negative errno.
 */
int qcom_smem_bust_hwspin_lock_by_host(unsigned int host)
{
	/* This function is for remote procs, so ignore SMEM_HOST_APPS */
	if (host == SMEM_HOST_APPS || host >= SMEM_HOST_COUNT)
		return -EINVAL;

	/* __smem holds an error pointer until the device has probed */
	if (IS_ERR(__smem))
		return PTR_ERR(__smem);

	return hwspin_lock_bust(__smem->hwlock, SMEM_HOST_ID_TO_HWSPINLOCK_ID(host));
}
EXPORT_SYMBOL_GPL(qcom_smem_bust_hwspin_lock_by_host);
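
/*
 * Illustrative only -- a remoteproc recovery path might bust the lock before
 * restarting a wedged remote processor (the host id below is hypothetical):
 *
 *	ret = qcom_smem_bust_hwspin_lock_by_host(ADSP_SMEM_HOST);
 *	if (ret)
 *		dev_warn(dev, "failed to bust smem hwspinlock: %d\n", ret);
 */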

/**
 * qcom_smem_is_available() - Check if SMEM is available
 *
 * Return: true if SMEM is available, false otherwise.
 */
bool qcom_smem_is_available(void)
{
	return !IS_ERR_OR_NULL(__smem);
}
EXPORT_SYMBOL_GPL(qcom_smem_is_available);
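
/*
 * Illustrative only -- a client probe routine can use this to defer until
 * smem itself has probed:
 *
 *	if (!qcom_smem_is_available())
 *		return -EPROBE_DEFER;
 */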

static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   struct smem_partition *part,
				   unsigned item,
				   size_t size)
{
	struct smem_private_entry *hdr, *end;
	struct smem_partition_header *phdr;
	size_t alloc_size;
	void *cached;
	void *p_end;

	phdr = (struct smem_partition_header __force *)part->virt_base;
	p_end = (void *)phdr + part->size;

	hdr = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);
	cached = phdr_to_last_cached_entry(phdr);

	if (WARN_ON((void *)end > p_end || cached > p_end))
		return -EINVAL;

	while (hdr < end) {
		if (hdr->canary != SMEM_PRIVATE_CANARY)
			goto bad_canary;
		if (le16_to_cpu(hdr->item) == item)
			return -EEXIST;

		hdr = uncached_entry_next(hdr);
	}

	if (WARN_ON((void *)hdr > p_end))
		return -EINVAL;

	/* Check that we don't grow into the cached region */
	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
	if ((void *)hdr + alloc_size > cached) {
		dev_err(smem->dev, "Out of memory\n");
		return -ENOSPC;
	}

	hdr->canary = SMEM_PRIVATE_CANARY;
	hdr->item = cpu_to_le16(item);
	hdr->size = cpu_to_le32(ALIGN(size, 8));
	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
	hdr->padding_hdr = 0;

	/*
	 * Ensure the header is written before we advance the free offset, so
	 * that remote processors that do not take the remote spinlock still
	 * get a consistent view of the linked list.
	 */
	wmb();
	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);

	return 0;
bad_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return -EINVAL;
}


static int qcom_smem_alloc_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t size)
{
	struct smem_global_entry *entry;
	struct smem_header *header;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)
		return -EEXIST;

	size = ALIGN(size, 8);
	if (WARN_ON(size > le32_to_cpu(header->available)))
		return -ENOMEM;

	entry->offset = header->free_offset;
	entry->size = cpu_to_le32(size);

	/*
	 * Ensure the header is consistent before we mark the item allocated,
	 * so that remote processors will get a consistent view of the item
	 * even though they do not take the spinlock on read.
	 */
	wmb();
	entry->allocated = cpu_to_le32(1);

	le32_add_cpu(&header->free_offset, size);
	le32_add_cpu(&header->available, -size);

	return 0;
}

/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host: remote processor id, or -1
 * @item: smem item handle
 * @size: number of bytes to be allocated
 *
 * Allocate space for a given smem item of size @size, given that the item is
 * not yet allocated.
 *
 * Return: 0 on success, negative errno on failure.
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
	struct smem_partition *part;
	unsigned long flags;
	int ret;

	if (IS_ERR(__smem))
		return PTR_ERR(__smem);

	if (item < SMEM_ITEM_LAST_FIXED) {
		dev_err(__smem->dev,
			"Rejecting allocation of static entry %d\n", item);
		return -EINVAL;
	}

	if (item >= __smem->item_count)
		return -EINVAL;

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ret;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
		part = &__smem->partitions[host];
		ret = qcom_smem_alloc_private(__smem, part, item, size);
	} else if (__smem->global_partition.virt_base) {
		part = &__smem->global_partition;
		ret = qcom_smem_alloc_private(__smem, part, item, size);
	} else {
		ret = qcom_smem_alloc_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_smem_alloc);
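
/*
 * Typical client usage, sketched with a hypothetical host, item id and
 * struct (none of these names are part of this driver):
 *
 *	struct my_shared_state *state;
 *	int ret;
 *
 *	ret = qcom_smem_alloc(MY_REMOTE_HOST, MY_SMEM_ITEM, sizeof(*state));
 *	if (ret < 0 && ret != -EEXIST)
 *		return ret;
 */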

static void *qcom_smem_get_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t *size)
{
	struct smem_header *header;
	struct smem_region *region;
	struct smem_global_entry *entry;
	u64 entry_offset;
	u32 e_size;
	u32 aux_base;
	unsigned i;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (!entry->allocated)
		return ERR_PTR(-ENXIO);

	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;

	for (i = 0; i < smem->num_regions; i++) {
		region = &smem->regions[i];

		if ((u32)region->aux_base == aux_base || !aux_base) {
			e_size = le32_to_cpu(entry->size);
			entry_offset = le32_to_cpu(entry->offset);

			if (WARN_ON(e_size + entry_offset > region->size))
				return ERR_PTR(-EINVAL);

			if (size != NULL)
				*size = e_size;

			return region->virt_base + entry_offset;
		}
	}

	return ERR_PTR(-ENOENT);
}

static void *qcom_smem_get_private(struct qcom_smem *smem,
				   struct smem_partition *part,
				   unsigned item,
				   size_t *size)
{
	struct smem_private_entry *e, *end;
	struct smem_partition_header *phdr;
	void *item_ptr, *p_end;
	u32 padding_data;
	u32 e_size;

	phdr = (struct smem_partition_header __force *)part->virt_base;
	p_end = (void *)phdr + part->size;

	e = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);

	while (e < end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL) {
				e_size = le32_to_cpu(e->size);
				padding_data = le16_to_cpu(e->padding_data);

				if (WARN_ON(e_size > part->size || padding_data > e_size))
					return ERR_PTR(-EINVAL);

				*size = e_size - padding_data;
			}

			item_ptr = uncached_entry_to_item(e);
			if (WARN_ON(item_ptr > p_end))
				return ERR_PTR(-EINVAL);

			return item_ptr;
		}

		e = uncached_entry_next(e);
	}

	if (WARN_ON((void *)e > p_end))
		return ERR_PTR(-EINVAL);

	/* Item was not found in the uncached list, search the cached list */

	e = phdr_to_first_cached_entry(phdr, part->cacheline);
	end = phdr_to_last_cached_entry(phdr);

	if (WARN_ON((void *)e < (void *)phdr || (void *)end > p_end))
		return ERR_PTR(-EINVAL);

	while (e > end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL) {
				e_size = le32_to_cpu(e->size);
				padding_data = le16_to_cpu(e->padding_data);

				if (WARN_ON(e_size > part->size || padding_data > e_size))
					return ERR_PTR(-EINVAL);

				*size = e_size - padding_data;
			}

			item_ptr = cached_entry_to_item(e);
			if (WARN_ON(item_ptr < (void *)phdr))
				return ERR_PTR(-EINVAL);

			return item_ptr;
		}

		e = cached_entry_next(e, part->cacheline);
	}

	if (WARN_ON((void *)e < (void *)phdr))
		return ERR_PTR(-EINVAL);

	return ERR_PTR(-ENOENT);

invalid_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return ERR_PTR(-EINVAL);
}

/**
 * qcom_smem_get() - resolve pointer and size of a smem item
 * @host: the remote processor, or -1
 * @item: smem item handle
 * @size: pointer to be filled out with size of the item
 *
 * Looks up smem item and returns pointer to it. Size of smem
 * item is returned in @size.
 *
 * Return: a pointer to an SMEM item on success, ERR_PTR() on failure.
 */
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
	struct smem_partition *part;
	void *ptr;

	if (IS_ERR(__smem))
		return __smem;

	if (item >= __smem->item_count)
		return ERR_PTR(-EINVAL);

	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
		part = &__smem->partitions[host];
		ptr = qcom_smem_get_private(__smem, part, item, size);
	} else if (__smem->global_partition.virt_base) {
		part = &__smem->global_partition;
		ptr = qcom_smem_get_private(__smem, part, item, size);
	} else {
		ptr = qcom_smem_get_global(__smem, item, size);
	}

	return ptr;
}
EXPORT_SYMBOL_GPL(qcom_smem_get);
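
/*
 * Illustrative only -- looking up an item (possibly published by a remote
 * processor) and validating its size before use; the names are hypothetical:
 *
 *	struct my_shared_state *state;
 *	size_t size;
 *
 *	state = qcom_smem_get(MY_REMOTE_HOST, MY_SMEM_ITEM, &size);
 *	if (IS_ERR(state))
 *		return PTR_ERR(state);
 *	if (size < sizeof(*state))
 *		return -EINVAL;
 */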

/**
 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 * @host: the remote processor identifying a partition, or -1
 *
 * To be used by smem clients as a quick way to determine if any new
 * allocations have been made.
 *
 * Return: number of available bytes on success, negative errno on failure.
 */
int qcom_smem_get_free_space(unsigned host)
{
	struct smem_partition *part;
	struct smem_partition_header *phdr;
	struct smem_header *header;
	unsigned ret;

	if (IS_ERR(__smem))
		return PTR_ERR(__smem);

	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
		part = &__smem->partitions[host];
		phdr = part->virt_base;
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);

		if (ret > part->size)
			return -EINVAL;
	} else if (__smem->global_partition.virt_base) {
		part = &__smem->global_partition;
		phdr = part->virt_base;
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);

		if (ret > part->size)
			return -EINVAL;
	} else {
		header = __smem->regions[0].virt_base;
		ret = le32_to_cpu(header->available);

		if (ret > __smem->regions[0].size)
			return -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_smem_get_free_space);
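
/*
 * Illustrative only -- comparing free space between two points in time to
 * detect that new allocations were made; the names are hypothetical:
 *
 *	int before, after;
 *
 *	before = qcom_smem_get_free_space(MY_REMOTE_HOST);
 *	... wait for a notification from the remote ...
 *	after = qcom_smem_get_free_space(MY_REMOTE_HOST);
 *	if (after >= 0 && after < before)
 *		scan_for_new_items();
 */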

static bool addr_in_range(void __iomem *base, size_t size, void *addr)
{
	return base && ((void __iomem *)addr >= base && (void __iomem *)addr < base + size);
}

/**
 * qcom_smem_virt_to_phys() - return the physical address associated
 * with an smem item pointer (previously returned by qcom_smem_get())
 * @p: the virtual address to convert
 *
 * Return: physical address of the SMEM item (if found), 0 otherwise
 */
phys_addr_t qcom_smem_virt_to_phys(void *p)
{
	struct smem_partition *part;
	struct smem_region *area;
	u64 offset;
	u32 i;

	for (i = 0; i < SMEM_HOST_COUNT; i++) {
		part = &__smem->partitions[i];

		if (addr_in_range(part->virt_base, part->size, p)) {
			offset = p - part->virt_base;

			return (phys_addr_t)part->phys_base + offset;
		}
	}

	part = &__smem->global_partition;

	if (addr_in_range(part->virt_base, part->size, p)) {
		offset = p - part->virt_base;

		return (phys_addr_t)part->phys_base + offset;
	}

	for (i = 0; i < __smem->num_regions; i++) {
		area = &__smem->regions[i];

		if (addr_in_range(area->virt_base, area->size, p)) {
			offset = p - area->virt_base;

			return (phys_addr_t)area->aux_base + offset;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_smem_virt_to_phys);
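
/*
 * Illustrative only -- handing an item's physical address to hardware; the
 * item id and descriptor field are hypothetical:
 *
 *	size_t size;
 *	void *buf;
 *	phys_addr_t pa;
 *
 *	buf = qcom_smem_get(MY_REMOTE_HOST, MY_SMEM_ITEM, &size);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	pa = qcom_smem_virt_to_phys(buf);
 *	if (!pa)
 *		return -EINVAL;
 *	desc->addr = cpu_to_le64(pa);
 */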

/**
 * qcom_smem_get_soc_id() - return the SoC ID
 * @id: On success, we return the SoC ID here.
 *
 * Look up SoC ID from HW/SW build ID and return it.
 *
 * Return: 0 on success, negative errno on failure.
 */
int qcom_smem_get_soc_id(u32 *id)
{
	struct socinfo *info;

	info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, NULL);
	if (IS_ERR(info))
		return PTR_ERR(info);

	*id = __le32_to_cpu(info->id);

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_smem_get_soc_id);

/**
 * qcom_smem_get_feature_code() - return the feature code
 * @code: On success, return the feature code here.
 *
 * Look up the feature code identifier from SMEM and return it.
 *
 * Return: 0 on success, negative errno on failure.
 */
int qcom_smem_get_feature_code(u32 *code)
{
	struct socinfo *info;
	u32 raw_code;

	info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, NULL);
	if (IS_ERR(info))
		return PTR_ERR(info);

	/* This only makes sense for socinfo >= 16 */
	if (__le32_to_cpu(info->fmt) < SOCINFO_VERSION(0, 16))
		return -EOPNOTSUPP;

	raw_code = __le32_to_cpu(info->feature_code);

	/* Ensure the value makes sense */
	if (raw_code > SOCINFO_FC_INT_MAX)
		raw_code = SOCINFO_FC_UNKNOWN;

	*code = raw_code;

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_smem_get_feature_code);
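
/*
 * Illustrative only -- gating a driver quirk on the SoC id and feature code
 * (the id value and helper names below are made up):
 *
 *	u32 soc_id, fc;
 *
 *	if (!qcom_smem_get_soc_id(&soc_id) && soc_id == MY_SOC_ID)
 *		enable_soc_quirk();
 *	if (!qcom_smem_get_feature_code(&fc) && fc != SOCINFO_FC_UNKNOWN)
 *		apply_feature_code(fc);
 */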

static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
	struct smem_header *header;
	__le32 *versions;

	header = smem->regions[0].virt_base;
	versions = header->version;

	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
}

static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	u32 version;

	ptable = smem->ptable;
	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
		return ERR_PTR(-ENOENT);

	version = le32_to_cpu(ptable->version);
	if (version != 1) {
		dev_err(smem->dev,
			"Unsupported partition header version %d\n", version);
		return ERR_PTR(-EINVAL);
	}
	return ptable;
}

static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	struct smem_info *info;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR_OR_NULL(ptable))
		return SMEM_ITEM_COUNT;

	info = (struct smem_info *)&ptable->entry[le32_to_cpu(ptable->num_entries)];
	if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
		return SMEM_ITEM_COUNT;

	return le16_to_cpu(info->num_items);
}

/*
 * Validate the partition header for a partition whose partition
 * table entry is supplied. Returns a pointer to its header if
 * valid, or a null pointer otherwise.
 */
static struct smem_partition_header *
qcom_smem_partition_header(struct qcom_smem *smem,
		struct smem_ptable_entry *entry, u16 host0, u16 host1)
{
	struct smem_partition_header *header;
	u32 phys_addr;
	u32 size;

	phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset);
	header = devm_ioremap_wc(smem->dev, phys_addr, le32_to_cpu(entry->size));

	if (!header)
		return NULL;

	if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
		dev_err(smem->dev, "bad partition magic %4ph\n", header->magic);
		return NULL;
	}

	if (host0 != le16_to_cpu(header->host0)) {
		dev_err(smem->dev, "bad host0 (%hu != %hu)\n",
			host0, le16_to_cpu(header->host0));
		return NULL;
	}
	if (host1 != le16_to_cpu(header->host1)) {
		dev_err(smem->dev, "bad host1 (%hu != %hu)\n",
			host1, le16_to_cpu(header->host1));
		return NULL;
	}

	size = le32_to_cpu(header->size);
	if (size != le32_to_cpu(entry->size)) {
		dev_err(smem->dev, "bad partition size (%u != %u)\n",
			size, le32_to_cpu(entry->size));
		return NULL;
	}

	if (le32_to_cpu(header->offset_free_uncached) > size) {
		dev_err(smem->dev, "bad partition free uncached (%u > %u)\n",
			le32_to_cpu(header->offset_free_uncached), size);
		return NULL;
	}

	return header;
}

static int qcom_smem_set_global_partition(struct qcom_smem *smem)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	bool found = false;
	int i;

	if (smem->global_partition.virt_base) {
		dev_err(smem->dev, "Already found the global partition\n");
		return -EINVAL;
	}

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST)
			continue;

		if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_err(smem->dev, "Missing entry for global partition\n");
		return -EINVAL;
	}

	header = qcom_smem_partition_header(smem, entry,
				SMEM_GLOBAL_HOST, SMEM_GLOBAL_HOST);
	if (!header)
		return -EINVAL;

	smem->global_partition.virt_base = (void __iomem *)header;
	smem->global_partition.phys_base = smem->regions[0].aux_base +
					   le32_to_cpu(entry->offset);
	smem->global_partition.size = le32_to_cpu(entry->size);
	smem->global_partition.cacheline = le32_to_cpu(entry->cacheline);

	return 0;
}

static int
qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	u16 remote_host;
	u16 host0, host1;
	int i;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);
		if (host0 == local_host)
			remote_host = host1;
		else if (host1 == local_host)
			remote_host = host0;
		else
			continue;

		if (remote_host >= SMEM_HOST_COUNT) {
			dev_err(smem->dev, "bad host %u\n", remote_host);
			return -EINVAL;
		}

		if (smem->partitions[remote_host].virt_base) {
			dev_err(smem->dev, "duplicate host %u\n", remote_host);
			return -EINVAL;
		}

		header = qcom_smem_partition_header(smem, entry, host0, host1);
		if (!header)
			return -EINVAL;

		smem->partitions[remote_host].virt_base = (void __iomem *)header;
		smem->partitions[remote_host].phys_base = smem->regions[0].aux_base +
							  le32_to_cpu(entry->offset);
		smem->partitions[remote_host].size = le32_to_cpu(entry->size);
		smem->partitions[remote_host].cacheline = le32_to_cpu(entry->cacheline);
	}

	return 0;
}

static int qcom_smem_map_toc(struct qcom_smem *smem, struct smem_region *region)
{
	u32 ptable_start;

	/* map starting 4K for smem header */
	region->virt_base = devm_ioremap_wc(smem->dev, region->aux_base, SZ_4K);
	ptable_start = region->aux_base + region->size - SZ_4K;
	/* map last 4k for toc */
	smem->ptable = devm_ioremap_wc(smem->dev, ptable_start, SZ_4K);

	if (!region->virt_base || !smem->ptable)
		return -ENOMEM;

	return 0;
}

static int qcom_smem_map_global(struct qcom_smem *smem, u32 size)
{
	u32 phys_addr;

	phys_addr = smem->regions[0].aux_base;

	smem->regions[0].size = size;
	smem->regions[0].virt_base = devm_ioremap_wc(smem->dev, phys_addr, size);

	if (!smem->regions[0].virt_base)
		return -ENOMEM;

	return 0;
}

static int qcom_smem_resolve_mem(struct qcom_smem *smem, const char *name,
				 struct smem_region *region)
{
	struct device *dev = smem->dev;
	struct device_node *np;
	struct resource r;
	int ret;

	np = of_parse_phandle(dev->of_node, name, 0);
	if (!np) {
		dev_err(dev, "No %s specified\n", name);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	of_node_put(np);
	if (ret)
		return ret;

	region->aux_base = r.start;
	region->size = resource_size(&r);

	return 0;
}

static int qcom_smem_probe(struct platform_device *pdev)
{
	struct smem_header *header;
	struct reserved_mem *rmem;
	struct qcom_smem *smem;
	unsigned long flags;
	int num_regions;
	int hwlock_id;
	u32 version;
	u32 size;
	int ret;
	int i;

	num_regions = 1;
	if (of_property_present(pdev->dev.of_node, "qcom,rpm-msg-ram"))
		num_regions++;

	smem = devm_kzalloc(&pdev->dev, struct_size(smem, regions, num_regions),
			    GFP_KERNEL);
	if (!smem)
		return -ENOMEM;

	smem->dev = &pdev->dev;
	smem->num_regions = num_regions;

	rmem = of_reserved_mem_lookup(pdev->dev.of_node);
	if (rmem) {
		smem->regions[0].aux_base = rmem->base;
		smem->regions[0].size = rmem->size;
	} else {
		/*
		 * Fall back to the memory-region reference, if we're not a
		 * reserved-memory node.
		 */
		ret = qcom_smem_resolve_mem(smem, "memory-region", &smem->regions[0]);
		if (ret)
			return ret;
	}

	if (num_regions > 1) {
		ret = qcom_smem_resolve_mem(smem, "qcom,rpm-msg-ram", &smem->regions[1]);
		if (ret)
			return ret;
	}

	ret = qcom_smem_map_toc(smem, &smem->regions[0]);
	if (ret)
		return ret;

	for (i = 1; i < num_regions; i++) {
		smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev,
							     smem->regions[i].aux_base,
							     smem->regions[i].size);
		if (!smem->regions[i].virt_base) {
			dev_err(&pdev->dev, "failed to remap %pa\n", &smem->regions[i].aux_base);
			return -ENOMEM;
		}
	}

	header = smem->regions[0].virt_base;
	if (le32_to_cpu(header->initialized) != 1 ||
	    le32_to_cpu(header->reserved)) {
		__smem = ERR_PTR(-ENODEV);
		return dev_err_probe(&pdev->dev, PTR_ERR(__smem), "SMEM is not initialized by SBL\n");
	}

	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
	if (hwlock_id < 0)
		return dev_err_probe(&pdev->dev, hwlock_id,
				     "failed to retrieve hwlock\n");

	smem->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, hwlock_id);
	if (!smem->hwlock)
		return -ENXIO;

	ret = hwspin_lock_timeout_irqsave(smem->hwlock, HWSPINLOCK_TIMEOUT, &flags);
	if (ret)
		return ret;
	size = readl_relaxed(&header->available) + readl_relaxed(&header->free_offset);
	hwspin_unlock_irqrestore(smem->hwlock, &flags);

	version = qcom_smem_get_sbl_version(smem);
	/*
	 * smem header mapping is required only in heap version scheme, so unmap
	 * it here. It will be remapped in qcom_smem_map_global() when whole
	 * partition is mapped again.
	 */
	devm_iounmap(smem->dev, smem->regions[0].virt_base);
	switch (version >> 16) {
	case SMEM_GLOBAL_PART_VERSION:
		ret = qcom_smem_set_global_partition(smem);
		if (ret < 0)
			return ret;
		smem->item_count = qcom_smem_get_item_count(smem);
		break;
	case SMEM_GLOBAL_HEAP_VERSION:
		ret = qcom_smem_map_global(smem, size);
		if (ret < 0)
			return ret;
		smem->item_count = SMEM_ITEM_COUNT;
		break;
	default:
		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
		return -EINVAL;
	}

	BUILD_BUG_ON(SMEM_HOST_APPS >= SMEM_HOST_COUNT);
	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
	if (ret < 0 && ret != -ENOENT)
		return ret;

	__smem = smem;

	smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
						      PLATFORM_DEVID_NONE, NULL,
						      0);
	if (IS_ERR(smem->socinfo))
		dev_dbg(&pdev->dev, "failed to register socinfo device\n");

	return 0;
}

static void qcom_smem_remove(struct platform_device *pdev)
{
	platform_device_unregister(__smem->socinfo);

	__smem = NULL;
}

static const struct of_device_id qcom_smem_of_match[] = {
	{ .compatible = "qcom,smem" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smem_of_match);

static struct platform_driver qcom_smem_driver = {
	.probe = qcom_smem_probe,
	.remove = qcom_smem_remove,
	.driver  = {
		.name = "qcom-smem",
		.of_match_table = qcom_smem_of_match,
		.suppress_bind_attrs = true,
	},
};

static int __init qcom_smem_init(void)
{
	return platform_driver_register(&qcom_smem_driver);
}
arch_initcall(qcom_smem_init);

static void __exit qcom_smem_exit(void)
{
	platform_driver_unregister(&qcom_smem_driver);
}
module_exit(qcom_smem_exit);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
MODULE_LICENSE("GPL v2");