// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/socinfo.h>

/*
 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
 * in the SoC.
 *
 * All systems contain a global heap, accessible by all processors in the SoC,
 * with a table of contents data structure (@smem_header) at the beginning of
 * the main shared memory block.
 *
 * The global header contains metadata for allocations as well as a fixed list
 * of 512 entries (@smem_global_entry) that can be initialized to reference
 * parts of the shared memory space.
 *
 * In addition to this global heap, a set of "private" heaps can be set up at
 * boot time with access restrictions so that only certain processor pairs can
 * access the data.
 *
 * These partitions are referenced from an optional partition table
 * (@smem_ptable), found 4kB from the end of the main smem region. The
 * partition table entries (@smem_ptable_entry) list the involved processors
 * (or hosts) and their location in the main shared memory region.
 *
 * Each partition starts with a header (@smem_partition_header) that identifies
 * the partition and holds properties for the two internal memory regions. The
 * two regions are cached and non-cached memory respectively. Each region
 * contains a linked list of allocation headers (@smem_private_entry) followed
 * by their data.
 *
 * Items in the non-cached region are allocated from the start of the partition
 * while items in the cached region are allocated from the end. The free area
 * is hence the region between the cached and non-cached offsets. The header of
 * cached items comes after the data.
 *
 * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
 * for the global heap. A new global partition is created from the global heap
 * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
 * set by the bootloader.
 *
 * To synchronize allocations in the shared memory heaps a remote spinlock must
 * be held - currently lock number 3 of the sfpb or tcsr is used for this on
 * all platforms.
 */
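
/*
 * Illustrative client usage (a minimal sketch, not part of this driver):
 * a client allocates an item once and then resolves a pointer to it. The
 * item number and remote host below are hypothetical placeholders.
 *
 *	size_t size;
 *	void *ptr;
 *	int ret;
 *
 *	ret = qcom_smem_alloc(remote_host, MY_SMEM_ITEM, 64);
 *	if (ret < 0 && ret != -EEXIST)
 *		return ret;
 *
 *	ptr = qcom_smem_get(remote_host, MY_SMEM_ITEM, &size);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 */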

/*
 * The version member of the smem header contains an array of versions for the
 * various software components in the SoC. We verify that the boot loader
 * version is a valid version as a sanity check.
 */
#define SMEM_MASTER_SBL_VERSION_INDEX	7
#define SMEM_GLOBAL_HEAP_VERSION	11
#define SMEM_GLOBAL_PART_VERSION	12

/*
 * The first 8 items are only to be allocated by the boot loader while
 * initializing the heap.
 */
#define SMEM_ITEM_LAST_FIXED	8

/* Highest accepted item number, for both global and private heaps */
#define SMEM_ITEM_COUNT		512

/* Processor/host identifier for the application processor */
#define SMEM_HOST_APPS		0

/* Processor/host identifier for the global partition */
#define SMEM_GLOBAL_HOST	0xfffe

/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT		20

/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command:	current command to be executed
 * @status:	status of the currently requested command
 * @params:	parameters to the command
 */
struct smem_proc_comm {
	__le32 command;
	__le32 status;
	__le32 params[2];
};

/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated:	boolean to indicate if this entry is used
 * @offset:	offset to the allocated space
 * @size:	size of the allocated space, 8 byte aligned
 * @aux_base:	base address for the memory region used by this unit, or 0 for
 *		the default region. bits 0,1 are reserved
 */
struct smem_global_entry {
	__le32 allocated;
	__le32 offset;
	__le32 size;
	__le32 aux_base; /* bits 1:0 reserved */
};
#define AUX_BASE_MASK		0xfffffffc

/**
 * struct smem_header - header found in beginning of primary smem region
 * @proc_comm:		proc_comm communication interface (legacy)
 * @version:		array of versions for the various subsystems
 * @initialized:	boolean to indicate that smem is initialized
 * @free_offset:	index of the first unallocated byte in smem
 * @available:		number of bytes available for allocation
 * @reserved:		reserved field, must be 0
 * @toc:		array of references to items
 */
struct smem_header {
	struct smem_proc_comm proc_comm[4];
	__le32 version[32];
	__le32 initialized;
	__le32 free_offset;
	__le32 available;
	__le32 reserved;
	struct smem_global_entry toc[SMEM_ITEM_COUNT];
};

/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset:	offset, within the main shared memory region, of the partition
 * @size:	size of the partition
 * @flags:	flags for the partition (currently unused)
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @cacheline:	alignment for "cached" entries
 * @reserved:	reserved entries for later use
 */
struct smem_ptable_entry {
	__le32 offset;
	__le32 size;
	__le32 flags;
	__le16 host0;
	__le16 host1;
	__le32 cacheline;
	__le32 reserved[7];
};

/**
 * struct smem_ptable - partition table for the private partitions
 * @magic:	magic number, must be SMEM_PTABLE_MAGIC
 * @version:	version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved:	for now reserved entries
 * @entry:	list of @smem_ptable_entry for the @num_entries partitions
 */
struct smem_ptable {
	u8 magic[4];
	__le32 version;
	__le32 num_entries;
	__le32 reserved[5];
	struct smem_ptable_entry entry[];
};

static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */

/**
 * struct smem_partition_header - header of the partitions
 * @magic:	magic number, must be SMEM_PART_MAGIC
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @size:	size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *		this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *		partition
 * @reserved:	for now reserved entries
 */
struct smem_partition_header {
	u8 magic[4];
	__le16 host0;
	__le16 host1;
	__le32 size;
	__le32 offset_free_uncached;
	__le32 offset_free_cached;
	__le32 reserved[3];
};
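
/*
 * Rough partition layout implied by the description above and the helper
 * functions below (an orientation sketch, not normative):
 *
 *	+--------------------------+ offset 0
 *	| smem_partition_header    |
 *	+--------------------------+
 *	| uncached entries:        | header | padding | data, growing up
 *	+--------------------------+ offset_free_uncached
 *	|        free space        |
 *	+--------------------------+ offset_free_cached
 *	| cached entries:          | data | header, growing down
 *	+--------------------------+ size
 */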

/**
 * struct smem_partition - describes smem partition
 * @virt_base:	starting virtual address of partition
 * @phys_base:	starting physical address of partition
 * @cacheline:	alignment for "cached" entries
 * @size:	size of partition
 */
struct smem_partition {
	void __iomem *virt_base;
	phys_addr_t phys_base;
	size_t cacheline;
	size_t size;
};

static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; /* "$PRT" */

/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary:	magic number, must be SMEM_PRIVATE_CANARY
 * @item:	identifying number of the smem item
 * @size:	size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved:	for now reserved entry
 */
struct smem_private_entry {
	u16 canary; /* bytes are the same so no swapping needed */
	__le16 item;
	__le32 size; /* includes padding bytes */
	__le16 padding_data;
	__le16 padding_hdr;
	__le32 reserved;
};
#define SMEM_PRIVATE_CANARY	0xa5a5

/**
 * struct smem_info - smem region info located after the table of contents
 * @magic:	magic number, must be SMEM_INFO_MAGIC
 * @size:	size of the smem region
 * @base_addr:	base address of the smem region
 * @reserved:	for now reserved entry
 * @num_items:	highest accepted item number
 */
struct smem_info {
	u8 magic[4];
	__le32 size;
	__le32 base_addr;
	__le32 reserved;
	__le16 num_items;
};

static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* SIII */
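
/*
 * The smem_info block, when present, sits directly after the partition
 * table's entry array. A minimal sketch of how it is located (mirroring
 * qcom_smem_get_item_count() below; 'ptable' is assumed to be valid):
 *
 *	struct smem_info *info;
 *
 *	info = (struct smem_info *)&ptable->entry[le32_to_cpu(ptable->num_entries)];
 *	if (!memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
 *		num_items = le16_to_cpu(info->num_items);
 */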

/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base:	identifier of aux_mem base
 * @virt_base:	virtual base address of memory with this aux_mem identifier
 * @size:	size of the memory region
 */
struct smem_region {
	phys_addr_t aux_base;
	void __iomem *virt_base;
	size_t size;
};

/**
 * struct qcom_smem - device data for the smem device
 * @dev:	device pointer
 * @hwlock:	reference to a hwspinlock
 * @ptable:	virtual base of partition table
 * @global_partition:	describes the global partition, when in use
 * @partitions:	list of partitions of current processor/host
 * @item_count: max accepted item number
 * @socinfo:	platform device pointer
 * @num_regions: number of @regions
 * @regions:	list of the memory regions defining the shared memory
 */
struct qcom_smem {
	struct device *dev;

	struct hwspinlock *hwlock;

	u32 item_count;
	struct platform_device *socinfo;
	struct smem_ptable *ptable;
	struct smem_partition global_partition;
	struct smem_partition partitions[SMEM_HOST_COUNT];

	unsigned num_regions;
	struct smem_region regions[];
};

static void *
phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_uncached);
}

static struct smem_private_entry *
phdr_to_first_cached_entry(struct smem_partition_header *phdr,
			   size_t cacheline)
{
	void *p = phdr;
	struct smem_private_entry *e;

	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
}

static void *
phdr_to_last_cached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_cached);
}

static struct smem_private_entry *
phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + sizeof(*phdr);
}

static struct smem_private_entry *
uncached_entry_next(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
	       le32_to_cpu(e->size);
}

static struct smem_private_entry *
cached_entry_next(struct smem_private_entry *e, size_t cacheline)
{
	void *p = e;

	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
}

static void *uncached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
}

static void *cached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p - le32_to_cpu(e->size);
}

/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;

/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT	1000

static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   struct smem_partition *part,
				   unsigned item,
				   size_t size)
{
	struct smem_private_entry *hdr, *end;
	struct smem_partition_header *phdr;
	size_t alloc_size;
	void *cached;
	void *p_end;

	phdr = (struct smem_partition_header __force *)part->virt_base;
	p_end = (void *)phdr + part->size;

	hdr = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);
	cached = phdr_to_last_cached_entry(phdr);

	if (WARN_ON((void *)end > p_end || cached > p_end))
		return -EINVAL;

	while (hdr < end) {
		if (hdr->canary != SMEM_PRIVATE_CANARY)
			goto bad_canary;
		if (le16_to_cpu(hdr->item) == item)
			return -EEXIST;

		hdr = uncached_entry_next(hdr);
	}

	if (WARN_ON((void *)hdr > p_end))
		return -EINVAL;

	/* Check that we don't grow into the cached region */
	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
	if ((void *)hdr + alloc_size > cached) {
		dev_err(smem->dev, "Out of memory\n");
		return -ENOSPC;
	}

	hdr->canary = SMEM_PRIVATE_CANARY;
	hdr->item = cpu_to_le16(item);
	hdr->size = cpu_to_le32(ALIGN(size, 8));
	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
	hdr->padding_hdr = 0;

	/*
	 * Ensure the header is written before we advance the free offset, so
	 * that remote processors that do not take the remote spinlock still
	 * get a consistent view of the linked list.
	 */
	wmb();
	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);

	return 0;
bad_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return -EINVAL;
}

static int qcom_smem_alloc_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t size)
{
	struct smem_global_entry *entry;
	struct smem_header *header;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)
		return -EEXIST;

	size = ALIGN(size, 8);
	if (WARN_ON(size > le32_to_cpu(header->available)))
		return -ENOMEM;

	entry->offset = header->free_offset;
	entry->size = cpu_to_le32(size);

	/*
	 * Ensure the header is consistent before we mark the item allocated,
	 * so that remote processors will get a consistent view of the item
	 * even though they do not take the spinlock on read.
	 */
	wmb();
	entry->allocated = cpu_to_le32(1);

	le32_add_cpu(&header->free_offset, size);
	le32_add_cpu(&header->available, -size);

	return 0;
}

/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host:	remote processor id, or -1
 * @item:	smem item handle
 * @size:	number of bytes to be allocated
 *
 * Allocate space for a given smem item of size @size, given that the item is
 * not yet allocated.
 *
 * Return: 0 on success, negative errno on failure.
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
	struct smem_partition *part;
	unsigned long flags;
	int ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (item < SMEM_ITEM_LAST_FIXED) {
		dev_err(__smem->dev,
			"Rejecting allocation of static entry %u\n", item);
		return -EINVAL;
	}

	if (WARN_ON(item >= __smem->item_count))
		return -EINVAL;

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ret;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
		part = &__smem->partitions[host];
		ret = qcom_smem_alloc_private(__smem, part, item, size);
	} else if (__smem->global_partition.virt_base) {
		part = &__smem->global_partition;
		ret = qcom_smem_alloc_private(__smem, part, item, size);
	} else {
		ret = qcom_smem_alloc_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_smem_alloc);
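
/*
 * Worked example of the private allocation arithmetic above: a request of
 * size 13 is stored with hdr->size = ALIGN(13, 8) = 16 and
 * hdr->padding_data = 16 - 13 = 3, so that readers can recover the original
 * length as size - padding_data.
 */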

static void *qcom_smem_get_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t *size)
{
	struct smem_header *header;
	struct smem_region *region;
	struct smem_global_entry *entry;
	u64 entry_offset;
	u32 e_size;
	u32 aux_base;
	unsigned i;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (!entry->allocated)
		return ERR_PTR(-ENXIO);

	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;

	for (i = 0; i < smem->num_regions; i++) {
		region = &smem->regions[i];

		if ((u32)region->aux_base == aux_base || !aux_base) {
			e_size = le32_to_cpu(entry->size);
			entry_offset = le32_to_cpu(entry->offset);

			if (WARN_ON(e_size + entry_offset > region->size))
				return ERR_PTR(-EINVAL);

			if (size != NULL)
				*size = e_size;

			return region->virt_base + entry_offset;
		}
	}

	return ERR_PTR(-ENOENT);
}

static void *qcom_smem_get_private(struct qcom_smem *smem,
				   struct smem_partition *part,
				   unsigned item,
				   size_t *size)
{
	struct smem_private_entry *e, *end;
	struct smem_partition_header *phdr;
	void *item_ptr, *p_end;
	u32 padding_data;
	u32 e_size;

	phdr = (struct smem_partition_header __force *)part->virt_base;
	p_end = (void *)phdr + part->size;

	e = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);

	while (e < end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL) {
				e_size = le32_to_cpu(e->size);
				padding_data = le16_to_cpu(e->padding_data);

				if (WARN_ON(e_size > part->size ||
					    padding_data > e_size))
					return ERR_PTR(-EINVAL);

				*size = e_size - padding_data;
			}

			item_ptr = uncached_entry_to_item(e);
			if (WARN_ON(item_ptr > p_end))
				return ERR_PTR(-EINVAL);

			return item_ptr;
		}

		e = uncached_entry_next(e);
	}

	if (WARN_ON((void *)e > p_end))
		return ERR_PTR(-EINVAL);

	/* Item was not found in the uncached list; search the cached list */

	e = phdr_to_first_cached_entry(phdr, part->cacheline);
	end = phdr_to_last_cached_entry(phdr);

	if (WARN_ON((void *)e < (void *)phdr || (void *)end > p_end))
		return ERR_PTR(-EINVAL);

	while (e > end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL) {
				e_size = le32_to_cpu(e->size);
				padding_data = le16_to_cpu(e->padding_data);

				if (WARN_ON(e_size > part->size ||
					    padding_data > e_size))
					return ERR_PTR(-EINVAL);

				*size = e_size - padding_data;
			}

			item_ptr = cached_entry_to_item(e);
			if (WARN_ON(item_ptr < (void *)phdr))
				return ERR_PTR(-EINVAL);

			return item_ptr;
		}

		e = cached_entry_next(e, part->cacheline);
	}

	if (WARN_ON((void *)e < (void *)phdr))
		return ERR_PTR(-EINVAL);

	return ERR_PTR(-ENOENT);

invalid_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return ERR_PTR(-EINVAL);
}

/**
 * qcom_smem_get() - resolve the pointer and size of a smem item
 * @host:	the remote processor, or -1
 * @item:	smem item handle
 * @size:	pointer to be filled out with size of the item
 *
 * Looks up the smem item and returns a pointer to it. The size of the smem
 * item is returned in @size.
 *
 * Return: pointer to the smem item on success, ERR_PTR() on failure.
 */
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
	struct smem_partition *part;
	unsigned long flags;
	int ret;
	void *ptr = ERR_PTR(-EPROBE_DEFER);

	if (!__smem)
		return ptr;

	if (WARN_ON(item >= __smem->item_count))
		return ERR_PTR(-EINVAL);

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ERR_PTR(ret);

	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
		part = &__smem->partitions[host];
		ptr = qcom_smem_get_private(__smem, part, item, size);
	} else if (__smem->global_partition.virt_base) {
		part = &__smem->global_partition;
		ptr = qcom_smem_get_private(__smem, part, item, size);
	} else {
		ptr = qcom_smem_get_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ptr;
}
EXPORT_SYMBOL_GPL(qcom_smem_get);
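
/*
 * Callers typically treat qcom_smem_get() errors as fatal or defer until
 * smem is up, since -EPROBE_DEFER is returned before probe completes. An
 * illustrative sketch (the item number is a hypothetical placeholder):
 *
 *	info = qcom_smem_get(QCOM_SMEM_HOST_ANY, MY_SMEM_ITEM, &size);
 *	if (IS_ERR(info))
 *		return PTR_ERR(info);	// may be -EPROBE_DEFER
 */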

/**
 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 * @host:	the remote processor identifying a partition, or -1
 *
 * To be used by smem clients as a quick way to determine if any new
 * allocations have been made.
 *
 * Return: number of available bytes, or negative errno on failure.
 */
int qcom_smem_get_free_space(unsigned host)
{
	struct smem_partition *part;
	struct smem_partition_header *phdr;
	struct smem_header *header;
	unsigned ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
		part = &__smem->partitions[host];
		phdr = part->virt_base;
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);

		if (ret > part->size)
			return -EINVAL;
	} else if (__smem->global_partition.virt_base) {
		part = &__smem->global_partition;
		phdr = part->virt_base;
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);

		if (ret > part->size)
			return -EINVAL;
	} else {
		header = __smem->regions[0].virt_base;
		ret = le32_to_cpu(header->available);

		if (ret > __smem->regions[0].size)
			return -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_smem_get_free_space);

static bool addr_in_range(void __iomem *base, size_t size, void *addr)
{
	return base && (addr >= base && addr < base + size);
}

/**
 * qcom_smem_virt_to_phys() - return the physical address associated
 * with an smem item pointer (previously returned by qcom_smem_get())
 * @p:	the virtual address to convert
 *
 * Returns 0 if the pointer provided is not within any smem region.
 */
phys_addr_t qcom_smem_virt_to_phys(void *p)
{
	struct smem_partition *part;
	struct smem_region *area;
	u64 offset;
	u32 i;

	for (i = 0; i < SMEM_HOST_COUNT; i++) {
		part = &__smem->partitions[i];

		if (addr_in_range(part->virt_base, part->size, p)) {
			offset = p - part->virt_base;

			return (phys_addr_t)part->phys_base + offset;
		}
	}

	part = &__smem->global_partition;

	if (addr_in_range(part->virt_base, part->size, p)) {
		offset = p - part->virt_base;

		return (phys_addr_t)part->phys_base + offset;
	}

	for (i = 0; i < __smem->num_regions; i++) {
		area = &__smem->regions[i];

		if (addr_in_range(area->virt_base, area->size, p)) {
			offset = p - area->virt_base;

			return (phys_addr_t)area->aux_base + offset;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_smem_virt_to_phys);
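
/*
 * Sketch of a typical qcom_smem_virt_to_phys() use: converting a resolved
 * item pointer into a physical address for handing off to a remote
 * processor (illustrative; item number and host are placeholders):
 *
 *	ptr = qcom_smem_get(remote_host, MY_SMEM_ITEM, &size);
 *	if (!IS_ERR(ptr))
 *		phys = qcom_smem_virt_to_phys(ptr);
 */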

/**
 * qcom_smem_get_soc_id() - return the SoC ID
 * @id:	On success, we return the SoC ID here.
 *
 * Look up SoC ID from HW/SW build ID and return it.
 *
 * Return: 0 on success, negative errno on failure.
 */
int qcom_smem_get_soc_id(u32 *id)
{
	struct socinfo *info;

	info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, NULL);
	if (IS_ERR(info))
		return PTR_ERR(info);

	*id = __le32_to_cpu(info->id);

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_smem_get_soc_id);

static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
	struct smem_header *header;
	__le32 *versions;

	header = smem->regions[0].virt_base;
	versions = header->version;

	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
}

static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	u32 version;

	ptable = smem->ptable;
	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
		return ERR_PTR(-ENOENT);

	version = le32_to_cpu(ptable->version);
	if (version != 1) {
		dev_err(smem->dev,
			"Unsupported partition header version %u\n", version);
		return ERR_PTR(-EINVAL);
	}
	return ptable;
}

static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	struct smem_info *info;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR_OR_NULL(ptable))
		return SMEM_ITEM_COUNT;

	info = (struct smem_info *)&ptable->entry[le32_to_cpu(ptable->num_entries)];
	if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
		return SMEM_ITEM_COUNT;

	return le16_to_cpu(info->num_items);
}

/*
 * Validate the partition header for a partition whose partition
 * table entry is supplied. Returns a pointer to its header if
 * valid, or a null pointer otherwise.
 */
static struct smem_partition_header *
qcom_smem_partition_header(struct qcom_smem *smem,
		struct smem_ptable_entry *entry, u16 host0, u16 host1)
{
	struct smem_partition_header *header;
	u32 phys_addr;
	u32 size;

	phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset);
	header = devm_ioremap_wc(smem->dev, phys_addr, le32_to_cpu(entry->size));

	if (!header)
		return NULL;

	if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
		dev_err(smem->dev, "bad partition magic %4ph\n", header->magic);
		return NULL;
	}

	if (host0 != le16_to_cpu(header->host0)) {
		dev_err(smem->dev, "bad host0 (%hu != %hu)\n",
			host0, le16_to_cpu(header->host0));
		return NULL;
	}
	if (host1 != le16_to_cpu(header->host1)) {
		dev_err(smem->dev, "bad host1 (%hu != %hu)\n",
			host1, le16_to_cpu(header->host1));
		return NULL;
	}

	size = le32_to_cpu(header->size);
	if (size != le32_to_cpu(entry->size)) {
		dev_err(smem->dev, "bad partition size (%u != %u)\n",
			size, le32_to_cpu(entry->size));
		return NULL;
	}

	if (le32_to_cpu(header->offset_free_uncached) > size) {
		dev_err(smem->dev, "bad partition free uncached (%u > %u)\n",
			le32_to_cpu(header->offset_free_uncached), size);
		return NULL;
	}

	return header;
}

static int qcom_smem_set_global_partition(struct qcom_smem *smem)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	bool found = false;
	int i;

	if (smem->global_partition.virt_base) {
		dev_err(smem->dev, "Already found the global partition\n");
		return -EINVAL;
	}

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST)
			continue;

		if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_err(smem->dev, "Missing entry for global partition\n");
		return -EINVAL;
	}

	header = qcom_smem_partition_header(smem, entry,
				SMEM_GLOBAL_HOST, SMEM_GLOBAL_HOST);
	if (!header)
		return -EINVAL;

	smem->global_partition.virt_base = (void __iomem *)header;
	smem->global_partition.phys_base = smem->regions[0].aux_base +
					   le32_to_cpu(entry->offset);
	smem->global_partition.size = le32_to_cpu(entry->size);
	smem->global_partition.cacheline = le32_to_cpu(entry->cacheline);

	return 0;
}

static int
qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	u16 remote_host;
	u16 host0, host1;
	int i;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);
		if (host0 == local_host)
			remote_host = host1;
		else if (host1 == local_host)
			remote_host = host0;
		else
			continue;

		if (remote_host >= SMEM_HOST_COUNT) {
			dev_err(smem->dev, "bad host %u\n", remote_host);
			return -EINVAL;
		}

		if (smem->partitions[remote_host].virt_base) {
			dev_err(smem->dev, "duplicate host %u\n", remote_host);
			return -EINVAL;
		}

		header = qcom_smem_partition_header(smem, entry, host0, host1);
		if (!header)
			return -EINVAL;

		smem->partitions[remote_host].virt_base = (void __iomem *)header;
		smem->partitions[remote_host].phys_base = smem->regions[0].aux_base +
							  le32_to_cpu(entry->offset);
		smem->partitions[remote_host].size = le32_to_cpu(entry->size);
		smem->partitions[remote_host].cacheline = le32_to_cpu(entry->cacheline);
	}

	return 0;
}

static int qcom_smem_map_toc(struct qcom_smem *smem, struct smem_region *region)
{
	u32 ptable_start;

	/* map starting 4K for smem header */
	region->virt_base = devm_ioremap_wc(smem->dev, region->aux_base, SZ_4K);
	ptable_start = region->aux_base + region->size - SZ_4K;
	/* map last 4K for toc */
	smem->ptable = devm_ioremap_wc(smem->dev, ptable_start, SZ_4K);

	if (!region->virt_base || !smem->ptable)
		return -ENOMEM;

	return 0;
}

static int qcom_smem_map_global(struct qcom_smem *smem, u32 size)
{
	u32 phys_addr;

	phys_addr = smem->regions[0].aux_base;

	smem->regions[0].size = size;
	smem->regions[0].virt_base = devm_ioremap_wc(smem->dev, phys_addr, size);

	if (!smem->regions[0].virt_base)
		return -ENOMEM;

	return 0;
}

static int qcom_smem_resolve_mem(struct qcom_smem *smem, const char *name,
				 struct smem_region *region)
{
	struct device *dev = smem->dev;
	struct device_node *np;
	struct resource r;
	int ret;

	np = of_parse_phandle(dev->of_node, name, 0);
	if (!np) {
		dev_err(dev, "No %s specified\n", name);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	of_node_put(np);
	if (ret)
		return ret;

	region->aux_base = r.start;
	region->size = resource_size(&r);

	return 0;
}

static int qcom_smem_probe(struct platform_device *pdev)
{
	struct smem_header *header;
	struct reserved_mem *rmem;
	struct qcom_smem *smem;
	unsigned long flags;
	size_t array_size;
	int num_regions;
	int hwlock_id;
	u32 version;
	u32 size;
	int ret;
	int i;

	num_regions = 1;
	if (of_property_present(pdev->dev.of_node, "qcom,rpm-msg-ram"))
		num_regions++;

	array_size = num_regions * sizeof(struct smem_region);
	smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
	if (!smem)
		return -ENOMEM;

	smem->dev = &pdev->dev;
	smem->num_regions = num_regions;

	rmem = of_reserved_mem_lookup(pdev->dev.of_node);
	if (rmem) {
		smem->regions[0].aux_base = rmem->base;
		smem->regions[0].size = rmem->size;
	} else {
		/*
		 * Fall back to the memory-region reference if we're not a
		 * reserved-memory node.
		 */
		ret = qcom_smem_resolve_mem(smem, "memory-region", &smem->regions[0]);
		if (ret)
			return ret;
	}

	if (num_regions > 1) {
		ret = qcom_smem_resolve_mem(smem, "qcom,rpm-msg-ram", &smem->regions[1]);
		if (ret)
			return ret;
	}

	ret = qcom_smem_map_toc(smem, &smem->regions[0]);
	if (ret)
		return ret;

	for (i = 1; i < num_regions; i++) {
		smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev,
							     smem->regions[i].aux_base,
							     smem->regions[i].size);
		if (!smem->regions[i].virt_base) {
			dev_err(&pdev->dev, "failed to remap %pa\n", &smem->regions[i].aux_base);
			return -ENOMEM;
		}
	}

	header = smem->regions[0].virt_base;
	if (le32_to_cpu(header->initialized) != 1 ||
	    le32_to_cpu(header->reserved)) {
		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
		return -EINVAL;
	}

	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
	if (hwlock_id < 0) {
		if (hwlock_id != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to retrieve hwlock\n");
		return hwlock_id;
	}

	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
	if (!smem->hwlock)
		return -ENXIO;

	ret = hwspin_lock_timeout_irqsave(smem->hwlock, HWSPINLOCK_TIMEOUT, &flags);
	if (ret)
		return ret;
	size = readl_relaxed(&header->available) + readl_relaxed(&header->free_offset);
	hwspin_unlock_irqrestore(smem->hwlock, &flags);

	version = qcom_smem_get_sbl_version(smem);
	/*
	 * The smem header mapping is required only in the heap version scheme,
	 * so unmap it here. It will be remapped in qcom_smem_map_global() when
	 * the whole partition is mapped again.
	 */
	devm_iounmap(smem->dev, smem->regions[0].virt_base);
	switch (version >> 16) {
	case SMEM_GLOBAL_PART_VERSION:
		ret = qcom_smem_set_global_partition(smem);
		if (ret < 0)
			return ret;
		smem->item_count = qcom_smem_get_item_count(smem);
		break;
	case SMEM_GLOBAL_HEAP_VERSION:
		ret = qcom_smem_map_global(smem, size);
		if (ret)
			return ret;
		smem->item_count = SMEM_ITEM_COUNT;
		break;
	default:
		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
		return -EINVAL;
	}

	BUILD_BUG_ON(SMEM_HOST_APPS >= SMEM_HOST_COUNT);
	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
	if (ret < 0 && ret != -ENOENT)
		return ret;

	__smem = smem;

	smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
						      PLATFORM_DEVID_NONE, NULL,
						      0);
	if (IS_ERR(smem->socinfo))
		dev_dbg(&pdev->dev, "failed to register socinfo device\n");

	return 0;
}

static int qcom_smem_remove(struct platform_device *pdev)
{
	platform_device_unregister(__smem->socinfo);

	hwspin_lock_free(__smem->hwlock);
	__smem = NULL;

	return 0;
}

static const struct of_device_id qcom_smem_of_match[] = {
	{ .compatible = "qcom,smem" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smem_of_match);

static struct platform_driver qcom_smem_driver = {
	.probe = qcom_smem_probe,
	.remove = qcom_smem_remove,
	.driver  = {
		.name = "qcom-smem",
		.of_match_table = qcom_smem_of_match,
		.suppress_bind_attrs = true,
	},
};

static int __init qcom_smem_init(void)
{
	return platform_driver_register(&qcom_smem_driver);
}
arch_initcall(qcom_smem_init);

static void __exit qcom_smem_exit(void)
{
	platform_driver_unregister(&qcom_smem_driver);
}
module_exit(qcom_smem_exit)

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
MODULE_LICENSE("GPL v2");