// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/soc/qcom/smem.h>
/*
 * The private partitions are referenced from an optional partition table
 * (@smem_ptable), that is found 4kB from the end of the main smem region. Each
 * partition starts with a header (@smem_partition_header) that holds
 * properties for two internal memory regions; the two regions are cached and
 * non-cached memory respectively.
 *
 * Items in the non-cached region are allocated from the start of the partition
 * while items in the cached region are allocated from the end. The free area
 * is hence the region between the cached and non-cached offsets. The header of
 * a cached item is placed after its data.
 *
 * Version 12 (SMEM_GLOBAL_PART_VERSION) creates a global partition from the
 * global heap region with partition type (SMEM_GLOBAL_HOST), and the max smem
 * item count is set by the bootloader.
 *
 * To synchronize allocations in the shared memory heaps a remote spinlock must
 * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
 * platforms.
 *
 * The version member of the smem header contains an array of versions for the
 * various software components in the SoC.
 */
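/*
 * The layout that falls out of this scheme looks roughly as follows (an
 * illustrative sketch derived from the description above, not an excerpt
 * from the SMEM specification):
 *
 *        +----------------------------+ <- partition base
 *        | smem_partition_header      |
 *        +----------------------------+
 *        | hdr | data | hdr | data    |   uncached items grow upwards
 *        +----------------------------+ <- offset_free_uncached
 *        |         free space         |
 *        +----------------------------+ <- offset_free_cached
 *        | data | hdr | data | hdr    |   cached items grow downwards
 *        +----------------------------+ <- partition base + size
 */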
/* struct smem_proc_comm - proc_comm communication struct (legacy) */
/* struct smem_global_entry - entry to reference smem items on the heap */

/*
 * struct smem_header - header found in beginning of primary smem region
 * @initialized:	boolean to indicate that smem is initialized
 * @free_offset:	index of the first unallocated byte in smem
 */

/* struct smem_ptable_entry - one entry in the @smem_ptable list */
/* struct smem_ptable - partition table for the private partitions */
/* struct smem_partition_header - header of the partitions */
/* struct smem_partition - describes a smem partition */

/*
 * struct smem_private_entry - header of each item in the private partition
 * @item:	identifying number of the smem item
 */
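/*
 * The entry accessors below imply a layout along these lines (a sketch
 * reconstructed from the field accesses in this excerpt, not the verbatim
 * definition):
 *
 *        struct smem_private_entry {
 *                u16 canary;             // fixed pattern, corruption check
 *                __le16 item;            // smem item number
 *                __le32 size;            // data size, includes padding bytes
 *                __le16 padding_data;    // padding after the payload
 *                __le16 padding_hdr;     // padding between header and payload
 *                __le32 reserved;
 *        };
 */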
/*
 * struct smem_info - smem region info located after the table of contents
 * @size:	size of the smem region
 * @base_addr:	base address of the smem region
 */

/* struct smem_region - representation of a chunk of memory used for smem */
/* struct qcom_smem - device data for the smem device */
/* Pointer helpers for walking private partition entries (bodies shown): */

/* phdr_to_last_uncached_entry(phdr): */
        return p + le32_to_cpu(phdr->offset_free_uncached);

/* phdr_to_first_cached_entry(phdr, cacheline): */
        return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);

/* phdr_to_last_cached_entry(phdr): */
        return p + le32_to_cpu(phdr->offset_free_cached);

/* uncached_entry_next(e): */
        return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
               le32_to_cpu(e->size);

/* cached_entry_next(e, cacheline): */
        return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);

/* uncached_entry_to_item(e): */
        return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);

/* cached_entry_to_item(e): */
        return p - le32_to_cpu(e->size);
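/*
 * Worked example (numbers assumed, using the 16-byte header sketched
 * earlier): for an uncached item stored with padding_hdr = 0 and size = 16,
 * uncached_entry_to_item() yields e + 16 (the payload follows the header)
 * and uncached_entry_next() yields e + 16 + 0 + 16 = e + 32, the next
 * header. Cached entries mirror this: the header sits above its payload, so
 * cached_entry_to_item() steps downwards by e->size.
 */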
/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;

/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT	1000

/* The qcom hwspinlock id is always plus one from the smem host id */
#define SMEM_HOST_ID_TO_HWSPINLOCK_ID(__x)	((__x) + 1)
/**
 * qcom_smem_bust_hwspin_lock_by_host() - bust the smem hwspinlock for a host
 * @host:	remote processor id
 *
 * Busts the hwspin_lock for the given smem host id. This helper is intended
 * for remoteproc drivers that manage remoteprocs with an equivalent smem
 * driver instance in the remote firmware, allowing the driver to clear the
 * smem hwspin_lock if the rproc unexpectedly goes into a bad state.
 *
 * Return: 0 on success, otherwise negative errno.
 */
int qcom_smem_bust_hwspin_lock_by_host(unsigned int host)
{
        /* This function is for remote procs, so ignore SMEM_HOST_APPS */
        if (host == SMEM_HOST_APPS || host >= SMEM_HOST_COUNT)
                return -EINVAL;

        return hwspin_lock_bust(__smem->hwlock, SMEM_HOST_ID_TO_HWSPINLOCK_ID(host));
}
EXPORT_SYMBOL_GPL(qcom_smem_bust_hwspin_lock_by_host);
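/*
 * Intended call site (illustrative sketch; a hypothetical remoteproc
 * recovery path, after detecting that the remote firmware crashed):
 *
 *        ret = qcom_smem_bust_hwspin_lock_by_host(rproc_smem_host_id);
 *        if (ret)
 *                dev_warn(dev, "failed to bust smem hwspinlock: %d\n", ret);
 */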
/**
 * qcom_smem_is_available() - Check if SMEM is available
 *
 * Return: true if SMEM is available, false otherwise.
 */
bool qcom_smem_is_available(void)
{
        return !!__smem;
}
EXPORT_SYMBOL_GPL(qcom_smem_is_available);
static int qcom_smem_alloc_private(struct qcom_smem *smem,
                                   struct smem_partition *part,
                                   unsigned item,
                                   size_t size)
{
        struct smem_private_entry *hdr, *end;
        struct smem_partition_header *phdr;
        size_t alloc_size;
        void *cached;
        void *p_end;

        phdr = (struct smem_partition_header __force *)part->virt_base;
        p_end = (void *)phdr + part->size;

        hdr = phdr_to_first_uncached_entry(phdr);
        end = phdr_to_last_uncached_entry(phdr);
        cached = phdr_to_last_cached_entry(phdr);

        if (WARN_ON((void *)end > p_end || cached > p_end))
                return -EINVAL;

        while (hdr < end) {
                if (hdr->canary != SMEM_PRIVATE_CANARY)
                        goto bad_canary;
                if (le16_to_cpu(hdr->item) == item)
                        return -EEXIST;

                hdr = uncached_entry_next(hdr);
        }

        if (WARN_ON((void *)hdr > p_end))
                return -EINVAL;

        /* Check that we don't grow into the cached region */
        alloc_size = sizeof(*hdr) + ALIGN(size, 8);
        if ((void *)hdr + alloc_size > cached) {
                dev_err(smem->dev, "Out of memory\n");
                return -ENOSPC;
        }

        hdr->canary = SMEM_PRIVATE_CANARY;
        hdr->item = cpu_to_le16(item);
        hdr->size = cpu_to_le32(ALIGN(size, 8));
        hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
        hdr->padding_hdr = 0;

        /*
         * Ensure the header is written before we advance the free offset, so
         * that remote processors that do not take the remote spinlock still
         * get a consistent view of the linked list.
         */
        wmb();
        le32_add_cpu(&phdr->offset_free_uncached, alloc_size);

        return 0;
bad_canary:
        dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
                le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

        return -EINVAL;
}
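/*
 * Worked example of the padding bookkeeping above (numbers assumed): a
 * request of size = 13 is stored with hdr->size = ALIGN(13, 8) = 16 and
 * hdr->padding_data = 16 - 13 = 3; readers recover the requested length as
 * size - padding_data.
 */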
static int qcom_smem_alloc_global(struct qcom_smem *smem,
                                  unsigned item,
                                  size_t size)
{
        struct smem_global_entry *entry;
        struct smem_header *header;

        header = smem->regions[0].virt_base;
        entry = &header->toc[item];
        if (entry->allocated)
                return -EEXIST;

        size = ALIGN(size, 8);
        if (WARN_ON(size > le32_to_cpu(header->available)))
                return -ENOMEM;

        entry->offset = header->free_offset;
        entry->size = cpu_to_le32(size);

        /*
         * Ensure the header is consistent before we mark the item allocated,
         * so that remote processors get a consistent view of the item even
         * though they do not take the spinlock on read.
         */
        wmb();
        entry->allocated = cpu_to_le32(1);

        le32_add_cpu(&header->free_offset, size);
        le32_add_cpu(&header->available, -size);

        return 0;
}
/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host:	remote processor id, or -1
 * @item:	smem item handle
 * @size:	number of bytes to be allocated
 *
 * Allocate space for a given smem item of size @size, given that the item is
 * not yet allocated.
 *
 * Return: 0 on success, negative errno on failure.
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
        struct smem_partition *part;
        unsigned long flags;
        int ret;

        if (!__smem)
                return -EPROBE_DEFER;

        if (item < SMEM_ITEM_LAST_FIXED) {
                dev_err(__smem->dev,
                        "Rejecting allocation of static entry %d\n", item);
                return -EINVAL;
        }

        if (WARN_ON(item >= __smem->item_count))
                return -EINVAL;

        ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
                                          HWSPINLOCK_TIMEOUT,
                                          &flags);
        if (ret)
                return ret;

        if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
                part = &__smem->partitions[host];
                ret = qcom_smem_alloc_private(__smem, part, item, size);
        } else if (__smem->global_partition.virt_base) {
                part = &__smem->global_partition;
                ret = qcom_smem_alloc_private(__smem, part, item, size);
        } else {
                ret = qcom_smem_alloc_global(__smem, item, size);
        }

        hwspin_unlock_irqrestore(__smem->hwlock, &flags);

        return ret;
}
EXPORT_SYMBOL_GPL(qcom_smem_alloc);
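/*
 * Typical client usage (illustrative sketch; MY_HOST and SMEM_MY_ITEM are
 * hypothetical identifiers, not part of this driver):
 *
 *        ret = qcom_smem_alloc(MY_HOST, SMEM_MY_ITEM, sizeof(struct my_state));
 *        if (ret && ret != -EEXIST)
 *                return ret;     // -EEXIST: the item was already allocated
 */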
static void *qcom_smem_get_global(struct qcom_smem *smem,
                                  unsigned item,
                                  size_t *size)
{
        struct smem_header *header;
        struct smem_region *region;
        struct smem_global_entry *entry;
        u64 entry_offset;
        u32 e_size;
        u32 aux_base;
        unsigned i;

        header = smem->regions[0].virt_base;
        entry = &header->toc[item];
        if (!entry->allocated)
                return ERR_PTR(-ENXIO);

        aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;

        for (i = 0; i < smem->num_regions; i++) {
                region = &smem->regions[i];

                if ((u32)region->aux_base == aux_base || !aux_base) {
                        e_size = le32_to_cpu(entry->size);
                        entry_offset = le32_to_cpu(entry->offset);

                        if (WARN_ON(e_size + entry_offset > region->size))
                                return ERR_PTR(-EINVAL);

                        if (size != NULL)
                                *size = e_size;

                        return region->virt_base + entry_offset;
                }
        }

        return ERR_PTR(-ENOENT);
}
static void *qcom_smem_get_private(struct qcom_smem *smem,
                                   struct smem_partition *part,
                                   unsigned item,
                                   size_t *size)
{
        struct smem_private_entry *e, *end;
        struct smem_partition_header *phdr;
        void *item_ptr, *p_end;
        u32 padding_data;
        u32 e_size;

        phdr = (struct smem_partition_header __force *)part->virt_base;
        p_end = (void *)phdr + part->size;

        e = phdr_to_first_uncached_entry(phdr);
        end = phdr_to_last_uncached_entry(phdr);

        while (e < end) {
                if (e->canary != SMEM_PRIVATE_CANARY)
                        goto invalid_canary;

                if (le16_to_cpu(e->item) == item) {
                        if (size != NULL) {
                                e_size = le32_to_cpu(e->size);
                                padding_data = le16_to_cpu(e->padding_data);

                                if (WARN_ON(e_size > part->size || padding_data > e_size))
                                        return ERR_PTR(-EINVAL);

                                *size = e_size - padding_data;
                        }

                        item_ptr = uncached_entry_to_item(e);
                        if (WARN_ON(item_ptr > p_end))
                                return ERR_PTR(-EINVAL);

                        return item_ptr;
                }

                e = uncached_entry_next(e);
        }

        if (WARN_ON((void *)e > p_end))
                return ERR_PTR(-EINVAL);

        /* Item was not found in the uncached list, search the cached list */

        e = phdr_to_first_cached_entry(phdr, part->cacheline);
        end = phdr_to_last_cached_entry(phdr);

        if (WARN_ON((void *)e < (void *)phdr || (void *)end > p_end))
                return ERR_PTR(-EINVAL);

        while (e > end) {
                if (e->canary != SMEM_PRIVATE_CANARY)
                        goto invalid_canary;

                if (le16_to_cpu(e->item) == item) {
                        if (size != NULL) {
                                e_size = le32_to_cpu(e->size);
                                padding_data = le16_to_cpu(e->padding_data);

                                if (WARN_ON(e_size > part->size || padding_data > e_size))
                                        return ERR_PTR(-EINVAL);

                                *size = e_size - padding_data;
                        }

                        item_ptr = cached_entry_to_item(e);
                        if (WARN_ON(item_ptr < (void *)phdr))
                                return ERR_PTR(-EINVAL);

                        return item_ptr;
                }

                e = cached_entry_next(e, part->cacheline);
        }

        if (WARN_ON((void *)e < (void *)phdr))
                return ERR_PTR(-EINVAL);

        return ERR_PTR(-ENOENT);

invalid_canary:
        dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
                le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

        return ERR_PTR(-EINVAL);
}
/**
 * qcom_smem_get() - resolve the pointer and size of a smem item
 * @host:	the remote processor, or -1
 * @item:	smem item handle
 * @size:	pointer to be filled out with size of the item
 *
 * Looks up the smem item and returns a pointer to it. The size of the smem
 * item is returned in @size.
 *
 * Return: a pointer to an SMEM item on success, ERR_PTR() on failure.
 */
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
        struct smem_partition *part;
        void *ptr = ERR_PTR(-EPROBE_DEFER);

        if (!__smem)
                return ptr;

        if (WARN_ON(item >= __smem->item_count))
                return ERR_PTR(-EINVAL);

        if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
                part = &__smem->partitions[host];
                ptr = qcom_smem_get_private(__smem, part, item, size);
        } else if (__smem->global_partition.virt_base) {
                part = &__smem->global_partition;
                ptr = qcom_smem_get_private(__smem, part, item, size);
        } else {
                ptr = qcom_smem_get_global(__smem, item, size);
        }

        return ptr;
}
EXPORT_SYMBOL_GPL(qcom_smem_get);
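/*
 * Typical lookup pattern (illustrative sketch; identifiers hypothetical):
 *
 *        size_t size;
 *        struct my_state *state;
 *
 *        state = qcom_smem_get(MY_HOST, SMEM_MY_ITEM, &size);
 *        if (IS_ERR(state))
 *                return PTR_ERR(state);
 *        if (size < sizeof(*state))
 *                return -EINVAL;
 */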
/**
 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 * @host:	the remote processor identifying a partition, or -1
 *
 * To be used by smem clients as a quick way to determine if any new
 * allocations have been made.
 *
 * Return: number of available bytes on success, negative errno on failure.
 */
int qcom_smem_get_free_space(unsigned host)
{
        struct smem_partition *part;
        struct smem_partition_header *phdr;
        struct smem_header *header;
        unsigned ret;

        if (!__smem)
                return -EPROBE_DEFER;

        if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
                part = &__smem->partitions[host];
                phdr = part->virt_base;
                ret = le32_to_cpu(phdr->offset_free_cached) -
                      le32_to_cpu(phdr->offset_free_uncached);

                if (ret > part->size)
                        return -EINVAL;
        } else if (__smem->global_partition.virt_base) {
                part = &__smem->global_partition;
                phdr = part->virt_base;
                ret = le32_to_cpu(phdr->offset_free_cached) -
                      le32_to_cpu(phdr->offset_free_uncached);

                if (ret > part->size)
                        return -EINVAL;
        } else {
                header = __smem->regions[0].virt_base;
                ret = le32_to_cpu(header->available);

                if (ret > __smem->regions[0].size)
                        return -EINVAL;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(qcom_smem_get_free_space);
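/*
 * Example check (illustrative sketch): the result is only a hint, since
 * remote processors may allocate concurrently under the same hwspinlock:
 *
 *        if (qcom_smem_get_free_space(MY_HOST) < needed)
 *                return -ENOSPC;
 */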
/**
 * qcom_smem_virt_to_phys() - return the physical address associated
 * with an smem item pointer (previously returned by qcom_smem_get())
 * @p:	the virtual address to convert
 *
 * Return: physical address of the SMEM item (if found), 0 otherwise
 */
phys_addr_t qcom_smem_virt_to_phys(void *p)
{
        struct smem_partition *part;
        struct smem_region *area;
        u64 offset;
        u32 i;

        for (i = 0; i < SMEM_HOST_COUNT; i++) {
                part = &__smem->partitions[i];

                if (addr_in_range(part->virt_base, part->size, p)) {
                        offset = p - part->virt_base;

                        return (phys_addr_t)part->phys_base + offset;
                }
        }

        part = &__smem->global_partition;

        if (addr_in_range(part->virt_base, part->size, p)) {
                offset = p - part->virt_base;

                return (phys_addr_t)part->phys_base + offset;
        }

        for (i = 0; i < __smem->num_regions; i++) {
                area = &__smem->regions[i];

                if (addr_in_range(area->virt_base, area->size, p)) {
                        offset = p - area->virt_base;

                        return (phys_addr_t)area->aux_base + offset;
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(qcom_smem_virt_to_phys);
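/*
 * Example use (illustrative sketch): resolving the physical address of an
 * item so it can be handed to a remote entity:
 *
 *        void *buf = qcom_smem_get(MY_HOST, SMEM_MY_ITEM, &size);
 *        phys_addr_t pa = IS_ERR(buf) ? 0 : qcom_smem_virt_to_phys(buf);
 *        if (!pa)
 *                return -EFAULT;
 */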
/**
 * qcom_smem_get_soc_id() - return the SoC ID
 * @id:	On success, the SoC ID is returned here.
 *
 * Return: 0 on success, negative errno on failure.
 */
int qcom_smem_get_soc_id(u32 *id)
{
        struct socinfo *info;

        info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, NULL);
        if (IS_ERR(info))
                return PTR_ERR(info);

        *id = __le32_to_cpu(info->id);

        return 0;
}
EXPORT_SYMBOL_GPL(qcom_smem_get_soc_id);
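/*
 * Example (illustrative sketch):
 *
 *        u32 soc_id;
 *
 *        if (!qcom_smem_get_soc_id(&soc_id))
 *                pr_info("SoC ID: %u\n", soc_id);
 */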
/**
 * qcom_smem_get_feature_code() - return the feature code
 * @code: On success, the feature code is returned here.
 *
 * Look up the feature code identifier from SMEM and return it.
 *
 * Return: 0 on success, negative errno on failure.
 */
int qcom_smem_get_feature_code(u32 *code)
{
        struct socinfo *info;
        u32 raw_code;

        info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, NULL);
        if (IS_ERR(info))
                return PTR_ERR(info);

        /* This only makes sense for socinfo >= 16 */
        if (__le32_to_cpu(info->fmt) < SOCINFO_VERSION(0, 16))
                return -EOPNOTSUPP;

        raw_code = __le32_to_cpu(info->feature_code);

        /* Ensure the value makes sense */
        if (raw_code > SOCINFO_FC_INT_MAX)
                raw_code = SOCINFO_FC_UNKNOWN;

        *code = raw_code;

        return 0;
}
EXPORT_SYMBOL_GPL(qcom_smem_get_feature_code);
static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
        struct smem_header *header;
        __le32 *versions;

        header = smem->regions[0].virt_base;
        versions = header->version;

        return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
}
static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
{
        struct smem_ptable *ptable;
        u32 version;

        ptable = smem->ptable;
        if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
                return ERR_PTR(-ENOENT);

        version = le32_to_cpu(ptable->version);
        if (version != 1) {
                dev_err(smem->dev,
                        "Unsupported partition header version %d\n", version);
                return ERR_PTR(-EINVAL);
        }

        return ptable;
}
static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
{
        struct smem_ptable *ptable;
        struct smem_info *info;

        ptable = qcom_smem_get_ptable(smem);
        if (IS_ERR_OR_NULL(ptable))
                return SMEM_ITEM_COUNT;

        info = (struct smem_info *)&ptable->entry[le32_to_cpu(ptable->num_entries)];
        if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
                return SMEM_ITEM_COUNT;

        return le16_to_cpu(info->num_items);
}
static struct smem_partition_header *
qcom_smem_partition_header(struct qcom_smem *smem,
                struct smem_ptable_entry *entry, u16 host0, u16 host1)
{
        struct smem_partition_header *header;
        u32 phys_addr;
        u32 size;

        phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset);
        header = devm_ioremap_wc(smem->dev, phys_addr, le32_to_cpu(entry->size));

        if (!header)
                return NULL;

        if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
                dev_err(smem->dev, "bad partition magic %4ph\n", header->magic);
                return NULL;
        }

        if (host0 != le16_to_cpu(header->host0)) {
                dev_err(smem->dev, "bad host0 (%hu != %hu)\n",
                        host0, le16_to_cpu(header->host0));
                return NULL;
        }
        if (host1 != le16_to_cpu(header->host1)) {
                dev_err(smem->dev, "bad host1 (%hu != %hu)\n",
                        host1, le16_to_cpu(header->host1));
                return NULL;
        }

        size = le32_to_cpu(header->size);
        if (size != le32_to_cpu(entry->size)) {
                dev_err(smem->dev, "bad partition size (%u != %u)\n",
                        size, le32_to_cpu(entry->size));
                return NULL;
        }

        if (le32_to_cpu(header->offset_free_uncached) > size) {
                dev_err(smem->dev, "bad partition free uncached (%u > %u)\n",
                        le32_to_cpu(header->offset_free_uncached), size);
                return NULL;
        }

        return header;
}
static int qcom_smem_set_global_partition(struct qcom_smem *smem)
{
        struct smem_partition_header *header;
        struct smem_ptable_entry *entry;
        struct smem_ptable *ptable;
        bool found = false;
        int i;

        if (smem->global_partition.virt_base) {
                dev_err(smem->dev, "Already found the global partition\n");
                return -EINVAL;
        }

        ptable = qcom_smem_get_ptable(smem);
        if (IS_ERR(ptable))
                return PTR_ERR(ptable);

        for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
                entry = &ptable->entry[i];
                if (!le32_to_cpu(entry->offset))
                        continue;
                if (!le32_to_cpu(entry->size))
                        continue;

                if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST)
                        continue;

                if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                dev_err(smem->dev, "Missing entry for global partition\n");
                return -EINVAL;
        }

        header = qcom_smem_partition_header(smem, entry,
                                SMEM_GLOBAL_HOST, SMEM_GLOBAL_HOST);
        if (!header)
                return -EINVAL;

        smem->global_partition.virt_base = (void __iomem *)header;
        smem->global_partition.phys_base = smem->regions[0].aux_base +
                                                le32_to_cpu(entry->offset);
        smem->global_partition.size = le32_to_cpu(entry->size);
        smem->global_partition.cacheline = le32_to_cpu(entry->cacheline);

        return 0;
}
static int
qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
{
        struct smem_partition_header *header;
        struct smem_ptable_entry *entry;
        struct smem_ptable *ptable;
        u16 remote_host;
        u16 host0, host1;
        int i;

        ptable = qcom_smem_get_ptable(smem);
        if (IS_ERR(ptable))
                return PTR_ERR(ptable);

        for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
                entry = &ptable->entry[i];
                if (!le32_to_cpu(entry->offset))
                        continue;
                if (!le32_to_cpu(entry->size))
                        continue;

                host0 = le16_to_cpu(entry->host0);
                host1 = le16_to_cpu(entry->host1);
                if (host0 == local_host)
                        remote_host = host1;
                else if (host1 == local_host)
                        remote_host = host0;
                else
                        continue;

                if (remote_host >= SMEM_HOST_COUNT) {
                        dev_err(smem->dev, "bad host %u\n", remote_host);
                        return -EINVAL;
                }

                if (smem->partitions[remote_host].virt_base) {
                        dev_err(smem->dev, "duplicate host %u\n", remote_host);
                        return -EINVAL;
                }

                header = qcom_smem_partition_header(smem, entry, host0, host1);
                if (!header)
                        return -EINVAL;

                smem->partitions[remote_host].virt_base = (void __iomem *)header;
                smem->partitions[remote_host].phys_base = smem->regions[0].aux_base +
                                                le32_to_cpu(entry->offset);
                smem->partitions[remote_host].size = le32_to_cpu(entry->size);
                smem->partitions[remote_host].cacheline = le32_to_cpu(entry->cacheline);
        }

        return 0;
}
static int qcom_smem_map_toc(struct qcom_smem *smem, struct smem_region *region)
{
        u32 ptable_start;

        /* map starting 4K for smem header */
        region->virt_base = devm_ioremap_wc(smem->dev, region->aux_base, SZ_4K);
        ptable_start = region->aux_base + region->size - SZ_4K;
        /* map last 4K for toc */
        smem->ptable = devm_ioremap_wc(smem->dev, ptable_start, SZ_4K);

        if (!region->virt_base || !smem->ptable)
                return -ENOMEM;

        return 0;
}
static int qcom_smem_map_global(struct qcom_smem *smem, u32 size)
{
        u32 phys_addr;

        phys_addr = smem->regions[0].aux_base;

        smem->regions[0].size = size;
        smem->regions[0].virt_base = devm_ioremap_wc(smem->dev, phys_addr, size);

        if (!smem->regions[0].virt_base)
                return -ENOMEM;

        return 0;
}
static int qcom_smem_resolve_mem(struct qcom_smem *smem, const char *name,
                                 struct smem_region *region)
{
        struct device *dev = smem->dev;
        struct device_node *np;
        struct resource r;
        int ret;

        np = of_parse_phandle(dev->of_node, name, 0);
        if (!np) {
                dev_err(dev, "No %s specified\n", name);
                return -EINVAL;
        }

        ret = of_address_to_resource(np, 0, &r);
        of_node_put(np);
        if (ret)
                return ret;

        region->aux_base = r.start;
        region->size = resource_size(&r);

        return 0;
}
static int qcom_smem_probe(struct platform_device *pdev)
{
        struct smem_header *header;
        struct reserved_mem *rmem;
        struct qcom_smem *smem;
        unsigned long flags;
        int hwlock_id;
        u32 version;
        u32 size;
        int num_regions;
        int ret;
        int i;

        num_regions = 1;
        if (of_property_present(pdev->dev.of_node, "qcom,rpm-msg-ram"))
                num_regions++;

        smem = devm_kzalloc(&pdev->dev, struct_size(smem, regions, num_regions),
                            GFP_KERNEL);
        if (!smem)
                return -ENOMEM;

        smem->dev = &pdev->dev;
        smem->num_regions = num_regions;

        rmem = of_reserved_mem_lookup(pdev->dev.of_node);
        if (rmem) {
                smem->regions[0].aux_base = rmem->base;
                smem->regions[0].size = rmem->size;
        } else {
                /*
                 * Fall back to the memory-region reference, if we're not a
                 * reserved-memory node.
                 */
                ret = qcom_smem_resolve_mem(smem, "memory-region", &smem->regions[0]);
                if (ret)
                        return ret;
        }

        if (num_regions > 1) {
                ret = qcom_smem_resolve_mem(smem, "qcom,rpm-msg-ram", &smem->regions[1]);
                if (ret)
                        return ret;
        }

        ret = qcom_smem_map_toc(smem, &smem->regions[0]);
        if (ret)
                return ret;

        for (i = 1; i < num_regions; i++) {
                smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev,
                                                             smem->regions[i].aux_base,
                                                             smem->regions[i].size);
                if (!smem->regions[i].virt_base) {
                        dev_err(&pdev->dev, "failed to remap %pa\n", &smem->regions[i].aux_base);
                        return -ENOMEM;
                }
        }

        header = smem->regions[0].virt_base;
        if (le32_to_cpu(header->initialized) != 1 ||
            le32_to_cpu(header->reserved)) {
                dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
                return -EINVAL;
        }

        hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
        if (hwlock_id < 0)
                return dev_err_probe(&pdev->dev, hwlock_id,
                                     "failed to retrieve hwlock\n");

        smem->hwlock = hwspin_lock_request_specific(hwlock_id);
        if (!smem->hwlock)
                return -ENXIO;

        ret = hwspin_lock_timeout_irqsave(smem->hwlock, HWSPINLOCK_TIMEOUT, &flags);
        if (ret)
                return ret;
        size = readl_relaxed(&header->available) + readl_relaxed(&header->free_offset);
        hwspin_unlock_irqrestore(smem->hwlock, &flags);

        version = qcom_smem_get_sbl_version(smem);
        /*
         * smem header mapping is required only in heap version scheme, so unmap
         * it here. It will be remapped in qcom_smem_map_global() when the whole
         * partition is mapped again.
         */
        devm_iounmap(smem->dev, smem->regions[0].virt_base);
        switch (version >> 16) {
        case SMEM_GLOBAL_PART_VERSION:
                ret = qcom_smem_set_global_partition(smem);
                if (ret < 0)
                        return ret;
                smem->item_count = qcom_smem_get_item_count(smem);
                break;
        case SMEM_GLOBAL_HEAP_VERSION:
                qcom_smem_map_global(smem, size);
                smem->item_count = SMEM_ITEM_COUNT;
                break;
        default:
                dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
                return -EINVAL;
        }

        BUILD_BUG_ON(SMEM_HOST_APPS >= SMEM_HOST_COUNT);
        ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
        if (ret < 0 && ret != -ENOENT)
                return ret;

        __smem = smem;

        smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
                                                      PLATFORM_DEVID_NONE, NULL,
                                                      0);
        if (IS_ERR(smem->socinfo))
                dev_dbg(&pdev->dev, "failed to register socinfo device\n");

        return 0;
}
static void qcom_smem_remove(struct platform_device *pdev)
{
        platform_device_unregister(__smem->socinfo);

        hwspin_lock_free(__smem->hwlock);
        __smem = NULL;
}
static const struct of_device_id qcom_smem_of_match[] = {
        { .compatible = "qcom,smem" },
        {}
};
MODULE_DEVICE_TABLE(of, qcom_smem_of_match);

static struct platform_driver qcom_smem_driver = {
        .probe = qcom_smem_probe,
        .remove = qcom_smem_remove,
        .driver  = {
                .name = "qcom-smem",
                .of_match_table = qcom_smem_of_match,
                .suppress_bind_attrs = true,
        },
};
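/*
 * A device tree node matching this driver typically looks like the sketch
 * below (node name and the reserved-memory label are illustrative; the
 * hwlocks reference reflects the "lock number 3" convention noted at the
 * top of this file):
 *
 *        smem {
 *                compatible = "qcom,smem";
 *                memory-region = <&smem_region>;
 *                hwlocks = <&tcsr_mutex 3>;
 *        };
 */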