Lines matching +full:combined +full:-power +full:-req (mm/cma.c)

1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright (c) 2010-2011 by Samsung Electronics.
41 WARN_ON_ONCE(cma->nranges != 1); in cma_get_base()
42 return PFN_PHYS(cma->ranges[0].base_pfn); in cma_get_base()
47 return cma->count << PAGE_SHIFT; in cma_get_size()
52 return cma->name; in cma_get_name()
58 if (align_order <= cma->order_per_bit) in cma_bitmap_aligned_mask()
60 return (1UL << (align_order - cma->order_per_bit)) - 1; in cma_bitmap_aligned_mask()
71 return (cmr->base_pfn & ((1UL << align_order) - 1)) in cma_bitmap_aligned_offset()
72 >> cma->order_per_bit; in cma_bitmap_aligned_offset()
78 return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit; in cma_bitmap_pages_to_bits()
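The three helpers above define the bitmap granularity: one bitmap bit covers 2^order_per_bit pages. A runnable standalone illustration of the arithmetic (plain userspace C; the values are my own, not from the kernel):

    #include <stdio.h>

    int main(void)
    {
        unsigned long order_per_bit = 2;  /* one bitmap bit covers 4 pages */
        unsigned long align_order = 4;    /* request 16-page alignment */
        unsigned long pages = 10;

        /* cma_bitmap_aligned_mask(): zero when align_order <= order_per_bit,
         * otherwise a mask over the low bits of the bit index. */
        unsigned long mask = align_order <= order_per_bit ?
                0 : (1UL << (align_order - order_per_bit)) - 1;

        /* cma_bitmap_pages_to_bits(): round the page count up to whole
         * bits, i.e. ALIGN(10, 4) >> 2 == 12 / 4 == 3 bits. */
        unsigned long bits = ((pages + (1UL << order_per_bit) - 1) &
                              ~((1UL << order_per_bit) - 1)) >> order_per_bit;

        printf("mask=%lu bits=%lu\n", mask, bits);  /* prints: mask=3 bits=3 */
        return 0;
    }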
87 bitmap_no = (pfn - cmr->base_pfn) >> cma->order_per_bit; in cma_clear_bitmap()
90 spin_lock_irqsave(&cma->lock, flags); in cma_clear_bitmap()
91 bitmap_clear(cmr->bitmap, bitmap_no, bitmap_count); in cma_clear_bitmap()
92 cma->available_count += count; in cma_clear_bitmap()
93 spin_unlock_irqrestore(&cma->lock, flags); in cma_clear_bitmap()
114 valid_bit_set = test_bit(CMA_ZONES_VALID, &cma->flags); in cma_validate_zones()
115 if (valid_bit_set || test_bit(CMA_ZONES_INVALID, &cma->flags)) in cma_validate_zones()
118 for (r = 0; r < cma->nranges; r++) { in cma_validate_zones()
119 cmr = &cma->ranges[r]; in cma_validate_zones()
120 base_pfn = cmr->base_pfn; in cma_validate_zones()
128 if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count)) { in cma_validate_zones()
129 set_bit(CMA_ZONES_INVALID, &cma->flags); in cma_validate_zones()
134 set_bit(CMA_ZONES_VALID, &cma->flags); in cma_validate_zones()
146 for (allocrange = 0; allocrange < cma->nranges; allocrange++) { in cma_activate_area()
147 cmr = &cma->ranges[allocrange]; in cma_activate_area()
148 early_pfn[allocrange] = cmr->early_pfn; in cma_activate_area()
149 cmr->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma, cmr), in cma_activate_area()
151 if (!cmr->bitmap) in cma_activate_area()
158 for (r = 0; r < cma->nranges; r++) { in cma_activate_area()
159 cmr = &cma->ranges[r]; in cma_activate_area()
160 if (early_pfn[r] != cmr->base_pfn) { in cma_activate_area()
161 count = early_pfn[r] - cmr->base_pfn; in cma_activate_area()
163 bitmap_set(cmr->bitmap, 0, bitmap_count); in cma_activate_area()
166 for (pfn = early_pfn[r]; pfn < cmr->base_pfn + cmr->count; in cma_activate_area()
171 spin_lock_init(&cma->lock); in cma_activate_area()
173 mutex_init(&cma->alloc_mutex); in cma_activate_area()
176 INIT_HLIST_HEAD(&cma->mem_head); in cma_activate_area()
177 spin_lock_init(&cma->mem_head_lock); in cma_activate_area()
179 set_bit(CMA_ACTIVATED, &cma->flags); in cma_activate_area()
185 bitmap_free(cma->ranges[r].bitmap); in cma_activate_area()
188 if (!test_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags)) { in cma_activate_area()
190 cmr = &cma->ranges[r]; in cma_activate_area()
191 end_pfn = cmr->base_pfn + cmr->count; in cma_activate_area()
196 totalcma_pages -= cma->count; in cma_activate_area()
197 cma->available_count = cma->count = 0; in cma_activate_area()
198 pr_err("CMA area %s could not be activated\n", cma->name); in cma_activate_area()
214 set_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags); in cma_reserve_pages_on_error()
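cma_reserve_pages_on_error() only sets the flag tested at line 188 above: when it is set, a failed activation keeps the whole memblock reservation instead of freeing the range page by page. A minimal usage sketch (the handle my_cma is assumed, not from the source):

    /* Keep the raw reservation if activation later fails, e.g. because
     * parts of the area were already handed out via cma_reserve_early(). */
    cma_reserve_pages_on_error(my_cma);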
225 return -ENOSPC; in cma_new_area()
236 snprintf(cma->name, CMA_MAX_NAME, "%s", name); in cma_new_area()
238 snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count); in cma_new_area()
240 cma->available_count = cma->count = size >> PAGE_SHIFT; in cma_new_area()
241 cma->order_per_bit = order_per_bit; in cma_new_area()
243 totalcma_pages += cma->count; in cma_new_area()
250 totalcma_pages -= cma->count; in cma_drop_area()
251 cma_area_count--; in cma_drop_area()
255 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
276 return -EINVAL; in cma_init_reserved_mem()
284 return -EINVAL; in cma_init_reserved_mem()
289 return -EINVAL; in cma_init_reserved_mem()
295 cma->ranges[0].base_pfn = PFN_DOWN(base); in cma_init_reserved_mem()
296 cma->ranges[0].early_pfn = PFN_DOWN(base); in cma_init_reserved_mem()
297 cma->ranges[0].count = cma->count; in cma_init_reserved_mem()
298 cma->nranges = 1; in cma_init_reserved_mem()
299 cma->nid = NUMA_NO_NODE; in cma_init_reserved_mem()
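cma_init_reserved_mem() performs no allocation itself; the checks around lines 276 to 289 only validate that the caller's range is suitably aligned before the single range is recorded. A hedged call-site sketch for wrapping memory that was reserved elsewhere (names prefixed example_ are mine):

    #include <linux/cma.h>

    static struct cma *example_cma;

    /* Sketch: wrap an already-reserved range (memblock or a DT
     * reserved-memory node) in a struct cma. base and size must be
     * CMA_MIN_ALIGNMENT_BYTES aligned or the call returns -EINVAL. */
    static int __init example_wrap(phys_addr_t base, phys_addr_t size)
    {
        return cma_init_reserved_mem(base, size, 0 /* order_per_bit */,
                                     "example-rmem", &example_cma);
    }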
324 return mlp->size > mrp->size; in revsizecmp()
330 return mlp->base < mrp->base; in basecmp()
345 list_add(&mrp->list, ranges); in list_insert_sorted()
352 __list_add(&mrp->list, mlp->list.prev, &mlp->list); in list_insert_sorted()
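revsizecmp() and basecmp() are the two orderings used by list_insert_sorted(): descending size when picking donor ranges, ascending base for the final layout. A standalone sketch of the same insert-before-first-greater pattern on a list_head (struct and function names are mine):

    #include <linux/list.h>
    #include <linux/types.h>

    struct memrange {
        struct list_head list;
        phys_addr_t base, size;
    };

    /* Insert 'new' before the first element the comparator orders after
     * it; fall back to the tail, as list_insert_sorted() does above. */
    static void insert_sorted(struct list_head *head, struct memrange *new,
                              bool (*cmp)(struct memrange *l, struct memrange *r))
    {
        struct memrange *cur;

        list_for_each_entry(cur, head, list) {
            if (cmp(new, cur)) {
                __list_add(&new->list, cur->list.prev, &cur->list);
                return;
            }
        }
        list_add_tail(&new->list, head);
    }

    /* basecmp() analogue: ascending physical base address. */
    static bool base_lt(struct memrange *l, struct memrange *r)
    {
        return l->base < r->base;
    }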
359 phys_addr_t highmem_start = __pa(high_memory - 1) + 1; in cma_fixed_reserve()
368 return -EINVAL; in cma_fixed_reserve()
374 return -EBUSY; in cma_fixed_reserve()
386 * If there is enough memory, try a bottom-up allocation first. in cma_alloc_mem()
407 phys_addr_t highmem = __pa(high_memory - 1) + 1; in cma_alloc_mem()
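cma_alloc_mem() first tries a bottom-up memblock allocation (on 64-bit, above the first 4 GiB so DMA/DMA32 zones stay usable), placing the area near the start of the node, then falls back to the usual top-down search. A condensed sketch of that policy; the memblock_alloc_range_nid(size, align, start, end, nid, exact_nid) signature is my assumption, and size/align/base/limit/nid are supplied by the caller:

    /* Sketch: bottom-up first, top-down fallback. Illustrative only. */
    phys_addr_t addr = 0;

    if (!memblock_bottom_up() && memblock_end_of_DRAM() >= SZ_4G + size) {
        memblock_set_bottom_up(true);
        addr = memblock_alloc_range_nid(size, align, SZ_4G, limit, nid, true);
        memblock_set_bottom_up(false);
    }
    if (!addr)  /* fall back to the normal top-down search */
        addr = memblock_alloc_range_nid(size, align, base, limit, nid, true);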
444 return -ENOSPC; in __cma_declare_contiguous_nid()
448 return -EINVAL; in __cma_declare_contiguous_nid()
451 return -EINVAL; in __cma_declare_contiguous_nid()
458 if (fixed && base & (alignment - 1)) { in __cma_declare_contiguous_nid()
461 return -EINVAL; in __cma_declare_contiguous_nid()
465 limit &= ~(alignment - 1); in __cma_declare_contiguous_nid()
472 return -EINVAL; in __cma_declare_contiguous_nid()
486 return -EINVAL; in __cma_declare_contiguous_nid()
497 return -ENOMEM; in __cma_declare_contiguous_nid()
512 (*res_cma)->nid = nid; in __cma_declare_contiguous_nid()
547 if (ret != -ENOMEM) in cma_declare_contiguous_multi()
558 * - @align is a power of 2 in cma_declare_contiguous_multi()
559 * - @align is >= pageblock alignment in cma_declare_contiguous_multi()
560 * - @size is aligned to @align and to @order_per_bit in cma_declare_contiguous_multi()
590 size = end - start; in cma_declare_contiguous_multi()
596 pr_debug("consider %016llx - %016llx\n", (u64)start, (u64)end); in cma_declare_contiguous_multi()
604 * not, re-use the smallest element. in cma_declare_contiguous_multi()
611 if (size < mrp->size) in cma_declare_contiguous_multi()
613 list_del(&mrp->list); in cma_declare_contiguous_multi()
614 sizesum -= mrp->size; in cma_declare_contiguous_multi()
615 pr_debug("deleted %016llx - %016llx from the list\n", in cma_declare_contiguous_multi()
616 (u64)mrp->base, (u64)mrp->base + size); in cma_declare_contiguous_multi()
618 mrp->base = start; in cma_declare_contiguous_multi()
619 mrp->size = size; in cma_declare_contiguous_multi()
625 pr_debug("added %016llx - %016llx to the list\n", in cma_declare_contiguous_multi()
626 (u64)mrp->base, (u64)mrp->base + size); in cma_declare_contiguous_multi()
636 ret = -ENOMEM; in cma_declare_contiguous_multi()
641 * Found ranges that provide enough combined space. in cma_declare_contiguous_multi()
643 * want to mimic a bottom-up memblock allocation. in cma_declare_contiguous_multi()
650 sizesum += mlp->size; in cma_declare_contiguous_multi()
663 size = min(sizeleft, mlp->size); in cma_declare_contiguous_multi()
664 if (memblock_reserve(mlp->base, size)) { in cma_declare_contiguous_multi()
674 pr_debug("created region %d: %016llx - %016llx\n", in cma_declare_contiguous_multi()
675 nr, (u64)mlp->base, (u64)mlp->base + size); in cma_declare_contiguous_multi()
676 cmrp = &cma->ranges[nr++]; in cma_declare_contiguous_multi()
677 cmrp->base_pfn = PHYS_PFN(mlp->base); in cma_declare_contiguous_multi()
678 cmrp->early_pfn = cmrp->base_pfn; in cma_declare_contiguous_multi()
679 cmrp->count = size >> PAGE_SHIFT; in cma_declare_contiguous_multi()
681 sizeleft -= size; in cma_declare_contiguous_multi()
691 memblock_phys_free(mlp->base, mlp->size); in cma_declare_contiguous_multi()
694 ret = -ENOMEM; in cma_declare_contiguous_multi()
698 cma->nranges = nr; in cma_declare_contiguous_multi()
699 cma->nid = nid; in cma_declare_contiguous_multi()
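cma_declare_contiguous_multi() thus ends up with nr discontiguous physical ranges behind one struct cma. A hedged usage sketch; the argument order (total size, per-range alignment, order_per_bit, name, result, nid) is my recollection of how the HugeTLB setup code calls it, so verify against <linux/cma.h>:

    /* Hedged sketch: one logical CMA area stitched from several ranges. */
    struct cma *multi_cma;
    int ret = cma_declare_contiguous_multi(SZ_1G, /* combined size */
                                           PAGE_SIZE << MAX_PAGE_ORDER,
                                           0, "example-multi", &multi_cma,
                                           NUMA_NO_NODE);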
713 * cma_declare_contiguous_nid() - reserve custom contiguous area
717 * @alignment: Alignment for the CMA area, should be power of 2 or zero
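The kerneldoc above belongs to cma_declare_contiguous_nid(), the public single-range wrapper around __cma_declare_contiguous_nid() shown earlier. A minimal early-boot sketch (names prefixed example_ are mine):

    #include <linux/cma.h>
    #include <linux/sizes.h>

    static struct cma *example_area;

    /* Sketch: reserve 64 MiB at early boot, placement left to memblock
     * (base, limit and alignment all 0, fixed == false). */
    static int __init example_declare(void)
    {
        return cma_declare_contiguous_nid(0, SZ_64M, 0, 0,
                                          0 /* order_per_bit */,
                                          false /* fixed */, "example",
                                          &example_area, NUMA_NO_NODE);
    }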
760 spin_lock_irq(&cma->lock); in cma_debug_show_areas()
762 for (r = 0; r < cma->nranges; r++) { in cma_debug_show_areas()
763 cmr = &cma->ranges[r]; in cma_debug_show_areas()
768 for_each_clear_bitrange(start, end, cmr->bitmap, nbits) { in cma_debug_show_areas()
769 nr_part = (end - start) << cma->order_per_bit; in cma_debug_show_areas()
774 pr_cont("=> %lu free of %lu total pages\n", cma->available_count, in cma_debug_show_areas()
775 cma->count); in cma_debug_show_areas()
776 spin_unlock_irq(&cma->lock); in cma_debug_show_areas()
785 int ret = -EBUSY; in cma_range_alloc()
797 spin_lock_irq(&cma->lock); in cma_range_alloc()
802 if (count > cma->available_count) { in cma_range_alloc()
803 spin_unlock_irq(&cma->lock); in cma_range_alloc()
806 bitmap_no = bitmap_find_next_zero_area_off(cmr->bitmap, in cma_range_alloc()
810 spin_unlock_irq(&cma->lock); in cma_range_alloc()
814 pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit); in cma_range_alloc()
823 spin_unlock_irq(&cma->lock); in cma_range_alloc()
824 pr_warn_ratelimited("%s: %s: skipping incompatible area [0x%lx-0x%lx]", in cma_range_alloc()
825 __func__, cma->name, pfn, pfn + count - 1); in cma_range_alloc()
829 bitmap_set(cmr->bitmap, bitmap_no, bitmap_count); in cma_range_alloc()
830 cma->available_count -= count; in cma_range_alloc()
836 spin_unlock_irq(&cma->lock); in cma_range_alloc()
838 mutex_lock(&cma->alloc_mutex); in cma_range_alloc()
840 mutex_unlock(&cma->alloc_mutex); in cma_range_alloc()
845 if (ret != -EBUSY) in cma_range_alloc()
851 trace_cma_alloc_busy_retry(cma->name, pfn, page, count, align); in cma_range_alloc()
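Read together, lines 797 to 851 implement claim-then-migrate: a bitmap run is claimed under cma->lock, the lock is dropped, and the actual page migration runs under alloc_mutex; -EBUSY releases the bits and retries further into the range. A condensed paraphrase, not a verbatim quote (several locals and labels fall between the grep hits, and the third argument of alloc_contig_range() is version-dependent: MIGRATE_CMA in older trees, an ACR_* flag in newer ones):

    for (;;) {
        spin_lock_irq(&cma->lock);
        if (count > cma->available_count) {
            spin_unlock_irq(&cma->lock);
            break;                          /* range exhausted */
        }
        bitmap_no = bitmap_find_next_zero_area_off(cmr->bitmap, bitmap_maxno,
                                                   start, bitmap_count,
                                                   mask, offset);
        if (bitmap_no >= bitmap_maxno) {
            spin_unlock_irq(&cma->lock);
            break;                          /* no suitable free run */
        }
        bitmap_set(cmr->bitmap, bitmap_no, bitmap_count);
        cma->available_count -= count;
        spin_unlock_irq(&cma->lock);        /* bits claimed; safe to drop */

        pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit);
        mutex_lock(&cma->alloc_mutex);
        ret = alloc_contig_range(pfn, pfn + count, ACR_FLAGS_CMA, gfp);
        mutex_unlock(&cma->alloc_mutex);
        if (ret != -EBUSY)
            break;                          /* success or hard error */

        cma_clear_bitmap(cma, cmr, pfn, count); /* transient pin; retry */
        start = bitmap_no + mask + 1;           /* shift the search window */
    }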
863 int ret = -ENOMEM, r; in __cma_alloc()
865 const char *name = cma ? cma->name : NULL; in __cma_alloc()
867 if (!cma || !cma->count) in __cma_alloc()
871 (void *)cma, cma->name, count, align); in __cma_alloc()
876 trace_cma_alloc_start(name, count, cma->available_count, cma->count, align); in __cma_alloc()
878 for (r = 0; r < cma->nranges; r++) { in __cma_alloc()
881 ret = cma_range_alloc(cma, &cma->ranges[r], count, align, in __cma_alloc()
883 if (ret != -EBUSY || page) in __cma_alloc()
898 pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n", in __cma_alloc()
899 __func__, cma->name, count, ret); in __cma_alloc()
918 * cma_alloc() - allocate pages from contiguous area
953 if (!cma || !pages || count > cma->count) in cma_pages_valid()
959 for (r = 0; r < cma->nranges; r++) { in cma_pages_valid()
960 cmr = &cma->ranges[r]; in cma_pages_valid()
961 end = cmr->base_pfn + cmr->count; in cma_pages_valid()
962 if (pfn >= cmr->base_pfn && pfn < end) { in cma_pages_valid()
976 * cma_release() - release allocated pages
1000 for (r = 0; r < cma->nranges; r++) { in cma_release()
1001 cmr = &cma->ranges[r]; in cma_release()
1002 if (pfn >= cmr->base_pfn && in cma_release()
1003 pfn < (cmr->base_pfn + cmr->count)) { in cma_release()
1004 VM_BUG_ON(end_pfn > cmr->base_pfn + cmr->count); in cma_release()
1009 if (r == cma->nranges) in cma_release()
1015 trace_cma_release(cma->name, pfn, pages, count); in cma_release()
1025 return cma_release(cma, &folio->page, folio_nr_pages(folio)); in cma_free_folio()
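These three form the runtime API: cma_alloc() hands out a contiguous page range, cma_release() returns it, and cma_free_folio() (line 1025) is the folio wrapper. A minimal driver-style sketch:

    #include <linux/cma.h>

    static int example_use(struct cma *cma)
    {
        struct page *page;

        /* 16 pages, aligned to 2^4 pages; no_warn == false. */
        page = cma_alloc(cma, 1 << 4, 4, false);
        if (!page)
            return -ENOMEM;

        /* ... use the physically contiguous range ... */

        if (!cma_release(cma, page, 1 << 4))
            pr_warn("example: pages did not belong to this CMA area\n");
        return 0;
    }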
1048 for (r = 0; r < cma->nranges; r++) { in cma_intersects()
1049 cmr = &cma->ranges[r]; in cma_intersects()
1051 rstart = PFN_PHYS(cmr->base_pfn); in cma_intersects()
1052 rend = PFN_PHYS(cmr->base_pfn + cmr->count); in cma_intersects()
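cma_intersects() reports whether [start, end) overlaps any of the area's physical ranges; powerpc's fadump uses it to keep its crash-dump window clear of CMA. A one-check fragment (window_start/window_end are mine):

    /* Sketch: reject a candidate physical window that overlaps this area. */
    if (cma_intersects(cma, window_start, window_end))
        return -EBUSY;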
1066 * system is single-threaded, so there is no locking. The alignment
1067 * checking is restrictive - only pageblock-aligned areas
1076 * in the area properly, since this just points to memblock-allocated
1082 * the responsibility of the caller (e.g. like normal memblock-allocated
1092 if (!cma || !cma->count) in cma_reserve_early()
1097 if (test_bit(CMA_ACTIVATED, &cma->flags)) in cma_reserve_early()
1103 if (!IS_ALIGNED(size, (PAGE_SIZE << cma->order_per_bit))) in cma_reserve_early()
1108 if (size > cma->available_count) in cma_reserve_early()
1111 for (r = 0; r < cma->nranges; r++) { in cma_reserve_early()
1112 cmr = &cma->ranges[r]; in cma_reserve_early()
1113 available = cmr->count - (cmr->early_pfn - cmr->base_pfn); in cma_reserve_early()
1115 ret = phys_to_virt(PFN_PHYS(cmr->early_pfn)); in cma_reserve_early()
1116 cmr->early_pfn += size; in cma_reserve_early()
1117 cma->available_count -= size; in cma_reserve_early()
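cma_reserve_early() hands out memory from a declared but not yet activated area by advancing early_pfn (line 1116); cma_activate_area() later back-fills the bitmap for the consumed head of each range (lines 160 to 163). In the full source, size is converted from bytes to pages between the hits at lines 1103 and 1108, which is why line 1108 can compare it against available_count. A hedged usage sketch (example_area is mine; the return value is a direct-map pointer or NULL):

    /* Early, pre-activation carve-out; size is in bytes and must be a
     * multiple of PAGE_SIZE << order_per_bit (line 1103). */
    void *buf = cma_reserve_early(example_area, SZ_2M);
    if (!buf)
        pr_warn("early CMA carve-out failed\n");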