Lines matching the whole word "area"
337 struct vm_struct *area; in ioremap_page_range() local
339 area = find_vm_area((void *)addr); in ioremap_page_range()
340 if (!area || !(area->flags & VM_IOREMAP)) { in ioremap_page_range()
344 if (addr != (unsigned long)area->addr || in ioremap_page_range()
345 (void *)end != area->addr + get_vm_area_size(area)) { in ioremap_page_range()
347 addr, end, (long)area->addr, in ioremap_page_range()
348 (long)area->addr + get_vm_area_size(area)); in ioremap_page_range()
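The ioremap_page_range() matches above (337-348) show it refusing to map anything that is not exactly a VM_IOREMAP area returned by find_vm_area(). Below is a minimal, hedged sketch of the caller side that satisfies those checks, loosely modeled on the generic ioremap path; my_ioremap() is a hypothetical name and real implementations add more validation around the prot and size handling.

#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical helper: map a physical range into vmalloc space. */
static void __iomem *my_ioremap(phys_addr_t phys, size_t size, pgprot_t prot)
{
        unsigned long offset = phys & ~PAGE_MASK;
        struct vm_struct *area;
        unsigned long addr;

        phys -= offset;
        size = PAGE_ALIGN(size + offset);

        /* Reserve virtual space tagged VM_IOREMAP, as the checks above expect. */
        area = get_vm_area_caller(size, VM_IOREMAP, __builtin_return_address(0));
        if (!area)
                return NULL;

        addr = (unsigned long)area->addr;
        /* [addr, addr + size) exactly covers the reserved area. */
        if (ioremap_page_range(addr, addr + size, phys, prot)) {
                free_vm_area(area);
                return NULL;
        }

        return (void __iomem *)(addr + offset);
}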
499 * @addr: start of the VM area to unmap
500 * @end: end of the VM area to unmap (non-inclusive)
686 * @addr: start of the VM area to map
687 * @end: end of the VM area to map (non-inclusive)
706 static int check_sparse_vm_area(struct vm_struct *area, unsigned long start, in check_sparse_vm_area() argument
710 if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS)) in check_sparse_vm_area()
712 if (WARN_ON_ONCE(area->flags & VM_NO_GUARD)) in check_sparse_vm_area()
714 if (WARN_ON_ONCE(!(area->flags & VM_SPARSE))) in check_sparse_vm_area()
718 if (start < (unsigned long)area->addr || in check_sparse_vm_area()
719 (void *)end > area->addr + get_vm_area_size(area)) in check_sparse_vm_area()
726 * @area: vm_area
731 int vm_area_map_pages(struct vm_struct *area, unsigned long start, in vm_area_map_pages() argument
736 err = check_sparse_vm_area(area, start, end); in vm_area_map_pages()
745 * @area: vm_area
749 void vm_area_unmap_pages(struct vm_struct *area, unsigned long start, in vm_area_unmap_pages() argument
752 if (check_sparse_vm_area(area, start, end)) in vm_area_unmap_pages()
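check_sparse_vm_area() (706-719) only lets vm_area_map_pages()/vm_area_unmap_pages() touch ranges that lie wholly inside an area created with VM_SPARSE. A hedged sketch of that usage pattern follows, assuming get_vm_area() accepts VM_SPARSE for the reservation and that the caller tracks its own pages; all names here are hypothetical.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/vmalloc.h>

/* Hypothetical demo state: one big sparse reservation, populated on demand. */
static struct vm_struct *sparse_area;

static int sparse_demo_map_one(unsigned long offset)
{
        unsigned long start, end;
        struct page *page;
        int err;

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page)
                return -ENOMEM;

        start = (unsigned long)sparse_area->addr + offset;
        end = start + PAGE_SIZE;

        /* Rejected (with a warning) unless [start, end) sits inside the VM_SPARSE area. */
        err = vm_area_map_pages(sparse_area, start, end, &page);
        if (err)
                __free_page(page);
        return err;
}

static void sparse_demo_unmap_one(unsigned long offset)
{
        unsigned long start = (unsigned long)sparse_area->addr + offset;

        /* Tears down the PTEs only; freeing the page stays the caller's job. */
        vm_area_unmap_pages(sparse_area, start, start + PAGE_SIZE);
}

static int sparse_demo_init(void)
{
        /* Reserve 16 MiB of virtual space; nothing is mapped yet. */
        sparse_area = get_vm_area(SZ_16M, VM_SPARSE);
        if (!sparse_area)
                return -ENOMEM;
        return sparse_demo_map_one(0);
}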
876 * find the lowest match of a free area.
1413 * free area is inserted. If VA has been merged, it is
1461 /* Point to the new merged area. */ in __merge_or_add_vmap_area()
1492 /* Point to the new merged area. */ in __merge_or_add_vmap_area()
1813 * Returns a start address of the newly allocated area, if success.
1880 * when the fit type of the free area is NE_FIT_TYPE. It guarantees that in preload_this_cpu_lock()
2401 * Free a vmap area, caller ensuring that the area has been unmapped,
2438 * Free and unmap a vmap area
2549 #define VMAP_RAM 0x1 /* indicates vm_map_ram area */
3096 * vm_area_add_early - add vmap area early during boot
3099 * This function is used to add a fixed kernel vm area to vmlist before
3122 * vm_area_register_early - register vmap area early during boot
3126 * This function is used to register a kernel vm area before
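vm_area_add_early()/vm_area_register_early() (3096, 3122) take a caller-owned, usually static vm_struct and link it into vmlist before the normal vmalloc machinery is up. A hedged sketch follows, assuming a one-page early fixed mapping is wanted; the variable and function names are hypothetical.

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical early window, registered before vmalloc_init() runs. */
static struct vm_struct early_console_vm = {
        .flags = VM_IOREMAP,
        .size  = PAGE_SIZE,
};

void __init reserve_early_console_window(void)
{
        /* Picks an address for us and adds the entry to vmlist. */
        vm_area_register_early(&early_console_vm, PAGE_SIZE);

        /* early_console_vm.addr is now valid and stays reserved after boot. */
}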
3170 struct vm_struct *area; in __get_vm_area_node() local
3182 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); in __get_vm_area_node()
3183 if (unlikely(!area)) in __get_vm_area_node()
3189 area->flags = flags; in __get_vm_area_node()
3190 area->caller = caller; in __get_vm_area_node()
3191 area->requested_size = requested_size; in __get_vm_area_node()
3193 va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area); in __get_vm_area_node()
3195 kfree(area); in __get_vm_area_node()
3208 area->addr = kasan_unpoison_vmalloc(area->addr, requested_size, in __get_vm_area_node()
3211 return area; in __get_vm_area_node()
3223 * get_vm_area - reserve a contiguous kernel virtual area
3224 * @size: size of the area
3227 * Search an area of @size in the kernel virtual mapping area,
3228 * and reserve it for our purposes. Returns the area descriptor
3231 * Return: the area descriptor on success or %NULL on failure.
3250 * find_vm_area - find a continuous kernel virtual area
3253 * Search for the kernel VM area starting at @addr, and return it.
3257 * Return: the area descriptor on success or %NULL on failure.
3271 * remove_vm_area - find and remove a continuous kernel virtual area
3274 * Search for the kernel VM area starting at @addr, and remove it.
3275 * This function returns the found VM area, but using it is NOT safe
3278 * Return: the area descriptor on success or %NULL on failure.
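The kerneldoc fragments for get_vm_area() (3223-3231), find_vm_area() (3250-3257) and remove_vm_area() (3271-3278) describe the reserve/lookup/remove trio, and free_vm_area() further down (4614-4619) is remove plus kfree of the descriptor. A small hedged sketch of that lifecycle, assuming nothing is ever mapped into the reservation; the demo function name is made up.

#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void vm_area_lifecycle_demo(void)
{
        struct vm_struct *area, *found;

        /* Reserve kernel virtual address space only; no pages are mapped. */
        area = get_vm_area(2 * PAGE_SIZE, VM_IOREMAP);
        if (!area)
                return;

        /* Lookup by start address hands back the same descriptor. */
        found = find_vm_area(area->addr);
        WARN_ON(found != area);

        /* free_vm_area() is remove_vm_area() plus kfree() of the descriptor. */
        free_vm_area(area);
}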
3305 static inline void set_area_direct_map(const struct vm_struct *area, in set_area_direct_map() argument
3311 for (i = 0; i < area->nr_pages; i++) in set_area_direct_map()
3312 if (page_address(area->pages[i])) in set_area_direct_map()
3313 set_direct_map(area->pages[i]); in set_area_direct_map()
3319 static void vm_reset_perms(struct vm_struct *area) in vm_reset_perms() argument
3322 unsigned int page_order = vm_area_page_order(area); in vm_reset_perms()
3330 for (i = 0; i < area->nr_pages; i += 1U << page_order) { in vm_reset_perms()
3331 unsigned long addr = (unsigned long)page_address(area->pages[i]); in vm_reset_perms()
3348 set_area_direct_map(area, set_direct_map_invalid_noflush); in vm_reset_perms()
3350 set_area_direct_map(area, set_direct_map_default_noflush); in vm_reset_perms()
3390 * Free the virtually continuous memory area starting at @addr, as obtained
3422 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", in vfree()
3454 * Free the virtually contiguous memory area starting at @addr,
3470 WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n", in vunmap()
3491 * Return: the address of the area or %NULL on failure
3496 struct vm_struct *area; in vmap() local
3516 area = get_vm_area_caller(size, flags, __builtin_return_address(0)); in vmap()
3517 if (!area) in vmap()
3520 addr = (unsigned long)area->addr; in vmap()
3523 vunmap(area->addr); in vmap()
3528 area->pages = pages; in vmap()
3529 area->nr_pages = count; in vmap()
3531 return area->addr; in vmap()
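The vmap() matches (3496-3531) show it reserving an area via get_vm_area_caller() and, only when VM_MAP_PUT_PAGES is set, taking ownership of the caller's page array (3528-3529). A hedged round-trip sketch with caller-owned pages follows; the demo name and the 0xab fill pattern are arbitrary.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#define DEMO_NR_PAGES 4

static int vmap_demo(void)
{
        struct page *pages[DEMO_NR_PAGES];
        void *vaddr = NULL;
        int i;

        for (i = 0; i < DEMO_NR_PAGES; i++) {
                pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!pages[i])
                        goto out_pages;
        }

        /* Physically scattered pages become one virtually contiguous range. */
        vaddr = vmap(pages, DEMO_NR_PAGES, VM_MAP, PAGE_KERNEL);
        if (vaddr) {
                memset(vaddr, 0xab, DEMO_NR_PAGES * PAGE_SIZE);
                /* Without VM_MAP_PUT_PAGES, vunmap() leaves the pages to us. */
                vunmap(vaddr);
        }

out_pages:
        while (i--)
                __free_page(pages[i]);
        return vaddr ? 0 : -ENOMEM;
}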
3570 struct vm_struct *area; in vmap_pfn() local
3572 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP, in vmap_pfn()
3574 if (!area) in vmap_pfn()
3576 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, in vmap_pfn()
3578 free_vm_area(area); in vmap_pfn()
3582 flush_cache_vmap((unsigned long)area->addr, in vmap_pfn()
3583 (unsigned long)area->addr + count * PAGE_SIZE); in vmap_pfn()
3585 return area->addr; in vmap_pfn()
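vmap_pfn() (3570-3585) reserves a VM_IOREMAP area and fills it via apply_to_page_range() from a caller-supplied PFN array, which suits memory that has no struct page behind it (device BARs, for example). A short hedged sketch; the helper name and the write-combining choice are assumptions.

#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Map 'count' pre-validated device PFNs into one kernel virtual range. */
static void __iomem *map_device_pfns(unsigned long *pfns, unsigned int count)
{
        /* The mapping is later torn down with vunmap() on the returned address. */
        return (void __iomem *)vmap_pfn(pfns, count,
                                        pgprot_writecombine(PAGE_KERNEL));
}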
3678 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, in __vmalloc_area_node() argument
3684 unsigned long addr = (unsigned long)area->addr; in __vmalloc_area_node()
3685 unsigned long size = get_vm_area_size(area); in __vmalloc_area_node()
3699 area->pages = __vmalloc_node_noprof(array_size, 1, nested_gfp, node, in __vmalloc_area_node()
3700 area->caller); in __vmalloc_area_node()
3702 area->pages = kmalloc_node_noprof(array_size, nested_gfp, node); in __vmalloc_area_node()
3705 if (!area->pages) { in __vmalloc_area_node()
3709 free_vm_area(area); in __vmalloc_area_node()
3713 set_vm_area_page_order(area, page_shift - PAGE_SHIFT); in __vmalloc_area_node()
3714 page_order = vm_area_page_order(area); in __vmalloc_area_node()
3724 area->nr_pages = vm_area_alloc_pages((page_order ? in __vmalloc_area_node()
3726 node, page_order, nr_small_pages, area->pages); in __vmalloc_area_node()
3728 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); in __vmalloc_area_node()
3730 if (gfp_mask & __GFP_ACCOUNT && area->nr_pages) in __vmalloc_area_node()
3731 mod_memcg_page_state(area->pages[0], MEMCG_VMALLOC, in __vmalloc_area_node()
3732 area->nr_pages); in __vmalloc_area_node()
3738 if (area->nr_pages != nr_small_pages) { in __vmalloc_area_node()
3752 area->nr_pages * PAGE_SIZE); in __vmalloc_area_node()
3766 ret = vmap_pages_range(addr, addr + size, prot, area->pages, in __vmalloc_area_node()
3780 area->nr_pages * PAGE_SIZE); in __vmalloc_area_node()
3784 return area->addr; in __vmalloc_area_node()
3787 vfree(area->addr); in __vmalloc_area_node()
3795 * @start: vm area range start
3796 * @end: vm area range end
3799 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
3817 * Return: the address of the area or %NULL on failure
3824 struct vm_struct *area; in __vmalloc_node_range_noprof() local
3857 area = __get_vm_area_node(size, align, shift, VM_ALLOC | in __vmalloc_node_range_noprof()
3860 if (!area) { in __vmalloc_node_range_noprof()
3897 ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node); in __vmalloc_node_range_noprof()
3914 area->addr = kasan_unpoison_vmalloc(area->addr, size, kasan_flags); in __vmalloc_node_range_noprof()
3921 clear_vm_uninitialized_flag(area); in __vmalloc_node_range_noprof()
3924 kmemleak_vmalloc(area, PAGE_ALIGN(size), gfp_mask); in __vmalloc_node_range_noprof()
3926 return area->addr; in __vmalloc_node_range_noprof()
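The doc fragments at 3795-3817 belong to __vmalloc_node_range(), the workhorse that __vmalloc_area_node() above serves: it allocates pages and maps them between a caller-chosen @start and @end with extra @vm_flags. A hedged sketch of calling it over the default vmalloc window, which is roughly what the simpler vmalloc() wrappers end up doing; treat the exact argument list as approximate, since it has shifted across kernel versions.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/numa.h>
#include <linux/vmalloc.h>

static void *range_alloc_demo(unsigned long size)
{
        /* Page-backed allocation anywhere in [VMALLOC_START, VMALLOC_END). */
        return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
                                    GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
                                    __builtin_return_address(0));
}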
4043 * The resulting memory area is zeroed so it can be mapped to userspace
4144 WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p); in vrealloc_node_align_noprof()
4151 "vrealloc() has mismatched area vs requested sizes (%p)\n", p)) in vrealloc_node_align_noprof()
4236 * The resulting memory area is 32bit addressable and zeroed so it can be
4295 * To do safe access to this _mapped_ area, we need a lock. But in aligned_vread_iter()
4334 * If it's area created by vm_map_ram() interface directly, but in vmap_ram_vread_iter()
4344 * The area is split into regions and tracked with vmap_block; read out in vmap_ram_vread_iter()
4404 * vread_iter() - read vmalloc area in a safe way to an iterator.
4409 * This function checks that addr is a valid vmalloc'ed area, and
4410 * copies data from that area to a given buffer. If the given memory range
4412 * proper area of @buf. If there are memory holes, they'll be zero-filled.
4413 * An IOREMAP area is treated as a memory hole and no copy is done.
4416 * vm_struct area, returns 0. @buf should be a kernel buffer.
4419 * should know the vmalloc() area is valid and can use memcpy().
4420 * This is for routines which have to access the vmalloc area without
4425 * include any intersection with a valid vmalloc area
4461 * VMAP_BLOCK indicates a sub-type of vm_map_ram area, need in vread_iter()
4500 else /* IOREMAP | SPARSE area is treated as memory hole */ in vread_iter()
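The vread_iter() kerneldoc (4404-4425) promises safe copying out of vmalloc space, with holes zero-filled and IOREMAP/SPARSE ranges skipped as holes (4500). A hedged sketch of feeding it a kernel buffer through an iov_iter; the helper name is made up, and real users such as /proc/kcore drive it with a user-backed iterator instead.

#include <linux/uio.h>
#include <linux/vmalloc.h>

/* Copy 'count' bytes starting at vmalloc address 'src' into 'buf'. */
static long read_vmalloc_to_kbuf(void *buf, const char *src, size_t count)
{
        struct kvec kvec = { .iov_base = buf, .iov_len = count };
        struct iov_iter iter;

        iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);

        /* Returns how far the copy got; memory holes arrive as zeroes. */
        return vread_iter(&iter, src, count);
}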
4534 * @size: size of map area
4538 * This function checks that @kaddr is a valid vmalloc'ed area,
4549 struct vm_struct *area; in remap_vmalloc_range_partial() local
4561 area = find_vm_area(kaddr); in remap_vmalloc_range_partial()
4562 if (!area) in remap_vmalloc_range_partial()
4565 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) in remap_vmalloc_range_partial()
4569 end_index > get_vm_area_size(area)) in remap_vmalloc_range_partial()
4599 * This function checks that addr is a valid vmalloc'ed area, and
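remap_vmalloc_range_partial() (4549-4569) insists the kernel buffer lives in an area flagged VM_USERMAP or VM_DMA_COHERENT and that the requested window fits inside it. A hedged sketch of the usual driver pattern through the non-partial wrapper documented just above, assuming the buffer came from vmalloc_user() (which sets VM_USERMAP); the names are hypothetical.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *demo_buf;  /* allocated elsewhere with vmalloc_user() */

/* .mmap handler: expose the vmalloc_user() buffer to userspace. */
static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* Fails unless demo_buf's area carries VM_USERMAP and is large enough. */
        return remap_vmalloc_range(vma, demo_buf, vma->vm_pgoff);
}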
4614 void free_vm_area(struct vm_struct *area) in free_vm_area() argument
4617 ret = remove_vm_area(area->addr); in free_vm_area()
4618 BUG_ON(ret != area); in free_vm_area()
4619 kfree(area); in free_vm_area()
4633 * Returns: vmap_area if it is found. If there is no such area
4693 * @offsets: array containing offset of each area
4694 * @sizes: array containing size of each area
4711 * base address is pulled down to fit the area. Scanning is repeated till
4723 int area, area2, last_area, term_area; in pcpu_get_vm_areas() local
4729 for (last_area = 0, area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4730 start = offsets[area]; in pcpu_get_vm_areas()
4731 end = start + sizes[area]; in pcpu_get_vm_areas()
4734 BUG_ON(!IS_ALIGNED(offsets[area], align)); in pcpu_get_vm_areas()
4735 BUG_ON(!IS_ALIGNED(sizes[area], align)); in pcpu_get_vm_areas()
4737 /* detect the area with the highest address */ in pcpu_get_vm_areas()
4739 last_area = area; in pcpu_get_vm_areas()
4741 for (area2 = area + 1; area2 < nr_vms; area2++) { in pcpu_get_vm_areas()
4760 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4761 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); in pcpu_get_vm_areas()
4762 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); in pcpu_get_vm_areas()
4763 if (!vas[area] || !vms[area]) in pcpu_get_vm_areas()
4769 /* start scanning - we scan from the top, begin with the last area */ in pcpu_get_vm_areas()
4770 area = term_area = last_area; in pcpu_get_vm_areas()
4771 start = offsets[area]; in pcpu_get_vm_areas()
4772 end = start + sizes[area]; in pcpu_get_vm_areas()
4797 term_area = area; in pcpu_get_vm_areas()
4807 term_area = area; in pcpu_get_vm_areas()
4812 * This area fits, move on to the previous one. If in pcpu_get_vm_areas()
4815 area = (area + nr_vms - 1) % nr_vms; in pcpu_get_vm_areas()
4816 if (area == term_area) in pcpu_get_vm_areas()
4819 start = offsets[area]; in pcpu_get_vm_areas()
4820 end = start + sizes[area]; in pcpu_get_vm_areas()
4825 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4828 start = base + offsets[area]; in pcpu_get_vm_areas()
4829 size = sizes[area]; in pcpu_get_vm_areas()
4842 /* Allocated area. */ in pcpu_get_vm_areas()
4843 va = vas[area]; in pcpu_get_vm_areas()
4851 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4852 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area], GFP_KERNEL)) in pcpu_get_vm_areas()
4857 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4858 struct vmap_node *vn = addr_to_node(vas[area]->va_start); in pcpu_get_vm_areas()
4861 insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head); in pcpu_get_vm_areas()
4862 setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC, in pcpu_get_vm_areas()
4873 for (area = 0; area < nr_vms; area++) in pcpu_get_vm_areas()
4874 vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr, in pcpu_get_vm_areas()
4875 vms[area]->size, KASAN_VMALLOC_PROT_NORMAL); in pcpu_get_vm_areas()
4887 while (area--) { in pcpu_get_vm_areas()
4888 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
4889 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
4890 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
4896 vas[area] = NULL; in pcpu_get_vm_areas()
4906 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4907 if (vas[area]) in pcpu_get_vm_areas()
4910 vas[area] = kmem_cache_zalloc( in pcpu_get_vm_areas()
4912 if (!vas[area]) in pcpu_get_vm_areas()
4920 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4921 if (vas[area]) in pcpu_get_vm_areas()
4922 kmem_cache_free(vmap_area_cachep, vas[area]); in pcpu_get_vm_areas()
4924 kfree(vms[area]); in pcpu_get_vm_areas()
4938 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4939 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
4940 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
4941 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
4947 vas[area] = NULL; in pcpu_get_vm_areas()
4948 kfree(vms[area]); in pcpu_get_vm_areas()
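pcpu_get_vm_areas() (4693 onward) is the percpu allocator's backend: it finds one base address such that every requested area lands at its given offset from that base, scanning downward and retrying until all of them fit (4769-4822). A hedged sketch of the call shape with two illustrative groups; the real and only caller is the percpu allocator itself, so treat this purely as an interface illustration.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static int pcpu_vm_areas_demo(void)
{
        /* Two congruent areas; group 1 sits 4 MiB above group 0. */
        unsigned long offsets[] = { 0, 4UL << 20 };
        size_t sizes[] = { PAGE_SIZE, PAGE_SIZE };
        struct vm_struct **vms;

        vms = pcpu_get_vm_areas(offsets, sizes, ARRAY_SIZE(sizes), PAGE_SIZE);
        if (!vms)
                return -ENOMEM;

        /* vms[1]->addr - vms[0]->addr equals the 4 MiB offset difference. */
        pcpu_free_vm_areas(vms, ARRAY_SIZE(sizes));
        return 0;
}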