/linux/lib/
maple_tree.c:

    static unsigned int mas_mt_height(struct ma_state *mas)
    {
        return mt_height(mas->tree);
    }

    static __always_inline void mas_set_err(struct ma_state *mas, long err)
    {
        mas->node = MA_ERROR(err);
        mas->status = ma_error;
    }

    static __always_inline bool mas_is_ptr(const struct ma_state *mas)
    {
        return mas->status == ma_root;
    }

    static __always_inline bool mas_is_start(const struct ma_state *mas)
    {
        return mas->status == ma_start;
    }

    static __always_inline bool mas_is_none(const struct ma_state *mas)
    [all …]
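The helpers above expose the maple state's status and error encoding. A minimal sketch of how a caller typically consumes them, assuming a tree defined elsewhere (e.g. with DEFINE_MTREE(my_tree)); the function name is hypothetical:

    /* Store an object over the inclusive range [10, 20] and turn a failed
     * allocation recorded in the maple state back into an errno. */
    static int store_example(void *obj)
    {
        MA_STATE(mas, &my_tree, 10, 20);
        int ret = 0;

        mas_lock(&mas);
        mas_store_gfp(&mas, obj, GFP_KERNEL);
        if (mas_is_err(&mas))           /* state set via mas_set_err() */
            ret = xa_err(mas.node);     /* decode MA_ERROR() back to an errno */
        mas_unlock(&mas);
        return ret;
    }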
test_maple_tree.c:

    #define mas_dump(mas)    do {} while (0)
    #define mas_wr_dump(mas) do {} while (0)

    in check_rev_find():
        MA_STATE(mas, mt, 0, 0);
        ...
        mas_set(&mas, 1000);
        val = mas_find_rev(&mas, 1000);
        ...
        val = mas_find_rev(&mas, 1000);
        ...
        mas_set(&mas, 999);
        val = mas_find_rev(&mas, 997);
        ...
        mas_set(&mas, 1000);
        val = mas_find_rev(&mas, 900);
    [all …]
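check_rev_find() exercises mas_find_rev(), which searches backwards from the current index down to a minimum. A sketch of the call pattern, assuming a tree named "tree" populated elsewhere:

    /* Find the last entry at or below index 1000, stopping at index 900;
     * returns NULL when nothing exists in [900, 1000]. */
    void *val;
    MA_STATE(mas, &tree, 0, 0);

    rcu_read_lock();
    mas_set(&mas, 1000);            /* position the search start */
    val = mas_find_rev(&mas, 900);  /* on success, mas.index/mas.last hold the range */
    rcu_read_unlock();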
alloc_tag.c:

    in clean_unused_module_areas_locked():
        MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
        ...
        mas_for_each(&mas, val, module_tags.size) {
            ...
            start_tag = (struct alloc_tag *)(module_tags.start_addr + mas.index);
            end_tag = (struct alloc_tag *)(module_tags.start_addr + mas.last);
            ...
            mas_erase(&mas);

    static bool find_aligned_area(struct ma_state *mas, unsigned long section_size,
        ...
        if (!mas_empty_area(mas, 0, section_size - 1, prepend + size)) {
            if (IS_ALIGNED(mas->index + prepend, align))
        ...
        mas_reset(mas);
        if (!mas_empty_area(mas, 0, section_size - 1,
    [all …]
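As in clean_unused_module_areas_locked() above, an iteration sees each entry's full range through mas.index and mas.last. A minimal sketch, assuming a tree populated elsewhere:

    /* Print the inclusive range occupied by every entry. */
    void *entry;
    MA_STATE(mas, &tree, 0, 0);

    rcu_read_lock();
    mas_for_each(&mas, entry, ULONG_MAX)
        pr_info("entry %p spans [%lu, %lu]\n", entry, mas.index, mas.last);
    rcu_read_unlock();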
/linux/include/linux/
maple_tree.h:

    #define mtree_lock_nested(mas, subclass) \
    ...
    struct ma_state *mas;    /* struct member */
    ...
    #define mas_lock(mas)   spin_lock(&((mas)->tree->ma_lock))
    #define mas_lock_nested(mas, subclass) \
            spin_lock_nested(&((mas)->tree->ma_lock), subclass)
    #define mas_unlock(mas) spin_unlock(&((mas)->tree->ma_lock))
    ...
    .mas = ma_state, \
    ...
    void *mas_walk(struct ma_state *mas);
    void *mas_store(struct ma_state *mas, void *entry);
    void *mas_erase(struct ma_state *mas);
    [all …]
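With the advanced API the caller takes the tree lock itself (mas_lock() above is just the tree's spinlock). A sketch of a locked lookup-then-erase, with hypothetical tree and index:

    /* Remove whatever entry covers index 42, returning it (or NULL). */
    void *entry;
    MA_STATE(mas, &tree, 42, 42);

    mas_lock(&mas);
    entry = mas_walk(&mas);     /* also sets mas.index/mas.last to the found range */
    if (entry)
        entry = mas_erase(&mas);
    mas_unlock(&mas);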
mm.h:

    in vma_find():              return mas_find(&vmi->mas, max - 1);
    in vma_next():              return mas_find(&vmi->mas, ULONG_MAX);
    in vma_iter_next_range():   return mas_next_range(&vmi->mas, ULONG_MAX);
    in vma_prev():              return mas_prev(&vmi->mas, 0);
    in vma_iter_clear_gfp():    __mas_set_range(&vmi->mas, start, end - 1);
                                mas_store_gfp(&vmi->mas, NULL, gfp);
                                if (unlikely(mas_is_err(&vmi->mas)))
    in vma_iter_free():         mas_destroy(&vmi->mas);
    in vma_iter_bulk_store():   vmi->mas.index = vma->vm_start;
                                vmi->mas.last = vma->vm_end - 1;
    [all …]
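These inline helpers make the VMA iterator a thin wrapper around a ma_state embedded over mm->mm_mt; for example, a plain read-side walk (sketch; assumes mmap_read_lock(mm) is held):

    /* Count the VMAs in an address space. */
    struct vm_area_struct *vma;
    unsigned long nr_vmas = 0;
    VMA_ITERATOR(vmi, mm, 0);

    for_each_vma(vmi, vma)
        nr_vmas++;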
/linux/include/trace/events/
maple_tree.h:

    TP_PROTO(const char *fn, struct ma_state *mas),
    TP_ARGS(fn, mas),
    ...
    __entry->min = mas->min;
    __entry->max = mas->max;
    __entry->index = mas->index;
    __entry->last = mas->last;
    __entry->node = mas->node;
    ...
    TP_PROTO(const char *fn, struct ma_state *mas),
    TP_ARGS(fn, mas),
    ...
    __entry->min = mas->min;
    [all …]
/linux/mm/
vma.h:

    in vma_iter_store_gfp():
        if (vmi->mas.status != ma_start &&
            ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
        ...
        __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
        mas_store_gfp(&vmi->mas, vma, gfp);
        if (unlikely(mas_is_err(&vmi->mas)))

    void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,

    in vma_prev_limit():            return mas_prev(&vmi->mas, min);
    in vma_iter_config():           __mas_set_range(&vmi->mas, index, last - 1);
    in vma_iter_reset():            mas_reset(&vmi->mas);
    in vma_iter_prev_range_limit(): return mas_prev_range(&vmi->mas, min);
    [all …]
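Note the recurring vm_end - 1: maple tree ranges are inclusive at both ends, so an exclusive [start, end) interval is stored as [start, end - 1]. A sketch of the same convention with the simple API (hypothetical helper):

    /* Store an exclusive [start, end) interval under the tree's own lock. */
    static int store_interval(struct maple_tree *mt, unsigned long start,
                              unsigned long end, void *obj)
    {
        return mtree_store_range(mt, start, end - 1, obj, GFP_KERNEL);
    }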
execmem.c:

    static inline unsigned long mas_range_len(struct ma_state *mas)
    {
        return mas->last - mas->index + 1;
    }

    in execmem_cache_clean():
        MA_STATE(mas, free_areas, 0, ULONG_MAX);
        ...
        mas_for_each(&mas, area, ULONG_MAX) {
            size_t size = mas_range_len(&mas);
            ...
            IS_ALIGNED(mas.index, PMD_SIZE)) {
            ...
            mas_store_gfp(&mas, NULL, GFP_KERNEL);

    in execmem_cache_add_locked():
        MA_STATE(mas, free_areas, addr - 1, addr + 1);
        ...
        area = mas_walk(&mas);
        if (area && mas.last == addr - 1)
    [all …]
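execmem_cache_clean() erases entries mid-iteration by storing NULL over the current range. A sketch of that pattern, assuming the tree lock is held and should_drop() is a hypothetical predicate:

    /* Prune every entry the predicate selects; storing NULL erases it. */
    void *entry;
    MA_STATE(mas, &tree, 0, 0);

    mas_for_each(&mas, entry, ULONG_MAX) {
        if (should_drop(entry))
            mas_store_gfp(&mas, NULL, GFP_KERNEL);
    }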
mmap_lock.c:

    in lock_vma_under_rcu():
        MA_STATE(mas, &mm->mm_mt, address, address);
        ...
        vma = mas_walk(&mas);
        ...
        mas_set(&mas, address);
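Because mm->mm_mt is maintained in RCU mode, lock_vma_under_rcu() can walk it under rcu_read_lock() alone; the core shape, sketched without the VMA locking and retry logic the real function needs:

    /* RCU-safe lookup of the VMA covering "address" (validation omitted). */
    struct vm_area_struct *vma;
    MA_STATE(mas, &mm->mm_mt, address, address);

    rcu_read_lock();
    vma = mas_walk(&mas);   /* NULL if no VMA covers address */
    rcu_read_unlock();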
debug.c:

    in vma_iter_dump_tree():
        mas_dump(&vmi->mas);
        mt_dump(vmi->mas.tree, mt_dump_hex);
vma.c:

    void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
        ...
        unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end);
        mas_set(mas, vma->vm_end);
        free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,

    in do_vmi_align_munmap():
        mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);

    in vms_abort_munmap_vmas():
        struct ma_state *mas = &vms->vmi->mas;
        ...
        mas_set_range(mas, vms->start, vms->end - 1);
        mas_store_gfp(mas, NULL, GFP_KERNEL|__GFP_NOFAIL);

    in __mmap_setup():
        vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
    in __mmap_new_file_vma():
        unmap_region(&vmi->mas, vma, map->prev, map->next);
mmap.c:

    in exit_mmap():
        unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX);
        ...
        free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,

    in dup_mmap():
        mt_clear_in_rcu(vmi.mas.tree);
        ...
        mt_set_in_rcu(vmi.mas.tree);
        ...
        mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);
        mas_store(&vmi.mas, XA_ZERO_ENTRY);
oom_kill.c:

    in __oom_reap_task_mm():
        MA_STATE(mas, &mm->mm_mt, ULONG_MAX, ULONG_MAX);
        ...
        mas_for_each_rev(&mas, vma, 0) {
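Starting the state at ULONG_MAX and using mas_for_each_rev() visits entries highest-first; a sketch against a hypothetical tree:

    /* Walk all entries from the top of the index space downwards. */
    void *entry;
    MA_STATE(mas, &tree, ULONG_MAX, ULONG_MAX);

    rcu_read_lock();
    mas_for_each_rev(&mas, entry, 0)
        pr_info("range [%lu, %lu]\n", mas.index, mas.last);
    rcu_read_unlock();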
memory.c:

    void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
        ...
        next = mas_find(mas, ceiling - 1);
        ...
        next = mas_find(mas, ceiling - 1);

    void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
        ...
        vma = mas_find(mas, tree_end - 1);
internal.h:

    void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
/linux/scripts/gdb/linux/
mapletree.py:

    def mtree_lookup_walk(mas):
        n = mas.node
        ...
        if pivots[offset] >= mas.index:
        ...
        n = mt_slot(mas.tree, slots, offset)
        ...
        mas.reset()
    ...
    mas = Mas(mt, index, index)
    ...
    entry = mas.start()
    if mas.is_none():
    ...
    if mas.is_ptr():
    ...
    entry = mtree_lookup_walk(mas)
    [all …]
/linux/tools/testing/radix-tree/
maple.c:

    static inline void mas_node_walk(struct ma_state *mas, struct maple_node *node,
        ...
        (*range_max) = (*range_min) = mas->index;
        ...
        mas->offset = mas->index = mas->min;
        ...
        prev = mas->min;
        index = mas->index;
        ...
        max = mas->max;
        ...
        mas->offset = offset;
        ...
        mas->max = max;
        mas->min = prev;

    static inline bool mas_descend_walk(struct ma_state *mas,
    [all …]
/linux/tools/testing/vma/
vma_internal.h:

    struct ma_state mas;    /* struct member */
    .mas = { \
    in vma_iter_invalidate():   mas_pause(&vmi->mas);
    in vma_next():              return mas_find(&vmi->mas, ULONG_MAX);
    in vma_find():              return mas_find(&vmi->mas, max - 1);
    in vma_iter_clear_gfp():    __mas_set_range(&vmi->mas, start, end - 1);
                                mas_store_gfp(&vmi->mas, NULL, gfp);
                                if (unlikely(mas_is_err(&vmi->mas)))
    in vma_prev():              return mas_prev(&vmi->mas, 0);
    in vma_iter_set():          mas_set(&vmi->mas, addr);
    [all …]
vma.c:

    (fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))
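The test harness injects failures into mas_preallocate(), which reserves the nodes a later write might need so the store itself cannot fail. A sketch of the preallocate/store protocol (hypothetical names; locking simplified):

    /* Reserve nodes up front, then store without a failure path. */
    static int store_prealloc_example(struct maple_tree *mt, unsigned long first,
                                      unsigned long last, void *obj)
    {
        MA_STATE(mas, mt, first, last);
        int ret;

        mas_lock(&mas);
        ret = mas_preallocate(&mas, obj, GFP_KERNEL);
        if (!ret)
            mas_store_prealloc(&mas, obj);  /* consumes the reservation */
        mas_unlock(&mas);
        return ret;
    }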
/linux/mm/damon/tests/
vaddr-kunit.h:

    in __link_vmas():
        MA_STATE(mas, mt, 0, 0);
        ...
        mas_lock(&mas);
        ...
        mas_set_range(&mas, vmas[i].vm_start, vmas[i].vm_end - 1);
        if (mas_store_gfp(&mas, &vmas[i], GFP_KERNEL))
        ...
        mas_unlock(&mas);
/linux/drivers/iommu/generic_pt/
kunit_iommu_pt.h:

    in unmap_collisions():
        MA_STATE(mas, mt, start, last);
        ...
        mas_for_each(&mas, entry, last) {
            pt_vaddr_t mas_start = mas.index;
            pt_vaddr_t len = (mas.last - mas_start) + 1;
            ...
            mas_erase(&mas);
            mas_pause(&mas);
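unmap_collisions() pauses the iterator after each erase; mas_pause() drops the cached node position so the next loop step re-walks from mas.index. A sketch, with matches() hypothetical:

    /* Erase matching entries, re-walking after each modification. */
    void *entry;
    MA_STATE(mas, &tree, 0, 0);

    mas_for_each(&mas, entry, ULONG_MAX) {
        if (!matches(entry))
            continue;
        mas_erase(&mas);
        mas_pause(&mas);    /* revalidate from mas.index on the next step */
    }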
/linux/kernel/irq/
irqdesc.c:

    in irq_find_free_area():
        MA_STATE(mas, &sparse_irqs, 0, 0);
        ...
        if (mas_empty_area(&mas, from, MAX_SPARSE_IRQS, cnt))
        ...
        return mas.index;

    in irq_insert_desc():
        MA_STATE(mas, &sparse_irqs, irq, irq);
        WARN_ON(mas_store_gfp(&mas, desc, GFP_KERNEL) != 0);

    in delete_irq_desc():
        MA_STATE(mas, &sparse_irqs, irq, irq);
        mas_erase(&mas);
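irq_find_free_area() is a compact gap-search: mas_empty_area() returns 0 when it finds a run of at least cnt free indices in [from, max] and leaves its start in mas.index. A sketch of an allocator in that style (hypothetical helper; caller holds the tree lock):

    /* Find "cnt" consecutive free indices at or above "from". */
    static long find_free_ids(struct maple_tree *mt, unsigned long from,
                              unsigned long cnt)
    {
        MA_STATE(mas, mt, 0, 0);

        if (mas_empty_area(&mas, from, ULONG_MAX, cnt))
            return -ENOSPC;
        return mas.index;   /* first index of the free run */
    }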
/linux/Documentation/core-api/
maple_tree.rst:

    The advanced API is based around the ma_state; this is where the 'mas'
    prefix originates.
    ...
    The maple state keeps track of the range start and end in mas->index and
    mas->last, respectively.

    mas_walk() will walk the tree to the location of mas->index and set
    mas->index and mas->last according to the range for the entry.
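Concretely, a walk reports the whole range the entry occupies, not just the queried index; a sketch assuming [5, 9] was stored earlier as a single entry:

    /* Walking index 7 lands on the entry covering [5, 9]. */
    void *entry;
    MA_STATE(mas, &tree, 7, 7);

    rcu_read_lock();
    entry = mas_walk(&mas);
    /* here mas.index == 5 and mas.last == 9 */
    rcu_read_unlock();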
/linux/drivers/gpu/drm/nouveau/
nouveau_debugfs.c:

    in nouveau_debugfs_gpuva_regions():
        MA_STATE(mas, &uvmm->region_mt, 0, 0);
        ...
        mas_for_each(&mas, reg, ULONG_MAX)
/linux/drivers/media/pci/saa7164/
saa7164-api.c:

    in saa7164_api_set_dif():
        u8 mas;    /* a plain u8 local here, unrelated to the maple tree state */
        ...
        mas = 0xd0;
        ...
        mas = 0xe0;
        ...
        buf[0x0a] = mas;