Lines Matching refs:vb

2693 struct vmap_block *vb; in new_vmap_block() local
2702 vb = kmalloc_node(sizeof(struct vmap_block), gfp_mask, node); in new_vmap_block()
2703 if (unlikely(!vb)) in new_vmap_block()
2711 kfree(vb); in new_vmap_block()
2716 spin_lock_init(&vb->lock); in new_vmap_block()
2717 vb->va = va; in new_vmap_block()
2720 bitmap_zero(vb->used_map, VMAP_BBMAP_BITS); in new_vmap_block()
2721 vb->free = VMAP_BBMAP_BITS - (1UL << order); in new_vmap_block()
2722 vb->dirty = 0; in new_vmap_block()
2723 vb->dirty_min = VMAP_BBMAP_BITS; in new_vmap_block()
2724 vb->dirty_max = 0; in new_vmap_block()
2725 bitmap_set(vb->used_map, 0, (1UL << order)); in new_vmap_block()
2726 INIT_LIST_HEAD(&vb->free_list); in new_vmap_block()
2727 vb->cpu = raw_smp_processor_id(); in new_vmap_block()
2731 err = xa_insert(xa, vb_idx, vb, gfp_mask); in new_vmap_block()
2733 kfree(vb); in new_vmap_block()
2744 vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu); in new_vmap_block()
2746 list_add_tail_rcu(&vb->free_list, &vbq->free); in new_vmap_block()
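
The lines above show new_vmap_block() setting up a fresh block: used_map is zeroed, the first 1UL << order pages are marked used for the caller, free starts at VMAP_BBMAP_BITS minus that first allocation, the dirty window starts empty, and the block is then published in the per-CPU xarray and added to that CPU's free list. A minimal userspace sketch of that initialization invariant follows; struct vb_model, the MODEL-prefixed constants and vb_model_init() are illustrative stand-ins, not kernel code.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define MODEL_BBMAP_BITS 1024UL		/* illustrative; stands in for VMAP_BBMAP_BITS */

/* Simplified model of struct vmap_block: only the accounting fields. */
struct vb_model {
	unsigned long free;		/* pages still available at the tail     */
	unsigned long dirty;		/* pages that were allocated, then freed */
	unsigned long dirty_min, dirty_max;
	uint8_t used[MODEL_BBMAP_BITS];	/* stand-in for the used_map bitmap      */
};

/* Mirrors the initialization above: the new block immediately hands its
 * first (1UL << order) pages to the caller that asked for them. */
static void vb_model_init(struct vb_model *vb, unsigned int order)
{
	memset(vb, 0, sizeof(*vb));
	vb->free = MODEL_BBMAP_BITS - (1UL << order);
	vb->dirty = 0;
	vb->dirty_min = MODEL_BBMAP_BITS;	/* empty dirty window */
	vb->dirty_max = 0;
	memset(vb->used, 1, 1UL << order);
}

int main(void)
{
	struct vb_model vb;

	vb_model_init(&vb, 2);			/* 4-page initial allocation */
	assert(vb.free == MODEL_BBMAP_BITS - 4);
	assert(vb.dirty == 0);
	return 0;
}
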
2752 static void free_vmap_block(struct vmap_block *vb) in free_vmap_block() argument
2758 xa = addr_to_vb_xa(vb->va->va_start); in free_vmap_block()
2759 tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start)); in free_vmap_block()
2760 BUG_ON(tmp != vb); in free_vmap_block()
2762 vn = addr_to_node(vb->va->va_start); in free_vmap_block()
2764 unlink_va(vb->va, &vn->busy.root); in free_vmap_block()
2767 free_vmap_area_noflush(vb->va); in free_vmap_block()
2768 kfree_rcu(vb, rcu_head); in free_vmap_block()
2771 static bool purge_fragmented_block(struct vmap_block *vb, in purge_fragmented_block() argument
2774 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, vb->cpu); in purge_fragmented_block()
2776 if (vb->free + vb->dirty != VMAP_BBMAP_BITS || in purge_fragmented_block()
2777 vb->dirty == VMAP_BBMAP_BITS) in purge_fragmented_block()
2781 if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD)) in purge_fragmented_block()
2785 WRITE_ONCE(vb->free, 0); in purge_fragmented_block()
2787 WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS); in purge_fragmented_block()
2788 vb->dirty_min = 0; in purge_fragmented_block()
2789 vb->dirty_max = VMAP_BBMAP_BITS; in purge_fragmented_block()
2791 list_del_rcu(&vb->free_list); in purge_fragmented_block()
2793 list_add_tail(&vb->purge, purge_list); in purge_fragmented_block()
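
The two checks at the top of purge_fragmented_block() define what "fragmented" means here: free + dirty covering every page means the block has no live mappings, and a fully dirty block is excluded because vb_free() frees those directly. Below is a hedged model of that decision; MODEL_PURGE_THRESHOLD is an illustrative stand-in for VMAP_PURGE_THRESHOLD, whose real value is not shown in this listing.

#include <assert.h>
#include <stdbool.h>

#define MODEL_BBMAP_BITS      1024UL
#define MODEL_PURGE_THRESHOLD 256UL	/* illustrative stand-in for VMAP_PURGE_THRESHOLD */

/* Model of the decision in purge_fragmented_block(): a block whose pages are
 * all either free or dirty has no live mappings; if it still wastes free
 * space, or the caller forces it, it goes on the purge list. */
static bool model_should_purge(unsigned long free, unsigned long dirty,
			       bool force_purge)
{
	if (free + dirty != MODEL_BBMAP_BITS || dirty == MODEL_BBMAP_BITS)
		return false;
	if (!(force_purge || free < MODEL_PURGE_THRESHOLD))
		return false;
	return true;
}

int main(void)
{
	/* No live users and little free space left: purge. */
	assert(model_should_purge(100, MODEL_BBMAP_BITS - 100, false));
	/* Plenty of free space: only a forced purge takes it. */
	assert(!model_should_purge(300, MODEL_BBMAP_BITS - 300, false));
	assert(model_should_purge(300, MODEL_BBMAP_BITS - 300, true));
	/* Some pages still mapped: never purged here. */
	assert(!model_should_purge(100, 800, false));
	return 0;
}
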
2799 struct vmap_block *vb, *n_vb; in free_purged_blocks() local
2801 list_for_each_entry_safe(vb, n_vb, purge_list, purge) { in free_purged_blocks()
2802 list_del(&vb->purge); in free_purged_blocks()
2803 free_vmap_block(vb); in free_purged_blocks()
2810 struct vmap_block *vb; in purge_fragmented_blocks() local
2814 list_for_each_entry_rcu(vb, &vbq->free, free_list) { in purge_fragmented_blocks()
2815 unsigned long free = READ_ONCE(vb->free); in purge_fragmented_blocks()
2816 unsigned long dirty = READ_ONCE(vb->dirty); in purge_fragmented_blocks()
2822 spin_lock(&vb->lock); in purge_fragmented_blocks()
2823 purge_fragmented_block(vb, &purge, true); in purge_fragmented_blocks()
2824 spin_unlock(&vb->lock); in purge_fragmented_blocks()
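
purge_fragmented_blocks() peeks at free and dirty with READ_ONCE() before taking vb->lock, so clearly ineligible blocks are skipped without any locking, and purge_fragmented_block() then repeats the test on stable values under the lock. A userspace sketch of that check-then-recheck pattern, assuming a pthread mutex in place of the spinlock; struct block_model and try_purge() are illustrative names.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define MODEL_BBMAP_BITS 1024UL

struct block_model {
	pthread_mutex_t lock;
	unsigned long free;	/* the kernel reads these with READ_ONCE() */
	unsigned long dirty;	/* outside the lock; plain reads here      */
};

static bool block_purgeable(unsigned long free, unsigned long dirty)
{
	return free + dirty == MODEL_BBMAP_BITS && dirty != MODEL_BBMAP_BITS;
}

static bool try_purge(struct block_model *b)
{
	/* Unlocked peek: skip blocks that obviously do not qualify. */
	if (!block_purgeable(b->free, b->dirty))
		return false;

	pthread_mutex_lock(&b->lock);
	/* Recheck under the lock; the counters may have moved meanwhile. */
	bool purge = block_purgeable(b->free, b->dirty);
	if (purge) {
		b->free = 0;
		b->dirty = MODEL_BBMAP_BITS;
	}
	pthread_mutex_unlock(&b->lock);
	return purge;
}

int main(void)
{
	struct block_model b = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.free = 10, .dirty = MODEL_BBMAP_BITS - 10,
	};

	printf("purged: %d\n", try_purge(&b));
	return 0;
}
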
2841 struct vmap_block *vb; in vb_alloc() local
2859 list_for_each_entry_rcu(vb, &vbq->free, free_list) { in vb_alloc()
2862 if (READ_ONCE(vb->free) < (1UL << order)) in vb_alloc()
2865 spin_lock(&vb->lock); in vb_alloc()
2866 if (vb->free < (1UL << order)) { in vb_alloc()
2867 spin_unlock(&vb->lock); in vb_alloc()
2871 pages_off = VMAP_BBMAP_BITS - vb->free; in vb_alloc()
2872 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); in vb_alloc()
2873 WRITE_ONCE(vb->free, vb->free - (1UL << order)); in vb_alloc()
2874 bitmap_set(vb->used_map, pages_off, (1UL << order)); in vb_alloc()
2875 if (vb->free == 0) { in vb_alloc()
2877 list_del_rcu(&vb->free_list); in vb_alloc()
2881 spin_unlock(&vb->lock); in vb_alloc()
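
vb_alloc() never searches the bitmap for a hole: allocations are carved sequentially from the front of the block, so the next offset is always VMAP_BBMAP_BITS - vb->free, and once free reaches zero the block drops off the per-CPU free list. A hedged userspace model of that carve-out; model_vb_alloc() and the MODEL constants are illustrative.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define MODEL_BBMAP_BITS 1024UL

/* Only the fields this sketch needs. */
struct vb_model {
	unsigned long free;
	uint8_t used[MODEL_BBMAP_BITS];
};

/* Model of the carve-out in vb_alloc(): allocations are handed out
 * back-to-back from the front of the block, so the next offset is simply
 * "total minus what is still free".  Returns the page offset, or -1 if the
 * block cannot satisfy the request (the real code then tries the next block
 * on the per-CPU free list, or allocates a new one). */
static long model_vb_alloc(struct vb_model *vb, unsigned int order)
{
	unsigned long npages = 1UL << order;

	if (vb->free < npages)
		return -1;

	unsigned long pages_off = MODEL_BBMAP_BITS - vb->free;

	memset(&vb->used[pages_off], 1, npages);
	vb->free -= npages;
	return (long)pages_off;
}

int main(void)
{
	struct vb_model vb = { .free = MODEL_BBMAP_BITS };

	assert(model_vb_alloc(&vb, 2) == 0);	/* first 4 pages    */
	assert(model_vb_alloc(&vb, 0) == 4);	/* next single page */
	assert(vb.free == MODEL_BBMAP_BITS - 5);
	return 0;
}
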
2898 struct vmap_block *vb; in vb_free() local
2910 vb = xa_load(xa, addr_to_vb_idx(addr)); in vb_free()
2912 spin_lock(&vb->lock); in vb_free()
2913 bitmap_clear(vb->used_map, offset, (1UL << order)); in vb_free()
2914 spin_unlock(&vb->lock); in vb_free()
2921 spin_lock(&vb->lock); in vb_free()
2924 vb->dirty_min = min(vb->dirty_min, offset); in vb_free()
2925 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); in vb_free()
2927 WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order)); in vb_free()
2928 if (vb->dirty == VMAP_BBMAP_BITS) { in vb_free()
2929 BUG_ON(vb->free); in vb_free()
2930 spin_unlock(&vb->lock); in vb_free()
2931 free_vmap_block(vb); in vb_free()
2933 spin_unlock(&vb->lock); in vb_free()
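
vb_free() does not return pages to free; it clears the used bits, grows the dirty count, and widens a single [dirty_min, dirty_max) window so one later TLB flush can cover everything that was unmapped. Once dirty reaches VMAP_BBMAP_BITS the whole block goes to free_vmap_block(). A hedged model of that bookkeeping follows; the names are illustrative.

#include <assert.h>
#include <stdbool.h>

#define MODEL_BBMAP_BITS 1024UL

/* Only the fields this sketch needs. */
struct vb_model {
	unsigned long free, dirty;
	unsigned long dirty_min, dirty_max;
};

/* Model of the dirty-range bookkeeping in vb_free(): each release widens a
 * single [dirty_min, dirty_max) window so a later flush covers every freed
 * mapping at once.  Returns true when the whole block has been consumed and
 * released, which is the point where free_vmap_block() runs. */
static bool model_vb_free(struct vb_model *vb, unsigned long offset,
			  unsigned int order)
{
	unsigned long npages = 1UL << order;

	if (offset < vb->dirty_min)
		vb->dirty_min = offset;
	if (offset + npages > vb->dirty_max)
		vb->dirty_max = offset + npages;
	vb->dirty += npages;

	return vb->dirty == MODEL_BBMAP_BITS;
}

int main(void)
{
	struct vb_model vb = {
		.free = 0, .dirty = MODEL_BBMAP_BITS - 8,
		.dirty_min = MODEL_BBMAP_BITS, .dirty_max = 0,
	};

	assert(!model_vb_free(&vb, 16, 2));	/* window is now [16, 20) */
	assert(vb.dirty_min == 16 && vb.dirty_max == 20);
	assert(model_vb_free(&vb, 0, 2));	/* block fully dirty now  */
	return 0;
}
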
2948 struct vmap_block *vb; in _vm_unmap_aliases() local
2952 xa_for_each(&vbq->vmap_blocks, idx, vb) { in _vm_unmap_aliases()
2953 spin_lock(&vb->lock); in _vm_unmap_aliases()
2960 if (!purge_fragmented_block(vb, &purge_list, false) && in _vm_unmap_aliases()
2961 vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) { in _vm_unmap_aliases()
2962 unsigned long va_start = vb->va->va_start; in _vm_unmap_aliases()
2965 s = va_start + (vb->dirty_min << PAGE_SHIFT); in _vm_unmap_aliases()
2966 e = va_start + (vb->dirty_max << PAGE_SHIFT); in _vm_unmap_aliases()
2972 vb->dirty_min = VMAP_BBMAP_BITS; in _vm_unmap_aliases()
2973 vb->dirty_max = 0; in _vm_unmap_aliases()
2977 spin_unlock(&vb->lock); in _vm_unmap_aliases()
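
For blocks that are not purged outright, _vm_unmap_aliases() turns the page-granular dirty window into a virtual address range to flush and then resets the window to empty (dirty_min = VMAP_BBMAP_BITS, dirty_max = 0). A small model of that arithmetic, with illustrative constants and names:

#include <assert.h>
#include <stdio.h>

#define MODEL_PAGE_SHIFT 12
#define MODEL_BBMAP_BITS 1024UL

/* Model of the flush-range computation shown above: the dirty window is
 * kept in pages relative to the block start, so the range to flush is
 * va_start + (dirty_min << PAGE_SHIFT) .. va_start + (dirty_max << PAGE_SHIFT). */
static void model_dirty_range(unsigned long va_start,
			      unsigned long *dirty_min, unsigned long *dirty_max,
			      unsigned long *s, unsigned long *e)
{
	*s = va_start + (*dirty_min << MODEL_PAGE_SHIFT);
	*e = va_start + (*dirty_max << MODEL_PAGE_SHIFT);

	/* Reset to an "empty" window, exactly as the listing does. */
	*dirty_min = MODEL_BBMAP_BITS;
	*dirty_max = 0;
}

int main(void)
{
	unsigned long dmin = 16, dmax = 20, s, e;

	model_dirty_range(0x40000000UL, &dmin, &dmax, &s, &e);
	printf("flush [%#lx, %#lx)\n", s, e);
	assert(e - s == 4UL << MODEL_PAGE_SHIFT);
	assert(dmin == MODEL_BBMAP_BITS && dmax == 0);
	return 0;
}
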
4484 struct vmap_block *vb; in vmap_ram_vread_iter() local
4505 vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr)); in vmap_ram_vread_iter()
4506 if (!vb) in vmap_ram_vread_iter()
4509 spin_lock(&vb->lock); in vmap_ram_vread_iter()
4510 if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) { in vmap_ram_vread_iter()
4511 spin_unlock(&vb->lock); in vmap_ram_vread_iter()
4515 for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) { in vmap_ram_vread_iter()
4521 start = vmap_block_vaddr(vb->va->va_start, rs); in vmap_ram_vread_iter()
4549 spin_unlock(&vb->lock); in vmap_ram_vread_iter()
4556 spin_unlock(&vb->lock); in vmap_ram_vread_iter()
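
vmap_ram_vread_iter() only dereferences ranges whose used_map bits are set, skipping unmapped holes inside the block; the kernel walks them with for_each_set_bitrange(). Below is a hedged stand-in for that walk, using a plain byte array instead of a bitmap; walk_used_ranges() is an illustrative name.

#include <stdint.h>
#include <stdio.h>

#define MODEL_BBMAP_BITS 64UL	/* small map to keep the demo readable */

/* Model of the walk in vmap_ram_vread_iter(): visit each maximal run of set
 * bits [rs, re) in the used map and only touch memory inside it. */
static void walk_used_ranges(const uint8_t *used, unsigned long nbits)
{
	unsigned long rs = 0;

	while (rs < nbits) {
		while (rs < nbits && !used[rs])
			rs++;			/* skip the unmapped hole */
		if (rs == nbits)
			break;

		unsigned long re = rs;
		while (re < nbits && used[re])
			re++;			/* extend over mapped run */

		printf("readable pages [%lu, %lu)\n", rs, re);
		rs = re;
	}
}

int main(void)
{
	uint8_t used[MODEL_BBMAP_BITS] = { 0 };

	/* Two mapped runs with a hole in between. */
	for (unsigned long i = 4; i < 8; i++)
		used[i] = 1;
	for (unsigned long i = 20; i < 24; i++)
		used[i] = 1;

	walk_used_ranges(used, MODEL_BBMAP_BITS);
	return 0;
}
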