Lines Matching full:pa

258  *  blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
262 * - new PA: buddy += N; PA = N
263 * - use inode PA: on-disk += N; PA -= N
264 * - discard inode PA: buddy -= on-disk - PA; PA = 0
265 * - use locality group PA: on-disk += N; PA -= N
266 * - discard locality group PA: buddy -= PA; PA = 0
267 * note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
269 * bits from PA, only from on-disk bitmap
279 * bit set and PA claims same block, it's OK. IOW, one can set bit in
280 * on-disk bitmap if buddy has the same bit set and/or PA covers the corresponding
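A tiny worked example of the accounting rules above, with illustrative counters that are not mballoc code; the first two rules keep the buddy's used count equal to on-disk plus PA:

	unsigned int buddy_used = 0, ondisk_used = 0, pa_blocks = 0;

	buddy_used  += 16; pa_blocks  = 16;	/* new PA of N = 16 blocks: buddy += N; PA = N */
	ondisk_used += 10; pa_blocks -= 10;	/* use 10 blocks from that PA: on-disk += N; PA -= N */
	/* buddy_used == 16 == ondisk_used + pa_blocks == 10 + 6 still holds */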
285 * - new PA
286 * blocks for PA are allocated in the buddy, buddy must be referenced
287 * until PA is linked to allocation group to avoid concurrent buddy init
288 * - use inode PA
289 * we need to make sure that either on-disk bitmap or PA has uptodate data
290 * given (3) we care that PA-=N operation doesn't interfere with init
291 * - discard inode PA
293 * - use locality group PA
294 * again PA-=N must be serialized with init
295 * - discard locality group PA
297 * - new PA vs.
298 * - use inode PA
300 * - discard inode PA
301 * discard process must wait until the PA is no longer used by another process
302 * - use locality group PA
304 * - discard locality group PA
305 * discard process must wait until the PA is no longer used by another process
306 * - use inode PA
307 * - use inode PA
309 * - discard inode PA
310 * discard process must wait until the PA is no longer used by another process
311 * - use locality group PA
313 * - discard locality group PA
314 * discard process must wait until the PA is no longer used by another process
317 * - PA is referenced, and while it is referenced no discard is possible
318 * - PA stays referenced until its blocks are marked in the on-disk bitmap
319 * - PA changes only after the on-disk bitmap is updated
324 * a special case when we've used PA to emptiness. no need to modify buddy
339 * find proper PA (per-inode or group)
343 * release PA
355 * remove PA from object (inode or locality group)
367 * - per-pa lock (pa)
372 * - new pa
376 * - find and use pa:
377 * pa
379 * - release consumed pa:
380 * pa
386 * pa
390 * pa
395 * pa
764 struct ext4_prealloc_space *pa; in __mb_check_buddy() local
765 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); in __mb_check_buddy()
766 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k); in __mb_check_buddy()
768 for (i = 0; i < pa->pa_len; i++) in __mb_check_buddy()
3757 struct ext4_prealloc_space *pa; in ext4_mb_cleanup_pa() local
3762 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); in ext4_mb_cleanup_pa()
3763 list_del(&pa->pa_group_list); in ext4_mb_cleanup_pa()
3765 kmem_cache_free(ext4_pspace_cachep, pa); in ext4_mb_cleanup_pa()
4225 * PA rbtree walk. We assume that we have held the inode PA rbtree lock
4299 /* Step 1: find any one immediate neighboring PA of the normalized range */ in ext4_mb_pa_adjust_overlap()
4308 /* PA must not overlap original request */ in ext4_mb_pa_adjust_overlap()
4317 * Step 2: check if the found PA is left or right neighbor and in ext4_mb_pa_adjust_overlap()
4545 * In case of inode pa, later we use the allocated blocks in ext4_mb_normalize_request()
4626 * Called on failure; free up any blocks from the inode PA for this
4633 struct ext4_prealloc_space *pa = ac->ac_pa; in ext4_discard_allocated_blocks() local
4637 if (pa == NULL) { in ext4_discard_allocated_blocks()
4656 if (pa->pa_type == MB_INODE_PA) { in ext4_discard_allocated_blocks()
4657 spin_lock(&pa->pa_lock); in ext4_discard_allocated_blocks()
4658 pa->pa_free += ac->ac_b_ex.fe_len; in ext4_discard_allocated_blocks()
4659 spin_unlock(&pa->pa_lock); in ext4_discard_allocated_blocks()
4667 struct ext4_prealloc_space *pa) in ext4_mb_use_inode_pa() argument
4675 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); in ext4_mb_use_inode_pa()
4676 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), in ext4_mb_use_inode_pa()
4683 ac->ac_pa = pa; in ext4_mb_use_inode_pa()
4685 BUG_ON(start < pa->pa_pstart); in ext4_mb_use_inode_pa()
4686 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); in ext4_mb_use_inode_pa()
4687 BUG_ON(pa->pa_free < len); in ext4_mb_use_inode_pa()
4689 pa->pa_free -= len; in ext4_mb_use_inode_pa()
4691 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); in ext4_mb_use_inode_pa()
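The start/end arithmetic in ext4_mb_use_inode_pa() above maps the request's logical block into the PA's physical range. A worked example with invented numbers, assuming one block per cluster so EXT4_C2B() is the identity, and assuming the elided second operand of min() is start plus the requested length:

	unsigned long long pa_pstart = 5000, pa_lstart = 100;	/* PA: logical 100..115 -> physical 5000..5015 */
	unsigned long long fe_logical = 104, fe_len = 4, pa_len = 16;
	unsigned long long start, end, len;

	start = pa_pstart + (fe_logical - pa_lstart);		/* 5000 + (104 - 100) = 5004 */
	end   = min(pa_pstart + pa_len, start + fe_len);	/* min(5016, 5008) = 5008 */
	len   = end - start;					/* 4 blocks taken, so pa_free -= 4 */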
4698 struct ext4_prealloc_space *pa) in ext4_mb_use_group_pa() argument
4702 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, in ext4_mb_use_group_pa()
4707 ac->ac_pa = pa; in ext4_mb_use_group_pa()
4711 * instead we correct pa later, after blocks are marked in ext4_mb_use_group_pa()
4713 * Other CPUs are prevented from allocating from this pa by lg_mutex in ext4_mb_use_group_pa()
4715 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n", in ext4_mb_use_group_pa()
4716 pa->pa_lstart, len, pa); in ext4_mb_use_group_pa()
4727 struct ext4_prealloc_space *pa, in ext4_mb_check_group_pa() argument
4733 atomic_inc(&pa->pa_count); in ext4_mb_check_group_pa()
4734 return pa; in ext4_mb_check_group_pa()
4737 new_distance = abs(goal_block - pa->pa_pstart); in ext4_mb_check_group_pa()
4744 atomic_inc(&pa->pa_count); in ext4_mb_check_group_pa()
4745 return pa; in ext4_mb_check_group_pa()
4749 * check if found pa meets EXT4_MB_HINT_GOAL_ONLY
4753 struct ext4_prealloc_space *pa) in ext4_mb_pa_goal_check() argument
4767 start = pa->pa_pstart + in ext4_mb_pa_goal_check()
4768 (ac->ac_g_ex.fe_logical - pa->pa_lstart); in ext4_mb_pa_goal_check()
4772 if (ac->ac_g_ex.fe_len > pa->pa_len - in ext4_mb_pa_goal_check()
4773 EXT4_B2C(sbi, ac->ac_g_ex.fe_logical - pa->pa_lstart)) in ext4_mb_pa_goal_check()
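The length test in ext4_mb_pa_goal_check() above asks whether the goal extent still fits in the part of the PA at and past the goal's logical offset. With invented numbers, assuming one block per cluster so EXT4_B2C() is the identity:

	unsigned int pa_lstart = 200, pa_len = 32;	/* PA covers logical clusters 200..231 */
	unsigned int fe_logical = 220, fe_len = 16;	/* GOAL_ONLY request: 16 clusters at 220 */
	bool fits;

	/* clusters left in the PA past the goal offset: 32 - (220 - 200) = 12 */
	fits = fe_len <= pa_len - (fe_logical - pa_lstart);	/* 16 <= 12 -> false, PA rejected */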
4798 * first, try per-file preallocation by searching the inode pa rbtree. in ext4_mb_use_preallocated()
4801 * ext4_mb_discard_group_preallocation() can concurrently mark the pa in ext4_mb_use_preallocated()
4811 * Step 1: Find a pa with logical start immediately adjacent to the in ext4_mb_use_preallocated()
4824 * Step 2: The adjacent pa might be to the right of logical start, find in ext4_mb_use_preallocated()
4825 * the left adjacent pa. After this step we'd have a valid tmp_pa whose in ext4_mb_use_preallocated()
4837 * If there is no adjacent pa to the left then finding in ext4_mb_use_preallocated()
4838 * an overlapping pa is not possible hence stop searching in ext4_mb_use_preallocated()
4839 * inode pa tree in ext4_mb_use_preallocated()
4848 * Step 3: If the left adjacent pa is deleted, keep moving left to find in ext4_mb_use_preallocated()
4849 * the first non-deleted adjacent pa. After this step we should have a in ext4_mb_use_preallocated()
4855 * no non-deleted left adjacent pa, so stop searching in ext4_mb_use_preallocated()
4856 * inode pa tree in ext4_mb_use_preallocated()
4867 * to delete this pa underneath us. Since group in ext4_mb_use_preallocated()
4881 * Step 4: We now have the non-deleted left adjacent pa. Only this in ext4_mb_use_preallocated()
4882 * pa can possibly satisfy the request hence check if it overlaps in ext4_mb_use_preallocated()
4895 * Since PAs don't overlap, we won't find any other PA to in ext4_mb_use_preallocated()
4910 * We found a valid overlapping pa but couldn't use it because in ext4_mb_use_preallocated()
4914 * 1. When a new inode pa is added to rbtree it must have in ext4_mb_use_preallocated()
4918 * 2. An inode pa that is in the rbtree can only have its in ext4_mb_use_preallocated()
4933 * sure that another process will never see a pa in rbtree with in ext4_mb_use_preallocated()
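A minimal sketch of the step 3 walk described above, using the generic rbtree helpers; this is illustrative rather than the actual mballoc code, it assumes ei->i_prealloc_lock is read-held as the comments require, and per-PA locking around pa_deleted is elided:

	while (tmp_pa && tmp_pa->pa_deleted) {
		struct rb_node *prev = rb_prev(&tmp_pa->pa_node.inode_node);

		if (!prev) {
			tmp_pa = NULL;	/* no non-deleted left neighbor exists */
			break;
		}
		tmp_pa = rb_entry(prev, struct ext4_prealloc_space,
				  pa_node.inode_node);
	}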
4992 struct ext4_prealloc_space *pa; in ext4_mb_generate_from_pa() local
5011 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); in ext4_mb_generate_from_pa()
5012 spin_lock(&pa->pa_lock); in ext4_mb_generate_from_pa()
5013 ext4_get_group_no_and_offset(sb, pa->pa_pstart, in ext4_mb_generate_from_pa()
5015 len = pa->pa_len; in ext4_mb_generate_from_pa()
5016 spin_unlock(&pa->pa_lock); in ext4_mb_generate_from_pa()
5027 struct ext4_prealloc_space *pa) in ext4_mb_mark_pa_deleted() argument
5031 if (pa->pa_deleted) { in ext4_mb_mark_pa_deleted()
5032 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n", in ext4_mb_mark_pa_deleted()
5033 pa->pa_type, pa->pa_pstart, pa->pa_lstart, in ext4_mb_mark_pa_deleted()
5034 pa->pa_len); in ext4_mb_mark_pa_deleted()
5038 pa->pa_deleted = 1; in ext4_mb_mark_pa_deleted()
5040 if (pa->pa_type == MB_INODE_PA) { in ext4_mb_mark_pa_deleted()
5041 ei = EXT4_I(pa->pa_inode); in ext4_mb_mark_pa_deleted()
5046 static inline void ext4_mb_pa_free(struct ext4_prealloc_space *pa) in ext4_mb_pa_free() argument
5048 BUG_ON(!pa); in ext4_mb_pa_free()
5049 BUG_ON(atomic_read(&pa->pa_count)); in ext4_mb_pa_free()
5050 BUG_ON(pa->pa_deleted == 0); in ext4_mb_pa_free()
5051 kmem_cache_free(ext4_pspace_cachep, pa); in ext4_mb_pa_free()
5056 struct ext4_prealloc_space *pa; in ext4_mb_pa_callback() local
5058 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); in ext4_mb_pa_callback()
5059 ext4_mb_pa_free(pa); in ext4_mb_pa_callback()
5067 struct super_block *sb, struct ext4_prealloc_space *pa) in ext4_mb_put_pa() argument
5074 spin_lock(&pa->pa_lock); in ext4_mb_put_pa()
5075 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { in ext4_mb_put_pa()
5076 spin_unlock(&pa->pa_lock); in ext4_mb_put_pa()
5080 if (pa->pa_deleted == 1) { in ext4_mb_put_pa()
5081 spin_unlock(&pa->pa_lock); in ext4_mb_put_pa()
5085 ext4_mb_mark_pa_deleted(sb, pa); in ext4_mb_put_pa()
5086 spin_unlock(&pa->pa_lock); in ext4_mb_put_pa()
5088 grp_blk = pa->pa_pstart; in ext4_mb_put_pa()
5091 * next group when pa is used up in ext4_mb_put_pa()
5093 if (pa->pa_type == MB_GROUP_PA) in ext4_mb_put_pa()
5102 * find block B in PA in ext4_mb_put_pa()
5105 * drop PA from group in ext4_mb_put_pa()
5109 * we make "copy" and "mark all PAs" atomic and serialize "drop PA" in ext4_mb_put_pa()
5113 list_del(&pa->pa_group_list); in ext4_mb_put_pa()
5116 if (pa->pa_type == MB_INODE_PA) { in ext4_mb_put_pa()
5117 write_lock(pa->pa_node_lock.inode_lock); in ext4_mb_put_pa()
5118 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); in ext4_mb_put_pa()
5119 write_unlock(pa->pa_node_lock.inode_lock); in ext4_mb_put_pa()
5120 ext4_mb_pa_free(pa); in ext4_mb_put_pa()
5122 spin_lock(pa->pa_node_lock.lg_lock); in ext4_mb_put_pa()
5123 list_del_rcu(&pa->pa_node.lg_list); in ext4_mb_put_pa()
5124 spin_unlock(pa->pa_node_lock.lg_lock); in ext4_mb_put_pa()
5125 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); in ext4_mb_put_pa()
5162 struct ext4_prealloc_space *pa; in ext4_mb_new_inode_pa() local
5172 pa = ac->ac_pa; in ext4_mb_new_inode_pa()
5222 pa->pa_lstart = ac->ac_b_ex.fe_logical; in ext4_mb_new_inode_pa()
5223 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); in ext4_mb_new_inode_pa()
5224 pa->pa_len = ac->ac_b_ex.fe_len; in ext4_mb_new_inode_pa()
5225 pa->pa_free = pa->pa_len; in ext4_mb_new_inode_pa()
5226 spin_lock_init(&pa->pa_lock); in ext4_mb_new_inode_pa()
5227 INIT_LIST_HEAD(&pa->pa_group_list); in ext4_mb_new_inode_pa()
5228 pa->pa_deleted = 0; in ext4_mb_new_inode_pa()
5229 pa->pa_type = MB_INODE_PA; in ext4_mb_new_inode_pa()
5231 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, in ext4_mb_new_inode_pa()
5232 pa->pa_len, pa->pa_lstart); in ext4_mb_new_inode_pa()
5233 trace_ext4_mb_new_inode_pa(ac, pa); in ext4_mb_new_inode_pa()
5235 atomic_add(pa->pa_free, &sbi->s_mb_preallocated); in ext4_mb_new_inode_pa()
5236 ext4_mb_use_inode_pa(ac, pa); in ext4_mb_new_inode_pa()
5243 pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock; in ext4_mb_new_inode_pa()
5244 pa->pa_inode = ac->ac_inode; in ext4_mb_new_inode_pa()
5246 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); in ext4_mb_new_inode_pa()
5248 write_lock(pa->pa_node_lock.inode_lock); in ext4_mb_new_inode_pa()
5249 ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node); in ext4_mb_new_inode_pa()
5250 write_unlock(pa->pa_node_lock.inode_lock); in ext4_mb_new_inode_pa()
5262 struct ext4_prealloc_space *pa; in ext4_mb_new_group_pa() local
5271 pa = ac->ac_pa; in ext4_mb_new_group_pa()
5273 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); in ext4_mb_new_group_pa()
5274 pa->pa_lstart = pa->pa_pstart; in ext4_mb_new_group_pa()
5275 pa->pa_len = ac->ac_b_ex.fe_len; in ext4_mb_new_group_pa()
5276 pa->pa_free = pa->pa_len; in ext4_mb_new_group_pa()
5277 spin_lock_init(&pa->pa_lock); in ext4_mb_new_group_pa()
5278 INIT_LIST_HEAD(&pa->pa_node.lg_list); in ext4_mb_new_group_pa()
5279 INIT_LIST_HEAD(&pa->pa_group_list); in ext4_mb_new_group_pa()
5280 pa->pa_deleted = 0; in ext4_mb_new_group_pa()
5281 pa->pa_type = MB_GROUP_PA; in ext4_mb_new_group_pa()
5283 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, in ext4_mb_new_group_pa()
5284 pa->pa_len, pa->pa_lstart); in ext4_mb_new_group_pa()
5285 trace_ext4_mb_new_group_pa(ac, pa); in ext4_mb_new_group_pa()
5287 ext4_mb_use_group_pa(ac, pa); in ext4_mb_new_group_pa()
5288 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); in ext4_mb_new_group_pa()
5296 pa->pa_node_lock.lg_lock = &lg->lg_prealloc_lock; in ext4_mb_new_group_pa()
5297 pa->pa_inode = NULL; in ext4_mb_new_group_pa()
5299 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); in ext4_mb_new_group_pa()
5302 * We will later add the new pa to the right bucket in ext4_mb_new_group_pa()
5318 * @pa must be unlinked from inode and group lists, so that
5325 struct ext4_prealloc_space *pa) in ext4_mb_release_inode_pa() argument
5336 BUG_ON(pa->pa_deleted == 0); in ext4_mb_release_inode_pa()
5337 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); in ext4_mb_release_inode_pa()
5338 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); in ext4_mb_release_inode_pa()
5339 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); in ext4_mb_release_inode_pa()
5340 end = bit + pa->pa_len; in ext4_mb_release_inode_pa()
5353 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + in ext4_mb_release_inode_pa()
5356 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); in ext4_mb_release_inode_pa()
5359 if (free != pa->pa_free) { in ext4_mb_release_inode_pa()
5361 "pa %p: logic %lu, phys. %lu, len %d", in ext4_mb_release_inode_pa()
5362 pa, (unsigned long) pa->pa_lstart, in ext4_mb_release_inode_pa()
5363 (unsigned long) pa->pa_pstart, in ext4_mb_release_inode_pa()
5364 pa->pa_len); in ext4_mb_release_inode_pa()
5366 free, pa->pa_free); in ext4_mb_release_inode_pa()
5368 * pa is already deleted so we use the value obtained in ext4_mb_release_inode_pa()
5377 struct ext4_prealloc_space *pa) in ext4_mb_release_group_pa() argument
5383 trace_ext4_mb_release_group_pa(sb, pa); in ext4_mb_release_group_pa()
5384 BUG_ON(pa->pa_deleted == 0); in ext4_mb_release_group_pa()
5385 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); in ext4_mb_release_group_pa()
5386 if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) { in ext4_mb_release_group_pa()
5388 e4b->bd_group, group, pa->pa_pstart); in ext4_mb_release_group_pa()
5391 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); in ext4_mb_release_group_pa()
5392 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); in ext4_mb_release_group_pa()
5393 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); in ext4_mb_release_group_pa()
5411 struct ext4_prealloc_space *pa, *tmp; in ext4_mb_discard_group_preallocations() local
5442 list_for_each_entry_safe(pa, tmp, in ext4_mb_discard_group_preallocations()
5444 spin_lock(&pa->pa_lock); in ext4_mb_discard_group_preallocations()
5445 if (atomic_read(&pa->pa_count)) { in ext4_mb_discard_group_preallocations()
5446 spin_unlock(&pa->pa_lock); in ext4_mb_discard_group_preallocations()
5450 if (pa->pa_deleted) { in ext4_mb_discard_group_preallocations()
5451 spin_unlock(&pa->pa_lock); in ext4_mb_discard_group_preallocations()
5456 ext4_mb_mark_pa_deleted(sb, pa); in ext4_mb_discard_group_preallocations()
5462 free += pa->pa_free; in ext4_mb_discard_group_preallocations()
5464 spin_unlock(&pa->pa_lock); in ext4_mb_discard_group_preallocations()
5466 list_del(&pa->pa_group_list); in ext4_mb_discard_group_preallocations()
5467 list_add(&pa->u.pa_tmp_list, &list); in ext4_mb_discard_group_preallocations()
5471 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { in ext4_mb_discard_group_preallocations()
5474 if (pa->pa_type == MB_GROUP_PA) { in ext4_mb_discard_group_preallocations()
5475 spin_lock(pa->pa_node_lock.lg_lock); in ext4_mb_discard_group_preallocations()
5476 list_del_rcu(&pa->pa_node.lg_list); in ext4_mb_discard_group_preallocations()
5477 spin_unlock(pa->pa_node_lock.lg_lock); in ext4_mb_discard_group_preallocations()
5479 write_lock(pa->pa_node_lock.inode_lock); in ext4_mb_discard_group_preallocations()
5480 ei = EXT4_I(pa->pa_inode); in ext4_mb_discard_group_preallocations()
5481 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); in ext4_mb_discard_group_preallocations()
5482 write_unlock(pa->pa_node_lock.inode_lock); in ext4_mb_discard_group_preallocations()
5485 list_del(&pa->u.pa_tmp_list); in ext4_mb_discard_group_preallocations()
5487 if (pa->pa_type == MB_GROUP_PA) { in ext4_mb_discard_group_preallocations()
5488 ext4_mb_release_group_pa(&e4b, pa); in ext4_mb_discard_group_preallocations()
5489 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); in ext4_mb_discard_group_preallocations()
5491 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); in ext4_mb_discard_group_preallocations()
5492 ext4_mb_pa_free(pa); in ext4_mb_discard_group_preallocations()
5519 struct ext4_prealloc_space *pa, *tmp; in ext4_discard_preallocations() local
5538 /* first, collect all pa's in the inode */ in ext4_discard_preallocations()
5542 pa = rb_entry(iter, struct ext4_prealloc_space, in ext4_discard_preallocations()
5544 BUG_ON(pa->pa_node_lock.inode_lock != &ei->i_prealloc_lock); in ext4_discard_preallocations()
5546 spin_lock(&pa->pa_lock); in ext4_discard_preallocations()
5547 if (atomic_read(&pa->pa_count)) { in ext4_discard_preallocations()
5550 spin_unlock(&pa->pa_lock); in ext4_discard_preallocations()
5553 "uh-oh! used pa while discarding"); in ext4_discard_preallocations()
5559 if (pa->pa_deleted == 0) { in ext4_discard_preallocations()
5560 ext4_mb_mark_pa_deleted(sb, pa); in ext4_discard_preallocations()
5561 spin_unlock(&pa->pa_lock); in ext4_discard_preallocations()
5562 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); in ext4_discard_preallocations()
5563 list_add(&pa->u.pa_tmp_list, &list); in ext4_discard_preallocations()
5567 /* someone is deleting pa right now */ in ext4_discard_preallocations()
5568 spin_unlock(&pa->pa_lock); in ext4_discard_preallocations()
5572 * doesn't mean pa is already unlinked from in ext4_discard_preallocations()
5576 * pa from inode's list may access already in ext4_discard_preallocations()
5588 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { in ext4_discard_preallocations()
5589 BUG_ON(pa->pa_type != MB_INODE_PA); in ext4_discard_preallocations()
5590 group = ext4_get_group_number(sb, pa->pa_pstart); in ext4_discard_preallocations()
5610 list_del(&pa->pa_group_list); in ext4_discard_preallocations()
5611 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); in ext4_discard_preallocations()
5617 list_del(&pa->u.pa_tmp_list); in ext4_discard_preallocations()
5618 ext4_mb_pa_free(pa); in ext4_discard_preallocations()
5624 struct ext4_prealloc_space *pa; in ext4_mb_pa_alloc() local
5627 pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS); in ext4_mb_pa_alloc()
5628 if (!pa) in ext4_mb_pa_alloc()
5630 atomic_set(&pa->pa_count, 1); in ext4_mb_pa_alloc()
5631 ac->ac_pa = pa; in ext4_mb_pa_alloc()
5637 struct ext4_prealloc_space *pa = ac->ac_pa; in ext4_mb_pa_put_free() local
5639 BUG_ON(!pa); in ext4_mb_pa_put_free()
5641 WARN_ON(!atomic_dec_and_test(&pa->pa_count)); in ext4_mb_pa_put_free()
5644 * len of found blocks < len of requested blocks hence the PA has not in ext4_mb_pa_put_free()
5647 pa->pa_deleted = 1; in ext4_mb_pa_put_free()
5648 ext4_mb_pa_free(pa); in ext4_mb_pa_put_free()
5663 struct ext4_prealloc_space *pa; in ext4_mb_show_pa() local
5671 pa = list_entry(cur, struct ext4_prealloc_space, in ext4_mb_show_pa()
5673 spin_lock(&pa->pa_lock); in ext4_mb_show_pa()
5674 ext4_get_group_no_and_offset(sb, pa->pa_pstart, in ext4_mb_show_pa()
5676 spin_unlock(&pa->pa_lock); in ext4_mb_show_pa()
5677 mb_debug(sb, "PA:%u:%d:%d\n", i, start, in ext4_mb_show_pa()
5678 pa->pa_len); in ext4_mb_show_pa()
5714 mb_debug(sb, "used pa: %s, ", str_yes_no(ac->ac_pa)); in ext4_mb_show_ac()
5717 "group pa" : "inode pa"); in ext4_mb_show_ac()
5849 struct ext4_prealloc_space *pa, *tmp; in ext4_mb_discard_lg_preallocations() local
5854 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order], in ext4_mb_discard_lg_preallocations()
5857 spin_lock(&pa->pa_lock); in ext4_mb_discard_lg_preallocations()
5858 if (atomic_read(&pa->pa_count)) { in ext4_mb_discard_lg_preallocations()
5860 * This is the pa that we just used in ext4_mb_discard_lg_preallocations()
5864 spin_unlock(&pa->pa_lock); in ext4_mb_discard_lg_preallocations()
5867 if (pa->pa_deleted) { in ext4_mb_discard_lg_preallocations()
5868 spin_unlock(&pa->pa_lock); in ext4_mb_discard_lg_preallocations()
5872 BUG_ON(pa->pa_type != MB_GROUP_PA); in ext4_mb_discard_lg_preallocations()
5875 ext4_mb_mark_pa_deleted(sb, pa); in ext4_mb_discard_lg_preallocations()
5876 spin_unlock(&pa->pa_lock); in ext4_mb_discard_lg_preallocations()
5878 list_del_rcu(&pa->pa_node.lg_list); in ext4_mb_discard_lg_preallocations()
5879 list_add(&pa->u.pa_tmp_list, &discard_list); in ext4_mb_discard_lg_preallocations()
5894 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { in ext4_mb_discard_lg_preallocations()
5897 group = ext4_get_group_number(sb, pa->pa_pstart); in ext4_mb_discard_lg_preallocations()
5906 list_del(&pa->pa_group_list); in ext4_mb_discard_lg_preallocations()
5907 ext4_mb_release_group_pa(&e4b, pa); in ext4_mb_discard_lg_preallocations()
5911 list_del(&pa->u.pa_tmp_list); in ext4_mb_discard_lg_preallocations()
5912 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); in ext4_mb_discard_lg_preallocations()
5930 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; in ext4_mb_add_n_trim() local
5932 order = fls(pa->pa_free) - 1; in ext4_mb_add_n_trim()
5946 if (!added && pa->pa_free < tmp_pa->pa_free) { in ext4_mb_add_n_trim()
5948 list_add_tail_rcu(&pa->pa_node.lg_list, in ext4_mb_add_n_trim()
5960 list_add_tail_rcu(&pa->pa_node.lg_list, in ext4_mb_add_n_trim()
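The bucket used by ext4_mb_add_n_trim() above comes from fls() of the PA's free count (any clamping of the order to the list-table size is not among the matched lines). A few illustrative values:

	int order_a = fls(1)   - 1;	/* pa_free = 1   -> list bucket 0 */
	int order_b = fls(12)  - 1;	/* pa_free = 12, fls() = 4 -> bucket 3 */
	int order_c = fls(256) - 1;	/* pa_free = 256 -> bucket 8 */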
5976 struct ext4_prealloc_space *pa = ac->ac_pa; in ext4_mb_release_context() local
5977 if (pa) { in ext4_mb_release_context()
5978 if (pa->pa_type == MB_GROUP_PA) { in ext4_mb_release_context()
5980 spin_lock(&pa->pa_lock); in ext4_mb_release_context()
5981 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); in ext4_mb_release_context()
5982 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); in ext4_mb_release_context()
5983 pa->pa_free -= ac->ac_b_ex.fe_len; in ext4_mb_release_context()
5984 pa->pa_len -= ac->ac_b_ex.fe_len; in ext4_mb_release_context()
5985 spin_unlock(&pa->pa_lock); in ext4_mb_release_context()
5988 * We want to add the pa to the right bucket. in ext4_mb_release_context()
5993 if (likely(pa->pa_free)) { in ext4_mb_release_context()
5994 spin_lock(pa->pa_node_lock.lg_lock); in ext4_mb_release_context()
5995 list_del_rcu(&pa->pa_node.lg_list); in ext4_mb_release_context()
5996 spin_unlock(pa->pa_node_lock.lg_lock); in ext4_mb_release_context()
6001 ext4_mb_put_pa(ac, ac->ac_sb, pa); in ext4_mb_release_context()
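When a group PA was only partly consumed, ext4_mb_release_context() above advances the PA's window past the blocks just used before it is re-added to the right locality-group bucket (per the comment above). A worked example with invented numbers, assuming one block per cluster so EXT4_C2B() is the identity:

	unsigned long long pa_pstart = 8000, pa_lstart = 8000;	/* group PAs keep pa_lstart == pa_pstart */
	int pa_len = 32, pa_free = 32, fe_len = 8;		/* this allocation took 8 clusters */

	pa_pstart += fe_len;	/* 8000 -> 8008: the unused tail starts here now */
	pa_lstart += fe_len;
	pa_free   -= fe_len;	/* 32 -> 24 clusters still preallocated */
	pa_len    -= fe_len;	/* the window shrinks from the front */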
6219 * pa allocated above is added to grp->bb_prealloc_list only in ext4_mb_new_blocks()
6223 * So we have to free this pa here itself. in ext4_mb_new_blocks()
6248 * If block allocation fails then the pa allocated above in ext4_mb_new_blocks()