xref: /linux/fs/btrfs/space-info.c (revision 3e7819886281e077e82006fe4804b0d6b0f5643b)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "misc.h"
4 #include "ctree.h"
5 #include "space-info.h"
6 #include "sysfs.h"
7 #include "volumes.h"
8 #include "free-space-cache.h"
9 #include "ordered-data.h"
10 #include "transaction.h"
11 #include "block-group.h"
12 #include "fs.h"
13 #include "accessors.h"
14 #include "extent-tree.h"
15 
16 /*
17  * HOW DOES SPACE RESERVATION WORK
18  *
19  * If you want to know about delalloc specifically, there is a separate comment
20  * for that with the delalloc code.  This comment is about how the whole system
21  * works generally.
22  *
23  * BASIC CONCEPTS
24  *
25  *   1) space_info.  This is the ultimate arbiter of how much space we can use.
26  *   There's a description of the bytes_ fields with the struct declaration;
27  *   refer to that for specifics on each field.  Suffice it to say that for
28  *   reservations we care about total_bytes - SUM(space_info->bytes_) when
29  *   determining if there is space to make an allocation.  There is a space_info
30  *   for METADATA, SYSTEM, and DATA areas.
31  *
32  *   2) block_rsv's.  These are basically buckets for every different type of
33  *   metadata reservation we have.  You can see the comment in the block_rsv
34  *   code on the rules for each type, but generally block_rsv->reserved is how
35  *   much space is accounted for in space_info->bytes_may_use.
36  *
37  *   3) btrfs_calc*_size.  These are the worst case calculations we use based
38  *   on the number of items we will want to modify.  We have one for changing
39  *   items, and one for inserting new items.  Generally we use these helpers to
40  *   determine the size of the block reserves, and then use the actual bytes
41  *   values to adjust the space_info counters.
42  *
43  * MAKING RESERVATIONS, THE NORMAL CASE
44  *
45  *   We call into either btrfs_reserve_data_bytes() or
46  *   btrfs_reserve_metadata_bytes(), depending on which we're looking for,
47  *   passing the num_bytes we want to reserve.
48  *
49  *   ->reserve
50  *     space_info->bytes_may_use += num_bytes
51  *
52  *   ->extent allocation
53  *     Call btrfs_add_reserved_bytes() which does
54  *     space_info->bytes_may_use -= num_bytes
55  *     space_info->bytes_reserved += extent_bytes
56  *
57  *   ->insert reference
58  *     Call btrfs_update_block_group() which does
59  *     space_info->bytes_reserved -= extent_bytes
60  *     space_info->bytes_used += extent_bytes
61  *
62  * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
63  *
64  *   Assume we are unable to simply make the reservation because we do not have
65  *   enough space.
66  *
67  *   -> __reserve_bytes
68  *     create a reserve_ticket with ->bytes set to our reservation, add it to
69  *     the tail of space_info->tickets, kick async flush thread
70  *
71  *   ->handle_reserve_ticket
72  *     wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
73  *     on the ticket.
74  *
75  *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
76  *     Flushes various things attempting to free up space.
77  *
78  *   -> btrfs_try_granting_tickets()
79  *     This is called by anything that either subtracts space from
80  *     space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
81  *     space_info->total_bytes.  This loops through the ->priority_tickets and
82  *     then the ->tickets list checking to see if the reservation can be
83  *     completed.  If it can the space is added to space_info->bytes_may_use and
84  *     the ticket is woken up.
85  *
86  *   -> ticket wakeup
87  *     Check if ->bytes == 0; if so we got our reservation and we can carry
88  *     on.  If not, return the appropriate error (ENOSPC, but can be EINTR if we
89  *     were interrupted.)
90  *
91  * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
92  *
93  *   Same as the above, except we add ourselves to the
94  *   space_info->priority_tickets, and we do not use ticket->wait, we simply
95  *   call flush_space() ourselves for the states that are safe for us to call
96  *   without deadlocking and hope for the best.
97  *
98  * THE FLUSHING STATES
99  *
100  *   Generally speaking we will have two cases for each state, a "nice" state
101  *   and an "ALL THE THINGS" state.  In btrfs we delay a lot of work in order to
102  *   reduce the locking overhead on the various trees, and even to keep from
103  *   doing any work at all in the case of delayed refs.  Each of these delayed
104  *   things however hold reservations, and so letting them run allows us to
105  *   reclaim space so we can make new reservations.
106  *
107  *   FLUSH_DELAYED_ITEMS
108  *     Every inode has a delayed item to update the inode.  Take a simple write
109  *     for example: we would update the inode item at write time to update the
110  *     mtime, and then again at finish_ordered_io() time in order to update the
111  *     isize or bytes.  We keep these delayed items to coalesce these operations
112  *     into a single operation done on demand.  These are an easy way to reclaim
113  *     metadata space.
114  *
115  *   FLUSH_DELALLOC
116  *     Look at the delalloc comment to get an idea of how much space is reserved
117  *     for delayed allocation.  We can reclaim some of this space simply by
118  *     running delalloc, but usually we need to wait for ordered extents to
119  *     reclaim the bulk of this space.
120  *
121  *   FLUSH_DELAYED_REFS
122  *     We have a block reserve for the outstanding delayed refs space, and every
123  *     delayed ref operation holds a reservation.  Running these is a quick way
124  *     to reclaim space, but we want to hold this until the end because COW can
125  *     churn a lot and we can avoid making some extent tree modifications if we
126  *     are able to delay for as long as possible.
127  *
128  *   ALLOC_CHUNK
129  *     We will skip this the first time through space reservation because of
130  *     overcommit: we don't want to have a lot of useless metadata space when
131  *     our worst case reservations will likely never come true.
132  *
133  *   RUN_DELAYED_IPUTS
134  *     If we're freeing inodes we're likely freeing checksums, file extent
135  *     items, and extent tree items.  Loads of space could be freed up by these
136  *     operations, however they won't be usable until the transaction commits.
137  *
138  *   COMMIT_TRANS
139  *     This will commit the transaction.  Historically we had a lot of logic
140  *     surrounding whether or not we'd commit the transaction, but this was born
141  *     out of a pre-tickets era where we could end up committing the transaction
142  *     thousands of times in a row without making progress.  Now thanks to our
143  *     ticketing system we know if we're not making progress and can error
144  *     everybody out after a few commits rather than burning the disk hoping for
145  *     a different answer.
146  *
147  * OVERCOMMIT
148  *
149  *   Because we hold so many reservations for metadata we will allow you to
150  *   reserve more space than is currently free in the currently allocated
151  *   metadata space.  This only happens with metadata; data does not allow
152  *   overcommitting.
153  *
154  *   You can see the current logic for when we allow overcommit in
155  *   btrfs_can_overcommit(), but it only applies to unallocated space.  If there
156  *   is no unallocated space to be had, all reservations are kept within the
157  *   free space in the allocated metadata chunks.
158  *
159  *   Because of overcommitting, you generally want to use the
160  *   btrfs_can_overcommit() logic for metadata allocations, as it does the right
161  *   thing with or without extra unallocated space.
162  */
163 
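/*
 * Editorial sketch of the counter flow described above, using hypothetical
 * toy_* names; the real fields live in struct btrfs_space_info and the real
 * transitions happen in btrfs_reserve_data_bytes()/btrfs_reserve_metadata_bytes(),
 * btrfs_add_reserved_bytes() and btrfs_update_block_group().
 */
#if 0	/* illustration only, never compiled */
struct toy_space_info {
	u64 total_bytes;
	u64 bytes_may_use;	/* worst case reservations */
	u64 bytes_reserved;	/* allocated extents, not yet referenced */
	u64 bytes_used;		/* extents referenced on disk */
};

/* ->reserve: claim worst case space up front. */
static int toy_reserve(struct toy_space_info *si, u64 num_bytes)
{
	if (si->bytes_used + si->bytes_reserved + si->bytes_may_use +
	    num_bytes > si->total_bytes)
		return -ENOSPC;
	si->bytes_may_use += num_bytes;
	return 0;
}

/* ->extent allocation: trade the worst case claim for a real extent. */
static void toy_alloc_extent(struct toy_space_info *si, u64 num_bytes,
			     u64 extent_bytes)
{
	si->bytes_may_use -= num_bytes;
	si->bytes_reserved += extent_bytes;
}

/* ->insert reference: the extent is referenced now, account it as used. */
static void toy_insert_ref(struct toy_space_info *si, u64 extent_bytes)
{
	si->bytes_reserved -= extent_bytes;
	si->bytes_used += extent_bytes;
}
#endif
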
164 u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
165 			  bool may_use_included)
166 {
167 	ASSERT(s_info);
168 	return s_info->bytes_used + s_info->bytes_reserved +
169 		s_info->bytes_pinned + s_info->bytes_readonly +
170 		s_info->bytes_zone_unusable +
171 		(may_use_included ? s_info->bytes_may_use : 0);
172 }
173 
174 /*
175  * after adding space to the filesystem, we need to clear the full flags
176  * on all the space infos.
177  */
178 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
179 {
180 	struct list_head *head = &info->space_info;
181 	struct btrfs_space_info *found;
182 
183 	list_for_each_entry(found, head, list)
184 		found->full = 0;
185 }
186 
187 /*
188  * Block groups with more than this value (in percent) of unusable space will be
189  * scheduled for background reclaim.
190  */
191 #define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH			(75)
192 
193 /*
194  * Calculate chunk size depending on volume type (regular or zoned).
195  */
196 static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
197 {
198 	if (btrfs_is_zoned(fs_info))
199 		return fs_info->zone_size;
200 
201 	ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
202 
203 	if (flags & BTRFS_BLOCK_GROUP_DATA)
204 		return BTRFS_MAX_DATA_CHUNK_SIZE;
205 	else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
206 		return SZ_32M;
207 
208 	/* Handle BTRFS_BLOCK_GROUP_METADATA */
209 	if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G)
210 		return SZ_1G;
211 
212 	return SZ_256M;
213 }
214 
215 /*
216  * Update default chunk size.
217  */
218 void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
219 					u64 chunk_size)
220 {
221 	WRITE_ONCE(space_info->chunk_size, chunk_size);
222 }
223 
224 static int create_space_info(struct btrfs_fs_info *info, u64 flags)
225 {
227 	struct btrfs_space_info *space_info;
228 	int i;
229 	int ret;
230 
231 	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
232 	if (!space_info)
233 		return -ENOMEM;
234 
235 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
236 		INIT_LIST_HEAD(&space_info->block_groups[i]);
237 	init_rwsem(&space_info->groups_sem);
238 	spin_lock_init(&space_info->lock);
239 	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
240 	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
241 	INIT_LIST_HEAD(&space_info->ro_bgs);
242 	INIT_LIST_HEAD(&space_info->tickets);
243 	INIT_LIST_HEAD(&space_info->priority_tickets);
244 	space_info->clamp = 1;
245 	btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));
246 
247 	if (btrfs_is_zoned(info))
248 		space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;
249 
250 	ret = btrfs_sysfs_add_space_info_type(info, space_info);
251 	if (ret)
252 		return ret;
253 
254 	list_add(&space_info->list, &info->space_info);
255 	if (flags & BTRFS_BLOCK_GROUP_DATA)
256 		info->data_sinfo = space_info;
257 
258 	return ret;
259 }
260 
261 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
262 {
263 	struct btrfs_super_block *disk_super;
264 	u64 features;
265 	u64 flags;
266 	int mixed = 0;
267 	int ret;
268 
269 	disk_super = fs_info->super_copy;
270 	if (!btrfs_super_root(disk_super))
271 		return -EINVAL;
272 
273 	features = btrfs_super_incompat_flags(disk_super);
274 	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
275 		mixed = 1;
276 
277 	flags = BTRFS_BLOCK_GROUP_SYSTEM;
278 	ret = create_space_info(fs_info, flags);
279 	if (ret)
280 		goto out;
281 
282 	if (mixed) {
283 		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
284 		ret = create_space_info(fs_info, flags);
285 	} else {
286 		flags = BTRFS_BLOCK_GROUP_METADATA;
287 		ret = create_space_info(fs_info, flags);
288 		if (ret)
289 			goto out;
290 
291 		flags = BTRFS_BLOCK_GROUP_DATA;
292 		ret = create_space_info(fs_info, flags);
293 	}
294 out:
295 	return ret;
296 }
297 
298 void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
299 				struct btrfs_block_group *block_group)
300 {
301 	struct btrfs_space_info *found;
302 	int factor, index;
303 
304 	factor = btrfs_bg_type_to_factor(block_group->flags);
305 
306 	found = btrfs_find_space_info(info, block_group->flags);
307 	ASSERT(found);
308 	spin_lock(&found->lock);
309 	found->total_bytes += block_group->length;
310 	found->disk_total += block_group->length * factor;
311 	found->bytes_used += block_group->used;
312 	found->disk_used += block_group->used * factor;
313 	found->bytes_readonly += block_group->bytes_super;
314 	found->bytes_zone_unusable += block_group->zone_unusable;
315 	if (block_group->length > 0)
316 		found->full = 0;
317 	btrfs_try_granting_tickets(info, found);
318 	spin_unlock(&found->lock);
319 
320 	block_group->space_info = found;
321 
322 	index = btrfs_bg_flags_to_raid_index(block_group->flags);
323 	down_write(&found->groups_sem);
324 	list_add_tail(&block_group->list, &found->block_groups[index]);
325 	up_write(&found->groups_sem);
326 }
327 
328 struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
329 					       u64 flags)
330 {
331 	struct list_head *head = &info->space_info;
332 	struct btrfs_space_info *found;
333 
334 	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
335 
336 	list_for_each_entry(found, head, list) {
337 		if (found->flags & flags)
338 			return found;
339 	}
340 	return NULL;
341 }
342 
343 static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
344 			  struct btrfs_space_info *space_info,
345 			  enum btrfs_reserve_flush_enum flush)
346 {
347 	struct btrfs_space_info *data_sinfo;
348 	u64 profile;
349 	u64 avail;
350 	u64 data_chunk_size;
351 	int factor;
352 
353 	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
354 		profile = btrfs_system_alloc_profile(fs_info);
355 	else
356 		profile = btrfs_metadata_alloc_profile(fs_info);
357 
358 	avail = atomic64_read(&fs_info->free_chunk_space);
359 
360 	/*
361 	 * If we have dup, raid1 or raid10 then only half of the free
362 	 * space is actually usable.  For raid56, the space info used
363 	 * doesn't include the parity drive, so we don't have to
364 	 * change the math
365 	 */
366 	factor = btrfs_bg_type_to_factor(profile);
367 	avail = div_u64(avail, factor);
368 	if (avail == 0)
369 		return 0;
370 
371	 * Calculate the data_chunk_size.  space_info->chunk_size is the
372 	 * Calculate the data_chunk_size, space_info->chunk_size is the
373 	 * "optimal" chunk size based on the fs size.  However when we actually
374 	 * allocate the chunk we will strip this down further, making it no more
375 	 * than 10% of the disk or 1G, whichever is smaller.
376 	 *
377 	 * On the zoned mode, we need to use zone_size (=
378 	 * data_sinfo->chunk_size) as it is.
379 	 */
380 	data_sinfo = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
381 	if (!btrfs_is_zoned(fs_info)) {
382 		data_chunk_size = min(data_sinfo->chunk_size,
383 				      mult_perc(fs_info->fs_devices->total_rw_bytes, 10));
384 		data_chunk_size = min_t(u64, data_chunk_size, SZ_1G);
385 	} else {
386 		data_chunk_size = data_sinfo->chunk_size;
387 	}
388 
389 	/*
390 	 * Since data allocations immediately use block groups as part of the
391 	 * reservation, because we assume that data reservations will == actual
392 	 * usage, we could potentially overcommit and then immediately have that
393 	 * available space used by a data allocation, which could put us in a
394 	 * bind when we get close to filling the file system.
395 	 *
396 	 * To handle this simply remove the data_chunk_size from the available
397 	 * space.  If we are relatively empty this won't affect our ability to
398 	 * overcommit much, and if we're very close to full it'll keep us from
399 	 * getting into a position where we've given ourselves very little
400 	 * metadata wiggle room.
401 	 */
402 	if (avail <= data_chunk_size)
403 		return 0;
404 	avail -= data_chunk_size;
405 
406 	/*
407 	 * If we aren't flushing all things, let us overcommit up to
408	 * half of the space.  If we can flush, don't let us overcommit
409	 * too much, limit it to 1/8 of the space.
410 	 */
411 	if (flush == BTRFS_RESERVE_FLUSH_ALL)
412 		avail >>= 3;
413 	else
414 		avail >>= 1;
415 
416 	/*
417	 * In zoned mode, we always allocate one zone as one chunk.
418	 * Returning bytes that are not aligned to the zone size puts
419	 * less pressure on the async metadata reclaim process, which
420	 * then over-commits too much and leads to ENOSPC.  Align down
421	 * to the zone size to avoid that.
422 	 */
423 	if (btrfs_is_zoned(fs_info))
424 		avail = ALIGN_DOWN(avail, fs_info->zone_size);
425 
426 	return avail;
427 }
428 
429 int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
430 			 struct btrfs_space_info *space_info, u64 bytes,
431 			 enum btrfs_reserve_flush_enum flush)
432 {
433 	u64 avail;
434 	u64 used;
435 
436	/* Don't overcommit data (this includes the mixed data+metadata case). */
437 	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
438 		return 0;
439 
440 	used = btrfs_space_info_used(space_info, true);
441 	avail = calc_available_free_space(fs_info, space_info, flush);
442 
443 	if (used + bytes < space_info->total_bytes + avail)
444 		return 1;
445 	return 0;
446 }
447 
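/*
 * Worked example (editorial, made-up numbers): a metadata space_info with
 * total_bytes of 8GiB and 7GiB accounted across the bytes_ counters, on a
 * single device (factor 1) with 4GiB of unallocated space and an assumed
 * data_chunk_size of 1GiB.  For flush == BTRFS_RESERVE_FLUSH_ALL:
 *
 *   avail = (4GiB - 1GiB) >> 3 = 384MiB
 *   used + bytes < total_bytes + avail
 *   7GiB + bytes < 8GiB + 384MiB
 *
 * so reservations of up to ~1.4GiB may still overcommit, even though only
 * 1GiB is free in the allocated metadata chunks.
 */
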
448 static void remove_ticket(struct btrfs_space_info *space_info,
449 			  struct reserve_ticket *ticket)
450 {
451 	if (!list_empty(&ticket->list)) {
452 		list_del_init(&ticket->list);
453 		ASSERT(space_info->reclaim_size >= ticket->bytes);
454 		space_info->reclaim_size -= ticket->bytes;
455 	}
456 }
457 
458 /*
459  * This is for space we already have accounted in space_info->bytes_may_use, so
460  * basically when we're returning space from block_rsv's.
461  */
462 void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
463 				struct btrfs_space_info *space_info)
464 {
465 	struct list_head *head;
466 	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
467 
468 	lockdep_assert_held(&space_info->lock);
469 
470 	head = &space_info->priority_tickets;
471 again:
472 	while (!list_empty(head)) {
473 		struct reserve_ticket *ticket;
474 		u64 used = btrfs_space_info_used(space_info, true);
475 
476 		ticket = list_first_entry(head, struct reserve_ticket, list);
477 
478 		/* Check and see if our ticket can be satisfied now. */
479 		if ((used + ticket->bytes <= space_info->total_bytes) ||
480 		    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
481 					 flush)) {
482 			btrfs_space_info_update_bytes_may_use(fs_info,
483 							      space_info,
484 							      ticket->bytes);
485 			remove_ticket(space_info, ticket);
486 			ticket->bytes = 0;
487 			space_info->tickets_id++;
488 			wake_up(&ticket->wait);
489 		} else {
490 			break;
491 		}
492 	}
493 
494 	if (head == &space_info->priority_tickets) {
495 		head = &space_info->tickets;
496 		flush = BTRFS_RESERVE_FLUSH_ALL;
497 		goto again;
498 	}
499 }
500 
501 #define DUMP_BLOCK_RSV(fs_info, rsv_name)				\
502 do {									\
503 	struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;		\
504 	spin_lock(&__rsv->lock);					\
505 	btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",	\
506 		   __rsv->size, __rsv->reserved);			\
507 	spin_unlock(&__rsv->lock);					\
508 } while (0)
509 
510 static const char *space_info_flag_to_str(const struct btrfs_space_info *space_info)
511 {
512 	switch (space_info->flags) {
513 	case BTRFS_BLOCK_GROUP_SYSTEM:
514 		return "SYSTEM";
515 	case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA:
516 		return "DATA+METADATA";
517 	case BTRFS_BLOCK_GROUP_DATA:
518 		return "DATA";
519 	case BTRFS_BLOCK_GROUP_METADATA:
520 		return "METADATA";
521 	default:
522 		return "UNKNOWN";
523 	}
524 }
525 
526 static void dump_global_block_rsv(struct btrfs_fs_info *fs_info)
527 {
528 	DUMP_BLOCK_RSV(fs_info, global_block_rsv);
529 	DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
530 	DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
531 	DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
532 	DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
533 }
534 
535 static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
536 				    struct btrfs_space_info *info)
537 {
538 	const char *flag_str = space_info_flag_to_str(info);
539 	lockdep_assert_held(&info->lock);
540 
541 	/* The free space could be negative in case of overcommit */
542 	btrfs_info(fs_info, "space_info %s has %lld free, is %sfull",
543 		   flag_str,
544 		   (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
545 		   info->full ? "" : "not ");
546 	btrfs_info(fs_info,
547 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
548 		info->total_bytes, info->bytes_used, info->bytes_pinned,
549 		info->bytes_reserved, info->bytes_may_use,
550 		info->bytes_readonly, info->bytes_zone_unusable);
551 }
552 
553 void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
554 			   struct btrfs_space_info *info, u64 bytes,
555 			   int dump_block_groups)
556 {
557 	struct btrfs_block_group *cache;
558 	u64 total_avail = 0;
559 	int index = 0;
560 
561 	spin_lock(&info->lock);
562 	__btrfs_dump_space_info(fs_info, info);
563 	dump_global_block_rsv(fs_info);
564 	spin_unlock(&info->lock);
565 
566 	if (!dump_block_groups)
567 		return;
568 
569 	down_read(&info->groups_sem);
570 again:
571 	list_for_each_entry(cache, &info->block_groups[index], list) {
572 		u64 avail;
573 
574 		spin_lock(&cache->lock);
575 		avail = cache->length - cache->used - cache->pinned -
576 			cache->reserved - cache->delalloc_bytes -
577 			cache->bytes_super - cache->zone_unusable;
578 		btrfs_info(fs_info,
579 "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s",
580 			   cache->start, cache->length, cache->used, cache->pinned,
581 			   cache->reserved, cache->delalloc_bytes,
582 			   cache->bytes_super, cache->zone_unusable,
583 			   avail, cache->ro ? "[readonly]" : "");
584 		spin_unlock(&cache->lock);
585 		btrfs_dump_free_space(cache, bytes);
586 		total_avail += avail;
587 	}
588 	if (++index < BTRFS_NR_RAID_TYPES)
589 		goto again;
590 	up_read(&info->groups_sem);
591 
592 	btrfs_info(fs_info, "%llu bytes available across all block groups", total_avail);
593 }
594 
595 static inline u64 calc_reclaim_items_nr(const struct btrfs_fs_info *fs_info,
596 					u64 to_reclaim)
597 {
598 	u64 bytes;
599 	u64 nr;
600 
601 	bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
602 	nr = div64_u64(to_reclaim, bytes);
603 	if (!nr)
604 		nr = 1;
605 	return nr;
606 }
607 
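/*
 * Worked example (editorial): with the default 16KiB nodesize,
 * btrfs_calc_insert_metadata_size(fs_info, 1) is
 * 16KiB * 2 * BTRFS_MAX_LEVEL (8) = 256KiB, so a to_reclaim of 4MiB maps
 * to 4MiB / 256KiB = 16 items.
 */
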
608 #define EXTENT_SIZE_PER_ITEM	SZ_256K
609 
610 /*
611  * shrink metadata reservation for delalloc
612  */
613 static void shrink_delalloc(struct btrfs_fs_info *fs_info,
614 			    struct btrfs_space_info *space_info,
615 			    u64 to_reclaim, bool wait_ordered,
616 			    bool for_preempt)
617 {
618 	struct btrfs_trans_handle *trans;
619 	u64 delalloc_bytes;
620 	u64 ordered_bytes;
621 	u64 items;
622 	long time_left;
623 	int loops;
624 
625 	delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
626 	ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
627 	if (delalloc_bytes == 0 && ordered_bytes == 0)
628 		return;
629	/* Calculate the number of pages we need to flush for the space reservation. */
630 	/* Calc the number of the pages we need flush for space reservation */
631 	if (to_reclaim == U64_MAX) {
632 		items = U64_MAX;
633 	} else {
634 		/*
635 		 * to_reclaim is set to however much metadata we need to
636 		 * reclaim, but reclaiming that much data doesn't really track
637		 * exactly.  What we really want to do is reclaim full inodes'
638		 * worth of reservations; however, that's not available to us
639 		 * here.  We will take a fraction of the delalloc bytes for our
640 		 * flushing loops and hope for the best.  Delalloc will expand
641 		 * the amount we write to cover an entire dirty extent, which
642 		 * will reclaim the metadata reservation for that range.  If
643 		 * it's not enough subsequent flush stages will be more
644 		 * aggressive.
645 		 */
646 		to_reclaim = max(to_reclaim, delalloc_bytes >> 3);
647 		items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
648 	}
649 
650 	trans = current->journal_info;
651 
652 	/*
653 	 * If we are doing more ordered than delalloc we need to just wait on
654 	 * ordered extents, otherwise we'll waste time trying to flush delalloc
655 	 * that likely won't give us the space back we need.
656 	 */
657 	if (ordered_bytes > delalloc_bytes && !for_preempt)
658 		wait_ordered = true;
659 
660 	loops = 0;
661 	while ((delalloc_bytes || ordered_bytes) && loops < 3) {
662 		u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
663 		long nr_pages = min_t(u64, temp, LONG_MAX);
664 		int async_pages;
665 
666 		btrfs_start_delalloc_roots(fs_info, nr_pages, true);
667 
668 		/*
669 		 * We need to make sure any outstanding async pages are now
670 		 * processed before we continue.  This is because things like
671 		 * sync_inode() try to be smart and skip writing if the inode is
672		 * marked clean.  We don't use the filemap writeback helpers for flushing
673 		 * because we want to control how many pages we write out at a
674 		 * time, thus this is the only safe way to make sure we've
675 		 * waited for outstanding compressed workers to have started
676 		 * their jobs and thus have ordered extents set up properly.
677 		 *
678 		 * This exists because we do not want to wait for each
679 		 * individual inode to finish its async work, we simply want to
680 		 * start the IO on everybody, and then come back here and wait
681 		 * for all of the async work to catch up.  Once we're done with
682 		 * that we know we'll have ordered extents for everything and we
683 		 * can decide if we wait for that or not.
684 		 *
685 		 * If we choose to replace this in the future, make absolutely
686 		 * sure that the proper waiting is being done in the async case,
687 		 * as there have been bugs in that area before.
688 		 */
689 		async_pages = atomic_read(&fs_info->async_delalloc_pages);
690 		if (!async_pages)
691 			goto skip_async;
692 
693 		/*
694		 * We don't want to wait forever.  If we wrote fewer pages in this
695		 * loop than we have outstanding, only wait for that number of
696		 * pages; otherwise we can wait for all async pages to finish
697 		 * before continuing.
698 		 */
699 		if (async_pages > nr_pages)
700 			async_pages -= nr_pages;
701 		else
702 			async_pages = 0;
703 		wait_event(fs_info->async_submit_wait,
704 			   atomic_read(&fs_info->async_delalloc_pages) <=
705 			   async_pages);
706 skip_async:
707 		loops++;
708 		if (wait_ordered && !trans) {
709 			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
710 		} else {
711 			time_left = schedule_timeout_killable(1);
712 			if (time_left)
713 				break;
714 		}
715 
716 		/*
717		 * If we are flushing for preemption we just want a one-shot of
718		 * delalloc flushing so we can stop if we decide we don't need
719 		 * to anymore.
720 		 */
721 		if (for_preempt)
722 			break;
723 
724 		spin_lock(&space_info->lock);
725 		if (list_empty(&space_info->tickets) &&
726 		    list_empty(&space_info->priority_tickets)) {
727 			spin_unlock(&space_info->lock);
728 			break;
729 		}
730 		spin_unlock(&space_info->lock);
731 
732 		delalloc_bytes = percpu_counter_sum_positive(
733 						&fs_info->delalloc_bytes);
734 		ordered_bytes = percpu_counter_sum_positive(
735 						&fs_info->ordered_bytes);
736 	}
737 }
738 
739 /*
740  * Try to flush some data based on policy set by @state. This is only advisory
741  * and may fail for various reasons. The caller is supposed to examine the
742  * state of @space_info to detect the outcome.
743  */
744 static void flush_space(struct btrfs_fs_info *fs_info,
745 		       struct btrfs_space_info *space_info, u64 num_bytes,
746 		       enum btrfs_flush_state state, bool for_preempt)
747 {
748 	struct btrfs_root *root = fs_info->tree_root;
749 	struct btrfs_trans_handle *trans;
750 	int nr;
751 	int ret = 0;
752 
753 	switch (state) {
754 	case FLUSH_DELAYED_ITEMS_NR:
755 	case FLUSH_DELAYED_ITEMS:
756 		if (state == FLUSH_DELAYED_ITEMS_NR)
757 			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
758 		else
759 			nr = -1;
760 
761 		trans = btrfs_join_transaction_nostart(root);
762 		if (IS_ERR(trans)) {
763 			ret = PTR_ERR(trans);
764 			if (ret == -ENOENT)
765 				ret = 0;
766 			break;
767 		}
768 		ret = btrfs_run_delayed_items_nr(trans, nr);
769 		btrfs_end_transaction(trans);
770 		break;
771 	case FLUSH_DELALLOC:
772 	case FLUSH_DELALLOC_WAIT:
773 	case FLUSH_DELALLOC_FULL:
774 		if (state == FLUSH_DELALLOC_FULL)
775 			num_bytes = U64_MAX;
776 		shrink_delalloc(fs_info, space_info, num_bytes,
777 				state != FLUSH_DELALLOC, for_preempt);
778 		break;
779 	case FLUSH_DELAYED_REFS_NR:
780 	case FLUSH_DELAYED_REFS:
781 		trans = btrfs_join_transaction_nostart(root);
782 		if (IS_ERR(trans)) {
783 			ret = PTR_ERR(trans);
784 			if (ret == -ENOENT)
785 				ret = 0;
786 			break;
787 		}
788 		if (state == FLUSH_DELAYED_REFS_NR)
789 			btrfs_run_delayed_refs(trans, num_bytes);
790 		else
791 			btrfs_run_delayed_refs(trans, 0);
792 		btrfs_end_transaction(trans);
793 		break;
794 	case ALLOC_CHUNK:
795 	case ALLOC_CHUNK_FORCE:
796 		trans = btrfs_join_transaction(root);
797 		if (IS_ERR(trans)) {
798 			ret = PTR_ERR(trans);
799 			break;
800 		}
801 		ret = btrfs_chunk_alloc(trans,
802 				btrfs_get_alloc_profile(fs_info, space_info->flags),
803 				(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
804 					CHUNK_ALLOC_FORCE);
805 		btrfs_end_transaction(trans);
806 
807 		if (ret > 0 || ret == -ENOSPC)
808 			ret = 0;
809 		break;
810 	case RUN_DELAYED_IPUTS:
811 		/*
812 		 * If we have pending delayed iputs then we could free up a
813 		 * bunch of pinned space, so make sure we run the iputs before
814 		 * we do our pinned bytes check below.
815 		 */
816 		btrfs_run_delayed_iputs(fs_info);
817 		btrfs_wait_on_delayed_iputs(fs_info);
818 		break;
819 	case COMMIT_TRANS:
820 		ASSERT(current->journal_info == NULL);
821 		/*
822 		 * We don't want to start a new transaction, just attach to the
823		 * current one, or wait for it to fully commit in case its commit is
824 		 * happening at the moment. Note: we don't use a nostart join
825 		 * because that does not wait for a transaction to fully commit
826 		 * (only for it to be unblocked, state TRANS_STATE_UNBLOCKED).
827 		 */
828 		trans = btrfs_attach_transaction_barrier(root);
829 		if (IS_ERR(trans)) {
830 			ret = PTR_ERR(trans);
831 			if (ret == -ENOENT)
832 				ret = 0;
833 			break;
834 		}
835 		ret = btrfs_commit_transaction(trans);
836 		break;
837 	default:
838 		ret = -ENOSPC;
839 		break;
840 	}
841 
842 	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
843 				ret, for_preempt);
845 }
846 
847 static inline u64
848 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
849 				 struct btrfs_space_info *space_info)
850 {
851 	u64 used;
852 	u64 avail;
853 	u64 to_reclaim = space_info->reclaim_size;
854 
855 	lockdep_assert_held(&space_info->lock);
856 
857 	avail = calc_available_free_space(fs_info, space_info,
858 					  BTRFS_RESERVE_FLUSH_ALL);
859 	used = btrfs_space_info_used(space_info, true);
860 
861 	/*
862 	 * We may be flushing because suddenly we have less space than we had
863 	 * before, and now we're well over-committed based on our current free
864 	 * space.  If that's the case add in our overage so we make sure to put
865 	 * appropriate pressure on the flushing state machine.
866 	 */
867 	if (space_info->total_bytes + avail < used)
868 		to_reclaim += used - (space_info->total_bytes + avail);
869 
870 	return to_reclaim;
871 }
872 
873 static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
874 				    struct btrfs_space_info *space_info)
875 {
876 	const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv);
877 	u64 ordered, delalloc;
878 	u64 thresh;
879 	u64 used;
880 
881 	thresh = mult_perc(space_info->total_bytes, 90);
882 
883 	lockdep_assert_held(&space_info->lock);
884 
885 	/* If we're just plain full then async reclaim just slows us down. */
886 	if ((space_info->bytes_used + space_info->bytes_reserved +
887 	     global_rsv_size) >= thresh)
888 		return false;
889 
890 	used = space_info->bytes_may_use + space_info->bytes_pinned;
891 
892 	/* The total flushable belongs to the global rsv, don't flush. */
893 	if (global_rsv_size >= used)
894 		return false;
895 
896 	/*
897 	 * 128MiB is 1/4 of the maximum global rsv size.  If we have less than
898	 * that devoted to other reservations then there's no sense in flushing;
899 	 * we don't have a lot of things that need flushing.
900 	 */
901 	if (used - global_rsv_size <= SZ_128M)
902 		return false;
903 
904 	/*
905 	 * We have tickets queued, bail so we don't compete with the async
906 	 * flushers.
907 	 */
908 	if (space_info->reclaim_size)
909 		return false;
910 
911 	/*
912 	 * If we have over half of the free space occupied by reservations or
913 	 * pinned then we want to start flushing.
914 	 *
915 	 * We do not do the traditional thing here, which is to say
916 	 *
917 	 *   if (used >= ((total_bytes + avail) / 2))
918 	 *     return 1;
919 	 *
920 	 * because this doesn't quite work how we want.  If we had more than 50%
921 	 * of the space_info used by bytes_used and we had 0 available we'd just
922 	 * constantly run the background flusher.  Instead we want it to kick in
923 	 * if our reclaimable space exceeds our clamped free space.
924 	 *
925 	 * Our clamping range is 2^1 -> 2^8.  Practically speaking that means
926 	 * the following:
927 	 *
928 	 * Amount of RAM        Minimum threshold       Maximum threshold
929	 * Amount of free space Minimum threshold       Maximum threshold
930 	 *        256GiB                     1GiB                  128GiB
931 	 *        128GiB                   512MiB                   64GiB
932 	 *         64GiB                   256MiB                   32GiB
933 	 *         32GiB                   128MiB                   16GiB
934 	 *         16GiB                    64MiB                    8GiB
935 	 *
936 	 * These are the range our thresholds will fall in, corresponding to how
937 	 * much delalloc we need for the background flusher to kick in.
938 	 */
939 
940 	thresh = calc_available_free_space(fs_info, space_info,
941 					   BTRFS_RESERVE_FLUSH_ALL);
942 	used = space_info->bytes_used + space_info->bytes_reserved +
943 	       space_info->bytes_readonly + global_rsv_size;
944 	if (used < space_info->total_bytes)
945 		thresh += space_info->total_bytes - used;
946 	thresh >>= space_info->clamp;
947 
948 	used = space_info->bytes_pinned;
949 
950 	/*
951 	 * If we have more ordered bytes than delalloc bytes then we're either
952 	 * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
953 	 * around.  Preemptive flushing is only useful in that it can free up
954 	 * space before tickets need to wait for things to finish.  In the case
955 	 * of ordered extents, preemptively waiting on ordered extents gets us
956 	 * nothing, if our reservations are tied up in ordered extents we'll
957 	 * simply have to slow down writers by forcing them to wait on ordered
958 	 * extents.
959 	 *
960 	 * In the case that ordered is larger than delalloc, only include the
961 	 * block reserves that we would actually be able to directly reclaim
962 	 * from.  In this case if we're heavy on metadata operations this will
963 	 * clearly be heavy enough to warrant preemptive flushing.  In the case
964 	 * of heavy DIO or ordered reservations, preemptive flushing will just
965 	 * waste time and cause us to slow down.
966 	 *
967 	 * We want to make sure we truly are maxed out on ordered however, so
968 	 * cut ordered in half, and if it's still higher than delalloc then we
969 	 * can keep flushing.  This is to avoid the case where we start
970 	 * flushing, and now delalloc == ordered and we stop preemptively
971 	 * flushing when we could still have several gigs of delalloc to flush.
972 	 */
973 	ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
974 	delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
975 	if (ordered >= delalloc)
976 		used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) +
977 			btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv);
978 	else
979 		used += space_info->bytes_may_use - global_rsv_size;
980 
981 	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
982 		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
983 }
984 
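/*
 * Editorial sketch of the threshold math above, with a hypothetical helper:
 * e.g. with 2GiB of avail, 10GiB total, 8GiB used and clamp = 3, the
 * preemptive flusher kicks in once the reclaimable bytes exceed
 * (2GiB + 2GiB) >> 3 = 512MiB.
 */
#if 0	/* illustration only, never compiled */
static u64 toy_preempt_thresh(u64 avail, u64 total, u64 used, int clamp)
{
	u64 thresh = avail;

	/* Add whatever is unused in the allocated chunks. */
	if (used < total)
		thresh += total - used;
	/* clamp ranges from 1 to 8, see maybe_clamp_preempt(). */
	return thresh >> clamp;
}
#endif
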
985 static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
986 				  struct btrfs_space_info *space_info,
987 				  struct reserve_ticket *ticket)
988 {
989 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
990 	u64 min_bytes;
991 
992 	if (!ticket->steal)
993 		return false;
994 
995 	if (global_rsv->space_info != space_info)
996 		return false;
997 
998 	spin_lock(&global_rsv->lock);
999 	min_bytes = mult_perc(global_rsv->size, 10);
1000 	if (global_rsv->reserved < min_bytes + ticket->bytes) {
1001 		spin_unlock(&global_rsv->lock);
1002 		return false;
1003 	}
1004 	global_rsv->reserved -= ticket->bytes;
1005 	remove_ticket(space_info, ticket);
1006 	ticket->bytes = 0;
1007 	wake_up(&ticket->wait);
1008 	space_info->tickets_id++;
1009 	if (global_rsv->reserved < global_rsv->size)
1010 		global_rsv->full = 0;
1011 	spin_unlock(&global_rsv->lock);
1012 
1013 	return true;
1014 }
1015 
1016 /*
1017  * We've exhausted our flushing, start failing tickets.
1018  *
1019  * @fs_info:    fs_info for this fs
1020  * @space_info: the space info we were flushing
1021  *
1022  * We call this when we've exhausted our flushing ability and haven't made
1023  * progress in satisfying tickets.  The reservation code handles tickets in
1024  * order, so if there is a large ticket first and then smaller ones we could
1025  * very well satisfy the smaller tickets.  This will attempt to wake up any
1026  * tickets in the list to catch this case.
1027  *
1028  * This function returns true if it was able to make progress by clearing out
1029  * other tickets, or if it stumbles across a ticket that was smaller than the
1030  * first ticket.
1031  */
1032 static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
1033 				   struct btrfs_space_info *space_info)
1034 {
1035 	struct reserve_ticket *ticket;
1036 	u64 tickets_id = space_info->tickets_id;
1037 	const bool aborted = BTRFS_FS_ERROR(fs_info);
1038 
1039 	trace_btrfs_fail_all_tickets(fs_info, space_info);
1040 
1041 	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
1042 		btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
1043 		__btrfs_dump_space_info(fs_info, space_info);
1044 	}
1045 
1046 	while (!list_empty(&space_info->tickets) &&
1047 	       tickets_id == space_info->tickets_id) {
1048 		ticket = list_first_entry(&space_info->tickets,
1049 					  struct reserve_ticket, list);
1050 
1051 		if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket))
1052 			return true;
1053 
1054 		if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1055 			btrfs_info(fs_info, "failing ticket with %llu bytes",
1056 				   ticket->bytes);
1057 
1058 		remove_ticket(space_info, ticket);
1059 		if (aborted)
1060 			ticket->error = -EIO;
1061 		else
1062 			ticket->error = -ENOSPC;
1063 		wake_up(&ticket->wait);
1064 
1065 		/*
1066 		 * We're just throwing tickets away, so more flushing may not
1067 		 * trip over btrfs_try_granting_tickets, so we need to call it
1068 		 * here to see if we can make progress with the next ticket in
1069 		 * the list.
1070 		 */
1071 		if (!aborted)
1072 			btrfs_try_granting_tickets(fs_info, space_info);
1073 	}
1074 	return (tickets_id != space_info->tickets_id);
1075 }
1076 
1077 /*
1078  * This is for normal flushers; we can wait all goddamned day if we want to.  We
1079  * will loop and continuously try to flush as long as we are making progress.
1080  * We count progress as clearing off tickets each time we have to loop.
1081  */
1082 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
1083 {
1084 	struct btrfs_fs_info *fs_info;
1085 	struct btrfs_space_info *space_info;
1086 	u64 to_reclaim;
1087 	enum btrfs_flush_state flush_state;
1088 	int commit_cycles = 0;
1089 	u64 last_tickets_id;
1090 
1091 	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
1092 	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1093 
1094 	spin_lock(&space_info->lock);
1095 	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1096 	if (!to_reclaim) {
1097 		space_info->flush = 0;
1098 		spin_unlock(&space_info->lock);
1099 		return;
1100 	}
1101 	last_tickets_id = space_info->tickets_id;
1102 	spin_unlock(&space_info->lock);
1103 
1104 	flush_state = FLUSH_DELAYED_ITEMS_NR;
1105 	do {
1106 		flush_space(fs_info, space_info, to_reclaim, flush_state, false);
1107 		spin_lock(&space_info->lock);
1108 		if (list_empty(&space_info->tickets)) {
1109 			space_info->flush = 0;
1110 			spin_unlock(&space_info->lock);
1111 			return;
1112 		}
1113 		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
1114 							      space_info);
1115 		if (last_tickets_id == space_info->tickets_id) {
1116 			flush_state++;
1117 		} else {
1118 			last_tickets_id = space_info->tickets_id;
1119 			flush_state = FLUSH_DELAYED_ITEMS_NR;
1120 			if (commit_cycles)
1121 				commit_cycles--;
1122 		}
1123 
1124 		/*
1125 		 * We do not want to empty the system of delalloc unless we're
1126 		 * under heavy pressure, so allow one trip through the flushing
1127 		 * logic before we start doing a FLUSH_DELALLOC_FULL.
1128 		 */
1129 		if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles)
1130 			flush_state++;
1131 
1132 		/*
1133 		 * We don't want to force a chunk allocation until we've tried
1134 		 * pretty hard to reclaim space.  Think of the case where we
1135 		 * freed up a bunch of space and so have a lot of pinned space
1136		 * to reclaim.  We would rather use that than possibly create an
1137 		 * underutilized metadata chunk.  So if this is our first run
1138 		 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
1139 		 * commit the transaction.  If nothing has changed the next go
1140 		 * around then we can force a chunk allocation.
1141 		 */
1142 		if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
1143 			flush_state++;
1144 
1145 		if (flush_state > COMMIT_TRANS) {
1146 			commit_cycles++;
1147 			if (commit_cycles > 2) {
1148 				if (maybe_fail_all_tickets(fs_info, space_info)) {
1149 					flush_state = FLUSH_DELAYED_ITEMS_NR;
1150 					commit_cycles--;
1151 				} else {
1152 					space_info->flush = 0;
1153 				}
1154 			} else {
1155 				flush_state = FLUSH_DELAYED_ITEMS_NR;
1156 			}
1157 		}
1158 		spin_unlock(&space_info->lock);
1159 	} while (flush_state <= COMMIT_TRANS);
1160 }
1161 
1162 /*
1163  * This handles pre-flushing of metadata space before we get to the point that
1164  * we need to start blocking threads on tickets.  The logic here is different
1165  * from the other flush paths because it doesn't rely on tickets to tell us how
1166  * much we need to flush; instead it attempts to keep us below the 80% full
1167  * watermark of space by flushing whichever reservation pool is currently the
1168  * largest.
1169  */
1170 static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
1171 {
1172 	struct btrfs_fs_info *fs_info;
1173 	struct btrfs_space_info *space_info;
1174 	struct btrfs_block_rsv *delayed_block_rsv;
1175 	struct btrfs_block_rsv *delayed_refs_rsv;
1176 	struct btrfs_block_rsv *global_rsv;
1177 	struct btrfs_block_rsv *trans_rsv;
1178 	int loops = 0;
1179 
1180 	fs_info = container_of(work, struct btrfs_fs_info,
1181 			       preempt_reclaim_work);
1182 	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1183 	delayed_block_rsv = &fs_info->delayed_block_rsv;
1184 	delayed_refs_rsv = &fs_info->delayed_refs_rsv;
1185 	global_rsv = &fs_info->global_block_rsv;
1186 	trans_rsv = &fs_info->trans_block_rsv;
1187 
1188 	spin_lock(&space_info->lock);
1189 	while (need_preemptive_reclaim(fs_info, space_info)) {
1190 		enum btrfs_flush_state flush;
1191 		u64 delalloc_size = 0;
1192 		u64 to_reclaim, block_rsv_size;
1193 		const u64 global_rsv_size = btrfs_block_rsv_reserved(global_rsv);
1194 
1195 		loops++;
1196 
1197 		/*
1198 		 * We don't have a precise counter for the metadata being
1199 		 * reserved for delalloc, so we'll approximate it by subtracting
1200 		 * out the block rsv's space from the bytes_may_use.  If that
1201 		 * amount is higher than the individual reserves, then we can
1202 		 * assume it's tied up in delalloc reservations.
1203 		 */
1204 		block_rsv_size = global_rsv_size +
1205 			btrfs_block_rsv_reserved(delayed_block_rsv) +
1206 			btrfs_block_rsv_reserved(delayed_refs_rsv) +
1207 			btrfs_block_rsv_reserved(trans_rsv);
1208 		if (block_rsv_size < space_info->bytes_may_use)
1209 			delalloc_size = space_info->bytes_may_use - block_rsv_size;
1210 
1211 		/*
1212 		 * We don't want to include the global_rsv in our calculation,
1213 		 * because that's space we can't touch.  Subtract it from the
1214 		 * block_rsv_size for the next checks.
1215 		 */
1216 		block_rsv_size -= global_rsv_size;
1217 
1218 		/*
1219 		 * We really want to avoid flushing delalloc too much, as it
1220 		 * could result in poor allocation patterns, so only flush it if
1221 		 * it's larger than the rest of the pools combined.
1222 		 */
1223 		if (delalloc_size > block_rsv_size) {
1224 			to_reclaim = delalloc_size;
1225 			flush = FLUSH_DELALLOC;
1226 		} else if (space_info->bytes_pinned >
1227 			   (btrfs_block_rsv_reserved(delayed_block_rsv) +
1228 			    btrfs_block_rsv_reserved(delayed_refs_rsv))) {
1229 			to_reclaim = space_info->bytes_pinned;
1230 			flush = COMMIT_TRANS;
1231 		} else if (btrfs_block_rsv_reserved(delayed_block_rsv) >
1232 			   btrfs_block_rsv_reserved(delayed_refs_rsv)) {
1233 			to_reclaim = btrfs_block_rsv_reserved(delayed_block_rsv);
1234 			flush = FLUSH_DELAYED_ITEMS_NR;
1235 		} else {
1236 			to_reclaim = btrfs_block_rsv_reserved(delayed_refs_rsv);
1237 			flush = FLUSH_DELAYED_REFS_NR;
1238 		}
1239 
1240 		spin_unlock(&space_info->lock);
1241 
1242 		/*
1243 		 * We don't want to reclaim everything, just a portion, so scale
1244 		 * down the to_reclaim by 1/4.  If it takes us down to 0,
1245		 * reclaim one item's worth.
1246 		 */
1247 		to_reclaim >>= 2;
1248 		if (!to_reclaim)
1249 			to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
1250 		flush_space(fs_info, space_info, to_reclaim, flush, true);
1251 		cond_resched();
1252 		spin_lock(&space_info->lock);
1253 	}
1254 
1255 	/* We only went through once, back off our clamping. */
1256 	if (loops == 1 && !space_info->reclaim_size)
1257 		space_info->clamp = max(1, space_info->clamp - 1);
1258 	trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
1259 	spin_unlock(&space_info->lock);
1260 }
1261 
1262 /*
1263  * FLUSH_DELALLOC_FULL:
1264  *   Space is freed from flushing delalloc in one of two ways.
1265  *
1266  *   1) compression is on and we allocate less space than we reserved
1267  *   2) we are overwriting existing space
1268  *
1269  *   For #1 that extra space is reclaimed as soon as the delalloc pages are
1270  *   COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
1271  *   length to ->bytes_reserved, and subtracts the reserved space from
1272  *   ->bytes_may_use.
1273  *
1274  *   For #2 this is trickier.  Once the ordered extent runs we will drop the
1275  *   extent in the range we are overwriting, which creates a delayed ref for
1276  *   that freed extent.  This however is not reclaimed until the transaction
1277  *   commits, thus the next stages.
1278  *
1279  * RUN_DELAYED_IPUTS
1280  *   If we are freeing inodes, we want to make sure all delayed iputs have
1281  *   completed, because they could have been on an inode with i_nlink == 0, and
1282  *   thus have been truncated and freed up space.  But again this space is not
1283  *   immediately re-usable, it comes in the form of a delayed ref, which must be
1284  *   run and then the transaction must be committed.
1285  *
1286  * COMMIT_TRANS
1287  *   This is where we reclaim all of the pinned space generated by running the
1288  *   iputs.
1289  *
1290  * ALLOC_CHUNK_FORCE
1291  *   For data we start with alloc chunk force, however we could have been full
1292  *   before, and then the transaction commit could have freed new block groups,
1293  *   so if we now have space to allocate do the force chunk allocation.
1294  */
1295 static const enum btrfs_flush_state data_flush_states[] = {
1296 	FLUSH_DELALLOC_FULL,
1297 	RUN_DELAYED_IPUTS,
1298 	COMMIT_TRANS,
1299 	ALLOC_CHUNK_FORCE,
1300 };
1301 
1302 static void btrfs_async_reclaim_data_space(struct work_struct *work)
1303 {
1304 	struct btrfs_fs_info *fs_info;
1305 	struct btrfs_space_info *space_info;
1306 	u64 last_tickets_id;
1307 	enum btrfs_flush_state flush_state = 0;
1308 
1309 	fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
1310 	space_info = fs_info->data_sinfo;
1311 
1312 	spin_lock(&space_info->lock);
1313 	if (list_empty(&space_info->tickets)) {
1314 		space_info->flush = 0;
1315 		spin_unlock(&space_info->lock);
1316 		return;
1317 	}
1318 	last_tickets_id = space_info->tickets_id;
1319 	spin_unlock(&space_info->lock);
1320 
1321 	while (!space_info->full) {
1322 		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1323 		spin_lock(&space_info->lock);
1324 		if (list_empty(&space_info->tickets)) {
1325 			space_info->flush = 0;
1326 			spin_unlock(&space_info->lock);
1327 			return;
1328 		}
1329 
1330 		/* Something happened, fail everything and bail. */
1331 		if (BTRFS_FS_ERROR(fs_info))
1332 			goto aborted_fs;
1333 		last_tickets_id = space_info->tickets_id;
1334 		spin_unlock(&space_info->lock);
1335 	}
1336 
1337 	while (flush_state < ARRAY_SIZE(data_flush_states)) {
1338 		flush_space(fs_info, space_info, U64_MAX,
1339 			    data_flush_states[flush_state], false);
1340 		spin_lock(&space_info->lock);
1341 		if (list_empty(&space_info->tickets)) {
1342 			space_info->flush = 0;
1343 			spin_unlock(&space_info->lock);
1344 			return;
1345 		}
1346 
1347 		if (last_tickets_id == space_info->tickets_id) {
1348 			flush_state++;
1349 		} else {
1350 			last_tickets_id = space_info->tickets_id;
1351 			flush_state = 0;
1352 		}
1353 
1354 		if (flush_state >= ARRAY_SIZE(data_flush_states)) {
1355 			if (space_info->full) {
1356 				if (maybe_fail_all_tickets(fs_info, space_info))
1357 					flush_state = 0;
1358 				else
1359 					space_info->flush = 0;
1360 			} else {
1361 				flush_state = 0;
1362 			}
1363 
1364 			/* Something happened, fail everything and bail. */
1365 			if (BTRFS_FS_ERROR(fs_info))
1366 				goto aborted_fs;
1367 
1368 		}
1369 		spin_unlock(&space_info->lock);
1370 	}
1371 	return;
1372 
1373 aborted_fs:
1374 	maybe_fail_all_tickets(fs_info, space_info);
1375 	space_info->flush = 0;
1376 	spin_unlock(&space_info->lock);
1377 }
1378 
1379 void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
1380 {
1381 	INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
1382 	INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
1383 	INIT_WORK(&fs_info->preempt_reclaim_work,
1384 		  btrfs_preempt_reclaim_metadata_space);
1385 }
1386 
1387 static const enum btrfs_flush_state priority_flush_states[] = {
1388 	FLUSH_DELAYED_ITEMS_NR,
1389 	FLUSH_DELAYED_ITEMS,
1390 	ALLOC_CHUNK,
1391 };
1392 
1393 static const enum btrfs_flush_state evict_flush_states[] = {
1394 	FLUSH_DELAYED_ITEMS_NR,
1395 	FLUSH_DELAYED_ITEMS,
1396 	FLUSH_DELAYED_REFS_NR,
1397 	FLUSH_DELAYED_REFS,
1398 	FLUSH_DELALLOC,
1399 	FLUSH_DELALLOC_WAIT,
1400 	FLUSH_DELALLOC_FULL,
1401 	ALLOC_CHUNK,
1402 	COMMIT_TRANS,
1403 };
1404 
1405 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
1406 				struct btrfs_space_info *space_info,
1407 				struct reserve_ticket *ticket,
1408 				const enum btrfs_flush_state *states,
1409 				int states_nr)
1410 {
1411 	u64 to_reclaim;
1412 	int flush_state = 0;
1413 
1414 	spin_lock(&space_info->lock);
1415 	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1416 	/*
1417 	 * This is the priority reclaim path, so to_reclaim could be >0 still
1418 	 * because we may have only satisfied the priority tickets and still
1419	 * left non-priority tickets on the list.  We would then have
1420 	 * to_reclaim but ->bytes == 0.
1421 	 */
1422 	if (ticket->bytes == 0) {
1423 		spin_unlock(&space_info->lock);
1424 		return;
1425 	}
1426 
1427 	while (flush_state < states_nr) {
1428 		spin_unlock(&space_info->lock);
1429 		flush_space(fs_info, space_info, to_reclaim, states[flush_state],
1430 			    false);
1431 		flush_state++;
1432 		spin_lock(&space_info->lock);
1433 		if (ticket->bytes == 0) {
1434 			spin_unlock(&space_info->lock);
1435 			return;
1436 		}
1437 	}
1438 
1439 	/*
1440 	 * Attempt to steal from the global rsv if we can, except if the fs was
1441 	 * turned into error mode due to a transaction abort when flushing space
1442	 * above; in that case fail with the abort error instead of returning
1443	 * success to the caller even if we could steal from the global rsv.  This is
1444	 * just to have the caller fail immediately instead of later when trying to
1445 	 * modify the fs, making it easier to debug -ENOSPC problems.
1446 	 */
1447 	if (BTRFS_FS_ERROR(fs_info)) {
1448 		ticket->error = BTRFS_FS_ERROR(fs_info);
1449 		remove_ticket(space_info, ticket);
1450 	} else if (!steal_from_global_rsv(fs_info, space_info, ticket)) {
1451 		ticket->error = -ENOSPC;
1452 		remove_ticket(space_info, ticket);
1453 	}
1454 
1455 	/*
1456 	 * We must run try_granting_tickets here because we could be a large
1457 	 * ticket in front of a smaller ticket that can now be satisfied with
1458 	 * the available space.
1459 	 */
1460 	btrfs_try_granting_tickets(fs_info, space_info);
1461 	spin_unlock(&space_info->lock);
1462 }
1463 
1464 static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
1465 					struct btrfs_space_info *space_info,
1466 					struct reserve_ticket *ticket)
1467 {
1468 	spin_lock(&space_info->lock);
1469 
1470 	/* We could have been granted before we got here. */
1471 	if (ticket->bytes == 0) {
1472 		spin_unlock(&space_info->lock);
1473 		return;
1474 	}
1475 
1476 	while (!space_info->full) {
1477 		spin_unlock(&space_info->lock);
1478 		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1479 		spin_lock(&space_info->lock);
1480 		if (ticket->bytes == 0) {
1481 			spin_unlock(&space_info->lock);
1482 			return;
1483 		}
1484 	}
1485 
1486 	ticket->error = -ENOSPC;
1487 	remove_ticket(space_info, ticket);
1488 	btrfs_try_granting_tickets(fs_info, space_info);
1489 	spin_unlock(&space_info->lock);
1490 }
1491 
1492 static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
1493 				struct btrfs_space_info *space_info,
1494 				struct reserve_ticket *ticket)
1496 {
1497 	DEFINE_WAIT(wait);
1498 	int ret = 0;
1499 
1500 	spin_lock(&space_info->lock);
1501 	while (ticket->bytes > 0 && ticket->error == 0) {
1502 		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
1503 		if (ret) {
1504 			/*
1505 			 * Delete us from the list. After we unlock the space
1506 			 * info, we don't want the async reclaim job to reserve
1507 			 * space for this ticket. If that would happen, then the
1508			 * ticket's task would not know that space was reserved
1509 			 * despite getting an error, resulting in a space leak
1510 			 * (bytes_may_use counter of our space_info).
1511 			 */
1512 			remove_ticket(space_info, ticket);
1513 			ticket->error = -EINTR;
1514 			break;
1515 		}
1516 		spin_unlock(&space_info->lock);
1517 
1518 		schedule();
1519 
1520 		finish_wait(&ticket->wait, &wait);
1521 		spin_lock(&space_info->lock);
1522 	}
1523 	spin_unlock(&space_info->lock);
1524 }
1525 
1526 /*
1527  * Do the appropriate flushing and waiting for a ticket.
1528  *
1529  * @fs_info:    the filesystem
1530  * @space_info: space info for the reservation
1531  * @ticket:     ticket for the reservation
1532  * @start_ns:   timestamp when the reservation started
1533  * @orig_bytes: amount of bytes originally reserved
1534  * @flush:      how much we can flush
1535  *
1536  * This does the work of figuring out how to flush for the ticket, waiting for
1537  * the reservation, and returning the appropriate error if there is one.
1538  */
1539 static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
1540 				 struct btrfs_space_info *space_info,
1541 				 struct reserve_ticket *ticket,
1542 				 u64 start_ns, u64 orig_bytes,
1543 				 enum btrfs_reserve_flush_enum flush)
1544 {
1545 	int ret;
1546 
1547 	switch (flush) {
1548 	case BTRFS_RESERVE_FLUSH_DATA:
1549 	case BTRFS_RESERVE_FLUSH_ALL:
1550 	case BTRFS_RESERVE_FLUSH_ALL_STEAL:
1551 		wait_reserve_ticket(fs_info, space_info, ticket);
1552 		break;
1553 	case BTRFS_RESERVE_FLUSH_LIMIT:
1554 		priority_reclaim_metadata_space(fs_info, space_info, ticket,
1555 						priority_flush_states,
1556 						ARRAY_SIZE(priority_flush_states));
1557 		break;
1558 	case BTRFS_RESERVE_FLUSH_EVICT:
1559 		priority_reclaim_metadata_space(fs_info, space_info, ticket,
1560 						evict_flush_states,
1561 						ARRAY_SIZE(evict_flush_states));
1562 		break;
1563 	case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
1564 		priority_reclaim_data_space(fs_info, space_info, ticket);
1565 		break;
1566 	default:
1567 		ASSERT(0);
1568 		break;
1569 	}
1570 
1571 	ret = ticket->error;
1572 	ASSERT(list_empty(&ticket->list));
1573 	/*
1574 	 * Check that we can't have an error set if the reservation succeeded,
1575 	 * as that would confuse tasks and lead them to error out without
1576 	 * releasing reserved space (if an error happens the expectation is that
1577 	 * space wasn't reserved at all).
1578 	 */
1579 	ASSERT(!(ticket->bytes == 0 && ticket->error));
1580 	trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
1581 				   start_ns, flush, ticket->error);
1582 	return ret;
1583 }
1584 
1585 /*
1586  * This returns true if this flush state will go through the ordinary flushing
1587  * code.
1588  */
1589 static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
1590 {
1591 	return	(flush == BTRFS_RESERVE_FLUSH_ALL) ||
1592 		(flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
1593 }
1594 
1595 static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
1596 				       struct btrfs_space_info *space_info)
1597 {
1598 	u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
1599 	u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
1600 
1601 	/*
1602 	 * If we're heavy on ordered operations then clamping won't help us.  We
1603 	 * need to clamp specifically to keep up with dirtying buffered
1604 	 * writers, because there's not a 1:1 correlation of writing delalloc
1605 	 * and freeing space, like there is with flushing delayed refs or
1606 	 * delayed nodes.  If we're already more ordered than delalloc then
1607 	 * we're keeping up, otherwise we aren't and should probably clamp.
1608 	 */
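	/*
	 * Rough illustration (hypothetical numbers, and assuming that
	 * need_preemptive_reclaim() shifts its threshold right by ->clamp):
	 * every bump here roughly halves the free headroom we tolerate before
	 * preemptive flushing kicks in, so clamp == 3 would start flushing at
	 * about 1/8 of the headroom that clamp == 0 allows.
	 */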
1609 	if (ordered < delalloc)
1610 		space_info->clamp = min(space_info->clamp + 1, 8);
1611 }
1612 
1613 static inline bool can_steal(enum btrfs_reserve_flush_enum flush)
1614 {
1615 	return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1616 		flush == BTRFS_RESERVE_FLUSH_EVICT);
1617 }
1618 
1619 /*
1620  * NO_FLUSH and FLUSH_EMERGENCY don't want to create a ticket, they just want to
1621  * fail as quickly as possible.
1622  */
1623 static inline bool can_ticket(enum btrfs_reserve_flush_enum flush)
1624 {
1625 	return (flush != BTRFS_RESERVE_NO_FLUSH &&
1626 		flush != BTRFS_RESERVE_FLUSH_EMERGENCY);
1627 }
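
/*
 * Putting the helpers above together, the flush modes drive __reserve_bytes()
 * below roughly as follows:
 *
 *   FLUSH_ALL, FLUSH_ALL_STEAL, FLUSH_DATA: ticket on ->tickets, flushed by
 *   the async reclaim worker.
 *
 *   FLUSH_LIMIT, FLUSH_EVICT, FLUSH_FREE_SPACE_INODE: ticket on
 *   ->priority_tickets, flushed by the reserving task itself.
 *
 *   NO_FLUSH, FLUSH_EMERGENCY: no ticket, fail immediately if the space is
 *   not there.
 *
 * Only FLUSH_ALL_STEAL and FLUSH_EVICT may steal from the global reserve.
 */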
1628 
1629 /*
1630  * Try to reserve bytes from the block_rsv's space.
1631  *
1632  * @fs_info:    the filesystem
1633  * @space_info: space info we want to allocate from
1634  * @orig_bytes: number of bytes we want
1635  * @flush:      how we are allowed to flush to make our reservation
1636  *
1637  * This will reserve @orig_bytes bytes from the space info associated with the
1638  * block_rsv.  If there is not enough space it will make an attempt to flush
1639  * out space to make room.  It will do this by flushing delalloc if possible or
1640  * committing the transaction.  If @flush is BTRFS_RESERVE_NO_FLUSH then no
1641  * attempt to regain reservations will be made and this will fail if there is
1642  * not enough space already.
1643  */
1644 static int __reserve_bytes(struct btrfs_fs_info *fs_info,
1645 			   struct btrfs_space_info *space_info, u64 orig_bytes,
1646 			   enum btrfs_reserve_flush_enum flush)
1647 {
1648 	struct work_struct *async_work;
1649 	struct reserve_ticket ticket;
1650 	u64 start_ns = 0;
1651 	u64 used;
1652 	int ret = -ENOSPC;
1653 	bool pending_tickets;
1654 
1655 	ASSERT(orig_bytes);
1656 	/*
1657 	 * If we have a transaction handle (current->journal_info != NULL), then
1658 	 * the flush method must be neither BTRFS_RESERVE_FLUSH_ALL* nor
1659 	 * BTRFS_RESERVE_FLUSH_EVICT, as we could deadlock because those
1660 	 * flushing methods can trigger transaction commits.
1661 	 */
1662 	if (current->journal_info) {
1663 		/* One assert per line for easier debugging. */
1664 		ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL);
1665 		ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL_STEAL);
1666 		ASSERT(flush != BTRFS_RESERVE_FLUSH_EVICT);
1667 	}
1668 
1669 	if (flush == BTRFS_RESERVE_FLUSH_DATA)
1670 		async_work = &fs_info->async_data_reclaim_work;
1671 	else
1672 		async_work = &fs_info->async_reclaim_work;
1673 
1674 	spin_lock(&space_info->lock);
1675 	used = btrfs_space_info_used(space_info, true);
1676 
1677 	/*
1678 	 * We don't want NO_FLUSH allocations to jump everybody; they can
1679 	 * generally handle ENOSPC in a different way, so treat them the same as
1680 	 * normal flushers when it comes to skipping pending tickets.
1681 	 */
1682 	if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
1683 		pending_tickets = !list_empty(&space_info->tickets) ||
1684 			!list_empty(&space_info->priority_tickets);
1685 	else
1686 		pending_tickets = !list_empty(&space_info->priority_tickets);
1687 
1688 	/*
1689 	 * Carry on if we have enough space (short-circuit) OR call
1690 	 * btrfs_can_overcommit() to ensure we can overcommit to continue.
1691 	 */
1692 	if (!pending_tickets &&
1693 	    ((used + orig_bytes <= space_info->total_bytes) ||
1694 	     btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
1695 		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
1696 						      orig_bytes);
1697 		ret = 0;
1698 	}
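
	/*
	 * E.g. (hypothetical numbers): with total_bytes == 10GiB and
	 * used == 9.5GiB, a 1GiB request fails the direct check above, but a
	 * metadata reservation may still go through if btrfs_can_overcommit()
	 * sees enough unallocated device space to back it.
	 */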
1699 
1700 	/*
1701 	 * Things are dire, we need to make a reservation so we don't abort.  We
1702 	 * will let this reservation go through as long as we have actual space
1703 	 * left to allocate for the block.
1704 	 */
1705 	if (ret && unlikely(flush == BTRFS_RESERVE_FLUSH_EMERGENCY)) {
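		/*
		 * Note that may_use_included is false here, so pending
		 * reservations (bytes_may_use) are ignored and only space that
		 * is really occupied has to fit.
		 */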
1706 		used = btrfs_space_info_used(space_info, false);
1707 		if (used + orig_bytes <= space_info->total_bytes) {
1708 			btrfs_space_info_update_bytes_may_use(fs_info, space_info,
1709 							      orig_bytes);
1710 			ret = 0;
1711 		}
1712 	}
1713 
1714 	/*
1715 	 * If we couldn't make a reservation then setup our reservation ticket
1716 	 * and kick the async worker if it's not already running.
1717 	 *
1718 	 * If we are a priority flusher then we just need to add our ticket to
1719 	 * the list and we will do our own flushing further down.
1720 	 */
1721 	if (ret && can_ticket(flush)) {
1722 		ticket.bytes = orig_bytes;
1723 		ticket.error = 0;
1724 		space_info->reclaim_size += ticket.bytes;
1725 		init_waitqueue_head(&ticket.wait);
1726 		ticket.steal = can_steal(flush);
1727 		if (trace_btrfs_reserve_ticket_enabled())
1728 			start_ns = ktime_get_ns();
1729 
1730 		if (flush == BTRFS_RESERVE_FLUSH_ALL ||
1731 		    flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1732 		    flush == BTRFS_RESERVE_FLUSH_DATA) {
1733 			list_add_tail(&ticket.list, &space_info->tickets);
1734 			if (!space_info->flush) {
1735 				/*
1736 				 * We were forced to add a reserve ticket, so
1737 				 * our preemptive flushing is unable to keep
1738 				 * up.  Clamp down on the threshold for the
1739 				 * preemptive flushing in order to keep up with
1740 				 * the workload.
1741 				 */
1742 				maybe_clamp_preempt(fs_info, space_info);
1743 
1744 				space_info->flush = 1;
1745 				trace_btrfs_trigger_flush(fs_info,
1746 							  space_info->flags,
1747 							  orig_bytes, flush,
1748 							  "enospc");
1749 				queue_work(system_unbound_wq, async_work);
1750 			}
1751 		} else {
1752 			list_add_tail(&ticket.list,
1753 				      &space_info->priority_tickets);
1754 		}
1755 	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
1756 		/*
1757 		 * We will do the space reservation dance during log replay,
1758 		 * which means we won't have fs_info->fs_root set, so don't do
1759 		 * the async reclaim as we will panic.
1760 		 */
1761 		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
1762 		    !work_busy(&fs_info->preempt_reclaim_work) &&
1763 		    need_preemptive_reclaim(fs_info, space_info)) {
1764 			trace_btrfs_trigger_flush(fs_info, space_info->flags,
1765 						  orig_bytes, flush, "preempt");
1766 			queue_work(system_unbound_wq,
1767 				   &fs_info->preempt_reclaim_work);
1768 		}
1769 	}
1770 	spin_unlock(&space_info->lock);
1771 	if (!ret || !can_ticket(flush))
1772 		return ret;
1773 
1774 	return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
1775 				     orig_bytes, flush);
1776 }
1777 
1778 /*
1779  * Try to reserve metadata bytes from the block_rsv's space.
1780  *
1781  * @fs_info:    the filesystem
1782  * @space_info: the space_info we're allocating for
1783  * @orig_bytes: number of bytes we want
1784  * @flush:      how we are allowed to flush to make our reservation
1785  *
1786  * This will reserve @orig_bytes bytes from the space info associated with the
1787  * block_rsv.  If there is not enough space it will make an attempt to flush
1788  * out space to make room.  It will do this by flushing delalloc if possible or
1789  * committing the transaction.  If @flush is BTRFS_RESERVE_NO_FLUSH then no
1790  * attempt to regain reservations will be made and this will fail if there is
1791  * not enough space already.
1792  */
1793 int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
1794 				 struct btrfs_space_info *space_info,
1795 				 u64 orig_bytes,
1796 				 enum btrfs_reserve_flush_enum flush)
1797 {
1798 	int ret;
1799 
1800 	ret = __reserve_bytes(fs_info, space_info, orig_bytes, flush);
1801 	if (ret == -ENOSPC) {
1802 		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1803 					      space_info->flags, orig_bytes, 1);
1804 
1805 		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1806 			btrfs_dump_space_info(fs_info, space_info, orig_bytes, 0);
1807 	}
1808 	return ret;
1809 }
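
/*
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 * reserve the worst case size for inserting one item and bail out on failure:
 *
 *	u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
 *	int ret;
 *
 *	ret = btrfs_reserve_metadata_bytes(fs_info, space_info, num_bytes,
 *					   BTRFS_RESERVE_FLUSH_ALL);
 *	if (ret)
 *		return ret;
 *
 * On failure nothing was reserved, so the caller must not release anything;
 * on success the bytes are accounted in space_info->bytes_may_use until they
 * are consumed or released.
 */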
1810 
1811 /*
1812  * Try to reserve data bytes for an allocation.
1813  *
1814  * @fs_info: the filesystem
1815  * @bytes:   number of bytes we need
1816  * @flush:   how we are allowed to flush
1817  *
1818  * This will reserve bytes from the data space info.  If there is not enough
1819  * space then we will attempt to flush space as specified by flush.
1820  */
1821 int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
1822 			     enum btrfs_reserve_flush_enum flush)
1823 {
1824 	struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
1825 	int ret;
1826 
1827 	ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
1828 	       flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE ||
1829 	       flush == BTRFS_RESERVE_NO_FLUSH);
1830 	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);
1831 
1832 	ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
1833 	if (ret == -ENOSPC) {
1834 		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1835 					      data_sinfo->flags, bytes, 1);
1836 		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1837 			btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
1838 	}
1839 	return ret;
1840 }
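
/*
 * A minimal usage sketch (hypothetical caller): reserving data space for a
 * buffered write of @len bytes (assumed already rounded to the sectorsize):
 *
 *	ret = btrfs_reserve_data_bytes(fs_info, len, BTRFS_RESERVE_FLUSH_DATA);
 *	if (ret)
 *		return ret;
 *
 * Note that BTRFS_RESERVE_FLUSH_DATA must not be used while holding a
 * transaction handle, per the assert above.
 */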
1841 
1842 /* Dump all the space infos when we abort a transaction due to ENOSPC. */
1843 __cold void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info)
1844 {
1845 	struct btrfs_space_info *space_info;
1846 
1847 	btrfs_info(fs_info, "dumping space info:");
1848 	list_for_each_entry(space_info, &fs_info->space_info, list) {
1849 		spin_lock(&space_info->lock);
1850 		__btrfs_dump_space_info(fs_info, space_info);
1851 		spin_unlock(&space_info->lock);
1852 	}
1853 	dump_global_block_rsv(fs_info);
1854 }
1855 
1856 /*
1857  * Account the unused space of all the readonly block groups in the
1858  * space_info, taking mirrors into account.
1859  */
1860 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
1861 {
1862 	struct btrfs_block_group *block_group;
1863 	u64 free_bytes = 0;
1864 	int factor;
1865 
1866 	/* It's df, we don't care if it's racy */
1867 	if (list_empty(&sinfo->ro_bgs))
1868 		return 0;
1869 
1870 	spin_lock(&sinfo->lock);
1871 	list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
1872 		spin_lock(&block_group->lock);
1873 
1874 		if (!block_group->ro) {
1875 			spin_unlock(&block_group->lock);
1876 			continue;
1877 		}
1878 
1879 		factor = btrfs_bg_type_to_factor(block_group->flags);
1880 		free_bytes += (block_group->length -
1881 			       block_group->used) * factor;
1882 
1883 		spin_unlock(&block_group->lock);
1884 	}
1885 	spin_unlock(&sinfo->lock);
1886 
1887 	return free_bytes;
1888 }
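
/*
 * E.g. (hypothetical numbers): a read-only RAID1 block group with
 * length == 1GiB and used == 384MiB has factor == 2, so it contributes
 * (1GiB - 384MiB) * 2 == 1280MiB of raw free space to the total above.
 */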
1889