xref: /linux/fs/btrfs/space-info.c (revision ff30564411ffdcee49d579cb15eb13185a36e253)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "linux/spinlock.h"
4 #include <linux/minmax.h>
5 #include "misc.h"
6 #include "ctree.h"
7 #include "space-info.h"
8 #include "sysfs.h"
9 #include "volumes.h"
10 #include "free-space-cache.h"
11 #include "ordered-data.h"
12 #include "transaction.h"
13 #include "block-group.h"
14 #include "fs.h"
15 #include "accessors.h"
16 #include "extent-tree.h"
17 
18 /*
19  * HOW DOES SPACE RESERVATION WORK
20  *
21  * If you want to know about delalloc specifically, there is a separate comment
22  * for that with the delalloc code.  This comment is about how the whole system
23  * works generally.
24  *
25  * BASIC CONCEPTS
26  *
27  *   1) space_info.  This is the ultimate arbiter of how much space we can use.
28  *   There's a description of the bytes_ fields with the struct declaration,
29  *   refer to that for specifics on each field.  Suffice it to say that for
30  *   reservations we care about total_bytes - SUM(space_info->bytes_) when
31  *   determining if there is space to make an allocation.  There is a space_info
32  *   for METADATA, SYSTEM, and DATA areas.
33  *
34  *   2) block_rsv's.  These are basically buckets for every different type of
35  *   metadata reservation we have.  You can see the comment in the block_rsv
36  *   code on the rules for each type, but generally block_rsv->reserved is how
37  *   much space is accounted for in space_info->bytes_may_use.
38  *
39  *   3) btrfs_calc*_size.  These are the worst case calculations we use based
40  *   on the number of items we will want to modify.  We have one for changing
41  *   items, and one for inserting new items.  Generally we use these helpers to
42  *   determine the size of the block reserves, and then use the actual bytes
43  *   values to adjust the space_info counters.
44  *
45  * MAKING RESERVATIONS, THE NORMAL CASE
46  *
47  *   We call into either btrfs_reserve_data_bytes() or
48  *   btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
49  *   num_bytes we want to reserve.
50  *
51  *   ->reserve
52  *     space_info->bytes_may_use += num_bytes
53  *
54  *   ->extent allocation
55  *     Call btrfs_add_reserved_bytes() which does
56  *     space_info->bytes_may_use -= num_bytes
57  *     space_info->bytes_reserved += extent_bytes
58  *
59  *   ->insert reference
60  *     Call btrfs_update_block_group() which does
61  *     space_info->bytes_reserved -= extent_bytes
62  *     space_info->bytes_used += extent_bytes
63  *
64  * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
65  *
66  *   Assume we are unable to simply make the reservation because we do not have
67  *   enough space
68  *
69  *   -> __reserve_bytes
70  *     create a reserve_ticket with ->bytes set to our reservation, add it to
71  *     the tail of space_info->tickets, kick async flush thread
72  *
73  *   ->handle_reserve_ticket
74  *     wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
75  *     on the ticket.
76  *
77  *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
78  *     Flushes various things attempting to free up space.
79  *
80  *   -> btrfs_try_granting_tickets()
81  *     This is called by anything that either subtracts space from
82  *     space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
83  *     space_info->total_bytes.  This loops through the ->priority_tickets and
84  *     then the ->tickets list checking to see if the reservation can be
85  *     completed.  If it can the space is added to space_info->bytes_may_use and
86  *     the ticket is woken up.
87  *
88  *   -> ticket wakeup
89  *     Check if ->bytes == 0; if so, we got our reservation and we can carry
90  *     on, if not return the appropriate error (ENOSPC, but it can be EINTR
91  *     if we were interrupted.)
92  *
93  * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
94  *
95  *   Same as the above, except we add ourselves to the
96  *   space_info->priority_tickets, and we do not use ticket->wait, we simply
97  *   call flush_space() ourselves for the states that are safe for us to call
98  *   without deadlocking and hope for the best.
99  *
100  * THE FLUSHING STATES
101  *
102  *   Generally speaking we will have two cases for each state, a "nice" state
103  *   and an "ALL THE THINGS" state.  In btrfs we delay a lot of work in order
104  *   to reduce the locking overhead on the various trees, and even to keep from
105  *   doing any work at all in the case of delayed refs.  Each of these delayed
106  *   things however hold reservations, and so letting them run allows us to
107  *   reclaim space so we can make new reservations.
108  *
109  *   FLUSH_DELAYED_ITEMS
110  *     Every inode has a delayed item to update the inode.  Take a simple write
111  *     for example, we would update the inode item at write time to update the
112  *     mtime, and then again at finish_ordered_io() time in order to update the
113  *     isize or bytes.  We keep these delayed items to coalesce these operations
114  *     into a single operation done on demand.  These are an easy way to reclaim
115  *     metadata space.
116  *
117  *   FLUSH_DELALLOC
118  *     Look at the delalloc comment to get an idea of how much space is reserved
119  *     for delayed allocation.  We can reclaim some of this space simply by
120  *     running delalloc, but usually we need to wait for ordered extents to
121  *     reclaim the bulk of this space.
122  *
123  *   FLUSH_DELAYED_REFS
124  *     We have a block reserve for the outstanding delayed refs space, and every
125  *     delayed ref operation holds a reservation.  Running these is a quick way
126  *     to reclaim space, but we want to hold this until the end because COW can
127  *     churn a lot and we can avoid making some extent tree modifications if we
128  *     are able to delay for as long as possible.
129  *
130  *   ALLOC_CHUNK
131  *     We will skip this the first time through space reservation, because of
132  *     overcommit and we don't want to have a lot of useless metadata space when
133  *     our worst case reservations will likely never come true.
134  *
135  *   RUN_DELAYED_IPUTS
136  *     If we're freeing inodes we're likely freeing checksums, file extent
137  *     items, and extent tree items.  Loads of space could be freed up by these
138  *     operations, however they won't be usable until the transaction commits.
139  *
140  *   COMMIT_TRANS
141  *     This will commit the transaction.  Historically we had a lot of logic
142  *     surrounding whether or not we'd commit the transaction, but this was born
143  *     out of a pre-tickets era where we could end up committing the transaction
144  *     thousands of times in a row without making progress.  Now thanks to our
145  *     ticketing system we know if we're not making progress and can error
146  *     everybody out after a few commits rather than burning the disk hoping for
147  *     a different answer.
148  *
149  * OVERCOMMIT
150  *
151  *   Because we hold so many reservations for metadata we will allow you to
152  *   reserve more space than is currently free in the currently allocated
153  *   metadata space.  This only happens with metadata, data does not allow
154  *   overcommitting.
155  *
156  *   You can see the current logic for when we allow overcommit in
157  *   btrfs_can_overcommit(), but it only applies to unallocated space.  If there
158  *   is no unallocated space to be had, all reservations are kept within the
159  *   free space in the allocated metadata chunks.
160  *
161  *   Because of overcommitting, you generally want to use the
162  *   btrfs_can_overcommit() logic for metadata allocations, as it does the right
163  *   thing with or without extra unallocated space.
164  */
165 
166 u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
167 			  bool may_use_included)
168 {
169 	ASSERT(s_info);
170 	return s_info->bytes_used + s_info->bytes_reserved +
171 		s_info->bytes_pinned + s_info->bytes_readonly +
172 		s_info->bytes_zone_unusable +
173 		(may_use_included ? s_info->bytes_may_use : 0);
174 }
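
/*
 * Editorial sketch, not part of the original file: the core fit check
 * described in the comment at the top of this file.  A reservation of
 * @num_bytes fits if total_bytes - SUM(bytes_) can absorb it, which is
 * effectively what the reservation fast path does under space_info->lock.
 * The helper name is made up for illustration.
 */
static bool __maybe_unused example_reservation_fits(struct btrfs_space_info *si,
						    u64 num_bytes)
{
	lockdep_assert_held(&si->lock);

	/* SUM(bytes_), including bytes_may_use, per the comment above. */
	return btrfs_space_info_used(si, true) + num_bytes <= si->total_bytes;
}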
175 
176 /*
177  * after adding space to the filesystem, we need to clear the full flags
178  * on all the space infos.
179  */
180 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
181 {
182 	struct list_head *head = &info->space_info;
183 	struct btrfs_space_info *found;
184 
185 	list_for_each_entry(found, head, list)
186 		found->full = 0;
187 }
188 
189 /*
190  * Block groups with more than this value (percents) of unusable space will be
191  * scheduled for background reclaim.
192  */
193 #define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH			(75)
194 
195 #define BTRFS_UNALLOC_BLOCK_GROUP_TARGET			(10ULL)
196 
197 /*
198  * Calculate chunk size depending on volume type (regular or zoned).
199  */
200 static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
201 {
202 	if (btrfs_is_zoned(fs_info))
203 		return fs_info->zone_size;
204 
205 	ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
206 
207 	if (flags & BTRFS_BLOCK_GROUP_DATA)
208 		return BTRFS_MAX_DATA_CHUNK_SIZE;
209 	else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
210 		return SZ_32M;
211 
212 	/* Handle BTRFS_BLOCK_GROUP_METADATA */
213 	if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G)
214 		return SZ_1G;
215 
216 	return SZ_256M;
217 }
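
/*
 * Worked example (editorial): on a regular (non-zoned) 100GiB filesystem,
 * data chunks are capped at BTRFS_MAX_DATA_CHUNK_SIZE, system chunks at
 * 32MiB, and metadata chunks at 1GiB because total_rw_bytes exceeds 50GiB;
 * on a 20GiB filesystem, metadata chunks would be 256MiB instead.
 */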
218 
219 /*
220  * Update default chunk size.
221  */
222 void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
223 					u64 chunk_size)
224 {
225 	WRITE_ONCE(space_info->chunk_size, chunk_size);
226 }
227 
228 static int create_space_info(struct btrfs_fs_info *info, u64 flags)
229 {
230 
231 	struct btrfs_space_info *space_info;
232 	int i;
233 	int ret;
234 
235 	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
236 	if (!space_info)
237 		return -ENOMEM;
238 
239 	space_info->fs_info = info;
240 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
241 		INIT_LIST_HEAD(&space_info->block_groups[i]);
242 	init_rwsem(&space_info->groups_sem);
243 	spin_lock_init(&space_info->lock);
244 	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
245 	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
246 	INIT_LIST_HEAD(&space_info->ro_bgs);
247 	INIT_LIST_HEAD(&space_info->tickets);
248 	INIT_LIST_HEAD(&space_info->priority_tickets);
249 	space_info->clamp = 1;
250 	btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));
251 
252 	if (btrfs_is_zoned(info))
253 		space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;
254 
255 	ret = btrfs_sysfs_add_space_info_type(info, space_info);
256 	if (ret)
257 		return ret;
258 
259 	list_add(&space_info->list, &info->space_info);
260 	if (flags & BTRFS_BLOCK_GROUP_DATA)
261 		info->data_sinfo = space_info;
262 
263 	return ret;
264 }
265 
266 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
267 {
268 	struct btrfs_super_block *disk_super;
269 	u64 features;
270 	u64 flags;
271 	int mixed = 0;
272 	int ret;
273 
274 	disk_super = fs_info->super_copy;
275 	if (!btrfs_super_root(disk_super))
276 		return -EINVAL;
277 
278 	features = btrfs_super_incompat_flags(disk_super);
279 	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
280 		mixed = 1;
281 
282 	flags = BTRFS_BLOCK_GROUP_SYSTEM;
283 	ret = create_space_info(fs_info, flags);
284 	if (ret)
285 		goto out;
286 
287 	if (mixed) {
288 		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
289 		ret = create_space_info(fs_info, flags);
290 	} else {
291 		flags = BTRFS_BLOCK_GROUP_METADATA;
292 		ret = create_space_info(fs_info, flags);
293 		if (ret)
294 			goto out;
295 
296 		flags = BTRFS_BLOCK_GROUP_DATA;
297 		ret = create_space_info(fs_info, flags);
298 	}
299 out:
300 	return ret;
301 }
302 
303 void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
304 				struct btrfs_block_group *block_group)
305 {
306 	struct btrfs_space_info *found;
307 	int factor, index;
308 
309 	factor = btrfs_bg_type_to_factor(block_group->flags);
310 
311 	found = btrfs_find_space_info(info, block_group->flags);
312 	ASSERT(found);
313 	spin_lock(&found->lock);
314 	found->total_bytes += block_group->length;
315 	found->disk_total += block_group->length * factor;
316 	found->bytes_used += block_group->used;
317 	found->disk_used += block_group->used * factor;
318 	found->bytes_readonly += block_group->bytes_super;
319 	found->bytes_zone_unusable += block_group->zone_unusable;
320 	if (block_group->length > 0)
321 		found->full = 0;
322 	btrfs_try_granting_tickets(info, found);
323 	spin_unlock(&found->lock);
324 
325 	block_group->space_info = found;
326 
327 	index = btrfs_bg_flags_to_raid_index(block_group->flags);
328 	down_write(&found->groups_sem);
329 	list_add_tail(&block_group->list, &found->block_groups[index]);
330 	up_write(&found->groups_sem);
331 }
332 
333 struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
334 					       u64 flags)
335 {
336 	struct list_head *head = &info->space_info;
337 	struct btrfs_space_info *found;
338 
339 	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
340 
341 	list_for_each_entry(found, head, list) {
342 		if (found->flags & flags)
343 			return found;
344 	}
345 	return NULL;
346 }
347 
348 static u64 calc_effective_data_chunk_size(struct btrfs_fs_info *fs_info)
349 {
350 	struct btrfs_space_info *data_sinfo;
351 	u64 data_chunk_size;
352 
353 	/*
354 	 * Calculate the data_chunk_size, space_info->chunk_size is the
355 	 * "optimal" chunk size based on the fs size.  However when we actually
356 	 * allocate the chunk we will strip this down further, making it no
357 	 * more than 10% of the disk or 1G, whichever is smaller.
358 	 *
359 	 * In zoned mode, we need to use zone_size (= data_sinfo->chunk_size)
360 	 * as it is.
361 	 */
362 	data_sinfo = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
363 	if (btrfs_is_zoned(fs_info))
364 		return data_sinfo->chunk_size;
365 	data_chunk_size = min(data_sinfo->chunk_size,
366 			      mult_perc(fs_info->fs_devices->total_rw_bytes, 10));
367 	return min_t(u64, data_chunk_size, SZ_1G);
368 }
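
/*
 * Worked example (editorial): with chunk_size = 10GiB on a 4GiB device,
 * 10% of total_rw_bytes is ~410MiB, so the effective data chunk size is
 * min(10GiB, ~410MiB, 1GiB) = ~410MiB.
 */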
369 
370 static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
371 			  struct btrfs_space_info *space_info,
372 			  enum btrfs_reserve_flush_enum flush)
373 {
374 	u64 profile;
375 	u64 avail;
376 	u64 data_chunk_size;
377 	int factor;
378 
379 	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
380 		profile = btrfs_system_alloc_profile(fs_info);
381 	else
382 		profile = btrfs_metadata_alloc_profile(fs_info);
383 
384 	avail = atomic64_read(&fs_info->free_chunk_space);
385 
386 	/*
387 	 * If we have dup, raid1 or raid10 then only half of the free
388 	 * space is actually usable.  For raid56, the space info used
389 	 * doesn't include the parity drive, so we don't have to
390 	 * change the math
391 	 */
392 	factor = btrfs_bg_type_to_factor(profile);
393 	avail = div_u64(avail, factor);
394 	if (avail == 0)
395 		return 0;
396 
397 	data_chunk_size = calc_effective_data_chunk_size(fs_info);
398 
399 	/*
400 	 * Since data allocations immediately use block groups as part of the
401 	 * reservation, because we assume that data reservations will == actual
402 	 * usage, we could potentially overcommit and then immediately have that
403 	 * available space used by a data allocation, which could put us in a
404 	 * bind when we get close to filling the file system.
405 	 *
406 	 * To handle this simply remove the data_chunk_size from the available
407 	 * space.  If we are relatively empty this won't affect our ability to
408 	 * overcommit much, and if we're very close to full it'll keep us from
409 	 * getting into a position where we've given ourselves very little
410 	 * metadata wiggle room.
411 	 */
412 	if (avail <= data_chunk_size)
413 		return 0;
414 	avail -= data_chunk_size;
415 
416 	/*
417 	 * If we aren't flushing all things, let us overcommit up to
418 	 * half of the space.  If we can flush, don't let us overcommit
419 	 * too much, only let it overcommit up to 1/8 of the space.
420 	 */
421 	if (flush == BTRFS_RESERVE_FLUSH_ALL)
422 		avail >>= 3;
423 	else
424 		avail >>= 1;
425 
426 	/*
427 	 * In zoned mode, we always allocate one zone as one chunk.
428 	 * Returning bytes here that are not aligned to the zone size
429 	 * results in less pressure for the async metadata reclaim
430 	 * process, and it will over-commit too much, leading to ENOSPC.
431 	 * Align down to the zone size to avoid that.
432 	 */
433 	if (btrfs_is_zoned(fs_info))
434 		avail = ALIGN_DOWN(avail, fs_info->zone_size);
435 
436 	return avail;
437 }
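
/*
 * Worked example (editorial): with a raid1 metadata profile (factor 2) and
 * 8GiB of free_chunk_space, avail starts at 4GiB.  Subtracting an effective
 * data chunk size of 1GiB leaves 3GiB, of which a BTRFS_RESERVE_FLUSH_ALL
 * caller may overcommit 3GiB >> 3 = 384MiB.
 */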
438 
439 int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
440 			 struct btrfs_space_info *space_info, u64 bytes,
441 			 enum btrfs_reserve_flush_enum flush)
442 {
443 	u64 avail;
444 	u64 used;
445 
446 	/* Don't overcommit data, including the mixed (data+metadata) case. */
447 	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
448 		return 0;
449 
450 	used = btrfs_space_info_used(space_info, true);
451 	avail = calc_available_free_space(fs_info, space_info, flush);
452 
453 	if (used + bytes < space_info->total_bytes + avail)
454 		return 1;
455 	return 0;
456 }
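
/*
 * Editorial sketch, not part of the original file: how a metadata
 * reservation check combines the plain fit test with overcommit, mirroring
 * the ticket-granting logic in btrfs_try_granting_tickets() below.  The
 * helper name is made up for illustration.
 */
static bool __maybe_unused example_metadata_fits(struct btrfs_fs_info *fs_info,
						 struct btrfs_space_info *si,
						 u64 bytes)
{
	u64 used = btrfs_space_info_used(si, true);

	/* Plain fit against the already allocated chunks first. */
	if (used + bytes <= si->total_bytes)
		return true;

	/* Otherwise see if we may overcommit against unallocated space. */
	return btrfs_can_overcommit(fs_info, si, bytes,
				    BTRFS_RESERVE_FLUSH_ALL) != 0;
}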
457 
458 static void remove_ticket(struct btrfs_space_info *space_info,
459 			  struct reserve_ticket *ticket)
460 {
461 	if (!list_empty(&ticket->list)) {
462 		list_del_init(&ticket->list);
463 		ASSERT(space_info->reclaim_size >= ticket->bytes);
464 		space_info->reclaim_size -= ticket->bytes;
465 	}
466 }
467 
468 /*
469  * This is for space we already have accounted in space_info->bytes_may_use, so
470  * basically when we're returning space from block_rsv's.
471  */
472 void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
473 				struct btrfs_space_info *space_info)
474 {
475 	struct list_head *head;
476 	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
477 
478 	lockdep_assert_held(&space_info->lock);
479 
480 	head = &space_info->priority_tickets;
481 again:
482 	while (!list_empty(head)) {
483 		struct reserve_ticket *ticket;
484 		u64 used = btrfs_space_info_used(space_info, true);
485 
486 		ticket = list_first_entry(head, struct reserve_ticket, list);
487 
488 		/* Check and see if our ticket can be satisfied now. */
489 		if ((used + ticket->bytes <= space_info->total_bytes) ||
490 		    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
491 					 flush)) {
492 			btrfs_space_info_update_bytes_may_use(fs_info,
493 							      space_info,
494 							      ticket->bytes);
495 			remove_ticket(space_info, ticket);
496 			ticket->bytes = 0;
497 			space_info->tickets_id++;
498 			wake_up(&ticket->wait);
499 		} else {
500 			break;
501 		}
502 	}
503 
504 	if (head == &space_info->priority_tickets) {
505 		head = &space_info->tickets;
506 		flush = BTRFS_RESERVE_FLUSH_ALL;
507 		goto again;
508 	}
509 }
510 
511 #define DUMP_BLOCK_RSV(fs_info, rsv_name)				\
512 do {									\
513 	struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;		\
514 	spin_lock(&__rsv->lock);					\
515 	btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",	\
516 		   __rsv->size, __rsv->reserved);			\
517 	spin_unlock(&__rsv->lock);					\
518 } while (0)
519 
520 static const char *space_info_flag_to_str(const struct btrfs_space_info *space_info)
521 {
522 	switch (space_info->flags) {
523 	case BTRFS_BLOCK_GROUP_SYSTEM:
524 		return "SYSTEM";
525 	case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA:
526 		return "DATA+METADATA";
527 	case BTRFS_BLOCK_GROUP_DATA:
528 		return "DATA";
529 	case BTRFS_BLOCK_GROUP_METADATA:
530 		return "METADATA";
531 	default:
532 		return "UNKNOWN";
533 	}
534 }
535 
536 static void dump_global_block_rsv(struct btrfs_fs_info *fs_info)
537 {
538 	DUMP_BLOCK_RSV(fs_info, global_block_rsv);
539 	DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
540 	DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
541 	DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
542 	DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
543 }
544 
545 static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
546 				    struct btrfs_space_info *info)
547 {
548 	const char *flag_str = space_info_flag_to_str(info);
549 	lockdep_assert_held(&info->lock);
550 
551 	/* The free space could be negative in case of overcommit */
552 	btrfs_info(fs_info, "space_info %s has %lld free, is %sfull",
553 		   flag_str,
554 		   (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
555 		   info->full ? "" : "not ");
556 	btrfs_info(fs_info,
557 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
558 		info->total_bytes, info->bytes_used, info->bytes_pinned,
559 		info->bytes_reserved, info->bytes_may_use,
560 		info->bytes_readonly, info->bytes_zone_unusable);
561 }
562 
563 void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
564 			   struct btrfs_space_info *info, u64 bytes,
565 			   int dump_block_groups)
566 {
567 	struct btrfs_block_group *cache;
568 	u64 total_avail = 0;
569 	int index = 0;
570 
571 	spin_lock(&info->lock);
572 	__btrfs_dump_space_info(fs_info, info);
573 	dump_global_block_rsv(fs_info);
574 	spin_unlock(&info->lock);
575 
576 	if (!dump_block_groups)
577 		return;
578 
579 	down_read(&info->groups_sem);
580 again:
581 	list_for_each_entry(cache, &info->block_groups[index], list) {
582 		u64 avail;
583 
584 		spin_lock(&cache->lock);
585 		avail = cache->length - cache->used - cache->pinned -
586 			cache->reserved - cache->delalloc_bytes -
587 			cache->bytes_super - cache->zone_unusable;
588 		btrfs_info(fs_info,
589 "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s",
590 			   cache->start, cache->length, cache->used, cache->pinned,
591 			   cache->reserved, cache->delalloc_bytes,
592 			   cache->bytes_super, cache->zone_unusable,
593 			   avail, cache->ro ? "[readonly]" : "");
594 		spin_unlock(&cache->lock);
595 		btrfs_dump_free_space(cache, bytes);
596 		total_avail += avail;
597 	}
598 	if (++index < BTRFS_NR_RAID_TYPES)
599 		goto again;
600 	up_read(&info->groups_sem);
601 
602 	btrfs_info(fs_info, "%llu bytes available across all block groups", total_avail);
603 }
604 
605 static inline u64 calc_reclaim_items_nr(const struct btrfs_fs_info *fs_info,
606 					u64 to_reclaim)
607 {
608 	u64 bytes;
609 	u64 nr;
610 
611 	bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
612 	nr = div64_u64(to_reclaim, bytes);
613 	if (!nr)
614 		nr = 1;
615 	return nr;
616 }
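
/*
 * Worked example (editorial, assuming the current size helpers): with a
 * 16KiB nodesize, btrfs_calc_insert_metadata_size(fs_info, 1) is
 * 2 * BTRFS_MAX_LEVEL (8) * 16KiB = 256KiB, so a to_reclaim of 1MiB maps
 * to 4 items, and anything smaller than one item's worth is rounded up to 1.
 */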
617 
618 /*
619  * shrink metadata reservation for delalloc
620  */
621 static void shrink_delalloc(struct btrfs_fs_info *fs_info,
622 			    struct btrfs_space_info *space_info,
623 			    u64 to_reclaim, bool wait_ordered,
624 			    bool for_preempt)
625 {
626 	struct btrfs_trans_handle *trans;
627 	u64 delalloc_bytes;
628 	u64 ordered_bytes;
629 	u64 items;
630 	long time_left;
631 	int loops;
632 
633 	delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
634 	ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
635 	if (delalloc_bytes == 0 && ordered_bytes == 0)
636 		return;
637 
638 	/* Calculate the number of pages we need to flush for this reservation. */
639 	if (to_reclaim == U64_MAX) {
640 		items = U64_MAX;
641 	} else {
642 		/*
643 		 * to_reclaim is set to however much metadata we need to
644 		 * reclaim, but reclaiming that much data doesn't really track
645 		 * exactly.  What we really want to do is reclaim a full inode's
646 		 * worth of reservations, however that's not available to us
647 		 * here.  We will take a fraction of the delalloc bytes for our
648 		 * flushing loops and hope for the best.  Delalloc will expand
649 		 * the amount we write to cover an entire dirty extent, which
650 		 * will reclaim the metadata reservation for that range.  If
651 		 * it's not enough subsequent flush stages will be more
652 		 * aggressive.
653 		 */
654 		to_reclaim = max(to_reclaim, delalloc_bytes >> 3);
655 		items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
656 	}
657 
658 	trans = current->journal_info;
659 
660 	/*
661 	 * If we are doing more ordered than delalloc we need to just wait on
662 	 * ordered extents, otherwise we'll waste time trying to flush delalloc
663 	 * that likely won't give us the space back we need.
664 	 */
665 	if (ordered_bytes > delalloc_bytes && !for_preempt)
666 		wait_ordered = true;
667 
668 	loops = 0;
669 	while ((delalloc_bytes || ordered_bytes) && loops < 3) {
670 		u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
671 		long nr_pages = min_t(u64, temp, LONG_MAX);
672 		int async_pages;
673 
674 		btrfs_start_delalloc_roots(fs_info, nr_pages, true);
675 
676 		/*
677 		 * We need to make sure any outstanding async pages are now
678 		 * processed before we continue.  This is because things like
679 		 * sync_inode() tries to be smart and skip writing if the inode is
680 		 * marked clean.  We don't use filemap_fdatawrite() for flushing
681 		 * because we want to control how many pages we write out at a
682 		 * time, thus this is the only safe way to make sure we've
683 		 * waited for outstanding compressed workers to have started
684 		 * their jobs and thus have ordered extents set up properly.
685 		 *
686 		 * This exists because we do not want to wait for each
687 		 * individual inode to finish its async work, we simply want to
688 		 * start the IO on everybody, and then come back here and wait
689 		 * for all of the async work to catch up.  Once we're done with
690 		 * that we know we'll have ordered extents for everything and we
691 		 * can decide if we wait for that or not.
692 		 *
693 		 * If we choose to replace this in the future, make absolutely
694 		 * sure that the proper waiting is being done in the async case,
695 		 * as there have been bugs in that area before.
696 		 */
697 		async_pages = atomic_read(&fs_info->async_delalloc_pages);
698 		if (!async_pages)
699 			goto skip_async;
700 
701 		/*
702 		 * We don't want to wait forever; if we wrote fewer pages in this
703 		 * loop than we have outstanding, only wait for that number of
704 		 * pages, otherwise we can wait for all async pages to finish
705 		 * before continuing.
706 		 */
707 		if (async_pages > nr_pages)
708 			async_pages -= nr_pages;
709 		else
710 			async_pages = 0;
711 		wait_event(fs_info->async_submit_wait,
712 			   atomic_read(&fs_info->async_delalloc_pages) <=
713 			   async_pages);
714 skip_async:
715 		loops++;
716 		if (wait_ordered && !trans) {
717 			btrfs_wait_ordered_roots(fs_info, items, NULL);
718 		} else {
719 			time_left = schedule_timeout_killable(1);
720 			if (time_left)
721 				break;
722 		}
723 
724 		/*
725 		 * If we are for preemption we just want a one-shot of delalloc
726 		 * flushing so we can stop flushing if we decide we don't need
727 		 * to anymore.
728 		 */
729 		if (for_preempt)
730 			break;
731 
732 		spin_lock(&space_info->lock);
733 		if (list_empty(&space_info->tickets) &&
734 		    list_empty(&space_info->priority_tickets)) {
735 			spin_unlock(&space_info->lock);
736 			break;
737 		}
738 		spin_unlock(&space_info->lock);
739 
740 		delalloc_bytes = percpu_counter_sum_positive(
741 						&fs_info->delalloc_bytes);
742 		ordered_bytes = percpu_counter_sum_positive(
743 						&fs_info->ordered_bytes);
744 	}
745 }
746 
747 /*
748  * Try to flush some data based on policy set by @state. This is only advisory
749  * and may fail for various reasons. The caller is supposed to examine the
750  * state of @space_info to detect the outcome.
751  */
752 static void flush_space(struct btrfs_fs_info *fs_info,
753 		       struct btrfs_space_info *space_info, u64 num_bytes,
754 		       enum btrfs_flush_state state, bool for_preempt)
755 {
756 	struct btrfs_root *root = fs_info->tree_root;
757 	struct btrfs_trans_handle *trans;
758 	int nr;
759 	int ret = 0;
760 
761 	switch (state) {
762 	case FLUSH_DELAYED_ITEMS_NR:
763 	case FLUSH_DELAYED_ITEMS:
764 		if (state == FLUSH_DELAYED_ITEMS_NR)
765 			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
766 		else
767 			nr = -1;
768 
769 		trans = btrfs_join_transaction_nostart(root);
770 		if (IS_ERR(trans)) {
771 			ret = PTR_ERR(trans);
772 			if (ret == -ENOENT)
773 				ret = 0;
774 			break;
775 		}
776 		ret = btrfs_run_delayed_items_nr(trans, nr);
777 		btrfs_end_transaction(trans);
778 		break;
779 	case FLUSH_DELALLOC:
780 	case FLUSH_DELALLOC_WAIT:
781 	case FLUSH_DELALLOC_FULL:
782 		if (state == FLUSH_DELALLOC_FULL)
783 			num_bytes = U64_MAX;
784 		shrink_delalloc(fs_info, space_info, num_bytes,
785 				state != FLUSH_DELALLOC, for_preempt);
786 		break;
787 	case FLUSH_DELAYED_REFS_NR:
788 	case FLUSH_DELAYED_REFS:
789 		trans = btrfs_join_transaction_nostart(root);
790 		if (IS_ERR(trans)) {
791 			ret = PTR_ERR(trans);
792 			if (ret == -ENOENT)
793 				ret = 0;
794 			break;
795 		}
796 		if (state == FLUSH_DELAYED_REFS_NR)
797 			btrfs_run_delayed_refs(trans, num_bytes);
798 		else
799 			btrfs_run_delayed_refs(trans, 0);
800 		btrfs_end_transaction(trans);
801 		break;
802 	case ALLOC_CHUNK:
803 	case ALLOC_CHUNK_FORCE:
804 		trans = btrfs_join_transaction(root);
805 		if (IS_ERR(trans)) {
806 			ret = PTR_ERR(trans);
807 			break;
808 		}
809 		ret = btrfs_chunk_alloc(trans,
810 				btrfs_get_alloc_profile(fs_info, space_info->flags),
811 				(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
812 					CHUNK_ALLOC_FORCE);
813 		btrfs_end_transaction(trans);
814 
815 		if (ret > 0 || ret == -ENOSPC)
816 			ret = 0;
817 		break;
818 	case RUN_DELAYED_IPUTS:
819 		/*
820 		 * If we have pending delayed iputs then we could free up a
821 		 * bunch of pinned space, so make sure we run the iputs before
822 		 * we do our pinned bytes check below.
823 		 */
824 		btrfs_run_delayed_iputs(fs_info);
825 		btrfs_wait_on_delayed_iputs(fs_info);
826 		break;
827 	case COMMIT_TRANS:
828 		ASSERT(current->journal_info == NULL);
829 		/*
830 		 * We don't want to start a new transaction, just attach to the
831 		 * current one or wait it fully commits in case its commit is
832 		 * happening at the moment. Note: we don't use a nostart join
833 		 * because that does not wait for a transaction to fully commit
834 		 * (only for it to be unblocked, state TRANS_STATE_UNBLOCKED).
835 		 */
836 		ret = btrfs_commit_current_transaction(root);
837 		break;
838 	default:
839 		ret = -ENOSPC;
840 		break;
841 	}
842 
843 	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
844 				ret, for_preempt);
845 	return;
846 }
847 
848 static inline u64
849 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
850 				 struct btrfs_space_info *space_info)
851 {
852 	u64 used;
853 	u64 avail;
854 	u64 to_reclaim = space_info->reclaim_size;
855 
856 	lockdep_assert_held(&space_info->lock);
857 
858 	avail = calc_available_free_space(fs_info, space_info,
859 					  BTRFS_RESERVE_FLUSH_ALL);
860 	used = btrfs_space_info_used(space_info, true);
861 
862 	/*
863 	 * We may be flushing because suddenly we have less space than we had
864 	 * before, and now we're well over-committed based on our current free
865 	 * space.  If that's the case add in our overage so we make sure to put
866 	 * appropriate pressure on the flushing state machine.
867 	 */
868 	if (space_info->total_bytes + avail < used)
869 		to_reclaim += used - (space_info->total_bytes + avail);
870 
871 	return to_reclaim;
872 }
873 
874 static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
875 				    struct btrfs_space_info *space_info)
876 {
877 	const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv);
878 	u64 ordered, delalloc;
879 	u64 thresh;
880 	u64 used;
881 
882 	thresh = mult_perc(space_info->total_bytes, 90);
883 
884 	lockdep_assert_held(&space_info->lock);
885 
886 	/* If we're just plain full then async reclaim just slows us down. */
887 	if ((space_info->bytes_used + space_info->bytes_reserved +
888 	     global_rsv_size) >= thresh)
889 		return false;
890 
891 	used = space_info->bytes_may_use + space_info->bytes_pinned;
892 
893 	/* The total flushable belongs to the global rsv, don't flush. */
894 	if (global_rsv_size >= used)
895 		return false;
896 
897 	/*
898 	 * 128MiB is 1/4 of the maximum global rsv size.  If we have less than
899 	 * that devoted to other reservations then there's no sense in flushing,
900 	 * we don't have a lot of things that need flushing.
901 	 */
902 	if (used - global_rsv_size <= SZ_128M)
903 		return false;
904 
905 	/*
906 	 * We have tickets queued, bail so we don't compete with the async
907 	 * flushers.
908 	 */
909 	if (space_info->reclaim_size)
910 		return false;
911 
912 	/*
913 	 * If we have over half of the free space occupied by reservations or
914 	 * pinned then we want to start flushing.
915 	 *
916 	 * We do not do the traditional thing here, which is to say
917 	 *
918 	 *   if (used >= ((total_bytes + avail) / 2))
919 	 *     return 1;
920 	 *
921 	 * because this doesn't quite work how we want.  If we had more than 50%
922 	 * of the space_info used by bytes_used and we had 0 available we'd just
923 	 * constantly run the background flusher.  Instead we want it to kick in
924 	 * if our reclaimable space exceeds our clamped free space.
925 	 *
926 	 * Our clamping range is 2^1 -> 2^8.  Practically speaking that means
927 	 * the following:
928 	 *
929 	 * Amount of RAM        Minimum threshold       Maximum threshold
930 	 *
931 	 *        256GiB                     1GiB                  128GiB
932 	 *        128GiB                   512MiB                   64GiB
933 	 *         64GiB                   256MiB                   32GiB
934 	 *         32GiB                   128MiB                   16GiB
935 	 *         16GiB                    64MiB                    8GiB
936 	 *
937 	 * These are the range our thresholds will fall in, corresponding to how
938 	 * much delalloc we need for the background flusher to kick in.
939 	 */
940 
941 	thresh = calc_available_free_space(fs_info, space_info,
942 					   BTRFS_RESERVE_FLUSH_ALL);
943 	used = space_info->bytes_used + space_info->bytes_reserved +
944 	       space_info->bytes_readonly + global_rsv_size;
945 	if (used < space_info->total_bytes)
946 		thresh += space_info->total_bytes - used;
947 	thresh >>= space_info->clamp;
948 
949 	used = space_info->bytes_pinned;
950 
951 	/*
952 	 * If we have more ordered bytes than delalloc bytes then we're either
953 	 * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
954 	 * around.  Preemptive flushing is only useful in that it can free up
955 	 * space before tickets need to wait for things to finish.  In the case
956 	 * of ordered extents, preemptively waiting on ordered extents gets us
957 	 * nothing, if our reservations are tied up in ordered extents we'll
958 	 * simply have to slow down writers by forcing them to wait on ordered
959 	 * extents.
960 	 *
961 	 * In the case that ordered is larger than delalloc, only include the
962 	 * block reserves that we would actually be able to directly reclaim
963 	 * from.  In this case if we're heavy on metadata operations this will
964 	 * clearly be heavy enough to warrant preemptive flushing.  In the case
965 	 * of heavy DIO or ordered reservations, preemptive flushing will just
966 	 * waste time and cause us to slow down.
967 	 *
968 	 * We want to make sure we truly are maxed out on ordered however, so
969 	 * cut ordered in half, and if it's still higher than delalloc then we
970 	 * can keep flushing.  This is to avoid the case where we start
971 	 * flushing, and now delalloc == ordered and we stop preemptively
972 	 * flushing when we could still have several gigs of delalloc to flush.
973 	 */
974 	ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
975 	delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
976 	if (ordered >= delalloc)
977 		used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) +
978 			btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv);
979 	else
980 		used += space_info->bytes_may_use - global_rsv_size;
981 
982 	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
983 		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
984 }
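
/*
 * Worked example (editorial): suppose calc_available_free_space() returns
 * 2GiB and bytes_used + bytes_reserved + bytes_readonly + the global rsv
 * come to 4GiB of a 10GiB total.  Then thresh = 2GiB + 6GiB = 8GiB, and
 * with clamp == 3 preemptive reclaim kicks in once the reclaimable bytes
 * computed in this function exceed 8GiB >> 3 = 1GiB.
 */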
985 
986 static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
987 				  struct btrfs_space_info *space_info,
988 				  struct reserve_ticket *ticket)
989 {
990 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
991 	u64 min_bytes;
992 
993 	if (!ticket->steal)
994 		return false;
995 
996 	if (global_rsv->space_info != space_info)
997 		return false;
998 
999 	spin_lock(&global_rsv->lock);
1000 	min_bytes = mult_perc(global_rsv->size, 10);
1001 	if (global_rsv->reserved < min_bytes + ticket->bytes) {
1002 		spin_unlock(&global_rsv->lock);
1003 		return false;
1004 	}
1005 	global_rsv->reserved -= ticket->bytes;
1006 	remove_ticket(space_info, ticket);
1007 	ticket->bytes = 0;
1008 	wake_up(&ticket->wait);
1009 	space_info->tickets_id++;
1010 	if (global_rsv->reserved < global_rsv->size)
1011 		global_rsv->full = 0;
1012 	spin_unlock(&global_rsv->lock);
1013 
1014 	return true;
1015 }
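
/*
 * Worked example (editorial): with a 512MiB global rsv of which 300MiB is
 * reserved, min_bytes is ~51MiB.  A 200MiB ticket can be stolen
 * (300MiB >= ~51MiB + 200MiB), leaving 100MiB reserved, while a 260MiB
 * ticket would be refused to protect the reserve.
 */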
1016 
1017 /*
1018  * We've exhausted our flushing, start failing tickets.
1019  *
1020  * @fs_info - fs_info for this fs
1021  * @space_info - the space info we were flushing
1022  *
1023  * We call this when we've exhausted our flushing ability and haven't made
1024  * progress in satisfying tickets.  The reservation code handles tickets in
1025  * order, so if there is a large ticket first and then smaller ones we could
1026  * very well satisfy the smaller tickets.  This will attempt to wake up any
1027  * tickets in the list to catch this case.
1028  *
1029  * This function returns true if it was able to make progress by clearing out
1030  * other tickets, or if it stumbles across a ticket that was smaller than the
1031  * first ticket.
1032  */
1033 static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
1034 				   struct btrfs_space_info *space_info)
1035 {
1036 	struct reserve_ticket *ticket;
1037 	u64 tickets_id = space_info->tickets_id;
1038 	const bool aborted = BTRFS_FS_ERROR(fs_info);
1039 
1040 	trace_btrfs_fail_all_tickets(fs_info, space_info);
1041 
1042 	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
1043 		btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
1044 		__btrfs_dump_space_info(fs_info, space_info);
1045 	}
1046 
1047 	while (!list_empty(&space_info->tickets) &&
1048 	       tickets_id == space_info->tickets_id) {
1049 		ticket = list_first_entry(&space_info->tickets,
1050 					  struct reserve_ticket, list);
1051 
1052 		if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket))
1053 			return true;
1054 
1055 		if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1056 			btrfs_info(fs_info, "failing ticket with %llu bytes",
1057 				   ticket->bytes);
1058 
1059 		remove_ticket(space_info, ticket);
1060 		if (aborted)
1061 			ticket->error = -EIO;
1062 		else
1063 			ticket->error = -ENOSPC;
1064 		wake_up(&ticket->wait);
1065 
1066 		/*
1067 		 * We're just throwing tickets away, so more flushing may not
1068 		 * trip over btrfs_try_granting_tickets, so we need to call it
1069 		 * here to see if we can make progress with the next ticket in
1070 		 * the list.
1071 		 */
1072 		if (!aborted)
1073 			btrfs_try_granting_tickets(fs_info, space_info);
1074 	}
1075 	return (tickets_id != space_info->tickets_id);
1076 }
1077 
1078 /*
1079  * This is for normal flushers; we can wait all goddamned day if we want to.  We
1080  * will loop and continuously try to flush as long as we are making progress.
1081  * We count progress as clearing off tickets each time we have to loop.
1082  */
1083 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
1084 {
1085 	struct btrfs_fs_info *fs_info;
1086 	struct btrfs_space_info *space_info;
1087 	u64 to_reclaim;
1088 	enum btrfs_flush_state flush_state;
1089 	int commit_cycles = 0;
1090 	u64 last_tickets_id;
1091 
1092 	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
1093 	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1094 
1095 	spin_lock(&space_info->lock);
1096 	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1097 	if (!to_reclaim) {
1098 		space_info->flush = 0;
1099 		spin_unlock(&space_info->lock);
1100 		return;
1101 	}
1102 	last_tickets_id = space_info->tickets_id;
1103 	spin_unlock(&space_info->lock);
1104 
1105 	flush_state = FLUSH_DELAYED_ITEMS_NR;
1106 	do {
1107 		flush_space(fs_info, space_info, to_reclaim, flush_state, false);
1108 		spin_lock(&space_info->lock);
1109 		if (list_empty(&space_info->tickets)) {
1110 			space_info->flush = 0;
1111 			spin_unlock(&space_info->lock);
1112 			return;
1113 		}
1114 		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
1115 							      space_info);
1116 		if (last_tickets_id == space_info->tickets_id) {
1117 			flush_state++;
1118 		} else {
1119 			last_tickets_id = space_info->tickets_id;
1120 			flush_state = FLUSH_DELAYED_ITEMS_NR;
1121 			if (commit_cycles)
1122 				commit_cycles--;
1123 		}
1124 
1125 		/*
1126 		 * We do not want to empty the system of delalloc unless we're
1127 		 * under heavy pressure, so allow one trip through the flushing
1128 		 * logic before we start doing a FLUSH_DELALLOC_FULL.
1129 		 */
1130 		if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles)
1131 			flush_state++;
1132 
1133 		/*
1134 		 * We don't want to force a chunk allocation until we've tried
1135 		 * pretty hard to reclaim space.  Think of the case where we
1136 		 * freed up a bunch of space and so have a lot of pinned space
1137 		 * to reclaim.  We would rather use that than possibly create an
1138 		 * underutilized metadata chunk.  So if this is our first run
1139 		 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
1140 		 * commit the transaction.  If nothing has changed the next go
1141 		 * around then we can force a chunk allocation.
1142 		 */
1143 		if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
1144 			flush_state++;
1145 
1146 		if (flush_state > COMMIT_TRANS) {
1147 			commit_cycles++;
1148 			if (commit_cycles > 2) {
1149 				if (maybe_fail_all_tickets(fs_info, space_info)) {
1150 					flush_state = FLUSH_DELAYED_ITEMS_NR;
1151 					commit_cycles--;
1152 				} else {
1153 					space_info->flush = 0;
1154 				}
1155 			} else {
1156 				flush_state = FLUSH_DELAYED_ITEMS_NR;
1157 			}
1158 		}
1159 		spin_unlock(&space_info->lock);
1160 	} while (flush_state <= COMMIT_TRANS);
1161 }
1162 
1163 /*
1164  * This handles pre-flushing of metadata space before we get to the point that
1165  * we need to start blocking threads on tickets.  The logic here is different
1166  * from the other flush paths because it doesn't rely on tickets to tell us how
1167  * much we need to flush, instead it attempts to keep us below the 80% full
1168  * watermark of space by flushing whichever reservation pool is currently the
1169  * largest.
1170  */
1171 static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
1172 {
1173 	struct btrfs_fs_info *fs_info;
1174 	struct btrfs_space_info *space_info;
1175 	struct btrfs_block_rsv *delayed_block_rsv;
1176 	struct btrfs_block_rsv *delayed_refs_rsv;
1177 	struct btrfs_block_rsv *global_rsv;
1178 	struct btrfs_block_rsv *trans_rsv;
1179 	int loops = 0;
1180 
1181 	fs_info = container_of(work, struct btrfs_fs_info,
1182 			       preempt_reclaim_work);
1183 	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1184 	delayed_block_rsv = &fs_info->delayed_block_rsv;
1185 	delayed_refs_rsv = &fs_info->delayed_refs_rsv;
1186 	global_rsv = &fs_info->global_block_rsv;
1187 	trans_rsv = &fs_info->trans_block_rsv;
1188 
1189 	spin_lock(&space_info->lock);
1190 	while (need_preemptive_reclaim(fs_info, space_info)) {
1191 		enum btrfs_flush_state flush;
1192 		u64 delalloc_size = 0;
1193 		u64 to_reclaim, block_rsv_size;
1194 		const u64 global_rsv_size = btrfs_block_rsv_reserved(global_rsv);
1195 
1196 		loops++;
1197 
1198 		/*
1199 		 * We don't have a precise counter for the metadata being
1200 		 * reserved for delalloc, so we'll approximate it by subtracting
1201 		 * out the block rsv's space from the bytes_may_use.  If that
1202 		 * amount is higher than the individual reserves, then we can
1203 		 * assume it's tied up in delalloc reservations.
1204 		 */
1205 		block_rsv_size = global_rsv_size +
1206 			btrfs_block_rsv_reserved(delayed_block_rsv) +
1207 			btrfs_block_rsv_reserved(delayed_refs_rsv) +
1208 			btrfs_block_rsv_reserved(trans_rsv);
1209 		if (block_rsv_size < space_info->bytes_may_use)
1210 			delalloc_size = space_info->bytes_may_use - block_rsv_size;
1211 
1212 		/*
1213 		 * We don't want to include the global_rsv in our calculation,
1214 		 * because that's space we can't touch.  Subtract it from the
1215 		 * block_rsv_size for the next checks.
1216 		 */
1217 		block_rsv_size -= global_rsv_size;
1218 
1219 		/*
1220 		 * We really want to avoid flushing delalloc too much, as it
1221 		 * could result in poor allocation patterns, so only flush it if
1222 		 * it's larger than the rest of the pools combined.
1223 		 */
1224 		if (delalloc_size > block_rsv_size) {
1225 			to_reclaim = delalloc_size;
1226 			flush = FLUSH_DELALLOC;
1227 		} else if (space_info->bytes_pinned >
1228 			   (btrfs_block_rsv_reserved(delayed_block_rsv) +
1229 			    btrfs_block_rsv_reserved(delayed_refs_rsv))) {
1230 			to_reclaim = space_info->bytes_pinned;
1231 			flush = COMMIT_TRANS;
1232 		} else if (btrfs_block_rsv_reserved(delayed_block_rsv) >
1233 			   btrfs_block_rsv_reserved(delayed_refs_rsv)) {
1234 			to_reclaim = btrfs_block_rsv_reserved(delayed_block_rsv);
1235 			flush = FLUSH_DELAYED_ITEMS_NR;
1236 		} else {
1237 			to_reclaim = btrfs_block_rsv_reserved(delayed_refs_rsv);
1238 			flush = FLUSH_DELAYED_REFS_NR;
1239 		}
1240 
1241 		spin_unlock(&space_info->lock);
1242 
1243 		/*
1244 		 * We don't want to reclaim everything, just a portion, so scale
1245 		 * down the to_reclaim by 1/4.  If it takes us down to 0,
1246 		 * reclaim 1 item's worth.
1247 		 */
1248 		to_reclaim >>= 2;
1249 		if (!to_reclaim)
1250 			to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
1251 		flush_space(fs_info, space_info, to_reclaim, flush, true);
1252 		cond_resched();
1253 		spin_lock(&space_info->lock);
1254 	}
1255 
1256 	/* We only went through once, back off our clamping. */
1257 	if (loops == 1 && !space_info->reclaim_size)
1258 		space_info->clamp = max(1, space_info->clamp - 1);
1259 	trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
1260 	spin_unlock(&space_info->lock);
1261 }
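
/*
 * Worked example (editorial): if 1GiB is pinned and that exceeds the
 * delayed item and delayed ref reserves, the loop above picks COMMIT_TRANS
 * and asks flush_space() for only 1GiB >> 2 = 256MiB rather than the whole
 * amount, keeping preemptive reclaim incremental.
 */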
1262 
1263 /*
1264  * FLUSH_DELALLOC_WAIT:
1265  *   Space is freed from flushing delalloc in one of two ways.
1266  *
1267  *   1) compression is on and we allocate less space than we reserved
1268  *   2) we are overwriting existing space
1269  *
1270  *   For #1 that extra space is reclaimed as soon as the delalloc pages are
1271  *   COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
1272  *   length to ->bytes_reserved, and subtracts the reserved space from
1273  *   ->bytes_may_use.
1274  *
1275  *   For #2 this is trickier.  Once the ordered extent runs we will drop the
1276  *   extent in the range we are overwriting, which creates a delayed ref for
1277  *   that freed extent.  This however is not reclaimed until the transaction
1278  *   commits, thus the next stages.
1279  *
1280  * RUN_DELAYED_IPUTS
1281  *   If we are freeing inodes, we want to make sure all delayed iputs have
1282  *   completed, because they could have been on an inode with i_nlink == 0, and
1283  *   thus have been truncated and freed up space.  But again this space is not
1284  *   immediately re-usable, it comes in the form of a delayed ref, which must be
1285  *   run and then the transaction must be committed.
1286  *
1287  * COMMIT_TRANS
1288  *   This is where we reclaim all of the pinned space generated by running the
1289  *   iputs
1290  *
1291  * ALLOC_CHUNK_FORCE
1292  *   For data we start with alloc chunk force, however we could have been full
1293  *   before, and then the transaction commit could have freed new block groups,
1294  *   so if we now have space to allocate do the force chunk allocation.
1295  */
1296 static const enum btrfs_flush_state data_flush_states[] = {
1297 	FLUSH_DELALLOC_FULL,
1298 	RUN_DELAYED_IPUTS,
1299 	COMMIT_TRANS,
1300 	ALLOC_CHUNK_FORCE,
1301 };
1302 
1303 static void btrfs_async_reclaim_data_space(struct work_struct *work)
1304 {
1305 	struct btrfs_fs_info *fs_info;
1306 	struct btrfs_space_info *space_info;
1307 	u64 last_tickets_id;
1308 	enum btrfs_flush_state flush_state = 0;
1309 
1310 	fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
1311 	space_info = fs_info->data_sinfo;
1312 
1313 	spin_lock(&space_info->lock);
1314 	if (list_empty(&space_info->tickets)) {
1315 		space_info->flush = 0;
1316 		spin_unlock(&space_info->lock);
1317 		return;
1318 	}
1319 	last_tickets_id = space_info->tickets_id;
1320 	spin_unlock(&space_info->lock);
1321 
1322 	while (!space_info->full) {
1323 		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1324 		spin_lock(&space_info->lock);
1325 		if (list_empty(&space_info->tickets)) {
1326 			space_info->flush = 0;
1327 			spin_unlock(&space_info->lock);
1328 			return;
1329 		}
1330 
1331 		/* Something happened, fail everything and bail. */
1332 		if (BTRFS_FS_ERROR(fs_info))
1333 			goto aborted_fs;
1334 		last_tickets_id = space_info->tickets_id;
1335 		spin_unlock(&space_info->lock);
1336 	}
1337 
1338 	while (flush_state < ARRAY_SIZE(data_flush_states)) {
1339 		flush_space(fs_info, space_info, U64_MAX,
1340 			    data_flush_states[flush_state], false);
1341 		spin_lock(&space_info->lock);
1342 		if (list_empty(&space_info->tickets)) {
1343 			space_info->flush = 0;
1344 			spin_unlock(&space_info->lock);
1345 			return;
1346 		}
1347 
1348 		if (last_tickets_id == space_info->tickets_id) {
1349 			flush_state++;
1350 		} else {
1351 			last_tickets_id = space_info->tickets_id;
1352 			flush_state = 0;
1353 		}
1354 
1355 		if (flush_state >= ARRAY_SIZE(data_flush_states)) {
1356 			if (space_info->full) {
1357 				if (maybe_fail_all_tickets(fs_info, space_info))
1358 					flush_state = 0;
1359 				else
1360 					space_info->flush = 0;
1361 			} else {
1362 				flush_state = 0;
1363 			}
1364 
1365 			/* Something happened, fail everything and bail. */
1366 			if (BTRFS_FS_ERROR(fs_info))
1367 				goto aborted_fs;
1368 
1369 		}
1370 		spin_unlock(&space_info->lock);
1371 	}
1372 	return;
1373 
1374 aborted_fs:
1375 	maybe_fail_all_tickets(fs_info, space_info);
1376 	space_info->flush = 0;
1377 	spin_unlock(&space_info->lock);
1378 }
1379 
1380 void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
1381 {
1382 	INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
1383 	INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
1384 	INIT_WORK(&fs_info->preempt_reclaim_work,
1385 		  btrfs_preempt_reclaim_metadata_space);
1386 }
1387 
1388 static const enum btrfs_flush_state priority_flush_states[] = {
1389 	FLUSH_DELAYED_ITEMS_NR,
1390 	FLUSH_DELAYED_ITEMS,
1391 	ALLOC_CHUNK,
1392 };
1393 
1394 static const enum btrfs_flush_state evict_flush_states[] = {
1395 	FLUSH_DELAYED_ITEMS_NR,
1396 	FLUSH_DELAYED_ITEMS,
1397 	FLUSH_DELAYED_REFS_NR,
1398 	FLUSH_DELAYED_REFS,
1399 	FLUSH_DELALLOC,
1400 	FLUSH_DELALLOC_WAIT,
1401 	FLUSH_DELALLOC_FULL,
1402 	ALLOC_CHUNK,
1403 	COMMIT_TRANS,
1404 };
1405 
1406 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
1407 				struct btrfs_space_info *space_info,
1408 				struct reserve_ticket *ticket,
1409 				const enum btrfs_flush_state *states,
1410 				int states_nr)
1411 {
1412 	u64 to_reclaim;
1413 	int flush_state = 0;
1414 
1415 	spin_lock(&space_info->lock);
1416 	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1417 	/*
1418 	 * This is the priority reclaim path, so to_reclaim could be >0 still
1419 	 * because we may have only satisfied the priority tickets and still
1420 	 * left non-priority tickets on the list.  We would then have
1421 	 * to_reclaim but ->bytes == 0.
1422 	 */
1423 	if (ticket->bytes == 0) {
1424 		spin_unlock(&space_info->lock);
1425 		return;
1426 	}
1427 
1428 	while (flush_state < states_nr) {
1429 		spin_unlock(&space_info->lock);
1430 		flush_space(fs_info, space_info, to_reclaim, states[flush_state],
1431 			    false);
1432 		flush_state++;
1433 		spin_lock(&space_info->lock);
1434 		if (ticket->bytes == 0) {
1435 			spin_unlock(&space_info->lock);
1436 			return;
1437 		}
1438 	}
1439 
1440 	/*
1441 	 * Attempt to steal from the global rsv if we can, except if the fs was
1442 	 * turned into error mode due to a transaction abort when flushing space
1443 	 * above, in that case fail with the abort error instead of returning
1444 	 * success to the caller if we can steal from the global rsv - this is
1445 	 * just to have the caller fail immediately instead of later when trying to
1446 	 * modify the fs, making it easier to debug -ENOSPC problems.
1447 	 */
1448 	if (BTRFS_FS_ERROR(fs_info)) {
1449 		ticket->error = BTRFS_FS_ERROR(fs_info);
1450 		remove_ticket(space_info, ticket);
1451 	} else if (!steal_from_global_rsv(fs_info, space_info, ticket)) {
1452 		ticket->error = -ENOSPC;
1453 		remove_ticket(space_info, ticket);
1454 	}
1455 
1456 	/*
1457 	 * We must run try_granting_tickets here because we could be a large
1458 	 * ticket in front of a smaller ticket that can now be satisfied with
1459 	 * the available space.
1460 	 */
1461 	btrfs_try_granting_tickets(fs_info, space_info);
1462 	spin_unlock(&space_info->lock);
1463 }
1464 
1465 static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
1466 					struct btrfs_space_info *space_info,
1467 					struct reserve_ticket *ticket)
1468 {
1469 	spin_lock(&space_info->lock);
1470 
1471 	/* We could have been granted before we got here. */
1472 	if (ticket->bytes == 0) {
1473 		spin_unlock(&space_info->lock);
1474 		return;
1475 	}
1476 
1477 	while (!space_info->full) {
1478 		spin_unlock(&space_info->lock);
1479 		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1480 		spin_lock(&space_info->lock);
1481 		if (ticket->bytes == 0) {
1482 			spin_unlock(&space_info->lock);
1483 			return;
1484 		}
1485 	}
1486 
1487 	ticket->error = -ENOSPC;
1488 	remove_ticket(space_info, ticket);
1489 	btrfs_try_granting_tickets(fs_info, space_info);
1490 	spin_unlock(&space_info->lock);
1491 }
1492 
1493 static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
1494 				struct btrfs_space_info *space_info,
1495 				struct reserve_ticket *ticket)
1496 
1497 {
1498 	DEFINE_WAIT(wait);
1499 	int ret = 0;
1500 
1501 	spin_lock(&space_info->lock);
1502 	while (ticket->bytes > 0 && ticket->error == 0) {
1503 		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
1504 		if (ret) {
1505 			/*
1506 			 * Delete us from the list. After we unlock the space
1507 			 * info, we don't want the async reclaim job to reserve
1508 			 * space for this ticket. If that would happen, then the
1509 			 * ticket's task would not know that space was reserved
1510 			 * despite getting an error, resulting in a space leak
1511 			 * (bytes_may_use counter of our space_info).
1512 			 */
1513 			remove_ticket(space_info, ticket);
1514 			ticket->error = -EINTR;
1515 			break;
1516 		}
1517 		spin_unlock(&space_info->lock);
1518 
1519 		schedule();
1520 
1521 		finish_wait(&ticket->wait, &wait);
1522 		spin_lock(&space_info->lock);
1523 	}
1524 	spin_unlock(&space_info->lock);
1525 }
1526 
1527 /*
1528  * Do the appropriate flushing and waiting for a ticket.
1529  *
1530  * @fs_info:    the filesystem
1531  * @space_info: space info for the reservation
1532  * @ticket:     ticket for the reservation
1533  * @start_ns:   timestamp when the reservation started
1534  * @orig_bytes: amount of bytes originally reserved
1535  * @flush:      how much we can flush
1536  *
1537  * This does the work of figuring out how to flush for the ticket, waiting for
1538  * the reservation, and returning the appropriate error if there is one.
1539  */
1540 static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
1541 				 struct btrfs_space_info *space_info,
1542 				 struct reserve_ticket *ticket,
1543 				 u64 start_ns, u64 orig_bytes,
1544 				 enum btrfs_reserve_flush_enum flush)
1545 {
1546 	int ret;
1547 
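	/*
	 * For FLUSH_ALL, FLUSH_ALL_STEAL and FLUSH_DATA the async reclaim
	 * workers do the flushing, so we only wait on the ticket here; the
	 * priority cases (LIMIT, EVICT, FREE_SPACE_INODE) flush synchronously
	 * below.
	 */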
1548 	switch (flush) {
1549 	case BTRFS_RESERVE_FLUSH_DATA:
1550 	case BTRFS_RESERVE_FLUSH_ALL:
1551 	case BTRFS_RESERVE_FLUSH_ALL_STEAL:
1552 		wait_reserve_ticket(fs_info, space_info, ticket);
1553 		break;
1554 	case BTRFS_RESERVE_FLUSH_LIMIT:
1555 		priority_reclaim_metadata_space(fs_info, space_info, ticket,
1556 						priority_flush_states,
1557 						ARRAY_SIZE(priority_flush_states));
1558 		break;
1559 	case BTRFS_RESERVE_FLUSH_EVICT:
1560 		priority_reclaim_metadata_space(fs_info, space_info, ticket,
1561 						evict_flush_states,
1562 						ARRAY_SIZE(evict_flush_states));
1563 		break;
1564 	case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
1565 		priority_reclaim_data_space(fs_info, space_info, ticket);
1566 		break;
1567 	default:
1568 		ASSERT(0);
1569 		break;
1570 	}
1571 
1572 	ret = ticket->error;
1573 	ASSERT(list_empty(&ticket->list));
1574 	/*
1575 	 * Check that we can't have an error set if the reservation succeeded,
1576 	 * as that would confuse tasks and lead them to error out without
1577 	 * releasing reserved space (if an error happens the expectation is that
1578 	 * space wasn't reserved at all).
1579 	 */
1580 	ASSERT(!(ticket->bytes == 0 && ticket->error));
1581 	trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
1582 				   start_ns, flush, ticket->error);
1583 	return ret;
1584 }
1585 
1586 /*
1587  * This returns true if this flush state will go through the ordinary flushing
1588  * code.
1589  */
1590 static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
1591 {
1592 	return	(flush == BTRFS_RESERVE_FLUSH_ALL) ||
1593 		(flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
1594 }
1595 
1596 static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
1597 				       struct btrfs_space_info *space_info)
1598 {
1599 	u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
1600 	u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
1601 
1602 	/*
1603 	 * If we're heavy on ordered operations then clamping won't help us.  We
1604 	 * need to clamp specifically to keep up with dirtying buffered
1605 	 * writers, because there's not a 1:1 correlation of writing delalloc
1606 	 * and freeing space, like there is with flushing delayed refs or
1607 	 * delayed nodes.  If we're already more ordered than delalloc then
1608 	 * we're keeping up, otherwise we aren't and should probably clamp.
1609 	 */
1610 	if (ordered < delalloc)
1611 		space_info->clamp = min(space_info->clamp + 1, 8);
1612 }
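
/*
 * A sketch of the clamp's effect, assuming need_preemptive_reclaim() keeps
 * using ->clamp as a right-shift of its threshold: each increment halves
 * the free-space threshold below which preemptive flushing starts, so the
 * maximum clamp of 8 makes the preemptive flusher about 256x more eager.
 */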
1613 
1614 static inline bool can_steal(enum btrfs_reserve_flush_enum flush)
1615 {
1616 	return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1617 		flush == BTRFS_RESERVE_FLUSH_EVICT);
1618 }
1619 
1620 /*
1621  * NO_FLUSH and FLUSH_EMERGENCY don't want to create a ticket, they just want to
1622  * fail as quickly as possible.
1623  */
1624 static inline bool can_ticket(enum btrfs_reserve_flush_enum flush)
1625 {
1626 	return (flush != BTRFS_RESERVE_NO_FLUSH &&
1627 		flush != BTRFS_RESERVE_FLUSH_EMERGENCY);
1628 }
1629 
1630 /*
1631  * Try to reserve bytes from the block_rsv's space.
1632  *
1633  * @fs_info:    the filesystem
1634  * @space_info: space info we want to allocate from
1635  * @orig_bytes: number of bytes we want
1636  * @flush:      whether or not we can flush to make our reservation
1637  *
1638  * This will reserve orig_bytes number of bytes from the space info associated
1639  * with the block_rsv.  If there is not enough space it will make an attempt to
1640  * flush out space to make room.  It will do this by flushing delalloc if
1641  * possible or committing the transaction.  If flush is 0 then no attempts to
1642  * possible or committing the transaction.  If @flush is BTRFS_RESERVE_NO_FLUSH
1643  * then no attempts to regain reservations will be made and this will fail if
1644  * there is not enough space already.
1645 static int __reserve_bytes(struct btrfs_fs_info *fs_info,
1646 			   struct btrfs_space_info *space_info, u64 orig_bytes,
1647 			   enum btrfs_reserve_flush_enum flush)
1648 {
1649 	struct work_struct *async_work;
1650 	struct reserve_ticket ticket;
1651 	u64 start_ns = 0;
1652 	u64 used;
1653 	int ret = -ENOSPC;
1654 	bool pending_tickets;
1655 
1656 	ASSERT(orig_bytes);
1657 	/*
1658 	 * If we have a transaction handle (current->journal_info != NULL), then
1659 	 * the flush method can be neither BTRFS_RESERVE_FLUSH_ALL* nor
1660 	 * BTRFS_RESERVE_FLUSH_EVICT, as we could deadlock because those
1661 	 * flushing methods can trigger transaction commits.
1662 	 */
1663 	if (current->journal_info) {
1664 		/* One assert per line for easier debugging. */
1665 		ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL);
1666 		ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL_STEAL);
1667 		ASSERT(flush != BTRFS_RESERVE_FLUSH_EVICT);
1668 	}
1669 
1670 	if (flush == BTRFS_RESERVE_FLUSH_DATA)
1671 		async_work = &fs_info->async_data_reclaim_work;
1672 	else
1673 		async_work = &fs_info->async_reclaim_work;
1674 
1675 	spin_lock(&space_info->lock);
1676 	used = btrfs_space_info_used(space_info, true);
1677 
1678 	/*
1679 	 * We don't want NO_FLUSH allocations to jump ahead of everybody; they can
1680 	 * generally handle ENOSPC in a different way, so treat them the same as
1681 	 * normal flushers when it comes to skipping pending tickets.
1682 	 */
1683 	if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
1684 		pending_tickets = !list_empty(&space_info->tickets) ||
1685 			!list_empty(&space_info->priority_tickets);
1686 	else
1687 		pending_tickets = !list_empty(&space_info->priority_tickets);
1688 
1689 	/*
1690 	 * Carry on if we have enough space (short-circuit), otherwise call
1691 	 * btrfs_can_overcommit() to check whether we can overcommit and continue.
1692 	 */
1693 	if (!pending_tickets &&
1694 	    ((used + orig_bytes <= space_info->total_bytes) ||
1695 	     btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
1696 		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
1697 						      orig_bytes);
1698 		ret = 0;
1699 	}
1700 
1701 	/*
1702 	 * Things are dire, we need to make a reservation so we don't abort.  We
1703 	 * will let this reservation go through as long as we have actual space
1704 	 * left to allocate for the block.
1705 	 */
1706 	if (ret && unlikely(flush == BTRFS_RESERVE_FLUSH_EMERGENCY)) {
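		/*
		 * Note the "false" here: unlike the check above, this lookup
		 * excludes bytes_may_use, so outstanding reservations do not
		 * block an emergency reservation as long as real space
		 * (used + reserved + pinned + readonly) remains.
		 */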
1707 		used = btrfs_space_info_used(space_info, false);
1708 		if (used + orig_bytes <= space_info->total_bytes) {
1709 			btrfs_space_info_update_bytes_may_use(fs_info, space_info,
1710 							      orig_bytes);
1711 			ret = 0;
1712 		}
1713 	}
1714 
1715 	/*
1716 	 * If we couldn't make a reservation then setup our reservation ticket
1717 	 * and kick the async worker if it's not already running.
1718 	 *
1719 	 * If we are a priority flusher then we just need to add our ticket to
1720 	 * the list and we will do our own flushing further down.
1721 	 */
1722 	if (ret && can_ticket(flush)) {
1723 		ticket.bytes = orig_bytes;
1724 		ticket.error = 0;
1725 		space_info->reclaim_size += ticket.bytes;
1726 		init_waitqueue_head(&ticket.wait);
1727 		ticket.steal = can_steal(flush);
1728 		if (trace_btrfs_reserve_ticket_enabled())
1729 			start_ns = ktime_get_ns();
1730 
1731 		if (flush == BTRFS_RESERVE_FLUSH_ALL ||
1732 		    flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1733 		    flush == BTRFS_RESERVE_FLUSH_DATA) {
1734 			list_add_tail(&ticket.list, &space_info->tickets);
1735 			if (!space_info->flush) {
1736 				/*
1737 				 * We were forced to add a reserve ticket, so
1738 				 * our preemptive flushing is unable to keep
1739 				 * up.  Clamp down on the threshold for the
1740 				 * preemptive flushing in order to keep up with
1741 				 * the workload.
1742 				 */
1743 				maybe_clamp_preempt(fs_info, space_info);
1744 
1745 				space_info->flush = 1;
1746 				trace_btrfs_trigger_flush(fs_info,
1747 							  space_info->flags,
1748 							  orig_bytes, flush,
1749 							  "enospc");
1750 				queue_work(system_unbound_wq, async_work);
1751 			}
1752 		} else {
1753 			list_add_tail(&ticket.list,
1754 				      &space_info->priority_tickets);
1755 		}
1756 	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
1757 		/*
1758 		 * We will do the space reservation dance during log replay,
1759 		 * which means we won't have fs_info->fs_root set, so don't do
1760 		 * the async reclaim as we will panic.
1761 		 */
1762 		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
1763 		    !work_busy(&fs_info->preempt_reclaim_work) &&
1764 		    need_preemptive_reclaim(fs_info, space_info)) {
1765 			trace_btrfs_trigger_flush(fs_info, space_info->flags,
1766 						  orig_bytes, flush, "preempt");
1767 			queue_work(system_unbound_wq,
1768 				   &fs_info->preempt_reclaim_work);
1769 		}
1770 	}
1771 	spin_unlock(&space_info->lock);
1772 	if (!ret || !can_ticket(flush))
1773 		return ret;
1774 
1775 	return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
1776 				     orig_bytes, flush);
1777 }
1778 
1779 /*
1780  * Try to reserve metadata bytes from the block_rsv's space.
1781  *
1782  * @fs_info:    the filesystem
1783  * @space_info: the space_info we're allocating for
1784  * @orig_bytes: number of bytes we want
1785  * @flush:      whether or not we can flush to make our reservation
1786  *
1787  * This will reserve orig_bytes number of bytes from the space info associated
1788  * with the block_rsv.  If there is not enough space it will make an attempt to
1789  * flush out space to make room.  It will do this by flushing delalloc if
1790  * possible or committing the transaction.  If @flush is BTRFS_RESERVE_NO_FLUSH
1791  * then no attempts to regain reservations will be made and this will fail if
1792  * there is not enough space already.
1793  */
1794 int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
1795 				 struct btrfs_space_info *space_info,
1796 				 u64 orig_bytes,
1797 				 enum btrfs_reserve_flush_enum flush)
1798 {
1799 	int ret;
1800 
1801 	ret = __reserve_bytes(fs_info, space_info, orig_bytes, flush);
1802 	if (ret == -ENOSPC) {
1803 		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1804 					      space_info->flags, orig_bytes, 1);
1805 
1806 		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1807 			btrfs_dump_space_info(fs_info, space_info, orig_bytes, 0);
1808 	}
1809 	return ret;
1810 }
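
/*
 * Example usage (editor's sketch, not a call site in this file): reserve
 * worst-case metadata space for modifying one tree item and give the bytes
 * back to the space_info if the surrounding operation is aborted:
 *
 *	u64 num_bytes = btrfs_calc_metadata_size(fs_info, 1);
 *	int ret = btrfs_reserve_metadata_bytes(fs_info, space_info, num_bytes,
 *					       BTRFS_RESERVE_FLUSH_ALL);
 *	if (ret)
 *		return ret;
 *	...
 *	btrfs_space_info_free_bytes_may_use(fs_info, space_info, num_bytes);
 */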
1811 
1812 /*
1813  * Try to reserve data bytes for an allocation.
1814  *
1815  * @fs_info: the filesystem
1816  * @bytes:   number of bytes we need
1817  * @flush:   how we are allowed to flush
1818  *
1819  * This will reserve bytes from the data space info.  If there is not enough
1820  * space then we will attempt to flush space as specified by flush.
1821  */
1822 int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
1823 			     enum btrfs_reserve_flush_enum flush)
1824 {
1825 	struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
1826 	int ret;
1827 
1828 	ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
1829 	       flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE ||
1830 	       flush == BTRFS_RESERVE_NO_FLUSH);
1831 	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);
1832 
1833 	ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
1834 	if (ret == -ENOSPC) {
1835 		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1836 					      data_sinfo->flags, bytes, 1);
1837 		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1838 			btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
1839 	}
1840 	return ret;
1841 }
1842 
1843 /* Dump all the space infos when we abort a transaction due to ENOSPC. */
1844 __cold void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info)
1845 {
1846 	struct btrfs_space_info *space_info;
1847 
1848 	btrfs_info(fs_info, "dumping space info:");
1849 	list_for_each_entry(space_info, &fs_info->space_info, list) {
1850 		spin_lock(&space_info->lock);
1851 		__btrfs_dump_space_info(fs_info, space_info);
1852 		spin_unlock(&space_info->lock);
1853 	}
1854 	dump_global_block_rsv(fs_info);
1855 }
1856 
1857 /*
1858  * Account the unused space of all the readonly block groups in the
1859  * space_info.  Takes mirrors into account.
1860  */
1861 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
1862 {
1863 	struct btrfs_block_group *block_group;
1864 	u64 free_bytes = 0;
1865 	int factor;
1866 
1867 	/* It's df, we don't care if it's racy */
1868 	if (list_empty(&sinfo->ro_bgs))
1869 		return 0;
1870 
1871 	spin_lock(&sinfo->lock);
1872 	list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
1873 		spin_lock(&block_group->lock);
1874 
1875 		if (!block_group->ro) {
1876 			spin_unlock(&block_group->lock);
1877 			continue;
1878 		}
1879 
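		/*
		 * Scale by the raid factor to report raw device bytes: e.g.
		 * with RAID1 each unused logical byte occupies two bytes of
		 * raw space, so it counts double here.
		 */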
1880 		factor = btrfs_bg_type_to_factor(block_group->flags);
1881 		free_bytes += (block_group->length -
1882 			       block_group->used) * factor;
1883 
1884 		spin_unlock(&block_group->lock);
1885 	}
1886 	spin_unlock(&sinfo->lock);
1887 
1888 	return free_bytes;
1889 }
1890 
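/*
 * Return the percentage ratio 100 * x / y, trading precision for range
 * instead of overflowing: if 100 * x would overflow u64, both values are
 * shifted down by 10 bits (preserving the ratio) and we retry.  E.g. for
 * x = y = 2^60 the multiplication overflows, so both become 2^50 and the
 * result is still 100.
 */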
1891 static u64 calc_pct_ratio(u64 x, u64 y)
1892 {
1893 	int err;
1894 
1895 	if (!y)
1896 		return 0;
1897 again:
1898 	err = check_mul_overflow(100, x, &x);
1899 	if (err)
1900 		goto lose_precision;
1901 	return div64_u64(x, y);
1902 lose_precision:
1903 	x >>= 10;
1904 	y >>= 10;
1905 	if (!y)
1906 		y = 1;
1907 	goto again;
1908 }
1909 
1910 /*
1911  * A reasonable buffer for unallocated space is 10 data block_groups.
1912  * If we claw this back repeatedly, we can still achieve efficient
1913  * utilization when near full, and not do too much reclaim while
1914  * always maintaining a solid buffer for workloads that quickly
1915  * allocate and pressure the unallocated space.
1916  */
1917 static u64 calc_unalloc_target(struct btrfs_fs_info *fs_info)
1918 {
1919 	u64 chunk_sz = calc_effective_data_chunk_size(fs_info);
1920 
1921 	return BTRFS_UNALLOC_BLOCK_GROUP_TARGET * chunk_sz;
1922 }
1923 
1924 /*
1925  * The fundamental goal of automatic reclaim is to protect the filesystem's
1926  * unallocated space and thus minimize the probability of the filesystem going
1927  * read only when a metadata allocation failure causes a transaction abort.
1928  *
1929  * However, relocations happen into the space_info's unused space, therefore
1930  * automatic reclaim must also back off as that space runs low. There is no
1931  * value in doing trivial "relocations" of re-writing the same block group
1932  * into a fresh one.
1933  *
1934  * Furthermore, we want to avoid doing too much reclaim even if there are good
1935  * candidates. This is because the allocator is pretty good at filling up the
1936  * holes with writes. So we want to do just enough reclaim to try and stay
1937  * safe from running out of unallocated space but not be wasteful about it.
1938  *
1939  * Therefore, the dynamic reclaim threshold is calculated as follows:
1940  * - calculate a target unallocated amount of 10 block group sized chunks
1941  * - ratchet up the intensity of reclaim depending on how far we are from
1942  *   that target by using a formula of unalloc / target to set the threshold.
1943  *
1944  * Typically with 10 block groups as the target, the discrete values this comes
1945  * out to are 0, 10, 20, ... , 80, 90, and 99.
1946  */
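/*
 * Worked example (hypothetical numbers): with an effective data chunk size
 * of 1G the target is 10G of unallocated space.  If only 4G is unallocated,
 * want = 10G - 4G = 6G and the threshold is 100 * 6G / 10G = 60, i.e. block
 * groups using less than 60% of their length become reclaim candidates.
 */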
1947 static int calc_dynamic_reclaim_threshold(struct btrfs_space_info *space_info)
1948 {
1949 	struct btrfs_fs_info *fs_info = space_info->fs_info;
1950 	u64 unalloc = atomic64_read(&fs_info->free_chunk_space);
1951 	u64 target = calc_unalloc_target(fs_info);
1952 	u64 alloc = space_info->total_bytes;
1953 	u64 used = btrfs_space_info_used(space_info, false);
1954 	u64 unused = alloc - used;
1955 	u64 want = target > unalloc ? target - unalloc : 0;
1956 	u64 data_chunk_size = calc_effective_data_chunk_size(fs_info);
1957 
1958 	/* If we have less than one data chunk of unused space, don't bother. */
1959 	if (unused < data_chunk_size)
1960 		return 0;
1961 
1962 	/* The cast to int is OK: want <= target, so the ratio is at most 100. */
1963 	return calc_pct_ratio(want, target);
1964 }
1965 
1966 int btrfs_calc_reclaim_threshold(struct btrfs_space_info *space_info)
1967 {
1968 	lockdep_assert_held(&space_info->lock);
1969 
1970 	if (READ_ONCE(space_info->dynamic_reclaim))
1971 		return calc_dynamic_reclaim_threshold(space_info);
1972 	return READ_ONCE(space_info->bg_reclaim_threshold);
1973 }
1974 
1975 /*
1976  * Under "urgent" reclaim, we will reclaim even fresh block groups that have
1977  * recently seen successful allocations, as we are desperate to reclaim
1978  * whatever we can to avoid ENOSPC in a transaction leading to a readonly fs.
1979  */
1980 static bool is_reclaim_urgent(struct btrfs_space_info *space_info)
1981 {
1982 	struct btrfs_fs_info *fs_info = space_info->fs_info;
1983 	u64 unalloc = atomic64_read(&fs_info->free_chunk_space);
1984 	u64 data_chunk_size = calc_effective_data_chunk_size(fs_info);
1985 
1986 	return unalloc < data_chunk_size;
1987 }
1988 
1989 static int do_reclaim_sweep(struct btrfs_fs_info *fs_info,
1990 			    struct btrfs_space_info *space_info, int raid)
1991 {
1992 	struct btrfs_block_group *bg;
1993 	int thresh_pct;
1994 	bool try_again = true;
1995 	bool urgent;
1996 
1997 	spin_lock(&space_info->lock);
1998 	urgent = is_reclaim_urgent(space_info);
1999 	thresh_pct = btrfs_calc_reclaim_threshold(space_info);
2000 	spin_unlock(&space_info->lock);
2001 
2002 	down_read(&space_info->groups_sem);
2003 again:
2004 	list_for_each_entry(bg, &space_info->block_groups[raid], list) {
2005 		u64 thresh;
2006 		bool reclaim = false;
2007 
2008 		btrfs_get_block_group(bg);
2009 		spin_lock(&bg->lock);
2010 		thresh = mult_perc(bg->length, thresh_pct);
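		/*
		 * Only reclaim a group that was already under the threshold
		 * on a previous sweep: reclaim_mark is zero for fresh groups
		 * and is incremented once per sweep below.
		 */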
2011 		if (bg->used < thresh && bg->reclaim_mark) {
2012 			try_again = false;
2013 			reclaim = true;
2014 		}
2015 		bg->reclaim_mark++;
2016 		spin_unlock(&bg->lock);
2017 		if (reclaim)
2018 			btrfs_mark_bg_to_reclaim(bg);
2019 		btrfs_put_block_group(bg);
2020 	}
2021 
2022 	/*
2023 	 * In situations where we are very motivated to reclaim (low unalloc),
2024 	 * use two passes to make the reclaim_mark check best effort.
2025 	 *
2026 	 * If any staler groups qualified we leave the fresher ones alone, but if
2027 	 * we really need a block group, the second pass takes a fresh one.
2028 	 */
2029 	if (try_again && urgent) {
2030 		try_again = false;
2031 		goto again;
2032 	}
2033 
2034 	up_read(&space_info->groups_sem);
2035 	return 0;
2036 }
2037 
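/*
 * Accumulate freed bytes; once at least one effective data chunk worth of
 * space in this space_info has become reclaimable, arm the periodic reclaim
 * sweep.  The counter is reset whenever the ready flag is cleared again.
 */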
2038 void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes)
2039 {
2040 	u64 chunk_sz = calc_effective_data_chunk_size(space_info->fs_info);
2041 
2042 	lockdep_assert_held(&space_info->lock);
2043 	space_info->reclaimable_bytes += bytes;
2044 
2045 	if (space_info->reclaimable_bytes >= chunk_sz)
2046 		btrfs_set_periodic_reclaim_ready(space_info, true);
2047 }
2048 
2049 void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready)
2050 {
2051 	lockdep_assert_held(&space_info->lock);
2052 	if (!READ_ONCE(space_info->periodic_reclaim))
2053 		return;
2054 	if (ready != space_info->periodic_reclaim_ready) {
2055 		space_info->periodic_reclaim_ready = ready;
2056 		if (!ready)
2057 			space_info->reclaimable_bytes = 0;
2058 	}
2059 }
2060 
2061 bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info)
2062 {
2063 	bool ret;
2064 
2065 	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
2066 		return false;
2067 	if (!READ_ONCE(space_info->periodic_reclaim))
2068 		return false;
2069 
2070 	spin_lock(&space_info->lock);
2071 	ret = space_info->periodic_reclaim_ready;
2072 	btrfs_set_periodic_reclaim_ready(space_info, false);
2073 	spin_unlock(&space_info->lock);
2074 
2075 	return ret;
2076 }
2077 
2078 int btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info)
2079 {
2080 	int ret = 0;
2081 	int raid;
2082 	struct btrfs_space_info *space_info;
2083 
2084 	list_for_each_entry(space_info, &fs_info->space_info, list) {
2085 		if (!btrfs_should_periodic_reclaim(space_info))
2086 			continue;
2087 		for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++) {
2088 			ret = do_reclaim_sweep(fs_info, space_info, raid);
2089 			if (ret)
2090 				return ret;
2091 		}
2092 	}
2093 
2094 	return ret;
2095 }
2096