1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/spinlock.h>
4 #include <linux/minmax.h>
5 #include "misc.h"
6 #include "ctree.h"
7 #include "space-info.h"
8 #include "sysfs.h"
9 #include "volumes.h"
10 #include "free-space-cache.h"
11 #include "ordered-data.h"
12 #include "transaction.h"
13 #include "block-group.h"
14 #include "fs.h"
15 #include "accessors.h"
16 #include "extent-tree.h"
17 #include "zoned.h"
18 #include "delayed-inode.h"
19
20 /*
21 * HOW DOES SPACE RESERVATION WORK
22 *
23 * If you want to know about delalloc specifically, there is a separate comment
24 * for that with the delalloc code. This comment is about how the whole system
25 * works generally.
26 *
27 * BASIC CONCEPTS
28 *
29 * 1) space_info. This is the ultimate arbiter of how much space we can use.
30 * There's a description of the bytes_ fields with the struct declaration,
31 * refer to that for specifics on each field. Suffice it to say that for
32 * reservations we care about total_bytes - SUM(space_info->bytes_) when
33 * determining if there is space to make an allocation. There is a space_info
34 * for METADATA, SYSTEM, and DATA areas.
35 *
36 * 2) block_rsv's. These are basically buckets for every different type of
37 * metadata reservation we have. You can see the comment in the block_rsv
38 * code on the rules for each type, but generally block_rsv->reserved is how
39 * much space is accounted for in space_info->bytes_may_use.
40 *
41 * 3) btrfs_calc*_size. These are the worst case calculations we use, based
42 * on the number of items we will want to modify. We have one for changing
43 * items, and one for inserting new items. Generally we use these helpers to
44 * determine the size of the block reserves, and then use the actual bytes
45 * values to adjust the space_info counters.
46 *
47 * MAKING RESERVATIONS, THE NORMAL CASE
48 *
49 * We call into either btrfs_reserve_data_bytes() or
50 * btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
51 * num_bytes we want to reserve.
52 *
53 * ->reserve
54 * space_info->bytes_may_use += num_bytes
55 *
56 * ->extent allocation
57 * Call btrfs_add_reserved_bytes() which does
58 * space_info->bytes_may_use -= num_bytes
59 * space_info->bytes_reserved += extent_bytes
60 *
61 * ->insert reference
62 * Call btrfs_update_block_group() which does
63 * space_info->bytes_reserved -= extent_bytes
64 * space_info->bytes_used += extent_bytes
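 *
 * As an illustrative walk-through (hypothetical sizes, not taken from any
 * real workload): a successful 16KiB metadata reservation that ends up
 * backed by a 16KiB tree block moves the counters like this:
 *
 *   reserve:            bytes_may_use  += 16K
 *   extent allocation:  bytes_may_use  -= 16K,  bytes_reserved += 16K
 *   insert reference:   bytes_reserved -= 16K,  bytes_used     += 16K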
65 *
66 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
67 *
68 * Assume we are unable to simply make the reservation because we do not have
69 * enough space
70 *
71 * -> reserve_bytes
72 * create a reserve_ticket with ->bytes set to our reservation, add it to
73 * the tail of space_info->tickets, kick async flush thread
74 *
75 * ->handle_reserve_ticket
76 * wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
77 * on the ticket.
78 *
79 * -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
80 * Flushes various things attempting to free up space.
81 *
82 * -> btrfs_try_granting_tickets()
83 * This is called by anything that either subtracts space from
84 * space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
85 * space_info->total_bytes. This loops through the ->priority_tickets and
86 * then the ->tickets list checking to see if the reservation can be
87 * completed. If it can the space is added to space_info->bytes_may_use and
88 * the ticket is woken up.
89 *
90 * -> ticket wakeup
91 * Check if ->bytes == 0, if it does we got our reservation and we can carry
92 * on, if not return the appropriate error (ENOSPC, but can be EINTR if we
93 * were interrupted.)
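 *
 * A rough sketch of the waiter side (simplified and hypothetical, not the
 * literal code in this file; error handling omitted):
 *
 *   ticket.bytes = num_bytes;
 *   list_add_tail(&ticket.list, &space_info->tickets);
 *   queue_work(system_unbound_wq, &fs_info->async_reclaim_work);
 *   wait_event(ticket.wait, ticket.bytes == 0 || ticket.error);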
94 *
95 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
96 *
97 * Same as the above, except we add ourselves to the
98 * space_info->priority_tickets, and we do not use ticket->wait, we simply
99 * call flush_space() ourselves for the states that are safe for us to call
100 * without deadlocking and hope for the best.
101 *
102 * THE FLUSHING STATES
103 *
104 * Generally speaking we will have two cases for each state, a "nice" state
105 * and an "ALL THE THINGS" state. In btrfs we delay a lot of work in order to
106 * reduce the locking overhead on the various trees, and even to keep from
107 * doing any work at all in the case of delayed refs. Each of these delayed
108 * things however hold reservations, and so letting them run allows us to
109 * reclaim space so we can make new reservations.
110 *
111 * FLUSH_DELAYED_ITEMS
112 * Every inode has a delayed item to update the inode. Take a simple write
113 * for example: we would update the inode item at write time to update the
114 * mtime, and then again at finish_ordered_io() time in order to update the
115 * isize or bytes. We keep these delayed items to coalesce these operations
116 * into a single operation done on demand. These are an easy way to reclaim
117 * metadata space.
118 *
119 * FLUSH_DELALLOC
120 * Look at the delalloc comment to get an idea of how much space is reserved
121 * for delayed allocation. We can reclaim some of this space simply by
122 * running delalloc, but usually we need to wait for ordered extents to
123 * reclaim the bulk of this space.
124 *
125 * FLUSH_DELAYED_REFS
126 * We have a block reserve for the outstanding delayed refs space, and every
127 * delayed ref operation holds a reservation. Running these is a quick way
128 * to reclaim space, but we want to hold this until the end because COW can
129 * churn a lot and we can avoid making some extent tree modifications if we
130 * are able to delay for as long as possible.
131 *
132 * RECLAIM_ZONES
133 * This state only works for the zoned mode. In zoned mode, we cannot reuse
134 * regions that have once been allocated and then been freed until we reset
135 * the zone, due to the sequential write requirement. The RECLAIM_ZONES state
136 * calls the reclaim machinery, evacuating the still valid data in these
137 * block groups and relocating it to the data_reloc_bg. Afterwards these
138 * block groups get deleted and the transaction is committed. This frees up
139 * space to use for new allocations.
140 *
141 * RESET_ZONES
142 * This state works only for the zoned mode. In zoned mode, we cannot
143 * reuse a region that was once allocated and then freed until we reset
144 * the zone, due to the sequential write requirement. The RESET_ZONES
145 * state resets the zones of an unused block group and lets us reuse the
146 * space. Reusing the zones is faster than removing the block group and
147 * allocating another block group on the same zones.
148 *
149 * ALLOC_CHUNK
150 * We will skip this the first time through space reservation, because of
151 * overcommit and we don't want to have a lot of useless metadata space when
152 * our worst case reservations will likely never come true.
153 *
154 * RUN_DELAYED_IPUTS
155 * If we're freeing inodes we're likely freeing checksums, file extent
156 * items, and extent tree items. Loads of space could be freed up by these
157 * operations, however they won't be usable until the transaction commits.
158 *
159 * COMMIT_TRANS
160 * This will commit the transaction. Historically we had a lot of logic
161 * surrounding whether or not we'd commit the transaction, but this was born
162 * out of a pre-tickets era where we could end up committing the transaction
163 * thousands of times in a row without making progress. Now thanks to our
164 * ticketing system we know if we're not making progress and can error
165 * everybody out after a few commits rather than burning the disk hoping for
166 * a different answer.
167 *
168 * OVERCOMMIT
169 *
170 * Because we hold so many reservations for metadata we will allow you to
171 * reserve more space than is currently free in the currently allocated
172 * metadata space. This only happens with metadata; data does not allow
173 * overcommitting.
174 *
175 * You can see the current logic for when we allow overcommit in
176 * btrfs_can_overcommit(), but it only applies to unallocated space. If there
177 * is no unallocated space to be had, all reservations are kept within the
178 * free space in the allocated metadata chunks.
179 *
180 * Because of overcommitting, you generally want to use the
181 * btrfs_can_overcommit() logic for metadata allocations, as it does the right
182 * thing with or without extra unallocated space.
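 *
 * A hypothetical example of the overcommit math: with 1GiB of allocated
 * metadata chunks, 900MiB already accounted in the bytes_ counters and a
 * few GiB of unallocated disk space, a 200MiB metadata reservation can
 * still succeed because used + bytes stays below total_bytes plus the
 * allowed fraction of the unallocated space, even though the allocated
 * chunks alone could not hold it.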
183 */
184
185 struct reserve_ticket {
186 u64 bytes;
187 int error;
188 bool steal;
189 struct list_head list;
190 wait_queue_head_t wait;
191 spinlock_t lock;
192 };
193
194 /*
195 * after adding space to the filesystem, we need to clear the full flags
196 * on all the space infos.
197 */
198 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
199 {
200 struct list_head *head = &info->space_info;
201 struct btrfs_space_info *found;
202
203 list_for_each_entry(found, head, list)
204 found->full = false;
205 }
206
207 /*
208 * Block groups with more than this value (percents) of unusable space will be
209 * scheduled for background reclaim.
210 */
211 #define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH (75)
212
213 #define BTRFS_UNALLOC_BLOCK_GROUP_TARGET (10ULL)
214
215 #define BTRFS_ZONED_SYNC_RECLAIM_BATCH (5)
216
217 /*
218 * Calculate chunk size depending on volume type (regular or zoned).
219 */
220 static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
221 {
222 if (btrfs_is_zoned(fs_info))
223 return fs_info->zone_size;
224
225 ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK, "flags=%llu", flags);
226
227 if (flags & BTRFS_BLOCK_GROUP_DATA)
228 return BTRFS_MAX_DATA_CHUNK_SIZE;
229 else if (flags & (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA_REMAP))
230 return SZ_32M;
231
232 /* Handle BTRFS_BLOCK_GROUP_METADATA */
233 if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G)
234 return SZ_1G;
235
236 return SZ_256M;
237 }
238
239 /*
240 * Update default chunk size.
241 */
242 void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
243 u64 chunk_size)
244 {
245 WRITE_ONCE(space_info->chunk_size, chunk_size);
246 }
247
248 static void init_space_info(struct btrfs_fs_info *info,
249 struct btrfs_space_info *space_info, u64 flags)
250 {
251 space_info->fs_info = info;
252 for (int i = 0; i < BTRFS_NR_RAID_TYPES; i++)
253 INIT_LIST_HEAD(&space_info->block_groups[i]);
254 init_rwsem(&space_info->groups_sem);
255 spin_lock_init(&space_info->lock);
256 space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
257 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
258 INIT_LIST_HEAD(&space_info->ro_bgs);
259 INIT_LIST_HEAD(&space_info->tickets);
260 INIT_LIST_HEAD(&space_info->priority_tickets);
261 space_info->clamp = 1;
262 btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));
263 space_info->subgroup_id = BTRFS_SUB_GROUP_PRIMARY;
264
265 if (btrfs_is_zoned(info))
266 space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;
267 }
268
269 static int create_space_info_sub_group(struct btrfs_space_info *parent, u64 flags,
270 enum btrfs_space_info_sub_group id, int index)
271 {
272 struct btrfs_fs_info *fs_info = parent->fs_info;
273 struct btrfs_space_info *sub_group;
274 int ret;
275
276 ASSERT(parent->subgroup_id == BTRFS_SUB_GROUP_PRIMARY,
277 "parent->subgroup_id=%d", parent->subgroup_id);
278 ASSERT(id != BTRFS_SUB_GROUP_PRIMARY, "id=%d", id);
279
280 sub_group = kzalloc_obj(*sub_group, GFP_NOFS);
281 if (!sub_group)
282 return -ENOMEM;
283
284 init_space_info(fs_info, sub_group, flags);
285 parent->sub_group[index] = sub_group;
286 sub_group->parent = parent;
287 sub_group->subgroup_id = id;
288
289 ret = btrfs_sysfs_add_space_info_type(sub_group);
290 if (ret)
291 parent->sub_group[index] = NULL;
292 return ret;
293 }
294
295 static int create_space_info(struct btrfs_fs_info *info, u64 flags)
296 {
297
298 struct btrfs_space_info *space_info;
299 int ret = 0;
300
301 space_info = kzalloc_obj(*space_info, GFP_NOFS);
302 if (!space_info)
303 return -ENOMEM;
304
305 init_space_info(info, space_info, flags);
306
307 if (btrfs_is_zoned(info)) {
308 if (flags & BTRFS_BLOCK_GROUP_DATA)
309 ret = create_space_info_sub_group(space_info, flags,
310 BTRFS_SUB_GROUP_DATA_RELOC,
311 0);
312 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
313 ret = create_space_info_sub_group(space_info, flags,
314 BTRFS_SUB_GROUP_TREELOG,
315 0);
316
317 if (ret)
318 goto out_free;
319 }
320
321 ret = btrfs_sysfs_add_space_info_type(space_info);
322 if (ret)
323 return ret;
324
325 list_add(&space_info->list, &info->space_info);
326 if (flags & BTRFS_BLOCK_GROUP_DATA)
327 info->data_sinfo = space_info;
328
329 return ret;
330
331 out_free:
332 kfree(space_info);
333 return ret;
334 }
335
336 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
337 {
338 struct btrfs_super_block *disk_super;
339 u64 features;
340 u64 flags;
341 bool mixed = false;
342 int ret;
343
344 disk_super = fs_info->super_copy;
345 if (!btrfs_super_root(disk_super))
346 return -EINVAL;
347
348 features = btrfs_super_incompat_flags(disk_super);
349 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
350 mixed = true;
351
352 flags = BTRFS_BLOCK_GROUP_SYSTEM;
353 ret = create_space_info(fs_info, flags);
354 if (ret)
355 return ret;
356
357 if (mixed) {
358 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
359 ret = create_space_info(fs_info, flags);
360 if (ret)
361 return ret;
362 } else {
363 flags = BTRFS_BLOCK_GROUP_METADATA;
364 ret = create_space_info(fs_info, flags);
365 if (ret)
366 return ret;
367
368 flags = BTRFS_BLOCK_GROUP_DATA;
369 ret = create_space_info(fs_info, flags);
370 if (ret)
371 return ret;
372 }
373
374 if (features & BTRFS_FEATURE_INCOMPAT_REMAP_TREE) {
375 flags = BTRFS_BLOCK_GROUP_METADATA_REMAP;
376 ret = create_space_info(fs_info, flags);
377 }
378
379 return ret;
380 }
381
382 void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
383 struct btrfs_block_group *block_group)
384 {
385 struct btrfs_space_info *space_info = block_group->space_info;
386 int factor, index;
387
388 factor = btrfs_bg_type_to_factor(block_group->flags);
389
390 spin_lock(&space_info->lock);
391
392 if (!(block_group->flags & BTRFS_BLOCK_GROUP_REMAPPED) ||
393 block_group->identity_remap_count != 0) {
394 space_info->total_bytes += block_group->length;
395 space_info->disk_total += block_group->length * factor;
396 }
397
398 space_info->bytes_used += block_group->used;
399 space_info->disk_used += block_group->used * factor;
400 space_info->bytes_readonly += block_group->bytes_super;
401 btrfs_space_info_update_bytes_zone_unusable(space_info, block_group->zone_unusable);
402 if (block_group->length > 0)
403 space_info->full = false;
404 btrfs_try_granting_tickets(space_info);
405 spin_unlock(&space_info->lock);
406
407 block_group->space_info = space_info;
408
409 index = btrfs_bg_flags_to_raid_index(block_group->flags);
410 down_write(&space_info->groups_sem);
411 list_add_tail(&block_group->list, &space_info->block_groups[index]);
412 up_write(&space_info->groups_sem);
413 }
414
415 struct btrfs_space_info *btrfs_find_space_info(const struct btrfs_fs_info *info,
416 u64 flags)
417 {
418 const struct list_head *head = &info->space_info;
419 struct btrfs_space_info *found;
420
421 flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
422
423 list_for_each_entry(found, head, list) {
424 if (found->flags & flags)
425 return found;
426 }
427 return NULL;
428 }
429
430 static u64 calc_effective_data_chunk_size(const struct btrfs_fs_info *fs_info)
431 {
432 struct btrfs_space_info *data_sinfo;
433 u64 data_chunk_size;
434
435 /*
436 * Calculate the data_chunk_size, space_info->chunk_size is the
437 * "optimal" chunk size based on the fs size. However when we actually
438 * allocate the chunk we will strip this down further, making it no
439 * more than 10% of the disk or 1G, whichever is smaller.
440 *
441 * On the zoned mode, we need to use zone_size (= data_sinfo->chunk_size)
442 * as it is.
443 */
444 data_sinfo = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
445 if (btrfs_is_zoned(fs_info))
446 return data_sinfo->chunk_size;
447 data_chunk_size = min(data_sinfo->chunk_size,
448 mult_perc(fs_info->fs_devices->total_rw_bytes, 10));
449 return min_t(u64, data_chunk_size, SZ_1G);
450 }
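
/*
 * Illustrative example (hypothetical numbers): on a 40GiB non-zoned
 * filesystem where the data space_info chunk_size is 10GiB, the effective
 * data chunk size is min(10GiB, 10% of 40GiB, 1GiB) = 1GiB, which is what
 * calc_available_free_space() subtracts from the unallocated space below.
 */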
451
452 static u64 calc_available_free_space(const struct btrfs_space_info *space_info,
453 enum btrfs_reserve_flush_enum flush)
454 {
455 struct btrfs_fs_info *fs_info = space_info->fs_info;
456 bool has_per_profile;
457 u64 profile;
458 u64 avail;
459 u64 data_chunk_size;
460 int factor;
461
462 if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
463 profile = btrfs_system_alloc_profile(fs_info);
464 else
465 profile = btrfs_metadata_alloc_profile(fs_info);
466
467 has_per_profile = btrfs_get_per_profile_avail(fs_info, profile, &avail);
468 if (!has_per_profile) {
469 avail = atomic64_read(&fs_info->free_chunk_space);
470
471 /*
472 * If we have dup, raid1 or raid10 then only half of the free
473 * space is actually usable. For raid56, the space info used
474 * doesn't include the parity drive, so we don't have to
475 * change the math
476 */
477 factor = btrfs_bg_type_to_factor(profile);
478 avail = div_u64(avail, factor);
479 if (avail == 0)
480 return 0;
481 }
482 data_chunk_size = calc_effective_data_chunk_size(fs_info);
483
484 /*
485 * Since data allocations immediately use block groups as part of the
486 * reservation, because we assume that data reservations will == actual
487 * usage, we could potentially overcommit and then immediately have that
488 * available space used by a data allocation, which could put us in a
489 * bind when we get close to filling the file system.
490 *
491 * To handle this simply remove the data_chunk_size from the available
492 * space. If we are relatively empty this won't affect our ability to
493 * overcommit much, and if we're very close to full it'll keep us from
494 * getting into a position where we've given ourselves very little
495 * metadata wiggle room.
496 */
497 if (avail <= data_chunk_size)
498 return 0;
499 avail -= data_chunk_size;
500
501 /*
502 * If we aren't flushing all things, let us overcommit up to
503 * half of the space. If we can flush, don't let us overcommit
504 * too much, let it overcommit up to 1/64th of the space.
505 */
506 if (flush == BTRFS_RESERVE_FLUSH_ALL || flush == BTRFS_RESERVE_FLUSH_ALL_STEAL)
507 avail >>= 6;
508 else
509 avail >>= 1;
510
511 /*
512 * On the zoned mode, we always allocate one zone as one chunk.
513 * Returning non-zone size aligned bytes here will result in
514 * less pressure for the async metadata reclaim process, and it
515 * will over-commit too much leading to ENOSPC. Align down to the
516 * zone size to avoid that.
517 */
518 if (btrfs_is_zoned(fs_info))
519 avail = ALIGN_DOWN(avail, fs_info->zone_size);
520
521 return avail;
522 }
523
524 static inline bool check_can_overcommit(const struct btrfs_space_info *space_info,
525 u64 space_info_used_bytes, u64 bytes,
526 enum btrfs_reserve_flush_enum flush)
527 {
528 const u64 avail = calc_available_free_space(space_info, flush);
529
530 return (space_info_used_bytes + bytes < space_info->total_bytes + avail);
531 }
532
533 static inline bool can_overcommit(const struct btrfs_space_info *space_info,
534 u64 space_info_used_bytes, u64 bytes,
535 enum btrfs_reserve_flush_enum flush)
536 {
537 /* Don't overcommit when in mixed mode. */
538 if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
539 return false;
540
541 return check_can_overcommit(space_info, space_info_used_bytes, bytes, flush);
542 }
543
544 bool btrfs_can_overcommit(const struct btrfs_space_info *space_info, u64 bytes,
545 enum btrfs_reserve_flush_enum flush)
546 {
547 u64 used;
548
549 /* Don't overcommit when in mixed mode */
550 if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
551 return false;
552
553 used = btrfs_space_info_used(space_info, true);
554
555 return check_can_overcommit(space_info, used, bytes, flush);
556 }
557
558 static void remove_ticket(struct btrfs_space_info *space_info,
559 struct reserve_ticket *ticket, int error)
560 {
561 lockdep_assert_held(&space_info->lock);
562
563 if (!list_empty(&ticket->list)) {
564 list_del_init(&ticket->list);
565 ASSERT(space_info->reclaim_size >= ticket->bytes,
566 "space_info->reclaim_size=%llu ticket->bytes=%llu",
567 space_info->reclaim_size, ticket->bytes);
568 space_info->reclaim_size -= ticket->bytes;
569 }
570
571 spin_lock(&ticket->lock);
572 /*
573 * If we are called from a task waiting on the ticket, it may happen
574 * that before it sets an error on the ticket, a reclaim task was able
575 * to satisfy the ticket. In that case ignore the error.
576 */
577 if (error && ticket->bytes > 0)
578 ticket->error = error;
579 else
580 ticket->bytes = 0;
581
582 wake_up(&ticket->wait);
583 spin_unlock(&ticket->lock);
584 }
585
586 /*
587 * This is for space we already have accounted in space_info->bytes_may_use, so
588 * basically when we're returning space from block_rsv's.
589 */
590 void btrfs_try_granting_tickets(struct btrfs_space_info *space_info)
591 {
592 struct list_head *head;
593 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
594 u64 used = btrfs_space_info_used(space_info, true);
595
596 lockdep_assert_held(&space_info->lock);
597
598 head = &space_info->priority_tickets;
599 again:
600 while (!list_empty(head)) {
601 struct reserve_ticket *ticket;
602 u64 used_after;
603
604 ticket = list_first_entry(head, struct reserve_ticket, list);
605 used_after = used + ticket->bytes;
606
607 /* Check and see if our ticket can be satisfied now. */
608 if (used_after <= space_info->total_bytes ||
609 can_overcommit(space_info, used, ticket->bytes, flush)) {
610 btrfs_space_info_update_bytes_may_use(space_info, ticket->bytes);
611 remove_ticket(space_info, ticket, 0);
612 space_info->tickets_id++;
613 used = used_after;
614 } else {
615 break;
616 }
617 }
618
619 if (head == &space_info->priority_tickets) {
620 head = &space_info->tickets;
621 flush = BTRFS_RESERVE_FLUSH_ALL;
622 goto again;
623 }
624 }
625
626 #define DUMP_BLOCK_RSV(fs_info, rsv_name) \
627 do { \
628 struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name; \
629 spin_lock(&__rsv->lock); \
630 btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu", \
631 __rsv->size, __rsv->reserved); \
632 spin_unlock(&__rsv->lock); \
633 } while (0)
634
635 static void dump_global_block_rsv(struct btrfs_fs_info *fs_info)
636 {
637 DUMP_BLOCK_RSV(fs_info, global_block_rsv);
638 DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
639 DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
640 DUMP_BLOCK_RSV(fs_info, remap_block_rsv);
641 DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
642 DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
643 }
644
645 static void __btrfs_dump_space_info(const struct btrfs_space_info *info)
646 {
647 const struct btrfs_fs_info *fs_info = info->fs_info;
648 const char *flag_str = btrfs_space_info_type_str(info);
649 lockdep_assert_held(&info->lock);
650
651 /* The free space could be negative in case of overcommit */
652 btrfs_info(fs_info,
653 "space_info %s (sub-group id %d) has %lld free, is %sfull",
654 flag_str, info->subgroup_id,
655 (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
656 info->full ? "" : "not ");
657 btrfs_info(fs_info,
658 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
659 info->total_bytes, info->bytes_used, info->bytes_pinned,
660 info->bytes_reserved, info->bytes_may_use,
661 info->bytes_readonly, info->bytes_zone_unusable);
662 }
663
664 void btrfs_dump_space_info(struct btrfs_space_info *info, u64 bytes,
665 bool dump_block_groups)
666 {
667 struct btrfs_fs_info *fs_info = info->fs_info;
668 struct btrfs_block_group *cache;
669 u64 total_avail = 0;
670 int index = 0;
671
672 spin_lock(&info->lock);
673 __btrfs_dump_space_info(info);
674 dump_global_block_rsv(fs_info);
675 spin_unlock(&info->lock);
676
677 if (!dump_block_groups)
678 return;
679
680 down_read(&info->groups_sem);
681 again:
682 list_for_each_entry(cache, &info->block_groups[index], list) {
683 u64 avail;
684
685 spin_lock(&cache->lock);
686 avail = btrfs_block_group_available_space(cache);
687 btrfs_info(fs_info,
688 "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s",
689 cache->start, cache->length, cache->used, cache->pinned,
690 cache->reserved, cache->delalloc_bytes,
691 cache->bytes_super, cache->zone_unusable,
692 avail, cache->ro ? "[readonly]" : "");
693 spin_unlock(&cache->lock);
694 btrfs_dump_free_space(cache, bytes);
695 total_avail += avail;
696 }
697 if (++index < BTRFS_NR_RAID_TYPES)
698 goto again;
699 up_read(&info->groups_sem);
700
701 btrfs_info(fs_info, "%llu bytes available across all block groups", total_avail);
702 }
703
704 static inline u64 calc_reclaim_items_nr(const struct btrfs_fs_info *fs_info,
705 u64 to_reclaim)
706 {
707 u64 bytes;
708 u64 nr;
709
710 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
711 nr = div64_u64(to_reclaim, bytes);
712 if (!nr)
713 nr = 1;
714 return nr;
715 }
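
/*
 * For example (hypothetical numbers): if a single item insertion reserves
 * 256KiB worth of metadata, a request to reclaim 10MiB maps to roughly 40
 * items for the delayed item and delayed ref flushing below.
 */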
716
717 /*
718 * shrink metadata reservation for delalloc
719 */
720 static void shrink_delalloc(struct btrfs_space_info *space_info,
721 u64 to_reclaim, bool wait_ordered,
722 bool for_preempt)
723 {
724 struct btrfs_fs_info *fs_info = space_info->fs_info;
725 struct btrfs_trans_handle *trans;
726 u64 delalloc_bytes;
727 u64 ordered_bytes;
728 u64 items;
729 long time_left;
730 int loops;
731
732 delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
733 ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
734 if (delalloc_bytes == 0 && ordered_bytes == 0)
735 return;
736
737 /* Calc the number of pages we need to flush for space reservation */
738 if (to_reclaim == U64_MAX) {
739 items = U64_MAX;
740 } else {
741 /*
742 * to_reclaim is set to however much metadata we need to
743 * reclaim, but reclaiming that much data doesn't really track
744 * exactly. What we really want to do is reclaim full inode's
745 * worth of reservations, however that's not available to us
746 * here. We will take a fraction of the delalloc bytes for our
747 * flushing loops and hope for the best. Delalloc will expand
748 * the amount we write to cover an entire dirty extent, which
749 * will reclaim the metadata reservation for that range. If
750 * it's not enough subsequent flush stages will be more
751 * aggressive.
752 */
753 to_reclaim = max(to_reclaim, delalloc_bytes >> 3);
754 items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
755 }
756
757 trans = current->journal_info;
758
759 /*
760 * If we are doing more ordered than delalloc we need to just wait on
761 * ordered extents, otherwise we'll waste time trying to flush delalloc
762 * that likely won't give us the space back we need.
763 */
764 if (ordered_bytes > delalloc_bytes && !for_preempt)
765 wait_ordered = true;
766
767 loops = 0;
768 while ((delalloc_bytes || ordered_bytes) && loops < 3) {
769 u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
770 long nr_pages = min_t(u64, temp, LONG_MAX);
771 int async_pages;
772
773 btrfs_start_delalloc_roots(fs_info, nr_pages, true);
774
775 /*
776 * We need to make sure any outstanding async pages are now
777 * processed before we continue. This is because things like
778 * sync_inode() try to be smart and skip writing if the inode is
779 * marked clean. We don't use filemap_fwrite for flushing
780 * because we want to control how many pages we write out at a
781 * time, thus this is the only safe way to make sure we've
782 * waited for outstanding compressed workers to have started
783 * their jobs and thus have ordered extents set up properly.
784 *
785 * This exists because we do not want to wait for each
786 * individual inode to finish its async work, we simply want to
787 * start the IO on everybody, and then come back here and wait
788 * for all of the async work to catch up. Once we're done with
789 * that we know we'll have ordered extents for everything and we
790 * can decide if we wait for that or not.
791 *
792 * If we choose to replace this in the future, make absolutely
793 * sure that the proper waiting is being done in the async case,
794 * as there have been bugs in that area before.
795 */
796 async_pages = atomic_read(&fs_info->async_delalloc_pages);
797 if (!async_pages)
798 goto skip_async;
799
800 /*
801 * We don't want to wait forever; if we wrote fewer pages in this
802 * loop than we have outstanding, only wait for that number of
803 * pages, otherwise we can wait for all async pages to finish
804 * before continuing.
805 */
806 if (async_pages > nr_pages)
807 async_pages -= nr_pages;
808 else
809 async_pages = 0;
810 wait_event(fs_info->async_submit_wait,
811 atomic_read(&fs_info->async_delalloc_pages) <=
812 async_pages);
813 skip_async:
814 loops++;
815 if (wait_ordered && !trans) {
816 btrfs_wait_ordered_roots(fs_info, items, NULL);
817 } else {
818 time_left = schedule_timeout_killable(1);
819 if (time_left)
820 break;
821 }
822
823 /*
824 * If we are for preemption we just want a one-shot of delalloc
825 * flushing so we can stop flushing if we decide we don't need
826 * to anymore.
827 */
828 if (for_preempt)
829 break;
830
831 spin_lock(&space_info->lock);
832 if (list_empty(&space_info->tickets) &&
833 list_empty(&space_info->priority_tickets)) {
834 spin_unlock(&space_info->lock);
835 break;
836 }
837 spin_unlock(&space_info->lock);
838
839 delalloc_bytes = percpu_counter_sum_positive(
840 &fs_info->delalloc_bytes);
841 ordered_bytes = percpu_counter_sum_positive(
842 &fs_info->ordered_bytes);
843 }
844 }
845
846 /*
847 * Try to flush some data based on policy set by @state. This is only advisory
848 * and may fail for various reasons. The caller is supposed to examine the
849 * state of @space_info to detect the outcome.
850 */
851 static void flush_space(struct btrfs_space_info *space_info, u64 num_bytes,
852 enum btrfs_flush_state state, bool for_preempt)
853 {
854 struct btrfs_fs_info *fs_info = space_info->fs_info;
855 struct btrfs_root *root = fs_info->tree_root;
856 struct btrfs_trans_handle *trans;
857 int nr;
858 int ret = 0;
859
860 switch (state) {
861 case FLUSH_DELAYED_ITEMS_NR:
862 case FLUSH_DELAYED_ITEMS:
863 if (state == FLUSH_DELAYED_ITEMS_NR)
864 nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
865 else
866 nr = -1;
867
868 trans = btrfs_join_transaction_nostart(root);
869 if (IS_ERR(trans)) {
870 ret = PTR_ERR(trans);
871 if (ret == -ENOENT)
872 ret = 0;
873 break;
874 }
875 ret = btrfs_run_delayed_items_nr(trans, nr);
876 btrfs_end_transaction(trans);
877 break;
878 case FLUSH_DELALLOC:
879 case FLUSH_DELALLOC_WAIT:
880 case FLUSH_DELALLOC_FULL:
881 if (state == FLUSH_DELALLOC_FULL)
882 num_bytes = U64_MAX;
883 shrink_delalloc(space_info, num_bytes,
884 state != FLUSH_DELALLOC, for_preempt);
885 break;
886 case FLUSH_DELAYED_REFS_NR:
887 case FLUSH_DELAYED_REFS:
888 trans = btrfs_join_transaction_nostart(root);
889 if (IS_ERR(trans)) {
890 ret = PTR_ERR(trans);
891 if (ret == -ENOENT)
892 ret = 0;
893 break;
894 }
895 if (state == FLUSH_DELAYED_REFS_NR)
896 btrfs_run_delayed_refs(trans, num_bytes);
897 else
898 btrfs_run_delayed_refs(trans, 0);
899 btrfs_end_transaction(trans);
900 break;
901 case ALLOC_CHUNK:
902 case ALLOC_CHUNK_FORCE:
903 trans = btrfs_join_transaction(root);
904 if (IS_ERR(trans)) {
905 ret = PTR_ERR(trans);
906 break;
907 }
908 ret = btrfs_chunk_alloc(trans, space_info,
909 btrfs_get_alloc_profile(fs_info, space_info->flags),
910 (state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
911 CHUNK_ALLOC_FORCE);
912 btrfs_end_transaction(trans);
913
914 if (ret > 0 || ret == -ENOSPC)
915 ret = 0;
916 break;
917 case RECLAIM_ZONES:
918 if (btrfs_is_zoned(fs_info)) {
919 btrfs_reclaim_sweep(fs_info);
920 btrfs_delete_unused_bgs(fs_info);
921 btrfs_reclaim_block_groups(fs_info,
922 BTRFS_ZONED_SYNC_RECLAIM_BATCH);
923 ASSERT(current->journal_info == NULL);
924 ret = btrfs_commit_current_transaction(root);
925 } else {
926 ret = 0;
927 }
928 break;
929 case RUN_DELAYED_IPUTS:
930 /*
931 * If we have pending delayed iputs then we could free up a
932 * bunch of pinned space, so make sure we run the iputs before
933 * we do our pinned bytes check below.
934 */
935 btrfs_run_delayed_iputs(fs_info);
936 btrfs_wait_on_delayed_iputs(fs_info);
937 break;
938 case COMMIT_TRANS:
939 ASSERT(current->journal_info == NULL);
940 /*
941 * We don't want to start a new transaction, just attach to the
942 * current one or wait it fully commits in case its commit is
943 * happening at the moment. Note: we don't use a nostart join
944 * because that does not wait for a transaction to fully commit
945 * (only for it to be unblocked, state TRANS_STATE_UNBLOCKED).
946 */
947 ret = btrfs_commit_current_transaction(root);
948 break;
949 case RESET_ZONES:
950 ret = btrfs_reset_unused_block_groups(space_info, num_bytes);
951 break;
952 default:
953 ret = -ENOSPC;
954 break;
955 }
956
957 trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
958 ret, for_preempt);
959 return;
960 }
961
962 static u64 btrfs_calc_reclaim_metadata_size(const struct btrfs_space_info *space_info)
963 {
964 u64 used;
965 u64 avail;
966 u64 to_reclaim = space_info->reclaim_size;
967
968 lockdep_assert_held(&space_info->lock);
969
970 avail = calc_available_free_space(space_info, BTRFS_RESERVE_FLUSH_ALL);
971 used = btrfs_space_info_used(space_info, true);
972
973 /*
974 * We may be flushing because suddenly we have less space than we had
975 * before, and now we're well over-committed based on our current free
976 * space. If that's the case add in our overage so we make sure to put
977 * appropriate pressure on the flushing state machine.
978 */
979 if (space_info->total_bytes + avail < used)
980 to_reclaim += used - (space_info->total_bytes + avail);
981
982 return to_reclaim;
983 }
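
/*
 * Illustrative example (hypothetical numbers): with reclaim_size = 64MiB,
 * total_bytes = 8GiB, avail = 0 and used = 8.5GiB (we lost space and are now
 * over-committed), to_reclaim becomes 64MiB + 512MiB, so the flushers work
 * off the overage as well and not just the queued tickets.
 */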
984
985 static bool need_preemptive_reclaim(const struct btrfs_space_info *space_info)
986 {
987 struct btrfs_fs_info *fs_info = space_info->fs_info;
988 const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv);
989 u64 ordered, delalloc;
990 u64 thresh;
991 u64 used;
992
993 lockdep_assert_held(&space_info->lock);
994
995 /*
996 * We have tickets queued, bail so we don't compete with the async
997 * flushers.
998 */
999 if (space_info->reclaim_size)
1000 return false;
1001
1002 thresh = mult_perc(space_info->total_bytes, 90);
1003
1004 /* If we're just plain full then async reclaim just slows us down. */
1005 if ((space_info->bytes_used + space_info->bytes_reserved +
1006 global_rsv_size) >= thresh)
1007 return false;
1008
1009 used = space_info->bytes_may_use + space_info->bytes_pinned;
1010
1011 /* The total flushable belongs to the global rsv, don't flush. */
1012 if (global_rsv_size >= used)
1013 return false;
1014
1015 /*
1016 * 128MiB is 1/4 of the maximum global rsv size. If we have less than
1017 * that devoted to other reservations then there's no sense in flushing,
1018 * we don't have a lot of things that need flushing.
1019 */
1020 if (used - global_rsv_size <= SZ_128M)
1021 return false;
1022
1023 /*
1024 * If we have over half of the free space occupied by reservations or
1025 * pinned then we want to start flushing.
1026 *
1027 * We do not do the traditional thing here, which is to say
1028 *
1029 * if (used >= ((total_bytes + avail) / 2))
1030 * return 1;
1031 *
1032 * because this doesn't quite work how we want. If we had more than 50%
1033 * of the space_info used by bytes_used and we had 0 available we'd just
1034 * constantly run the background flusher. Instead we want it to kick in
1035 * if our reclaimable space exceeds our clamped free space.
1036 *
1037 * Our clamping range is 2^1 -> 2^8. Practically speaking that means
1038 * the following:
1039 *
1040 * Amount of RAM Minimum threshold Maximum threshold
1041 *
1042 * 256GiB 1GiB 128GiB
1043 * 128GiB 512MiB 64GiB
1044 * 64GiB 256MiB 32GiB
1045 * 32GiB 128MiB 16GiB
1046 * 16GiB 64MiB 8GiB
1047 *
1048 * These are the range our thresholds will fall in, corresponding to how
1049 * much delalloc we need for the background flusher to kick in.
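 *
 * As a hypothetical example: with total_bytes = 10GiB, 4GiB tied up in
 * bytes_used + bytes_reserved + bytes_readonly + the global rsv, 2GiB of
 * "avail" and a clamp of 3, thresh starts at 2GiB + 6GiB = 8GiB and is
 * shifted down to 1GiB, so preemptive flushing kicks in once roughly 1GiB
 * of reclaimable reservations has built up.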
1050 */
1051
1052 thresh = calc_available_free_space(space_info, BTRFS_RESERVE_FLUSH_ALL);
1053 used = space_info->bytes_used + space_info->bytes_reserved +
1054 space_info->bytes_readonly + global_rsv_size;
1055 if (used < space_info->total_bytes)
1056 thresh += space_info->total_bytes - used;
1057 thresh >>= space_info->clamp;
1058
1059 used = space_info->bytes_pinned;
1060
1061 /*
1062 * If we have more ordered bytes than delalloc bytes then we're either
1063 * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
1064 * around. Preemptive flushing is only useful in that it can free up
1065 * space before tickets need to wait for things to finish. In the case
1066 * of ordered extents, preemptively waiting on ordered extents gets us
1067 * nothing, if our reservations are tied up in ordered extents we'll
1068 * simply have to slow down writers by forcing them to wait on ordered
1069 * extents.
1070 *
1071 * In the case that ordered is larger than delalloc, only include the
1072 * block reserves that we would actually be able to directly reclaim
1073 * from. In this case if we're heavy on metadata operations this will
1074 * clearly be heavy enough to warrant preemptive flushing. In the case
1075 * of heavy DIO or ordered reservations, preemptive flushing will just
1076 * waste time and cause us to slow down.
1077 *
1078 * We want to make sure we truly are maxed out on ordered however, so
1079 * cut ordered in half, and if it's still higher than delalloc then we
1080 * can keep flushing. This is to avoid the case where we start
1081 * flushing, and now delalloc == ordered and we stop preemptively
1082 * flushing when we could still have several gigs of delalloc to flush.
1083 */
1084 ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
1085 delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
1086 if (ordered >= delalloc)
1087 used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) +
1088 btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv);
1089 else
1090 used += space_info->bytes_may_use - global_rsv_size;
1091
1092 return (used >= thresh && !btrfs_fs_closing(fs_info) &&
1093 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
1094 }
1095
1096 static bool steal_from_global_rsv(struct btrfs_space_info *space_info,
1097 struct reserve_ticket *ticket)
1098 {
1099 struct btrfs_fs_info *fs_info = space_info->fs_info;
1100 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
1101 u64 min_bytes;
1102
1103 lockdep_assert_held(&space_info->lock);
1104
1105 if (!ticket->steal)
1106 return false;
1107
1108 if (global_rsv->space_info != space_info)
1109 return false;
1110
1111 spin_lock(&global_rsv->lock);
1112 min_bytes = mult_perc(global_rsv->size, 10);
1113 if (global_rsv->reserved < min_bytes + ticket->bytes) {
1114 spin_unlock(&global_rsv->lock);
1115 return false;
1116 }
1117 global_rsv->reserved -= ticket->bytes;
1118 if (global_rsv->reserved < global_rsv->size)
1119 global_rsv->full = false;
1120 spin_unlock(&global_rsv->lock);
1121
1122 remove_ticket(space_info, ticket, 0);
1123 space_info->tickets_id++;
1124
1125 return true;
1126 }
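
/*
 * For example (hypothetical numbers): with a 512MiB global rsv that is fully
 * reserved, a 32MiB ticket may steal because 10% of the size (51.2MiB) plus
 * 32MiB is still within the reserved amount, while a 480MiB ticket would be
 * refused.
 */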
1127
1128 /*
1129 * We've exhausted our flushing, start failing tickets.
1130 *
1131 * @space_info - the space info we were flushing
1132 *
1133 * We call this when we've exhausted our flushing ability and haven't made
1134 * progress in satisfying tickets. The reservation code handles tickets in
1135 * order, so if there is a large ticket first and then smaller ones we could
1136 * very well satisfy the smaller tickets. This will attempt to wake up any
1137 * tickets in the list to catch this case.
1138 *
1139 * This function returns true if it was able to make progress by clearing out
1140 * other tickets, or if it stumbles across a ticket that was smaller than the
1141 * first ticket.
1142 */
1143 static bool maybe_fail_all_tickets(struct btrfs_space_info *space_info)
1144 {
1145 struct btrfs_fs_info *fs_info = space_info->fs_info;
1146 struct reserve_ticket *ticket;
1147 u64 tickets_id = space_info->tickets_id;
1148 const int abort_error = BTRFS_FS_ERROR(fs_info);
1149
1150 trace_btrfs_fail_all_tickets(fs_info, space_info);
1151
1152 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
1153 btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
1154 __btrfs_dump_space_info(space_info);
1155 }
1156
1157 while (!list_empty(&space_info->tickets) &&
1158 tickets_id == space_info->tickets_id) {
1159 ticket = list_first_entry(&space_info->tickets,
1160 struct reserve_ticket, list);
1161 if (unlikely(abort_error)) {
1162 remove_ticket(space_info, ticket, abort_error);
1163 } else {
1164 if (steal_from_global_rsv(space_info, ticket))
1165 return true;
1166
1167 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1168 btrfs_info(fs_info, "failing ticket with %llu bytes",
1169 ticket->bytes);
1170
1171 remove_ticket(space_info, ticket, -ENOSPC);
1172
1173 /*
1174 * We're just throwing tickets away, so more flushing may
1175 * not trip over btrfs_try_granting_tickets, so we need
1176 * to call it here to see if we can make progress with
1177 * the next ticket in the list.
1178 */
1179 btrfs_try_granting_tickets(space_info);
1180 }
1181 }
1182 return (tickets_id != space_info->tickets_id);
1183 }
1184
1185 static void do_async_reclaim_metadata_space(struct btrfs_space_info *space_info)
1186 {
1187 struct btrfs_fs_info *fs_info = space_info->fs_info;
1188 u64 to_reclaim;
1189 enum btrfs_flush_state flush_state;
1190 int commit_cycles = 0;
1191 u64 last_tickets_id;
1192 enum btrfs_flush_state final_state;
1193
1194 if (btrfs_is_zoned(fs_info))
1195 final_state = RESET_ZONES;
1196 else
1197 final_state = COMMIT_TRANS;
1198
1199 spin_lock(&space_info->lock);
1200 to_reclaim = btrfs_calc_reclaim_metadata_size(space_info);
1201 if (!to_reclaim) {
1202 space_info->flush = false;
1203 spin_unlock(&space_info->lock);
1204 return;
1205 }
1206 last_tickets_id = space_info->tickets_id;
1207 spin_unlock(&space_info->lock);
1208
1209 flush_state = FLUSH_DELAYED_ITEMS_NR;
1210 do {
1211 flush_space(space_info, to_reclaim, flush_state, false);
1212 spin_lock(&space_info->lock);
1213 if (list_empty(&space_info->tickets)) {
1214 space_info->flush = false;
1215 spin_unlock(&space_info->lock);
1216 return;
1217 }
1218 to_reclaim = btrfs_calc_reclaim_metadata_size(space_info);
1219 if (last_tickets_id == space_info->tickets_id) {
1220 flush_state++;
1221 } else {
1222 last_tickets_id = space_info->tickets_id;
1223 flush_state = FLUSH_DELAYED_ITEMS_NR;
1224 if (commit_cycles)
1225 commit_cycles--;
1226 }
1227
1228 /*
1229 * We do not want to empty the system of delalloc unless we're
1230 * under heavy pressure, so allow one trip through the flushing
1231 * logic before we start doing a FLUSH_DELALLOC_FULL.
1232 */
1233 if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles)
1234 flush_state++;
1235
1236 /*
1237 * We don't want to force a chunk allocation until we've tried
1238 * pretty hard to reclaim space. Think of the case where we
1239 * freed up a bunch of space and so have a lot of pinned space
1240 * to reclaim. We would rather use that than possibly create an
1241 * underutilized metadata chunk. So if this is our first run
1242 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
1243 * commit the transaction. If nothing has changed the next go
1244 * around then we can force a chunk allocation.
1245 */
1246 if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
1247 flush_state++;
1248
1249 if (flush_state > final_state) {
1250 commit_cycles++;
1251 if (commit_cycles > 2) {
1252 if (maybe_fail_all_tickets(space_info)) {
1253 flush_state = FLUSH_DELAYED_ITEMS_NR;
1254 commit_cycles--;
1255 } else {
1256 space_info->flush = false;
1257 }
1258 } else {
1259 flush_state = FLUSH_DELAYED_ITEMS_NR;
1260 }
1261 }
1262 spin_unlock(&space_info->lock);
1263 } while (flush_state <= final_state);
1264 }
1265
1266 /*
1267 * This is for normal flushers, it can wait as much time as needed. We will
1268 * loop and continuously try to flush as long as we are making progress. We
1269 * count progress as clearing off tickets each time we have to loop.
1270 */
1271 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
1272 {
1273 struct btrfs_fs_info *fs_info;
1274 struct btrfs_space_info *space_info;
1275
1276 fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
1277 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1278 do_async_reclaim_metadata_space(space_info);
1279 for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++) {
1280 if (space_info->sub_group[i])
1281 do_async_reclaim_metadata_space(space_info->sub_group[i]);
1282 }
1283 }
1284
1285 /*
1286 * This handles pre-flushing of metadata space before we get to the point that
1287 * we need to start blocking threads on tickets. The logic here is different
1288 * from the other flush paths because it doesn't rely on tickets to tell us how
1289 * much we need to flush; instead it attempts to keep us below the 80% full
1290 * watermark of space by flushing whichever reservation pool is currently the
1291 * largest.
1292 */
1293 static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
1294 {
1295 struct btrfs_fs_info *fs_info;
1296 struct btrfs_space_info *space_info;
1297 struct btrfs_block_rsv *delayed_block_rsv;
1298 struct btrfs_block_rsv *delayed_refs_rsv;
1299 struct btrfs_block_rsv *global_rsv;
1300 struct btrfs_block_rsv *trans_rsv;
1301 int loops = 0;
1302
1303 fs_info = container_of(work, struct btrfs_fs_info,
1304 preempt_reclaim_work);
1305 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1306 delayed_block_rsv = &fs_info->delayed_block_rsv;
1307 delayed_refs_rsv = &fs_info->delayed_refs_rsv;
1308 global_rsv = &fs_info->global_block_rsv;
1309 trans_rsv = &fs_info->trans_block_rsv;
1310
1311 spin_lock(&space_info->lock);
1312 while (need_preemptive_reclaim(space_info)) {
1313 enum btrfs_flush_state flush;
1314 u64 delalloc_size = 0;
1315 u64 to_reclaim, block_rsv_size;
1316 const u64 global_rsv_size = btrfs_block_rsv_reserved(global_rsv);
1317 const u64 bytes_may_use = space_info->bytes_may_use;
1318 const u64 bytes_pinned = space_info->bytes_pinned;
1319
1320 spin_unlock(&space_info->lock);
1321 /*
1322 * We don't have a precise counter for the metadata being
1323 * reserved for delalloc, so we'll approximate it by subtracting
1324 * out the block rsv's space from the bytes_may_use. If that
1325 * amount is higher than the individual reserves, then we can
1326 * assume it's tied up in delalloc reservations.
1327 */
1328 block_rsv_size = global_rsv_size +
1329 btrfs_block_rsv_reserved(delayed_block_rsv) +
1330 btrfs_block_rsv_reserved(delayed_refs_rsv) +
1331 btrfs_block_rsv_reserved(trans_rsv);
1332 if (block_rsv_size < bytes_may_use)
1333 delalloc_size = bytes_may_use - block_rsv_size;
1334
1335 /*
1336 * We don't want to include the global_rsv in our calculation,
1337 * because that's space we can't touch. Subtract it from the
1338 * block_rsv_size for the next checks.
1339 */
1340 block_rsv_size -= global_rsv_size;
1341
1342 /*
1343 * We really want to avoid flushing delalloc too much, as it
1344 * could result in poor allocation patterns, so only flush it if
1345 * it's larger than the rest of the pools combined.
1346 */
1347 if (delalloc_size > block_rsv_size) {
1348 to_reclaim = delalloc_size;
1349 flush = FLUSH_DELALLOC;
1350 } else if (bytes_pinned >
1351 (btrfs_block_rsv_reserved(delayed_block_rsv) +
1352 btrfs_block_rsv_reserved(delayed_refs_rsv))) {
1353 to_reclaim = bytes_pinned;
1354 flush = COMMIT_TRANS;
1355 } else if (btrfs_block_rsv_reserved(delayed_block_rsv) >
1356 btrfs_block_rsv_reserved(delayed_refs_rsv)) {
1357 to_reclaim = btrfs_block_rsv_reserved(delayed_block_rsv);
1358 flush = FLUSH_DELAYED_ITEMS_NR;
1359 } else {
1360 to_reclaim = btrfs_block_rsv_reserved(delayed_refs_rsv);
1361 flush = FLUSH_DELAYED_REFS_NR;
1362 }
1363
1364 loops++;
1365
1366 /*
1367 * We don't want to reclaim everything, just a portion, so scale
1368 * down the to_reclaim by 1/4. If it takes us down to 0,
1369 * reclaim 1 item's worth.
1370 */
1371 to_reclaim >>= 2;
1372 if (!to_reclaim)
1373 to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
1374 flush_space(space_info, to_reclaim, flush, true);
1375 cond_resched();
1376 spin_lock(&space_info->lock);
1377 }
1378
1379 /* We only went through once, back off our clamping. */
1380 if (loops == 1 && !space_info->reclaim_size)
1381 space_info->clamp = max(1, space_info->clamp - 1);
1382 trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
1383 spin_unlock(&space_info->lock);
1384 }
1385
1386 /*
1387 * FLUSH_DELALLOC_WAIT:
1388 * Space is freed from flushing delalloc in one of two ways.
1389 *
1390 * 1) compression is on and we allocate less space than we reserved
1391 * 2) we are overwriting existing space
1392 *
1393 * For #1 that extra space is reclaimed as soon as the delalloc pages are
1394 * COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
1395 * length to ->bytes_reserved, and subtracts the reserved space from
1396 * ->bytes_may_use.
1397 *
1398 * For #2 this is trickier. Once the ordered extent runs we will drop the
1399 * extent in the range we are overwriting, which creates a delayed ref for
1400 * that freed extent. This however is not reclaimed until the transaction
1401 * commits, thus the next stages.
1402 *
1403 * RUN_DELAYED_IPUTS
1404 * If we are freeing inodes, we want to make sure all delayed iputs have
1405 * completed, because they could have been on an inode with i_nlink == 0, and
1406 * thus have been truncated and freed up space. But again this space is not
1407 * immediately reusable, it comes in the form of a delayed ref, which must be
1408 * run and then the transaction must be committed.
1409 *
1410 * COMMIT_TRANS
1411 * This is where we reclaim all of the pinned space generated by running the
1412 * iputs
1413 *
1414 * RESET_ZONES
1415 * This state works only for the zoned mode. We scan the unused block group
1416 * list and reset the zones and reuse the block group.
1417 *
1418 * ALLOC_CHUNK_FORCE
1419 * For data we start with alloc chunk force, however we could have been full
1420 * before, and then the transaction commit could have freed new block groups,
1421 * so if we now have space to allocate do the force chunk allocation.
1422 */
1423 static const enum btrfs_flush_state data_flush_states[] = {
1424 FLUSH_DELALLOC_FULL,
1425 RUN_DELAYED_IPUTS,
1426 COMMIT_TRANS,
1427 RECLAIM_ZONES,
1428 RESET_ZONES,
1429 ALLOC_CHUNK_FORCE,
1430 };
1431
1432 static void do_async_reclaim_data_space(struct btrfs_space_info *space_info)
1433 {
1434 struct btrfs_fs_info *fs_info = space_info->fs_info;
1435 u64 last_tickets_id;
1436 enum btrfs_flush_state flush_state = 0;
1437
1438 spin_lock(&space_info->lock);
1439 if (list_empty(&space_info->tickets)) {
1440 space_info->flush = false;
1441 spin_unlock(&space_info->lock);
1442 return;
1443 }
1444 last_tickets_id = space_info->tickets_id;
1445 spin_unlock(&space_info->lock);
1446
1447 while (!space_info->full) {
1448 flush_space(space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1449 spin_lock(&space_info->lock);
1450 if (list_empty(&space_info->tickets)) {
1451 space_info->flush = false;
1452 spin_unlock(&space_info->lock);
1453 return;
1454 }
1455
1456 /* Something happened, fail everything and bail. */
1457 if (unlikely(BTRFS_FS_ERROR(fs_info)))
1458 goto aborted_fs;
1459 last_tickets_id = space_info->tickets_id;
1460 spin_unlock(&space_info->lock);
1461 }
1462
1463 while (flush_state < ARRAY_SIZE(data_flush_states)) {
1464 flush_space(space_info, U64_MAX,
1465 data_flush_states[flush_state], false);
1466 spin_lock(&space_info->lock);
1467 if (list_empty(&space_info->tickets)) {
1468 space_info->flush = false;
1469 spin_unlock(&space_info->lock);
1470 return;
1471 }
1472
1473 if (last_tickets_id == space_info->tickets_id) {
1474 flush_state++;
1475 } else {
1476 last_tickets_id = space_info->tickets_id;
1477 flush_state = 0;
1478 }
1479
1480 if (flush_state >= ARRAY_SIZE(data_flush_states)) {
1481 if (space_info->full) {
1482 if (maybe_fail_all_tickets(space_info))
1483 flush_state = 0;
1484 else
1485 space_info->flush = false;
1486 } else {
1487 flush_state = 0;
1488 }
1489
1490 /* Something happened, fail everything and bail. */
1491 if (unlikely(BTRFS_FS_ERROR(fs_info)))
1492 goto aborted_fs;
1493
1494 }
1495 spin_unlock(&space_info->lock);
1496 }
1497 return;
1498
1499 aborted_fs:
1500 maybe_fail_all_tickets(space_info);
1501 space_info->flush = false;
1502 spin_unlock(&space_info->lock);
1503 }
1504
1505 static void btrfs_async_reclaim_data_space(struct work_struct *work)
1506 {
1507 struct btrfs_fs_info *fs_info;
1508 struct btrfs_space_info *space_info;
1509
1510 fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
1511 space_info = fs_info->data_sinfo;
1512 do_async_reclaim_data_space(space_info);
1513 for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++)
1514 if (space_info->sub_group[i])
1515 do_async_reclaim_data_space(space_info->sub_group[i]);
1516 }
1517
1518 void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
1519 {
1520 INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
1521 INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
1522 INIT_WORK(&fs_info->preempt_reclaim_work,
1523 btrfs_preempt_reclaim_metadata_space);
1524 }
1525
1526 static const enum btrfs_flush_state priority_flush_states[] = {
1527 FLUSH_DELAYED_ITEMS_NR,
1528 FLUSH_DELAYED_ITEMS,
1529 RESET_ZONES,
1530 ALLOC_CHUNK,
1531 };
1532
1533 static const enum btrfs_flush_state evict_flush_states[] = {
1534 FLUSH_DELAYED_ITEMS_NR,
1535 FLUSH_DELAYED_ITEMS,
1536 FLUSH_DELAYED_REFS_NR,
1537 FLUSH_DELAYED_REFS,
1538 FLUSH_DELALLOC,
1539 FLUSH_DELALLOC_WAIT,
1540 FLUSH_DELALLOC_FULL,
1541 ALLOC_CHUNK,
1542 COMMIT_TRANS,
1543 RESET_ZONES,
1544 };
1545
1546 static bool is_ticket_served(struct reserve_ticket *ticket)
1547 {
1548 bool ret;
1549
1550 spin_lock(&ticket->lock);
1551 ret = (ticket->bytes == 0);
1552 spin_unlock(&ticket->lock);
1553
1554 return ret;
1555 }

static void priority_reclaim_metadata_space(struct btrfs_space_info *space_info,
					    struct reserve_ticket *ticket,
					    const enum btrfs_flush_state *states,
					    int states_nr)
{
	struct btrfs_fs_info *fs_info = space_info->fs_info;
	u64 to_reclaim;
	int flush_state = 0;

	/*
	 * This is the priority reclaim path, so to_reclaim could be >0 still
	 * because we may have only satisfied the priority tickets and still
	 * left non-priority tickets on the list. We would then have
	 * to_reclaim but ->bytes == 0.
	 */
	if (is_ticket_served(ticket))
		return;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(space_info);
	spin_unlock(&space_info->lock);

	while (flush_state < states_nr) {
		flush_space(space_info, to_reclaim, states[flush_state], false);
		if (is_ticket_served(ticket))
			return;
		flush_state++;
	}

	spin_lock(&space_info->lock);
	/*
	 * Attempt to steal from the global rsv if we can. However, if the fs
	 * was turned into error mode due to a transaction abort while flushing
	 * space above, fail with the abort error instead of returning success
	 * to the caller even if we could steal from the global rsv - this is
	 * just to have the caller fail immediately instead of later when
	 * trying to modify the fs, making it easier to debug -ENOSPC problems.
	 */
	if (unlikely(BTRFS_FS_ERROR(fs_info)))
		remove_ticket(space_info, ticket, BTRFS_FS_ERROR(fs_info));
	else if (!steal_from_global_rsv(space_info, ticket))
		remove_ticket(space_info, ticket, -ENOSPC);

	/*
	 * We must run try_granting_tickets here because we could be a large
	 * ticket in front of a smaller ticket that can now be satisfied with
	 * the available space.
	 */
	btrfs_try_granting_tickets(space_info);
	spin_unlock(&space_info->lock);
}

static void priority_reclaim_data_space(struct btrfs_space_info *space_info,
					struct reserve_ticket *ticket)
{
	/* We could have been granted before we got here. */
	if (is_ticket_served(ticket))
		return;

	spin_lock(&space_info->lock);
	while (!space_info->full) {
		spin_unlock(&space_info->lock);
		flush_space(space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
		if (is_ticket_served(ticket))
			return;
		spin_lock(&space_info->lock);
	}

	remove_ticket(space_info, ticket, -ENOSPC);
	btrfs_try_granting_tickets(space_info);
	spin_unlock(&space_info->lock);
}

static void wait_reserve_ticket(struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket)
{
	DEFINE_WAIT(wait);

	spin_lock(&ticket->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		int ret;

		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		spin_unlock(&ticket->lock);
		if (ret) {
			/*
			 * Delete us from the list. After we unlock the space
			 * info, we don't want the async reclaim job to reserve
			 * space for this ticket. If that would happen, then the
			 * ticket's task would not know that space was reserved
			 * despite getting an error, resulting in a space leak
			 * (bytes_may_use counter of our space_info).
			 */
			spin_lock(&space_info->lock);
			remove_ticket(space_info, ticket, -EINTR);
			spin_unlock(&space_info->lock);
			return;
		}

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&ticket->lock);
	}
	spin_unlock(&ticket->lock);
}
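
/*
 * Note on the wait above: TASK_KILLABLE means the sleep is interrupted only by
 * fatal signals. In that case the ticket is pulled off the list with -EINTR
 * while holding the space_info lock, so a concurrent reclaim worker cannot
 * grant space to a ticket whose owner has already given up on it (which would
 * leak bytes_may_use).
 */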

/*
 * Do the appropriate flushing and waiting for a ticket.
 *
 * @space_info: space info for the reservation
 * @ticket:     ticket for the reservation
 * @start_ns:   timestamp when the reservation started
 * @orig_bytes: amount of bytes originally reserved
 * @flush:      how much we can flush
 *
 * This does the work of figuring out how to flush for the ticket, waiting for
 * the reservation, and returning the appropriate error if there is one.
 */
static int handle_reserve_ticket(struct btrfs_space_info *space_info,
				 struct reserve_ticket *ticket,
				 u64 start_ns, u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	switch (flush) {
	case BTRFS_RESERVE_FLUSH_DATA:
	case BTRFS_RESERVE_FLUSH_ALL:
	case BTRFS_RESERVE_FLUSH_ALL_STEAL:
		wait_reserve_ticket(space_info, ticket);
		break;
	case BTRFS_RESERVE_FLUSH_LIMIT:
		priority_reclaim_metadata_space(space_info, ticket,
						priority_flush_states,
						ARRAY_SIZE(priority_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_EVICT:
		priority_reclaim_metadata_space(space_info, ticket,
						evict_flush_states,
						ARRAY_SIZE(evict_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
		priority_reclaim_data_space(space_info, ticket);
		break;
	default:
		ASSERT(0, "flush=%d", flush);
		break;
	}

	ret = ticket->error;
	ASSERT(list_empty(&ticket->list));
	/*
	 * Check that we can't have an error set if the reservation succeeded,
	 * as that would confuse tasks and lead them to error out without
	 * releasing reserved space (if an error happens the expectation is that
	 * space wasn't reserved at all).
	 */
	ASSERT(!(ticket->bytes == 0 && ticket->error),
	       "ticket->bytes=%llu ticket->error=%d", ticket->bytes, ticket->error);
	trace_btrfs_reserve_ticket(space_info->fs_info, space_info->flags,
				   orig_bytes, start_ns, flush, ticket->error);
	return ret;
}
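
/*
 * To summarize the dispatch above: FLUSH_DATA, FLUSH_ALL and FLUSH_ALL_STEAL
 * rely on the async reclaim workers and simply wait on the ticket here, while
 * FLUSH_LIMIT, FLUSH_EVICT and FLUSH_FREE_SPACE_INODE flush directly in the
 * context of the reserving task (the priority paths) instead of waiting for
 * the async workers to make progress.
 */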

/*
 * This returns true if this flush state will go through the ordinary flushing
 * code.
 */
static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
{
	return (flush == BTRFS_RESERVE_FLUSH_ALL) ||
		(flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
}

static inline void maybe_clamp_preempt(struct btrfs_space_info *space_info)
{
	struct btrfs_fs_info *fs_info = space_info->fs_info;
	u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
	u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);

	/*
	 * If we're heavy on ordered operations then clamping won't help us. We
	 * need to clamp specifically to keep up with dirtying buffered
	 * writers, because there's not a 1:1 correlation between writing
	 * delalloc and freeing space, like there is with flushing delayed refs
	 * or delayed nodes. If we're already more ordered than delalloc then
	 * we're keeping up, otherwise we aren't and should probably clamp.
	 */
	if (ordered < delalloc)
		space_info->clamp = min(space_info->clamp + 1, 8);
}
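
/*
 * The clamp value itself feeds the preemptive flushing heuristics (see
 * need_preemptive_reclaim()), where a larger clamp makes preemptive flushing
 * kick in earlier. Each time we are forced to add a reserve ticket while
 * delalloc is outpacing ordered extents, the clamp is bumped by one and
 * saturates at 8, so a sustained buffered write workload quickly drives
 * preemptive flushing to its most aggressive setting.
 */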

static inline bool can_steal(enum btrfs_reserve_flush_enum flush)
{
	return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
		flush == BTRFS_RESERVE_FLUSH_EVICT);
}

/*
 * NO_FLUSH and FLUSH_EMERGENCY don't want to create a ticket, they just want to
 * fail as quickly as possible.
 */
static inline bool can_ticket(enum btrfs_reserve_flush_enum flush)
{
	return (flush != BTRFS_RESERVE_NO_FLUSH &&
		flush != BTRFS_RESERVE_FLUSH_EMERGENCY);
}

/*
 * Try to reserve bytes from the block_rsv's space.
 *
 * @space_info: space info we want to allocate from
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If @flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
static int reserve_bytes(struct btrfs_space_info *space_info, u64 orig_bytes,
			 enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_fs_info *fs_info = space_info->fs_info;
	struct work_struct *async_work;
	struct reserve_ticket ticket;
	u64 start_ns = 0;
	u64 used;
	int ret = -ENOSPC;
	bool pending_tickets;

	ASSERT(orig_bytes, "orig_bytes=%llu", orig_bytes);
	/*
	 * If we have a transaction handle (current->journal_info != NULL),
	 * then the flush method can be neither BTRFS_RESERVE_FLUSH_ALL* nor
	 * BTRFS_RESERVE_FLUSH_EVICT, as we could deadlock because those
	 * flushing methods can trigger transaction commits.
	 */
	if (current->journal_info) {
		/* One assert per line for easier debugging. */
		ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL, "flush=%d", flush);
		ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL_STEAL, "flush=%d", flush);
		ASSERT(flush != BTRFS_RESERVE_FLUSH_EVICT, "flush=%d", flush);
	}

	if (flush == BTRFS_RESERVE_FLUSH_DATA)
		async_work = &fs_info->async_data_reclaim_work;
	else
		async_work = &fs_info->async_reclaim_work;

	spin_lock(&space_info->lock);
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We don't want NO_FLUSH allocations to jump everybody, they can
	 * generally handle ENOSPC in a different way, so treat them the same as
	 * normal flushers when it comes to skipping pending tickets.
	 */
	if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
		pending_tickets = !list_empty(&space_info->tickets) ||
				  !list_empty(&space_info->priority_tickets);
	else
		pending_tickets = !list_empty(&space_info->priority_tickets);

	/*
	 * Carry on if we have enough space (short-circuit) OR call
	 * can_overcommit() to ensure we can overcommit to continue.
	 */
	if (!pending_tickets &&
	    ((used + orig_bytes <= space_info->total_bytes) ||
	     can_overcommit(space_info, used, orig_bytes, flush))) {
		btrfs_space_info_update_bytes_may_use(space_info, orig_bytes);
		ret = 0;
	}

	/*
	 * Things are dire, we need to make a reservation so we don't abort. We
	 * will let this reservation go through as long as we have actual space
	 * left to allocate for the block.
	 */
	if (ret && unlikely(flush == BTRFS_RESERVE_FLUSH_EMERGENCY)) {
		used -= space_info->bytes_may_use;
		if (used + orig_bytes <= space_info->total_bytes) {
			btrfs_space_info_update_bytes_may_use(space_info, orig_bytes);
			ret = 0;
		}
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && can_ticket(flush)) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		space_info->reclaim_size += ticket.bytes;
		init_waitqueue_head(&ticket.wait);
		spin_lock_init(&ticket.lock);
		ticket.steal = can_steal(flush);
		if (trace_btrfs_reserve_ticket_enabled())
			start_ns = ktime_get_ns();

		if (flush == BTRFS_RESERVE_FLUSH_ALL ||
		    flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
		    flush == BTRFS_RESERVE_FLUSH_DATA) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				/*
				 * We were forced to add a reserve ticket, so
				 * our preemptive flushing is unable to keep
				 * up. Clamp down on the threshold for the
				 * preemptive flushing in order to keep up with
				 * the workload.
				 */
				maybe_clamp_preempt(space_info);

				space_info->flush = true;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_dfl_wq, async_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    !work_busy(&fs_info->preempt_reclaim_work) &&
		    need_preemptive_reclaim(space_info)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_dfl_wq,
				   &fs_info->preempt_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || !can_ticket(flush))
		return ret;

	return handle_reserve_ticket(space_info, &ticket, start_ns, orig_bytes, flush);
}
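
/*
 * The reservation attempt above proceeds in order of increasing desperation:
 * first the fast path (enough free space or an allowed overcommit, with no
 * pending tickets to jump), then the FLUSH_EMERGENCY exception which ignores
 * bytes_may_use and only requires the bytes to fit in the remaining space,
 * and finally, for flush modes that allow it, a reserve ticket that is either
 * handed to the async reclaim worker or flushed for directly by the caller
 * via handle_reserve_ticket().
 */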

/*
 * Try to reserve metadata bytes from the block_rsv's space.
 *
 * @space_info: the space_info we're allocating for
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If @flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
int btrfs_reserve_metadata_bytes(struct btrfs_space_info *space_info,
				 u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	ret = reserve_bytes(space_info, orig_bytes, flush);
	if (ret == -ENOSPC) {
		struct btrfs_fs_info *fs_info = space_info->fs_info;

		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      space_info->flags, orig_bytes, 1);

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(space_info, orig_bytes, false);
	}
	return ret;
}
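
/*
 * A typical caller sizes the reservation with the btrfs_calc*_size() helpers
 * described at the top of this file. As an illustrative sketch (not a
 * verbatim call site), code about to insert one new item might do roughly:
 *
 *	u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
 *	int ret = btrfs_reserve_metadata_bytes(space_info, bytes,
 *					       BTRFS_RESERVE_FLUSH_ALL);
 *	if (ret)
 *		return ret;
 *
 * Any unused part of the reservation is released later, which decreases
 * bytes_may_use and can allow waiting tickets to be granted via
 * btrfs_try_granting_tickets().
 */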

/*
 * Try to reserve data bytes for an allocation.
 *
 * @space_info: the space_info we're allocating for
 * @bytes:      number of bytes we need
 * @flush:      how we are allowed to flush
 *
 * This will reserve bytes from the data space info. If there is not enough
 * space then we will attempt to flush space as specified by flush.
 */
int btrfs_reserve_data_bytes(struct btrfs_space_info *space_info, u64 bytes,
			     enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_fs_info *fs_info = space_info->fs_info;
	int ret;

	ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
	       flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE ||
	       flush == BTRFS_RESERVE_NO_FLUSH, "flush=%d", flush);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA,
	       "current->journal_info=0x%lx flush=%d",
	       (unsigned long)current->journal_info, flush);

	ret = reserve_bytes(space_info, bytes, flush);
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      space_info->flags, bytes, 1);
		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(space_info, bytes, false);
	}
	return ret;
}

/* Dump all the space infos when we abort a transaction due to ENOSPC. */
__cold void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	btrfs_info(fs_info, "dumping space info:");
	list_for_each_entry(space_info, &fs_info->space_info, list) {
		spin_lock(&space_info->lock);
		__btrfs_dump_space_info(space_info);
		spin_unlock(&space_info->lock);
	}
	dump_global_block_rsv(fs_info);
}

/*
 * Account the unused space of all the readonly block groups in the space_info.
 * Takes mirrors into account.
 */
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
	struct btrfs_block_group *block_group;
	u64 free_bytes = 0;
	int factor;

	/* It's df, we don't care if it's racy */
	if (data_race(list_empty(&sinfo->ro_bgs)))
		return 0;

	spin_lock(&sinfo->lock);
	list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
		spin_lock(&block_group->lock);

		if (!block_group->ro) {
			spin_unlock(&block_group->lock);
			continue;
		}

		factor = btrfs_bg_type_to_factor(block_group->flags);
		free_bytes += (block_group->length -
			       block_group->used) * factor;

		spin_unlock(&block_group->lock);
	}
	spin_unlock(&sinfo->lock);

	return free_bytes;
}

static u64 calc_pct_ratio(u64 x, u64 y)
{
	int ret;

	if (!y)
		return 0;
again:
	ret = check_mul_overflow(100, x, &x);
	if (ret)
		goto lose_precision;
	return div64_u64(x, y);
lose_precision:
	x >>= 10;
	y >>= 10;
	if (!y)
		y = 1;
	goto again;
}
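
/*
 * Illustration of the precision fallback above: 100 * x only overflows a u64
 * when x exceeds roughly 1.8e17 (about 160 PiB when x is a byte count). In
 * that case both x and y are shifted right by 10 bits, dividing both by 1024
 * and so keeping the ratio intact apart from rounding, and the multiplication
 * is retried until it fits, with y clamped to at least 1 to avoid dividing by
 * zero.
 */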

/*
 * A reasonable buffer for unallocated space is 10 data block_groups.
 * If we claw this back repeatedly, we can still achieve efficient
 * utilization when near full, and not do too much reclaim while
 * always maintaining a solid buffer for workloads that quickly
 * allocate and pressure the unallocated space.
 */
static u64 calc_unalloc_target(struct btrfs_fs_info *fs_info)
{
	u64 chunk_sz = calc_effective_data_chunk_size(fs_info);

	return BTRFS_UNALLOC_BLOCK_GROUP_TARGET * chunk_sz;
}

/*
 * The fundamental goal of automatic reclaim is to protect the filesystem's
 * unallocated space and thus minimize the probability of the filesystem going
 * read only when a metadata allocation failure causes a transaction abort.
 *
 * However, relocations happen into the space_info's unused space, therefore
 * automatic reclaim must also back off as that space runs low. There is no
 * value in doing trivial "relocations" of re-writing the same block group
 * into a fresh one.
 *
 * Furthermore, we want to avoid doing too much reclaim even if there are good
 * candidates. This is because the allocator is pretty good at filling up the
 * holes with writes. So we want to do just enough reclaim to try and stay
 * safe from running out of unallocated space but not be wasteful about it.
 *
 * Therefore, the dynamic reclaim threshold is calculated as follows:
 * - calculate a target unallocated amount of 10 block group sized chunks
 *   (BTRFS_UNALLOC_BLOCK_GROUP_TARGET)
 * - ratchet up the intensity of reclaim depending on how far we are from
 *   that target by using a formula of unalloc / target to set the threshold.
 *
 * Typically with 10 block groups as the target, the discrete values this comes
 * out to are 0, 10, 20, ... , 80, 90, and 99.
 */
static int calc_dynamic_reclaim_threshold(const struct btrfs_space_info *space_info)
{
	struct btrfs_fs_info *fs_info = space_info->fs_info;
	u64 unalloc = atomic64_read(&fs_info->free_chunk_space);
	u64 target = calc_unalloc_target(fs_info);
	u64 alloc = space_info->total_bytes;
	u64 used = btrfs_space_info_used(space_info, false);
	u64 unused = alloc - used;
	u64 want = target > unalloc ? target - unalloc : 0;
	u64 data_chunk_size = calc_effective_data_chunk_size(fs_info);

	/* If we have no unused space, don't bother, it won't work anyway. */
	if (unused < data_chunk_size)
		return 0;

	/* Cast to int is OK because want <= target. */
	return calc_pct_ratio(want, target);
}
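
/*
 * Worked example with illustrative numbers: with a 1G effective data chunk
 * size the unallocated target is 10G. If only 4G of the device remains
 * unallocated then want = 10G - 4G = 6G and the threshold is
 * 100 * 6 / 10 = 60, so block groups less than 60% used become reclaim
 * candidates. With the full 10G (or more) still unallocated, want = 0 and the
 * threshold is 0, meaning no block group is reclaimed on this basis.
 */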

int btrfs_calc_reclaim_threshold(const struct btrfs_space_info *space_info)
{
	lockdep_assert_held(&space_info->lock);

	if (READ_ONCE(space_info->dynamic_reclaim))
		return calc_dynamic_reclaim_threshold(space_info);
	return READ_ONCE(space_info->bg_reclaim_threshold);
}

/*
 * Under "urgent" reclaim, we will reclaim even fresh block groups that have
 * recently seen successful allocations, as we are desperate to reclaim
 * whatever we can to avoid ENOSPC in a transaction leading to a readonly fs.
 */
static bool is_reclaim_urgent(struct btrfs_space_info *space_info)
{
	struct btrfs_fs_info *fs_info = space_info->fs_info;
	u64 unalloc = atomic64_read(&fs_info->free_chunk_space);
	u64 data_chunk_size = calc_effective_data_chunk_size(fs_info);

	return unalloc < data_chunk_size;
}

static bool do_reclaim_sweep(struct btrfs_space_info *space_info, int raid)
{
	struct btrfs_block_group *bg;
	int thresh_pct;
	bool will_reclaim = false;
	bool urgent;

	spin_lock(&space_info->lock);
	urgent = is_reclaim_urgent(space_info);
	thresh_pct = btrfs_calc_reclaim_threshold(space_info);
	spin_unlock(&space_info->lock);

	down_read(&space_info->groups_sem);
again:
	list_for_each_entry(bg, &space_info->block_groups[raid], list) {
		u64 thresh;
		bool reclaim = false;

		btrfs_get_block_group(bg);
		spin_lock(&bg->lock);
		thresh = mult_perc(bg->length, thresh_pct);
		if (bg->used < thresh && bg->reclaim_mark) {
			will_reclaim = true;
			reclaim = true;
		}
		bg->reclaim_mark++;
		spin_unlock(&bg->lock);
		if (reclaim)
			btrfs_mark_bg_to_reclaim(bg);
		btrfs_put_block_group(bg);
	}

	/*
	 * In situations where we are very motivated to reclaim (low unalloc)
	 * use two passes to make the reclaim mark check best effort.
	 *
	 * If we have any staler groups, we don't touch the fresher ones, but if we
	 * really need a block group, do take a fresh one.
	 */
	if (!will_reclaim && urgent) {
		urgent = false;
		goto again;
	}

	up_read(&space_info->groups_sem);
	return will_reclaim;
}
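
/*
 * How the two passes play out in practice: a freshly created block group
 * starts with reclaim_mark == 0, so the first sweep skips it and only bumps
 * the mark. If nothing older qualified and unallocated space is critically
 * low (urgent), the list is walked a second time, and by then every group,
 * including the fresh ones, has a non-zero mark and may be picked, so a
 * desperate filesystem can still reclaim something.
 */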

void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes)
{
	u64 chunk_sz = calc_effective_data_chunk_size(space_info->fs_info);

	lockdep_assert_held(&space_info->lock);
	space_info->reclaimable_bytes += bytes;

	if (space_info->reclaimable_bytes > 0 &&
	    space_info->reclaimable_bytes >= chunk_sz)
		btrfs_set_periodic_reclaim_ready(space_info, true);
}

void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready)
{
	lockdep_assert_held(&space_info->lock);
	if (!READ_ONCE(space_info->periodic_reclaim))
		return;
	if (ready != space_info->periodic_reclaim_ready) {
		space_info->periodic_reclaim_ready = ready;
		if (!ready)
			space_info->reclaimable_bytes = 0;
	}
}

static bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info)
{
	bool ret;

	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
		return false;
	if (!READ_ONCE(space_info->periodic_reclaim))
		return false;

	spin_lock(&space_info->lock);
	ret = space_info->periodic_reclaim_ready;
	spin_unlock(&space_info->lock);

	return ret;
}

void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info)
{
	int raid;
	struct btrfs_space_info *space_info;

	list_for_each_entry(space_info, &fs_info->space_info, list) {
		if (!btrfs_should_periodic_reclaim(space_info))
			continue;
		for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++) {
			if (do_reclaim_sweep(space_info, raid)) {
				spin_lock(&space_info->lock);
				btrfs_set_periodic_reclaim_ready(space_info, false);
				spin_unlock(&space_info->lock);
			}
		}
	}
}

void btrfs_return_free_space(struct btrfs_space_info *space_info, u64 len)
{
	struct btrfs_fs_info *fs_info = space_info->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;

	lockdep_assert_held(&space_info->lock);

	/* Prioritize the global reservation to receive the freed space. */
	if (global_rsv->space_info != space_info)
		goto grant;

	spin_lock(&global_rsv->lock);
	if (!global_rsv->full) {
		u64 to_add = min(len, global_rsv->size - global_rsv->reserved);

		global_rsv->reserved += to_add;
		btrfs_space_info_update_bytes_may_use(space_info, to_add);
		if (global_rsv->reserved >= global_rsv->size)
			global_rsv->full = true;
		len -= to_add;
	}
	spin_unlock(&global_rsv->lock);

grant:
	/* Add to any tickets we may have. */
	if (len)
		btrfs_try_granting_tickets(space_info);
}
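
/*
 * Example of the split above (illustrative numbers): if the global rsv has
 * size 512M with 500M currently reserved and 20M of space is returned, the
 * first 12M tops up the global rsv (marking it full) and the remaining 8M is
 * offered to any waiting tickets via btrfs_try_granting_tickets().
 */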