1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/spinlock.h>
4 #include <linux/minmax.h>
5 #include "misc.h"
6 #include "ctree.h"
7 #include "space-info.h"
8 #include "sysfs.h"
9 #include "volumes.h"
10 #include "free-space-cache.h"
11 #include "ordered-data.h"
12 #include "transaction.h"
13 #include "block-group.h"
14 #include "fs.h"
15 #include "accessors.h"
16 #include "extent-tree.h"
17 #include "zoned.h"
18 #include "delayed-inode.h"
19
20 /*
21 * HOW DOES SPACE RESERVATION WORK
22 *
23 * If you want to know about delalloc specifically, there is a separate comment
24 * for that with the delalloc code. This comment is about how the whole system
25 * works generally.
26 *
27 * BASIC CONCEPTS
28 *
29 * 1) space_info. This is the ultimate arbiter of how much space we can use.
30 * There's a description of the bytes_ fields with the struct declaration,
31 * refer to that for specifics on each field. Suffice it to say that for
32 * reservations we care about total_bytes - SUM(space_info->bytes_) when
33 * determining if there is space to make an allocation. There is a space_info
34 * for METADATA, SYSTEM, and DATA areas.
35 *
36 * 2) block_rsv's. These are basically buckets for every different type of
37 * metadata reservation we have. You can see the comment in the block_rsv
38 * code on the rules for each type, but generally block_rsv->reserved is how
39 * much space is accounted for in space_info->bytes_may_use.
40 *
41  * 3) btrfs_calc*_size. These are the worst case calculations we use based
42 * on the number of items we will want to modify. We have one for changing
43 * items, and one for inserting new items. Generally we use these helpers to
44 * determine the size of the block reserves, and then use the actual bytes
45 * values to adjust the space_info counters.
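 *
 *    For example, calc_reclaim_items_nr() below turns a byte amount into an
 *    item count by dividing it by btrfs_calc_insert_metadata_size(fs_info, 1),
 *    the worst case cost of inserting a single item.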
46 *
47 * MAKING RESERVATIONS, THE NORMAL CASE
48 *
49 * We call into either btrfs_reserve_data_bytes() or
50 * btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
51 * num_bytes we want to reserve.
52 *
53 * ->reserve
54 * space_info->bytes_may_use += num_bytes
55 *
56 * ->extent allocation
57 * Call btrfs_add_reserved_bytes() which does
58 * space_info->bytes_may_use -= num_bytes
59 * space_info->bytes_reserved += extent_bytes
60 *
61 * ->insert reference
62 * Call btrfs_update_block_group() which does
63 * space_info->bytes_reserved -= extent_bytes
64 * space_info->bytes_used += extent_bytes
65 *
66 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
67 *
68 * Assume we are unable to simply make the reservation because we do not have
69 * enough space
70 *
71 * -> reserve_bytes
72 * create a reserve_ticket with ->bytes set to our reservation, add it to
73 * the tail of space_info->tickets, kick async flush thread
74 *
75 * ->handle_reserve_ticket
76 * wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
77 * on the ticket.
78 *
79 * -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
80 * Flushes various things attempting to free up space.
81 *
82 * -> btrfs_try_granting_tickets()
83 * This is called by anything that either subtracts space from
84 * space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
85 * space_info->total_bytes. This loops through the ->priority_tickets and
86 * then the ->tickets list checking to see if the reservation can be
87 * completed. If it can the space is added to space_info->bytes_may_use and
88 * the ticket is woken up.
89 *
90 * -> ticket wakeup
91  *   Check if ->bytes == 0; if so we got our reservation and we can carry
92 * on, if not return the appropriate error (ENOSPC, but can be EINTR if we
93 * were interrupted.)
94 *
95 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
96 *
97 * Same as the above, except we add ourselves to the
98 * space_info->priority_tickets, and we do not use ticket->wait, we simply
99 * call flush_space() ourselves for the states that are safe for us to call
100 * without deadlocking and hope for the best.
101 *
102 * THE FLUSHING STATES
103 *
104 * Generally speaking we will have two cases for each state, a "nice" state
105  * and an "ALL THE THINGS" state.  In btrfs we delay a lot of work in order to
106  * reduce the locking overhead on the various trees, and even to keep from
107 * doing any work at all in the case of delayed refs. Each of these delayed
108 * things however hold reservations, and so letting them run allows us to
109 * reclaim space so we can make new reservations.
110 *
111 * FLUSH_DELAYED_ITEMS
112 * Every inode has a delayed item to update the inode. Take a simple write
113 * for example, we would update the inode item at write time to update the
114 * mtime, and then again at finish_ordered_io() time in order to update the
115 * isize or bytes. We keep these delayed items to coalesce these operations
116 * into a single operation done on demand. These are an easy way to reclaim
117 * metadata space.
118 *
119 * FLUSH_DELALLOC
120 * Look at the delalloc comment to get an idea of how much space is reserved
121 * for delayed allocation. We can reclaim some of this space simply by
122 * running delalloc, but usually we need to wait for ordered extents to
123 * reclaim the bulk of this space.
124 *
125 * FLUSH_DELAYED_REFS
126 * We have a block reserve for the outstanding delayed refs space, and every
127 * delayed ref operation holds a reservation. Running these is a quick way
128 * to reclaim space, but we want to hold this until the end because COW can
129 * churn a lot and we can avoid making some extent tree modifications if we
130 * are able to delay for as long as possible.
131 *
132 * RESET_ZONES
133  *   This state works only for zoned mode.  In zoned mode we cannot reuse a
134  *   region that was allocated and then freed until we reset the zone, due to
135  *   the sequential write zone requirement.  The RESET_ZONES state resets the
136  *   zones of an unused block group and lets us reuse the space.  Reusing the
137  *   space is faster than removing the block group and allocating another
138  *   block group on the zones.
139 *
140 * ALLOC_CHUNK
141 * We will skip this the first time through space reservation, because of
142 * overcommit and we don't want to have a lot of useless metadata space when
143 * our worst case reservations will likely never come true.
144 *
145 * RUN_DELAYED_IPUTS
146 * If we're freeing inodes we're likely freeing checksums, file extent
147 * items, and extent tree items. Loads of space could be freed up by these
148 * operations, however they won't be usable until the transaction commits.
149 *
150 * COMMIT_TRANS
151 * This will commit the transaction. Historically we had a lot of logic
152  *   surrounding whether or not we'd commit the transaction, but this was born
153 * out of a pre-tickets era where we could end up committing the transaction
154 * thousands of times in a row without making progress. Now thanks to our
155 * ticketing system we know if we're not making progress and can error
156 * everybody out after a few commits rather than burning the disk hoping for
157 * a different answer.
158 *
159 * OVERCOMMIT
160 *
161 * Because we hold so many reservations for metadata we will allow you to
162  * reserve more space than is currently free in the currently allocated
163 * metadata space. This only happens with metadata, data does not allow
164 * overcommitting.
165 *
166 * You can see the current logic for when we allow overcommit in
167 * btrfs_can_overcommit(), but it only applies to unallocated space. If there
168 * is no unallocated space to be had, all reservations are kept within the
169 * free space in the allocated metadata chunks.
170 *
171 * Because of overcommitting, you generally want to use the
172 * btrfs_can_overcommit() logic for metadata allocations, as it does the right
173 * thing with or without extra unallocated space.
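 *
 * As a rough sketch (illustrative only, this is not a helper in this file),
 * the grant check in btrfs_try_granting_tickets() below boils down to:
 *
 *    u64 used = btrfs_space_info_used(space_info, true);
 *
 *    if (used + bytes <= space_info->total_bytes ||
 *        btrfs_can_overcommit(space_info, bytes, flush))
 *            btrfs_space_info_update_bytes_may_use(space_info, bytes);
 *
 * with space_info->lock held by the caller.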
174 */
175
176 struct reserve_ticket {
177 u64 bytes;
178 int error;
179 bool steal;
180 struct list_head list;
181 wait_queue_head_t wait;
182 spinlock_t lock;
183 };
184
185 /*
186 * after adding space to the filesystem, we need to clear the full flags
187 * on all the space infos.
188 */
189 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
190 {
191 struct list_head *head = &info->space_info;
192 struct btrfs_space_info *found;
193
194 list_for_each_entry(found, head, list)
195 found->full = false;
196 }
197
198 /*
199 * Block groups with more than this value (percents) of unusable space will be
200 * scheduled for background reclaim.
201 */
202 #define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH (75)
203
204 #define BTRFS_UNALLOC_BLOCK_GROUP_TARGET (10ULL)
205
206 /*
207 * Calculate chunk size depending on volume type (regular or zoned).
208 */
209 static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
210 {
211 if (btrfs_is_zoned(fs_info))
212 return fs_info->zone_size;
213
214 ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK, "flags=%llu", flags);
215
216 if (flags & BTRFS_BLOCK_GROUP_DATA)
217 return BTRFS_MAX_DATA_CHUNK_SIZE;
218 else if (flags & (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA_REMAP))
219 return SZ_32M;
220
221 /* Handle BTRFS_BLOCK_GROUP_METADATA */
222 if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G)
223 return SZ_1G;
224
225 return SZ_256M;
226 }
227
228 /*
229 * Update default chunk size.
230 */
231 void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
232 u64 chunk_size)
233 {
234 WRITE_ONCE(space_info->chunk_size, chunk_size);
235 }
236
237 static void init_space_info(struct btrfs_fs_info *info,
238 struct btrfs_space_info *space_info, u64 flags)
239 {
240 space_info->fs_info = info;
241 for (int i = 0; i < BTRFS_NR_RAID_TYPES; i++)
242 INIT_LIST_HEAD(&space_info->block_groups[i]);
243 init_rwsem(&space_info->groups_sem);
244 spin_lock_init(&space_info->lock);
245 space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
246 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
247 INIT_LIST_HEAD(&space_info->ro_bgs);
248 INIT_LIST_HEAD(&space_info->tickets);
249 INIT_LIST_HEAD(&space_info->priority_tickets);
250 space_info->clamp = 1;
251 btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));
252 space_info->subgroup_id = BTRFS_SUB_GROUP_PRIMARY;
253
254 if (btrfs_is_zoned(info))
255 space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;
256 }
257
258 static int create_space_info_sub_group(struct btrfs_space_info *parent, u64 flags,
259 enum btrfs_space_info_sub_group id, int index)
260 {
261 struct btrfs_fs_info *fs_info = parent->fs_info;
262 struct btrfs_space_info *sub_group;
263 int ret;
264
265 ASSERT(parent->subgroup_id == BTRFS_SUB_GROUP_PRIMARY,
266 "parent->subgroup_id=%d", parent->subgroup_id);
267 ASSERT(id != BTRFS_SUB_GROUP_PRIMARY, "id=%d", id);
268
269 sub_group = kzalloc_obj(*sub_group, GFP_NOFS);
270 if (!sub_group)
271 return -ENOMEM;
272
273 init_space_info(fs_info, sub_group, flags);
274 parent->sub_group[index] = sub_group;
275 sub_group->parent = parent;
276 sub_group->subgroup_id = id;
277
278 ret = btrfs_sysfs_add_space_info_type(sub_group);
279 if (ret) {
280 kfree(sub_group);
281 parent->sub_group[index] = NULL;
282 }
283 return ret;
284 }
285
286 static int create_space_info(struct btrfs_fs_info *info, u64 flags)
287 {
288
289 struct btrfs_space_info *space_info;
290 int ret = 0;
291
292 space_info = kzalloc_obj(*space_info, GFP_NOFS);
293 if (!space_info)
294 return -ENOMEM;
295
296 init_space_info(info, space_info, flags);
297
298 if (btrfs_is_zoned(info)) {
299 if (flags & BTRFS_BLOCK_GROUP_DATA)
300 ret = create_space_info_sub_group(space_info, flags,
301 BTRFS_SUB_GROUP_DATA_RELOC,
302 0);
303 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
304 ret = create_space_info_sub_group(space_info, flags,
305 BTRFS_SUB_GROUP_TREELOG,
306 0);
307
308 if (ret)
309 goto out_free;
310 }
311
312 ret = btrfs_sysfs_add_space_info_type(space_info);
313 if (ret)
314 goto out_free;
315
316 list_add(&space_info->list, &info->space_info);
317 if (flags & BTRFS_BLOCK_GROUP_DATA)
318 info->data_sinfo = space_info;
319
320 return ret;
321
322 out_free:
323 kfree(space_info);
324 return ret;
325 }
326
327 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
328 {
329 struct btrfs_super_block *disk_super;
330 u64 features;
331 u64 flags;
332 bool mixed = false;
333 int ret;
334
335 disk_super = fs_info->super_copy;
336 if (!btrfs_super_root(disk_super))
337 return -EINVAL;
338
339 features = btrfs_super_incompat_flags(disk_super);
340 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
341 mixed = true;
342
343 flags = BTRFS_BLOCK_GROUP_SYSTEM;
344 ret = create_space_info(fs_info, flags);
345 if (ret)
346 return ret;
347
348 if (mixed) {
349 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
350 ret = create_space_info(fs_info, flags);
351 if (ret)
352 return ret;
353 } else {
354 flags = BTRFS_BLOCK_GROUP_METADATA;
355 ret = create_space_info(fs_info, flags);
356 if (ret)
357 return ret;
358
359 flags = BTRFS_BLOCK_GROUP_DATA;
360 ret = create_space_info(fs_info, flags);
361 if (ret)
362 return ret;
363 }
364
365 if (features & BTRFS_FEATURE_INCOMPAT_REMAP_TREE) {
366 flags = BTRFS_BLOCK_GROUP_METADATA_REMAP;
367 ret = create_space_info(fs_info, flags);
368 }
369
370 return ret;
371 }
372
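/*
 * Account a new block group's space in its space_info and add it to the
 * per-RAID-index block group list. Block groups with BTRFS_BLOCK_GROUP_REMAPPED
 * set and an identity_remap_count of zero do not contribute to
 * total_bytes/disk_total.
 */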
373 void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
374 struct btrfs_block_group *block_group)
375 {
376 struct btrfs_space_info *space_info = block_group->space_info;
377 int factor, index;
378
379 factor = btrfs_bg_type_to_factor(block_group->flags);
380
381 spin_lock(&space_info->lock);
382
383 if (!(block_group->flags & BTRFS_BLOCK_GROUP_REMAPPED) ||
384 block_group->identity_remap_count != 0) {
385 space_info->total_bytes += block_group->length;
386 space_info->disk_total += block_group->length * factor;
387 }
388
389 space_info->bytes_used += block_group->used;
390 space_info->disk_used += block_group->used * factor;
391 space_info->bytes_readonly += block_group->bytes_super;
392 btrfs_space_info_update_bytes_zone_unusable(space_info, block_group->zone_unusable);
393 if (block_group->length > 0)
394 space_info->full = false;
395 btrfs_try_granting_tickets(space_info);
396 spin_unlock(&space_info->lock);
397
398 block_group->space_info = space_info;
399
400 index = btrfs_bg_flags_to_raid_index(block_group->flags);
401 down_write(&space_info->groups_sem);
402 list_add_tail(&block_group->list, &space_info->block_groups[index]);
403 up_write(&space_info->groups_sem);
404 }
405
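/*
 * Return the space_info matching the block group type bits in @flags, or
 * NULL if no such space_info exists.
 */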
406 struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
407 u64 flags)
408 {
409 struct list_head *head = &info->space_info;
410 struct btrfs_space_info *found;
411
412 flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
413
414 list_for_each_entry(found, head, list) {
415 if (found->flags & flags)
416 return found;
417 }
418 return NULL;
419 }
420
421 static u64 calc_effective_data_chunk_size(struct btrfs_fs_info *fs_info)
422 {
423 struct btrfs_space_info *data_sinfo;
424 u64 data_chunk_size;
425
426 /*
427 * Calculate the data_chunk_size, space_info->chunk_size is the
428 * "optimal" chunk size based on the fs size. However when we actually
429 * allocate the chunk we will strip this down further, making it no
430 * more than 10% of the disk or 1G, whichever is smaller.
431 *
432 * On the zoned mode, we need to use zone_size (= data_sinfo->chunk_size)
433 * as it is.
434 */
435 data_sinfo = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
436 if (btrfs_is_zoned(fs_info))
437 return data_sinfo->chunk_size;
438 data_chunk_size = min(data_sinfo->chunk_size,
439 mult_perc(fs_info->fs_devices->total_rw_bytes, 10));
440 return min_t(u64, data_chunk_size, SZ_1G);
441 }
442
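/*
 * Estimate how much of the currently unallocated device space could still be
 * used for this space_info: scale free_chunk_space by the RAID profile
 * factor, reserve one data chunk worth of slack, and shrink the result
 * depending on how aggressively we are allowed to flush.
 */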
443 static u64 calc_available_free_space(const struct btrfs_space_info *space_info,
444 enum btrfs_reserve_flush_enum flush)
445 {
446 struct btrfs_fs_info *fs_info = space_info->fs_info;
447 u64 profile;
448 u64 avail;
449 u64 data_chunk_size;
450 int factor;
451
452 if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
453 profile = btrfs_system_alloc_profile(fs_info);
454 else
455 profile = btrfs_metadata_alloc_profile(fs_info);
456
457 avail = atomic64_read(&fs_info->free_chunk_space);
458
459 /*
460 * If we have dup, raid1 or raid10 then only half of the free
461 * space is actually usable. For raid56, the space info used
462 * doesn't include the parity drive, so we don't have to
463 * change the math
464 */
465 factor = btrfs_bg_type_to_factor(profile);
466 avail = div_u64(avail, factor);
467 if (avail == 0)
468 return 0;
469
470 data_chunk_size = calc_effective_data_chunk_size(fs_info);
471
472 /*
473 * Since data allocations immediately use block groups as part of the
474 * reservation, because we assume that data reservations will == actual
475 * usage, we could potentially overcommit and then immediately have that
476 * available space used by a data allocation, which could put us in a
477 * bind when we get close to filling the file system.
478 *
479 * To handle this simply remove the data_chunk_size from the available
480 * space. If we are relatively empty this won't affect our ability to
481 * overcommit much, and if we're very close to full it'll keep us from
482 * getting into a position where we've given ourselves very little
483 * metadata wiggle room.
484 */
485 if (avail <= data_chunk_size)
486 return 0;
487 avail -= data_chunk_size;
488
489 /*
490 * If we aren't flushing all things, let us overcommit up to
491  * half of the space. If we can flush, don't let us overcommit
492 * too much, let it overcommit up to 1/8 of the space.
493 */
494 if (flush == BTRFS_RESERVE_FLUSH_ALL)
495 avail >>= 3;
496 else
497 avail >>= 1;
498
499 /*
500 * On the zoned mode, we always allocate one zone as one chunk.
501 * Returning non-zone size aligned bytes here will result in
502 * less pressure for the async metadata reclaim process, and it
503 * will over-commit too much leading to ENOSPC. Align down to the
504 * zone size to avoid that.
505 */
506 if (btrfs_is_zoned(fs_info))
507 avail = ALIGN_DOWN(avail, fs_info->zone_size);
508
509 return avail;
510 }
511
512 static inline bool check_can_overcommit(const struct btrfs_space_info *space_info,
513 u64 space_info_used_bytes, u64 bytes,
514 enum btrfs_reserve_flush_enum flush)
515 {
516 const u64 avail = calc_available_free_space(space_info, flush);
517
518 return (space_info_used_bytes + bytes < space_info->total_bytes + avail);
519 }
520
521 static inline bool can_overcommit(const struct btrfs_space_info *space_info,
522 u64 space_info_used_bytes, u64 bytes,
523 enum btrfs_reserve_flush_enum flush)
524 {
525 /* Don't overcommit when in mixed mode. */
526 if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
527 return false;
528
529 return check_can_overcommit(space_info, space_info_used_bytes, bytes, flush);
530 }
531
532 bool btrfs_can_overcommit(const struct btrfs_space_info *space_info, u64 bytes,
533 enum btrfs_reserve_flush_enum flush)
534 {
535 u64 used;
536
537 /* Don't overcommit when in mixed mode */
538 if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
539 return false;
540
541 used = btrfs_space_info_used(space_info, true);
542
543 return check_can_overcommit(space_info, used, bytes, flush);
544 }
545
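/*
 * Unlink a ticket from its space_info, drop its contribution to
 * reclaim_size, and wake the waiter with either the given error or a fully
 * satisfied (bytes == 0) ticket.
 */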
546 static void remove_ticket(struct btrfs_space_info *space_info,
547 struct reserve_ticket *ticket, int error)
548 {
549 lockdep_assert_held(&space_info->lock);
550
551 if (!list_empty(&ticket->list)) {
552 list_del_init(&ticket->list);
553 ASSERT(space_info->reclaim_size >= ticket->bytes,
554 "space_info->reclaim_size=%llu ticket->bytes=%llu",
555 space_info->reclaim_size, ticket->bytes);
556 space_info->reclaim_size -= ticket->bytes;
557 }
558
559 spin_lock(&ticket->lock);
560 /*
561 * If we are called from a task waiting on the ticket, it may happen
562 * that before it sets an error on the ticket, a reclaim task was able
563 * to satisfy the ticket. In that case ignore the error.
564 */
565 if (error && ticket->bytes > 0)
566 ticket->error = error;
567 else
568 ticket->bytes = 0;
569
570 wake_up(&ticket->wait);
571 spin_unlock(&ticket->lock);
572 }
573
574 /*
575 * This is for space we already have accounted in space_info->bytes_may_use, so
576 * basically when we're returning space from block_rsv's.
577 */
578 void btrfs_try_granting_tickets(struct btrfs_space_info *space_info)
579 {
580 struct list_head *head;
581 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
582 u64 used = btrfs_space_info_used(space_info, true);
583
584 lockdep_assert_held(&space_info->lock);
585
586 head = &space_info->priority_tickets;
587 again:
588 while (!list_empty(head)) {
589 struct reserve_ticket *ticket;
590 u64 used_after;
591
592 ticket = list_first_entry(head, struct reserve_ticket, list);
593 used_after = used + ticket->bytes;
594
595 /* Check and see if our ticket can be satisfied now. */
596 if (used_after <= space_info->total_bytes ||
597 can_overcommit(space_info, used, ticket->bytes, flush)) {
598 btrfs_space_info_update_bytes_may_use(space_info, ticket->bytes);
599 remove_ticket(space_info, ticket, 0);
600 space_info->tickets_id++;
601 used = used_after;
602 } else {
603 break;
604 }
605 }
606
607 if (head == &space_info->priority_tickets) {
608 head = &space_info->tickets;
609 flush = BTRFS_RESERVE_FLUSH_ALL;
610 goto again;
611 }
612 }
613
614 #define DUMP_BLOCK_RSV(fs_info, rsv_name) \
615 do { \
616 struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name; \
617 spin_lock(&__rsv->lock); \
618 btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu", \
619 __rsv->size, __rsv->reserved); \
620 spin_unlock(&__rsv->lock); \
621 } while (0)
622
623 static void dump_global_block_rsv(struct btrfs_fs_info *fs_info)
624 {
625 DUMP_BLOCK_RSV(fs_info, global_block_rsv);
626 DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
627 DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
628 DUMP_BLOCK_RSV(fs_info, remap_block_rsv);
629 DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
630 DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
631 }
632
633 static void __btrfs_dump_space_info(const struct btrfs_space_info *info)
634 {
635 const struct btrfs_fs_info *fs_info = info->fs_info;
636 const char *flag_str = btrfs_space_info_type_str(info);
637 lockdep_assert_held(&info->lock);
638
639 /* The free space could be negative in case of overcommit */
640 btrfs_info(fs_info,
641 "space_info %s (sub-group id %d) has %lld free, is %sfull",
642 flag_str, info->subgroup_id,
643 (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
644 info->full ? "" : "not ");
645 btrfs_info(fs_info,
646 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
647 info->total_bytes, info->bytes_used, info->bytes_pinned,
648 info->bytes_reserved, info->bytes_may_use,
649 info->bytes_readonly, info->bytes_zone_unusable);
650 }
651
652 void btrfs_dump_space_info(struct btrfs_space_info *info, u64 bytes,
653 bool dump_block_groups)
654 {
655 struct btrfs_fs_info *fs_info = info->fs_info;
656 struct btrfs_block_group *cache;
657 u64 total_avail = 0;
658 int index = 0;
659
660 spin_lock(&info->lock);
661 __btrfs_dump_space_info(info);
662 dump_global_block_rsv(fs_info);
663 spin_unlock(&info->lock);
664
665 if (!dump_block_groups)
666 return;
667
668 down_read(&info->groups_sem);
669 again:
670 list_for_each_entry(cache, &info->block_groups[index], list) {
671 u64 avail;
672
673 spin_lock(&cache->lock);
674 avail = btrfs_block_group_available_space(cache);
675 btrfs_info(fs_info,
676 "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s",
677 cache->start, cache->length, cache->used, cache->pinned,
678 cache->reserved, cache->delalloc_bytes,
679 cache->bytes_super, cache->zone_unusable,
680 avail, cache->ro ? "[readonly]" : "");
681 spin_unlock(&cache->lock);
682 btrfs_dump_free_space(cache, bytes);
683 total_avail += avail;
684 }
685 if (++index < BTRFS_NR_RAID_TYPES)
686 goto again;
687 up_read(&info->groups_sem);
688
689 btrfs_info(fs_info, "%llu bytes available across all block groups", total_avail);
690 }
691
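/*
 * Convert a byte amount into a number of metadata items to reclaim, using
 * the worst case size of inserting a single item, and never less than 1.
 */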
692 static inline u64 calc_reclaim_items_nr(const struct btrfs_fs_info *fs_info,
693 u64 to_reclaim)
694 {
695 u64 bytes;
696 u64 nr;
697
698 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
699 nr = div64_u64(to_reclaim, bytes);
700 if (!nr)
701 nr = 1;
702 return nr;
703 }
704
705 /*
706 * shrink metadata reservation for delalloc
707 */
708 static void shrink_delalloc(struct btrfs_space_info *space_info,
709 u64 to_reclaim, bool wait_ordered,
710 bool for_preempt)
711 {
712 struct btrfs_fs_info *fs_info = space_info->fs_info;
713 struct btrfs_trans_handle *trans;
714 u64 delalloc_bytes;
715 u64 ordered_bytes;
716 u64 items;
717 long time_left;
718 int loops;
719
720 delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
721 ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
722 if (delalloc_bytes == 0 && ordered_bytes == 0)
723 return;
724
725 	/* Calculate the number of pages we need to flush for space reservation */
726 if (to_reclaim == U64_MAX) {
727 items = U64_MAX;
728 } else {
729 /*
730 * to_reclaim is set to however much metadata we need to
731 * reclaim, but reclaiming that much data doesn't really track
732 * exactly. What we really want to do is reclaim full inode's
733 * worth of reservations, however that's not available to us
734 * here. We will take a fraction of the delalloc bytes for our
735 * flushing loops and hope for the best. Delalloc will expand
736 * the amount we write to cover an entire dirty extent, which
737 * will reclaim the metadata reservation for that range. If
738 * it's not enough subsequent flush stages will be more
739 * aggressive.
740 */
741 to_reclaim = max(to_reclaim, delalloc_bytes >> 3);
742 items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
743 }
744
745 trans = current->journal_info;
746
747 /*
748 * If we are doing more ordered than delalloc we need to just wait on
749 * ordered extents, otherwise we'll waste time trying to flush delalloc
750 * that likely won't give us the space back we need.
751 */
752 if (ordered_bytes > delalloc_bytes && !for_preempt)
753 wait_ordered = true;
754
755 loops = 0;
756 while ((delalloc_bytes || ordered_bytes) && loops < 3) {
757 u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
758 long nr_pages = min_t(u64, temp, LONG_MAX);
759 int async_pages;
760
761 btrfs_start_delalloc_roots(fs_info, nr_pages, true);
762
763 /*
764 * We need to make sure any outstanding async pages are now
765 * processed before we continue. This is because things like
766 * sync_inode() try to be smart and skip writing if the inode is
767 * marked clean. We don't use filemap_fwrite for flushing
768 * because we want to control how many pages we write out at a
769 * time, thus this is the only safe way to make sure we've
770 * waited for outstanding compressed workers to have started
771 * their jobs and thus have ordered extents set up properly.
772 *
773 * This exists because we do not want to wait for each
774 * individual inode to finish its async work, we simply want to
775 * start the IO on everybody, and then come back here and wait
776 * for all of the async work to catch up. Once we're done with
777 * that we know we'll have ordered extents for everything and we
778 * can decide if we wait for that or not.
779 *
780 * If we choose to replace this in the future, make absolutely
781 * sure that the proper waiting is being done in the async case,
782 * as there have been bugs in that area before.
783 */
784 async_pages = atomic_read(&fs_info->async_delalloc_pages);
785 if (!async_pages)
786 goto skip_async;
787
788 /*
789 * We don't want to wait forever, if we wrote less pages in this
790 * loop than we have outstanding, only wait for that number of
791 * pages, otherwise we can wait for all async pages to finish
792 * before continuing.
793 */
794 if (async_pages > nr_pages)
795 async_pages -= nr_pages;
796 else
797 async_pages = 0;
798 wait_event(fs_info->async_submit_wait,
799 atomic_read(&fs_info->async_delalloc_pages) <=
800 async_pages);
801 skip_async:
802 loops++;
803 if (wait_ordered && !trans) {
804 btrfs_wait_ordered_roots(fs_info, items, NULL);
805 } else {
806 time_left = schedule_timeout_killable(1);
807 if (time_left)
808 break;
809 }
810
811 /*
812 * If we are for preemption we just want a one-shot of delalloc
813 * flushing so we can stop flushing if we decide we don't need
814 * to anymore.
815 */
816 if (for_preempt)
817 break;
818
819 spin_lock(&space_info->lock);
820 if (list_empty(&space_info->tickets) &&
821 list_empty(&space_info->priority_tickets)) {
822 spin_unlock(&space_info->lock);
823 break;
824 }
825 spin_unlock(&space_info->lock);
826
827 delalloc_bytes = percpu_counter_sum_positive(
828 &fs_info->delalloc_bytes);
829 ordered_bytes = percpu_counter_sum_positive(
830 &fs_info->ordered_bytes);
831 }
832 }
833
834 /*
835 * Try to flush some data based on policy set by @state. This is only advisory
836 * and may fail for various reasons. The caller is supposed to examine the
837 * state of @space_info to detect the outcome.
838 */
839 static void flush_space(struct btrfs_space_info *space_info, u64 num_bytes,
840 enum btrfs_flush_state state, bool for_preempt)
841 {
842 struct btrfs_fs_info *fs_info = space_info->fs_info;
843 struct btrfs_root *root = fs_info->tree_root;
844 struct btrfs_trans_handle *trans;
845 int nr;
846 int ret = 0;
847
848 switch (state) {
849 case FLUSH_DELAYED_ITEMS_NR:
850 case FLUSH_DELAYED_ITEMS:
851 if (state == FLUSH_DELAYED_ITEMS_NR)
852 nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
853 else
854 nr = -1;
855
856 trans = btrfs_join_transaction_nostart(root);
857 if (IS_ERR(trans)) {
858 ret = PTR_ERR(trans);
859 if (ret == -ENOENT)
860 ret = 0;
861 break;
862 }
863 ret = btrfs_run_delayed_items_nr(trans, nr);
864 btrfs_end_transaction(trans);
865 break;
866 case FLUSH_DELALLOC:
867 case FLUSH_DELALLOC_WAIT:
868 case FLUSH_DELALLOC_FULL:
869 if (state == FLUSH_DELALLOC_FULL)
870 num_bytes = U64_MAX;
871 shrink_delalloc(space_info, num_bytes,
872 state != FLUSH_DELALLOC, for_preempt);
873 break;
874 case FLUSH_DELAYED_REFS_NR:
875 case FLUSH_DELAYED_REFS:
876 trans = btrfs_join_transaction_nostart(root);
877 if (IS_ERR(trans)) {
878 ret = PTR_ERR(trans);
879 if (ret == -ENOENT)
880 ret = 0;
881 break;
882 }
883 if (state == FLUSH_DELAYED_REFS_NR)
884 btrfs_run_delayed_refs(trans, num_bytes);
885 else
886 btrfs_run_delayed_refs(trans, 0);
887 btrfs_end_transaction(trans);
888 break;
889 case ALLOC_CHUNK:
890 case ALLOC_CHUNK_FORCE:
891 trans = btrfs_join_transaction(root);
892 if (IS_ERR(trans)) {
893 ret = PTR_ERR(trans);
894 break;
895 }
896 ret = btrfs_chunk_alloc(trans, space_info,
897 btrfs_get_alloc_profile(fs_info, space_info->flags),
898 (state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
899 CHUNK_ALLOC_FORCE);
900 btrfs_end_transaction(trans);
901
902 if (ret > 0 || ret == -ENOSPC)
903 ret = 0;
904 break;
905 case RUN_DELAYED_IPUTS:
906 /*
907 * If we have pending delayed iputs then we could free up a
908 * bunch of pinned space, so make sure we run the iputs before
909 * we do our pinned bytes check below.
910 */
911 btrfs_run_delayed_iputs(fs_info);
912 btrfs_wait_on_delayed_iputs(fs_info);
913 break;
914 case COMMIT_TRANS:
915 ASSERT(current->journal_info == NULL);
916 /*
917 * We don't want to start a new transaction, just attach to the
918 * current one or wait it fully commits in case its commit is
919 * happening at the moment. Note: we don't use a nostart join
920 * because that does not wait for a transaction to fully commit
921 * (only for it to be unblocked, state TRANS_STATE_UNBLOCKED).
922 */
923 ret = btrfs_commit_current_transaction(root);
924 break;
925 case RESET_ZONES:
926 ret = btrfs_reset_unused_block_groups(space_info, num_bytes);
927 break;
928 default:
929 ret = -ENOSPC;
930 break;
931 }
932
933 trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
934 ret, for_preempt);
935 return;
936 }
937
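/*
 * How much the async flusher should try to reclaim: the bytes queued up in
 * outstanding tickets, plus any amount by which our used space exceeds what
 * total_bytes and the remaining unallocated space can cover.
 */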
938 static u64 btrfs_calc_reclaim_metadata_size(const struct btrfs_space_info *space_info)
939 {
940 u64 used;
941 u64 avail;
942 u64 to_reclaim = space_info->reclaim_size;
943
944 lockdep_assert_held(&space_info->lock);
945
946 avail = calc_available_free_space(space_info, BTRFS_RESERVE_FLUSH_ALL);
947 used = btrfs_space_info_used(space_info, true);
948
949 /*
950 * We may be flushing because suddenly we have less space than we had
951 * before, and now we're well over-committed based on our current free
952 * space. If that's the case add in our overage so we make sure to put
953 * appropriate pressure on the flushing state machine.
954 */
955 if (space_info->total_bytes + avail < used)
956 to_reclaim += used - (space_info->total_bytes + avail);
957
958 return to_reclaim;
959 }
960
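/*
 * Decide whether the preemptive background flusher should run for this
 * space_info. See the comments below for how the clamped threshold and the
 * reclaimable estimate are derived.
 */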
961 static bool need_preemptive_reclaim(const struct btrfs_space_info *space_info)
962 {
963 struct btrfs_fs_info *fs_info = space_info->fs_info;
964 const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv);
965 u64 ordered, delalloc;
966 u64 thresh;
967 u64 used;
968
969 lockdep_assert_held(&space_info->lock);
970
971 /*
972 * We have tickets queued, bail so we don't compete with the async
973 * flushers.
974 */
975 if (space_info->reclaim_size)
976 return false;
977
978 thresh = mult_perc(space_info->total_bytes, 90);
979
980 /* If we're just plain full then async reclaim just slows us down. */
981 if ((space_info->bytes_used + space_info->bytes_reserved +
982 global_rsv_size) >= thresh)
983 return false;
984
985 used = space_info->bytes_may_use + space_info->bytes_pinned;
986
987 /* The total flushable belongs to the global rsv, don't flush. */
988 if (global_rsv_size >= used)
989 return false;
990
991 /*
992 * 128MiB is 1/4 of the maximum global rsv size. If we have less than
993 * that devoted to other reservations then there's no sense in flushing,
994 * we don't have a lot of things that need flushing.
995 */
996 if (used - global_rsv_size <= SZ_128M)
997 return false;
998
999 /*
1000 * If we have over half of the free space occupied by reservations or
1001 * pinned then we want to start flushing.
1002 *
1003 * We do not do the traditional thing here, which is to say
1004 *
1005 * if (used >= ((total_bytes + avail) / 2))
1006 * return 1;
1007 *
1008 * because this doesn't quite work how we want. If we had more than 50%
1009 * of the space_info used by bytes_used and we had 0 available we'd just
1010 * constantly run the background flusher. Instead we want it to kick in
1011 * if our reclaimable space exceeds our clamped free space.
1012 *
1013 * Our clamping range is 2^1 -> 2^8. Practically speaking that means
1014 * the following:
1015 *
1016 * Amount of RAM Minimum threshold Maximum threshold
1017 *
1018 * 256GiB 1GiB 128GiB
1019 * 128GiB 512MiB 64GiB
1020 * 64GiB 256MiB 32GiB
1021 * 32GiB 128MiB 16GiB
1022 * 16GiB 64MiB 8GiB
1023 *
1024 * These are the range our thresholds will fall in, corresponding to how
1025 * much delalloc we need for the background flusher to kick in.
1026 */
1027
1028 thresh = calc_available_free_space(space_info, BTRFS_RESERVE_FLUSH_ALL);
1029 used = space_info->bytes_used + space_info->bytes_reserved +
1030 space_info->bytes_readonly + global_rsv_size;
1031 if (used < space_info->total_bytes)
1032 thresh += space_info->total_bytes - used;
1033 thresh >>= space_info->clamp;
1034
1035 used = space_info->bytes_pinned;
1036
1037 /*
1038 * If we have more ordered bytes than delalloc bytes then we're either
1039 * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
1040 * around. Preemptive flushing is only useful in that it can free up
1041 * space before tickets need to wait for things to finish. In the case
1042 * of ordered extents, preemptively waiting on ordered extents gets us
1043 * nothing, if our reservations are tied up in ordered extents we'll
1044 * simply have to slow down writers by forcing them to wait on ordered
1045 * extents.
1046 *
1047 * In the case that ordered is larger than delalloc, only include the
1048 * block reserves that we would actually be able to directly reclaim
1049 * from. In this case if we're heavy on metadata operations this will
1050 * clearly be heavy enough to warrant preemptive flushing. In the case
1051 * of heavy DIO or ordered reservations, preemptive flushing will just
1052 * waste time and cause us to slow down.
1053 *
1054 * We want to make sure we truly are maxed out on ordered however, so
1055 * cut ordered in half, and if it's still higher than delalloc then we
1056 * can keep flushing. This is to avoid the case where we start
1057 * flushing, and now delalloc == ordered and we stop preemptively
1058 * flushing when we could still have several gigs of delalloc to flush.
1059 */
1060 ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
1061 delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
1062 if (ordered >= delalloc)
1063 used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) +
1064 btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv);
1065 else
1066 used += space_info->bytes_may_use - global_rsv_size;
1067
1068 return (used >= thresh && !btrfs_fs_closing(fs_info) &&
1069 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
1070 }
1071
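/*
 * Try to satisfy a ticket directly from the global block reserve. Only
 * allowed if the ticket opted in to stealing and the global rsv would still
 * keep at least 10% of its size afterwards.
 */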
1072 static bool steal_from_global_rsv(struct btrfs_space_info *space_info,
1073 struct reserve_ticket *ticket)
1074 {
1075 struct btrfs_fs_info *fs_info = space_info->fs_info;
1076 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
1077 u64 min_bytes;
1078
1079 lockdep_assert_held(&space_info->lock);
1080
1081 if (!ticket->steal)
1082 return false;
1083
1084 if (global_rsv->space_info != space_info)
1085 return false;
1086
1087 spin_lock(&global_rsv->lock);
1088 min_bytes = mult_perc(global_rsv->size, 10);
1089 if (global_rsv->reserved < min_bytes + ticket->bytes) {
1090 spin_unlock(&global_rsv->lock);
1091 return false;
1092 }
1093 global_rsv->reserved -= ticket->bytes;
1094 if (global_rsv->reserved < global_rsv->size)
1095 global_rsv->full = false;
1096 spin_unlock(&global_rsv->lock);
1097
1098 remove_ticket(space_info, ticket, 0);
1099 space_info->tickets_id++;
1100
1101 return true;
1102 }
1103
1104 /*
1105 * We've exhausted our flushing, start failing tickets.
1106 *
1107 * @space_info - the space info we were flushing
1108 *
1109 * We call this when we've exhausted our flushing ability and haven't made
1110 * progress in satisfying tickets. The reservation code handles tickets in
1111 * order, so if there is a large ticket first and then smaller ones we could
1112 * very well satisfy the smaller tickets. This will attempt to wake up any
1113 * tickets in the list to catch this case.
1114 *
1115 * This function returns true if it was able to make progress by clearing out
1116 * other tickets, or if it stumbles across a ticket that was smaller than the
1117 * first ticket.
1118 */
1119 static bool maybe_fail_all_tickets(struct btrfs_space_info *space_info)
1120 {
1121 struct btrfs_fs_info *fs_info = space_info->fs_info;
1122 struct reserve_ticket *ticket;
1123 u64 tickets_id = space_info->tickets_id;
1124 const int abort_error = BTRFS_FS_ERROR(fs_info);
1125
1126 trace_btrfs_fail_all_tickets(fs_info, space_info);
1127
1128 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
1129 btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
1130 __btrfs_dump_space_info(space_info);
1131 }
1132
1133 while (!list_empty(&space_info->tickets) &&
1134 tickets_id == space_info->tickets_id) {
1135 ticket = list_first_entry(&space_info->tickets,
1136 struct reserve_ticket, list);
1137 if (unlikely(abort_error)) {
1138 remove_ticket(space_info, ticket, abort_error);
1139 } else {
1140 if (steal_from_global_rsv(space_info, ticket))
1141 return true;
1142
1143 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1144 btrfs_info(fs_info, "failing ticket with %llu bytes",
1145 ticket->bytes);
1146
1147 remove_ticket(space_info, ticket, -ENOSPC);
1148
1149 /*
1150 * We're just throwing tickets away, so more flushing may
1151 * not trip over btrfs_try_granting_tickets, so we need
1152 * to call it here to see if we can make progress with
1153 * the next ticket in the list.
1154 */
1155 btrfs_try_granting_tickets(space_info);
1156 }
1157 }
1158 return (tickets_id != space_info->tickets_id);
1159 }
1160
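/*
 * Run the async metadata flushing state machine for one space_info: walk the
 * flush states in order, restart from the first state whenever a ticket gets
 * satisfied, and start failing tickets once we have gone through a few full
 * cycles (including transaction commits) without making progress.
 */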
1161 static void do_async_reclaim_metadata_space(struct btrfs_space_info *space_info)
1162 {
1163 struct btrfs_fs_info *fs_info = space_info->fs_info;
1164 u64 to_reclaim;
1165 enum btrfs_flush_state flush_state;
1166 int commit_cycles = 0;
1167 u64 last_tickets_id;
1168 enum btrfs_flush_state final_state;
1169
1170 if (btrfs_is_zoned(fs_info))
1171 final_state = RESET_ZONES;
1172 else
1173 final_state = COMMIT_TRANS;
1174
1175 spin_lock(&space_info->lock);
1176 to_reclaim = btrfs_calc_reclaim_metadata_size(space_info);
1177 if (!to_reclaim) {
1178 space_info->flush = false;
1179 spin_unlock(&space_info->lock);
1180 return;
1181 }
1182 last_tickets_id = space_info->tickets_id;
1183 spin_unlock(&space_info->lock);
1184
1185 flush_state = FLUSH_DELAYED_ITEMS_NR;
1186 do {
1187 flush_space(space_info, to_reclaim, flush_state, false);
1188 spin_lock(&space_info->lock);
1189 if (list_empty(&space_info->tickets)) {
1190 space_info->flush = false;
1191 spin_unlock(&space_info->lock);
1192 return;
1193 }
1194 to_reclaim = btrfs_calc_reclaim_metadata_size(space_info);
1195 if (last_tickets_id == space_info->tickets_id) {
1196 flush_state++;
1197 } else {
1198 last_tickets_id = space_info->tickets_id;
1199 flush_state = FLUSH_DELAYED_ITEMS_NR;
1200 if (commit_cycles)
1201 commit_cycles--;
1202 }
1203
1204 /*
1205 * We do not want to empty the system of delalloc unless we're
1206 * under heavy pressure, so allow one trip through the flushing
1207 * logic before we start doing a FLUSH_DELALLOC_FULL.
1208 */
1209 if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles)
1210 flush_state++;
1211
1212 /*
1213 * We don't want to force a chunk allocation until we've tried
1214 * pretty hard to reclaim space. Think of the case where we
1215 * freed up a bunch of space and so have a lot of pinned space
1216 		 * to reclaim.  We would rather use that than possibly create an
1217 * underutilized metadata chunk. So if this is our first run
1218 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
1219 * commit the transaction. If nothing has changed the next go
1220 * around then we can force a chunk allocation.
1221 */
1222 if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
1223 flush_state++;
1224
1225 if (flush_state > final_state) {
1226 commit_cycles++;
1227 if (commit_cycles > 2) {
1228 if (maybe_fail_all_tickets(space_info)) {
1229 flush_state = FLUSH_DELAYED_ITEMS_NR;
1230 commit_cycles--;
1231 } else {
1232 space_info->flush = false;
1233 }
1234 } else {
1235 flush_state = FLUSH_DELAYED_ITEMS_NR;
1236 }
1237 }
1238 spin_unlock(&space_info->lock);
1239 } while (flush_state <= final_state);
1240 }
1241
1242 /*
1243 * This is for normal flushers, it can wait as much time as needed. We will
1244 * loop and continuously try to flush as long as we are making progress. We
1245 * count progress as clearing off tickets each time we have to loop.
1246 */
1247 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
1248 {
1249 struct btrfs_fs_info *fs_info;
1250 struct btrfs_space_info *space_info;
1251
1252 fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
1253 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1254 do_async_reclaim_metadata_space(space_info);
1255 for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++) {
1256 if (space_info->sub_group[i])
1257 do_async_reclaim_metadata_space(space_info->sub_group[i]);
1258 }
1259 }
1260
1261 /*
1262 * This handles pre-flushing of metadata space before we get to the point that
1263 * we need to start blocking threads on tickets. The logic here is different
1264 * from the other flush paths because it doesn't rely on tickets to tell us how
1265 * much we need to flush, instead it attempts to keep us below the 80% full
1266 * watermark of space by flushing whichever reservation pool is currently the
1267 * largest.
1268 */
1269 static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
1270 {
1271 struct btrfs_fs_info *fs_info;
1272 struct btrfs_space_info *space_info;
1273 struct btrfs_block_rsv *delayed_block_rsv;
1274 struct btrfs_block_rsv *delayed_refs_rsv;
1275 struct btrfs_block_rsv *global_rsv;
1276 struct btrfs_block_rsv *trans_rsv;
1277 int loops = 0;
1278
1279 fs_info = container_of(work, struct btrfs_fs_info,
1280 preempt_reclaim_work);
1281 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1282 delayed_block_rsv = &fs_info->delayed_block_rsv;
1283 delayed_refs_rsv = &fs_info->delayed_refs_rsv;
1284 global_rsv = &fs_info->global_block_rsv;
1285 trans_rsv = &fs_info->trans_block_rsv;
1286
1287 spin_lock(&space_info->lock);
1288 while (need_preemptive_reclaim(space_info)) {
1289 enum btrfs_flush_state flush;
1290 u64 delalloc_size = 0;
1291 u64 to_reclaim, block_rsv_size;
1292 const u64 global_rsv_size = btrfs_block_rsv_reserved(global_rsv);
1293 const u64 bytes_may_use = space_info->bytes_may_use;
1294 const u64 bytes_pinned = space_info->bytes_pinned;
1295
1296 spin_unlock(&space_info->lock);
1297 /*
1298 * We don't have a precise counter for the metadata being
1299 * reserved for delalloc, so we'll approximate it by subtracting
1300 * out the block rsv's space from the bytes_may_use. If that
1301 * amount is higher than the individual reserves, then we can
1302 * assume it's tied up in delalloc reservations.
1303 */
1304 block_rsv_size = global_rsv_size +
1305 btrfs_block_rsv_reserved(delayed_block_rsv) +
1306 btrfs_block_rsv_reserved(delayed_refs_rsv) +
1307 btrfs_block_rsv_reserved(trans_rsv);
1308 if (block_rsv_size < bytes_may_use)
1309 delalloc_size = bytes_may_use - block_rsv_size;
1310
1311 /*
1312 * We don't want to include the global_rsv in our calculation,
1313 * because that's space we can't touch. Subtract it from the
1314 * block_rsv_size for the next checks.
1315 */
1316 block_rsv_size -= global_rsv_size;
1317
1318 /*
1319 * We really want to avoid flushing delalloc too much, as it
1320 * could result in poor allocation patterns, so only flush it if
1321 * it's larger than the rest of the pools combined.
1322 */
1323 if (delalloc_size > block_rsv_size) {
1324 to_reclaim = delalloc_size;
1325 flush = FLUSH_DELALLOC;
1326 } else if (bytes_pinned >
1327 (btrfs_block_rsv_reserved(delayed_block_rsv) +
1328 btrfs_block_rsv_reserved(delayed_refs_rsv))) {
1329 to_reclaim = bytes_pinned;
1330 flush = COMMIT_TRANS;
1331 } else if (btrfs_block_rsv_reserved(delayed_block_rsv) >
1332 btrfs_block_rsv_reserved(delayed_refs_rsv)) {
1333 to_reclaim = btrfs_block_rsv_reserved(delayed_block_rsv);
1334 flush = FLUSH_DELAYED_ITEMS_NR;
1335 } else {
1336 to_reclaim = btrfs_block_rsv_reserved(delayed_refs_rsv);
1337 flush = FLUSH_DELAYED_REFS_NR;
1338 }
1339
1340 loops++;
1341
1342 /*
1343 * We don't want to reclaim everything, just a portion, so scale
1344 * down the to_reclaim by 1/4. If it takes us down to 0,
1345 		 * reclaim 1 item's worth.
1346 */
1347 to_reclaim >>= 2;
1348 if (!to_reclaim)
1349 to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
1350 flush_space(space_info, to_reclaim, flush, true);
1351 cond_resched();
1352 spin_lock(&space_info->lock);
1353 }
1354
1355 /* We only went through once, back off our clamping. */
1356 if (loops == 1 && !space_info->reclaim_size)
1357 space_info->clamp = max(1, space_info->clamp - 1);
1358 trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
1359 spin_unlock(&space_info->lock);
1360 }
1361
1362 /*
1363 * FLUSH_DELALLOC_WAIT:
1364 * Space is freed from flushing delalloc in one of two ways.
1365 *
1366 * 1) compression is on and we allocate less space than we reserved
1367 * 2) we are overwriting existing space
1368 *
1369 * For #1 that extra space is reclaimed as soon as the delalloc pages are
1370 * COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
1371 * length to ->bytes_reserved, and subtracts the reserved space from
1372 * ->bytes_may_use.
1373 *
1374 * For #2 this is trickier. Once the ordered extent runs we will drop the
1375 * extent in the range we are overwriting, which creates a delayed ref for
1376 * that freed extent. This however is not reclaimed until the transaction
1377 * commits, thus the next stages.
1378 *
1379 * RUN_DELAYED_IPUTS
1380 * If we are freeing inodes, we want to make sure all delayed iputs have
1381 * completed, because they could have been on an inode with i_nlink == 0, and
1382 * thus have been truncated and freed up space. But again this space is not
1383 * immediately reusable, it comes in the form of a delayed ref, which must be
1384 * run and then the transaction must be committed.
1385 *
1386 * COMMIT_TRANS
1387 * This is where we reclaim all of the pinned space generated by running the
1388 * iputs
1389 *
1390 * RESET_ZONES
1391 * This state works only for the zoned mode. We scan the unused block group
1392 * list and reset the zones and reuse the block group.
1393 *
1394 * ALLOC_CHUNK_FORCE
1395 * For data we start with alloc chunk force, however we could have been full
1396 * before, and then the transaction commit could have freed new block groups,
1397 * so if we now have space to allocate do the force chunk allocation.
1398 */
1399 static const enum btrfs_flush_state data_flush_states[] = {
1400 FLUSH_DELALLOC_FULL,
1401 RUN_DELAYED_IPUTS,
1402 COMMIT_TRANS,
1403 RESET_ZONES,
1404 ALLOC_CHUNK_FORCE,
1405 };
1406
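/*
 * Async data flushing: keep force-allocating chunks while the space_info is
 * not full, then walk data_flush_states, and fail the remaining tickets if a
 * full pass once we are full makes no progress.
 */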
1407 static void do_async_reclaim_data_space(struct btrfs_space_info *space_info)
1408 {
1409 struct btrfs_fs_info *fs_info = space_info->fs_info;
1410 u64 last_tickets_id;
1411 enum btrfs_flush_state flush_state = 0;
1412
1413 spin_lock(&space_info->lock);
1414 if (list_empty(&space_info->tickets)) {
1415 space_info->flush = false;
1416 spin_unlock(&space_info->lock);
1417 return;
1418 }
1419 last_tickets_id = space_info->tickets_id;
1420 spin_unlock(&space_info->lock);
1421
1422 while (!space_info->full) {
1423 flush_space(space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1424 spin_lock(&space_info->lock);
1425 if (list_empty(&space_info->tickets)) {
1426 space_info->flush = false;
1427 spin_unlock(&space_info->lock);
1428 return;
1429 }
1430
1431 /* Something happened, fail everything and bail. */
1432 if (unlikely(BTRFS_FS_ERROR(fs_info)))
1433 goto aborted_fs;
1434 last_tickets_id = space_info->tickets_id;
1435 spin_unlock(&space_info->lock);
1436 }
1437
1438 while (flush_state < ARRAY_SIZE(data_flush_states)) {
1439 flush_space(space_info, U64_MAX,
1440 data_flush_states[flush_state], false);
1441 spin_lock(&space_info->lock);
1442 if (list_empty(&space_info->tickets)) {
1443 space_info->flush = false;
1444 spin_unlock(&space_info->lock);
1445 return;
1446 }
1447
1448 if (last_tickets_id == space_info->tickets_id) {
1449 flush_state++;
1450 } else {
1451 last_tickets_id = space_info->tickets_id;
1452 flush_state = 0;
1453 }
1454
1455 if (flush_state >= ARRAY_SIZE(data_flush_states)) {
1456 if (space_info->full) {
1457 if (maybe_fail_all_tickets(space_info))
1458 flush_state = 0;
1459 else
1460 space_info->flush = false;
1461 } else {
1462 flush_state = 0;
1463 }
1464
1465 /* Something happened, fail everything and bail. */
1466 if (unlikely(BTRFS_FS_ERROR(fs_info)))
1467 goto aborted_fs;
1468
1469 }
1470 spin_unlock(&space_info->lock);
1471 }
1472 return;
1473
1474 aborted_fs:
1475 maybe_fail_all_tickets(space_info);
1476 space_info->flush = false;
1477 spin_unlock(&space_info->lock);
1478 }
1479
1480 static void btrfs_async_reclaim_data_space(struct work_struct *work)
1481 {
1482 struct btrfs_fs_info *fs_info;
1483 struct btrfs_space_info *space_info;
1484
1485 fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
1486 space_info = fs_info->data_sinfo;
1487 do_async_reclaim_data_space(space_info);
1488 for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++)
1489 if (space_info->sub_group[i])
1490 do_async_reclaim_data_space(space_info->sub_group[i]);
1491 }
1492
1493 void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
1494 {
1495 INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
1496 INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
1497 INIT_WORK(&fs_info->preempt_reclaim_work,
1498 btrfs_preempt_reclaim_metadata_space);
1499 }
1500
1501 static const enum btrfs_flush_state priority_flush_states[] = {
1502 FLUSH_DELAYED_ITEMS_NR,
1503 FLUSH_DELAYED_ITEMS,
1504 RESET_ZONES,
1505 ALLOC_CHUNK,
1506 };
1507
1508 static const enum btrfs_flush_state evict_flush_states[] = {
1509 FLUSH_DELAYED_ITEMS_NR,
1510 FLUSH_DELAYED_ITEMS,
1511 FLUSH_DELAYED_REFS_NR,
1512 FLUSH_DELAYED_REFS,
1513 FLUSH_DELALLOC,
1514 FLUSH_DELALLOC_WAIT,
1515 FLUSH_DELALLOC_FULL,
1516 ALLOC_CHUNK,
1517 COMMIT_TRANS,
1518 RESET_ZONES,
1519 };
1520
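/* Return true if the ticket has been fully satisfied (->bytes reduced to 0). */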
1521 static bool is_ticket_served(struct reserve_ticket *ticket)
1522 {
1523 bool ret;
1524
1525 spin_lock(&ticket->lock);
1526 ret = (ticket->bytes == 0);
1527 spin_unlock(&ticket->lock);
1528
1529 return ret;
1530 }
1531
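/*
 * Priority (non-async) metadata reclaim: run the given flush states
 * synchronously on behalf of one ticket, then either steal from the global
 * rsv or fail the ticket with -ENOSPC (or the fs abort error).
 */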
1532 static void priority_reclaim_metadata_space(struct btrfs_space_info *space_info,
1533 struct reserve_ticket *ticket,
1534 const enum btrfs_flush_state *states,
1535 int states_nr)
1536 {
1537 struct btrfs_fs_info *fs_info = space_info->fs_info;
1538 u64 to_reclaim;
1539 int flush_state = 0;
1540
1541 /*
1542 * This is the priority reclaim path, so to_reclaim could be >0 still
1543 * because we may have only satisfied the priority tickets and still
1544 * left non priority tickets on the list. We would then have
1545 * to_reclaim but ->bytes == 0.
1546 */
1547 if (is_ticket_served(ticket))
1548 return;
1549
1550 spin_lock(&space_info->lock);
1551 to_reclaim = btrfs_calc_reclaim_metadata_size(space_info);
1552 spin_unlock(&space_info->lock);
1553
1554 while (flush_state < states_nr) {
1555 flush_space(space_info, to_reclaim, states[flush_state], false);
1556 if (is_ticket_served(ticket))
1557 return;
1558 flush_state++;
1559 }
1560
1561 spin_lock(&space_info->lock);
1562 /*
1563 * Attempt to steal from the global rsv if we can. However, if the fs was
1564 * turned into error mode due to a transaction abort when flushing space
1565 * above, fail with the abort error instead of returning success to the
1566 * caller even when we could steal from the global rsv - this is just to
1567 * have the caller fail immediately instead of later when trying to
1568 * modify the fs, making it easier to debug -ENOSPC problems.
1569 */
1570 if (unlikely(BTRFS_FS_ERROR(fs_info)))
1571 remove_ticket(space_info, ticket, BTRFS_FS_ERROR(fs_info));
1572 else if (!steal_from_global_rsv(space_info, ticket))
1573 remove_ticket(space_info, ticket, -ENOSPC);
1574
1575 /*
1576 * We must run try_granting_tickets here because we could be a large
1577 * ticket in front of a smaller ticket that can now be satisfied with
1578 * the available space.
1579 */
1580 btrfs_try_granting_tickets(space_info);
1581 spin_unlock(&space_info->lock);
1582 }
1583
1584 static void priority_reclaim_data_space(struct btrfs_space_info *space_info,
1585 struct reserve_ticket *ticket)
1586 {
1587 /* We could have been granted before we got here. */
1588 if (is_ticket_served(ticket))
1589 return;
1590
1591 spin_lock(&space_info->lock);
1592 while (!space_info->full) {
1593 spin_unlock(&space_info->lock);
1594 flush_space(space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1595 if (is_ticket_served(ticket))
1596 return;
1597 spin_lock(&space_info->lock);
1598 }
1599
1600 remove_ticket(space_info, ticket, -ENOSPC);
1601 btrfs_try_granting_tickets(space_info);
1602 spin_unlock(&space_info->lock);
1603 }
1604
1605 static void wait_reserve_ticket(struct btrfs_space_info *space_info,
1606 struct reserve_ticket *ticket)
1607
1608 {
1609 DEFINE_WAIT(wait);
1610
1611 spin_lock(&ticket->lock);
1612 while (ticket->bytes > 0 && ticket->error == 0) {
1613 int ret;
1614
1615 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
1616 spin_unlock(&ticket->lock);
1617 if (ret) {
1618 /*
1619 * Delete us from the list. After we unlock the space
1620 * info, we don't want the async reclaim job to reserve
1621 * space for this ticket. If that would happen, then the
1622 * ticket's task would not know that space was reserved
1623 * despite getting an error, resulting in a space leak
1624 * (bytes_may_use counter of our space_info).
1625 */
1626 spin_lock(&space_info->lock);
1627 remove_ticket(space_info, ticket, -EINTR);
1628 spin_unlock(&space_info->lock);
1629 return;
1630 }
1631
1632 schedule();
1633
1634 finish_wait(&ticket->wait, &wait);
1635 spin_lock(&ticket->lock);
1636 }
1637 spin_unlock(&ticket->lock);
1638 }
1639
1640 /*
1641 * Do the appropriate flushing and waiting for a ticket.
1642 *
1643 * @space_info: space info for the reservation
1644 * @ticket: ticket for the reservation
1645 * @start_ns: timestamp when the reservation started
1646 * @orig_bytes: amount of bytes originally reserved
1647 * @flush: how much we can flush
1648 *
1649 * This does the work of figuring out how to flush for the ticket, waiting for
1650 * the reservation, and returning the appropriate error if there is one.
1651 */
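/*
 * As a quick map of the modes handled below: FLUSH_DATA, FLUSH_ALL and
 * FLUSH_ALL_STEAL wait for the async reclaim workers via wait_reserve_ticket(),
 * FLUSH_LIMIT and FLUSH_EVICT run priority metadata reclaim in the calling
 * task, and FLUSH_FREE_SPACE_INODE runs priority data reclaim in the calling
 * task.
 */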
1652 static int handle_reserve_ticket(struct btrfs_space_info *space_info,
1653 struct reserve_ticket *ticket,
1654 u64 start_ns, u64 orig_bytes,
1655 enum btrfs_reserve_flush_enum flush)
1656 {
1657 int ret;
1658
1659 switch (flush) {
1660 case BTRFS_RESERVE_FLUSH_DATA:
1661 case BTRFS_RESERVE_FLUSH_ALL:
1662 case BTRFS_RESERVE_FLUSH_ALL_STEAL:
1663 wait_reserve_ticket(space_info, ticket);
1664 break;
1665 case BTRFS_RESERVE_FLUSH_LIMIT:
1666 priority_reclaim_metadata_space(space_info, ticket,
1667 priority_flush_states,
1668 ARRAY_SIZE(priority_flush_states));
1669 break;
1670 case BTRFS_RESERVE_FLUSH_EVICT:
1671 priority_reclaim_metadata_space(space_info, ticket,
1672 evict_flush_states,
1673 ARRAY_SIZE(evict_flush_states));
1674 break;
1675 case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
1676 priority_reclaim_data_space(space_info, ticket);
1677 break;
1678 default:
1679 ASSERT(0, "flush=%d", flush);
1680 break;
1681 }
1682
1683 ret = ticket->error;
1684 ASSERT(list_empty(&ticket->list));
1685 /*
1686 * Check that we can't have an error set if the reservation succeeded,
1687 * as that would confuse tasks and lead them to error out without
1688 * releasing reserved space (if an error happens the expectation is that
1689 * space wasn't reserved at all).
1690 */
1691 ASSERT(!(ticket->bytes == 0 && ticket->error),
1692 "ticket->bytes=%llu ticket->error=%d", ticket->bytes, ticket->error);
1693 trace_btrfs_reserve_ticket(space_info->fs_info, space_info->flags,
1694 orig_bytes, start_ns, flush, ticket->error);
1695 return ret;
1696 }
1697
1698 /*
1699 * This returns true if this flush mode will go through the ordinary flushing
1700 * code.
1701 */
1702 static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
1703 {
1704 return (flush == BTRFS_RESERVE_FLUSH_ALL) ||
1705 (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
1706 }
1707
1708 static inline void maybe_clamp_preempt(struct btrfs_space_info *space_info)
1709 {
1710 struct btrfs_fs_info *fs_info = space_info->fs_info;
1711 u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
1712 u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
1713
1714 /*
1715 * If we're heavy on ordered operations then clamping won't help us. We
1716 * need to clamp specifically to keep up with dirtying buffered
1717 * writers, because there's not a 1:1 correlation of writing delalloc
1718 * and freeing space, like there is with flushing delayed refs or
1719 * delayed nodes. If we're already more ordered than delalloc then
1720 * we're keeping up, otherwise we aren't and should probably clamp.
1721 */
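/*
 * Illustration with hypothetical numbers: 4GiB of outstanding delalloc but
 * only 1GiB of ordered extents means writeback is not keeping up with
 * dirtying, so ->clamp is bumped (saturating at 8). Each increment tightens
 * the threshold used by need_preemptive_reclaim(), making the preemptive
 * background flushing kick in earlier.
 */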
1722 if (ordered < delalloc)
1723 space_info->clamp = min(space_info->clamp + 1, 8);
1724 }
1725
1726 static inline bool can_steal(enum btrfs_reserve_flush_enum flush)
1727 {
1728 return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1729 flush == BTRFS_RESERVE_FLUSH_EVICT);
1730 }
1731
1732 /*
1733 * NO_FLUSH and FLUSH_EMERGENCY don't want to create a ticket, they just want to
1734 * fail as quickly as possible.
1735 */
1736 static inline bool can_ticket(enum btrfs_reserve_flush_enum flush)
1737 {
1738 return (flush != BTRFS_RESERVE_NO_FLUSH &&
1739 flush != BTRFS_RESERVE_FLUSH_EMERGENCY);
1740 }
1741
1742 /*
1743 * Try to reserve bytes from the block_rsv's space.
1744 *
1745 * @space_info: space info we want to allocate from
1746 * @orig_bytes: number of bytes we want
1747 * @flush: whether or not we can flush to make our reservation
1748 *
1749 * This will reserve orig_bytes number of bytes from the space info associated
1750 * with the block_rsv. If there is not enough space it will make an attempt to
1751 * flush out space to make room. It will do this by flushing delalloc if
1752 * possible or committing the transaction. If flush is BTRFS_RESERVE_NO_FLUSH
1753 * then no attempts to regain reservations will be made and this will fail if
1754 * there is not enough space already.
1755 */
1756 static int reserve_bytes(struct btrfs_space_info *space_info, u64 orig_bytes,
1757 enum btrfs_reserve_flush_enum flush)
1758 {
1759 struct btrfs_fs_info *fs_info = space_info->fs_info;
1760 struct work_struct *async_work;
1761 struct reserve_ticket ticket;
1762 u64 start_ns = 0;
1763 u64 used;
1764 int ret = -ENOSPC;
1765 bool pending_tickets;
1766
1767 ASSERT(orig_bytes, "orig_bytes=%llu", orig_bytes);
1768 /*
1769 * If we have a transaction handle (current->journal_info != NULL), then
1770 * the flush method must be neither BTRFS_RESERVE_FLUSH_ALL* nor
1771 * BTRFS_RESERVE_FLUSH_EVICT, as we could deadlock because those
1772 * flushing methods can trigger transaction commits.
1773 */
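/*
 * For example (hypothetical caller): a task that called btrfs_start_transaction()
 * and then needs more metadata space must use a non-committing mode such as
 * BTRFS_RESERVE_FLUSH_LIMIT or BTRFS_RESERVE_NO_FLUSH, since a mode like
 * BTRFS_RESERVE_FLUSH_ALL could end up waiting on a transaction commit that
 * cannot complete while this task still holds its transaction handle.
 */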
1774 if (current->journal_info) {
1775 /* One assert per line for easier debugging. */
1776 ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL, "flush=%d", flush);
1777 ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL_STEAL, "flush=%d", flush);
1778 ASSERT(flush != BTRFS_RESERVE_FLUSH_EVICT, "flush=%d", flush);
1779 }
1780
1781 if (flush == BTRFS_RESERVE_FLUSH_DATA)
1782 async_work = &fs_info->async_data_reclaim_work;
1783 else
1784 async_work = &fs_info->async_reclaim_work;
1785
1786 spin_lock(&space_info->lock);
1787 used = btrfs_space_info_used(space_info, true);
1788
1789 /*
1790 * We don't want NO_FLUSH allocations to jump ahead of everybody else; they
1791 * can generally handle ENOSPC in a different way, so treat them the same as
1792 * normal flushers when it comes to skipping pending tickets.
1793 */
1794 if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
1795 pending_tickets = !list_empty(&space_info->tickets) ||
1796 !list_empty(&space_info->priority_tickets);
1797 else
1798 pending_tickets = !list_empty(&space_info->priority_tickets);
1799
1800 /*
1801 * Carry on if we have enough space (short-circuit) OR call
1802 * can_overcommit() to ensure we can overcommit to continue.
1803 */
1804 if (!pending_tickets &&
1805 ((used + orig_bytes <= space_info->total_bytes) ||
1806 can_overcommit(space_info, used, orig_bytes, flush))) {
1807 btrfs_space_info_update_bytes_may_use(space_info, orig_bytes);
1808 ret = 0;
1809 }
1810
1811 /*
1812 * Things are dire, we need to make a reservation so we don't abort. We
1813 * will let this reservation go through as long as we have actual space
1814 * left to allocate for the block.
1815 */
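/*
 * Illustration with hypothetical numbers: total_bytes = 8GiB, real usage
 * (roughly used + reserved + pinned + readonly) = 6GiB and bytes_may_use =
 * 3GiB. The normal check above sees 9GiB of consumption and fails, but after
 * subtracting bytes_may_use the emergency check only requires
 * 6GiB + orig_bytes <= 8GiB, i.e. the reservation goes through as long as it
 * fits in space not yet backed by real allocations.
 */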
1816 if (ret && unlikely(flush == BTRFS_RESERVE_FLUSH_EMERGENCY)) {
1817 used -= space_info->bytes_may_use;
1818 if (used + orig_bytes <= space_info->total_bytes) {
1819 btrfs_space_info_update_bytes_may_use(space_info, orig_bytes);
1820 ret = 0;
1821 }
1822 }
1823
1824 /*
1825 * If we couldn't make a reservation then setup our reservation ticket
1826 * and kick the async worker if it's not already running.
1827 *
1828 * If we are a priority flusher then we just need to add our ticket to
1829 * the list and we will do our own flushing further down.
1830 */
1831 if (ret && can_ticket(flush)) {
1832 ticket.bytes = orig_bytes;
1833 ticket.error = 0;
1834 space_info->reclaim_size += ticket.bytes;
1835 init_waitqueue_head(&ticket.wait);
1836 spin_lock_init(&ticket.lock);
1837 ticket.steal = can_steal(flush);
1838 if (trace_btrfs_reserve_ticket_enabled())
1839 start_ns = ktime_get_ns();
1840
1841 if (flush == BTRFS_RESERVE_FLUSH_ALL ||
1842 flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1843 flush == BTRFS_RESERVE_FLUSH_DATA) {
1844 list_add_tail(&ticket.list, &space_info->tickets);
1845 if (!space_info->flush) {
1846 /*
1847 * We were forced to add a reserve ticket, so
1848 * our preemptive flushing is unable to keep
1849 * up. Clamp down on the threshold for the
1850 * preemptive flushing in order to keep up with
1851 * the workload.
1852 */
1853 maybe_clamp_preempt(space_info);
1854
1855 space_info->flush = true;
1856 trace_btrfs_trigger_flush(fs_info,
1857 space_info->flags,
1858 orig_bytes, flush,
1859 "enospc");
1860 queue_work(system_dfl_wq, async_work);
1861 }
1862 } else {
1863 list_add_tail(&ticket.list,
1864 &space_info->priority_tickets);
1865 }
1866 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
1867 /*
1868 * We will do the space reservation dance during log replay,
1869 * which means we won't have fs_info->fs_root set, so don't do
1870 * the async reclaim as we will panic.
1871 */
1872 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
1873 !work_busy(&fs_info->preempt_reclaim_work) &&
1874 need_preemptive_reclaim(space_info)) {
1875 trace_btrfs_trigger_flush(fs_info, space_info->flags,
1876 orig_bytes, flush, "preempt");
1877 queue_work(system_dfl_wq,
1878 &fs_info->preempt_reclaim_work);
1879 }
1880 }
1881 spin_unlock(&space_info->lock);
1882 if (!ret || !can_ticket(flush))
1883 return ret;
1884
1885 return handle_reserve_ticket(space_info, &ticket, start_ns, orig_bytes, flush);
1886 }
1887
1888 /*
1889 * Try to reserve metadata bytes from the block_rsv's space.
1890 *
1891 * @space_info: the space_info we're allocating for
1892 * @orig_bytes: number of bytes we want
1893 * @flush: whether or not we can flush to make our reservation
1894 *
1895 * This will reserve orig_bytes number of bytes from the space info associated
1896 * with the block_rsv. If there is not enough space it will make an attempt to
1897 * flush out space to make room. It will do this by flushing delalloc if
1898 * possible or committing the transaction. If flush is BTRFS_RESERVE_NO_FLUSH
1899 * then no attempts to regain reservations will be made and this will fail if
1900 * there is not enough space already.
1901 */
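/*
 * A minimal usage sketch (hypothetical caller), reserving worst case space for
 * inserting one item with a flush mode that is safe while holding a
 * transaction handle:
 *
 *	u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
 *	int ret = btrfs_reserve_metadata_bytes(space_info, bytes,
 *					       BTRFS_RESERVE_FLUSH_LIMIT);
 *	if (ret)
 *		return ret;
 */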
1902 int btrfs_reserve_metadata_bytes(struct btrfs_space_info *space_info,
1903 u64 orig_bytes,
1904 enum btrfs_reserve_flush_enum flush)
1905 {
1906 int ret;
1907
1908 ret = reserve_bytes(space_info, orig_bytes, flush);
1909 if (ret == -ENOSPC) {
1910 struct btrfs_fs_info *fs_info = space_info->fs_info;
1911
1912 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1913 space_info->flags, orig_bytes, 1);
1914
1915 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1916 btrfs_dump_space_info(space_info, orig_bytes, false);
1917 }
1918 return ret;
1919 }
1920
1921 /*
1922 * Try to reserve data bytes for an allocation.
1923 *
1924 * @space_info: the space_info we're allocating for
1925 * @bytes: number of bytes we need
1926 * @flush: how we are allowed to flush
1927 *
1928 * This will reserve bytes from the data space info. If there is not enough
1929 * space then we will attempt to flush space as specified by flush.
1930 */
1931 int btrfs_reserve_data_bytes(struct btrfs_space_info *space_info, u64 bytes,
1932 enum btrfs_reserve_flush_enum flush)
1933 {
1934 struct btrfs_fs_info *fs_info = space_info->fs_info;
1935 int ret;
1936
1937 ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
1938 flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE ||
1939 flush == BTRFS_RESERVE_NO_FLUSH, "flush=%d", flush);
1940 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA,
1941 "current->journal_info=0x%lx flush=%d",
1942 (unsigned long)current->journal_info, flush);
1943
1944 ret = reserve_bytes(space_info, bytes, flush);
1945 if (ret == -ENOSPC) {
1946 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1947 space_info->flags, bytes, 1);
1948 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1949 btrfs_dump_space_info(space_info, bytes, false);
1950 }
1951 return ret;
1952 }
1953
1954 /* Dump all the space infos when we abort a transaction due to ENOSPC. */
1955 __cold void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info)
1956 {
1957 struct btrfs_space_info *space_info;
1958
1959 btrfs_info(fs_info, "dumping space info:");
1960 list_for_each_entry(space_info, &fs_info->space_info, list) {
1961 spin_lock(&space_info->lock);
1962 __btrfs_dump_space_info(space_info);
1963 spin_unlock(&space_info->lock);
1964 }
1965 dump_global_block_rsv(fs_info);
1966 }
1967
1968 /*
1969 * Account the unused space of all the readonly block groups in the space_info.
1970 * Takes mirrors into account.
1971 */
1972 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
1973 {
1974 struct btrfs_block_group *block_group;
1975 u64 free_bytes = 0;
1976 int factor;
1977
1978 /* It's df, we don't care if it's racy */
1979 if (data_race(list_empty(&sinfo->ro_bgs)))
1980 return 0;
1981
1982 spin_lock(&sinfo->lock);
1983 list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
1984 spin_lock(&block_group->lock);
1985
1986 if (!block_group->ro) {
1987 spin_unlock(&block_group->lock);
1988 continue;
1989 }
1990
1991 factor = btrfs_bg_type_to_factor(block_group->flags);
1992 free_bytes += (block_group->length -
1993 block_group->used) * factor;
1994
1995 spin_unlock(&block_group->lock);
1996 }
1997 spin_unlock(&sinfo->lock);
1998
1999 return free_bytes;
2000 }
2001
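/*
 * Return x as an integer percentage of y. A minimal illustration with
 * hypothetical values: calc_pct_ratio(7 * SZ_1G, 10 * SZ_1G) == 70. If
 * multiplying x by 100 would overflow a u64, both operands are shifted down by
 * 10 bits and the calculation is retried, trading a little precision for
 * correctness on very large inputs.
 */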
2002 static u64 calc_pct_ratio(u64 x, u64 y)
2003 {
2004 int ret;
2005
2006 if (!y)
2007 return 0;
2008 again:
2009 ret = check_mul_overflow(100, x, &x);
2010 if (ret)
2011 goto lose_precision;
2012 return div64_u64(x, y);
2013 lose_precision:
2014 x >>= 10;
2015 y >>= 10;
2016 if (!y)
2017 y = 1;
2018 goto again;
2019 }
2020
2021 /*
2022 * A reasonable buffer for unallocated space is 10 data block_groups.
2023 * If we claw this back repeatedly, we can still achieve efficient
2024 * utilization when near full, and not do too much reclaim while
2025 * always maintaining a solid buffer for workloads that quickly
2026 * allocate and pressure the unallocated space.
2027 */
2028 static u64 calc_unalloc_target(struct btrfs_fs_info *fs_info)
2029 {
2030 u64 chunk_sz = calc_effective_data_chunk_size(fs_info);
2031
2032 return BTRFS_UNALLOC_BLOCK_GROUP_TARGET * chunk_sz;
2033 }
2034
2035 /*
2036 * The fundamental goal of automatic reclaim is to protect the filesystem's
2037 * unallocated space and thus minimize the probability of the filesystem going
2038 * read only when a metadata allocation failure causes a transaction abort.
2039 *
2040 * However, relocations happen into the space_info's unused space, therefore
2041 * automatic reclaim must also back off as that space runs low. There is no
2042 * value in doing trivial "relocations" of re-writing the same block group
2043 * into a fresh one.
2044 *
2045 * Furthermore, we want to avoid doing too much reclaim even if there are good
2046 * candidates. This is because the allocator is pretty good at filling up the
2047 * holes with writes. So we want to do just enough reclaim to try and stay
2048 * safe from running out of unallocated space but not be wasteful about it.
2049 *
2050 * Therefore, the dynamic reclaim threshold is calculated as follows:
2051 * - calculate a target unallocated amount of 10 block group sized chunks
2052 * - ratchet up the intensity of reclaim depending on how far we are from
2053 * that target, using (target - unalloc) / target to set the threshold.
2054 *
2055 * Typically with 10 block groups as the target, the discrete values this comes
2056 * out to are 0, 10, 20, ... , 80, 90, and 99.
2057 */
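/*
 * Worked example with hypothetical numbers: with a 1GiB effective data chunk
 * size the target is 10GiB of unallocated space. If only 3GiB is currently
 * unallocated, want = 10GiB - 3GiB = 7GiB and the threshold becomes
 * calc_pct_ratio(7GiB, 10GiB) = 70, i.e. block groups less than 70% used are
 * reclaim candidates. With unallocated space at or above the target the
 * threshold is 0 and nothing is reclaimed.
 */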
2058 static int calc_dynamic_reclaim_threshold(const struct btrfs_space_info *space_info)
2059 {
2060 struct btrfs_fs_info *fs_info = space_info->fs_info;
2061 u64 unalloc = atomic64_read(&fs_info->free_chunk_space);
2062 u64 target = calc_unalloc_target(fs_info);
2063 u64 alloc = space_info->total_bytes;
2064 u64 used = btrfs_space_info_used(space_info, false);
2065 u64 unused = alloc - used;
2066 u64 want = target > unalloc ? target - unalloc : 0;
2067 u64 data_chunk_size = calc_effective_data_chunk_size(fs_info);
2068
2069 /* If we have no unused space, don't bother, it won't work anyway. */
2070 if (unused < data_chunk_size)
2071 return 0;
2072
2073 /* Cast to int is OK because want <= target. */
2074 return calc_pct_ratio(want, target);
2075 }
2076
2077 int btrfs_calc_reclaim_threshold(const struct btrfs_space_info *space_info)
2078 {
2079 lockdep_assert_held(&space_info->lock);
2080
2081 if (READ_ONCE(space_info->dynamic_reclaim))
2082 return calc_dynamic_reclaim_threshold(space_info);
2083 return READ_ONCE(space_info->bg_reclaim_threshold);
2084 }
2085
2086 /*
2087 * Under "urgent" reclaim, we will reclaim even fresh block groups that have
2088 * recently seen successful allocations, as we are desperate to reclaim
2089 * whatever we can to avoid ENOSPC in a transaction leading to a readonly fs.
2090 */
2091 static bool is_reclaim_urgent(struct btrfs_space_info *space_info)
2092 {
2093 struct btrfs_fs_info *fs_info = space_info->fs_info;
2094 u64 unalloc = atomic64_read(&fs_info->free_chunk_space);
2095 u64 data_chunk_size = calc_effective_data_chunk_size(fs_info);
2096
2097 return unalloc < data_chunk_size;
2098 }
2099
2100 static bool do_reclaim_sweep(struct btrfs_space_info *space_info, int raid)
2101 {
2102 struct btrfs_block_group *bg;
2103 int thresh_pct;
2104 bool will_reclaim = false;
2105 bool urgent;
2106
2107 spin_lock(&space_info->lock);
2108 urgent = is_reclaim_urgent(space_info);
2109 thresh_pct = btrfs_calc_reclaim_threshold(space_info);
2110 spin_unlock(&space_info->lock);
2111
2112 down_read(&space_info->groups_sem);
2113 again:
2114 list_for_each_entry(bg, &space_info->block_groups[raid], list) {
2115 u64 thresh;
2116 bool reclaim = false;
2117
2118 btrfs_get_block_group(bg);
2119 spin_lock(&bg->lock);
2120 thresh = mult_perc(bg->length, thresh_pct);
2121 if (bg->used < thresh && bg->reclaim_mark) {
2122 will_reclaim = true;
2123 reclaim = true;
2124 }
2125 bg->reclaim_mark++;
2126 spin_unlock(&bg->lock);
2127 if (reclaim)
2128 btrfs_mark_bg_to_reclaim(bg);
2129 btrfs_put_block_group(bg);
2130 }
2131
2132 /*
2133 * In situations where we are very motivated to reclaim (low unalloc)
2134 * use two passes to make the reclaim mark check best effort.
2135 *
2136 * If we have any staler groups, we don't touch the fresher ones, but if we
2137 * really need a block group, do take a fresh one.
2138 */
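/*
 * Illustration: a block group that just satisfied an allocation still has
 * reclaim_mark == 0, so the first pass skips it and only marks older,
 * under-used groups. If nothing qualified and unallocated space is critically
 * low, the second pass runs after every mark has been incremented above, so
 * even that fresh group may now be taken for reclaim.
 */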
2139 if (!will_reclaim && urgent) {
2140 urgent = false;
2141 goto again;
2142 }
2143
2144 up_read(&space_info->groups_sem);
2145 return will_reclaim;
2146 }
2147
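/*
 * Track how much potentially reclaimable space has built up since the last
 * periodic reclaim sweep and arm the next sweep once at least one effective
 * data chunk's worth has accumulated. For illustration (hypothetical numbers):
 * with a 1GiB chunk size, freeing 600MiB and later another 500MiB crosses the
 * 1GiB mark and sets periodic_reclaim_ready, while smaller churn does not.
 */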
2148 void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes)
2149 {
2150 u64 chunk_sz = calc_effective_data_chunk_size(space_info->fs_info);
2151
2152 lockdep_assert_held(&space_info->lock);
2153 space_info->reclaimable_bytes += bytes;
2154
2155 if (space_info->reclaimable_bytes > 0 &&
2156 space_info->reclaimable_bytes >= chunk_sz)
2157 btrfs_set_periodic_reclaim_ready(space_info, true);
2158 }
2159
2160 void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready)
2161 {
2162 lockdep_assert_held(&space_info->lock);
2163 if (!READ_ONCE(space_info->periodic_reclaim))
2164 return;
2165 if (ready != space_info->periodic_reclaim_ready) {
2166 space_info->periodic_reclaim_ready = ready;
2167 if (!ready)
2168 space_info->reclaimable_bytes = 0;
2169 }
2170 }
2171
2172 static bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info)
2173 {
2174 bool ret;
2175
2176 if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
2177 return false;
2178 if (!READ_ONCE(space_info->periodic_reclaim))
2179 return false;
2180
2181 spin_lock(&space_info->lock);
2182 ret = space_info->periodic_reclaim_ready;
2183 spin_unlock(&space_info->lock);
2184
2185 return ret;
2186 }
2187
2188 void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info)
2189 {
2190 int raid;
2191 struct btrfs_space_info *space_info;
2192
2193 list_for_each_entry(space_info, &fs_info->space_info, list) {
2194 if (!btrfs_should_periodic_reclaim(space_info))
2195 continue;
2196 for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++) {
2197 if (do_reclaim_sweep(space_info, raid))
2198 btrfs_set_periodic_reclaim_ready(space_info, false);
2199 }
2200 }
2201 }
2202
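/*
 * Hand freed space back to the space_info, preferring to top up the global
 * block reserve before waking waiting tickets. For illustration (hypothetical
 * numbers): returning 2MiB while the global rsv is 1MiB short of its size puts
 * 1MiB into the rsv (and back into bytes_may_use) and lets
 * btrfs_try_granting_tickets() hand the remaining 1MiB to any waiting tickets.
 */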
2203 void btrfs_return_free_space(struct btrfs_space_info *space_info, u64 len)
2204 {
2205 struct btrfs_fs_info *fs_info = space_info->fs_info;
2206 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
2207
2208 lockdep_assert_held(&space_info->lock);
2209
2210 /* Prioritize the global reservation to receive the freed space. */
2211 if (global_rsv->space_info != space_info)
2212 goto grant;
2213
2214 spin_lock(&global_rsv->lock);
2215 if (!global_rsv->full) {
2216 u64 to_add = min(len, global_rsv->size - global_rsv->reserved);
2217
2218 global_rsv->reserved += to_add;
2219 btrfs_space_info_update_bytes_may_use(space_info, to_add);
2220 if (global_rsv->reserved >= global_rsv->size)
2221 global_rsv->full = true;
2222 len -= to_add;
2223 }
2224 spin_unlock(&global_rsv->lock);
2225
2226 grant:
2227 /* Add to any tickets we may have. */
2228 if (len)
2229 btrfs_try_granting_tickets(space_info);
2230 }
2231