// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_rmap_btree.h"
#include "xfs_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"

/*
 * Per-AG Block Reservations
 *
 * For some kinds of allocation group metadata structures, it is advantageous
 * to reserve a small number of blocks in each AG so that future expansions of
 * that data structure never encounter ENOSPC; running out of space in the
 * middle of a btree split would otherwise force the filesystem offline.
 *
 * Prior to the introduction of reflink, this wasn't an issue because the free
 * space btrees maintain a reserve of space (the AGFL) to handle any expansion
 * that may be necessary; and allocations of other metadata (inodes, BMBT,
 * dir/attr) aren't restricted to a single AG. However, with reflink it is
 * possible to allocate all the space in an AG, have subsequent reflink/CoW
 * activity expand the refcount btree, and discover that there's no space left
 * to handle that expansion. Since we can calculate the maximum size of the
 * refcount btree, we can reserve space for it and avoid ENOSPC.
 *
 * Handling per-AG reservations consists of four changes to the allocator's
 * behavior: First, because these reservations are always needed, we decrease
 * the ag_max_usable counter to reflect the size of the AG after the reserved
 * blocks are taken. Second, the reservations must be reflected in the
 * fdblocks count to maintain proper accounting. Third, each AG must maintain
 * its own reserved block counter so that we can calculate the amount of space
 * that must remain free to maintain the reservations. Fourth, the "remaining
 * reserved blocks" count must be used when calculating the length of the
 * longest free extent in an AG and to clamp maxlen in the per-AG allocation
 * functions. In other words, we maintain a virtual allocation via in-core
 * accounting tricks so that we don't have to clean up after a crash. :)
 *
 * Reserved blocks can be managed by passing one of the enum xfs_ag_resv_type
 * values via struct xfs_alloc_arg or directly to the xfs_free_extent
 * function. It might seem a little funny to maintain a reservoir of blocks
 * to feed another reservoir, but the AGFL only holds enough blocks to get
 * through the next transaction. The per-AG reservation is to ensure (we
 * hope) that each AG never runs out of blocks. Each data structure wanting
 * to use the reservation system should update ask/used in xfs_ag_resv_init.
 */
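
/*
 * For example (a sketch only, with most of the setup omitted), a caller
 * allocating a block for the refcount btree charges the metadata reservation
 * by tagging the allocation argument structure:
 *
 *	struct xfs_alloc_arg	args = {
 *		.tp	= tp,
 *		.mp	= mp,
 *		.resv	= XFS_AG_RESV_METADATA,
 *		.minlen	= 1,
 *		.maxlen	= 1,
 *		.prod	= 1,
 *	};
 *
 * and a caller returning such a block passes the same reservation type to
 * the extent freeing code, e.g. xfs_free_extent(..., XFS_AG_RESV_METADATA),
 * so that xfs_ag_resv_free_extent() below can refill the reservation.
 */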

/*
 * Are we critically low on blocks? For now we'll define that as the number
 * of blocks we can get our hands on being less than 10% of what we reserved
 * or less than some arbitrary number (maximum btree height).
 */
bool
xfs_ag_resv_critical(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type)
{
	struct xfs_mount	*mp = pag_mount(pag);
	xfs_extlen_t		avail;
	xfs_extlen_t		orig;

	switch (type) {
	case XFS_AG_RESV_METADATA:
		avail = pag->pagf_freeblks - pag->pag_rmapbt_resv.ar_reserved;
		orig = pag->pag_meta_resv.ar_asked;
		break;
	case XFS_AG_RESV_RMAPBT:
		avail = pag->pagf_freeblks + pag->pagf_flcount -
			pag->pag_meta_resv.ar_reserved;
		orig = pag->pag_rmapbt_resv.ar_asked;
		break;
	default:
		ASSERT(0);
		return false;
	}

	trace_xfs_ag_resv_critical(pag, type, avail);

	/* Critically low if less than 10% or max btree height remains. */
	return XFS_TEST_ERROR(avail < orig / 10 ||
			avail < mp->m_agbtree_maxlevels,
			mp, XFS_ERRTAG_AG_RESV_CRITICAL);
}

/*
 * How many blocks are reserved but not used, and therefore must not be
 * allocated away?
 */
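
/*
 * Illustrative example (numbers are made up): if the metadata reservation
 * still holds 100 unused blocks and the rmapbt reservation holds 50, an
 * XFS_AG_RESV_NONE allocation must leave 150 blocks untouched, whereas an
 * XFS_AG_RESV_METADATA allocation only has to stay clear of the 50 rmapbt
 * blocks, since it is entitled to consume its own reservation.
 */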
xfs_extlen_t
xfs_ag_resv_needed(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type)
{
	xfs_extlen_t		len;

	len = pag->pag_meta_resv.ar_reserved + pag->pag_rmapbt_resv.ar_reserved;
	switch (type) {
	case XFS_AG_RESV_METADATA:
	case XFS_AG_RESV_RMAPBT:
		len -= xfs_perag_resv(pag, type)->ar_reserved;
		break;
	case XFS_AG_RESV_METAFILE:
	case XFS_AG_RESV_NONE:
		/* empty */
		break;
	default:
		ASSERT(0);
	}

	trace_xfs_ag_resv_needed(pag, type, len);

	return len;
}

/* Clean out a reservation */
static void
__xfs_ag_resv_free(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type)
{
	struct xfs_ag_resv	*resv;
	xfs_extlen_t		oldresv;

	trace_xfs_ag_resv_free(pag, type, 0);

	resv = xfs_perag_resv(pag, type);
	if (pag_agno(pag) == 0)
		pag_mount(pag)->m_ag_max_usable += resv->ar_asked;
	/*
	 * RMAPBT blocks come from the AGFL and AGFL blocks are always
	 * considered "free", so whatever was reserved at mount time must be
	 * given back at umount.
	 */
	if (type == XFS_AG_RESV_RMAPBT)
		oldresv = resv->ar_orig_reserved;
	else
		oldresv = resv->ar_reserved;
	xfs_add_fdblocks(pag_mount(pag), oldresv);
	resv->ar_reserved = 0;
	resv->ar_asked = 0;
	resv->ar_orig_reserved = 0;
}

/* Free a per-AG reservation. */
void
xfs_ag_resv_free(
	struct xfs_perag	*pag)
{
	__xfs_ag_resv_free(pag, XFS_AG_RESV_RMAPBT);
	__xfs_ag_resv_free(pag, XFS_AG_RESV_METADATA);
}

static int
__xfs_ag_resv_init(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type,
	xfs_extlen_t		ask,
	xfs_extlen_t		used)
{
	struct xfs_mount	*mp = pag_mount(pag);
	struct xfs_ag_resv	*resv;
	int			error;
	xfs_extlen_t		hidden_space;

	if (used > ask)
		ask = used;

	switch (type) {
	case XFS_AG_RESV_RMAPBT:
		/*
		 * Space taken by the rmapbt is not subtracted from fdblocks
		 * because the rmapbt lives in the free space. Here we must
		 * subtract the entire reservation from fdblocks so that we
		 * always have blocks available for rmapbt expansion.
		 */
		hidden_space = ask;
		break;
	case XFS_AG_RESV_METADATA:
		/*
		 * Space taken by all other metadata btrees is accounted
		 * on-disk as used space. We therefore only hide the space
		 * that is reserved but not used by the trees.
		 */
		hidden_space = ask - used;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}

	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_AG_RESV_FAIL))
		error = -ENOSPC;
	else
		error = xfs_dec_fdblocks(mp, hidden_space, true);
	if (error) {
		trace_xfs_ag_resv_init_error(pag, error, _RET_IP_);
		xfs_warn(mp,
"Per-AG reservation for AG %u failed. Filesystem may run out of space.",
				pag_agno(pag));
		return error;
	}

	/*
	 * Reduce the maximum per-AG allocation length by however much we're
	 * trying to reserve for an AG. Since this is a filesystem-wide
	 * counter, we only make the adjustment for AG 0. This assumes that
	 * there aren't any AGs hungrier for per-AG reservation than AG 0.
	 */
	if (pag_agno(pag) == 0)
		mp->m_ag_max_usable -= ask;

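	/*
	 * Worked example for the bookkeeping below (illustrative numbers):
	 * a METADATA reservation with ask = 100 and used = 40 hides 60
	 * blocks from fdblocks and leaves ar_reserved = 60, because the 40
	 * in-use btree blocks are already accounted as used space on disk.
	 * An RMAPBT reservation with the same numbers hides all 100 blocks,
	 * since in-use rmapbt blocks still count as free space.
	 */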
	resv = xfs_perag_resv(pag, type);
	resv->ar_asked = ask;
	resv->ar_orig_reserved = hidden_space;
	resv->ar_reserved = ask - used;

	trace_xfs_ag_resv_init(pag, type, ask);
	return 0;
}

/* Create a per-AG block reservation. */
int
xfs_ag_resv_init(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = pag_mount(pag);
	xfs_extlen_t		ask;
	xfs_extlen_t		used;
	int			error = 0, error2;
	bool			has_resv = false;

	/* Create the metadata reservation. */
	if (pag->pag_meta_resv.ar_asked == 0) {
		ask = used = 0;

		error = xfs_refcountbt_calc_reserves(mp, tp, pag, &ask, &used);
		if (error)
			goto out;

		error = xfs_finobt_calc_reserves(pag, tp, &ask, &used);
		if (error)
			goto out;

		error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
				ask, used);
		if (error) {
			/*
			 * Because we didn't have per-AG reservations when the
			 * finobt feature was added we might not be able to
			 * reserve all needed blocks. Warn and fall back to
			 * the old and potentially buggy code in that case,
			 * but ensure we do have the reservation for the
			 * refcountbt.
			 */
			ask = used = 0;

			mp->m_finobt_nores = true;

			error = xfs_refcountbt_calc_reserves(mp, tp, pag, &ask,
					&used);
			if (error)
				goto out;

			error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
					ask, used);
			if (error)
				goto out;
		}
		if (ask)
			has_resv = true;
	}

	/* Create the RMAPBT metadata reservation */
	if (pag->pag_rmapbt_resv.ar_asked == 0) {
		ask = used = 0;

		error = xfs_rmapbt_calc_reserves(mp, tp, pag, &ask, &used);
		if (error)
			goto out;

		error = __xfs_ag_resv_init(pag, XFS_AG_RESV_RMAPBT, ask, used);
		if (error)
			goto out;
		if (ask)
			has_resv = true;
	}

out:
	/*
	 * Initialize the pagf if we have at least one active reservation on
	 * the AG. This may have occurred already via reservation calculation,
	 * but fall back to an explicit init to ensure the in-core allocbt
	 * usage counters are initialized as soon as possible. This is
	 * important because filesystems with large perag reservations are
	 * susceptible to free space reservation problems that the allocbt
	 * counter is used to address.
	 */
	if (has_resv) {
		error2 = xfs_alloc_read_agf(pag, tp, 0, NULL);
		if (error2)
			return error2;

		/*
		 * If there isn't enough space in the AG to satisfy the
		 * reservation, let the caller know that there wasn't enough
		 * space. Callers are responsible for deciding what to do
		 * next, since (in theory) we can stumble along with
		 * insufficient reservation if data blocks are being freed to
		 * replenish the AG's free space.
		 */
		if (!error &&
		    xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
		    xfs_perag_resv(pag, XFS_AG_RESV_RMAPBT)->ar_reserved >
		    pag->pagf_freeblks + pag->pagf_flcount)
			error = -ENOSPC;
	}

	return error;
}

/* Allocate a block from the reservation. */
void
xfs_ag_resv_alloc_extent(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type,
	struct xfs_alloc_arg	*args)
{
	struct xfs_ag_resv	*resv;
	xfs_extlen_t		len;
	uint			field;

	trace_xfs_ag_resv_alloc_extent(pag, type, args->len);

	switch (type) {
	case XFS_AG_RESV_AGFL:
	case XFS_AG_RESV_METAFILE:
		return;
	case XFS_AG_RESV_METADATA:
	case XFS_AG_RESV_RMAPBT:
		resv = xfs_perag_resv(pag, type);
		break;
	default:
		ASSERT(0);
		fallthrough;
	case XFS_AG_RESV_NONE:
		field = args->wasdel ? XFS_TRANS_SB_RES_FDBLOCKS :
				       XFS_TRANS_SB_FDBLOCKS;
		xfs_trans_mod_sb(args->tp, field, -(int64_t)args->len);
		return;
	}

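	/*
	 * Example of the split below (illustrative numbers): if args->len is
	 * 8 but only 5 reserved blocks remain, 5 blocks come out of the
	 * reservation and need only the on-disk superblock update, while the
	 * other 3 are accounted like an ordinary allocation.
	 */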
	len = min_t(xfs_extlen_t, args->len, resv->ar_reserved);
	resv->ar_reserved -= len;
	if (type == XFS_AG_RESV_RMAPBT)
		return;
	/* Allocations of reserved blocks only need on-disk sb updates... */
	xfs_trans_mod_sb(args->tp, XFS_TRANS_SB_RES_FDBLOCKS, -(int64_t)len);
	/* ...but non-reserved blocks need in-core and on-disk updates. */
	if (args->len > len)
		xfs_trans_mod_sb(args->tp, XFS_TRANS_SB_FDBLOCKS,
				-((int64_t)args->len - len));
}

/* Free a block to the reservation. */
void
xfs_ag_resv_free_extent(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type,
	struct xfs_trans	*tp,
	xfs_extlen_t		len)
{
	xfs_extlen_t		leftover;
	struct xfs_ag_resv	*resv;

	trace_xfs_ag_resv_free_extent(pag, type, len);

	switch (type) {
	case XFS_AG_RESV_AGFL:
	case XFS_AG_RESV_METAFILE:
		return;
	case XFS_AG_RESV_METADATA:
	case XFS_AG_RESV_RMAPBT:
		resv = xfs_perag_resv(pag, type);
		break;
	default:
		ASSERT(0);
		fallthrough;
	case XFS_AG_RESV_NONE:
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (int64_t)len);
		fallthrough;
	case XFS_AG_RESV_IGNORE:
		return;
	}

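	/*
	 * Illustrative numbers for the logic below: freeing 8 blocks while
	 * the reservation is 5 blocks short of ar_asked refills those 5 into
	 * the reserved pool; only the 3 blocks beyond that are returned to
	 * the general in-core free space pool.
	 */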
	leftover = min_t(xfs_extlen_t, len, resv->ar_asked - resv->ar_reserved);
	resv->ar_reserved += leftover;
	if (type == XFS_AG_RESV_RMAPBT)
		return;
	/* Freeing into the reserved pool only requires on-disk update... */
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_RES_FDBLOCKS, len);
	/* ...but freeing beyond that requires in-core and on-disk update. */
	if (len > leftover)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, len - leftover);
}