1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_alloc.h"
16 #include "xfs_bmap.h"
17 #include "xfs_bmap_btree.h"
18 #include "xfs_bmap_util.h"
19 #include "xfs_trans.h"
20 #include "xfs_trans_space.h"
21 #include "xfs_icache.h"
22 #include "xfs_rtalloc.h"
23 #include "xfs_sb.h"
24 #include "xfs_rtbitmap.h"
25 #include "xfs_rtrmap_btree.h"
26 #include "xfs_quota.h"
27 #include "xfs_log_priv.h"
28 #include "xfs_health.h"
29 #include "xfs_da_format.h"
30 #include "xfs_metafile.h"
31 #include "xfs_rtgroup.h"
32 #include "xfs_error.h"
33 #include "xfs_trace.h"
34 #include "xfs_rtrefcount_btree.h"
35 #include "xfs_reflink.h"
36 #include "xfs_zone_alloc.h"
37
38 /*
39 * Return whether there are any free extents in the size range given
40 * by low and high, for the bitmap block bbno.
41 */
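/*
 * Note that low and high are log2 extent sizes: summary level l counts free
 * extents of between 2^l and 2^(l+1) - 1 rtextents, so e.g. low == 3 limits
 * the search to free extents of at least 8 rtextents.
 */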
42 STATIC int
43 xfs_rtany_summary(
44 struct xfs_rtalloc_args *args,
45 int low, /* low log2 extent size */
46 int high, /* high log2 extent size */
47 xfs_fileoff_t bbno, /* bitmap block number */
48 int *maxlog) /* out: max log2 extent size free */
49 {
50 uint8_t *rsum_cache = args->rtg->rtg_rsum_cache;
51 int error;
52 int log; /* loop counter, log2 of ext. size */
53 xfs_suminfo_t sum; /* summary data */
54
55 /* There are no extents at levels >= rsum_cache[bbno]. */
56 if (rsum_cache) {
57 high = min(high, rsum_cache[bbno] - 1);
58 if (low > high) {
59 *maxlog = -1;
60 return 0;
61 }
62 }
63
64 /*
65 * Loop over logs of extent sizes.
66 */
67 for (log = high; log >= low; log--) {
68 /*
69 * Get one summary datum.
70 */
71 error = xfs_rtget_summary(args, log, bbno, &sum);
72 if (error) {
73 return error;
74 }
75 /*
76 * If there are any, return success.
77 */
78 if (sum) {
79 *maxlog = log;
80 goto out;
81 }
82 }
83 /*
84 * Found nothing, return failure.
85 */
86 *maxlog = -1;
87 out:
88 /* There were no extents at levels > log. */
89 if (rsum_cache && log + 1 < rsum_cache[bbno])
90 rsum_cache[bbno] = log + 1;
91 return 0;
92 }
93
94 /*
95 * Copy and transform the summary file, given the old and new
96 * parameters in the mount structures.
97 */
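/*
 * The loops below walk from the highest summary level and bitmap block
 * downwards, moving every nonzero count from the old summary geometry to
 * the new one: decrement it in the old file, then add it to the new file.
 */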
98 STATIC int
99 xfs_rtcopy_summary(
100 struct xfs_rtalloc_args *oargs,
101 struct xfs_rtalloc_args *nargs)
102 {
103 xfs_fileoff_t bbno; /* bitmap block number */
104 int error;
105 int log; /* summary level number (log length) */
106 xfs_suminfo_t sum; /* summary data */
107
108 for (log = oargs->mp->m_rsumlevels - 1; log >= 0; log--) {
109 for (bbno = oargs->mp->m_sb.sb_rbmblocks - 1;
110 (xfs_srtblock_t)bbno >= 0;
111 bbno--) {
112 error = xfs_rtget_summary(oargs, log, bbno, &sum);
113 if (error)
114 goto out;
115 if (sum == 0)
116 continue;
117 error = xfs_rtmodify_summary(oargs, log, bbno, -sum);
118 if (error)
119 goto out;
120 error = xfs_rtmodify_summary(nargs, log, bbno, sum);
121 if (error)
122 goto out;
123 ASSERT(sum > 0);
124 }
125 }
126 error = 0;
127 out:
128 xfs_rtbuf_cache_relse(oargs);
129 return error;
130 }
131 /*
132 * Mark an extent specified by start and len allocated.
133 * Updates all the summary information as well as the bitmap.
134 */
135 STATIC int
136 xfs_rtallocate_range(
137 struct xfs_rtalloc_args *args,
138 xfs_rtxnum_t start, /* start rtext to allocate */
139 xfs_rtxlen_t len) /* number of rtexts to allocate */
140 {
141 struct xfs_mount *mp = args->mp;
142 xfs_rtxnum_t end; /* end of the allocated rtext */
143 int error;
144 xfs_rtxnum_t postblock = 0; /* first rtext allocated > end */
145 xfs_rtxnum_t preblock = 0; /* first rtext allocated < start */
146
147 end = start + len - 1;
148 /*
149 * Assume we're allocating out of the middle of a free extent.
150 * We need to find the beginning and end of the extent so we can
151 * properly update the summary.
152 */
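/*
 * Concretely: if the surrounding free extent runs from preblock to
 * postblock, the summary entry for that whole extent is dropped below, and
 * up to two smaller entries are re-added for the leftover free pieces in
 * front of start and after end.
 */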
153 error = xfs_rtfind_back(args, start, &preblock);
154 if (error)
155 return error;
156
157 /*
158 * Find the next allocated block (end of free extent).
159 */
160 error = xfs_rtfind_forw(args, end, args->rtg->rtg_extents - 1,
161 &postblock);
162 if (error)
163 return error;
164
165 /*
166 * Decrement the summary information corresponding to the entire
167 * (old) free extent.
168 */
169 error = xfs_rtmodify_summary(args,
170 xfs_highbit64(postblock + 1 - preblock),
171 xfs_rtx_to_rbmblock(mp, preblock), -1);
172 if (error)
173 return error;
174
175 /*
176 * If there are blocks not being allocated at the front of the
177 * old extent, add summary data for them to be free.
178 */
179 if (preblock < start) {
180 error = xfs_rtmodify_summary(args,
181 xfs_highbit64(start - preblock),
182 xfs_rtx_to_rbmblock(mp, preblock), 1);
183 if (error)
184 return error;
185 }
186
187 /*
188 * If there are blocks not being allocated at the end of the
189 * old extent, add summary data for them to be free.
190 */
191 if (postblock > end) {
192 error = xfs_rtmodify_summary(args,
193 xfs_highbit64(postblock - end),
194 xfs_rtx_to_rbmblock(mp, end + 1), 1);
195 if (error)
196 return error;
197 }
198
199 /*
200 * Modify the bitmap to mark this extent allocated.
201 */
202 return xfs_rtmodify_range(args, start, len, 0);
203 }
204
205 /* Reduce @rtxlen until it is a multiple of @prod. */
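/* For example: rtxlen == 10 with prod == 4 yields 8; prod <= 1 leaves it unchanged. */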
206 static inline xfs_rtxlen_t
207 xfs_rtalloc_align_len(
208 xfs_rtxlen_t rtxlen,
209 xfs_rtxlen_t prod)
210 {
211 if (unlikely(prod > 1))
212 return rounddown(rtxlen, prod);
213 return rtxlen;
214 }
215
216 /*
217 * Make sure we don't run off the end of the rt volume. Be careful that
218 * adjusting maxlen downwards doesn't cause us to fail the alignment checks.
219 */
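/*
 * For example: with rtg_extents == 100, startrtx == 95, rtxlen == 10 and
 * prod == 4, the clamp yields 5 and the alignment step rounds it down to 4.
 */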
220 static inline xfs_rtxlen_t
221 xfs_rtallocate_clamp_len(
222 struct xfs_rtgroup *rtg,
223 xfs_rtxnum_t startrtx,
224 xfs_rtxlen_t rtxlen,
225 xfs_rtxlen_t prod)
226 {
227 xfs_rtxlen_t ret;
228
229 ret = min(rtg->rtg_extents, startrtx + rtxlen) - startrtx;
230 return xfs_rtalloc_align_len(ret, prod);
231 }
232
233 /*
234 * Attempt to allocate an extent minlen<=len<=maxlen starting from
235 * bitmap block bbno. If we don't get maxlen then use prod to trim
236 * the length, if given. Returns error; returns starting block in *rtx.
237 * The lengths are all in rtextents.
238 */
239 STATIC int
240 xfs_rtallocate_extent_block(
241 struct xfs_rtalloc_args *args,
242 xfs_fileoff_t bbno, /* bitmap block number */
243 xfs_rtxlen_t minlen, /* minimum length to allocate */
244 xfs_rtxlen_t maxlen, /* maximum length to allocate */
245 xfs_rtxlen_t *len, /* out: actual length allocated */
246 xfs_rtxnum_t *nextp, /* out: next rtext to try */
247 xfs_rtxlen_t prod, /* extent product factor */
248 xfs_rtxnum_t *rtx) /* out: start rtext allocated */
249 {
250 struct xfs_mount *mp = args->mp;
251 xfs_rtxnum_t besti = -1; /* best rtext found so far */
252 xfs_rtxnum_t end; /* last rtext in chunk */
253 xfs_rtxnum_t i; /* current rtext trying */
254 xfs_rtxnum_t next; /* next rtext to try */
255 xfs_rtxlen_t scanlen; /* number of free rtx to look for */
256 xfs_rtxlen_t bestlen = 0; /* best length found so far */
257 int stat; /* status from internal calls */
258 int error;
259
260 /*
261 * Loop over all the extents starting in this bitmap block up to the
262 * end of the rt volume, looking for one that's long enough.
263 */
264 end = min(args->rtg->rtg_extents, xfs_rbmblock_to_rtx(mp, bbno + 1)) -
265 1;
266 for (i = xfs_rbmblock_to_rtx(mp, bbno); i <= end; i++) {
267 /* Make sure we don't scan off the end of the rt volume. */
268 scanlen = xfs_rtallocate_clamp_len(args->rtg, i, maxlen, prod);
269 if (scanlen < minlen)
270 break;
271
272 /*
273 * See if there's a free extent of scanlen starting at i.
274 * If not, next will contain the first non-free rtext.
275 */
276 error = xfs_rtcheck_range(args, i, scanlen, 1, &next, &stat);
277 if (error)
278 return error;
279 if (stat) {
280 /*
281 * i to scanlen is all free, allocate and return that.
282 */
283 *len = scanlen;
284 *rtx = i;
285 return 0;
286 }
287
288 /*
289 * In the case where we have a variable-sized allocation
290 * request, figure out how big this free piece is,
291 * and if it's big enough for the minimum, and the best
292 * so far, remember it.
293 */
294 if (minlen < maxlen) {
295 xfs_rtxnum_t thislen; /* this extent size */
296
297 thislen = next - i;
298 if (thislen >= minlen && thislen > bestlen) {
299 besti = i;
300 bestlen = thislen;
301 }
302 }
303 /*
304 * If not done yet, find the start of the next free space.
305 */
306 if (next >= end)
307 break;
308 error = xfs_rtfind_forw(args, next, end, &i);
309 if (error)
310 return error;
311 }
312
313 /* Searched the whole thing & didn't find a maxlen free extent. */
314 if (besti == -1)
315 goto nospace;
316
317 /*
318 * Ensure bestlen is a multiple of prod, but don't return a too-short
319 * extent.
320 */
321 bestlen = xfs_rtalloc_align_len(bestlen, prod);
322 if (bestlen < minlen)
323 goto nospace;
324
325 /*
326 * Pick besti for bestlen & return that.
327 */
328 *len = bestlen;
329 *rtx = besti;
330 return 0;
331 nospace:
332 /* Allocation failed. Set *nextp to the next block to try. */
333 *nextp = next;
334 return -ENOSPC;
335 }
336
337 /*
338 * Allocate an extent of length minlen<=len<=maxlen, starting at block
339 * bno. If we don't get maxlen then use prod to trim the length, if given.
340 * Returns error; returns starting block in *rtx.
341 * The lengths are all in rtextents.
342 */
343 STATIC int
344 xfs_rtallocate_extent_exact(
345 struct xfs_rtalloc_args *args,
346 xfs_rtxnum_t start, /* starting rtext number to allocate */
347 xfs_rtxlen_t minlen, /* minimum length to allocate */
348 xfs_rtxlen_t maxlen, /* maximum length to allocate */
349 xfs_rtxlen_t *len, /* out: actual length allocated */
350 xfs_rtxlen_t prod, /* extent product factor */
351 xfs_rtxnum_t *rtx) /* out: start rtext allocated */
352 {
353 xfs_rtxnum_t next; /* next rtext to try (dummy) */
354 xfs_rtxlen_t alloclen; /* candidate length */
355 xfs_rtxlen_t scanlen; /* number of free rtx to look for */
356 int isfree; /* extent is free */
357 int error;
358
359 ASSERT(minlen % prod == 0);
360 ASSERT(maxlen % prod == 0);
361
362 /* Make sure we don't run off the end of the rt volume. */
363 scanlen = xfs_rtallocate_clamp_len(args->rtg, start, maxlen, prod);
364 if (scanlen < minlen)
365 return -ENOSPC;
366
367 /* Check if the range in question (for scanlen) is free. */
368 error = xfs_rtcheck_range(args, start, scanlen, 1, &next, &isfree);
369 if (error)
370 return error;
371
372 if (isfree) {
373 /* start to scanlen is all free; allocate it. */
374 *len = scanlen;
375 *rtx = start;
376 return 0;
377 }
378
379 /*
380 * If not, allocate what there is, if it's at least minlen.
381 */
382 alloclen = next - start;
383 if (alloclen < minlen)
384 return -ENOSPC;
385
386 /* Ensure alloclen is a multiple of prod. */
387 alloclen = xfs_rtalloc_align_len(alloclen, prod);
388 if (alloclen < minlen)
389 return -ENOSPC;
390
391 *len = alloclen;
392 *rtx = start;
393 return 0;
394 }
395
396 /*
397 * Allocate an extent of length minlen<=len<=maxlen, starting as near
398 * to start as possible. If we don't get maxlen then use prod to trim
399 * the length, if given. The lengths are all in rtextents.
400 */
401 STATIC int
402 xfs_rtallocate_extent_near(
403 struct xfs_rtalloc_args *args,
404 xfs_rtxnum_t start, /* starting rtext number to allocate */
405 xfs_rtxlen_t minlen, /* minimum length to allocate */
406 xfs_rtxlen_t maxlen, /* maximum length to allocate */
407 xfs_rtxlen_t *len, /* out: actual length allocated */
408 xfs_rtxlen_t prod, /* extent product factor */
409 xfs_rtxnum_t *rtx) /* out: start rtext allocated */
410 {
411 struct xfs_mount *mp = args->mp;
412 int maxlog; /* max useful extent from summary */
413 xfs_fileoff_t bbno; /* bitmap block number */
414 int error;
415 int i; /* bitmap block offset (loop control) */
416 int j; /* secondary loop control */
417 int log2len; /* log2 of minlen */
418 xfs_rtxnum_t n; /* next rtext to try */
419
420 ASSERT(minlen % prod == 0);
421 ASSERT(maxlen % prod == 0);
422
423 /*
424 * If the block number given is off the end, silently set it to the last
425 * block.
426 */
427 start = min(start, args->rtg->rtg_extents - 1);
428
429 /*
430 * Try the exact allocation first.
431 */
432 error = xfs_rtallocate_extent_exact(args, start, minlen, maxlen, len,
433 prod, rtx);
434 if (error != -ENOSPC)
435 return error;
436
437 bbno = xfs_rtx_to_rbmblock(mp, start);
438 i = 0;
439 j = -1;
440 ASSERT(minlen != 0);
441 log2len = xfs_highbit32(minlen);
442 /*
443 * Loop over all bitmap blocks (bbno + i is current block).
444 */
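/*
 * The loop control at the bottom makes i alternate around the starting
 * block: 0, 1, -1, 2, -2, ... (clamped to the bitmap), so the bitmap
 * blocks closest to the requested start are tried first.
 */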
445 for (;;) {
446 /*
447 * Get summary information of extents of all useful levels
448 * starting in this bitmap block.
449 */
450 error = xfs_rtany_summary(args, log2len, mp->m_rsumlevels - 1,
451 bbno + i, &maxlog);
452 if (error)
453 return error;
454
455 /*
456 * If there are any useful extents starting here, try
457 * allocating one.
458 */
459 if (maxlog >= 0) {
460 xfs_extlen_t maxavail =
461 min_t(xfs_rtblock_t, maxlen,
462 (1ULL << (maxlog + 1)) - 1);
463 /*
464 * On the positive side of the starting location.
465 */
466 if (i >= 0) {
467 /*
468 * Try to allocate an extent starting in
469 * this block.
470 */
471 error = xfs_rtallocate_extent_block(args,
472 bbno + i, minlen, maxavail, len,
473 &n, prod, rtx);
474 if (error != -ENOSPC)
475 return error;
476 }
477 /*
478 * On the negative side of the starting location.
479 */
480 else { /* i < 0 */
481 int maxblocks;
482
483 /*
484 * Loop backwards to find the end of the extent
485 * we found in the realtime summary.
486 *
487 * maxblocks is the maximum possible number of
488 * bitmap blocks from the start of the extent
489 * to the end of the extent.
490 */
491 if (maxlog == 0)
492 maxblocks = 0;
493 else if (maxlog < mp->m_blkbit_log)
494 maxblocks = 1;
495 else
496 maxblocks = 2 << (maxlog - mp->m_blkbit_log);
497
498 /*
499 * We need to check bbno + i + maxblocks down to
500 * bbno + i. We already checked bbno down to
501 * bbno + j + 1, so we don't need to check those
502 * again.
503 */
504 j = min(i + maxblocks, j);
505 for (; j >= i; j--) {
506 error = xfs_rtallocate_extent_block(args,
507 bbno + j, minlen,
508 maxavail, len, &n, prod,
509 rtx);
510 if (error != -ENOSPC)
511 return error;
512 }
513 }
514 }
515 /*
516 * Loop control. If we were on the positive side, and there's
517 * still more blocks on the negative side, go there.
518 */
519 if (i > 0 && (int)bbno - i >= 0)
520 i = -i;
521 /*
522 * If positive, and no more negative, but there are more
523 * positive, go there.
524 */
525 else if (i > 0 && (int)bbno + i < mp->m_sb.sb_rbmblocks - 1)
526 i++;
527 /*
528 * If negative or 0 (just started), and there are positive
529 * blocks to go, go there. The 0 case moves to block 1.
530 */
531 else if (i <= 0 && (int)bbno - i < mp->m_sb.sb_rbmblocks - 1)
532 i = 1 - i;
533 /*
534 * If negative or 0 and there are more negative blocks,
535 * go there.
536 */
537 else if (i <= 0 && (int)bbno + i > 0)
538 i--;
539 /*
540 * Must be done. Return failure.
541 */
542 else
543 break;
544 }
545 return -ENOSPC;
546 }
547
548 static int
549 xfs_rtalloc_sumlevel(
550 struct xfs_rtalloc_args *args,
551 int l, /* level number */
552 xfs_rtxlen_t minlen, /* minimum length to allocate */
553 xfs_rtxlen_t maxlen, /* maximum length to allocate */
554 xfs_rtxlen_t prod, /* extent product factor */
555 xfs_rtxlen_t *len, /* out: actual length allocated */
556 xfs_rtxnum_t *rtx) /* out: start rtext allocated */
557 {
558 xfs_fileoff_t i; /* bitmap block number */
559 int error;
560
561 for (i = 0; i < args->mp->m_sb.sb_rbmblocks; i++) {
562 xfs_suminfo_t sum; /* summary information for extents */
563 xfs_rtxnum_t n; /* next rtext to be tried */
564
565 error = xfs_rtget_summary(args, l, i, &sum);
566 if (error)
567 return error;
568
569 /*
570 * Nothing there, on to the next block.
571 */
572 if (!sum)
573 continue;
574
575 /*
576 * Try allocating the extent.
577 */
578 error = xfs_rtallocate_extent_block(args, i, minlen, maxlen,
579 len, &n, prod, rtx);
580 if (error != -ENOSPC)
581 return error;
582
583 /*
584 * If the "next block to try" returned from the allocator is
585 * beyond the next bitmap block, skip to that bitmap block.
586 */
587 if (xfs_rtx_to_rbmblock(args->mp, n) > i + 1)
588 i = xfs_rtx_to_rbmblock(args->mp, n) - 1;
589 }
590
591 return -ENOSPC;
592 }
593
594 /*
595 * Allocate an extent of length minlen<=len<=maxlen, with no position
596 * specified. If we don't get maxlen then use prod to trim
597 * the length, if given. The lengths are all in rtextents.
598 */
599 static int
600 xfs_rtallocate_extent_size(
601 struct xfs_rtalloc_args *args,
602 xfs_rtxlen_t minlen, /* minimum length to allocate */
603 xfs_rtxlen_t maxlen, /* maximum length to allocate */
604 xfs_rtxlen_t *len, /* out: actual length allocated */
605 xfs_rtxlen_t prod, /* extent product factor */
606 xfs_rtxnum_t *rtx) /* out: start rtext allocated */
607 {
608 int error;
609 int l; /* level number (loop control) */
610
611 ASSERT(minlen % prod == 0);
612 ASSERT(maxlen % prod == 0);
613 ASSERT(maxlen != 0);
614
615 /*
616 * Loop over all the levels starting with maxlen.
617 *
618 * At each level, look at all the bitmap blocks, to see if there are
619 * extents starting there that are long enough (>= maxlen).
620 *
621 * Note, only on the initial level can the allocation fail if the
622 * summary says there's an extent.
623 */
624 for (l = xfs_highbit32(maxlen); l < args->mp->m_rsumlevels; l++) {
625 error = xfs_rtalloc_sumlevel(args, l, minlen, maxlen, prod, len,
626 rtx);
627 if (error != -ENOSPC)
628 return error;
629 }
630
631 /*
632 * Didn't find any maxlen blocks. Try smaller ones, unless we are
633 * looking for a fixed size extent.
634 */
635 if (minlen > --maxlen)
636 return -ENOSPC;
637 ASSERT(minlen != 0);
638 ASSERT(maxlen != 0);
639
640 /*
641 * Loop over sizes, from maxlen down to minlen.
642 *
643 * This time, when we do the allocations, allow smaller ones to succeed,
644 * but make sure the specified minlen/maxlen are in the possible range
645 * for this summary level.
646 */
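/*
 * Summary level l can only describe free extents of between 1 << l and
 * (1 << (l + 1)) - 1 rtextents, hence the clamping of minlen and maxlen in
 * the call below.
 */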
647 for (l = xfs_highbit32(maxlen); l >= xfs_highbit32(minlen); l--) {
648 error = xfs_rtalloc_sumlevel(args, l,
649 max_t(xfs_rtxlen_t, minlen, 1 << l),
650 min_t(xfs_rtxlen_t, maxlen, (1 << (l + 1)) - 1),
651 prod, len, rtx);
652 if (error != -ENOSPC)
653 return error;
654 }
655
656 return -ENOSPC;
657 }
658
659 static void
660 xfs_rtunmount_rtg(
661 struct xfs_rtgroup *rtg)
662 {
663 int i;
664
665 for (i = 0; i < XFS_RTGI_MAX; i++)
666 xfs_rtginode_irele(&rtg->rtg_inodes[i]);
667 if (!xfs_has_zoned(rtg_mount(rtg)))
668 kvfree(rtg->rtg_rsum_cache);
669 }
670
671 static int
672 xfs_alloc_rsum_cache(
673 struct xfs_rtgroup *rtg,
674 xfs_extlen_t rbmblocks)
675 {
676 /*
677 * The rsum cache is initialized to the maximum value, which is
678 * trivially an upper bound on the maximum level with any free extents.
679 */
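/* The memset() of -1 below stores 0xff (255) in every byte-sized cache entry. */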
680 rtg->rtg_rsum_cache = kvmalloc(rbmblocks, GFP_KERNEL);
681 if (!rtg->rtg_rsum_cache)
682 return -ENOMEM;
683 memset(rtg->rtg_rsum_cache, -1, rbmblocks);
684 return 0;
685 }
686
687 /*
688 * If we changed the rt extent size (meaning there was no rt volume previously)
689 * and the root directory had EXTSZINHERIT and RTINHERIT set, it's possible
690 * that the extent size hint on the root directory is no longer congruent with
691 * the new rt extent size. Log the rootdir inode to fix this.
692 */
693 static int
694 xfs_growfs_rt_fixup_extsize(
695 struct xfs_mount *mp)
696 {
697 struct xfs_inode *ip = mp->m_rootip;
698 struct xfs_trans *tp;
699 int error = 0;
700
701 xfs_ilock(ip, XFS_IOLOCK_EXCL);
702 if (!(ip->i_diflags & XFS_DIFLAG_RTINHERIT) ||
703 !(ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT))
704 goto out_iolock;
705
706 error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_ichange, 0, 0, false,
707 &tp);
708 if (error)
709 goto out_iolock;
710
711 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
712 error = xfs_trans_commit(tp);
713 xfs_iunlock(ip, XFS_ILOCK_EXCL);
714
715 out_iolock:
716 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
717 return error;
718 }
719
720 /* Ensure that the rtgroup metadata inode is loaded, creating it if needed. */
721 static int
722 xfs_rtginode_ensure(
723 struct xfs_rtgroup *rtg,
724 enum xfs_rtg_inodes type)
725 {
726 struct xfs_trans *tp;
727 int error;
728
729 if (rtg->rtg_inodes[type])
730 return 0;
731
732 error = xfs_trans_alloc_empty(rtg_mount(rtg), &tp);
733 if (error)
734 return error;
735 error = xfs_rtginode_load(rtg, type, tp);
736 xfs_trans_cancel(tp);
737
738 if (error != -ENOENT)
739 return error;
740 return xfs_rtginode_create(rtg, type, true);
741 }
742
743 static struct xfs_mount *
744 xfs_growfs_rt_alloc_fake_mount(
745 const struct xfs_mount *mp,
746 xfs_rfsblock_t rblocks,
747 xfs_agblock_t rextsize)
748 {
749 struct xfs_mount *nmp;
750
751 nmp = kmemdup(mp, sizeof(*mp), GFP_KERNEL);
752 if (!nmp)
753 return NULL;
754 xfs_mount_sb_set_rextsize(nmp, &nmp->m_sb, rextsize);
755 nmp->m_sb.sb_rblocks = rblocks;
756 nmp->m_sb.sb_rextents = xfs_blen_to_rtbxlen(nmp, nmp->m_sb.sb_rblocks);
757 nmp->m_sb.sb_rbmblocks = xfs_rtbitmap_blockcount(nmp);
758 nmp->m_sb.sb_rextslog = xfs_compute_rextslog(nmp->m_sb.sb_rextents);
759 if (xfs_has_rtgroups(nmp))
760 nmp->m_sb.sb_rgcount = howmany_64(nmp->m_sb.sb_rextents,
761 nmp->m_sb.sb_rgextents);
762 else
763 nmp->m_sb.sb_rgcount = 1;
764 nmp->m_rsumblocks = xfs_rtsummary_blockcount(nmp, &nmp->m_rsumlevels);
765
766 if (rblocks > 0)
767 nmp->m_features |= XFS_FEAT_REALTIME;
768
769 /* recompute growfsrt reservation from new rsumsize */
770 xfs_trans_resv_calc(nmp, &nmp->m_resv);
771 return nmp;
772 }
773
774 /* Free all the new space and return the number of extents actually freed. */
775 static int
776 xfs_growfs_rt_free_new(
777 struct xfs_rtgroup *rtg,
778 struct xfs_rtalloc_args *nargs,
779 xfs_rtbxlen_t *freed_rtx)
780 {
781 struct xfs_mount *mp = rtg_mount(rtg);
782 xfs_rgnumber_t rgno = rtg_rgno(rtg);
783 xfs_rtxnum_t start_rtx = 0, end_rtx;
784
785 if (rgno < mp->m_sb.sb_rgcount)
786 start_rtx = xfs_rtgroup_extents(mp, rgno);
787 end_rtx = xfs_rtgroup_extents(nargs->mp, rgno);
788
789 /*
790 * Compute the first new extent that we want to free, being careful to
791 * skip past a realtime superblock at the start of the realtime volume.
792 */
793 if (xfs_has_rtsb(nargs->mp) && rgno == 0 && start_rtx == 0)
794 start_rtx++;
795 *freed_rtx = end_rtx - start_rtx;
796 return xfs_rtfree_range(nargs, start_rtx, *freed_rtx);
797 }
798
799 static xfs_rfsblock_t
800 xfs_growfs_rt_nrblocks(
801 struct xfs_rtgroup *rtg,
802 xfs_rfsblock_t nrblocks,
803 xfs_agblock_t rextsize,
804 xfs_fileoff_t bmbno)
805 {
806 struct xfs_mount *mp = rtg_mount(rtg);
807 xfs_rfsblock_t step;
808
809 step = (bmbno + 1) * mp->m_rtx_per_rbmblock * rextsize;
810 if (xfs_has_rtgroups(mp)) {
811 xfs_rfsblock_t rgblocks = mp->m_sb.sb_rgextents * rextsize;
812
813 step = min(rgblocks, step) + rgblocks * rtg_rgno(rtg);
814 }
815
816 return min(nrblocks, step);
817 }
818
819 /*
820 * If the post-grow filesystem will have an rtsb; we're initializing the first
821 * rtgroup; and the filesystem didn't have a realtime section, write the rtsb
822 * now, and attach the rtsb buffer to the real mount.
823 */
824 static int
825 xfs_growfs_rt_init_rtsb(
826 const struct xfs_rtalloc_args *nargs,
827 const struct xfs_rtgroup *rtg,
828 const struct xfs_rtalloc_args *args)
829 {
830 struct xfs_mount *mp = args->mp;
831 struct xfs_buf *rtsb_bp;
832 int error;
833
834 if (!xfs_has_rtsb(nargs->mp))
835 return 0;
836 if (rtg_rgno(rtg) > 0)
837 return 0;
838 if (mp->m_sb.sb_rblocks)
839 return 0;
840
841 error = xfs_buf_get_uncached(mp->m_rtdev_targp, XFS_FSB_TO_BB(mp, 1),
842 &rtsb_bp);
843 if (error)
844 return error;
845
846 rtsb_bp->b_maps[0].bm_bn = XFS_RTSB_DADDR;
847 rtsb_bp->b_ops = &xfs_rtsb_buf_ops;
848
849 xfs_update_rtsb(rtsb_bp, mp->m_sb_bp);
850 mp->m_rtsb_bp = rtsb_bp;
851 error = xfs_bwrite(rtsb_bp);
852 xfs_buf_unlock(rtsb_bp);
853 if (error)
854 return error;
855
856 /* Initialize the rtrmap to reflect the rtsb. */
857 if (rtg_rmap(args->rtg) != NULL)
858 error = xfs_rtrmapbt_init_rtsb(nargs->mp, args->rtg, args->tp);
859
860 return error;
861 }
862
863 static void
864 xfs_growfs_rt_sb_fields(
865 struct xfs_trans *tp,
866 const struct xfs_mount *nmp)
867 {
868 struct xfs_mount *mp = tp->t_mountp;
869
870 if (nmp->m_sb.sb_rextsize != mp->m_sb.sb_rextsize)
871 xfs_trans_mod_sb(tp, XFS_TRANS_SB_REXTSIZE,
872 nmp->m_sb.sb_rextsize - mp->m_sb.sb_rextsize);
873 if (nmp->m_sb.sb_rbmblocks != mp->m_sb.sb_rbmblocks)
874 xfs_trans_mod_sb(tp, XFS_TRANS_SB_RBMBLOCKS,
875 nmp->m_sb.sb_rbmblocks - mp->m_sb.sb_rbmblocks);
876 if (nmp->m_sb.sb_rblocks != mp->m_sb.sb_rblocks)
877 xfs_trans_mod_sb(tp, XFS_TRANS_SB_RBLOCKS,
878 nmp->m_sb.sb_rblocks - mp->m_sb.sb_rblocks);
879 if (nmp->m_sb.sb_rextents != mp->m_sb.sb_rextents)
880 xfs_trans_mod_sb(tp, XFS_TRANS_SB_REXTENTS,
881 nmp->m_sb.sb_rextents - mp->m_sb.sb_rextents);
882 if (nmp->m_sb.sb_rextslog != mp->m_sb.sb_rextslog)
883 xfs_trans_mod_sb(tp, XFS_TRANS_SB_REXTSLOG,
884 nmp->m_sb.sb_rextslog - mp->m_sb.sb_rextslog);
885 if (nmp->m_sb.sb_rgcount != mp->m_sb.sb_rgcount)
886 xfs_trans_mod_sb(tp, XFS_TRANS_SB_RGCOUNT,
887 nmp->m_sb.sb_rgcount - mp->m_sb.sb_rgcount);
888 }
889
890 static int
891 xfs_growfs_rt_zoned(
892 struct xfs_rtgroup *rtg,
893 xfs_rfsblock_t nrblocks)
894 {
895 struct xfs_mount *mp = rtg_mount(rtg);
896 struct xfs_mount *nmp;
897 struct xfs_trans *tp;
898 xfs_rtbxlen_t freed_rtx;
899 int error;
900
901 /*
902 * Calculate new sb and mount fields for this round. Also ensure the
903 * rtg_extents value is uptodate as the rtbitmap code relies on it.
904 */
905 nmp = xfs_growfs_rt_alloc_fake_mount(mp, nrblocks,
906 mp->m_sb.sb_rextsize);
907 if (!nmp)
908 return -ENOMEM;
909 freed_rtx = nmp->m_sb.sb_rextents - mp->m_sb.sb_rextents;
910
911 xfs_rtgroup_calc_geometry(nmp, rtg, rtg_rgno(rtg),
912 nmp->m_sb.sb_rgcount, nmp->m_sb.sb_rextents);
913
914 error = xfs_trans_alloc(mp, &M_RES(nmp)->tr_growrtfree, 0, 0, 0, &tp);
915 if (error)
916 goto out_free;
917
918 xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
919 xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP);
920
921 xfs_growfs_rt_sb_fields(tp, nmp);
922 xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, freed_rtx);
923
924 error = xfs_trans_commit(tp);
925 if (error)
926 goto out_free;
927
928 /*
929 * Ensure the mount RT feature flag is now set, and compute new
930 * maxlevels for rt btrees.
931 */
932 mp->m_features |= XFS_FEAT_REALTIME;
933 xfs_rtrmapbt_compute_maxlevels(mp);
934 xfs_rtrefcountbt_compute_maxlevels(mp);
935 xfs_zoned_add_available(mp, freed_rtx);
936 out_free:
937 kfree(nmp);
938 return error;
939 }
940
941 static int
942 xfs_growfs_rt_bmblock(
943 struct xfs_rtgroup *rtg,
944 xfs_rfsblock_t nrblocks,
945 xfs_agblock_t rextsize,
946 xfs_fileoff_t bmbno)
947 {
948 struct xfs_mount *mp = rtg_mount(rtg);
949 struct xfs_inode *rbmip = rtg_bitmap(rtg);
950 struct xfs_inode *rsumip = rtg_summary(rtg);
951 struct xfs_rtalloc_args args = {
952 .mp = mp,
953 .rtg = rtg,
954 };
955 struct xfs_rtalloc_args nargs = {
956 .rtg = rtg,
957 };
958 struct xfs_mount *nmp;
959 xfs_rtbxlen_t freed_rtx;
960 int error;
961
962 /*
963 * Calculate new sb and mount fields for this round. Also ensure the
964 * rtg_extents value is uptodate as the rtbitmap code relies on it.
965 */
966 nmp = nargs.mp = xfs_growfs_rt_alloc_fake_mount(mp,
967 xfs_growfs_rt_nrblocks(rtg, nrblocks, rextsize, bmbno),
968 rextsize);
969 if (!nmp)
970 return -ENOMEM;
971
972 xfs_rtgroup_calc_geometry(nmp, rtg, rtg_rgno(rtg),
973 nmp->m_sb.sb_rgcount, nmp->m_sb.sb_rextents);
974
975 /*
976 * Recompute the growfsrt reservation from the new rsumsize, so that the
977 * transaction below uses the new, potentially larger value.
978 */
979 xfs_trans_resv_calc(nmp, &nmp->m_resv);
980 error = xfs_trans_alloc(mp, &M_RES(nmp)->tr_growrtfree, 0, 0, 0,
981 &args.tp);
982 if (error)
983 goto out_free;
984 nargs.tp = args.tp;
985
986 xfs_rtgroup_lock(args.rtg, XFS_RTGLOCK_BITMAP | XFS_RTGLOCK_RMAP);
987 xfs_rtgroup_trans_join(args.tp, args.rtg,
988 XFS_RTGLOCK_BITMAP | XFS_RTGLOCK_RMAP);
989
990 /*
991 * Update the bitmap inode's size ondisk and incore. We need to update
992 * the incore size so that inode inactivation won't punch what it thinks
993 * are "posteof" blocks.
994 */
995 rbmip->i_disk_size = nmp->m_sb.sb_rbmblocks * nmp->m_sb.sb_blocksize;
996 i_size_write(VFS_I(rbmip), rbmip->i_disk_size);
997 xfs_trans_log_inode(args.tp, rbmip, XFS_ILOG_CORE);
998
999 /*
1000 * Update the summary inode's size. We need to update the incore size
1001 * so that inode inactivation won't punch what it thinks are "posteof"
1002 * blocks.
1003 */
1004 rsumip->i_disk_size = nmp->m_rsumblocks * nmp->m_sb.sb_blocksize;
1005 i_size_write(VFS_I(rsumip), rsumip->i_disk_size);
1006 xfs_trans_log_inode(args.tp, rsumip, XFS_ILOG_CORE);
1007
1008 /*
1009 * Copy summary data from old to new sizes when the real size (not
1010 * block-aligned) changes.
1011 */
1012 if (mp->m_sb.sb_rbmblocks != nmp->m_sb.sb_rbmblocks ||
1013 mp->m_rsumlevels != nmp->m_rsumlevels) {
1014 error = xfs_rtcopy_summary(&args, &nargs);
1015 if (error)
1016 goto out_cancel;
1017 }
1018
1019 error = xfs_growfs_rt_init_rtsb(&nargs, rtg, &args);
1020 if (error)
1021 goto out_cancel;
1022
1023 /*
1024 * Update superblock fields.
1025 */
1026 xfs_growfs_rt_sb_fields(args.tp, nmp);
1027
1028 /*
1029 * Free the new extent.
1030 */
1031 error = xfs_growfs_rt_free_new(rtg, &nargs, &freed_rtx);
1032 xfs_rtbuf_cache_relse(&nargs);
1033 if (error)
1034 goto out_cancel;
1035
1036 /*
1037 * Mark more blocks free in the superblock.
1038 */
1039 xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_FREXTENTS, freed_rtx);
1040
1041 /*
1042 * Update the calculated values in the real mount structure.
1043 */
1044 mp->m_rsumlevels = nmp->m_rsumlevels;
1045 mp->m_rsumblocks = nmp->m_rsumblocks;
1046
1047 /*
1048 * Recompute the growfsrt reservation from the new rsumsize.
1049 */
1050 xfs_trans_resv_calc(mp, &mp->m_resv);
1051
1052 error = xfs_trans_commit(args.tp);
1053 if (error)
1054 goto out_free;
1055
1056 /*
1057 * Ensure the mount RT feature flag is now set, and compute new
1058 * maxlevels for rt btrees.
1059 */
1060 mp->m_features |= XFS_FEAT_REALTIME;
1061 xfs_rtrmapbt_compute_maxlevels(mp);
1062 xfs_rtrefcountbt_compute_maxlevels(mp);
1063
1064 kfree(nmp);
1065 return 0;
1066
1067 out_cancel:
1068 xfs_trans_cancel(args.tp);
1069 out_free:
1070 kfree(nmp);
1071 return error;
1072 }
1073
1074 static xfs_rtxnum_t
1075 xfs_last_rtgroup_extents(
1076 struct xfs_mount *mp)
1077 {
1078 return mp->m_sb.sb_rextents -
1079 ((xfs_rtxnum_t)(mp->m_sb.sb_rgcount - 1) *
1080 mp->m_sb.sb_rgextents);
1081 }
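/*
 * Example for the helper above: with sb_rextents == 100, sb_rgcount == 3 and
 * sb_rgextents == 40, the last rtgroup holds 100 - 2 * 40 == 20 rtextents.
 */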
1082
1083 /*
1084 * Calculate the last rbmblock currently used.
1085 *
1086 * This also deals with the case where there were no rtextents before.
1087 */
1088 static xfs_fileoff_t
1089 xfs_last_rt_bmblock(
1090 struct xfs_rtgroup *rtg)
1091 {
1092 struct xfs_mount *mp = rtg_mount(rtg);
1093 xfs_rgnumber_t rgno = rtg_rgno(rtg);
1094 xfs_fileoff_t bmbno = 0;
1095
1096 ASSERT(!mp->m_sb.sb_rgcount || rgno >= mp->m_sb.sb_rgcount - 1);
1097
1098 if (mp->m_sb.sb_rgcount && rgno == mp->m_sb.sb_rgcount - 1) {
1099 xfs_rtxnum_t nrext = xfs_last_rtgroup_extents(mp);
1100
1101 /* Also fill up the previous block if not entirely full. */
1102 bmbno = xfs_rtbitmap_blockcount_len(mp, nrext);
1103 if (xfs_rtx_to_rbmword(mp, nrext) != 0)
1104 bmbno--;
1105 }
1106
1107 return bmbno;
1108 }
1109
1110 /*
1111 * Allocate space to the bitmap and summary files, as necessary.
1112 */
1113 static int
1114 xfs_growfs_rt_alloc_blocks(
1115 struct xfs_rtgroup *rtg,
1116 xfs_rfsblock_t nrblocks,
1117 xfs_agblock_t rextsize,
1118 xfs_extlen_t *nrbmblocks)
1119 {
1120 struct xfs_mount *mp = rtg_mount(rtg);
1121 struct xfs_inode *rbmip = rtg_bitmap(rtg);
1122 struct xfs_inode *rsumip = rtg_summary(rtg);
1123 xfs_extlen_t orbmblocks = 0;
1124 xfs_extlen_t orsumblocks = 0;
1125 struct xfs_mount *nmp;
1126 int error = 0;
1127
1128 nmp = xfs_growfs_rt_alloc_fake_mount(mp, nrblocks, rextsize);
1129 if (!nmp)
1130 return -ENOMEM;
1131 *nrbmblocks = nmp->m_sb.sb_rbmblocks;
1132
1133 if (xfs_has_rtgroups(mp)) {
1134 /*
1135 * For file systems with the rtgroups feature, the RT bitmap and
1136 * summary are always fully allocated, which means that we never
1137 * need to grow the existing files.
1138 *
1139 * But we have to be careful to only fill the bitmap until the
1140 * end of the actually used range.
1141 */
1142 if (rtg_rgno(rtg) == nmp->m_sb.sb_rgcount - 1)
1143 *nrbmblocks = xfs_rtbitmap_blockcount_len(nmp,
1144 xfs_last_rtgroup_extents(nmp));
1145
1146 if (mp->m_sb.sb_rgcount &&
1147 rtg_rgno(rtg) == mp->m_sb.sb_rgcount - 1)
1148 goto out_free;
1149 } else {
1150 /*
1151 * Get the old block counts for bitmap and summary inodes.
1152 * These can't change since other growfs callers are locked out.
1153 */
1154 orbmblocks = XFS_B_TO_FSB(mp, rbmip->i_disk_size);
1155 orsumblocks = XFS_B_TO_FSB(mp, rsumip->i_disk_size);
1156 }
1157
1158 error = xfs_rtfile_initialize_blocks(rtg, XFS_RTGI_BITMAP, orbmblocks,
1159 nmp->m_sb.sb_rbmblocks, NULL);
1160 if (error)
1161 goto out_free;
1162 error = xfs_rtfile_initialize_blocks(rtg, XFS_RTGI_SUMMARY, orsumblocks,
1163 nmp->m_rsumblocks, NULL);
1164 out_free:
1165 kfree(nmp);
1166 return error;
1167 }
1168
1169 static int
1170 xfs_growfs_rtg(
1171 struct xfs_mount *mp,
1172 xfs_rgnumber_t rgno,
1173 xfs_rfsblock_t nrblocks,
1174 xfs_agblock_t rextsize)
1175 {
1176 uint8_t *old_rsum_cache = NULL;
1177 xfs_extlen_t bmblocks;
1178 xfs_fileoff_t bmbno;
1179 struct xfs_rtgroup *rtg;
1180 unsigned int i;
1181 int error;
1182
1183 rtg = xfs_rtgroup_grab(mp, rgno);
1184 if (!rtg)
1185 return -EINVAL;
1186
1187 for (i = 0; i < XFS_RTGI_MAX; i++) {
1188 error = xfs_rtginode_ensure(rtg, i);
1189 if (error)
1190 goto out_rele;
1191 }
1192
1193 if (xfs_has_zoned(mp)) {
1194 error = xfs_growfs_rt_zoned(rtg, nrblocks);
1195 goto out_rele;
1196 }
1197
1198 error = xfs_growfs_rt_alloc_blocks(rtg, nrblocks, rextsize, &bmblocks);
1199 if (error)
1200 goto out_rele;
1201
1202 if (bmblocks != rtg_mount(rtg)->m_sb.sb_rbmblocks) {
1203 old_rsum_cache = rtg->rtg_rsum_cache;
1204 error = xfs_alloc_rsum_cache(rtg, bmblocks);
1205 if (error)
1206 goto out_rele;
1207 }
1208
1209 for (bmbno = xfs_last_rt_bmblock(rtg); bmbno < bmblocks; bmbno++) {
1210 error = xfs_growfs_rt_bmblock(rtg, nrblocks, rextsize, bmbno);
1211 if (error)
1212 goto out_error;
1213 }
1214
1215 kvfree(old_rsum_cache);
1216 goto out_rele;
1217
1218 out_error:
1219 /*
1220 * Reset rtg_extents to the old value if adding more blocks failed.
1221 */
1222 xfs_rtgroup_calc_geometry(mp, rtg, rtg_rgno(rtg), mp->m_sb.sb_rgcount,
1223 mp->m_sb.sb_rextents);
1224 if (old_rsum_cache) {
1225 kvfree(rtg->rtg_rsum_cache);
1226 rtg->rtg_rsum_cache = old_rsum_cache;
1227 }
1228 out_rele:
1229 xfs_rtgroup_rele(rtg);
1230 return error;
1231 }
1232
1233 int
1234 xfs_growfs_check_rtgeom(
1235 const struct xfs_mount *mp,
1236 xfs_rfsblock_t dblocks,
1237 xfs_rfsblock_t rblocks,
1238 xfs_extlen_t rextsize)
1239 {
1240 xfs_extlen_t min_logfsbs;
1241 struct xfs_mount *nmp;
1242
1243 nmp = xfs_growfs_rt_alloc_fake_mount(mp, rblocks, rextsize);
1244 if (!nmp)
1245 return -ENOMEM;
1246 nmp->m_sb.sb_dblocks = dblocks;
1247
1248 xfs_rtrmapbt_compute_maxlevels(nmp);
1249 xfs_rtrefcountbt_compute_maxlevels(nmp);
1250 xfs_trans_resv_calc(nmp, M_RES(nmp));
1251
1252 /*
1253 * New summary size can't be more than half the size of the log. This
1254 * prevents us from getting a log overflow, since we'll log basically
1255 * the whole summary file at once.
1256 */
1257 min_logfsbs = min_t(xfs_extlen_t, xfs_log_calc_minimum_size(nmp),
1258 nmp->m_rsumblocks * 2);
1259
1260 kfree(nmp);
1261
1262 trace_xfs_growfs_check_rtgeom(mp, min_logfsbs);
1263
1264 if (min_logfsbs > mp->m_sb.sb_logblocks)
1265 return -EINVAL;
1266
1267 if (xfs_has_zoned(mp)) {
1268 uint32_t gblocks = mp->m_groups[XG_TYPE_RTG].blocks;
1269 uint32_t rem;
1270
1271 if (rextsize != 1)
1272 return -EINVAL;
1273 div_u64_rem(mp->m_sb.sb_rblocks, gblocks, &rem);
1274 if (rem) {
1275 xfs_warn(mp,
1276 "new RT volume size (%lld) not aligned to RT group size (%d)",
1277 mp->m_sb.sb_rblocks, gblocks);
1278 return -EINVAL;
1279 }
1280 }
1281
1282 return 0;
1283 }
1284
1285 /*
1286 * Compute the new number of rt groups and ensure that /rtgroups exists.
1287 *
1288 * Changing the rtgroup size is not allowed (even if the rt volume hasn't yet
1289 * been initialized) because the userspace ABI doesn't support it.
1290 */
1291 static int
1292 xfs_growfs_rt_prep_groups(
1293 struct xfs_mount *mp,
1294 xfs_rfsblock_t rblocks,
1295 xfs_extlen_t rextsize,
1296 xfs_rgnumber_t *new_rgcount)
1297 {
1298 int error;
1299
1300 *new_rgcount = howmany_64(rblocks, mp->m_sb.sb_rgextents * rextsize);
1301 if (*new_rgcount > XFS_MAX_RGNUMBER)
1302 return -EINVAL;
1303
1304 /* Make sure the /rtgroups dir has been created */
1305 if (!mp->m_rtdirip) {
1306 struct xfs_trans *tp;
1307
1308 error = xfs_trans_alloc_empty(mp, &tp);
1309 if (error)
1310 return error;
1311 error = xfs_rtginode_load_parent(tp);
1312 xfs_trans_cancel(tp);
1313
1314 if (error == -ENOENT)
1315 error = xfs_rtginode_mkdir_parent(mp);
1316 if (error)
1317 return error;
1318 }
1319
1320 return 0;
1321 }
1322
1323 static bool
1324 xfs_grow_last_rtg(
1325 struct xfs_mount *mp)
1326 {
1327 if (!xfs_has_rtgroups(mp))
1328 return true;
1329 if (mp->m_sb.sb_rgcount == 0)
1330 return false;
1331 return xfs_rtgroup_extents(mp, mp->m_sb.sb_rgcount - 1) <=
1332 mp->m_sb.sb_rgextents;
1333 }
1334
1335 /*
1336 * Read in the last block of the RT device to make sure it is accessible.
1337 */
1338 static int
1339 xfs_rt_check_size(
1340 struct xfs_mount *mp,
1341 xfs_rfsblock_t last_block)
1342 {
1343 xfs_daddr_t daddr = XFS_FSB_TO_BB(mp, last_block);
1344 struct xfs_buf *bp;
1345 int error;
1346
1347 if (XFS_BB_TO_FSB(mp, daddr) != last_block) {
1348 xfs_warn(mp, "RT device size overflow: %llu != %llu",
1349 XFS_BB_TO_FSB(mp, daddr), last_block);
1350 return -EFBIG;
1351 }
1352
1353 error = xfs_buf_read_uncached(mp->m_rtdev_targp,
1354 XFS_FSB_TO_BB(mp, mp->m_sb.sb_rtstart) + daddr,
1355 XFS_FSB_TO_BB(mp, 1), &bp, NULL);
1356 if (error)
1357 xfs_warn(mp, "cannot read last RT device sector (%lld)",
1358 last_block);
1359 else
1360 xfs_buf_relse(bp);
1361 return error;
1362 }
1363
1364 /*
1365 * Grow the realtime area of the filesystem.
1366 */
1367 int
1368 xfs_growfs_rt(
1369 struct xfs_mount *mp,
1370 struct xfs_growfs_rt *in)
1371 {
1372 xfs_rgnumber_t old_rgcount = mp->m_sb.sb_rgcount;
1373 xfs_rgnumber_t new_rgcount = 1;
1374 xfs_rgnumber_t rgno;
1375 xfs_agblock_t old_rextsize = mp->m_sb.sb_rextsize;
1376 int error;
1377
1378 if (!capable(CAP_SYS_ADMIN))
1379 return -EPERM;
1380
1381 /* Needs to have been mounted with an rt device. */
1382 if (!XFS_IS_REALTIME_MOUNT(mp))
1383 return -EINVAL;
1384
1385 if (!mutex_trylock(&mp->m_growlock))
1386 return -EWOULDBLOCK;
1387
1388 /* Shrink not supported. */
1389 error = -EINVAL;
1390 if (in->newblocks <= mp->m_sb.sb_rblocks)
1391 goto out_unlock;
1392 /* Can only change rt extent size when adding rt volume. */
1393 if (mp->m_sb.sb_rblocks > 0 && in->extsize != mp->m_sb.sb_rextsize)
1394 goto out_unlock;
1395
1396 /* Range check the extent size. */
1397 if (XFS_FSB_TO_B(mp, in->extsize) > XFS_MAX_RTEXTSIZE ||
1398 XFS_FSB_TO_B(mp, in->extsize) < XFS_MIN_RTEXTSIZE)
1399 goto out_unlock;
1400
1401 /* Check for features supported only on rtgroups filesystems. */
1402 error = -EOPNOTSUPP;
1403 if (!xfs_has_rtgroups(mp)) {
1404 if (xfs_has_rmapbt(mp))
1405 goto out_unlock;
1406 if (xfs_has_quota(mp))
1407 goto out_unlock;
1408 if (xfs_has_reflink(mp))
1409 goto out_unlock;
1410 } else if (xfs_has_reflink(mp) &&
1411 !xfs_reflink_supports_rextsize(mp, in->extsize))
1412 goto out_unlock;
1413
1414 error = xfs_sb_validate_fsb_count(&mp->m_sb, in->newblocks);
1415 if (error)
1416 goto out_unlock;
1417
1418 error = xfs_rt_check_size(mp, in->newblocks - 1);
1419 if (error)
1420 goto out_unlock;
1421
1422 /*
1423 * Calculate new parameters. These are the final values to be reached.
1424 */
1425 error = -EINVAL;
1426 if (in->newblocks < in->extsize)
1427 goto out_unlock;
1428
1429 /* Make sure the new fs size won't cause problems with the log. */
1430 error = xfs_growfs_check_rtgeom(mp, mp->m_sb.sb_dblocks, in->newblocks,
1431 in->extsize);
1432 if (error)
1433 goto out_unlock;
1434
1435 if (xfs_has_rtgroups(mp)) {
1436 error = xfs_growfs_rt_prep_groups(mp, in->newblocks,
1437 in->extsize, &new_rgcount);
1438 if (error)
1439 goto out_unlock;
1440 }
1441
1442 if (xfs_grow_last_rtg(mp)) {
1443 error = xfs_growfs_rtg(mp, old_rgcount - 1, in->newblocks,
1444 in->extsize);
1445 if (error)
1446 goto out_unlock;
1447 }
1448
1449 for (rgno = old_rgcount; rgno < new_rgcount; rgno++) {
1450 xfs_rtbxlen_t rextents = div_u64(in->newblocks, in->extsize);
1451
1452 error = xfs_rtgroup_alloc(mp, rgno, new_rgcount, rextents);
1453 if (error)
1454 goto out_unlock;
1455
1456 error = xfs_growfs_rtg(mp, rgno, in->newblocks, in->extsize);
1457 if (error) {
1458 struct xfs_rtgroup *rtg;
1459
1460 rtg = xfs_rtgroup_grab(mp, rgno);
1461 if (!WARN_ON_ONCE(!rtg)) {
1462 xfs_rtunmount_rtg(rtg);
1463 xfs_rtgroup_rele(rtg);
1464 xfs_rtgroup_free(mp, rgno);
1465 }
1466 break;
1467 }
1468 }
1469
1470 if (!error && old_rextsize != in->extsize)
1471 error = xfs_growfs_rt_fixup_extsize(mp);
1472
1473 /*
1474 * Update secondary superblocks now the physical grow has completed.
1475 *
1476 * Also do this in case of an error as we might have already
1477 * successfully updated one or more RTGs and incremented sb_rgcount.
1478 */
1479 if (!xfs_is_shutdown(mp)) {
1480 int error2 = xfs_update_secondary_sbs(mp);
1481
1482 if (!error)
1483 error = error2;
1484
1485 /* Reset the rt metadata btree space reservations. */
1486 error2 = xfs_metafile_resv_init(mp);
1487 if (error2 && error2 != -ENOSPC)
1488 error = error2;
1489 }
1490
1491 out_unlock:
1492 mutex_unlock(&mp->m_growlock);
1493 return error;
1494 }
1495
1496 /* Read the realtime superblock and attach it to the mount. */
1497 int
1498 xfs_rtmount_readsb(
1499 struct xfs_mount *mp)
1500 {
1501 struct xfs_buf *bp;
1502 int error;
1503
1504 if (!xfs_has_rtsb(mp))
1505 return 0;
1506 if (mp->m_sb.sb_rblocks == 0)
1507 return 0;
1508 if (mp->m_rtdev_targp == NULL) {
1509 xfs_warn(mp,
1510 "Filesystem has a realtime volume, use rtdev=device option");
1511 return -ENODEV;
1512 }
1513
1514 /* m_blkbb_log is not set up yet */
1515 error = xfs_buf_read_uncached(mp->m_rtdev_targp, XFS_RTSB_DADDR,
1516 mp->m_sb.sb_blocksize >> BBSHIFT, &bp,
1517 &xfs_rtsb_buf_ops);
1518 if (error) {
1519 xfs_warn(mp, "rt sb validate failed with error %d.", error);
1520 /* bad CRC means corrupted metadata */
1521 if (error == -EFSBADCRC)
1522 error = -EFSCORRUPTED;
1523 return error;
1524 }
1525
1526 mp->m_rtsb_bp = bp;
1527 xfs_buf_unlock(bp);
1528 return 0;
1529 }
1530
1531 /* Detach the realtime superblock from the mount and free it. */
1532 void
1533 xfs_rtmount_freesb(
1534 struct xfs_mount *mp)
1535 {
1536 struct xfs_buf *bp = mp->m_rtsb_bp;
1537
1538 if (!bp)
1539 return;
1540
1541 xfs_buf_lock(bp);
1542 mp->m_rtsb_bp = NULL;
1543 xfs_buf_relse(bp);
1544 }
1545
1546 /*
1547 * Initialize realtime fields in the mount structure.
1548 */
1549 int /* error */
1550 xfs_rtmount_init(
1551 struct xfs_mount *mp) /* file system mount structure */
1552 {
1553 if (mp->m_sb.sb_rblocks == 0)
1554 return 0;
1555 if (mp->m_rtdev_targp == NULL) {
1556 xfs_warn(mp,
1557 "Filesystem has a realtime volume, use rtdev=device option");
1558 return -ENODEV;
1559 }
1560
1561 mp->m_rsumblocks = xfs_rtsummary_blockcount(mp, &mp->m_rsumlevels);
1562
1563 return xfs_rt_check_size(mp, mp->m_sb.sb_rblocks - 1);
1564 }
1565
1566 static int
1567 xfs_rtalloc_count_frextent(
1568 struct xfs_rtgroup *rtg,
1569 struct xfs_trans *tp,
1570 const struct xfs_rtalloc_rec *rec,
1571 void *priv)
1572 {
1573 uint64_t *valp = priv;
1574
1575 *valp += rec->ar_extcount;
1576 return 0;
1577 }
1578
1579 /*
1580 * Reinitialize the number of free realtime extents from the realtime bitmap.
1581 * Callers must ensure that there is no other activity in the filesystem.
1582 */
1583 int
1584 xfs_rtalloc_reinit_frextents(
1585 struct xfs_mount *mp)
1586 {
1587 uint64_t val = 0;
1588 int error;
1589
1590 struct xfs_rtgroup *rtg = NULL;
1591
1592 while ((rtg = xfs_rtgroup_next(mp, rtg))) {
1593 xfs_rtgroup_lock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
1594 error = xfs_rtalloc_query_all(rtg, NULL,
1595 xfs_rtalloc_count_frextent, &val);
1596 xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
1597 if (error) {
1598 xfs_rtgroup_rele(rtg);
1599 return error;
1600 }
1601 }
1602
1603 spin_lock(&mp->m_sb_lock);
1604 mp->m_sb.sb_frextents = val;
1605 spin_unlock(&mp->m_sb_lock);
1606 xfs_set_freecounter(mp, XC_FREE_RTEXTENTS, mp->m_sb.sb_frextents);
1607 return 0;
1608 }
1609
1610 /*
1611 * Read in the bmbt of an rt metadata inode so that we never have to load them
1612 * at runtime. This enables the use of shared ILOCKs for rtbitmap scans. Use
1613 * an empty transaction to avoid deadlocking on loops in the bmbt.
1614 */
1615 static inline int
1616 xfs_rtmount_iread_extents(
1617 struct xfs_trans *tp,
1618 struct xfs_inode *ip)
1619 {
1620 int error;
1621
1622 xfs_ilock(ip, XFS_ILOCK_EXCL);
1623
1624 error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1625 if (error)
1626 goto out_unlock;
1627
1628 if (xfs_inode_has_attr_fork(ip)) {
1629 error = xfs_iread_extents(tp, ip, XFS_ATTR_FORK);
1630 if (error)
1631 goto out_unlock;
1632 }
1633
1634 out_unlock:
1635 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1636 return error;
1637 }
1638
1639 static int
1640 xfs_rtmount_rtg(
1641 struct xfs_mount *mp,
1642 struct xfs_trans *tp,
1643 struct xfs_rtgroup *rtg)
1644 {
1645 int error, i;
1646
1647 for (i = 0; i < XFS_RTGI_MAX; i++) {
1648 error = xfs_rtginode_load(rtg, i, tp);
1649 if (error)
1650 return error;
1651
1652 if (rtg->rtg_inodes[i]) {
1653 error = xfs_rtmount_iread_extents(tp,
1654 rtg->rtg_inodes[i]);
1655 if (error)
1656 return error;
1657 }
1658 }
1659
1660 if (xfs_has_zoned(mp))
1661 return 0;
1662 return xfs_alloc_rsum_cache(rtg, mp->m_sb.sb_rbmblocks);
1663 }
1664
1665 /*
1666 * Get the bitmap and summary inodes and the summary cache into the mount
1667 * structure at mount time.
1668 */
1669 int
1670 xfs_rtmount_inodes(
1671 struct xfs_mount *mp)
1672 {
1673 struct xfs_trans *tp;
1674 struct xfs_rtgroup *rtg = NULL;
1675 int error;
1676
1677 error = xfs_trans_alloc_empty(mp, &tp);
1678 if (error)
1679 return error;
1680
1681 if (xfs_has_rtgroups(mp) && mp->m_sb.sb_rgcount > 0) {
1682 error = xfs_rtginode_load_parent(tp);
1683 if (error)
1684 goto out_cancel;
1685 }
1686
1687 while ((rtg = xfs_rtgroup_next(mp, rtg))) {
1688 error = xfs_rtmount_rtg(mp, tp, rtg);
1689 if (error) {
1690 xfs_rtgroup_rele(rtg);
1691 xfs_rtunmount_inodes(mp);
1692 break;
1693 }
1694 }
1695
1696 out_cancel:
1697 xfs_trans_cancel(tp);
1698 return error;
1699 }
1700
1701 void
1702 xfs_rtunmount_inodes(
1703 struct xfs_mount *mp)
1704 {
1705 struct xfs_rtgroup *rtg = NULL;
1706
1707 while ((rtg = xfs_rtgroup_next(mp, rtg)))
1708 xfs_rtunmount_rtg(rtg);
1709 xfs_rtginode_irele(&mp->m_rtdirip);
1710 }
1711
1712 /*
1713 * Pick an extent for allocation at the start of a new realtime file.
1714 * Use the sequence number stored in the atime field of the bitmap inode.
1715 * Translate this to a fraction of the rtextents, and return the product
1716 * of rtextents and the fraction.
1717 * The fraction sequence is 0, 1/2, 1/4, 3/4, 1/8, ..., 7/8, 1/16, ...
1718 */
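/*
 * For example, a sequence number of 5 gives log2 == 2 and resid == 1 below,
 * so the pick starts at roughly 3/8 of the rt extents; sequence numbers
 * 4..7 map to the fractions 1/8, 3/8, 5/8 and 7/8.
 */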
1719 static xfs_rtxnum_t
1720 xfs_rtpick_extent(
1721 struct xfs_rtgroup *rtg,
1722 struct xfs_trans *tp,
1723 xfs_rtxlen_t len) /* allocation length (rtextents) */
1724 {
1725 struct xfs_mount *mp = rtg_mount(rtg);
1726 struct xfs_inode *rbmip = rtg_bitmap(rtg);
1727 xfs_rtxnum_t b = 0; /* result rtext */
1728 int log2; /* log of sequence number */
1729 uint64_t resid; /* residual after log removed */
1730 uint64_t seq; /* sequence number of file creation */
1731 struct timespec64 ts; /* timespec in inode */
1732
1733 xfs_assert_ilocked(rbmip, XFS_ILOCK_EXCL);
1734
1735 ts = inode_get_atime(VFS_I(rbmip));
1736 if (!(rbmip->i_diflags & XFS_DIFLAG_NEWRTBM)) {
1737 rbmip->i_diflags |= XFS_DIFLAG_NEWRTBM;
1738 seq = 0;
1739 } else {
1740 seq = ts.tv_sec;
1741 }
1742 log2 = xfs_highbit64(seq);
1743 if (log2 != -1) {
1744 resid = seq - (1ULL << log2);
1745 b = (mp->m_sb.sb_rextents * ((resid << 1) + 1ULL)) >>
1746 (log2 + 1);
1747 if (b >= mp->m_sb.sb_rextents)
1748 div64_u64_rem(b, mp->m_sb.sb_rextents, &b);
1749 if (b + len > mp->m_sb.sb_rextents)
1750 b = mp->m_sb.sb_rextents - len;
1751 }
1752 ts.tv_sec = seq + 1;
1753 inode_set_atime_to_ts(VFS_I(rbmip), ts);
1754 xfs_trans_log_inode(tp, rbmip, XFS_ILOG_CORE);
1755 return b;
1756 }
1757
1758 static void
1759 xfs_rtalloc_align_minmax(
1760 xfs_rtxlen_t *raminlen,
1761 xfs_rtxlen_t *ramaxlen,
1762 xfs_rtxlen_t *prod)
1763 {
1764 xfs_rtxlen_t newmaxlen = *ramaxlen;
1765 xfs_rtxlen_t newminlen = *raminlen;
1766 xfs_rtxlen_t slack;
1767
1768 slack = newmaxlen % *prod;
1769 if (slack)
1770 newmaxlen -= slack;
1771 slack = newminlen % *prod;
1772 if (slack)
1773 newminlen += *prod - slack;
1774
1775 /*
1776 * If adjusting for extent size hint alignment produces an invalid
1777 * min/max len combination, go ahead without it.
1778 */
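/*
 * For example, prod == 4 with *raminlen == 6 and *ramaxlen == 7 would round
 * to newminlen == 8 > newmaxlen == 4, so the alignment request is dropped.
 */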
1779 if (newmaxlen < newminlen) {
1780 *prod = 1;
1781 return;
1782 }
1783 *ramaxlen = newmaxlen;
1784 *raminlen = newminlen;
1785 }
1786
1787 /* Given a free extent, find any part of it that isn't busy, if possible. */
1788 STATIC bool
1789 xfs_rtalloc_check_busy(
1790 struct xfs_rtalloc_args *args,
1791 xfs_rtxnum_t start,
1792 xfs_rtxlen_t minlen_rtx,
1793 xfs_rtxlen_t maxlen_rtx,
1794 xfs_rtxlen_t len_rtx,
1795 xfs_rtxlen_t prod,
1796 xfs_rtxnum_t rtx,
1797 xfs_rtxlen_t *reslen,
1798 xfs_rtxnum_t *resrtx,
1799 unsigned *busy_gen)
1800 {
1801 struct xfs_rtgroup *rtg = args->rtg;
1802 struct xfs_mount *mp = rtg_mount(rtg);
1803 xfs_agblock_t rgbno = xfs_rtx_to_rgbno(rtg, rtx);
1804 xfs_rgblock_t min_rgbno = xfs_rtx_to_rgbno(rtg, start);
1805 xfs_extlen_t minlen = xfs_rtxlen_to_extlen(mp, minlen_rtx);
1806 xfs_extlen_t len = xfs_rtxlen_to_extlen(mp, len_rtx);
1807 xfs_extlen_t diff;
1808 bool busy;
1809
1810 busy = xfs_extent_busy_trim(rtg_group(rtg), minlen,
1811 xfs_rtxlen_to_extlen(mp, maxlen_rtx), &rgbno, &len,
1812 busy_gen);
1813
1814 /*
1815 * If we have a largish extent that happens to start before min_rgbno,
1816 * see if we can shift it into range...
1817 */
1818 if (rgbno < min_rgbno && rgbno + len > min_rgbno) {
1819 diff = min_rgbno - rgbno;
1820 if (len > diff) {
1821 rgbno += diff;
1822 len -= diff;
1823 }
1824 }
1825
1826 if (prod > 1 && len >= minlen) {
1827 xfs_rgblock_t aligned_rgbno = roundup(rgbno, prod);
1828
1829 diff = aligned_rgbno - rgbno;
1830
1831 *resrtx = xfs_rgbno_to_rtx(mp, aligned_rgbno);
1832 *reslen = xfs_extlen_to_rtxlen(mp,
1833 diff >= len ? 0 : len - diff);
1834 } else {
1835 *resrtx = xfs_rgbno_to_rtx(mp, rgbno);
1836 *reslen = xfs_extlen_to_rtxlen(mp, len);
1837 }
1838
1839 return busy;
1840 }
1841
1842 /*
1843 * Adjust the given free extent so that it isn't busy, or flush the log and
1844 * wait for the space to become unbusy. Only needed for rtgroups.
1845 */
1846 STATIC int
1847 xfs_rtallocate_adjust_for_busy(
1848 struct xfs_rtalloc_args *args,
1849 xfs_rtxnum_t start,
1850 xfs_rtxlen_t minlen,
1851 xfs_rtxlen_t maxlen,
1852 xfs_rtxlen_t *len,
1853 xfs_rtxlen_t prod,
1854 xfs_rtxnum_t *rtx)
1855 {
1856 xfs_rtxnum_t resrtx;
1857 xfs_rtxlen_t reslen;
1858 unsigned busy_gen;
1859 bool busy;
1860 int error;
1861
1862 again:
1863 busy = xfs_rtalloc_check_busy(args, start, minlen, maxlen, *len, prod,
1864 *rtx, &reslen, &resrtx, &busy_gen);
1865 if (!busy)
1866 return 0;
1867
1868 if (reslen < minlen || (start != 0 && resrtx != *rtx)) {
1869 /*
1870 * Enough of the extent was busy that we cannot satisfy the
1871 * allocation, or this is a near allocation and the start of
1872 * the extent is busy. Flush the log and wait for the busy
1873 * situation to resolve.
1874 */
1875 trace_xfs_rtalloc_extent_busy(args->rtg, start, minlen, maxlen,
1876 *len, prod, *rtx, busy_gen);
1877
1878 error = xfs_extent_busy_flush(args->tp, rtg_group(args->rtg),
1879 busy_gen, 0);
1880 if (error)
1881 return error;
1882
1883 goto again;
1884 }
1885
1886 /* Some of the free space wasn't busy, hand that back to the caller. */
1887 trace_xfs_rtalloc_extent_busy_trim(args->rtg, *rtx, *len, resrtx,
1888 reslen);
1889 *len = reslen;
1890 *rtx = resrtx;
1891
1892 return 0;
1893 }
1894
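/*
 * Try to allocate an extent from a single realtime group, honoring the
 * locality hint if one was given.  Returns -ENOSPC if this group cannot
 * satisfy the request.
 */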
1895 static int
1896 xfs_rtallocate_rtg(
1897 struct xfs_trans *tp,
1898 xfs_rgnumber_t rgno,
1899 xfs_rtblock_t bno_hint,
1900 xfs_rtxlen_t minlen,
1901 xfs_rtxlen_t maxlen,
1902 xfs_rtxlen_t prod,
1903 bool wasdel,
1904 bool initial_user_data,
1905 bool *rtlocked,
1906 xfs_rtblock_t *bno,
1907 xfs_extlen_t *blen)
1908 {
1909 struct xfs_rtalloc_args args = {
1910 .mp = tp->t_mountp,
1911 .tp = tp,
1912 };
1913 xfs_rtxnum_t start = 0;
1914 xfs_rtxnum_t rtx;
1915 xfs_rtxlen_t len = 0;
1916 int error = 0;
1917
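/*
 * Grab an active reference to the requested rt group; if we can't, report
 * ENOSPC so the caller moves on to the next group.
 */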
1918 args.rtg = xfs_rtgroup_grab(args.mp, rgno);
1919 if (!args.rtg)
1920 return -ENOSPC;
1921
1922 /*
1923 * We need to lock out modifications to both the RT bitmap and summary
1924 * inodes for finding free space in xfs_rtallocate_extent_{near,size}
1925 * and join the bitmap and summary inodes for the actual allocation
1926 * down in xfs_rtallocate_range.
1927 *
1928 * For RTG-enabled file systems we don't want to join the inodes to the
1929 * transaction until we are committed to allocating from this RTG so that
1930 * only one inode of each type is locked at a time.
1931 *
1932 * But for pre-RTG file systems we already need to join the bitmap inode
1933 * to the transaction for xfs_rtpick_extent, which bumps the sequence
1934 * number in it, so we'll have to join the inode to the transaction
1935 * early here.
1936 *
1937 * This is all a bit messy, but at least the mess is contained in
1938 * this function.
1939 */
1940 if (!*rtlocked) {
1941 xfs_rtgroup_lock(args.rtg, XFS_RTGLOCK_BITMAP);
1942 if (!xfs_has_rtgroups(args.mp))
1943 xfs_rtgroup_trans_join(tp, args.rtg,
1944 XFS_RTGLOCK_BITMAP);
1945 *rtlocked = true;
1946 }
1947
1948 /*
1949 * For an allocation to an empty file at offset 0, pick an extent that
1950 * will space things out in the rt area.
1951 */
1952 if (bno_hint != NULLFSBLOCK)
1953 start = xfs_rtb_to_rtx(args.mp, bno_hint);
1954 else if (!xfs_has_rtgroups(args.mp) && initial_user_data)
1955 start = xfs_rtpick_extent(args.rtg, tp, maxlen);
1956
1957 if (start) {
1958 error = xfs_rtallocate_extent_near(&args, start, minlen, maxlen,
1959 &len, prod, &rtx);
1960 /*
1961 * If we can't allocate near a specific rt extent, try again
1962 * without locality criteria.
1963 */
1964 if (error == -ENOSPC) {
1965 xfs_rtbuf_cache_relse(&args);
1966 error = 0;
1967 }
1968 }
1969
1970 if (!error && len == 0) {
1971 error = xfs_rtallocate_extent_size(&args, minlen, maxlen, &len,
1972 prod, &rtx);
1973 }
1974
1975 if (error) {
1976 if (xfs_has_rtgroups(args.mp))
1977 goto out_unlock;
1978 goto out_release;
1979 }
1980
1981 if (xfs_has_rtgroups(args.mp)) {
1982 error = xfs_rtallocate_adjust_for_busy(&args, start, minlen,
1983 maxlen, &len, prod, &rtx);
1984 if (error)
1985 goto out_unlock;
1986
1987 xfs_rtgroup_trans_join(tp, args.rtg, XFS_RTGLOCK_BITMAP);
1988 }
1989
1990 error = xfs_rtallocate_range(&args, rtx, len);
1991 if (error)
1992 goto out_release;
1993
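/*
 * Account the allocated rt extents against the superblock free count.
 * Delayed allocations already took their extents out of the in-core free
 * count when the delalloc reservation was made, so they use the RES
 * variant of the counter update to avoid charging the space twice.
 */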
1994 xfs_trans_mod_sb(tp, wasdel ?
1995 XFS_TRANS_SB_RES_FREXTENTS : XFS_TRANS_SB_FREXTENTS,
1996 -(long)len);
1997 *bno = xfs_rtx_to_rtb(args.rtg, rtx);
1998 *blen = xfs_rtxlen_to_extlen(args.mp, len);
1999
2000 out_release:
2001 xfs_rtgroup_rele(args.rtg);
2002 xfs_rtbuf_cache_relse(&args);
2003 return error;
2004 out_unlock:
2005 xfs_rtgroup_unlock(args.rtg, XFS_RTGLOCK_BITMAP);
2006 *rtlocked = false;
2007 goto out_release;
2008 }
2009
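/*
 * Allocate an extent from any realtime group, starting at the group picked
 * from the locality hint (or the rotor) and wrapping around until one of
 * them succeeds or all of them report ENOSPC.
 */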
2010 int
2011 xfs_rtallocate_rtgs(
2012 struct xfs_trans *tp,
2013 xfs_fsblock_t bno_hint,
2014 xfs_rtxlen_t minlen,
2015 xfs_rtxlen_t maxlen,
2016 xfs_rtxlen_t prod,
2017 bool wasdel,
2018 bool initial_user_data,
2019 xfs_rtblock_t *bno,
2020 xfs_extlen_t *blen)
2021 {
2022 struct xfs_mount *mp = tp->t_mountp;
2023 xfs_rgnumber_t start_rgno, rgno;
2024 int error;
2025
2026 /*
2027 * For now this just blindly iterates over the RTGs for an initial
2028 * allocation. We could try to keep an in-memory rtg_longest member
2029 * to avoid the locking when just looking for big enough free space,
2030 * but for now this keeps things simple.
2031 */
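/*
 * Without a locality hint, a simple per-mount rotor picks the starting
 * group so that successive allocations are spread across all rt groups.
 */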
2032 if (bno_hint != NULLFSBLOCK)
2033 start_rgno = xfs_rtb_to_rgno(mp, bno_hint);
2034 else
2035 start_rgno = (atomic_inc_return(&mp->m_rtgrotor) - 1) %
2036 mp->m_sb.sb_rgcount;
2037
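/* Walk every rt group once, wrapping around at the end, until one succeeds. */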
2038 rgno = start_rgno;
2039 do {
2040 bool rtlocked = false;
2041
2042 error = xfs_rtallocate_rtg(tp, rgno, bno_hint, minlen, maxlen,
2043 prod, wasdel, initial_user_data, &rtlocked,
2044 bno, blen);
2045 if (error != -ENOSPC)
2046 return error;
2047 ASSERT(!rtlocked);
2048
2049 if (++rgno == mp->m_sb.sb_rgcount)
2050 rgno = 0;
2051 bno_hint = NULLFSBLOCK;
2052 } while (rgno != start_rgno);
2053
2054 return -ENOSPC;
2055 }
2056
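/*
 * Compute the rt allocation request: align the file offset and length to
 * the extent size hint (or the rt extent size), and derive the minimum,
 * maximum and product (alignment) lengths in units of rt extents.
 */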
2057 static int
2058 xfs_rtallocate_align(
2059 struct xfs_bmalloca *ap,
2060 xfs_rtxlen_t *ralen,
2061 xfs_rtxlen_t *raminlen,
2062 xfs_rtxlen_t *prod,
2063 bool *noalign)
2064 {
2065 struct xfs_mount *mp = ap->ip->i_mount;
2066 xfs_fileoff_t orig_offset = ap->offset;
2067 xfs_extlen_t minlen = mp->m_sb.sb_rextsize;
2068 xfs_extlen_t align; /* minimum allocation alignment */
2069 xfs_extlen_t mod; /* offset remainder within alignment */
2070 int error;
2071
2072 if (*noalign) {
2073 align = mp->m_sb.sb_rextsize;
2074 } else {
2075 if (ap->flags & XFS_BMAPI_COWFORK)
2076 align = xfs_get_cowextsz_hint(ap->ip);
2077 else
2078 align = xfs_get_extsz_hint(ap->ip);
2079 if (!align)
2080 align = 1;
2081 if (align == mp->m_sb.sb_rextsize)
2082 *noalign = true;
2083 }
2084
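/*
 * Round the mapping out to the chosen alignment; this may move ap->offset
 * down and grow ap->length so the allocation covers whole aligned units.
 */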
2085 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 1,
2086 ap->eof, 0, ap->conv, &ap->offset, &ap->length);
2087 if (error)
2088 return error;
2089 ASSERT(ap->length);
2090 ASSERT(xfs_extlen_to_rtxmod(mp, ap->length) == 0);
2091
2092 /*
2093 * If we shifted the file offset downward to satisfy an extent size
2094 * hint, increase minlen by that amount so that the allocator won't
2095 * give us an allocation that's too short to cover at least one of the
2096 * blocks that the caller asked for.
2097 */
2098 if (ap->offset != orig_offset)
2099 minlen += orig_offset - ap->offset;
2100
2101 /*
2102 * Set ralen to be the actual requested length in rtextents.
2103 *
2104 * If the old value was close enough to XFS_MAX_BMBT_EXTLEN that
2105 * we rounded up to it, cut it back so it's valid again.
2106 * Note that if it's a really large request (bigger than
2107 * XFS_MAX_BMBT_EXTLEN), we don't hear about that number, and can't
2108 * adjust the starting point to match it.
2109 */
2110 *ralen = xfs_extlen_to_rtxlen(mp, min(ap->length, XFS_MAX_BMBT_EXTLEN));
2111 *raminlen = max_t(xfs_rtxlen_t, 1, xfs_extlen_to_rtxlen(mp, minlen));
2112 ASSERT(*raminlen > 0);
2113 ASSERT(*raminlen <= *ralen);
2114
2115 /*
2116 * Only bother calculating a real prod factor if offset & length are
2117 * perfectly aligned, otherwise it will just get us in trouble.
2118 */
2119 div_u64_rem(ap->offset, align, &mod);
2120 if (mod || ap->length % align)
2121 *prod = 1;
2122 else
2123 *prod = xfs_extlen_to_rtxlen(mp, align);
2124
2125 if (*prod > 1)
2126 xfs_rtalloc_align_minmax(raminlen, ralen, prod);
2127 return 0;
2128 }
2129
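/*
 * Allocate space on the realtime device for a file mapping, retrying
 * without extent size hint alignment if the aligned request cannot be
 * satisfied.
 */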
2130 int
2131 xfs_bmap_rtalloc(
2132 struct xfs_bmalloca *ap)
2133 {
2134 xfs_fileoff_t orig_offset = ap->offset;
2135 xfs_rtxlen_t prod = 0; /* product factor for allocators */
2136 xfs_rtxlen_t ralen = 0; /* realtime allocation length */
2137 xfs_rtblock_t bno_hint = NULLRTBLOCK;
2138 xfs_extlen_t orig_length = ap->length;
2139 xfs_rtxlen_t raminlen;
2140 bool rtlocked = false;
2141 bool noalign = false;
2142 bool initial_user_data =
2143 ap->datatype & XFS_ALLOC_INITIAL_USER_DATA;
2144 int error;
2145
2146 ASSERT(!xfs_has_zoned(ap->tp->t_mountp));
2147
2148 retry:
2149 error = xfs_rtallocate_align(ap, &ralen, &raminlen, &prod, &noalign);
2150 if (error)
2151 return error;
2152
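/*
 * If there is an extent adjacent to this file offset, use its block number
 * as the locality hint for the rt allocator.
 */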
2153 if (xfs_bmap_adjacent(ap))
2154 bno_hint = ap->blkno;
2155
2156 if (xfs_has_rtgroups(ap->ip->i_mount)) {
2157 error = xfs_rtallocate_rtgs(ap->tp, bno_hint, raminlen, ralen,
2158 prod, ap->wasdel, initial_user_data,
2159 &ap->blkno, &ap->length);
2160 } else {
2161 error = xfs_rtallocate_rtg(ap->tp, 0, bno_hint, raminlen, ralen,
2162 prod, ap->wasdel, initial_user_data,
2163 &rtlocked, &ap->blkno, &ap->length);
2164 }
2165
2166 if (error == -ENOSPC) {
2167 if (!noalign) {
2168 /*
2169 * We previously enlarged the request length to try to
2170 * satisfy an extent size hint. The allocator didn't
2171 * return anything, so reset the parameters to the
2172 * original values and try again without alignment
2173 * criteria.
2174 */
2175 ap->offset = orig_offset;
2176 ap->length = orig_length;
2177 noalign = true;
2178 goto retry;
2179 }
2180
2181 ap->blkno = NULLFSBLOCK;
2182 ap->length = 0;
2183 return 0;
2184 }
2185 if (error)
2186 return error;
2187
2188 xfs_bmap_alloc_account(ap);
2189 return 0;
2190 }
2191