1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6 #include "xfs_platform.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_alloc.h"
16 #include "xfs_bmap.h"
17 #include "xfs_bmap_btree.h"
18 #include "xfs_bmap_util.h"
19 #include "xfs_trans.h"
20 #include "xfs_trans_space.h"
21 #include "xfs_icache.h"
22 #include "xfs_rtalloc.h"
23 #include "xfs_sb.h"
24 #include "xfs_rtbitmap.h"
25 #include "xfs_rtrmap_btree.h"
26 #include "xfs_quota.h"
27 #include "xfs_log_priv.h"
28 #include "xfs_health.h"
29 #include "xfs_da_format.h"
30 #include "xfs_metafile.h"
31 #include "xfs_rtgroup.h"
32 #include "xfs_error.h"
33 #include "xfs_trace.h"
34 #include "xfs_rtrefcount_btree.h"
35 #include "xfs_reflink.h"
36 #include "xfs_zone_alloc.h"
37
38 /*
39 * Return whether there are any free extents in the size range given
40 * by low and high, for the bitmap block bbno.
41 */
STATIC int
xfs_rtany_summary(
	struct xfs_rtalloc_args	*args,
	int			low,	/* low log2 extent size */
	int			high,	/* high log2 extent size */
	xfs_fileoff_t		bbno,	/* bitmap block number */
	int			*maxlog) /* out: max log2 extent size free */
{
	uint8_t			*rsum_cache = args->rtg->rtg_rsum_cache;
	int			error;
	int			log;	/* loop counter, log2 of ext. size */
	xfs_suminfo_t		sum;	/* summary data */

	/*
	 * There are no extents at levels >= rsum_cache[bbno], so clamp the
	 * top of the search range.  If that empties the range we can report
	 * "nothing free" without reading the summary file at all.
	 */
	if (rsum_cache) {
		high = min(high, rsum_cache[bbno] - 1);
		if (low > high) {
			*maxlog = -1;
			return 0;
		}
	}

	/*
	 * Loop over logs of extent sizes, from high down to low, so the
	 * first nonzero summary count we see is the largest free level.
	 */
	for (log = high; log >= low; log--) {
		/*
		 * Get one summary datum.
		 */
		error = xfs_rtget_summary(args, log, bbno, &sum);
		if (error) {
			return error;
		}
		/*
		 * If there are any, return success.
		 */
		if (sum) {
			*maxlog = log;
			goto out;
		}
	}
	/*
	 * Found nothing; report -1.  Note log == low - 1 here, which still
	 * lets the cache update below record "nothing at levels >= low".
	 */
	*maxlog = -1;
out:
	/*
	 * There were no extents at levels > log, so tighten the cached
	 * upper bound if the loop proved a smaller one.
	 */
	if (rsum_cache && log + 1 < rsum_cache[bbno])
		rsum_cache[bbno] = log + 1;
	return 0;
}
93
94 /*
95 * Copy and transform the summary file, given the old and new
96 * parameters in the mount structures.
97 */
STATIC int
xfs_rtcopy_summary(
	struct xfs_rtalloc_args	*oargs,	/* args for the old geometry */
	struct xfs_rtalloc_args	*nargs)	/* args for the new geometry */
{
	xfs_fileoff_t		bbno;	/* bitmap block number */
	int			error;
	int			log;	/* summary level number (log length) */
	xfs_suminfo_t		sum;	/* summary data */

	/*
	 * Walk every (level, bitmap block) cell of the old summary from the
	 * top level and last block downwards, moving each nonzero count to
	 * its location under the new geometry.
	 */
	for (log = oargs->mp->m_rsumlevels - 1; log >= 0; log--) {
		/* Signed cast makes the >= 0 test terminate the countdown. */
		for (bbno = oargs->mp->m_sb.sb_rbmblocks - 1;
		     (xfs_srtblock_t)bbno >= 0;
		     bbno--) {
			error = xfs_rtget_summary(oargs, log, bbno, &sum);
			if (error)
				goto out;
			/* A negative count is on-disk corruption. */
			if (XFS_IS_CORRUPT(oargs->mp, sum < 0)) {
				error = -EFSCORRUPTED;
				goto out;
			}
			if (sum == 0)
				continue;
			/* Transfer the count: clear it in the old summary... */
			error = xfs_rtmodify_summary(oargs, log, bbno, -sum);
			if (error)
				goto out;
			/* ...and add it to the new one. */
			error = xfs_rtmodify_summary(nargs, log, bbno, sum);
			if (error)
				goto out;
		}
	}
	error = 0;
out:
	/* Drop any buffers cached against the old geometry. */
	xfs_rtbuf_cache_relse(oargs);
	return error;
}
134 /*
135 * Mark an extent specified by start and len allocated.
136 * Updates all the summary information as well as the bitmap.
137 */
STATIC int
xfs_rtallocate_range(
	struct xfs_rtalloc_args	*args,
	xfs_rtxnum_t		start,	/* start rtext to allocate */
	xfs_rtxlen_t		len)	/* number of rtextents to allocate */
{
	struct xfs_mount	*mp = args->mp;
	xfs_rtxnum_t		end;	/* end of the allocated rtext */
	int			error;
	xfs_rtxnum_t		postblock = 0; /* first rtext allocated > end */
	xfs_rtxnum_t		preblock = 0; /* first rtext allocated < start */

	end = start + len - 1;
	/*
	 * Assume we're allocating out of the middle of a free extent.
	 * We need to find the beginning and end of the extent so we can
	 * properly update the summary.
	 */
	error = xfs_rtfind_back(args, start, &preblock);
	if (error)
		return error;

	/*
	 * Find the next allocated block (end of free extent).
	 */
	error = xfs_rtfind_forw(args, end, args->rtg->rtg_extents - 1,
			&postblock);
	if (error)
		return error;

	/*
	 * Decrement the summary information corresponding to the entire
	 * (old) free extent.
	 */
	error = xfs_rtmodify_summary(args,
			xfs_highbit64(postblock + 1 - preblock),
			xfs_rtx_to_rbmblock(mp, preblock), -1);
	if (error)
		return error;

	/*
	 * If there are blocks not being allocated at the front of the
	 * old extent, add summary data for them to be free.
	 */
	if (preblock < start) {
		error = xfs_rtmodify_summary(args,
				xfs_highbit64(start - preblock),
				xfs_rtx_to_rbmblock(mp, preblock), 1);
		if (error)
			return error;
	}

	/*
	 * If there are blocks not being allocated at the end of the
	 * old extent, add summary data for them to be free.
	 */
	if (postblock > end) {
		error = xfs_rtmodify_summary(args,
				xfs_highbit64(postblock - end),
				xfs_rtx_to_rbmblock(mp, end + 1), 1);
		if (error)
			return error;
	}

	/*
	 * Modify the bitmap to mark this extent allocated.
	 */
	return xfs_rtmodify_range(args, start, len, 0);
}
207
208 /* Reduce @rtxlen until it is a multiple of @prod. */
209 static inline xfs_rtxlen_t
xfs_rtalloc_align_len(xfs_rtxlen_t rtxlen,xfs_rtxlen_t prod)210 xfs_rtalloc_align_len(
211 xfs_rtxlen_t rtxlen,
212 xfs_rtxlen_t prod)
213 {
214 if (unlikely(prod > 1))
215 return rounddown(rtxlen, prod);
216 return rtxlen;
217 }
218
219 /*
220 * Make sure we don't run off the end of the rt volume. Be careful that
221 * adjusting maxlen downwards doesn't cause us to fail the alignment checks.
222 */
223 static inline xfs_rtxlen_t
xfs_rtallocate_clamp_len(struct xfs_rtgroup * rtg,xfs_rtxnum_t startrtx,xfs_rtxlen_t rtxlen,xfs_rtxlen_t prod)224 xfs_rtallocate_clamp_len(
225 struct xfs_rtgroup *rtg,
226 xfs_rtxnum_t startrtx,
227 xfs_rtxlen_t rtxlen,
228 xfs_rtxlen_t prod)
229 {
230 xfs_rtxlen_t ret;
231
232 ret = min(rtg->rtg_extents, startrtx + rtxlen) - startrtx;
233 return xfs_rtalloc_align_len(ret, prod);
234 }
235
236 /*
237 * Attempt to allocate an extent minlen<=len<=maxlen starting from
238 * bitmap block bbno. If we don't get maxlen then use prod to trim
239 * the length, if given. Returns error; returns starting block in *rtx.
240 * The lengths are all in rtextents.
241 */
242 STATIC int
xfs_rtallocate_extent_block(struct xfs_rtalloc_args * args,xfs_fileoff_t bbno,xfs_rtxlen_t minlen,xfs_rtxlen_t maxlen,xfs_rtxlen_t * len,xfs_rtxnum_t * nextp,xfs_rtxlen_t prod,xfs_rtxnum_t * rtx)243 xfs_rtallocate_extent_block(
244 struct xfs_rtalloc_args *args,
245 xfs_fileoff_t bbno, /* bitmap block number */
246 xfs_rtxlen_t minlen, /* minimum length to allocate */
247 xfs_rtxlen_t maxlen, /* maximum length to allocate */
248 xfs_rtxlen_t *len, /* out: actual length allocated */
249 xfs_rtxnum_t *nextp, /* out: next rtext to try */
250 xfs_rtxlen_t prod, /* extent product factor */
251 xfs_rtxnum_t *rtx) /* out: start rtext allocated */
252 {
253 struct xfs_mount *mp = args->mp;
254 xfs_rtxnum_t besti = -1; /* best rtext found so far */
255 xfs_rtxnum_t end; /* last rtext in chunk */
256 xfs_rtxnum_t i; /* current rtext trying */
257 xfs_rtxnum_t next; /* next rtext to try */
258 xfs_rtxlen_t scanlen; /* number of free rtx to look for */
259 xfs_rtxlen_t bestlen = 0; /* best length found so far */
260 int stat; /* status from internal calls */
261 int error;
262
263 /*
264 * Loop over all the extents starting in this bitmap block up to the
265 * end of the rt volume, looking for one that's long enough.
266 */
267 end = min(args->rtg->rtg_extents, xfs_rbmblock_to_rtx(mp, bbno + 1)) -
268 1;
269 for (i = xfs_rbmblock_to_rtx(mp, bbno); i <= end; i++) {
270 /* Make sure we don't scan off the end of the rt volume. */
271 scanlen = xfs_rtallocate_clamp_len(args->rtg, i, maxlen, prod);
272 if (scanlen < minlen)
273 break;
274
275 /*
276 * See if there's a free extent of scanlen starting at i.
277 * If it's not so then next will contain the first non-free.
278 */
279 error = xfs_rtcheck_range(args, i, scanlen, 1, &next, &stat);
280 if (error)
281 return error;
282 if (stat) {
283 /*
284 * i to scanlen is all free, allocate and return that.
285 */
286 *len = scanlen;
287 *rtx = i;
288 return 0;
289 }
290
291 /*
292 * In the case where we have a variable-sized allocation
293 * request, figure out how big this free piece is,
294 * and if it's big enough for the minimum, and the best
295 * so far, remember it.
296 */
297 if (minlen < maxlen) {
298 xfs_rtxnum_t thislen; /* this extent size */
299
300 thislen = next - i;
301 if (thislen >= minlen && thislen > bestlen) {
302 besti = i;
303 bestlen = thislen;
304 }
305 }
306 /*
307 * If not done yet, find the start of the next free space.
308 */
309 if (next >= end)
310 break;
311 error = xfs_rtfind_forw(args, next, end, &i);
312 if (error)
313 return error;
314 }
315
316 /* Searched the whole thing & didn't find a maxlen free extent. */
317 if (besti == -1)
318 goto nospace;
319
320 /*
321 * Ensure bestlen is a multiple of prod, but don't return a too-short
322 * extent.
323 */
324 bestlen = xfs_rtalloc_align_len(bestlen, prod);
325 if (bestlen < minlen)
326 goto nospace;
327
328 /*
329 * Pick besti for bestlen & return that.
330 */
331 *len = bestlen;
332 *rtx = besti;
333 return 0;
334 nospace:
335 /* Allocation failed. Set *nextp to the next block to try. */
336 *nextp = next;
337 return -ENOSPC;
338 }
339
340 /*
341 * Allocate an extent of length minlen<=len<=maxlen, starting at block
342 * bno. If we don't get maxlen then use prod to trim the length, if given.
343 * Returns error; returns starting block in *rtx.
344 * The lengths are all in rtextents.
345 */
346 STATIC int
xfs_rtallocate_extent_exact(struct xfs_rtalloc_args * args,xfs_rtxnum_t start,xfs_rtxlen_t minlen,xfs_rtxlen_t maxlen,xfs_rtxlen_t * len,xfs_rtxlen_t prod,xfs_rtxnum_t * rtx)347 xfs_rtallocate_extent_exact(
348 struct xfs_rtalloc_args *args,
349 xfs_rtxnum_t start, /* starting rtext number to allocate */
350 xfs_rtxlen_t minlen, /* minimum length to allocate */
351 xfs_rtxlen_t maxlen, /* maximum length to allocate */
352 xfs_rtxlen_t *len, /* out: actual length allocated */
353 xfs_rtxlen_t prod, /* extent product factor */
354 xfs_rtxnum_t *rtx) /* out: start rtext allocated */
355 {
356 xfs_rtxnum_t next; /* next rtext to try (dummy) */
357 xfs_rtxlen_t alloclen; /* candidate length */
358 xfs_rtxlen_t scanlen; /* number of free rtx to look for */
359 int isfree; /* extent is free */
360 int error;
361
362 ASSERT(minlen % prod == 0);
363 ASSERT(maxlen % prod == 0);
364
365 /* Make sure we don't run off the end of the rt volume. */
366 scanlen = xfs_rtallocate_clamp_len(args->rtg, start, maxlen, prod);
367 if (scanlen < minlen)
368 return -ENOSPC;
369
370 /* Check if the range in question (for scanlen) is free. */
371 error = xfs_rtcheck_range(args, start, scanlen, 1, &next, &isfree);
372 if (error)
373 return error;
374
375 if (isfree) {
376 /* start to scanlen is all free; allocate it. */
377 *len = scanlen;
378 *rtx = start;
379 return 0;
380 }
381
382 /*
383 * If not, allocate what there is, if it's at least minlen.
384 */
385 alloclen = next - start;
386 if (alloclen < minlen)
387 return -ENOSPC;
388
389 /* Ensure alloclen is a multiple of prod. */
390 alloclen = xfs_rtalloc_align_len(alloclen, prod);
391 if (alloclen < minlen)
392 return -ENOSPC;
393
394 *len = alloclen;
395 *rtx = start;
396 return 0;
397 }
398
399 /*
400 * Allocate an extent of length minlen<=len<=maxlen, starting as near
401 * to start as possible. If we don't get maxlen then use prod to trim
402 * the length, if given. The lengths are all in rtextents.
403 */
STATIC int
xfs_rtallocate_extent_near(
	struct xfs_rtalloc_args	*args,
	xfs_rtxnum_t		start,	/* starting rtext number to allocate */
	xfs_rtxlen_t		minlen,	/* minimum length to allocate */
	xfs_rtxlen_t		maxlen,	/* maximum length to allocate */
	xfs_rtxlen_t		*len,	/* out: actual length allocated */
	xfs_rtxlen_t		prod,	/* extent product factor */
	xfs_rtxnum_t		*rtx)	/* out: start rtext allocated */
{
	struct xfs_mount	*mp = args->mp;
	int			maxlog;	/* max useful extent from summary */
	xfs_fileoff_t		bbno;	/* bitmap block number */
	int			error;
	int			i;	/* bitmap block offset (loop control) */
	int			j;	/* secondary loop control */
	int			log2len; /* log2 of minlen */
	xfs_rtxnum_t		n;	/* next rtext to try */

	ASSERT(minlen % prod == 0);
	ASSERT(maxlen % prod == 0);

	/*
	 * If the block number given is off the end, silently set it to the last
	 * block.
	 */
	start = min(start, args->rtg->rtg_extents - 1);

	/*
	 * Try the exact allocation first.
	 */
	error = xfs_rtallocate_extent_exact(args, start, minlen, maxlen, len,
			prod, rtx);
	if (error != -ENOSPC)
		return error;

	/*
	 * Exact start failed; search outwards from start's bitmap block,
	 * alternating between the positive (i > 0) and negative (i < 0)
	 * sides until both directions are exhausted.
	 */
	bbno = xfs_rtx_to_rbmblock(mp, start);
	i = 0;
	j = -1;
	ASSERT(minlen != 0);
	log2len = xfs_highbit32(minlen);
	/*
	 * Loop over all bitmap blocks (bbno + i is current block).
	 */
	for (;;) {
		/*
		 * Get summary information of extents of all useful levels
		 * starting in this bitmap block.
		 */
		error = xfs_rtany_summary(args, log2len, mp->m_rsumlevels - 1,
				bbno + i, &maxlog);
		if (error)
			return error;

		/*
		 * If there are any useful extents starting here, try
		 * allocating one.
		 */
		if (maxlog >= 0) {
			/*
			 * Extents at level maxlog are < 2^(maxlog + 1)
			 * rtextents long, so cap the request accordingly.
			 */
			xfs_extlen_t maxavail =
				min_t(xfs_rtblock_t, maxlen,
					(1ULL << (maxlog + 1)) - 1);
			/*
			 * On the positive side of the starting location.
			 */
			if (i >= 0) {
				/*
				 * Try to allocate an extent starting in
				 * this block.
				 */
				error = xfs_rtallocate_extent_block(args,
						bbno + i, minlen, maxavail, len,
						&n, prod, rtx);
				if (error != -ENOSPC)
					return error;
			}
			/*
			 * On the negative side of the starting location.
			 */
			else {		/* i < 0 */
				int	maxblocks;

				/*
				 * Loop backwards to find the end of the extent
				 * we found in the realtime summary.
				 *
				 * maxblocks is the maximum possible number of
				 * bitmap blocks from the start of the extent
				 * to the end of the extent.
				 */
				if (maxlog == 0)
					maxblocks = 0;
				else if (maxlog < mp->m_blkbit_log)
					maxblocks = 1;
				else
					maxblocks = 2 << (maxlog - mp->m_blkbit_log);

				/*
				 * We need to check bbno + i + maxblocks down to
				 * bbno + i. We already checked bbno down to
				 * bbno + j + 1, so we don't need to check those
				 * again.
				 */
				j = min(i + maxblocks, j);
				for (; j >= i; j--) {
					error = xfs_rtallocate_extent_block(args,
							bbno + j, minlen,
							maxavail, len, &n, prod,
							rtx);
					if (error != -ENOSPC)
						return error;
				}
			}
		}
		/*
		 * Loop control. If we were on the positive side, and there's
		 * still more blocks on the negative side, go there.
		 */
		if (i > 0 && (int)bbno - i >= 0)
			i = -i;
		/*
		 * If positive, and no more negative, but there are more
		 * positive, go there.
		 */
		else if (i > 0 && (int)bbno + i < mp->m_sb.sb_rbmblocks - 1)
			i++;
		/*
		 * If negative or 0 (just started), and there are positive
		 * blocks to go, go there. The 0 case moves to block 1.
		 */
		else if (i <= 0 && (int)bbno - i < mp->m_sb.sb_rbmblocks - 1)
			i = 1 - i;
		/*
		 * If negative or 0 and there are more negative blocks,
		 * go there.
		 */
		else if (i <= 0 && (int)bbno + i > 0)
			i--;
		/*
		 * Must be done. Return failure.
		 */
		else
			break;
	}
	return -ENOSPC;
}
550
/*
 * Scan all bitmap blocks for a free extent recorded at summary level l,
 * trying to allocate one that satisfies minlen/maxlen/prod.  Returns
 * -ENOSPC if no bitmap block at this level yields an allocation.
 */
static int
xfs_rtalloc_sumlevel(
	struct xfs_rtalloc_args	*args,
	int			l,	/* level number */
	xfs_rtxlen_t		minlen,	/* minimum length to allocate */
	xfs_rtxlen_t		maxlen,	/* maximum length to allocate */
	xfs_rtxlen_t		prod,	/* extent product factor */
	xfs_rtxlen_t		*len,	/* out: actual length allocated */
	xfs_rtxnum_t		*rtx)	/* out: start rtext allocated */
{
	xfs_fileoff_t		i;	/* bitmap block number */
	int			error;

	for (i = 0; i < args->mp->m_sb.sb_rbmblocks; i++) {
		xfs_suminfo_t	sum;	/* summary information for extents */
		xfs_rtxnum_t	n;	/* next rtext to be tried */

		error = xfs_rtget_summary(args, l, i, &sum);
		if (error)
			return error;

		/*
		 * Nothing there, on to the next block.
		 */
		if (!sum)
			continue;

		/*
		 * Try allocating the extent.
		 */
		error = xfs_rtallocate_extent_block(args, i, minlen, maxlen,
				len, &n, prod, rtx);
		if (error != -ENOSPC)
			return error;

		/*
		 * If the "next block to try" returned from the allocator is
		 * beyond the next bitmap block, skip to that bitmap block.
		 */
		if (xfs_rtx_to_rbmblock(args->mp, n) > i + 1)
			i = xfs_rtx_to_rbmblock(args->mp, n) - 1;
	}

	return -ENOSPC;
}
596
597 /*
598 * Allocate an extent of length minlen<=len<=maxlen, with no position
599 * specified. If we don't get maxlen then use prod to trim
600 * the length, if given. The lengths are all in rtextents.
601 */
static int
xfs_rtallocate_extent_size(
	struct xfs_rtalloc_args	*args,
	xfs_rtxlen_t		minlen,	/* minimum length to allocate */
	xfs_rtxlen_t		maxlen,	/* maximum length to allocate */
	xfs_rtxlen_t		*len,	/* out: actual length allocated */
	xfs_rtxlen_t		prod,	/* extent product factor */
	xfs_rtxnum_t		*rtx)	/* out: start rtext allocated */
{
	int			error;
	int			l;	/* level number (loop control) */

	ASSERT(minlen % prod == 0);
	ASSERT(maxlen % prod == 0);
	ASSERT(maxlen != 0);

	/*
	 * Loop over all the levels starting with maxlen.
	 *
	 * At each level, look at all the bitmap blocks, to see if there are
	 * extents starting there that are long enough (>= maxlen).
	 *
	 * Note, only on the initial level can the allocation fail if the
	 * summary says there's an extent.
	 */
	for (l = xfs_highbit32(maxlen); l < args->mp->m_rsumlevels; l++) {
		error = xfs_rtalloc_sumlevel(args, l, minlen, maxlen, prod, len,
				rtx);
		if (error != -ENOSPC)
			return error;
	}

	/*
	 * Didn't find any maxlen blocks.  Try smaller ones, unless we are
	 * looking for a fixed size extent.  Note --maxlen: shrinking the
	 * request by one lets the downward scan below look for shorter runs.
	 */
	if (minlen > --maxlen)
		return -ENOSPC;
	ASSERT(minlen != 0);
	ASSERT(maxlen != 0);

	/*
	 * Loop over sizes, from maxlen down to minlen.
	 *
	 * This time, when we do the allocations, allow smaller ones to succeed,
	 * but make sure the specified minlen/maxlen are in the possible range
	 * for this summary level.
	 */
	for (l = xfs_highbit32(maxlen); l >= xfs_highbit32(minlen); l--) {
		error = xfs_rtalloc_sumlevel(args, l,
				max_t(xfs_rtxlen_t, minlen, 1 << l),
				min_t(xfs_rtxlen_t, maxlen, (1 << (l + 1)) - 1),
				prod, len, rtx);
		if (error != -ENOSPC)
			return error;
	}

	return -ENOSPC;
}
661
662 static void
xfs_rtunmount_rtg(struct xfs_rtgroup * rtg)663 xfs_rtunmount_rtg(
664 struct xfs_rtgroup *rtg)
665 {
666 int i;
667
668 for (i = 0; i < XFS_RTGI_MAX; i++)
669 xfs_rtginode_irele(&rtg->rtg_inodes[i]);
670 if (!xfs_has_zoned(rtg_mount(rtg)))
671 kvfree(rtg->rtg_rsum_cache);
672 }
673
674 static int
xfs_alloc_rsum_cache(struct xfs_rtgroup * rtg,xfs_extlen_t rbmblocks)675 xfs_alloc_rsum_cache(
676 struct xfs_rtgroup *rtg,
677 xfs_extlen_t rbmblocks)
678 {
679 /*
680 * The rsum cache is initialized to the maximum value, which is
681 * trivially an upper bound on the maximum level with any free extents.
682 */
683 rtg->rtg_rsum_cache = kvmalloc(rbmblocks, GFP_KERNEL);
684 if (!rtg->rtg_rsum_cache)
685 return -ENOMEM;
686 memset(rtg->rtg_rsum_cache, -1, rbmblocks);
687 return 0;
688 }
689
690 /*
691 * If we changed the rt extent size (meaning there was no rt volume previously)
692 * and the root directory had EXTSZINHERIT and RTINHERIT set, it's possible
693 * that the extent size hint on the root directory is no longer congruent with
694 * the new rt extent size. Log the rootdir inode to fix this.
695 */
static int
xfs_growfs_rt_fixup_extsize(
	struct xfs_mount	*mp)
{
	struct xfs_inode	*ip = mp->m_rootip;
	struct xfs_trans	*tp;
	int			error = 0;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);
	/* Only a rootdir with both inherit flags can carry a stale hint. */
	if (!(ip->i_diflags & XFS_DIFLAG_RTINHERIT) ||
	    !(ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT))
		goto out_iolock;

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_ichange, 0, 0, false,
			&tp);
	if (error)
		goto out_iolock;

	/* Relog the inode core so the hint is rewritten on disk. */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_trans_commit(tp);
	/*
	 * NOTE(review): the ILOCK released here is presumably taken by
	 * xfs_trans_alloc_inode above — confirm against its definition.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

out_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}
722
/* Ensure that the rtgroup metadata inode is loaded, creating it if needed. */
724 static int
xfs_rtginode_ensure(struct xfs_rtgroup * rtg,enum xfs_rtg_inodes type)725 xfs_rtginode_ensure(
726 struct xfs_rtgroup *rtg,
727 enum xfs_rtg_inodes type)
728 {
729 struct xfs_trans *tp;
730 int error;
731
732 if (rtg->rtg_inodes[type])
733 return 0;
734
735 tp = xfs_trans_alloc_empty(rtg_mount(rtg));
736 error = xfs_rtginode_load(rtg, type, tp);
737 xfs_trans_cancel(tp);
738
739 if (error != -ENOENT)
740 return 0;
741 return xfs_rtginode_create(rtg, type, true);
742 }
743
/*
 * Build a temporary copy of the mount structure describing the post-grow
 * realtime geometry, with the derived superblock fields recomputed.  The
 * caller owns the returned structure and must kfree() it; returns NULL on
 * allocation failure.
 */
static struct xfs_mount *
xfs_growfs_rt_alloc_fake_mount(
	const struct xfs_mount	*mp,
	xfs_rfsblock_t		rblocks,
	xfs_agblock_t		rextsize)
{
	struct xfs_mount	*nmp;

	nmp = kmemdup(mp, sizeof(*mp), GFP_KERNEL);
	if (!nmp)
		return NULL;
	xfs_mount_sb_set_rextsize(nmp, &nmp->m_sb, rextsize);
	nmp->m_sb.sb_rblocks = rblocks;
	/* Derived fields: each computation feeds the next, keep this order. */
	nmp->m_sb.sb_rextents = xfs_blen_to_rtbxlen(nmp, nmp->m_sb.sb_rblocks);
	nmp->m_sb.sb_rbmblocks = xfs_rtbitmap_blockcount(nmp);
	nmp->m_sb.sb_rextslog = xfs_compute_rextslog(nmp->m_sb.sb_rextents);
	if (xfs_has_rtgroups(nmp))
		nmp->m_sb.sb_rgcount = howmany_64(nmp->m_sb.sb_rextents,
				nmp->m_sb.sb_rgextents);
	else
		nmp->m_sb.sb_rgcount = 1;
	nmp->m_rsumblocks = xfs_rtsummary_blockcount(nmp, &nmp->m_rsumlevels);

	/* A nonzero rt section means the feature bit must be set. */
	if (rblocks > 0)
		nmp->m_features |= XFS_FEAT_REALTIME;

	/* recompute growfsrt reservation from new rsumsize */
	xfs_trans_resv_calc(nmp, &nmp->m_resv);
	return nmp;
}
774
775 /* Free all the new space and return the number of extents actually freed. */
776 static int
xfs_growfs_rt_free_new(struct xfs_rtgroup * rtg,struct xfs_rtalloc_args * nargs,xfs_rtbxlen_t * freed_rtx)777 xfs_growfs_rt_free_new(
778 struct xfs_rtgroup *rtg,
779 struct xfs_rtalloc_args *nargs,
780 xfs_rtbxlen_t *freed_rtx)
781 {
782 struct xfs_mount *mp = rtg_mount(rtg);
783 xfs_rgnumber_t rgno = rtg_rgno(rtg);
784 xfs_rtxnum_t start_rtx = 0, end_rtx;
785
786 if (rgno < mp->m_sb.sb_rgcount)
787 start_rtx = xfs_rtgroup_extents(mp, rgno);
788 end_rtx = xfs_rtgroup_extents(nargs->mp, rgno);
789
790 /*
791 * Compute the first new extent that we want to free, being careful to
792 * skip past a realtime superblock at the start of the realtime volume.
793 */
794 if (xfs_has_rtsb(nargs->mp) && rgno == 0 && start_rtx == 0)
795 start_rtx++;
796 *freed_rtx = end_rtx - start_rtx;
797 return xfs_rtfree_range(nargs, start_rtx, *freed_rtx);
798 }
799
800 static xfs_rfsblock_t
xfs_growfs_rt_nrblocks(struct xfs_rtgroup * rtg,xfs_rfsblock_t nrblocks,xfs_agblock_t rextsize,xfs_fileoff_t bmbno)801 xfs_growfs_rt_nrblocks(
802 struct xfs_rtgroup *rtg,
803 xfs_rfsblock_t nrblocks,
804 xfs_agblock_t rextsize,
805 xfs_fileoff_t bmbno)
806 {
807 struct xfs_mount *mp = rtg_mount(rtg);
808 xfs_rfsblock_t step;
809
810 step = (bmbno + 1) * mp->m_rtx_per_rbmblock * rextsize;
811 if (xfs_has_rtgroups(mp)) {
812 xfs_rfsblock_t rgblocks = mp->m_sb.sb_rgextents * rextsize;
813
814 step = min(rgblocks, step) + rgblocks * rtg_rgno(rtg);
815 }
816
817 return min(nrblocks, step);
818 }
819
820 /*
821 * If the post-grow filesystem will have an rtsb; we're initializing the first
822 * rtgroup; and the filesystem didn't have a realtime section, write the rtsb
823 * now, and attach the rtsb buffer to the real mount.
824 */
static int
xfs_growfs_rt_init_rtsb(
	const struct xfs_rtalloc_args	*nargs,
	const struct xfs_rtgroup	*rtg,
	const struct xfs_rtalloc_args	*args)
{
	struct xfs_mount	*mp = args->mp;
	struct xfs_buf		*rtsb_bp;
	int			error;

	/* Nothing to do unless all three conditions in the comment hold. */
	if (!xfs_has_rtsb(nargs->mp))
		return 0;
	if (rtg_rgno(rtg) > 0)
		return 0;
	if (mp->m_sb.sb_rblocks)
		return 0;

	error = xfs_buf_get_uncached(mp->m_rtdev_targp, XFS_FSB_TO_BB(mp, 1),
			&rtsb_bp);
	if (error)
		return error;

	/* Point the uncached buffer at the rtsb location before writing. */
	rtsb_bp->b_maps[0].bm_bn = XFS_RTSB_DADDR;
	rtsb_bp->b_ops = &xfs_rtsb_buf_ops;

	xfs_update_rtsb(rtsb_bp, mp->m_sb_bp);
	/* The buffer stays attached to the mount; only the lock is dropped. */
	mp->m_rtsb_bp = rtsb_bp;
	error = xfs_bwrite(rtsb_bp);
	xfs_buf_unlock(rtsb_bp);
	if (error)
		return error;

	/* Initialize the rtrmap to reflect the rtsb. */
	if (rtg_rmap(args->rtg) != NULL)
		error = xfs_rtrmapbt_init_rtsb(nargs->mp, args->rtg, args->tp);

	return error;
}
863
/*
 * Log the delta of every realtime geometry superblock field that differs
 * between the fake post-grow mount (nmp) and the live mount.
 */
static void
xfs_growfs_rt_sb_fields(
	struct xfs_trans	*tp,
	const struct xfs_mount	*nmp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (nmp->m_sb.sb_rextsize != mp->m_sb.sb_rextsize)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_REXTSIZE,
				nmp->m_sb.sb_rextsize - mp->m_sb.sb_rextsize);
	if (nmp->m_sb.sb_rbmblocks != mp->m_sb.sb_rbmblocks)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_RBMBLOCKS,
				nmp->m_sb.sb_rbmblocks - mp->m_sb.sb_rbmblocks);
	if (nmp->m_sb.sb_rblocks != mp->m_sb.sb_rblocks)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_RBLOCKS,
				nmp->m_sb.sb_rblocks - mp->m_sb.sb_rblocks);
	if (nmp->m_sb.sb_rextents != mp->m_sb.sb_rextents)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_REXTENTS,
				nmp->m_sb.sb_rextents - mp->m_sb.sb_rextents);
	if (nmp->m_sb.sb_rextslog != mp->m_sb.sb_rextslog)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_REXTSLOG,
				nmp->m_sb.sb_rextslog - mp->m_sb.sb_rextslog);
	if (nmp->m_sb.sb_rgcount != mp->m_sb.sb_rgcount)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_RGCOUNT,
				nmp->m_sb.sb_rgcount - mp->m_sb.sb_rgcount);
}
890
/* Grow the realtime section of a zoned filesystem to nrblocks. */
static int
xfs_growfs_rt_zoned(
	struct xfs_rtgroup	*rtg,
	xfs_rfsblock_t		nrblocks)
{
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_mount	*nmp;
	struct xfs_trans	*tp;
	xfs_rtbxlen_t		freed_rtx;
	int			error;

	/*
	 * Calculate new sb and mount fields for this round.  Also ensure the
	 * rtg_extents value is uptodate as the rtbitmap code relies on it.
	 */
	nmp = xfs_growfs_rt_alloc_fake_mount(mp, nrblocks,
			mp->m_sb.sb_rextsize);
	if (!nmp)
		return -ENOMEM;
	/* Every extent added by the grow becomes free space. */
	freed_rtx = nmp->m_sb.sb_rextents - mp->m_sb.sb_rextents;

	xfs_rtgroup_calc_geometry(nmp, rtg, rtg_rgno(rtg),
			nmp->m_sb.sb_rgcount, nmp->m_sb.sb_rextents);

	error = xfs_trans_alloc(mp, &M_RES(nmp)->tr_growrtfree, 0, 0, 0, &tp);
	if (error)
		goto out_free;

	xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
	xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP);

	/* Log the geometry deltas plus the new free extent count. */
	xfs_growfs_rt_sb_fields(tp, nmp);
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, freed_rtx);

	error = xfs_trans_commit(tp);
	if (error)
		goto out_free;

	/*
	 * Ensure the mount RT feature flag is now set, and compute new
	 * maxlevels for rt btrees.
	 */
	mp->m_features |= XFS_FEAT_REALTIME;
	xfs_rtrmapbt_compute_maxlevels(mp);
	xfs_rtrefcountbt_compute_maxlevels(mp);
	xfs_zoned_add_available(mp, freed_rtx);
out_free:
	kfree(nmp);
	return error;
}
941
/* Grow the realtime section by (up to) one bitmap block's worth of extents. */
static int
xfs_growfs_rt_bmblock(
	struct xfs_rtgroup	*rtg,
	xfs_rfsblock_t		nrblocks,
	xfs_agblock_t		rextsize,
	xfs_fileoff_t		bmbno)
{
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_inode	*rbmip = rtg_bitmap(rtg);
	struct xfs_inode	*rsumip = rtg_summary(rtg);
	struct xfs_rtalloc_args	args = {
		.mp		= mp,
		.rtg		= rtg,
	};
	struct xfs_rtalloc_args	nargs = {
		.rtg		= rtg,
	};
	struct xfs_mount	*nmp;
	xfs_rtbxlen_t		freed_rtx;
	int			error;

	/*
	 * Calculate new sb and mount fields for this round.  Also ensure the
	 * rtg_extents value is uptodate as the rtbitmap code relies on it.
	 */
	nmp = nargs.mp = xfs_growfs_rt_alloc_fake_mount(mp,
			xfs_growfs_rt_nrblocks(rtg, nrblocks, rextsize, bmbno),
			rextsize);
	if (!nmp)
		return -ENOMEM;

	xfs_rtgroup_calc_geometry(nmp, rtg, rtg_rgno(rtg),
			nmp->m_sb.sb_rgcount, nmp->m_sb.sb_rextents);

	/*
	 * Recompute the growfsrt reservation from the new rsumsize, so that
	 * the transaction below uses the new, potentially larger value.
	 */
	xfs_trans_resv_calc(nmp, &nmp->m_resv);
	error = xfs_trans_alloc(mp, &M_RES(nmp)->tr_growrtfree, 0, 0, 0,
			&args.tp);
	if (error)
		goto out_free;
	nargs.tp = args.tp;

	xfs_rtgroup_lock(args.rtg, XFS_RTGLOCK_BITMAP | XFS_RTGLOCK_RMAP);
	xfs_rtgroup_trans_join(args.tp, args.rtg,
			XFS_RTGLOCK_BITMAP | XFS_RTGLOCK_RMAP);

	/*
	 * Update the bitmap inode's size ondisk and incore.  We need to update
	 * the incore size so that inode inactivation won't punch what it thinks
	 * are "posteof" blocks.
	 */
	rbmip->i_disk_size = nmp->m_sb.sb_rbmblocks * nmp->m_sb.sb_blocksize;
	i_size_write(VFS_I(rbmip), rbmip->i_disk_size);
	xfs_trans_log_inode(args.tp, rbmip, XFS_ILOG_CORE);

	/*
	 * Update the summary inode's size.  We need to update the incore size
	 * so that inode inactivation won't punch what it thinks are "posteof"
	 * blocks.
	 */
	rsumip->i_disk_size = nmp->m_rsumblocks * nmp->m_sb.sb_blocksize;
	i_size_write(VFS_I(rsumip), rsumip->i_disk_size);
	xfs_trans_log_inode(args.tp, rsumip, XFS_ILOG_CORE);

	/*
	 * Copy summary data from old to new sizes when the real size (not
	 * block-aligned) changes.
	 */
	if (mp->m_sb.sb_rbmblocks != nmp->m_sb.sb_rbmblocks ||
	    mp->m_rsumlevels != nmp->m_rsumlevels) {
		error = xfs_rtcopy_summary(&args, &nargs);
		if (error)
			goto out_cancel;
	}

	error = xfs_growfs_rt_init_rtsb(&nargs, rtg, &args);
	if (error)
		goto out_cancel;

	/*
	 * Update superblock fields.
	 */
	xfs_growfs_rt_sb_fields(args.tp, nmp);

	/*
	 * Free the new extent.
	 */
	error = xfs_growfs_rt_free_new(rtg, &nargs, &freed_rtx);
	xfs_rtbuf_cache_relse(&nargs);
	if (error)
		goto out_cancel;

	/*
	 * Mark more blocks free in the superblock.
	 */
	xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_FREXTENTS, freed_rtx);

	/*
	 * Update the calculated values in the real mount structure.
	 */
	mp->m_rsumlevels = nmp->m_rsumlevels;
	mp->m_rsumblocks = nmp->m_rsumblocks;

	/*
	 * Recompute the growfsrt reservation from the new rsumsize.
	 */
	xfs_trans_resv_calc(mp, &mp->m_resv);

	/*
	 * Sync sb counters now to reflect the updated values.  Lazy counters
	 * are not always updated and in order to avoid inconsistencies between
	 * frextents and rtextents, it is better to sync the counters.
	 */

	if (xfs_has_lazysbcount(mp))
		xfs_log_sb(args.tp);

	error = xfs_trans_commit(args.tp);
	if (error)
		goto out_free;

	/*
	 * Ensure the mount RT feature flag is now set, and compute new
	 * maxlevels for rt btrees.
	 */
	mp->m_features |= XFS_FEAT_REALTIME;
	xfs_rtrmapbt_compute_maxlevels(mp);
	xfs_rtrefcountbt_compute_maxlevels(mp);

	kfree(nmp);
	return 0;

out_cancel:
	xfs_trans_cancel(args.tp);
out_free:
	kfree(nmp);
	return error;
}
1083
1084 static xfs_rtxnum_t
xfs_last_rtgroup_extents(struct xfs_mount * mp)1085 xfs_last_rtgroup_extents(
1086 struct xfs_mount *mp)
1087 {
1088 return mp->m_sb.sb_rextents -
1089 ((xfs_rtxnum_t)(mp->m_sb.sb_rgcount - 1) *
1090 mp->m_sb.sb_rgextents);
1091 }
1092
1093 /*
1094 * This will return the bitmap block number (indexed at 0) that will be
1095 * extended/modified. There are 2 cases here:
1096 * 1. The size of the rtg is such that it is a multiple of
1097 * xfs_rtbitmap_rtx_per_rbmblock() i.e, an integral number of bitmap blocks
1098 * are completely filled up. In this case, we should return
1099 * 1 + (the last used bitmap block number).
 * 2. The size of the rtg is not a multiple of xfs_rtbitmap_rtx_per_rbmblock().
 *    Here we will return the number of the last used bitmap block. In this
1102 * case, we will modify the last used bitmap block to extend the size of the
1103 * rtgroup.
1104 *
1105 * This also deals with the case where there were no rtextents before.
1106 */
static xfs_fileoff_t
xfs_last_rt_bmblock_to_extend(
	struct xfs_rtgroup	*rtg)
{
	struct xfs_mount	*mp = rtg_mount(rtg);
	xfs_rgnumber_t		rgno = rtg_rgno(rtg);
	xfs_fileoff_t		bmbno = 0;	/* first bmblock to (re)write */
	unsigned int		mod = 0;

	/* Only the last existing rt group can be grown in place. */
	ASSERT(!mp->m_sb.sb_rgcount || rgno >= mp->m_sb.sb_rgcount - 1);

	if (mp->m_sb.sb_rgcount && rgno == mp->m_sb.sb_rgcount - 1) {
		/* rt extents currently tracked by this (last) group */
		xfs_rtxnum_t	nrext = xfs_last_rtgroup_extents(mp);

		/* Also fill up the previous block if not entirely full. */
		/* We are doing a -1 to convert it to a 0 based index */
		bmbno = xfs_rtbitmap_blockcount_len(mp, nrext) - 1;
		div_u64_rem(nrext, xfs_rtbitmap_rtx_per_rbmblock(mp), &mod);
		/*
		 * mod = 0 means that all the current blocks are full. So
		 * return the next block number to be used for the rtgroup
		 * growth.
		 */
		if (mod == 0)
			bmbno++;
	}

	return bmbno;
}
1136
1137 /*
1138 * Allocate space to the bitmap and summary files, as necessary.
1139 */
static int
xfs_growfs_rt_alloc_blocks(
	struct xfs_rtgroup	*rtg,
	xfs_rfsblock_t		nrblocks,	/* new total rt blocks */
	xfs_agblock_t		rextsize,	/* new rt extent size */
	xfs_extlen_t		*nrbmblocks)	/* out: new bitmap block count */
{
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_inode	*rbmip = rtg_bitmap(rtg);
	struct xfs_inode	*rsumip = rtg_summary(rtg);
	xfs_extlen_t		orbmblocks = 0;
	xfs_extlen_t		orsumblocks = 0;
	struct xfs_mount	*nmp;
	int			error = 0;

	/* Fake mount carries the post-grow geometry for the calculations. */
	nmp = xfs_growfs_rt_alloc_fake_mount(mp, nrblocks, rextsize);
	if (!nmp)
		return -ENOMEM;
	*nrbmblocks = nmp->m_sb.sb_rbmblocks;

	if (xfs_has_rtgroups(mp)) {
		/*
		 * For file systems with the rtgroups feature, the RT bitmap and
		 * summary are always fully allocated, which means that we never
		 * need to grow the existing files.
		 *
		 * But we have to be careful to only fill the bitmap until the
		 * end of the actually used range.
		 */
		if (rtg_rgno(rtg) == nmp->m_sb.sb_rgcount - 1)
			*nrbmblocks = xfs_rtbitmap_blockcount_len(nmp,
					xfs_last_rtgroup_extents(nmp));

		/* Pre-existing group: files already fully allocated, done. */
		if (mp->m_sb.sb_rgcount &&
		    rtg_rgno(rtg) == mp->m_sb.sb_rgcount - 1)
			goto out_free;
	} else {
		/*
		 * Get the old block counts for bitmap and summary inodes.
		 * These can't change since other growfs callers are locked out.
		 */
		orbmblocks = XFS_B_TO_FSB(mp, rbmip->i_disk_size);
		orsumblocks = XFS_B_TO_FSB(mp, rsumip->i_disk_size);
	}

	/* Allocate (and zero) the delta range of both metadata files. */
	error = xfs_rtfile_initialize_blocks(rtg, XFS_RTGI_BITMAP, orbmblocks,
			nmp->m_sb.sb_rbmblocks, NULL);
	if (error)
		goto out_free;
	error = xfs_rtfile_initialize_blocks(rtg, XFS_RTGI_SUMMARY, orsumblocks,
			nmp->m_rsumblocks, NULL);
out_free:
	kfree(nmp);
	return error;
}
1195
/*
 * Grow a single realtime group to the new size: ensure its metadata inodes
 * exist, extend the bitmap/summary files, then add the new space one bitmap
 * block at a time.  On failure the old geometry and rsum cache are restored.
 */
static int
xfs_growfs_rtg(
	struct xfs_mount	*mp,
	xfs_rgnumber_t		rgno,
	xfs_rfsblock_t		nrblocks,
	xfs_agblock_t		rextsize)
{
	uint8_t			*old_rsum_cache = NULL;
	xfs_extlen_t		bmblocks;
	xfs_fileoff_t		bmbno;
	struct xfs_rtgroup	*rtg;
	unsigned int		i;
	int			error;

	rtg = xfs_rtgroup_grab(mp, rgno);
	if (!rtg)
		return -EINVAL;

	/* Make sure all per-group metadata inodes exist before growing. */
	for (i = 0; i < XFS_RTGI_MAX; i++) {
		error = xfs_rtginode_ensure(rtg, i);
		if (error)
			goto out_rele;
	}

	/* Zoned rt devices have their own, bitmap-less grow path. */
	if (xfs_has_zoned(mp)) {
		error = xfs_growfs_rt_zoned(rtg, nrblocks);
		goto out_rele;
	}

	error = xfs_growfs_rt_alloc_blocks(rtg, nrblocks, rextsize, &bmblocks);
	if (error)
		goto out_rele;

	/* Bitmap grew: swap in a larger rsum cache, keep the old for rollback. */
	if (bmblocks != rtg_mount(rtg)->m_sb.sb_rbmblocks) {
		old_rsum_cache = rtg->rtg_rsum_cache;
		error = xfs_alloc_rsum_cache(rtg, bmblocks);
		if (error)
			goto out_rele;
	}

	/* Commit the new space one bitmap block per transaction. */
	for (bmbno = xfs_last_rt_bmblock_to_extend(rtg); bmbno < bmblocks;
	     bmbno++) {
		error = xfs_growfs_rt_bmblock(rtg, nrblocks, rextsize, bmbno);
		if (error)
			goto out_error;
	}

	kvfree(old_rsum_cache);
	goto out_rele;

out_error:
	/*
	 * Reset rtg_extents to the old value if adding more blocks failed.
	 */
	xfs_rtgroup_calc_geometry(mp, rtg, rtg_rgno(rtg), mp->m_sb.sb_rgcount,
			mp->m_sb.sb_rextents);
	if (old_rsum_cache) {
		kvfree(rtg->rtg_rsum_cache);
		rtg->rtg_rsum_cache = old_rsum_cache;
	}
out_rele:
	xfs_rtgroup_rele(rtg);
	return error;
}
1260
/*
 * Validate the proposed post-grow rt geometry against the (fixed) log size
 * and, for zoned devices, the rt group alignment.  Returns 0 if the new
 * geometry is acceptable, -EINVAL if not, -ENOMEM on allocation failure.
 */
int
xfs_growfs_check_rtgeom(
	const struct xfs_mount	*mp,
	xfs_rfsblock_t		dblocks,
	xfs_rfsblock_t		rblocks,
	xfs_extlen_t		rextsize)
{
	xfs_extlen_t		min_logfsbs;
	struct xfs_mount	*nmp;

	/* Build a throwaway mount reflecting the post-grow geometry. */
	nmp = xfs_growfs_rt_alloc_fake_mount(mp, rblocks, rextsize);
	if (!nmp)
		return -ENOMEM;
	nmp->m_sb.sb_dblocks = dblocks;

	xfs_rtrmapbt_compute_maxlevels(nmp);
	xfs_rtrefcountbt_compute_maxlevels(nmp);
	xfs_trans_resv_calc(nmp, M_RES(nmp));

	/*
	 * New summary size can't be more than half the size of the log. This
	 * prevents us from getting a log overflow, since we'll log basically
	 * the whole summary file at once.
	 */
	min_logfsbs = min_t(xfs_extlen_t, xfs_log_calc_minimum_size(nmp),
			nmp->m_rsumblocks * 2);

	trace_xfs_growfs_check_rtgeom(mp, min_logfsbs);

	if (min_logfsbs > mp->m_sb.sb_logblocks)
		goto out_inval;

	if (xfs_has_zoned(mp)) {
		uint32_t	gblocks = mp->m_groups[XG_TYPE_RTG].blocks;
		uint32_t	rem;

		/* Zoned rt devices only support single-block rt extents. */
		if (rextsize != 1)
			goto out_inval;
		div_u64_rem(nmp->m_sb.sb_rblocks, gblocks, &rem);
		if (rem) {
			xfs_warn(mp,
"new RT volume size (%lld) not aligned to RT group size (%d)",
				nmp->m_sb.sb_rblocks, gblocks);
			goto out_inval;
		}
	}

	kfree(nmp);
	return 0;
out_inval:
	kfree(nmp);
	return -EINVAL;
}
1314
1315 /*
1316 * Compute the new number of rt groups and ensure that /rtgroups exists.
1317 *
1318 * Changing the rtgroup size is not allowed (even if the rt volume hasn't yet
1319 * been initialized) because the userspace ABI doesn't support it.
1320 */
static int
xfs_growfs_rt_prep_groups(
	struct xfs_mount	*mp,
	xfs_rfsblock_t		rblocks,	/* new rt volume size */
	xfs_extlen_t		rextsize,	/* rt extent size (blocks) */
	xfs_rgnumber_t		*new_rgcount)	/* out: post-grow group count */
{
	int			error;

	/* Groups are sized in rt extents; round the last, partial group up. */
	*new_rgcount = howmany_64(rblocks, mp->m_sb.sb_rgextents * rextsize);
	if (*new_rgcount > XFS_MAX_RGNUMBER)
		return -EINVAL;

	/* Make sure the /rtgroups dir has been created */
	if (!mp->m_rtdirip) {
		struct xfs_trans	*tp;

		/* Empty transaction: the lookup itself does not modify. */
		tp = xfs_trans_alloc_empty(mp);
		error = xfs_rtginode_load_parent(tp);
		xfs_trans_cancel(tp);

		/* Directory missing entirely: create it now. */
		if (error == -ENOENT)
			error = xfs_rtginode_mkdir_parent(mp);
		if (error)
			return error;
	}

	return 0;
}
1350
1351 static bool
xfs_grow_last_rtg(struct xfs_mount * mp)1352 xfs_grow_last_rtg(
1353 struct xfs_mount *mp)
1354 {
1355 if (!xfs_has_rtgroups(mp))
1356 return true;
1357 if (mp->m_sb.sb_rgcount == 0)
1358 return false;
1359 return xfs_rtgroup_extents(mp, mp->m_sb.sb_rgcount - 1) <
1360 mp->m_sb.sb_rgextents;
1361 }
1362
1363 /*
1364 * Read in the last block of the RT device to make sure it is accessible.
1365 */
static int
xfs_rt_check_size(
	struct xfs_mount	*mp,
	xfs_rfsblock_t		last_block)
{
	xfs_daddr_t		daddr = XFS_FSB_TO_BB(mp, last_block);
	struct xfs_buf		*bp;
	int			error;

	/* Round-trip FSB->BB->FSB mismatch means the daddr overflowed. */
	if (XFS_BB_TO_FSB(mp, daddr) != last_block) {
		xfs_warn(mp, "RT device size overflow: %llu != %llu",
			XFS_BB_TO_FSB(mp, daddr), last_block);
		return -EFBIG;
	}

	/* Uncached read: probe the device without polluting the cache. */
	error = xfs_buf_read_uncached(mp->m_rtdev_targp,
			XFS_FSB_TO_BB(mp, mp->m_sb.sb_rtstart) + daddr,
			XFS_FSB_TO_BB(mp, 1), &bp, NULL);
	if (error)
		xfs_warn(mp, "cannot read last RT device sector (%lld)",
				last_block);
	else
		xfs_buf_relse(bp);
	return error;
}
1391
1392 /*
1393 * Grow the realtime area of the filesystem.
1394 */
1395 int
xfs_growfs_rt(struct xfs_mount * mp,struct xfs_growfs_rt * in)1396 xfs_growfs_rt(
1397 struct xfs_mount *mp,
1398 struct xfs_growfs_rt *in)
1399 {
1400 xfs_rgnumber_t old_rgcount = mp->m_sb.sb_rgcount;
1401 xfs_rgnumber_t new_rgcount = 1;
1402 xfs_rgnumber_t rgno;
1403 xfs_agblock_t old_rextsize = mp->m_sb.sb_rextsize;
1404 int error;
1405
1406 if (!capable(CAP_SYS_ADMIN))
1407 return -EPERM;
1408
1409 /* Needs to have been mounted with an rt device. */
1410 if (!XFS_IS_REALTIME_MOUNT(mp))
1411 return -EINVAL;
1412
1413 if (!mutex_trylock(&mp->m_growlock))
1414 return -EWOULDBLOCK;
1415
1416 /* Shrink not supported. */
1417 error = -EINVAL;
1418 if (in->newblocks <= mp->m_sb.sb_rblocks)
1419 goto out_unlock;
1420 /* Can only change rt extent size when adding rt volume. */
1421 if (mp->m_sb.sb_rblocks > 0 && in->extsize != mp->m_sb.sb_rextsize)
1422 goto out_unlock;
1423
1424 /* Range check the extent size. */
1425 if (XFS_FSB_TO_B(mp, in->extsize) > XFS_MAX_RTEXTSIZE ||
1426 XFS_FSB_TO_B(mp, in->extsize) < XFS_MIN_RTEXTSIZE)
1427 goto out_unlock;
1428
1429 /* Check for features supported only on rtgroups filesystems. */
1430 error = -EOPNOTSUPP;
1431 if (!xfs_has_rtgroups(mp)) {
1432 if (xfs_has_rmapbt(mp))
1433 goto out_unlock;
1434 if (xfs_has_quota(mp))
1435 goto out_unlock;
1436 if (xfs_has_reflink(mp))
1437 goto out_unlock;
1438 } else if (xfs_has_reflink(mp) &&
1439 !xfs_reflink_supports_rextsize(mp, in->extsize))
1440 goto out_unlock;
1441
1442 error = xfs_sb_validate_fsb_count(&mp->m_sb, in->newblocks);
1443 if (error)
1444 goto out_unlock;
1445
1446 error = xfs_rt_check_size(mp, in->newblocks - 1);
1447 if (error)
1448 goto out_unlock;
1449
1450 /*
1451 * Calculate new parameters. These are the final values to be reached.
1452 */
1453 error = -EINVAL;
1454 if (in->newblocks < in->extsize)
1455 goto out_unlock;
1456
1457 /* Make sure the new fs size won't cause problems with the log. */
1458 error = xfs_growfs_check_rtgeom(mp, mp->m_sb.sb_dblocks, in->newblocks,
1459 in->extsize);
1460 if (error)
1461 goto out_unlock;
1462
1463 if (xfs_has_rtgroups(mp)) {
1464 error = xfs_growfs_rt_prep_groups(mp, in->newblocks,
1465 in->extsize, &new_rgcount);
1466 if (error)
1467 goto out_unlock;
1468 }
1469
1470 if (xfs_grow_last_rtg(mp)) {
1471 error = xfs_growfs_rtg(mp, old_rgcount - 1, in->newblocks,
1472 in->extsize);
1473 if (error)
1474 goto out_unlock;
1475 }
1476
1477 for (rgno = old_rgcount; rgno < new_rgcount; rgno++) {
1478 xfs_rtbxlen_t rextents = div_u64(in->newblocks, in->extsize);
1479
1480 error = xfs_rtgroup_alloc(mp, rgno, new_rgcount, rextents);
1481 if (error)
1482 goto out_unlock;
1483
1484 error = xfs_growfs_rtg(mp, rgno, in->newblocks, in->extsize);
1485 if (error) {
1486 struct xfs_rtgroup *rtg;
1487
1488 rtg = xfs_rtgroup_grab(mp, rgno);
1489 if (!WARN_ON_ONCE(!rtg)) {
1490 xfs_rtunmount_rtg(rtg);
1491 xfs_rtgroup_rele(rtg);
1492 xfs_rtgroup_free(mp, rgno);
1493 }
1494 break;
1495 }
1496 }
1497
1498 if (!error && old_rextsize != in->extsize)
1499 error = xfs_growfs_rt_fixup_extsize(mp);
1500
1501 /*
1502 * Update secondary superblocks now the physical grow has completed.
1503 *
1504 * Also do this in case of an error as we might have already
1505 * successfully updated one or more RTGs and incremented sb_rgcount.
1506 */
1507 if (!xfs_is_shutdown(mp)) {
1508 int error2 = xfs_update_secondary_sbs(mp);
1509
1510 if (!error)
1511 error = error2;
1512
1513 /* Reset the rt metadata btree space reservations. */
1514 error2 = xfs_metafile_resv_init(mp);
1515 if (error2 && error2 != -ENOSPC)
1516 error = error2;
1517 }
1518
1519 out_unlock:
1520 mutex_unlock(&mp->m_growlock);
1521 return error;
1522 }
1523
/*
 * Read the realtime superblock and attach it to the mount.  The buffer is
 * kept referenced but unlocked for the lifetime of the mount.
 */
int
xfs_rtmount_readsb(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp;
	int			error;

	/* Nothing to read if the feature is absent or the rt area is empty. */
	if (!xfs_has_rtsb(mp))
		return 0;
	if (mp->m_sb.sb_rblocks == 0)
		return 0;
	if (mp->m_rtdev_targp == NULL) {
		xfs_warn(mp,
			"Filesystem has a realtime volume, use rtdev=device option");
		return -ENODEV;
	}

	/* m_blkbb_log is not set up yet */
	error = xfs_buf_read_uncached(mp->m_rtdev_targp, XFS_RTSB_DADDR,
			mp->m_sb.sb_blocksize >> BBSHIFT, &bp,
			&xfs_rtsb_buf_ops);
	if (error) {
		xfs_warn(mp, "rt sb validate failed with error %d.", error);
		/* bad CRC means corrupted metadata */
		if (error == -EFSBADCRC)
			error = -EFSCORRUPTED;
		return error;
	}

	/* Hold the buffer reference, but drop the lock, for the mount. */
	mp->m_rtsb_bp = bp;
	xfs_buf_unlock(bp);
	return 0;
}
1558
1559 /* Detach the realtime superblock from the mount and free it. */
1560 void
xfs_rtmount_freesb(struct xfs_mount * mp)1561 xfs_rtmount_freesb(
1562 struct xfs_mount *mp)
1563 {
1564 struct xfs_buf *bp = mp->m_rtsb_bp;
1565
1566 if (!bp)
1567 return;
1568
1569 xfs_buf_lock(bp);
1570 mp->m_rtsb_bp = NULL;
1571 xfs_buf_relse(bp);
1572 }
1573
1574 /*
1575 * Initialize realtime fields in the mount structure.
1576 */
1577 int /* error */
xfs_rtmount_init(struct xfs_mount * mp)1578 xfs_rtmount_init(
1579 struct xfs_mount *mp) /* file system mount structure */
1580 {
1581 if (mp->m_sb.sb_rblocks == 0)
1582 return 0;
1583 if (mp->m_rtdev_targp == NULL) {
1584 xfs_warn(mp,
1585 "Filesystem has a realtime volume, use rtdev=device option");
1586 return -ENODEV;
1587 }
1588
1589 mp->m_rsumblocks = xfs_rtsummary_blockcount(mp, &mp->m_rsumlevels);
1590
1591 return xfs_rt_check_size(mp, mp->m_sb.sb_rblocks - 1);
1592 }
1593
1594 static int
xfs_rtalloc_count_frextent(struct xfs_rtgroup * rtg,struct xfs_trans * tp,const struct xfs_rtalloc_rec * rec,void * priv)1595 xfs_rtalloc_count_frextent(
1596 struct xfs_rtgroup *rtg,
1597 struct xfs_trans *tp,
1598 const struct xfs_rtalloc_rec *rec,
1599 void *priv)
1600 {
1601 uint64_t *valp = priv;
1602
1603 *valp += rec->ar_extcount;
1604 return 0;
1605 }
1606
1607 /*
1608 * Reinitialize the number of free realtime extents from the realtime bitmap.
1609 * Callers must ensure that there is no other activity in the filesystem.
1610 */
int
xfs_rtalloc_reinit_frextents(
	struct xfs_mount	*mp)
{
	uint64_t		val = 0;	/* running free-extent total */
	int			error;

	struct xfs_rtgroup	*rtg = NULL;

	/* Walk every rt group and total up its free extents from the bitmap. */
	while ((rtg = xfs_rtgroup_next(mp, rtg))) {
		xfs_rtgroup_lock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
		error = xfs_rtalloc_query_all(rtg, NULL,
				xfs_rtalloc_count_frextent, &val);
		xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
		if (error) {
			/* Drop the iterator's reference on early exit. */
			xfs_rtgroup_rele(rtg);
			return error;
		}
	}

	/* Publish the recomputed total to the superblock and free counter. */
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_frextents = val;
	spin_unlock(&mp->m_sb_lock);
	xfs_set_freecounter(mp, XC_FREE_RTEXTENTS, mp->m_sb.sb_frextents);
	return 0;
}
1637
1638 /*
1639 * Read in the bmbt of an rt metadata inode so that we never have to load them
1640 * at runtime. This enables the use of shared ILOCKs for rtbitmap scans. Use
1641 * an empty transaction to avoid deadlocking on loops in the bmbt.
1642 */
1643 static inline int
xfs_rtmount_iread_extents(struct xfs_trans * tp,struct xfs_inode * ip)1644 xfs_rtmount_iread_extents(
1645 struct xfs_trans *tp,
1646 struct xfs_inode *ip)
1647 {
1648 int error;
1649
1650 xfs_ilock(ip, XFS_ILOCK_EXCL);
1651
1652 error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1653 if (error)
1654 goto out_unlock;
1655
1656 if (xfs_inode_has_attr_fork(ip)) {
1657 error = xfs_iread_extents(tp, ip, XFS_ATTR_FORK);
1658 if (error)
1659 goto out_unlock;
1660 }
1661
1662 out_unlock:
1663 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1664 return error;
1665 }
1666
/*
 * Load all metadata inodes of one rt group, pre-read their extent maps, and
 * set up the group's rsum cache (non-zoned filesystems only).
 */
static int
xfs_rtmount_rtg(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_rtgroup	*rtg)
{
	int			error, i;

	for (i = 0; i < XFS_RTGI_MAX; i++) {
		error = xfs_rtginode_load(rtg, i, tp);
		if (error)
			return error;

		/* Not every inode type exists on every config; skip absent. */
		if (rtg->rtg_inodes[i]) {
			error = xfs_rtmount_iread_extents(tp,
					rtg->rtg_inodes[i]);
			if (error)
				return error;
		}
	}

	/* Zoned rt devices don't use the rtsummary cache. */
	if (xfs_has_zoned(mp))
		return 0;
	return xfs_alloc_rsum_cache(rtg, mp->m_sb.sb_rbmblocks);
}
1692
1693 /*
1694 * Get the bitmap and summary inodes and the summary cache into the mount
1695 * structure at mount time.
1696 */
1697 int
xfs_rtmount_inodes(struct xfs_mount * mp)1698 xfs_rtmount_inodes(
1699 struct xfs_mount *mp)
1700 {
1701 struct xfs_trans *tp;
1702 struct xfs_rtgroup *rtg = NULL;
1703 int error;
1704
1705 tp = xfs_trans_alloc_empty(mp);
1706 if (xfs_has_rtgroups(mp) && mp->m_sb.sb_rgcount > 0) {
1707 error = xfs_rtginode_load_parent(tp);
1708 if (error)
1709 goto out_cancel;
1710 }
1711
1712 while ((rtg = xfs_rtgroup_next(mp, rtg))) {
1713 error = xfs_rtmount_rtg(mp, tp, rtg);
1714 if (error) {
1715 xfs_rtgroup_rele(rtg);
1716 xfs_rtunmount_inodes(mp);
1717 break;
1718 }
1719 }
1720
1721 out_cancel:
1722 xfs_trans_cancel(tp);
1723 return error;
1724 }
1725
1726 void
xfs_rtunmount_inodes(struct xfs_mount * mp)1727 xfs_rtunmount_inodes(
1728 struct xfs_mount *mp)
1729 {
1730 struct xfs_rtgroup *rtg = NULL;
1731
1732 while ((rtg = xfs_rtgroup_next(mp, rtg)))
1733 xfs_rtunmount_rtg(rtg);
1734 xfs_rtginode_irele(&mp->m_rtdirip);
1735 }
1736
1737 /*
1738 * Pick an extent for allocation at the start of a new realtime file.
1739 * Use the sequence number stored in the atime field of the bitmap inode.
1740 * Translate this to a fraction of the rtextents, and return the product
1741 * of rtextents and the fraction.
1742 * The fraction sequence is 0, 1/2, 1/4, 3/4, 1/8, ..., 7/8, 1/16, ...
1743 */
static xfs_rtxnum_t
xfs_rtpick_extent(
	struct xfs_rtgroup	*rtg,
	struct xfs_trans	*tp,
	xfs_rtxlen_t		len)	/* allocation length (rtextents) */
{
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_inode	*rbmip = rtg_bitmap(rtg);
	xfs_rtxnum_t		b = 0;	/* result rtext */
	int			log2;	/* log of sequence number */
	uint64_t		resid;	/* residual after log removed */
	uint64_t		seq;	/* sequence number of file creation */
	struct timespec64	ts;	/* timespec in inode */

	xfs_assert_ilocked(rbmip, XFS_ILOCK_EXCL);

	/* The sequence counter is stashed in the bitmap inode's atime. */
	ts = inode_get_atime(VFS_I(rbmip));
	if (!(rbmip->i_diflags & XFS_DIFLAG_NEWRTBM)) {
		/* First use: flag the atime as a sequence number from now on. */
		rbmip->i_diflags |= XFS_DIFLAG_NEWRTBM;
		seq = 0;
	} else {
		seq = ts.tv_sec;
	}
	/*
	 * Split seq into its highest set bit (log2) and the residue below it,
	 * then map that to the fraction sequence 0, 1/2, 1/4, 3/4, 1/8, ...
	 * of sb_rextents (see comment above the function).
	 */
	log2 = xfs_highbit64(seq);
	if (log2 != -1) {
		resid = seq - (1ULL << log2);
		b = (mp->m_sb.sb_rextents * ((resid << 1) + 1ULL)) >>
		    (log2 + 1);
		if (b >= mp->m_sb.sb_rextents)
			div64_u64_rem(b, mp->m_sb.sb_rextents, &b);
		/* Clamp so the requested length still fits below the end. */
		if (b + len > mp->m_sb.sb_rextents)
			b = mp->m_sb.sb_rextents - len;
	}
	/* Bump and store the sequence number for the next caller. */
	ts.tv_sec = seq + 1;
	inode_set_atime_to_ts(VFS_I(rbmip), ts);
	xfs_trans_log_inode(tp, rbmip, XFS_ILOG_CORE);
	return b;
}
1782
1783 static void
xfs_rtalloc_align_minmax(xfs_rtxlen_t * raminlen,xfs_rtxlen_t * ramaxlen,xfs_rtxlen_t * prod)1784 xfs_rtalloc_align_minmax(
1785 xfs_rtxlen_t *raminlen,
1786 xfs_rtxlen_t *ramaxlen,
1787 xfs_rtxlen_t *prod)
1788 {
1789 xfs_rtxlen_t newmaxlen = *ramaxlen;
1790 xfs_rtxlen_t newminlen = *raminlen;
1791 xfs_rtxlen_t slack;
1792
1793 slack = newmaxlen % *prod;
1794 if (slack)
1795 newmaxlen -= slack;
1796 slack = newminlen % *prod;
1797 if (slack)
1798 newminlen += *prod - slack;
1799
1800 /*
1801 * If adjusting for extent size hint alignment produces an invalid
1802 * min/max len combination, go ahead without it.
1803 */
1804 if (newmaxlen < newminlen) {
1805 *prod = 1;
1806 return;
1807 }
1808 *ramaxlen = newmaxlen;
1809 *raminlen = newminlen;
1810 }
1811
1812 /* Given a free extent, find any part of it that isn't busy, if possible. */
STATIC bool
xfs_rtalloc_check_busy(
	struct xfs_rtalloc_args	*args,
	xfs_rtxnum_t		start,		/* near-allocation target */
	xfs_rtxlen_t		minlen_rtx,
	xfs_rtxlen_t		maxlen_rtx,
	xfs_rtxlen_t		len_rtx,
	xfs_rtxlen_t		prod,		/* alignment factor */
	xfs_rtxnum_t		rtx,		/* candidate extent start */
	xfs_rtxlen_t		*reslen,	/* out: usable length */
	xfs_rtxnum_t		*resrtx,	/* out: usable start */
	unsigned		*busy_gen)	/* out: busy generation */
{
	struct xfs_rtgroup	*rtg = args->rtg;
	struct xfs_mount	*mp = rtg_mount(rtg);
	xfs_agblock_t		rgbno = xfs_rtx_to_rgbno(rtg, rtx);
	xfs_rgblock_t		min_rgbno = xfs_rtx_to_rgbno(rtg, start);
	xfs_extlen_t		minlen = xfs_rtxlen_to_extlen(mp, minlen_rtx);
	xfs_extlen_t		len = xfs_rtxlen_to_extlen(mp, len_rtx);
	xfs_extlen_t		diff;
	bool			busy;

	/* Trim the candidate down to its largest non-busy subrange. */
	busy = xfs_extent_busy_trim(rtg_group(rtg), minlen,
			xfs_rtxlen_to_extlen(mp, maxlen_rtx), &rgbno, &len,
			busy_gen);

	/*
	 * If we have a largish extent that happens to start before min_rgbno,
	 * see if we can shift it into range...
	 */
	if (rgbno < min_rgbno && rgbno + len > min_rgbno) {
		diff = min_rgbno - rgbno;
		if (len > diff) {
			rgbno += diff;
			len -= diff;
		}
	}

	if (prod > 1 && len >= minlen) {
		/* Re-align the trimmed start to the prod factor. */
		xfs_rgblock_t	aligned_rgbno = roundup(rgbno, prod);

		diff = aligned_rgbno - rgbno;

		*resrtx = xfs_rgbno_to_rtx(mp, aligned_rgbno);
		/* Alignment may have consumed the whole range: report 0. */
		*reslen = xfs_extlen_to_rtxlen(mp,
				diff >= len ? 0 : len - diff);
	} else {
		*resrtx = xfs_rgbno_to_rtx(mp, rgbno);
		*reslen = xfs_extlen_to_rtxlen(mp, len);
	}

	return busy;
}
1866
1867 /*
1868 * Adjust the given free extent so that it isn't busy, or flush the log and
1869 * wait for the space to become unbusy. Only needed for rtgroups.
1870 */
STATIC int
xfs_rtallocate_adjust_for_busy(
	struct xfs_rtalloc_args	*args,
	xfs_rtxnum_t		start,	/* near-allocation target, 0 if none */
	xfs_rtxlen_t		minlen,
	xfs_rtxlen_t		maxlen,
	xfs_rtxlen_t		*len,	/* in/out: allocation length */
	xfs_rtxlen_t		prod,
	xfs_rtxnum_t		*rtx)	/* in/out: allocation start */
{
	xfs_rtxnum_t		resrtx;
	xfs_rtxlen_t		reslen;
	unsigned		busy_gen;
	bool			busy;
	int			error;

again:
	busy = xfs_rtalloc_check_busy(args, start, minlen, maxlen, *len, prod,
			*rtx, &reslen, &resrtx, &busy_gen);
	if (!busy)
		return 0;

	if (reslen < minlen || (start != 0 && resrtx != *rtx)) {
		/*
		 * Enough of the extent was busy that we cannot satisfy the
		 * allocation, or this is a near allocation and the start of
		 * the extent is busy.  Flush the log and wait for the busy
		 * situation to resolve.
		 */
		trace_xfs_rtalloc_extent_busy(args->rtg, start, minlen, maxlen,
				*len, prod, *rtx, busy_gen);

		error = xfs_extent_busy_flush(args->tp, rtg_group(args->rtg),
				busy_gen, 0);
		if (error)
			return error;

		/* The busy state may have changed; re-check from scratch. */
		goto again;
	}

	/* Some of the free space wasn't busy, hand that back to the caller. */
	trace_xfs_rtalloc_extent_busy_trim(args->rtg, *rtx, *len, resrtx,
			reslen);
	*len = reslen;
	*rtx = resrtx;

	return 0;
}
1919
/*
 * Try to allocate between minlen and maxlen rt extents from rt group rgno.
 * On success returns the allocated range in *bno/*blen; returns -ENOSPC if
 * the group cannot satisfy the request.
 */
static int
xfs_rtallocate_rtg(
	struct xfs_trans	*tp,
	xfs_rgnumber_t		rgno,
	xfs_rtblock_t		bno_hint,	/* NULLFSBLOCK if no hint */
	xfs_rtxlen_t		minlen,
	xfs_rtxlen_t		maxlen,
	xfs_rtxlen_t		prod,		/* alignment factor */
	bool			wasdel,		/* delalloc reservation? */
	bool			initial_user_data,
	bool			*rtlocked,	/* in/out: bitmap locked? */
	xfs_rtblock_t		*bno,		/* out: allocated rt block */
	xfs_extlen_t		*blen)		/* out: allocated length */
{
	struct xfs_rtalloc_args	args = {
		.mp		= tp->t_mountp,
		.tp		= tp,
	};
	xfs_rtxnum_t		start = 0;
	xfs_rtxnum_t		rtx;
	xfs_rtxlen_t		len = 0;
	int			error = 0;

	args.rtg = xfs_rtgroup_grab(args.mp, rgno);
	if (!args.rtg)
		return -ENOSPC;

	/*
	 * We need to lock out modifications to both the RT bitmap and summary
	 * inodes for finding free space in xfs_rtallocate_extent_{near,size}
	 * and join the bitmap and summary inodes for the actual allocation
	 * down in xfs_rtallocate_range.
	 *
	 * For RTG-enabled file systems we don't want to join the inodes to
	 * the transaction until we are committed to allocating from this RTG
	 * so that only one inode of each type is locked at a time.
	 *
	 * But for pre-RTG file systems we already need to join the bitmap
	 * inode to the transaction for xfs_rtpick_extent, which bumps the
	 * sequence number in it, so we'll have to join the inode to the
	 * transaction early here.
	 *
	 * This is all a bit messy, but at least the mess is contained in
	 * this function.
	 */
	if (!*rtlocked) {
		xfs_rtgroup_lock(args.rtg, XFS_RTGLOCK_BITMAP);
		if (!xfs_has_rtgroups(args.mp))
			xfs_rtgroup_trans_join(tp, args.rtg,
					XFS_RTGLOCK_BITMAP);
		*rtlocked = true;
	}

	/*
	 * For an allocation to an empty file at offset 0, pick an extent that
	 * will space things out in the rt area.
	 */
	if (bno_hint != NULLFSBLOCK)
		start = xfs_rtb_to_rtx(args.mp, bno_hint);
	else if (!xfs_has_rtgroups(args.mp) && initial_user_data)
		start = xfs_rtpick_extent(args.rtg, tp, maxlen);

	if (start) {
		error = xfs_rtallocate_extent_near(&args, start, minlen, maxlen,
				&len, prod, &rtx);
		/*
		 * If we can't allocate near a specific rt extent, try again
		 * without locality criteria.
		 */
		if (error == -ENOSPC) {
			xfs_rtbuf_cache_relse(&args);
			error = 0;
		}
	}

	if (!error) {
		error = xfs_rtallocate_extent_size(&args, minlen, maxlen, &len,
				prod, &rtx);
	}

	if (error) {
		if (xfs_has_rtgroups(args.mp))
			goto out_unlock;
		goto out_release;
	}

	if (xfs_has_rtgroups(args.mp)) {
		/* Avoid handing out recently freed but still busy space. */
		error = xfs_rtallocate_adjust_for_busy(&args, start, minlen,
				maxlen, &len, prod, &rtx);
		if (error)
			goto out_unlock;

		/* Committed to this group: now join the inodes. */
		xfs_rtgroup_trans_join(tp, args.rtg, XFS_RTGLOCK_BITMAP);
	}

	/* Mark the chosen range allocated in the bitmap and summary. */
	error = xfs_rtallocate_range(&args, rtx, len);
	if (error)
		goto out_release;

	xfs_trans_mod_sb(tp, wasdel ?
			XFS_TRANS_SB_RES_FREXTENTS : XFS_TRANS_SB_FREXTENTS,
			-(long)len);
	*bno = xfs_rtx_to_rtb(args.rtg, rtx);
	*blen = xfs_rtxlen_to_extlen(args.mp, len);

out_release:
	xfs_rtgroup_rele(args.rtg);
	xfs_rtbuf_cache_relse(&args);
	return error;
out_unlock:
	xfs_rtgroup_unlock(args.rtg, XFS_RTGLOCK_BITMAP);
	*rtlocked = false;
	goto out_release;
}
2034
/*
 * Allocate an rt extent, rotoring through all rt groups starting at the one
 * indicated by bno_hint (or a rotor counter if there is no hint).  Only
 * -ENOSPC from a group moves the search on; success or any other error ends
 * it.
 */
int
xfs_rtallocate_rtgs(
	struct xfs_trans	*tp,
	xfs_fsblock_t		bno_hint,
	xfs_rtxlen_t		minlen,
	xfs_rtxlen_t		maxlen,
	xfs_rtxlen_t		prod,
	bool			wasdel,
	bool			initial_user_data,
	xfs_rtblock_t		*bno,
	xfs_extlen_t		*blen)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_rgnumber_t		start_rgno, rgno;
	int			error;

	/*
	 * For now this just blindly iterates over the RTGs for an initial
	 * allocation.  We could try to keep an in-memory rtg_longest member
	 * to avoid the locking when just looking for big enough free space,
	 * but for now this keeps things simple.
	 */
	if (bno_hint != NULLFSBLOCK)
		start_rgno = xfs_rtb_to_rgno(mp, bno_hint);
	else
		start_rgno = (atomic_inc_return(&mp->m_rtgrotor) - 1) %
				mp->m_sb.sb_rgcount;

	rgno = start_rgno;
	do {
		bool		rtlocked = false;

		error = xfs_rtallocate_rtg(tp, rgno, bno_hint, minlen, maxlen,
				prod, wasdel, initial_user_data, &rtlocked,
				bno, blen);
		/* 0 (success) or a hard error both end the search here. */
		if (error != -ENOSPC)
			return error;
		ASSERT(!rtlocked);

		/* Wrap around and only use the hint for the first group. */
		if (++rgno == mp->m_sb.sb_rgcount)
			rgno = 0;
		bno_hint = NULLFSBLOCK;
	} while (rgno != start_rgno);

	return -ENOSPC;
}
2081
/*
 * Compute the rt allocation parameters (requested length, minimum length and
 * alignment factor, all in rt extents) for a bmap allocation, honoring any
 * extent size hint on the inode.
 */
static int
xfs_rtallocate_align(
	struct xfs_bmalloca	*ap,
	xfs_rtxlen_t		*ralen,		/* out: requested length */
	xfs_rtxlen_t		*raminlen,	/* out: minimum length */
	xfs_rtxlen_t		*prod,		/* out: alignment factor */
	bool			*noalign)	/* in/out: skip hint align */
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	xfs_fileoff_t		orig_offset = ap->offset;
	xfs_extlen_t		minlen = mp->m_sb.sb_rextsize;
	xfs_extlen_t		align;	/* minimum allocation alignment */
	xfs_extlen_t		mod;	/* product factor for allocators */
	int			error;

	if (*noalign) {
		/* Retry pass: fall back to plain rt extent alignment. */
		align = mp->m_sb.sb_rextsize;
	} else {
		if (ap->flags & XFS_BMAPI_COWFORK)
			align = xfs_get_cowextsz_hint(ap->ip);
		else
			align = xfs_get_extsz_hint(ap->ip);
		if (!align)
			align = 1;
		/* Hint equals the rt extent size: no extra alignment pass. */
		if (align == mp->m_sb.sb_rextsize)
			*noalign = true;
	}

	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 1,
			ap->eof, 0, ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(xfs_extlen_to_rtxmod(mp, ap->length) == 0);

	/*
	 * If we shifted the file offset downward to satisfy an extent size
	 * hint, increase minlen by that amount so that the allocator won't
	 * give us an allocation that's too short to cover at least one of the
	 * blocks that the caller asked for.
	 */
	if (ap->offset != orig_offset)
		minlen += orig_offset - ap->offset;

	/*
	 * Set ralen to be the actual requested length in rtextents.
	 *
	 * If the old value was close enough to XFS_BMBT_MAX_EXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * XFS_BMBT_MAX_EXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	*ralen = xfs_extlen_to_rtxlen(mp, min(ap->length, XFS_MAX_BMBT_EXTLEN));
	*raminlen = max_t(xfs_rtxlen_t, 1, xfs_extlen_to_rtxlen(mp, minlen));
	ASSERT(*raminlen > 0);
	ASSERT(*raminlen <= *ralen);

	/*
	 * Only bother calculating a real prod factor if offset & length are
	 * perfectly aligned, otherwise it will just get us in trouble.
	 */
	div_u64_rem(ap->offset, align, &mod);
	if (mod || ap->length % align)
		*prod = 1;
	else
		*prod = xfs_extlen_to_rtxlen(mp, align);

	if (*prod > 1)
		xfs_rtalloc_align_minmax(raminlen, ralen, prod);
	return 0;
}
2154
2155 int
xfs_bmap_rtalloc(struct xfs_bmalloca * ap)2156 xfs_bmap_rtalloc(
2157 struct xfs_bmalloca *ap)
2158 {
2159 xfs_fileoff_t orig_offset = ap->offset;
2160 xfs_rtxlen_t prod = 0; /* product factor for allocators */
2161 xfs_rtxlen_t ralen = 0; /* realtime allocation length */
2162 xfs_rtblock_t bno_hint = NULLRTBLOCK;
2163 xfs_extlen_t orig_length = ap->length;
2164 xfs_rtxlen_t raminlen;
2165 bool rtlocked = false;
2166 bool noalign = false;
2167 bool initial_user_data =
2168 ap->datatype & XFS_ALLOC_INITIAL_USER_DATA;
2169 int error;
2170
2171 ASSERT(!xfs_has_zoned(ap->tp->t_mountp));
2172
2173 retry:
2174 error = xfs_rtallocate_align(ap, &ralen, &raminlen, &prod, &noalign);
2175 if (error)
2176 return error;
2177
2178 if (xfs_bmap_adjacent(ap))
2179 bno_hint = ap->blkno;
2180
2181 if (xfs_has_rtgroups(ap->ip->i_mount)) {
2182 error = xfs_rtallocate_rtgs(ap->tp, bno_hint, raminlen, ralen,
2183 prod, ap->wasdel, initial_user_data,
2184 &ap->blkno, &ap->length);
2185 } else {
2186 error = xfs_rtallocate_rtg(ap->tp, 0, bno_hint, raminlen, ralen,
2187 prod, ap->wasdel, initial_user_data,
2188 &rtlocked, &ap->blkno, &ap->length);
2189 }
2190
2191 if (error == -ENOSPC) {
2192 if (!noalign) {
2193 /*
2194 * We previously enlarged the request length to try to
2195 * satisfy an extent size hint. The allocator didn't
2196 * return anything, so reset the parameters to the
2197 * original values and try again without alignment
2198 * criteria.
2199 */
2200 ap->offset = orig_offset;
2201 ap->length = orig_length;
2202 noalign = true;
2203 goto retry;
2204 }
2205
2206 ap->blkno = NULLFSBLOCK;
2207 ap->length = 0;
2208 return 0;
2209 }
2210 if (error)
2211 return error;
2212
2213 xfs_bmap_alloc_account(ap);
2214 return 0;
2215 }
2216