1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_alloc.h"
16 #include "xfs_bmap.h"
17 #include "xfs_bmap_btree.h"
18 #include "xfs_bmap_util.h"
19 #include "xfs_trans.h"
20 #include "xfs_trans_space.h"
21 #include "xfs_icache.h"
22 #include "xfs_rtalloc.h"
23 #include "xfs_sb.h"
24 #include "xfs_rtbitmap.h"
25 #include "xfs_rtrmap_btree.h"
26 #include "xfs_quota.h"
27 #include "xfs_log_priv.h"
28 #include "xfs_health.h"
29 #include "xfs_da_format.h"
30 #include "xfs_metafile.h"
31 #include "xfs_rtgroup.h"
32 #include "xfs_error.h"
33 #include "xfs_trace.h"
34 #include "xfs_rtrefcount_btree.h"
35 #include "xfs_reflink.h"
36
37 /*
38 * Return whether there are any free extents in the size range given
39 * by low and high, for the bitmap block bbno.
40 */
41 STATIC int
42 xfs_rtany_summary(
43 struct xfs_rtalloc_args *args,
44 int low, /* low log2 extent size */
45 int high, /* high log2 extent size */
46 xfs_fileoff_t bbno, /* bitmap block number */
47 int *maxlog) /* out: max log2 extent size free */
48 {
49 uint8_t *rsum_cache = args->rtg->rtg_rsum_cache;
50 int error;
51 int log; /* loop counter, log2 of ext. size */
52 xfs_suminfo_t sum; /* summary data */
53
54 /* There are no extents at levels >= rsum_cache[bbno]. */
55 if (rsum_cache) {
56 high = min(high, rsum_cache[bbno] - 1);
57 if (low > high) {
58 *maxlog = -1;
59 return 0;
60 }
61 }
62
63 /*
64 * Loop over logs of extent sizes.
65 */
66 for (log = high; log >= low; log--) {
67 /*
68 * Get one summary datum.
69 */
70 error = xfs_rtget_summary(args, log, bbno, &sum);
71 if (error) {
72 return error;
73 }
74 /*
75 * If there are any, return success.
76 */
77 if (sum) {
78 *maxlog = log;
79 goto out;
80 }
81 }
82 /*
83 * Found nothing, return failure.
84 */
85 *maxlog = -1;
86 out:
87 /* There were no extents at levels > log. */
88 if (rsum_cache && log + 1 < rsum_cache[bbno])
89 rsum_cache[bbno] = log + 1;
90 return 0;
91 }
92
93 /*
94 * Copy and transform the summary file, given the old and new
95 * parameters in the mount structures.
96 */
97 STATIC int
98 xfs_rtcopy_summary(
99 struct xfs_rtalloc_args *oargs,
100 struct xfs_rtalloc_args *nargs)
101 {
102 xfs_fileoff_t bbno; /* bitmap block number */
103 int error;
104 int log; /* summary level number (log length) */
105 xfs_suminfo_t sum; /* summary data */
106
107 for (log = oargs->mp->m_rsumlevels - 1; log >= 0; log--) {
108 for (bbno = oargs->mp->m_sb.sb_rbmblocks - 1;
109 (xfs_srtblock_t)bbno >= 0;
110 bbno--) {
111 error = xfs_rtget_summary(oargs, log, bbno, &sum);
112 if (error)
113 goto out;
114 if (sum == 0)
115 continue;
116 error = xfs_rtmodify_summary(oargs, log, bbno, -sum);
117 if (error)
118 goto out;
119 error = xfs_rtmodify_summary(nargs, log, bbno, sum);
120 if (error)
121 goto out;
122 ASSERT(sum > 0);
123 }
124 }
125 error = 0;
126 out:
127 xfs_rtbuf_cache_relse(oargs);
128 return error;
129 }
130 /*
131 * Mark an extent specified by start and len allocated.
132 * Updates all the summary information as well as the bitmap.
133 */
134 STATIC int
135 xfs_rtallocate_range(
136 struct xfs_rtalloc_args *args,
137 xfs_rtxnum_t start, /* start rtext to allocate */
138 xfs_rtxlen_t len) /* length to allocate */
139 {
140 struct xfs_mount *mp = args->mp;
141 xfs_rtxnum_t end; /* end of the allocated rtext */
142 int error;
143 xfs_rtxnum_t postblock = 0; /* first rtext allocated > end */
144 xfs_rtxnum_t preblock = 0; /* first rtext allocated < start */
145
146 end = start + len - 1;
147 /*
148 * Assume we're allocating out of the middle of a free extent.
149 * We need to find the beginning and end of the extent so we can
150 * properly update the summary.
151 */
152 error = xfs_rtfind_back(args, start, &preblock);
153 if (error)
154 return error;
155
156 /*
157 * Find the next allocated block (end of free extent).
158 */
159 error = xfs_rtfind_forw(args, end, args->rtg->rtg_extents - 1,
160 &postblock);
161 if (error)
162 return error;
163
164 /*
165 * Decrement the summary information corresponding to the entire
166 * (old) free extent.
167 */
168 error = xfs_rtmodify_summary(args,
169 xfs_highbit64(postblock + 1 - preblock),
170 xfs_rtx_to_rbmblock(mp, preblock), -1);
171 if (error)
172 return error;
173
174 /*
175 * If there are blocks not being allocated at the front of the
176 * old extent, add summary data for them to be free.
177 */
178 if (preblock < start) {
179 error = xfs_rtmodify_summary(args,
180 xfs_highbit64(start - preblock),
181 xfs_rtx_to_rbmblock(mp, preblock), 1);
182 if (error)
183 return error;
184 }
185
186 /*
187 * If there are blocks not being allocated at the end of the
188 * old extent, add summary data for them to be free.
189 */
190 if (postblock > end) {
191 error = xfs_rtmodify_summary(args,
192 xfs_highbit64(postblock - end),
193 xfs_rtx_to_rbmblock(mp, end + 1), 1);
194 if (error)
195 return error;
196 }
197
198 /*
199 * Modify the bitmap to mark this extent allocated.
200 */
201 return xfs_rtmodify_range(args, start, len, 0);
202 }
203
204 /* Reduce @rtxlen until it is a multiple of @prod. */
205 static inline xfs_rtxlen_t
206 xfs_rtalloc_align_len(
207 xfs_rtxlen_t rtxlen,
208 xfs_rtxlen_t prod)
209 {
210 if (unlikely(prod > 1))
211 return rounddown(rtxlen, prod);
212 return rtxlen;
213 }
214
215 /*
216 * Make sure we don't run off the end of the rt volume. Be careful that
217 * adjusting maxlen downwards doesn't cause us to fail the alignment checks.
218 */
219 static inline xfs_rtxlen_t
220 xfs_rtallocate_clamp_len(
221 struct xfs_rtgroup *rtg,
222 xfs_rtxnum_t startrtx,
223 xfs_rtxlen_t rtxlen,
224 xfs_rtxlen_t prod)
225 {
226 xfs_rtxlen_t ret;
227
228 ret = min(rtg->rtg_extents, startrtx + rtxlen) - startrtx;
229 return xfs_rtalloc_align_len(ret, prod);
230 }
231
232 /*
233 * Attempt to allocate an extent minlen<=len<=maxlen starting from
234 * bitmap block bbno. If we don't get maxlen then use prod to trim
235 * the length, if given. Returns error; returns starting block in *rtx.
236 * The lengths are all in rtextents.
237 */
238 STATIC int
239 xfs_rtallocate_extent_block(
240 struct xfs_rtalloc_args *args,
241 xfs_fileoff_t bbno, /* bitmap block number */
242 xfs_rtxlen_t minlen, /* minimum length to allocate */
243 xfs_rtxlen_t maxlen, /* maximum length to allocate */
244 xfs_rtxlen_t *len, /* out: actual length allocated */
245 xfs_rtxnum_t *nextp, /* out: next rtext to try */
246 xfs_rtxlen_t prod, /* extent product factor */
247 xfs_rtxnum_t *rtx) /* out: start rtext allocated */
248 {
249 struct xfs_mount *mp = args->mp;
250 xfs_rtxnum_t besti = -1; /* best rtext found so far */
251 xfs_rtxnum_t end; /* last rtext in chunk */
252 xfs_rtxnum_t i; /* current rtext trying */
253 xfs_rtxnum_t next; /* next rtext to try */
254 xfs_rtxlen_t scanlen; /* number of free rtx to look for */
255 xfs_rtxlen_t bestlen = 0; /* best length found so far */
256 int stat; /* status from internal calls */
257 int error;
258
259 /*
260 * Loop over all the extents starting in this bitmap block up to the
261 * end of the rt volume, looking for one that's long enough.
262 */
263 end = min(args->rtg->rtg_extents, xfs_rbmblock_to_rtx(mp, bbno + 1)) -
264 1;
265 for (i = xfs_rbmblock_to_rtx(mp, bbno); i <= end; i++) {
266 /* Make sure we don't scan off the end of the rt volume. */
267 scanlen = xfs_rtallocate_clamp_len(args->rtg, i, maxlen, prod);
268 if (scanlen < minlen)
269 break;
270
271 /*
272 * See if there's a free extent of scanlen starting at i.
273 * If it's not so then next will contain the first non-free.
274 */
275 error = xfs_rtcheck_range(args, i, scanlen, 1, &next, &stat);
276 if (error)
277 return error;
278 if (stat) {
279 /*
280 * i to scanlen is all free, allocate and return that.
281 */
282 *len = scanlen;
283 *rtx = i;
284 return 0;
285 }
286
287 /*
288 * In the case where we have a variable-sized allocation
289 * request, figure out how big this free piece is,
290 * and if it's big enough for the minimum, and the best
291 * so far, remember it.
292 */
293 if (minlen < maxlen) {
294 xfs_rtxnum_t thislen; /* this extent size */
295
296 thislen = next - i;
297 if (thislen >= minlen && thislen > bestlen) {
298 besti = i;
299 bestlen = thislen;
300 }
301 }
302 /*
303 * If not done yet, find the start of the next free space.
304 */
305 if (next >= end)
306 break;
307 error = xfs_rtfind_forw(args, next, end, &i);
308 if (error)
309 return error;
310 }
311
312 /* Searched the whole thing & didn't find a maxlen free extent. */
313 if (besti == -1)
314 goto nospace;
315
316 /*
317 * Ensure bestlen is a multiple of prod, but don't return a too-short
318 * extent.
319 */
320 bestlen = xfs_rtalloc_align_len(bestlen, prod);
321 if (bestlen < minlen)
322 goto nospace;
323
324 /*
325 * Pick besti for bestlen & return that.
326 */
327 *len = bestlen;
328 *rtx = besti;
329 return 0;
330 nospace:
331 /* Allocation failed. Set *nextp to the next block to try. */
332 *nextp = next;
333 return -ENOSPC;
334 }
335
336 /*
337 * Allocate an extent of length minlen<=len<=maxlen, starting at block
338 * bno. If we don't get maxlen then use prod to trim the length, if given.
339 * Returns error; returns starting block in *rtx.
340 * The lengths are all in rtextents.
341 */
342 STATIC int
343 xfs_rtallocate_extent_exact(
344 struct xfs_rtalloc_args *args,
345 xfs_rtxnum_t start, /* starting rtext number to allocate */
346 xfs_rtxlen_t minlen, /* minimum length to allocate */
347 xfs_rtxlen_t maxlen, /* maximum length to allocate */
348 xfs_rtxlen_t *len, /* out: actual length allocated */
349 xfs_rtxlen_t prod, /* extent product factor */
350 xfs_rtxnum_t *rtx) /* out: start rtext allocated */
351 {
352 xfs_rtxnum_t next; /* next rtext to try (dummy) */
353 xfs_rtxlen_t alloclen; /* candidate length */
354 xfs_rtxlen_t scanlen; /* number of free rtx to look for */
355 int isfree; /* extent is free */
356 int error;
357
358 ASSERT(minlen % prod == 0);
359 ASSERT(maxlen % prod == 0);
360
361 /* Make sure we don't run off the end of the rt volume. */
362 scanlen = xfs_rtallocate_clamp_len(args->rtg, start, maxlen, prod);
363 if (scanlen < minlen)
364 return -ENOSPC;
365
366 /* Check if the range in question (for scanlen) is free. */
367 error = xfs_rtcheck_range(args, start, scanlen, 1, &next, &isfree);
368 if (error)
369 return error;
370
371 if (isfree) {
372 /* start to scanlen is all free; allocate it. */
373 *len = scanlen;
374 *rtx = start;
375 return 0;
376 }
377
378 /*
379 * If not, allocate what there is, if it's at least minlen.
380 */
381 alloclen = next - start;
382 if (alloclen < minlen)
383 return -ENOSPC;
384
385 /* Ensure alloclen is a multiple of prod. */
386 alloclen = xfs_rtalloc_align_len(alloclen, prod);
387 if (alloclen < minlen)
388 return -ENOSPC;
389
390 *len = alloclen;
391 *rtx = start;
392 return 0;
393 }
394
395 /*
396 * Allocate an extent of length minlen<=len<=maxlen, starting as near
397 * to start as possible. If we don't get maxlen then use prod to trim
398 * the length, if given. The lengths are all in rtextents.
399 */
400 STATIC int
401 xfs_rtallocate_extent_near(
402 struct xfs_rtalloc_args *args,
403 xfs_rtxnum_t start, /* starting rtext number to allocate */
404 xfs_rtxlen_t minlen, /* minimum length to allocate */
405 xfs_rtxlen_t maxlen, /* maximum length to allocate */
406 xfs_rtxlen_t *len, /* out: actual length allocated */
407 xfs_rtxlen_t prod, /* extent product factor */
408 xfs_rtxnum_t *rtx) /* out: start rtext allocated */
409 {
410 struct xfs_mount *mp = args->mp;
411 int maxlog; /* max useful extent from summary */
412 xfs_fileoff_t bbno; /* bitmap block number */
413 int error;
414 int i; /* bitmap block offset (loop control) */
415 int j; /* secondary loop control */
416 int log2len; /* log2 of minlen */
417 xfs_rtxnum_t n; /* next rtext to try */
418
419 ASSERT(minlen % prod == 0);
420 ASSERT(maxlen % prod == 0);
421
422 /*
423 * If the block number given is off the end, silently set it to the last
424 * block.
425 */
426 start = min(start, args->rtg->rtg_extents - 1);
427
428 /*
429 * Try the exact allocation first.
430 */
431 error = xfs_rtallocate_extent_exact(args, start, minlen, maxlen, len,
432 prod, rtx);
433 if (error != -ENOSPC)
434 return error;
435
436 bbno = xfs_rtx_to_rbmblock(mp, start);
437 i = 0;
438 j = -1;
439 ASSERT(minlen != 0);
440 log2len = xfs_highbit32(minlen);
441 /*
442 * Loop over all bitmap blocks (bbno + i is current block).
443 */
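/*
 * i is the signed bitmap block offset currently being tried; the loop
 * control at the bottom alternates it between the positive and negative
 * side of the starting block.  j limits the backwards scan on the
 * negative side so blocks that were already checked are not rescanned.
 */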
444 for (;;) {
445 /*
446 * Get summary information of extents of all useful levels
447 * starting in this bitmap block.
448 */
449 error = xfs_rtany_summary(args, log2len, mp->m_rsumlevels - 1,
450 bbno + i, &maxlog);
451 if (error)
452 return error;
453
454 /*
455 * If there are any useful extents starting here, try
456 * allocating one.
457 */
458 if (maxlog >= 0) {
459 xfs_extlen_t maxavail =
460 min_t(xfs_rtblock_t, maxlen,
461 (1ULL << (maxlog + 1)) - 1);
462 /*
463 * On the positive side of the starting location.
464 */
465 if (i >= 0) {
466 /*
467 * Try to allocate an extent starting in
468 * this block.
469 */
470 error = xfs_rtallocate_extent_block(args,
471 bbno + i, minlen, maxavail, len,
472 &n, prod, rtx);
473 if (error != -ENOSPC)
474 return error;
475 }
476 /*
477 * On the negative side of the starting location.
478 */
479 else { /* i < 0 */
480 int maxblocks;
481
482 /*
483 * Loop backwards to find the end of the extent
484 * we found in the realtime summary.
485 *
486 * maxblocks is the maximum possible number of
487 * bitmap blocks from the start of the extent
488 * to the end of the extent.
489 */
490 if (maxlog == 0)
491 maxblocks = 0;
492 else if (maxlog < mp->m_blkbit_log)
493 maxblocks = 1;
494 else
495 maxblocks = 2 << (maxlog - mp->m_blkbit_log);
496
497 /*
498 * We need to check bbno + i + maxblocks down to
499 * bbno + i. We already checked bbno down to
500 * bbno + j + 1, so we don't need to check those
501 * again.
502 */
503 j = min(i + maxblocks, j);
504 for (; j >= i; j--) {
505 error = xfs_rtallocate_extent_block(args,
506 bbno + j, minlen,
507 maxavail, len, &n, prod,
508 rtx);
509 if (error != -ENOSPC)
510 return error;
511 }
512 }
513 }
514 /*
515 * Loop control. If we were on the positive side, and there's
516 * still more blocks on the negative side, go there.
517 */
518 if (i > 0 && (int)bbno - i >= 0)
519 i = -i;
520 /*
521 * If positive, and no more negative, but there are more
522 * positive, go there.
523 */
524 else if (i > 0 && (int)bbno + i < mp->m_sb.sb_rbmblocks - 1)
525 i++;
526 /*
527 * If negative or 0 (just started), and there are positive
528 * blocks to go, go there. The 0 case moves to block 1.
529 */
530 else if (i <= 0 && (int)bbno - i < mp->m_sb.sb_rbmblocks - 1)
531 i = 1 - i;
532 /*
533 * If negative or 0 and there are more negative blocks,
534 * go there.
535 */
536 else if (i <= 0 && (int)bbno + i > 0)
537 i--;
538 /*
539 * Must be done. Return failure.
540 */
541 else
542 break;
543 }
544 return -ENOSPC;
545 }
546
547 static int
548 xfs_rtalloc_sumlevel(
549 struct xfs_rtalloc_args *args,
550 int l, /* level number */
551 xfs_rtxlen_t minlen, /* minimum length to allocate */
552 xfs_rtxlen_t maxlen, /* maximum length to allocate */
553 xfs_rtxlen_t prod, /* extent product factor */
554 xfs_rtxlen_t *len, /* out: actual length allocated */
555 xfs_rtxnum_t *rtx) /* out: start rtext allocated */
556 {
557 xfs_fileoff_t i; /* bitmap block number */
558 int error;
559
560 for (i = 0; i < args->mp->m_sb.sb_rbmblocks; i++) {
561 xfs_suminfo_t sum; /* summary information for extents */
562 xfs_rtxnum_t n; /* next rtext to be tried */
563
564 error = xfs_rtget_summary(args, l, i, &sum);
565 if (error)
566 return error;
567
568 /*
569 * Nothing there, on to the next block.
570 */
571 if (!sum)
572 continue;
573
574 /*
575 * Try allocating the extent.
576 */
577 error = xfs_rtallocate_extent_block(args, i, minlen, maxlen,
578 len, &n, prod, rtx);
579 if (error != -ENOSPC)
580 return error;
581
582 /*
583 * If the "next block to try" returned from the allocator is
584 * beyond the next bitmap block, skip to that bitmap block.
585 */
586 if (xfs_rtx_to_rbmblock(args->mp, n) > i + 1)
587 i = xfs_rtx_to_rbmblock(args->mp, n) - 1;
588 }
589
590 return -ENOSPC;
591 }
592
593 /*
594 * Allocate an extent of length minlen<=len<=maxlen, with no position
595 * specified. If we don't get maxlen then use prod to trim
596 * the length, if given. The lengths are all in rtextents.
597 */
598 static int
599 xfs_rtallocate_extent_size(
600 struct xfs_rtalloc_args *args,
601 xfs_rtxlen_t minlen, /* minimum length to allocate */
602 xfs_rtxlen_t maxlen, /* maximum length to allocate */
603 xfs_rtxlen_t *len, /* out: actual length allocated */
604 xfs_rtxlen_t prod, /* extent product factor */
605 xfs_rtxnum_t *rtx) /* out: start rtext allocated */
606 {
607 int error;
608 int l; /* level number (loop control) */
609
610 ASSERT(minlen % prod == 0);
611 ASSERT(maxlen % prod == 0);
612 ASSERT(maxlen != 0);
613
614 /*
615 * Loop over all the levels starting with maxlen.
616 *
617 * At each level, look at all the bitmap blocks, to see if there are
618 * extents starting there that are long enough (>= maxlen).
619 *
620 * Note, only on the initial level can the allocation fail if the
621 * summary says there's an extent.
622 */
623 for (l = xfs_highbit32(maxlen); l < args->mp->m_rsumlevels; l++) {
624 error = xfs_rtalloc_sumlevel(args, l, minlen, maxlen, prod, len,
625 rtx);
626 if (error != -ENOSPC)
627 return error;
628 }
629
630 /*
631 * Didn't find any maxlen blocks. Try smaller ones, unless we are
632 * looking for a fixed size extent.
633 */
634 if (minlen > --maxlen)
635 return -ENOSPC;
636 ASSERT(minlen != 0);
637 ASSERT(maxlen != 0);
638
639 /*
640 * Loop over sizes, from maxlen down to minlen.
641 *
642 * This time, when we do the allocations, allow smaller ones to succeed,
643 * but make sure the specified minlen/maxlen are in the possible range
644 * for this summary level.
645 */
646 for (l = xfs_highbit32(maxlen); l >= xfs_highbit32(minlen); l--) {
647 error = xfs_rtalloc_sumlevel(args, l,
648 max_t(xfs_rtxlen_t, minlen, 1 << l),
649 min_t(xfs_rtxlen_t, maxlen, (1 << (l + 1)) - 1),
650 prod, len, rtx);
651 if (error != -ENOSPC)
652 return error;
653 }
654
655 return -ENOSPC;
656 }
657
658 static void
659 xfs_rtunmount_rtg(
660 struct xfs_rtgroup *rtg)
661 {
662 int i;
663
664 for (i = 0; i < XFS_RTGI_MAX; i++)
665 xfs_rtginode_irele(&rtg->rtg_inodes[i]);
666 kvfree(rtg->rtg_rsum_cache);
667 }
668
669 static int
670 xfs_alloc_rsum_cache(
671 struct xfs_rtgroup *rtg,
672 xfs_extlen_t rbmblocks)
673 {
674 /*
675 * The rsum cache is initialized to the maximum value, which is
676 * trivially an upper bound on the maximum level with any free extents.
677 */
678 rtg->rtg_rsum_cache = kvmalloc(rbmblocks, GFP_KERNEL);
679 if (!rtg->rtg_rsum_cache)
680 return -ENOMEM;
681 memset(rtg->rtg_rsum_cache, -1, rbmblocks);
682 return 0;
683 }
684
685 /*
686 * If we changed the rt extent size (meaning there was no rt volume previously)
687 * and the root directory had EXTSZINHERIT and RTINHERIT set, it's possible
688 * that the extent size hint on the root directory is no longer congruent with
689 * the new rt extent size. Log the rootdir inode to fix this.
690 */
691 static int
692 xfs_growfs_rt_fixup_extsize(
693 struct xfs_mount *mp)
694 {
695 struct xfs_inode *ip = mp->m_rootip;
696 struct xfs_trans *tp;
697 int error = 0;
698
699 xfs_ilock(ip, XFS_IOLOCK_EXCL);
700 if (!(ip->i_diflags & XFS_DIFLAG_RTINHERIT) ||
701 !(ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT))
702 goto out_iolock;
703
704 error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_ichange, 0, 0, false,
705 &tp);
706 if (error)
707 goto out_iolock;
708
709 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
710 error = xfs_trans_commit(tp);
711 xfs_iunlock(ip, XFS_ILOCK_EXCL);
712
713 out_iolock:
714 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
715 return error;
716 }
717
718 /* Ensure that the rtgroup metadata inode is loaded, creating it if needed. */
719 static int
720 xfs_rtginode_ensure(
721 struct xfs_rtgroup *rtg,
722 enum xfs_rtg_inodes type)
723 {
724 struct xfs_trans *tp;
725 int error;
726
727 if (rtg->rtg_inodes[type])
728 return 0;
729
730 error = xfs_trans_alloc_empty(rtg_mount(rtg), &tp);
731 if (error)
732 return error;
733 error = xfs_rtginode_load(rtg, type, tp);
734 xfs_trans_cancel(tp);
735
736 if (error != -ENOENT)
737 return error;
738 return xfs_rtginode_create(rtg, type, true);
739 }
740
741 static struct xfs_mount *
742 xfs_growfs_rt_alloc_fake_mount(
743 const struct xfs_mount *mp,
744 xfs_rfsblock_t rblocks,
745 xfs_agblock_t rextsize)
746 {
747 struct xfs_mount *nmp;
748
749 nmp = kmemdup(mp, sizeof(*mp), GFP_KERNEL);
750 if (!nmp)
751 return NULL;
752 xfs_mount_sb_set_rextsize(nmp, &nmp->m_sb, rextsize);
753 nmp->m_sb.sb_rblocks = rblocks;
754 nmp->m_sb.sb_rextents = xfs_blen_to_rtbxlen(nmp, nmp->m_sb.sb_rblocks);
755 nmp->m_sb.sb_rbmblocks = xfs_rtbitmap_blockcount(nmp);
756 nmp->m_sb.sb_rextslog = xfs_compute_rextslog(nmp->m_sb.sb_rextents);
757 if (xfs_has_rtgroups(nmp))
758 nmp->m_sb.sb_rgcount = howmany_64(nmp->m_sb.sb_rextents,
759 nmp->m_sb.sb_rgextents);
760 else
761 nmp->m_sb.sb_rgcount = 1;
762 nmp->m_rsumblocks = xfs_rtsummary_blockcount(nmp, &nmp->m_rsumlevels);
763
764 if (rblocks > 0)
765 nmp->m_features |= XFS_FEAT_REALTIME;
766
767 /* recompute growfsrt reservation from new rsumsize */
768 xfs_trans_resv_calc(nmp, &nmp->m_resv);
769 return nmp;
770 }
771
772 /* Free all the new space and return the number of extents actually freed. */
773 static int
774 xfs_growfs_rt_free_new(
775 struct xfs_rtgroup *rtg,
776 struct xfs_rtalloc_args *nargs,
777 xfs_rtbxlen_t *freed_rtx)
778 {
779 struct xfs_mount *mp = rtg_mount(rtg);
780 xfs_rgnumber_t rgno = rtg_rgno(rtg);
781 xfs_rtxnum_t start_rtx = 0, end_rtx;
782
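/*
 * Only the extents beyond the group's old size are new; for a group that
 * did not exist before this grow, everything is new.
 */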
783 if (rgno < mp->m_sb.sb_rgcount)
784 start_rtx = xfs_rtgroup_extents(mp, rgno);
785 end_rtx = xfs_rtgroup_extents(nargs->mp, rgno);
786
787 /*
788 * Compute the first new extent that we want to free, being careful to
789 * skip past a realtime superblock at the start of the realtime volume.
790 */
791 if (xfs_has_rtsb(nargs->mp) && rgno == 0 && start_rtx == 0)
792 start_rtx++;
793 *freed_rtx = end_rtx - start_rtx;
794 return xfs_rtfree_range(nargs, start_rtx, *freed_rtx);
795 }
796
797 static xfs_rfsblock_t
798 xfs_growfs_rt_nrblocks(
799 struct xfs_rtgroup *rtg,
800 xfs_rfsblock_t nrblocks,
801 xfs_agblock_t rextsize,
802 xfs_fileoff_t bmbno)
803 {
804 struct xfs_mount *mp = rtg_mount(rtg);
805 xfs_rfsblock_t step;
806
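/*
 * step is the number of rt blocks covered by bitmap blocks 0 through
 * bmbno, so the grow proceeds one bitmap block at a time.  On rtgroups
 * file systems it is capped at the group size and offset by the group's
 * starting block.
 */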
807 step = (bmbno + 1) * mp->m_rtx_per_rbmblock * rextsize;
808 if (xfs_has_rtgroups(mp)) {
809 xfs_rfsblock_t rgblocks = mp->m_sb.sb_rgextents * rextsize;
810
811 step = min(rgblocks, step) + rgblocks * rtg_rgno(rtg);
812 }
813
814 return min(nrblocks, step);
815 }
816
817 /*
818 * If the post-grow filesystem will have an rtsb; we're initializing the first
819 * rtgroup; and the filesystem didn't have a realtime section, write the rtsb
820 * now, and attach the rtsb buffer to the real mount.
821 */
822 static int
823 xfs_growfs_rt_init_rtsb(
824 const struct xfs_rtalloc_args *nargs,
825 const struct xfs_rtgroup *rtg,
826 const struct xfs_rtalloc_args *args)
827 {
828 struct xfs_mount *mp = args->mp;
829 struct xfs_buf *rtsb_bp;
830 int error;
831
832 if (!xfs_has_rtsb(nargs->mp))
833 return 0;
834 if (rtg_rgno(rtg) > 0)
835 return 0;
836 if (mp->m_sb.sb_rblocks)
837 return 0;
838
839 error = xfs_buf_get_uncached(mp->m_rtdev_targp, XFS_FSB_TO_BB(mp, 1),
840 0, &rtsb_bp);
841 if (error)
842 return error;
843
844 rtsb_bp->b_maps[0].bm_bn = XFS_RTSB_DADDR;
845 rtsb_bp->b_ops = &xfs_rtsb_buf_ops;
846
847 xfs_update_rtsb(rtsb_bp, mp->m_sb_bp);
848 mp->m_rtsb_bp = rtsb_bp;
849 error = xfs_bwrite(rtsb_bp);
850 xfs_buf_unlock(rtsb_bp);
851 if (error)
852 return error;
853
854 /* Initialize the rtrmap to reflect the rtsb. */
855 if (rtg_rmap(args->rtg) != NULL)
856 error = xfs_rtrmapbt_init_rtsb(nargs->mp, args->rtg, args->tp);
857
858 return error;
859 }
860
861 static int
862 xfs_growfs_rt_bmblock(
863 struct xfs_rtgroup *rtg,
864 xfs_rfsblock_t nrblocks,
865 xfs_agblock_t rextsize,
866 xfs_fileoff_t bmbno)
867 {
868 struct xfs_mount *mp = rtg_mount(rtg);
869 struct xfs_inode *rbmip = rtg_bitmap(rtg);
870 struct xfs_inode *rsumip = rtg_summary(rtg);
871 struct xfs_rtalloc_args args = {
872 .mp = mp,
873 .rtg = rtg,
874 };
875 struct xfs_rtalloc_args nargs = {
876 .rtg = rtg,
877 };
878 struct xfs_mount *nmp;
879 xfs_rtbxlen_t freed_rtx;
880 int error;
881
882 /*
883 * Calculate new sb and mount fields for this round. Also ensure the
884 * rtg_extents value is uptodate as the rtbitmap code relies on it.
885 */
886 nmp = nargs.mp = xfs_growfs_rt_alloc_fake_mount(mp,
887 xfs_growfs_rt_nrblocks(rtg, nrblocks, rextsize, bmbno),
888 rextsize);
889 if (!nmp)
890 return -ENOMEM;
891
892 xfs_rtgroup_calc_geometry(nmp, rtg, rtg_rgno(rtg),
893 nmp->m_sb.sb_rgcount, nmp->m_sb.sb_rextents);
894
895 /*
896 * Recompute the growfsrt reservation from the new rsumsize, so that the
897 * transaction below uses the new, potentially larger value.
898 */
899 xfs_trans_resv_calc(nmp, &nmp->m_resv);
900 error = xfs_trans_alloc(mp, &M_RES(nmp)->tr_growrtfree, 0, 0, 0,
901 &args.tp);
902 if (error)
903 goto out_free;
904 nargs.tp = args.tp;
905
906 xfs_rtgroup_lock(args.rtg, XFS_RTGLOCK_BITMAP | XFS_RTGLOCK_RMAP);
907 xfs_rtgroup_trans_join(args.tp, args.rtg,
908 XFS_RTGLOCK_BITMAP | XFS_RTGLOCK_RMAP);
909
910 /*
911 * Update the bitmap inode's size ondisk and incore. We need to update
912 * the incore size so that inode inactivation won't punch what it thinks
913 * are "posteof" blocks.
914 */
915 rbmip->i_disk_size = nmp->m_sb.sb_rbmblocks * nmp->m_sb.sb_blocksize;
916 i_size_write(VFS_I(rbmip), rbmip->i_disk_size);
917 xfs_trans_log_inode(args.tp, rbmip, XFS_ILOG_CORE);
918
919 /*
920 * Update the summary inode's size. We need to update the incore size
921 * so that inode inactivation won't punch what it thinks are "posteof"
922 * blocks.
923 */
924 rsumip->i_disk_size = nmp->m_rsumblocks * nmp->m_sb.sb_blocksize;
925 i_size_write(VFS_I(rsumip), rsumip->i_disk_size);
926 xfs_trans_log_inode(args.tp, rsumip, XFS_ILOG_CORE);
927
928 /*
929 * Copy summary data from old to new sizes when the real size (not
930 * block-aligned) changes.
931 */
932 if (mp->m_sb.sb_rbmblocks != nmp->m_sb.sb_rbmblocks ||
933 mp->m_rsumlevels != nmp->m_rsumlevels) {
934 error = xfs_rtcopy_summary(&args, &nargs);
935 if (error)
936 goto out_cancel;
937 }
938
939 error = xfs_growfs_rt_init_rtsb(&nargs, rtg, &args);
940 if (error)
941 goto out_cancel;
942
943 /*
944 * Update superblock fields.
945 */
946 if (nmp->m_sb.sb_rextsize != mp->m_sb.sb_rextsize)
947 xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_REXTSIZE,
948 nmp->m_sb.sb_rextsize - mp->m_sb.sb_rextsize);
949 if (nmp->m_sb.sb_rbmblocks != mp->m_sb.sb_rbmblocks)
950 xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_RBMBLOCKS,
951 nmp->m_sb.sb_rbmblocks - mp->m_sb.sb_rbmblocks);
952 if (nmp->m_sb.sb_rblocks != mp->m_sb.sb_rblocks)
953 xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_RBLOCKS,
954 nmp->m_sb.sb_rblocks - mp->m_sb.sb_rblocks);
955 if (nmp->m_sb.sb_rextents != mp->m_sb.sb_rextents)
956 xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_REXTENTS,
957 nmp->m_sb.sb_rextents - mp->m_sb.sb_rextents);
958 if (nmp->m_sb.sb_rextslog != mp->m_sb.sb_rextslog)
959 xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_REXTSLOG,
960 nmp->m_sb.sb_rextslog - mp->m_sb.sb_rextslog);
961 if (nmp->m_sb.sb_rgcount != mp->m_sb.sb_rgcount)
962 xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_RGCOUNT,
963 nmp->m_sb.sb_rgcount - mp->m_sb.sb_rgcount);
964
965 /*
966 * Free the new extent.
967 */
968 error = xfs_growfs_rt_free_new(rtg, &nargs, &freed_rtx);
969 xfs_rtbuf_cache_relse(&nargs);
970 if (error)
971 goto out_cancel;
972
973 /*
974 * Mark more blocks free in the superblock.
975 */
976 xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_FREXTENTS, freed_rtx);
977
978 /*
979 * Update the calculated values in the real mount structure.
980 */
981 mp->m_rsumlevels = nmp->m_rsumlevels;
982 mp->m_rsumblocks = nmp->m_rsumblocks;
983
984 /*
985 * Recompute the growfsrt reservation from the new rsumsize.
986 */
987 xfs_trans_resv_calc(mp, &mp->m_resv);
988
989 error = xfs_trans_commit(args.tp);
990 if (error)
991 goto out_free;
992
993 /*
994 * Ensure the mount RT feature flag is now set, and compute new
995 * maxlevels for rt btrees.
996 */
997 mp->m_features |= XFS_FEAT_REALTIME;
998 xfs_rtrmapbt_compute_maxlevels(mp);
999 xfs_rtrefcountbt_compute_maxlevels(mp);
1000
1001 kfree(nmp);
1002 return 0;
1003
1004 out_cancel:
1005 xfs_trans_cancel(args.tp);
1006 out_free:
1007 kfree(nmp);
1008 return error;
1009 }
1010
1011 static xfs_rtxnum_t
1012 xfs_last_rtgroup_extents(
1013 struct xfs_mount *mp)
1014 {
1015 return mp->m_sb.sb_rextents -
1016 ((xfs_rtxnum_t)(mp->m_sb.sb_rgcount - 1) *
1017 mp->m_sb.sb_rgextents);
1018 }
1019
1020 /*
1021 * Calculate the last rbmblock currently used.
1022 *
1023 * This also deals with the case where there were no rtextents before.
1024 */
1025 static xfs_fileoff_t
1026 xfs_last_rt_bmblock(
1027 struct xfs_rtgroup *rtg)
1028 {
1029 struct xfs_mount *mp = rtg_mount(rtg);
1030 xfs_rgnumber_t rgno = rtg_rgno(rtg);
1031 xfs_fileoff_t bmbno = 0;
1032
1033 ASSERT(!mp->m_sb.sb_rgcount || rgno >= mp->m_sb.sb_rgcount - 1);
1034
1035 if (mp->m_sb.sb_rgcount && rgno == mp->m_sb.sb_rgcount - 1) {
1036 xfs_rtxnum_t nrext = xfs_last_rtgroup_extents(mp);
1037
1038 /* Also fill up the previous block if not entirely full. */
1039 bmbno = xfs_rtbitmap_blockcount_len(mp, nrext);
1040 if (xfs_rtx_to_rbmword(mp, nrext) != 0)
1041 bmbno--;
1042 }
1043
1044 return bmbno;
1045 }
1046
1047 /*
1048 * Allocate space to the bitmap and summary files, as necessary.
1049 */
1050 static int
1051 xfs_growfs_rt_alloc_blocks(
1052 struct xfs_rtgroup *rtg,
1053 xfs_rfsblock_t nrblocks,
1054 xfs_agblock_t rextsize,
1055 xfs_extlen_t *nrbmblocks)
1056 {
1057 struct xfs_mount *mp = rtg_mount(rtg);
1058 struct xfs_inode *rbmip = rtg_bitmap(rtg);
1059 struct xfs_inode *rsumip = rtg_summary(rtg);
1060 xfs_extlen_t orbmblocks = 0;
1061 xfs_extlen_t orsumblocks = 0;
1062 struct xfs_mount *nmp;
1063 int error = 0;
1064
1065 nmp = xfs_growfs_rt_alloc_fake_mount(mp, nrblocks, rextsize);
1066 if (!nmp)
1067 return -ENOMEM;
1068 *nrbmblocks = nmp->m_sb.sb_rbmblocks;
1069
1070 if (xfs_has_rtgroups(mp)) {
1071 /*
1072 * For file systems with the rtgroups feature, the RT bitmap and
1073 * summary are always fully allocated, which means that we never
1074 * need to grow the existing files.
1075 *
1076 * But we have to be careful to only fill the bitmap until the
1077 * end of the actually used range.
1078 */
1079 if (rtg_rgno(rtg) == nmp->m_sb.sb_rgcount - 1)
1080 *nrbmblocks = xfs_rtbitmap_blockcount_len(nmp,
1081 xfs_last_rtgroup_extents(nmp));
1082
1083 if (mp->m_sb.sb_rgcount &&
1084 rtg_rgno(rtg) == mp->m_sb.sb_rgcount - 1)
1085 goto out_free;
1086 } else {
1087 /*
1088 * Get the old block counts for bitmap and summary inodes.
1089 * These can't change since other growfs callers are locked out.
1090 */
1091 orbmblocks = XFS_B_TO_FSB(mp, rbmip->i_disk_size);
1092 orsumblocks = XFS_B_TO_FSB(mp, rsumip->i_disk_size);
1093 }
1094
1095 error = xfs_rtfile_initialize_blocks(rtg, XFS_RTGI_BITMAP, orbmblocks,
1096 nmp->m_sb.sb_rbmblocks, NULL);
1097 if (error)
1098 goto out_free;
1099 error = xfs_rtfile_initialize_blocks(rtg, XFS_RTGI_SUMMARY, orsumblocks,
1100 nmp->m_rsumblocks, NULL);
1101 out_free:
1102 kfree(nmp);
1103 return error;
1104 }
1105
1106 static int
1107 xfs_growfs_rtg(
1108 struct xfs_mount *mp,
1109 xfs_rgnumber_t rgno,
1110 xfs_rfsblock_t nrblocks,
1111 xfs_agblock_t rextsize)
1112 {
1113 uint8_t *old_rsum_cache = NULL;
1114 xfs_extlen_t bmblocks;
1115 xfs_fileoff_t bmbno;
1116 struct xfs_rtgroup *rtg;
1117 unsigned int i;
1118 int error;
1119
1120 rtg = xfs_rtgroup_grab(mp, rgno);
1121 if (!rtg)
1122 return -EINVAL;
1123
1124 for (i = 0; i < XFS_RTGI_MAX; i++) {
1125 error = xfs_rtginode_ensure(rtg, i);
1126 if (error)
1127 goto out_rele;
1128 }
1129
1130 error = xfs_growfs_rt_alloc_blocks(rtg, nrblocks, rextsize, &bmblocks);
1131 if (error)
1132 goto out_rele;
1133
1134 if (bmblocks != rtg_mount(rtg)->m_sb.sb_rbmblocks) {
1135 old_rsum_cache = rtg->rtg_rsum_cache;
1136 error = xfs_alloc_rsum_cache(rtg, bmblocks);
1137 if (error)
1138 goto out_rele;
1139 }
1140
1141 for (bmbno = xfs_last_rt_bmblock(rtg); bmbno < bmblocks; bmbno++) {
1142 error = xfs_growfs_rt_bmblock(rtg, nrblocks, rextsize, bmbno);
1143 if (error)
1144 goto out_error;
1145 }
1146
1147 if (old_rsum_cache)
1148 kvfree(old_rsum_cache);
1149 xfs_rtgroup_rele(rtg);
1150 return 0;
1151
1152 out_error:
1153 /*
1154 * Reset rtg_extents to the old value if adding more blocks failed.
1155 */
1156 xfs_rtgroup_calc_geometry(mp, rtg, rtg_rgno(rtg), mp->m_sb.sb_rgcount,
1157 mp->m_sb.sb_rextents);
1158 if (old_rsum_cache) {
1159 kvfree(rtg->rtg_rsum_cache);
1160 rtg->rtg_rsum_cache = old_rsum_cache;
1161 }
1162 out_rele:
1163 xfs_rtgroup_rele(rtg);
1164 return error;
1165 }
1166
1167 int
1168 xfs_growfs_check_rtgeom(
1169 const struct xfs_mount *mp,
1170 xfs_rfsblock_t dblocks,
1171 xfs_rfsblock_t rblocks,
1172 xfs_extlen_t rextsize)
1173 {
1174 xfs_extlen_t min_logfsbs;
1175 struct xfs_mount *nmp;
1176
1177 nmp = xfs_growfs_rt_alloc_fake_mount(mp, rblocks, rextsize);
1178 if (!nmp)
1179 return -ENOMEM;
1180 nmp->m_sb.sb_dblocks = dblocks;
1181
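/*
 * Recompute the rt btree maxlevels and reservations against the fake
 * post-grow mount so that the log size check below reflects the new
 * geometry.
 */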
1182 xfs_rtrmapbt_compute_maxlevels(nmp);
1183 xfs_rtrefcountbt_compute_maxlevels(nmp);
1184 xfs_trans_resv_calc(nmp, M_RES(nmp));
1185
1186 /*
1187 * New summary size can't be more than half the size of the log. This
1188 * prevents us from getting a log overflow, since we'll log basically
1189 * the whole summary file at once.
1190 */
1191 min_logfsbs = min_t(xfs_extlen_t, xfs_log_calc_minimum_size(nmp),
1192 nmp->m_rsumblocks * 2);
1193
1194 kfree(nmp);
1195
1196 if (min_logfsbs > mp->m_sb.sb_logblocks)
1197 return -EINVAL;
1198 return 0;
1199 }
1200
1201 /*
1202 * Compute the new number of rt groups and ensure that /rtgroups exists.
1203 *
1204 * Changing the rtgroup size is not allowed (even if the rt volume hasn't yet
1205 * been initialized) because the userspace ABI doesn't support it.
1206 */
1207 static int
1208 xfs_growfs_rt_prep_groups(
1209 struct xfs_mount *mp,
1210 xfs_rfsblock_t rblocks,
1211 xfs_extlen_t rextsize,
1212 xfs_rgnumber_t *new_rgcount)
1213 {
1214 int error;
1215
1216 *new_rgcount = howmany_64(rblocks, mp->m_sb.sb_rgextents * rextsize);
1217 if (*new_rgcount > XFS_MAX_RGNUMBER)
1218 return -EINVAL;
1219
1220 /* Make sure the /rtgroups dir has been created */
1221 if (!mp->m_rtdirip) {
1222 struct xfs_trans *tp;
1223
1224 error = xfs_trans_alloc_empty(mp, &tp);
1225 if (error)
1226 return error;
1227 error = xfs_rtginode_load_parent(tp);
1228 xfs_trans_cancel(tp);
1229
1230 if (error == -ENOENT)
1231 error = xfs_rtginode_mkdir_parent(mp);
1232 if (error)
1233 return error;
1234 }
1235
1236 return 0;
1237 }
1238
1239 static bool
1240 xfs_grow_last_rtg(
1241 struct xfs_mount *mp)
1242 {
1243 if (!xfs_has_rtgroups(mp))
1244 return true;
1245 if (mp->m_sb.sb_rgcount == 0)
1246 return false;
1247 return xfs_rtgroup_extents(mp, mp->m_sb.sb_rgcount - 1) <=
1248 mp->m_sb.sb_rgextents;
1249 }
1250
1251 /*
1252 * Grow the realtime area of the filesystem.
1253 */
1254 int
1255 xfs_growfs_rt(
1256 struct xfs_mount *mp,
1257 struct xfs_growfs_rt *in)
1258 {
1259 xfs_rgnumber_t old_rgcount = mp->m_sb.sb_rgcount;
1260 xfs_rgnumber_t new_rgcount = 1;
1261 xfs_rgnumber_t rgno;
1262 struct xfs_buf *bp;
1263 xfs_agblock_t old_rextsize = mp->m_sb.sb_rextsize;
1264 int error;
1265
1266 if (!capable(CAP_SYS_ADMIN))
1267 return -EPERM;
1268
1269 /* Needs to have been mounted with an rt device. */
1270 if (!XFS_IS_REALTIME_MOUNT(mp))
1271 return -EINVAL;
1272
1273 if (!mutex_trylock(&mp->m_growlock))
1274 return -EWOULDBLOCK;
1275
1276 /* Shrink not supported. */
1277 error = -EINVAL;
1278 if (in->newblocks <= mp->m_sb.sb_rblocks)
1279 goto out_unlock;
1280 /* Can only change rt extent size when adding rt volume. */
1281 if (mp->m_sb.sb_rblocks > 0 && in->extsize != mp->m_sb.sb_rextsize)
1282 goto out_unlock;
1283
1284 /* Range check the extent size. */
1285 if (XFS_FSB_TO_B(mp, in->extsize) > XFS_MAX_RTEXTSIZE ||
1286 XFS_FSB_TO_B(mp, in->extsize) < XFS_MIN_RTEXTSIZE)
1287 goto out_unlock;
1288
1289 /* Check for features supported only on rtgroups filesystems. */
1290 error = -EOPNOTSUPP;
1291 if (!xfs_has_rtgroups(mp)) {
1292 if (xfs_has_rmapbt(mp))
1293 goto out_unlock;
1294 if (xfs_has_quota(mp))
1295 goto out_unlock;
1296 if (xfs_has_reflink(mp))
1297 goto out_unlock;
1298 } else if (xfs_has_reflink(mp) &&
1299 !xfs_reflink_supports_rextsize(mp, in->extsize))
1300 goto out_unlock;
1301
1302 error = xfs_sb_validate_fsb_count(&mp->m_sb, in->newblocks);
1303 if (error)
1304 goto out_unlock;
1305 /*
1306 * Read in the last block of the device, make sure it exists.
1307 */
1308 error = xfs_buf_read_uncached(mp->m_rtdev_targp,
1309 XFS_FSB_TO_BB(mp, in->newblocks - 1),
1310 XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
1311 if (error)
1312 goto out_unlock;
1313 xfs_buf_relse(bp);
1314
1315 /*
1316 * Calculate new parameters. These are the final values to be reached.
1317 */
1318 error = -EINVAL;
1319 if (in->newblocks < in->extsize)
1320 goto out_unlock;
1321
1322 /* Make sure the new fs size won't cause problems with the log. */
1323 error = xfs_growfs_check_rtgeom(mp, mp->m_sb.sb_dblocks, in->newblocks,
1324 in->extsize);
1325 if (error)
1326 goto out_unlock;
1327
1328 if (xfs_has_rtgroups(mp)) {
1329 error = xfs_growfs_rt_prep_groups(mp, in->newblocks,
1330 in->extsize, &new_rgcount);
1331 if (error)
1332 goto out_unlock;
1333 }
1334
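/*
 * Fill up the last existing rtgroup (or the sole group on pre-rtgroups
 * file systems) first; brand new rtgroups are added in the loop below.
 */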
1335 if (xfs_grow_last_rtg(mp)) {
1336 error = xfs_growfs_rtg(mp, old_rgcount - 1, in->newblocks,
1337 in->extsize);
1338 if (error)
1339 goto out_unlock;
1340 }
1341
1342 for (rgno = old_rgcount; rgno < new_rgcount; rgno++) {
1343 xfs_rtbxlen_t rextents = div_u64(in->newblocks, in->extsize);
1344
1345 error = xfs_rtgroup_alloc(mp, rgno, new_rgcount, rextents);
1346 if (error)
1347 goto out_unlock;
1348
1349 error = xfs_growfs_rtg(mp, rgno, in->newblocks, in->extsize);
1350 if (error) {
1351 struct xfs_rtgroup *rtg;
1352
1353 rtg = xfs_rtgroup_grab(mp, rgno);
1354 if (!WARN_ON_ONCE(!rtg)) {
1355 xfs_rtunmount_rtg(rtg);
1356 xfs_rtgroup_rele(rtg);
1357 xfs_rtgroup_free(mp, rgno);
1358 }
1359 break;
1360 }
1361 }
1362
1363 if (!error && old_rextsize != in->extsize)
1364 error = xfs_growfs_rt_fixup_extsize(mp);
1365
1366 /*
1367 * Update secondary superblocks now the physical grow has completed.
1368 *
1369 * Also do this in case of an error as we might have already
1370 * successfully updated one or more RTGs and incremented sb_rgcount.
1371 */
1372 if (!xfs_is_shutdown(mp)) {
1373 int error2 = xfs_update_secondary_sbs(mp);
1374
1375 if (!error)
1376 error = error2;
1377
1378 /* Reset the rt metadata btree space reservations. */
1379 xfs_rt_resv_free(mp);
1380 error2 = xfs_rt_resv_init(mp);
1381 if (error2 && error2 != -ENOSPC)
1382 error = error2;
1383 }
1384
1385 out_unlock:
1386 mutex_unlock(&mp->m_growlock);
1387 return error;
1388 }
1389
1390 /* Read the realtime superblock and attach it to the mount. */
1391 int
1392 xfs_rtmount_readsb(
1393 struct xfs_mount *mp)
1394 {
1395 struct xfs_buf *bp;
1396 int error;
1397
1398 if (!xfs_has_rtsb(mp))
1399 return 0;
1400 if (mp->m_sb.sb_rblocks == 0)
1401 return 0;
1402 if (mp->m_rtdev_targp == NULL) {
1403 xfs_warn(mp,
1404 "Filesystem has a realtime volume, use rtdev=device option");
1405 return -ENODEV;
1406 }
1407
1408 /* m_blkbb_log is not set up yet */
1409 error = xfs_buf_read_uncached(mp->m_rtdev_targp, XFS_RTSB_DADDR,
1410 mp->m_sb.sb_blocksize >> BBSHIFT, XBF_NO_IOACCT, &bp,
1411 &xfs_rtsb_buf_ops);
1412 if (error) {
1413 xfs_warn(mp, "rt sb validate failed with error %d.", error);
1414 /* bad CRC means corrupted metadata */
1415 if (error == -EFSBADCRC)
1416 error = -EFSCORRUPTED;
1417 return error;
1418 }
1419
1420 mp->m_rtsb_bp = bp;
1421 xfs_buf_unlock(bp);
1422 return 0;
1423 }
1424
1425 /* Detach the realtime superblock from the mount and free it. */
1426 void
1427 xfs_rtmount_freesb(
1428 struct xfs_mount *mp)
1429 {
1430 struct xfs_buf *bp = mp->m_rtsb_bp;
1431
1432 if (!bp)
1433 return;
1434
1435 xfs_buf_lock(bp);
1436 mp->m_rtsb_bp = NULL;
1437 xfs_buf_relse(bp);
1438 }
1439
1440 /*
1441 * Initialize realtime fields in the mount structure.
1442 */
1443 int /* error */
1444 xfs_rtmount_init(
1445 struct xfs_mount *mp) /* file system mount structure */
1446 {
1447 struct xfs_buf *bp; /* buffer for last block of subvolume */
1448 xfs_daddr_t d; /* address of last block of subvolume */
1449 int error;
1450
1451 if (mp->m_sb.sb_rblocks == 0)
1452 return 0;
1453 if (mp->m_rtdev_targp == NULL) {
1454 xfs_warn(mp,
1455 "Filesystem has a realtime volume, use rtdev=device option");
1456 return -ENODEV;
1457 }
1458
1459 mp->m_rsumblocks = xfs_rtsummary_blockcount(mp, &mp->m_rsumlevels);
1460
1461 /*
1462 * Check that the realtime section is an ok size.
1463 */
1464 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks);
1465 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_rblocks) {
1466 xfs_warn(mp, "realtime mount -- %llu != %llu",
1467 (unsigned long long) XFS_BB_TO_FSB(mp, d),
1468 (unsigned long long) mp->m_sb.sb_rblocks);
1469 return -EFBIG;
1470 }
1471 error = xfs_buf_read_uncached(mp->m_rtdev_targp,
1472 d - XFS_FSB_TO_BB(mp, 1),
1473 XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
1474 if (error) {
1475 xfs_warn(mp, "realtime device size check failed");
1476 return error;
1477 }
1478 xfs_buf_relse(bp);
1479 return 0;
1480 }
1481
1482 static int
1483 xfs_rtalloc_count_frextent(
1484 struct xfs_rtgroup *rtg,
1485 struct xfs_trans *tp,
1486 const struct xfs_rtalloc_rec *rec,
1487 void *priv)
1488 {
1489 uint64_t *valp = priv;
1490
1491 *valp += rec->ar_extcount;
1492 return 0;
1493 }
1494
1495 /*
1496 * Reinitialize the number of free realtime extents from the realtime bitmap.
1497 * Callers must ensure that there is no other activity in the filesystem.
1498 */
1499 int
1500 xfs_rtalloc_reinit_frextents(
1501 struct xfs_mount *mp)
1502 {
1503 uint64_t val = 0;
1504 int error;
1505
1506 struct xfs_rtgroup *rtg = NULL;
1507
1508 while ((rtg = xfs_rtgroup_next(mp, rtg))) {
1509 xfs_rtgroup_lock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
1510 error = xfs_rtalloc_query_all(rtg, NULL,
1511 xfs_rtalloc_count_frextent, &val);
1512 xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
1513 if (error) {
1514 xfs_rtgroup_rele(rtg);
1515 return error;
1516 }
1517 }
1518
1519 spin_lock(&mp->m_sb_lock);
1520 mp->m_sb.sb_frextents = val;
1521 spin_unlock(&mp->m_sb_lock);
1522 percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents);
1523 return 0;
1524 }
1525
1526 /* Free space reservations for rt metadata inodes. */
1527 void
1528 xfs_rt_resv_free(
1529 struct xfs_mount *mp)
1530 {
1531 struct xfs_rtgroup *rtg = NULL;
1532 unsigned int i;
1533
1534 while ((rtg = xfs_rtgroup_next(mp, rtg))) {
1535 for (i = 0; i < XFS_RTGI_MAX; i++)
1536 xfs_metafile_resv_free(rtg->rtg_inodes[i]);
1537 }
1538 }
1539
1540 /* Reserve space for rt metadata inodes' space expansion. */
1541 int
1542 xfs_rt_resv_init(
1543 struct xfs_mount *mp)
1544 {
1545 struct xfs_rtgroup *rtg = NULL;
1546 xfs_filblks_t ask;
1547 int error = 0;
1548
1549 while ((rtg = xfs_rtgroup_next(mp, rtg))) {
1550 int err2;
1551
1552 ask = xfs_rtrmapbt_calc_reserves(mp);
1553 err2 = xfs_metafile_resv_init(rtg_rmap(rtg), ask);
1554 if (err2 && !error)
1555 error = err2;
1556
1557 ask = xfs_rtrefcountbt_calc_reserves(mp);
1558 err2 = xfs_metafile_resv_init(rtg_refcount(rtg), ask);
1559 if (err2 && !error)
1560 error = err2;
1561 }
1562
1563 return error;
1564 }
1565
1566 /*
1567 * Read in the bmbt of an rt metadata inode so that we never have to load them
1568 * at runtime. This enables the use of shared ILOCKs for rtbitmap scans. Use
1569 * an empty transaction to avoid deadlocking on loops in the bmbt.
1570 */
1571 static inline int
1572 xfs_rtmount_iread_extents(
1573 struct xfs_trans *tp,
1574 struct xfs_inode *ip)
1575 {
1576 int error;
1577
1578 xfs_ilock(ip, XFS_ILOCK_EXCL);
1579
1580 error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1581 if (error)
1582 goto out_unlock;
1583
1584 if (xfs_inode_has_attr_fork(ip)) {
1585 error = xfs_iread_extents(tp, ip, XFS_ATTR_FORK);
1586 if (error)
1587 goto out_unlock;
1588 }
1589
1590 out_unlock:
1591 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1592 return error;
1593 }
1594
1595 static int
1596 xfs_rtmount_rtg(
1597 struct xfs_mount *mp,
1598 struct xfs_trans *tp,
1599 struct xfs_rtgroup *rtg)
1600 {
1601 int error, i;
1602
1603 for (i = 0; i < XFS_RTGI_MAX; i++) {
1604 error = xfs_rtginode_load(rtg, i, tp);
1605 if (error)
1606 return error;
1607
1608 if (rtg->rtg_inodes[i]) {
1609 error = xfs_rtmount_iread_extents(tp,
1610 rtg->rtg_inodes[i]);
1611 if (error)
1612 return error;
1613 }
1614 }
1615
1616 return xfs_alloc_rsum_cache(rtg, mp->m_sb.sb_rbmblocks);
1617 }
1618
1619 /*
1620 * Get the bitmap and summary inodes and the summary cache into the mount
1621 * structure at mount time.
1622 */
1623 int
1624 xfs_rtmount_inodes(
1625 struct xfs_mount *mp)
1626 {
1627 struct xfs_trans *tp;
1628 struct xfs_rtgroup *rtg = NULL;
1629 int error;
1630
1631 error = xfs_trans_alloc_empty(mp, &tp);
1632 if (error)
1633 return error;
1634
1635 if (xfs_has_rtgroups(mp) && mp->m_sb.sb_rgcount > 0) {
1636 error = xfs_rtginode_load_parent(tp);
1637 if (error)
1638 goto out_cancel;
1639 }
1640
1641 while ((rtg = xfs_rtgroup_next(mp, rtg))) {
1642 error = xfs_rtmount_rtg(mp, tp, rtg);
1643 if (error) {
1644 xfs_rtgroup_rele(rtg);
1645 xfs_rtunmount_inodes(mp);
1646 break;
1647 }
1648 }
1649
1650 out_cancel:
1651 xfs_trans_cancel(tp);
1652 return error;
1653 }
1654
1655 void
1656 xfs_rtunmount_inodes(
1657 struct xfs_mount *mp)
1658 {
1659 struct xfs_rtgroup *rtg = NULL;
1660
1661 while ((rtg = xfs_rtgroup_next(mp, rtg)))
1662 xfs_rtunmount_rtg(rtg);
1663 xfs_rtginode_irele(&mp->m_rtdirip);
1664 }
1665
1666 /*
1667 * Pick an extent for allocation at the start of a new realtime file.
1668 * Use the sequence number stored in the atime field of the bitmap inode.
1669 * Translate this to a fraction of the rtextents, and return the product
1670 * of rtextents and the fraction.
1671 * The fraction sequence is 0, 1/2, 1/4, 3/4, 1/8, ..., 7/8, 1/16, ...
1672 */
1673 static xfs_rtxnum_t
1674 xfs_rtpick_extent(
1675 struct xfs_rtgroup *rtg,
1676 struct xfs_trans *tp,
1677 xfs_rtxlen_t len) /* allocation length (rtextents) */
1678 {
1679 struct xfs_mount *mp = rtg_mount(rtg);
1680 struct xfs_inode *rbmip = rtg_bitmap(rtg);
1681 xfs_rtxnum_t b = 0; /* result rtext */
1682 int log2; /* log of sequence number */
1683 uint64_t resid; /* residual after log removed */
1684 uint64_t seq; /* sequence number of file creation */
1685 struct timespec64 ts; /* timespec in inode */
1686
1687 xfs_assert_ilocked(rbmip, XFS_ILOCK_EXCL);
1688
1689 ts = inode_get_atime(VFS_I(rbmip));
1690 if (!(rbmip->i_diflags & XFS_DIFLAG_NEWRTBM)) {
1691 rbmip->i_diflags |= XFS_DIFLAG_NEWRTBM;
1692 seq = 0;
1693 } else {
1694 seq = ts.tv_sec;
1695 }
1696 log2 = xfs_highbit64(seq);
1697 if (log2 != -1) {
1698 resid = seq - (1ULL << log2);
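/*
 * seq lies in [2^log2, 2^(log2 + 1)); map it to the fraction
 * (2 * resid + 1) / 2^(log2 + 1) of sb_rextents, which walks the
 * 1/2, 1/4, 3/4, 1/8, 3/8, ... sequence described above.
 */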
1699 b = (mp->m_sb.sb_rextents * ((resid << 1) + 1ULL)) >>
1700 (log2 + 1);
1701 if (b >= mp->m_sb.sb_rextents)
1702 div64_u64_rem(b, mp->m_sb.sb_rextents, &b);
1703 if (b + len > mp->m_sb.sb_rextents)
1704 b = mp->m_sb.sb_rextents - len;
1705 }
1706 ts.tv_sec = seq + 1;
1707 inode_set_atime_to_ts(VFS_I(rbmip), ts);
1708 xfs_trans_log_inode(tp, rbmip, XFS_ILOG_CORE);
1709 return b;
1710 }
1711
1712 static void
1713 xfs_rtalloc_align_minmax(
1714 xfs_rtxlen_t *raminlen,
1715 xfs_rtxlen_t *ramaxlen,
1716 xfs_rtxlen_t *prod)
1717 {
1718 xfs_rtxlen_t newmaxlen = *ramaxlen;
1719 xfs_rtxlen_t newminlen = *raminlen;
1720 xfs_rtxlen_t slack;
1721
1722 slack = newmaxlen % *prod;
1723 if (slack)
1724 newmaxlen -= slack;
1725 slack = newminlen % *prod;
1726 if (slack)
1727 newminlen += *prod - slack;
1728
1729 /*
1730 * If adjusting for extent size hint alignment produces an invalid
1731 * min/max len combination, go ahead without it.
1732 */
1733 if (newmaxlen < newminlen) {
1734 *prod = 1;
1735 return;
1736 }
1737 *ramaxlen = newmaxlen;
1738 *raminlen = newminlen;
1739 }
1740
1741 /* Given a free extent, find any part of it that isn't busy, if possible. */
1742 STATIC bool
1743 xfs_rtalloc_check_busy(
1744 struct xfs_rtalloc_args *args,
1745 xfs_rtxnum_t start,
1746 xfs_rtxlen_t minlen_rtx,
1747 xfs_rtxlen_t maxlen_rtx,
1748 xfs_rtxlen_t len_rtx,
1749 xfs_rtxlen_t prod,
1750 xfs_rtxnum_t rtx,
1751 xfs_rtxlen_t *reslen,
1752 xfs_rtxnum_t *resrtx,
1753 unsigned *busy_gen)
1754 {
1755 struct xfs_rtgroup *rtg = args->rtg;
1756 struct xfs_mount *mp = rtg_mount(rtg);
1757 xfs_agblock_t rgbno = xfs_rtx_to_rgbno(rtg, rtx);
1758 xfs_rgblock_t min_rgbno = xfs_rtx_to_rgbno(rtg, start);
1759 xfs_extlen_t minlen = xfs_rtxlen_to_extlen(mp, minlen_rtx);
1760 xfs_extlen_t len = xfs_rtxlen_to_extlen(mp, len_rtx);
1761 xfs_extlen_t diff;
1762 bool busy;
1763
1764 busy = xfs_extent_busy_trim(rtg_group(rtg), minlen,
1765 xfs_rtxlen_to_extlen(mp, maxlen_rtx), &rgbno, &len,
1766 busy_gen);
1767
1768 /*
1769 * If we have a largish extent that happens to start before min_rgbno,
1770 * see if we can shift it into range...
1771 */
1772 if (rgbno < min_rgbno && rgbno + len > min_rgbno) {
1773 diff = min_rgbno - rgbno;
1774 if (len > diff) {
1775 rgbno += diff;
1776 len -= diff;
1777 }
1778 }
1779
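/*
 * If an allocation alignment (prod) is in effect, round the unbusy start
 * up to that alignment and shorten the usable length by the amount
 * skipped.
 */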
1780 if (prod > 1 && len >= minlen) {
1781 xfs_rgblock_t aligned_rgbno = roundup(rgbno, prod);
1782
1783 diff = aligned_rgbno - rgbno;
1784
1785 *resrtx = xfs_rgbno_to_rtx(mp, aligned_rgbno);
1786 *reslen = xfs_extlen_to_rtxlen(mp,
1787 diff >= len ? 0 : len - diff);
1788 } else {
1789 *resrtx = xfs_rgbno_to_rtx(mp, rgbno);
1790 *reslen = xfs_extlen_to_rtxlen(mp, len);
1791 }
1792
1793 return busy;
1794 }
1795
1796 /*
1797 * Adjust the given free extent so that it isn't busy, or flush the log and
1798 * wait for the space to become unbusy. Only needed for rtgroups.
1799 */
1800 STATIC int
1801 xfs_rtallocate_adjust_for_busy(
1802 struct xfs_rtalloc_args *args,
1803 xfs_rtxnum_t start,
1804 xfs_rtxlen_t minlen,
1805 xfs_rtxlen_t maxlen,
1806 xfs_rtxlen_t *len,
1807 xfs_rtxlen_t prod,
1808 xfs_rtxnum_t *rtx)
1809 {
1810 xfs_rtxnum_t resrtx;
1811 xfs_rtxlen_t reslen;
1812 unsigned busy_gen;
1813 bool busy;
1814 int error;
1815
1816 again:
1817 busy = xfs_rtalloc_check_busy(args, start, minlen, maxlen, *len, prod,
1818 *rtx, &reslen, &resrtx, &busy_gen);
1819 if (!busy)
1820 return 0;
1821
1822 if (reslen < minlen || (start != 0 && resrtx != *rtx)) {
1823 /*
1824 * Enough of the extent was busy that we cannot satisfy the
1825 * allocation, or this is a near allocation and the start of
1826 * the extent is busy. Flush the log and wait for the busy
1827 * situation to resolve.
1828 */
1829 trace_xfs_rtalloc_extent_busy(args->rtg, start, minlen, maxlen,
1830 *len, prod, *rtx, busy_gen);
1831
1832 error = xfs_extent_busy_flush(args->tp, rtg_group(args->rtg),
1833 busy_gen, 0);
1834 if (error)
1835 return error;
1836
1837 goto again;
1838 }
1839
1840 /* Some of the free space wasn't busy, hand that back to the caller. */
1841 trace_xfs_rtalloc_extent_busy_trim(args->rtg, *rtx, *len, resrtx,
1842 reslen);
1843 *len = reslen;
1844 *rtx = resrtx;
1845
1846 return 0;
1847 }
1848
1849 static int
1850 xfs_rtallocate_rtg(
1851 struct xfs_trans *tp,
1852 xfs_rgnumber_t rgno,
1853 xfs_rtblock_t bno_hint,
1854 xfs_rtxlen_t minlen,
1855 xfs_rtxlen_t maxlen,
1856 xfs_rtxlen_t prod,
1857 bool wasdel,
1858 bool initial_user_data,
1859 bool *rtlocked,
1860 xfs_rtblock_t *bno,
1861 xfs_extlen_t *blen)
1862 {
1863 struct xfs_rtalloc_args args = {
1864 .mp = tp->t_mountp,
1865 .tp = tp,
1866 };
1867 xfs_rtxnum_t start = 0;
1868 xfs_rtxnum_t rtx;
1869 xfs_rtxlen_t len = 0;
1870 int error = 0;
1871
1872 args.rtg = xfs_rtgroup_grab(args.mp, rgno);
1873 if (!args.rtg)
1874 return -ENOSPC;
1875
1876 /*
1877 * We need to lock out modifications to both the RT bitmap and summary
1878 * inodes for finding free space in xfs_rtallocate_extent_{near,size}
1879 * and join the bitmap and summary inodes for the actual allocation
1880 * down in xfs_rtallocate_range.
1881 *
1882 	 * For RTG-enabled file systems we don't want to join the inodes to the
1883 	 * transaction until we are committed to allocating from this RTG, so
1884 	 * that only one inode of each type is locked at a time.
1885 *
1886 	 * But for pre-RTG file systems we already need to join the bitmap
1887 	 * inode to the transaction for xfs_rtpick_extent, which bumps the
1888 	 * sequence number in it, so we have to join the inode to the
1889 	 * transaction early here.
1890 *
1891 * This is all a bit messy, but at least the mess is contained in
1892 * this function.
1893 */
1894 if (!*rtlocked) {
1895 xfs_rtgroup_lock(args.rtg, XFS_RTGLOCK_BITMAP);
1896 if (!xfs_has_rtgroups(args.mp))
1897 xfs_rtgroup_trans_join(tp, args.rtg,
1898 XFS_RTGLOCK_BITMAP);
1899 *rtlocked = true;
1900 }
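	/*
	 * Note: the rtgroup's bitmap and summary inodes are locked here in
	 * all cases, but are only joined to the transaction immediately on
	 * pre-RTG file systems.  With rtgroups, the join is deferred to the
	 * xfs_rtgroup_trans_join() call further down, after a free extent
	 * has actually been found, so only the rtgroup we allocate from ends
	 * up with its inodes attached to the transaction.
	 */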
1901
1902 /*
1903 * For an allocation to an empty file at offset 0, pick an extent that
1904 * will space things out in the rt area.
1905 */
1906 if (bno_hint != NULLFSBLOCK)
1907 start = xfs_rtb_to_rtx(args.mp, bno_hint);
1908 else if (!xfs_has_rtgroups(args.mp) && initial_user_data)
1909 start = xfs_rtpick_extent(args.rtg, tp, maxlen);
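	/*
	 * In the pre-RTG initial-data case, xfs_rtpick_extent derives the
	 * starting rt extent from the sequence number kept in the bitmap
	 * inode (mentioned in the comment above), so that new files are
	 * spread across the rt device instead of all starting at rtx 0.
	 */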
1910
1911 if (start) {
1912 error = xfs_rtallocate_extent_near(&args, start, minlen, maxlen,
1913 &len, prod, &rtx);
1914 /*
1915 * If we can't allocate near a specific rt extent, try again
1916 * without locality criteria.
1917 */
1918 if (error == -ENOSPC) {
1919 xfs_rtbuf_cache_relse(&args);
1920 error = 0;
1921 }
1922 }
1923
1924 if (!error) {
1925 error = xfs_rtallocate_extent_size(&args, minlen, maxlen, &len,
1926 prod, &rtx);
1927 }
1928
1929 if (error) {
1930 if (xfs_has_rtgroups(args.mp))
1931 goto out_unlock;
1932 goto out_release;
1933 }
1934
1935 if (xfs_has_rtgroups(args.mp)) {
1936 error = xfs_rtallocate_adjust_for_busy(&args, start, minlen,
1937 maxlen, &len, prod, &rtx);
1938 if (error)
1939 goto out_unlock;
1940
1941 xfs_rtgroup_trans_join(tp, args.rtg, XFS_RTGLOCK_BITMAP);
1942 }
1943
1944 error = xfs_rtallocate_range(&args, rtx, len);
1945 if (error)
1946 goto out_release;
1947
1948 xfs_trans_mod_sb(tp, wasdel ?
1949 XFS_TRANS_SB_RES_FREXTENTS : XFS_TRANS_SB_FREXTENTS,
1950 -(long)len);
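	/*
	 * Roughly speaking: for a delayed allocation (wasdel) the rt extents
	 * were already taken out of the free counter when the delalloc
	 * reservation was made, so the consumption is charged against the
	 * transaction's reservation (XFS_TRANS_SB_RES_FREXTENTS) instead of
	 * decrementing the global free rt extent count a second time.
	 */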
1951 *bno = xfs_rtx_to_rtb(args.rtg, rtx);
1952 *blen = xfs_rtxlen_to_extlen(args.mp, len);
1953
1954 out_release:
1955 xfs_rtgroup_rele(args.rtg);
1956 xfs_rtbuf_cache_relse(&args);
1957 return error;
1958 out_unlock:
1959 xfs_rtgroup_unlock(args.rtg, XFS_RTGLOCK_BITMAP);
1960 *rtlocked = false;
1961 goto out_release;
1962 }
1963
1964 int
1965 xfs_rtallocate_rtgs(
1966 struct xfs_trans *tp,
1967 xfs_fsblock_t bno_hint,
1968 xfs_rtxlen_t minlen,
1969 xfs_rtxlen_t maxlen,
1970 xfs_rtxlen_t prod,
1971 bool wasdel,
1972 bool initial_user_data,
1973 xfs_rtblock_t *bno,
1974 xfs_extlen_t *blen)
1975 {
1976 struct xfs_mount *mp = tp->t_mountp;
1977 xfs_rgnumber_t start_rgno, rgno;
1978 int error;
1979
1980 /*
1981 * For now this just blindly iterates over the RTGs for an initial
1982 * allocation. We could try to keep an in-memory rtg_longest member
1983 * to avoid the locking when just looking for big enough free space,
1984 * but for now this keeps things simple.
1985 */
1986 if (bno_hint != NULLFSBLOCK)
1987 start_rgno = xfs_rtb_to_rgno(mp, bno_hint);
1988 else
1989 start_rgno = (atomic_inc_return(&mp->m_rtgrotor) - 1) %
1990 mp->m_sb.sb_rgcount;
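	/*
	 * atomic_inc_return() yields the post-increment value, so subtracting
	 * one recovers the old rotor position.  For example (hypothetical
	 * numbers), with sb_rgcount = 4 and a rotor previously at 7 this
	 * evaluates to (8 - 1) % 4 = 3; the next hintless caller starts at
	 * rtgroup 0, and so on round-robin.
	 */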
1991
1992 rgno = start_rgno;
1993 do {
1994 bool rtlocked = false;
1995
1996 error = xfs_rtallocate_rtg(tp, rgno, bno_hint, minlen, maxlen,
1997 prod, wasdel, initial_user_data, &rtlocked,
1998 bno, blen);
1999 if (error != -ENOSPC)
2000 return error;
2001 ASSERT(!rtlocked);
2002
2003 if (++rgno == mp->m_sb.sb_rgcount)
2004 rgno = 0;
2005 bno_hint = NULLFSBLOCK;
2006 } while (rgno != start_rgno);
2007
2008 return -ENOSPC;
2009 }
2010
2011 static int
2012 xfs_rtallocate_align(
2013 struct xfs_bmalloca *ap,
2014 xfs_rtxlen_t *ralen,
2015 xfs_rtxlen_t *raminlen,
2016 xfs_rtxlen_t *prod,
2017 bool *noalign)
2018 {
2019 struct xfs_mount *mp = ap->ip->i_mount;
2020 xfs_fileoff_t orig_offset = ap->offset;
2021 xfs_extlen_t minlen = mp->m_sb.sb_rextsize;
2022 xfs_extlen_t align; /* minimum allocation alignment */
2023 	xfs_extlen_t		mod;	/* remainder from the alignment check */
2024 int error;
2025
2026 if (*noalign) {
2027 align = mp->m_sb.sb_rextsize;
2028 } else {
2029 if (ap->flags & XFS_BMAPI_COWFORK)
2030 align = xfs_get_cowextsz_hint(ap->ip);
2031 else
2032 align = xfs_get_extsz_hint(ap->ip);
2033 if (!align)
2034 align = 1;
2035 if (align == mp->m_sb.sb_rextsize)
2036 *noalign = true;
2037 }
2038
2039 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 1,
2040 ap->eof, 0, ap->conv, &ap->offset, &ap->length);
2041 if (error)
2042 return error;
2043 ASSERT(ap->length);
2044 ASSERT(xfs_extlen_to_rtxmod(mp, ap->length) == 0);
2045
2046 /*
2047 * If we shifted the file offset downward to satisfy an extent size
2048 * hint, increase minlen by that amount so that the allocator won't
2049 * give us an allocation that's too short to cover at least one of the
2050 * blocks that the caller asked for.
2051 */
2052 if (ap->offset != orig_offset)
2053 minlen += orig_offset - ap->offset;
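	/*
	 * Small example (hypothetical numbers): if the extent size hint moved
	 * ap->offset from block 10 down to block 8, minlen grows by 2 blocks
	 * so that even a minimum-length allocation still covers the first
	 * block the caller actually asked for.
	 */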
2054
2055 /*
2056 * Set ralen to be the actual requested length in rtextents.
2057 *
2058 	 * If the old value was close enough to XFS_MAX_BMBT_EXTLEN that
2059 	 * we rounded up to it, cut it back so it's valid again.
2060 	 * Note that if it's a really large request (bigger than
2061 	 * XFS_MAX_BMBT_EXTLEN), we don't hear about that number, and can't
2062 	 * adjust the starting point to match it.
2063 */
2064 *ralen = xfs_extlen_to_rtxlen(mp, min(ap->length, XFS_MAX_BMBT_EXTLEN));
2065 *raminlen = max_t(xfs_rtxlen_t, 1, xfs_extlen_to_rtxlen(mp, minlen));
2066 ASSERT(*raminlen > 0);
2067 ASSERT(*raminlen <= *ralen);
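	/*
	 * XFS_MAX_BMBT_EXTLEN is the largest extent length (in filesystem
	 * blocks) that a single bmbt record can describe, so anything the
	 * alignment code rounded up beyond that is clamped back before the
	 * conversion to rt extents above.
	 */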
2068
2069 /*
2070 * Only bother calculating a real prod factor if offset & length are
2071 * perfectly aligned, otherwise it will just get us in trouble.
2072 */
2073 div_u64_rem(ap->offset, align, &mod);
2074 if (mod || ap->length % align)
2075 *prod = 1;
2076 else
2077 *prod = xfs_extlen_to_rtxlen(mp, align);
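	/*
	 * Worked example (hypothetical numbers): with sb_rextsize = 4 blocks
	 * and an extent size hint of align = 16 blocks, an allocation at
	 * offset 32 blocks for 16 blocks is exactly aligned, so prod becomes
	 * 16 / 4 = 4 rt extents and xfs_rtalloc_align_minmax() just below
	 * rounds ralen down and raminlen up to multiples of 4 (dropping back
	 * to prod = 1 if those adjustments would cross).  Any misalignment of
	 * offset or length keeps prod = 1.
	 */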
2078
2079 if (*prod > 1)
2080 xfs_rtalloc_align_minmax(raminlen, ralen, prod);
2081 return 0;
2082 }
2083
2084 int
2085 xfs_bmap_rtalloc(
2086 struct xfs_bmalloca *ap)
2087 {
2088 xfs_fileoff_t orig_offset = ap->offset;
2089 xfs_rtxlen_t prod = 0; /* product factor for allocators */
2090 xfs_rtxlen_t ralen = 0; /* realtime allocation length */
2091 xfs_rtblock_t bno_hint = NULLRTBLOCK;
2092 xfs_extlen_t orig_length = ap->length;
2093 xfs_rtxlen_t raminlen;
2094 bool rtlocked = false;
2095 bool noalign = false;
2096 bool initial_user_data =
2097 ap->datatype & XFS_ALLOC_INITIAL_USER_DATA;
2098 int error;
2099
2100 retry:
2101 error = xfs_rtallocate_align(ap, &ralen, &raminlen, &prod, &noalign);
2102 if (error)
2103 return error;
2104
2105 if (xfs_bmap_adjacent(ap))
2106 bno_hint = ap->blkno;
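	/*
	 * xfs_bmap_adjacent() inspects the extents on either side of the
	 * requested range and, when it finds a usable neighbour, rewrites
	 * ap->blkno into a locality target; only in that case is it carried
	 * forward as bno_hint here.
	 */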
2107
2108 if (xfs_has_rtgroups(ap->ip->i_mount)) {
2109 error = xfs_rtallocate_rtgs(ap->tp, bno_hint, raminlen, ralen,
2110 prod, ap->wasdel, initial_user_data,
2111 &ap->blkno, &ap->length);
2112 } else {
2113 error = xfs_rtallocate_rtg(ap->tp, 0, bno_hint, raminlen, ralen,
2114 prod, ap->wasdel, initial_user_data,
2115 &rtlocked, &ap->blkno, &ap->length);
2116 }
2117
2118 if (error == -ENOSPC) {
2119 if (!noalign) {
2120 /*
2121 * We previously enlarged the request length to try to
2122 * satisfy an extent size hint. The allocator didn't
2123 * return anything, so reset the parameters to the
2124 * original values and try again without alignment
2125 * criteria.
2126 */
2127 ap->offset = orig_offset;
2128 ap->length = orig_length;
2129 noalign = true;
2130 goto retry;
2131 }
2132
2133 ap->blkno = NULLFSBLOCK;
2134 ap->length = 0;
2135 return 0;
2136 }
2137 if (error)
2138 return error;
2139
2140 xfs_bmap_alloc_account(ap);
2141 return 0;
2142 }
2143