1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_alloc.h"
16 #include "xfs_bmap.h"
17 #include "xfs_bmap_btree.h"
18 #include "xfs_bmap_util.h"
19 #include "xfs_trans.h"
20 #include "xfs_trans_space.h"
21 #include "xfs_icache.h"
22 #include "xfs_rtalloc.h"
23 #include "xfs_sb.h"
24 #include "xfs_rtbitmap.h"
25 #include "xfs_quota.h"
26 #include "xfs_log_priv.h"
27 #include "xfs_health.h"
28 #include "xfs_da_format.h"
29 #include "xfs_metafile.h"
30 #include "xfs_rtgroup.h"
31 #include "xfs_error.h"
32 #include "xfs_trace.h"
33
34 /*
35 * Return whether there are any free extents in the size range given
36 * by low and high, for the bitmap block bbno.
37 */
38 STATIC int
39 xfs_rtany_summary(
40 struct xfs_rtalloc_args *args,
41 int low, /* low log2 extent size */
42 int high, /* high log2 extent size */
43 xfs_fileoff_t bbno, /* bitmap block number */
44 int *maxlog) /* out: max log2 extent size free */
45 {
46 uint8_t *rsum_cache = args->rtg->rtg_rsum_cache;
47 int error;
48 int log; /* loop counter, log2 of ext. size */
49 xfs_suminfo_t sum; /* summary data */
50
51 /* There are no extents at levels >= rsum_cache[bbno]. */
52 if (rsum_cache) {
53 high = min(high, rsum_cache[bbno] - 1);
54 if (low > high) {
55 *maxlog = -1;
56 return 0;
57 }
58 }
59
60 /*
61 * Loop over logs of extent sizes.
62 */
63 for (log = high; log >= low; log--) {
64 /*
65 * Get one summary datum.
66 */
67 error = xfs_rtget_summary(args, log, bbno, &sum);
68 if (error) {
69 return error;
70 }
71 /*
72 * If there are any, return success.
73 */
74 if (sum) {
75 *maxlog = log;
76 goto out;
77 }
78 }
79 /*
80 * Found nothing, return failure.
81 */
82 *maxlog = -1;
83 out:
84 /* There were no extents at levels > log. */
85 if (rsum_cache && log + 1 < rsum_cache[bbno])
86 rsum_cache[bbno] = log + 1;
87 return 0;
88 }
89
90 /*
91 * Copy and transform the summary file, given the old and new
92 * parameters in the mount structures.
93 */
94 STATIC int
95 xfs_rtcopy_summary(
96 struct xfs_rtalloc_args *oargs,
97 struct xfs_rtalloc_args *nargs)
98 {
99 xfs_fileoff_t bbno; /* bitmap block number */
100 int error;
101 int log; /* summary level number (log length) */
102 xfs_suminfo_t sum; /* summary data */
103
104 for (log = oargs->mp->m_rsumlevels - 1; log >= 0; log--) {
105 for (bbno = oargs->mp->m_sb.sb_rbmblocks - 1;
106 (xfs_srtblock_t)bbno >= 0;
107 bbno--) {
108 error = xfs_rtget_summary(oargs, log, bbno, &sum);
109 if (error)
110 goto out;
111 if (sum == 0)
112 continue;
113 error = xfs_rtmodify_summary(oargs, log, bbno, -sum);
114 if (error)
115 goto out;
116 error = xfs_rtmodify_summary(nargs, log, bbno, sum);
117 if (error)
118 goto out;
119 ASSERT(sum > 0);
120 }
121 }
122 error = 0;
123 out:
124 xfs_rtbuf_cache_relse(oargs);
125 return error;
126 }
127 /*
128 * Mark an extent specified by start and len allocated.
129 * Updates all the summary information as well as the bitmap.
130 */
131 STATIC int
132 xfs_rtallocate_range(
133 struct xfs_rtalloc_args *args,
134 xfs_rtxnum_t start, /* start rtext to allocate */
135 xfs_rtxlen_t len) /* number of rtextents to allocate */
136 {
137 struct xfs_mount *mp = args->mp;
138 xfs_rtxnum_t end; /* end of the allocated rtext */
139 int error;
140 xfs_rtxnum_t postblock = 0; /* first rtext allocated > end */
141 xfs_rtxnum_t preblock = 0; /* first rtext allocated < start */
142
143 end = start + len - 1;
144 /*
145 * Assume we're allocating out of the middle of a free extent.
146 * We need to find the beginning and end of the extent so we can
147 * properly update the summary.
148 */
149 error = xfs_rtfind_back(args, start, &preblock);
150 if (error)
151 return error;
152
153 /*
154 * Find the next allocated block (end of free extent).
155 */
156 error = xfs_rtfind_forw(args, end, args->rtg->rtg_extents - 1,
157 &postblock);
158 if (error)
159 return error;
160
161 /*
162 * Decrement the summary information corresponding to the entire
163 * (old) free extent.
164 */
165 error = xfs_rtmodify_summary(args,
166 xfs_highbit64(postblock + 1 - preblock),
167 xfs_rtx_to_rbmblock(mp, preblock), -1);
168 if (error)
169 return error;
170
171 /*
172 * If there are blocks not being allocated at the front of the
173 * old extent, add summary data for them to be free.
174 */
175 if (preblock < start) {
176 error = xfs_rtmodify_summary(args,
177 xfs_highbit64(start - preblock),
178 xfs_rtx_to_rbmblock(mp, preblock), 1);
179 if (error)
180 return error;
181 }
182
183 /*
184 * If there are blocks not being allocated at the end of the
185 * old extent, add summary data for them to be free.
186 */
187 if (postblock > end) {
188 error = xfs_rtmodify_summary(args,
189 xfs_highbit64(postblock - end),
190 xfs_rtx_to_rbmblock(mp, end + 1), 1);
191 if (error)
192 return error;
193 }
194
195 /*
196 * Modify the bitmap to mark this extent allocated.
197 */
198 return xfs_rtmodify_range(args, start, len, 0);
199 }
200
201 /* Reduce @rtxlen until it is a multiple of @prod. */
202 static inline xfs_rtxlen_t
203 xfs_rtalloc_align_len(
204 xfs_rtxlen_t rtxlen,
205 xfs_rtxlen_t prod)
206 {
207 if (unlikely(prod > 1))
208 return rounddown(rtxlen, prod);
209 return rtxlen;
210 }
211
212 /*
213 * Make sure we don't run off the end of the rt volume. Be careful that
214 * adjusting maxlen downwards doesn't cause us to fail the alignment checks.
215 */
216 static inline xfs_rtxlen_t
217 xfs_rtallocate_clamp_len(
218 struct xfs_rtgroup *rtg,
219 xfs_rtxnum_t startrtx,
220 xfs_rtxlen_t rtxlen,
221 xfs_rtxlen_t prod)
222 {
223 xfs_rtxlen_t ret;
224
225 ret = min(rtg->rtg_extents, startrtx + rtxlen) - startrtx;
226 return xfs_rtalloc_align_len(ret, prod);
227 }
228
229 /*
230 * Attempt to allocate an extent minlen<=len<=maxlen starting from
231 * bitmap block bbno. If we don't get maxlen then use prod to trim
232 * the length, if given. Returns error; returns starting block in *rtx.
233 * The lengths are all in rtextents.
234 */
235 STATIC int
236 xfs_rtallocate_extent_block(
237 struct xfs_rtalloc_args *args,
238 xfs_fileoff_t bbno, /* bitmap block number */
239 xfs_rtxlen_t minlen, /* minimum length to allocate */
240 xfs_rtxlen_t maxlen, /* maximum length to allocate */
241 xfs_rtxlen_t *len, /* out: actual length allocated */
242 xfs_rtxnum_t *nextp, /* out: next rtext to try */
243 xfs_rtxlen_t prod, /* extent product factor */
244 xfs_rtxnum_t *rtx) /* out: start rtext allocated */
245 {
246 struct xfs_mount *mp = args->mp;
247 xfs_rtxnum_t besti = -1; /* best rtext found so far */
248 xfs_rtxnum_t end; /* last rtext in chunk */
249 xfs_rtxnum_t i; /* current rtext trying */
250 xfs_rtxnum_t next; /* next rtext to try */
251 xfs_rtxlen_t scanlen; /* number of free rtx to look for */
252 xfs_rtxlen_t bestlen = 0; /* best length found so far */
253 int stat; /* status from internal calls */
254 int error;
255
256 /*
257 * Loop over all the extents starting in this bitmap block up to the
258 * end of the rt volume, looking for one that's long enough.
259 */
260 end = min(args->rtg->rtg_extents, xfs_rbmblock_to_rtx(mp, bbno + 1)) -
261 1;
262 for (i = xfs_rbmblock_to_rtx(mp, bbno); i <= end; i++) {
263 /* Make sure we don't scan off the end of the rt volume. */
264 scanlen = xfs_rtallocate_clamp_len(args->rtg, i, maxlen, prod);
265 if (scanlen < minlen)
266 break;
267
268 /*
269 * See if there's a free extent of scanlen starting at i.
270 * If not, next will contain the first non-free rtext.
271 */
272 error = xfs_rtcheck_range(args, i, scanlen, 1, &next, &stat);
273 if (error)
274 return error;
275 if (stat) {
276 /*
277 * i to scanlen is all free, allocate and return that.
278 */
279 *len = scanlen;
280 *rtx = i;
281 return 0;
282 }
283
284 /*
285 * In the case where we have a variable-sized allocation
286 * request, figure out how big this free piece is,
287 * and if it's big enough for the minimum, and the best
288 * so far, remember it.
289 */
290 if (minlen < maxlen) {
291 xfs_rtxnum_t thislen; /* this extent size */
292
293 thislen = next - i;
294 if (thislen >= minlen && thislen > bestlen) {
295 besti = i;
296 bestlen = thislen;
297 }
298 }
299 /*
300 * If not done yet, find the start of the next free space.
301 */
302 if (next >= end)
303 break;
304 error = xfs_rtfind_forw(args, next, end, &i);
305 if (error)
306 return error;
307 }
308
309 /* Searched the whole thing & didn't find a maxlen free extent. */
310 if (besti == -1)
311 goto nospace;
312
313 /*
314 * Ensure bestlen is a multiple of prod, but don't return a too-short
315 * extent.
316 */
317 bestlen = xfs_rtalloc_align_len(bestlen, prod);
318 if (bestlen < minlen)
319 goto nospace;
320
321 /*
322 * Pick besti for bestlen & return that.
323 */
324 *len = bestlen;
325 *rtx = besti;
326 return 0;
327 nospace:
328 /* Allocation failed. Set *nextp to the next block to try. */
329 *nextp = next;
330 return -ENOSPC;
331 }
332
333 /*
334 * Allocate an extent of length minlen<=len<=maxlen, starting at block
335 * bno. If we don't get maxlen then use prod to trim the length, if given.
336 * Returns error; returns starting block in *rtx.
337 * The lengths are all in rtextents.
338 */
339 STATIC int
340 xfs_rtallocate_extent_exact(
341 struct xfs_rtalloc_args *args,
342 xfs_rtxnum_t start, /* starting rtext number to allocate */
343 xfs_rtxlen_t minlen, /* minimum length to allocate */
344 xfs_rtxlen_t maxlen, /* maximum length to allocate */
345 xfs_rtxlen_t *len, /* out: actual length allocated */
346 xfs_rtxlen_t prod, /* extent product factor */
347 xfs_rtxnum_t *rtx) /* out: start rtext allocated */
348 {
349 xfs_rtxnum_t next; /* next rtext to try (dummy) */
350 xfs_rtxlen_t alloclen; /* candidate length */
351 xfs_rtxlen_t scanlen; /* number of free rtx to look for */
352 int isfree; /* extent is free */
353 int error;
354
355 ASSERT(minlen % prod == 0);
356 ASSERT(maxlen % prod == 0);
357
358 /* Make sure we don't run off the end of the rt volume. */
359 scanlen = xfs_rtallocate_clamp_len(args->rtg, start, maxlen, prod);
360 if (scanlen < minlen)
361 return -ENOSPC;
362
363 /* Check if the range in question (for scanlen) is free. */
364 error = xfs_rtcheck_range(args, start, scanlen, 1, &next, &isfree);
365 if (error)
366 return error;
367
368 if (isfree) {
369 /* start to scanlen is all free; allocate it. */
370 *len = scanlen;
371 *rtx = start;
372 return 0;
373 }
374
375 /*
376 * If not, allocate what there is, if it's at least minlen.
377 */
378 alloclen = next - start;
379 if (alloclen < minlen)
380 return -ENOSPC;
381
382 /* Ensure alloclen is a multiple of prod. */
383 alloclen = xfs_rtalloc_align_len(alloclen, prod);
384 if (alloclen < minlen)
385 return -ENOSPC;
386
387 *len = alloclen;
388 *rtx = start;
389 return 0;
390 }
391
392 /*
393 * Allocate an extent of length minlen<=len<=maxlen, starting as near
394 * to start as possible. If we don't get maxlen then use prod to trim
395 * the length, if given. The lengths are all in rtextents.
396 */
397 STATIC int
398 xfs_rtallocate_extent_near(
399 struct xfs_rtalloc_args *args,
400 xfs_rtxnum_t start, /* starting rtext number to allocate */
401 xfs_rtxlen_t minlen, /* minimum length to allocate */
402 xfs_rtxlen_t maxlen, /* maximum length to allocate */
403 xfs_rtxlen_t *len, /* out: actual length allocated */
404 xfs_rtxlen_t prod, /* extent product factor */
405 xfs_rtxnum_t *rtx) /* out: start rtext allocated */
406 {
407 struct xfs_mount *mp = args->mp;
408 int maxlog; /* max useful extent from summary */
409 xfs_fileoff_t bbno; /* bitmap block number */
410 int error;
411 int i; /* bitmap block offset (loop control) */
412 int j; /* secondary loop control */
413 int log2len; /* log2 of minlen */
414 xfs_rtxnum_t n; /* next rtext to try */
415
416 ASSERT(minlen % prod == 0);
417 ASSERT(maxlen % prod == 0);
418
419 /*
420 * If the block number given is off the end, silently set it to the last
421 * block.
422 */
423 start = min(start, args->rtg->rtg_extents - 1);
424
425 /*
426 * Try the exact allocation first.
427 */
428 error = xfs_rtallocate_extent_exact(args, start, minlen, maxlen, len,
429 prod, rtx);
430 if (error != -ENOSPC)
431 return error;
432
433 bbno = xfs_rtx_to_rbmblock(mp, start);
434 i = 0;
435 j = -1;
436 ASSERT(minlen != 0);
437 log2len = xfs_highbit32(minlen);
438 /*
439 * Loop over all bitmap blocks (bbno + i is current block).
440 */
441 for (;;) {
442 /*
443 * Get summary information of extents of all useful levels
444 * starting in this bitmap block.
445 */
446 error = xfs_rtany_summary(args, log2len, mp->m_rsumlevels - 1,
447 bbno + i, &maxlog);
448 if (error)
449 return error;
450
451 /*
452 * If there are any useful extents starting here, try
453 * allocating one.
454 */
455 if (maxlog >= 0) {
456 xfs_extlen_t maxavail =
457 min_t(xfs_rtblock_t, maxlen,
458 (1ULL << (maxlog + 1)) - 1);
459 /*
460 * On the positive side of the starting location.
461 */
462 if (i >= 0) {
463 /*
464 * Try to allocate an extent starting in
465 * this block.
466 */
467 error = xfs_rtallocate_extent_block(args,
468 bbno + i, minlen, maxavail, len,
469 &n, prod, rtx);
470 if (error != -ENOSPC)
471 return error;
472 }
473 /*
474 * On the negative side of the starting location.
475 */
476 else { /* i < 0 */
477 int maxblocks;
478
479 /*
480 * Loop backwards to find the end of the extent
481 * we found in the realtime summary.
482 *
483 * maxblocks is the maximum possible number of
484 * bitmap blocks from the start of the extent
485 * to the end of the extent.
486 */
487 if (maxlog == 0)
488 maxblocks = 0;
489 else if (maxlog < mp->m_blkbit_log)
490 maxblocks = 1;
491 else
492 maxblocks = 2 << (maxlog - mp->m_blkbit_log);
493
494 /*
495 * We need to check bbno + i + maxblocks down to
496 * bbno + i. We already checked bbno down to
497 * bbno + j + 1, so we don't need to check those
498 * again.
499 */
500 j = min(i + maxblocks, j);
501 for (; j >= i; j--) {
502 error = xfs_rtallocate_extent_block(args,
503 bbno + j, minlen,
504 maxavail, len, &n, prod,
505 rtx);
506 if (error != -ENOSPC)
507 return error;
508 }
509 }
510 }
511 /*
512 * Loop control. If we were on the positive side, and there's
513 * still more blocks on the negative side, go there.
514 */
515 if (i > 0 && (int)bbno - i >= 0)
516 i = -i;
517 /*
518 * If positive, and no more negative, but there are more
519 * positive, go there.
520 */
521 else if (i > 0 && (int)bbno + i < mp->m_sb.sb_rbmblocks - 1)
522 i++;
523 /*
524 * If negative or 0 (just started), and there are positive
525 * blocks to go, go there. The 0 case moves to block 1.
526 */
527 else if (i <= 0 && (int)bbno - i < mp->m_sb.sb_rbmblocks - 1)
528 i = 1 - i;
529 /*
530 * If negative or 0 and there are more negative blocks,
531 * go there.
532 */
533 else if (i <= 0 && (int)bbno + i > 0)
534 i--;
535 /*
536 * Must be done. Return failure.
537 */
538 else
539 break;
540 }
541 return -ENOSPC;
542 }
543
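/*
 * Scan all bitmap blocks whose summary at level l shows free space, and try
 * to allocate an extent of at least minlen rtextents from the first one that
 * has a suitable run. Used by the by-size allocator below.
 */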
544 static int
545 xfs_rtalloc_sumlevel(
546 struct xfs_rtalloc_args *args,
547 int l, /* level number */
548 xfs_rtxlen_t minlen, /* minimum length to allocate */
549 xfs_rtxlen_t maxlen, /* maximum length to allocate */
550 xfs_rtxlen_t prod, /* extent product factor */
551 xfs_rtxlen_t *len, /* out: actual length allocated */
552 xfs_rtxnum_t *rtx) /* out: start rtext allocated */
553 {
554 xfs_fileoff_t i; /* bitmap block number */
555 int error;
556
557 for (i = 0; i < args->mp->m_sb.sb_rbmblocks; i++) {
558 xfs_suminfo_t sum; /* summary information for extents */
559 xfs_rtxnum_t n; /* next rtext to be tried */
560
561 error = xfs_rtget_summary(args, l, i, &sum);
562 if (error)
563 return error;
564
565 /*
566 * Nothing there, on to the next block.
567 */
568 if (!sum)
569 continue;
570
571 /*
572 * Try allocating the extent.
573 */
574 error = xfs_rtallocate_extent_block(args, i, minlen, maxlen,
575 len, &n, prod, rtx);
576 if (error != -ENOSPC)
577 return error;
578
579 /*
580 * If the "next block to try" returned from the allocator is
581 * beyond the next bitmap block, skip to that bitmap block.
582 */
583 if (xfs_rtx_to_rbmblock(args->mp, n) > i + 1)
584 i = xfs_rtx_to_rbmblock(args->mp, n) - 1;
585 }
586
587 return -ENOSPC;
588 }
589
590 /*
591 * Allocate an extent of length minlen<=len<=maxlen, with no position
592 * specified. If we don't get maxlen then use prod to trim
593 * the length, if given. The lengths are all in rtextents.
594 */
595 STATIC int
596 xfs_rtallocate_extent_size(
597 struct xfs_rtalloc_args *args,
598 xfs_rtxlen_t minlen, /* minimum length to allocate */
599 xfs_rtxlen_t maxlen, /* maximum length to allocate */
600 xfs_rtxlen_t *len, /* out: actual length allocated */
601 xfs_rtxlen_t prod, /* extent product factor */
602 xfs_rtxnum_t *rtx) /* out: start rtext allocated */
603 {
604 int error;
605 int l; /* level number (loop control) */
606
607 ASSERT(minlen % prod == 0);
608 ASSERT(maxlen % prod == 0);
609 ASSERT(maxlen != 0);
610
611 /*
612 * Loop over all the levels starting with maxlen.
613 *
614 * At each level, look at all the bitmap blocks, to see if there are
615 * extents starting there that are long enough (>= maxlen).
616 *
617 * Note, only on the initial level can the allocation fail if the
618 * summary says there's an extent.
619 */
620 for (l = xfs_highbit32(maxlen); l < args->mp->m_rsumlevels; l++) {
621 error = xfs_rtalloc_sumlevel(args, l, minlen, maxlen, prod, len,
622 rtx);
623 if (error != -ENOSPC)
624 return error;
625 }
626
627 /*
628 * Didn't find any maxlen blocks. Try smaller ones, unless we are
629 * looking for a fixed size extent.
630 */
631 if (minlen > --maxlen)
632 return -ENOSPC;
633 ASSERT(minlen != 0);
634 ASSERT(maxlen != 0);
635
636 /*
637 * Loop over sizes, from maxlen down to minlen.
638 *
639 * This time, when we do the allocations, allow smaller ones to succeed,
640 * but make sure the specified minlen/maxlen are in the possible range
641 * for this summary level.
642 */
643 for (l = xfs_highbit32(maxlen); l >= xfs_highbit32(minlen); l--) {
644 error = xfs_rtalloc_sumlevel(args, l,
645 max_t(xfs_rtxlen_t, minlen, 1 << l),
646 min_t(xfs_rtxlen_t, maxlen, (1 << (l + 1)) - 1),
647 prod, len, rtx);
648 if (error != -ENOSPC)
649 return error;
650 }
651
652 return -ENOSPC;
653 }
654
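/* Release the metadata inodes and the summary cache of a realtime group. */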
655 static void
656 xfs_rtunmount_rtg(
657 struct xfs_rtgroup *rtg)
658 {
659 int i;
660
661 for (i = 0; i < XFS_RTGI_MAX; i++)
662 xfs_rtginode_irele(&rtg->rtg_inodes[i]);
663 kvfree(rtg->rtg_rsum_cache);
664 }
665
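/* Allocate the realtime summary cache, one byte per bitmap block. */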
666 static int
667 xfs_alloc_rsum_cache(
668 struct xfs_rtgroup *rtg,
669 xfs_extlen_t rbmblocks)
670 {
671 /*
672 * The rsum cache is initialized to the maximum value, which is
673 * trivially an upper bound on the maximum level with any free extents.
674 */
675 rtg->rtg_rsum_cache = kvmalloc(rbmblocks, GFP_KERNEL);
676 if (!rtg->rtg_rsum_cache)
677 return -ENOMEM;
678 memset(rtg->rtg_rsum_cache, -1, rbmblocks);
679 return 0;
680 }
681
682 /*
683 * If we changed the rt extent size (meaning there was no rt volume previously)
684 * and the root directory had EXTSZINHERIT and RTINHERIT set, it's possible
685 * that the extent size hint on the root directory is no longer congruent with
686 * the new rt extent size. Log the rootdir inode to fix this.
687 */
688 static int
689 xfs_growfs_rt_fixup_extsize(
690 struct xfs_mount *mp)
691 {
692 struct xfs_inode *ip = mp->m_rootip;
693 struct xfs_trans *tp;
694 int error = 0;
695
696 xfs_ilock(ip, XFS_IOLOCK_EXCL);
697 if (!(ip->i_diflags & XFS_DIFLAG_RTINHERIT) ||
698 !(ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT))
699 goto out_iolock;
700
701 error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_ichange, 0, 0, false,
702 &tp);
703 if (error)
704 goto out_iolock;
705
706 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
707 error = xfs_trans_commit(tp);
708 xfs_iunlock(ip, XFS_ILOCK_EXCL);
709
710 out_iolock:
711 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
712 return error;
713 }
714
715 /* Ensure that the rtgroup metadata inode is loaded, creating it if needed. */
716 static int
717 xfs_rtginode_ensure(
718 struct xfs_rtgroup *rtg,
719 enum xfs_rtg_inodes type)
720 {
721 struct xfs_trans *tp;
722 int error;
723
724 if (rtg->rtg_inodes[type])
725 return 0;
726
727 error = xfs_trans_alloc_empty(rtg_mount(rtg), &tp);
728 if (error)
729 return error;
730 error = xfs_rtginode_load(rtg, type, tp);
731 xfs_trans_cancel(tp);
732
733 if (error != -ENOENT)
734 return error;
735 return xfs_rtginode_create(rtg, type, true);
736 }
737
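/*
 * Build a temporary copy of the mount structure carrying the post-grow
 * realtime geometry so the new bitmap, summary, and group sizes can be
 * computed without touching the live superblock.
 */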
738 static struct xfs_mount *
739 xfs_growfs_rt_alloc_fake_mount(
740 const struct xfs_mount *mp,
741 xfs_rfsblock_t rblocks,
742 xfs_agblock_t rextsize)
743 {
744 struct xfs_mount *nmp;
745
746 nmp = kmemdup(mp, sizeof(*mp), GFP_KERNEL);
747 if (!nmp)
748 return NULL;
749 xfs_mount_sb_set_rextsize(nmp, &nmp->m_sb, rextsize);
750 nmp->m_sb.sb_rblocks = rblocks;
751 nmp->m_sb.sb_rextents = xfs_blen_to_rtbxlen(nmp, nmp->m_sb.sb_rblocks);
752 nmp->m_sb.sb_rbmblocks = xfs_rtbitmap_blockcount(nmp);
753 nmp->m_sb.sb_rextslog = xfs_compute_rextslog(nmp->m_sb.sb_rextents);
754 if (xfs_has_rtgroups(nmp))
755 nmp->m_sb.sb_rgcount = howmany_64(nmp->m_sb.sb_rextents,
756 nmp->m_sb.sb_rgextents);
757 else
758 nmp->m_sb.sb_rgcount = 1;
759 nmp->m_rsumblocks = xfs_rtsummary_blockcount(nmp, &nmp->m_rsumlevels);
760
761 if (rblocks > 0)
762 nmp->m_features |= XFS_FEAT_REALTIME;
763
764 /* recompute growfsrt reservation from new rsumsize */
765 xfs_trans_resv_calc(nmp, &nmp->m_resv);
766 return nmp;
767 }
768
769 /* Free all the new space and return the number of extents actually freed. */
770 static int
771 xfs_growfs_rt_free_new(
772 struct xfs_rtgroup *rtg,
773 struct xfs_rtalloc_args *nargs,
774 xfs_rtbxlen_t *freed_rtx)
775 {
776 struct xfs_mount *mp = rtg_mount(rtg);
777 xfs_rgnumber_t rgno = rtg_rgno(rtg);
778 xfs_rtxnum_t start_rtx = 0, end_rtx;
779
780 if (rgno < mp->m_sb.sb_rgcount)
781 start_rtx = xfs_rtgroup_extents(mp, rgno);
782 end_rtx = xfs_rtgroup_extents(nargs->mp, rgno);
783
784 /*
785 * Compute the first new extent that we want to free, being careful to
786 * skip past a realtime superblock at the start of the realtime volume.
787 */
788 if (xfs_has_rtsb(nargs->mp) && rgno == 0 && start_rtx == 0)
789 start_rtx++;
790 *freed_rtx = end_rtx - start_rtx;
791 return xfs_rtfree_range(nargs, start_rtx, *freed_rtx);
792 }
793
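/*
 * Compute the rt block count covered by this grow step: everything up to and
 * including bitmap block bmbno, clamped to the new total and, with rtgroups,
 * to the end of this group.
 */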
794 static xfs_rfsblock_t
795 xfs_growfs_rt_nrblocks(
796 struct xfs_rtgroup *rtg,
797 xfs_rfsblock_t nrblocks,
798 xfs_agblock_t rextsize,
799 xfs_fileoff_t bmbno)
800 {
801 struct xfs_mount *mp = rtg_mount(rtg);
802 xfs_rfsblock_t step;
803
804 step = (bmbno + 1) * mp->m_rtx_per_rbmblock * rextsize;
805 if (xfs_has_rtgroups(mp)) {
806 xfs_rfsblock_t rgblocks = mp->m_sb.sb_rgextents * rextsize;
807
808 step = min(rgblocks, step) + rgblocks * rtg_rgno(rtg);
809 }
810
811 return min(nrblocks, step);
812 }
813
814 /*
815 * If the post-grow filesystem will have an rtsb; we're initializing the first
816 * rtgroup; and the filesystem didn't have a realtime section, write the rtsb
817 * now, and attach the rtsb buffer to the real mount.
818 */
819 static int
820 xfs_growfs_rt_init_rtsb(
821 const struct xfs_rtalloc_args *nargs,
822 const struct xfs_rtgroup *rtg,
823 const struct xfs_rtalloc_args *args)
824 {
825 struct xfs_mount *mp = args->mp;
826 struct xfs_buf *rtsb_bp;
827 int error;
828
829 if (!xfs_has_rtsb(nargs->mp))
830 return 0;
831 if (rtg_rgno(rtg) > 0)
832 return 0;
833 if (mp->m_sb.sb_rblocks)
834 return 0;
835
836 error = xfs_buf_get_uncached(mp->m_rtdev_targp, XFS_FSB_TO_BB(mp, 1),
837 0, &rtsb_bp);
838 if (error)
839 return error;
840
841 rtsb_bp->b_maps[0].bm_bn = XFS_RTSB_DADDR;
842 rtsb_bp->b_ops = &xfs_rtsb_buf_ops;
843
844 xfs_update_rtsb(rtsb_bp, mp->m_sb_bp);
845 mp->m_rtsb_bp = rtsb_bp;
846 error = xfs_bwrite(rtsb_bp);
847 xfs_buf_unlock(rtsb_bp);
848 return error;
849 }
850
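/*
 * Grow the realtime area by one bitmap block's worth of rtextents: compute the
 * new geometry on a fake mount, resize the bitmap and summary inodes, update
 * the superblock counters, and free the newly added extents.
 */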
851 static int
852 xfs_growfs_rt_bmblock(
853 struct xfs_rtgroup *rtg,
854 xfs_rfsblock_t nrblocks,
855 xfs_agblock_t rextsize,
856 xfs_fileoff_t bmbno)
857 {
858 struct xfs_mount *mp = rtg_mount(rtg);
859 struct xfs_inode *rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP];
860 struct xfs_inode *rsumip = rtg->rtg_inodes[XFS_RTGI_SUMMARY];
861 struct xfs_rtalloc_args args = {
862 .mp = mp,
863 .rtg = rtg,
864 };
865 struct xfs_rtalloc_args nargs = {
866 .rtg = rtg,
867 };
868 struct xfs_mount *nmp;
869 xfs_rtbxlen_t freed_rtx;
870 int error;
871
872 /*
873 * Calculate new sb and mount fields for this round. Also ensure the
874 * rtg_extents value is uptodate as the rtbitmap code relies on it.
875 */
876 nmp = nargs.mp = xfs_growfs_rt_alloc_fake_mount(mp,
877 xfs_growfs_rt_nrblocks(rtg, nrblocks, rextsize, bmbno),
878 rextsize);
879 if (!nmp)
880 return -ENOMEM;
881
882 xfs_rtgroup_calc_geometry(nmp, rtg, rtg_rgno(rtg),
883 nmp->m_sb.sb_rgcount, nmp->m_sb.sb_rextents);
884
885 /*
886 * Recompute the growfsrt reservation from the new rsumsize, so that the
887 * transaction below uses the new, potentially larger value.
888 */
889 xfs_trans_resv_calc(nmp, &nmp->m_resv);
890 error = xfs_trans_alloc(mp, &M_RES(nmp)->tr_growrtfree, 0, 0, 0,
891 &args.tp);
892 if (error)
893 goto out_free;
894 nargs.tp = args.tp;
895
896 xfs_rtgroup_lock(args.rtg, XFS_RTGLOCK_BITMAP);
897 xfs_rtgroup_trans_join(args.tp, args.rtg, XFS_RTGLOCK_BITMAP);
898
899 /*
900 * Update the bitmap inode's size ondisk and incore. We need to update
901 * the incore size so that inode inactivation won't punch what it thinks
902 * are "posteof" blocks.
903 */
904 rbmip->i_disk_size = nmp->m_sb.sb_rbmblocks * nmp->m_sb.sb_blocksize;
905 i_size_write(VFS_I(rbmip), rbmip->i_disk_size);
906 xfs_trans_log_inode(args.tp, rbmip, XFS_ILOG_CORE);
907
908 /*
909 * Update the summary inode's size. We need to update the incore size
910 * so that inode inactivation won't punch what it thinks are "posteof"
911 * blocks.
912 */
913 rsumip->i_disk_size = nmp->m_rsumblocks * nmp->m_sb.sb_blocksize;
914 i_size_write(VFS_I(rsumip), rsumip->i_disk_size);
915 xfs_trans_log_inode(args.tp, rsumip, XFS_ILOG_CORE);
916
917 /*
918 * Copy summary data from old to new sizes when the real size (not
919 * block-aligned) changes.
920 */
921 if (mp->m_sb.sb_rbmblocks != nmp->m_sb.sb_rbmblocks ||
922 mp->m_rsumlevels != nmp->m_rsumlevels) {
923 error = xfs_rtcopy_summary(&args, &nargs);
924 if (error)
925 goto out_cancel;
926 }
927
928 error = xfs_growfs_rt_init_rtsb(&nargs, rtg, &args);
929 if (error)
930 goto out_cancel;
931
932 /*
933 * Update superblock fields.
934 */
935 if (nmp->m_sb.sb_rextsize != mp->m_sb.sb_rextsize)
936 xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_REXTSIZE,
937 nmp->m_sb.sb_rextsize - mp->m_sb.sb_rextsize);
938 if (nmp->m_sb.sb_rbmblocks != mp->m_sb.sb_rbmblocks)
939 xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_RBMBLOCKS,
940 nmp->m_sb.sb_rbmblocks - mp->m_sb.sb_rbmblocks);
941 if (nmp->m_sb.sb_rblocks != mp->m_sb.sb_rblocks)
942 xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_RBLOCKS,
943 nmp->m_sb.sb_rblocks - mp->m_sb.sb_rblocks);
944 if (nmp->m_sb.sb_rextents != mp->m_sb.sb_rextents)
945 xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_REXTENTS,
946 nmp->m_sb.sb_rextents - mp->m_sb.sb_rextents);
947 if (nmp->m_sb.sb_rextslog != mp->m_sb.sb_rextslog)
948 xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_REXTSLOG,
949 nmp->m_sb.sb_rextslog - mp->m_sb.sb_rextslog);
950 if (nmp->m_sb.sb_rgcount != mp->m_sb.sb_rgcount)
951 xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_RGCOUNT,
952 nmp->m_sb.sb_rgcount - mp->m_sb.sb_rgcount);
953
954 /*
955 * Free the new extent.
956 */
957 error = xfs_growfs_rt_free_new(rtg, &nargs, &freed_rtx);
958 xfs_rtbuf_cache_relse(&nargs);
959 if (error)
960 goto out_cancel;
961
962 /*
963 * Mark more blocks free in the superblock.
964 */
965 xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_FREXTENTS, freed_rtx);
966
967 /*
968 * Update the calculated values in the real mount structure.
969 */
970 mp->m_rsumlevels = nmp->m_rsumlevels;
971 mp->m_rsumblocks = nmp->m_rsumblocks;
972
973 /*
974 * Recompute the growfsrt reservation from the new rsumsize.
975 */
976 xfs_trans_resv_calc(mp, &mp->m_resv);
977
978 error = xfs_trans_commit(args.tp);
979 if (error)
980 goto out_free;
981
982 /*
983 * Ensure the mount RT feature flag is now set.
984 */
985 mp->m_features |= XFS_FEAT_REALTIME;
986
987 kfree(nmp);
988 return 0;
989
990 out_cancel:
991 xfs_trans_cancel(args.tp);
992 out_free:
993 kfree(nmp);
994 return error;
995 }
996
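/* Number of rtextents in the last (possibly partial) realtime group. */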
997 static xfs_rtxnum_t
998 xfs_last_rtgroup_extents(
999 struct xfs_mount *mp)
1000 {
1001 return mp->m_sb.sb_rextents -
1002 ((xfs_rtxnum_t)(mp->m_sb.sb_rgcount - 1) *
1003 mp->m_sb.sb_rgextents);
1004 }
1005
1006 /*
1007 * Calculate the last rbmblock currently used.
1008 *
1009 * This also deals with the case where there were no rtextents before.
1010 */
1011 static xfs_fileoff_t
1012 xfs_last_rt_bmblock(
1013 struct xfs_rtgroup *rtg)
1014 {
1015 struct xfs_mount *mp = rtg_mount(rtg);
1016 xfs_rgnumber_t rgno = rtg_rgno(rtg);
1017 xfs_fileoff_t bmbno = 0;
1018
1019 ASSERT(!mp->m_sb.sb_rgcount || rgno >= mp->m_sb.sb_rgcount - 1);
1020
1021 if (mp->m_sb.sb_rgcount && rgno == mp->m_sb.sb_rgcount - 1) {
1022 xfs_rtxnum_t nrext = xfs_last_rtgroup_extents(mp);
1023
1024 /* Also fill up the previous block if not entirely full. */
1025 bmbno = xfs_rtbitmap_blockcount_len(mp, nrext);
1026 if (xfs_rtx_to_rbmword(mp, nrext) != 0)
1027 bmbno--;
1028 }
1029
1030 return bmbno;
1031 }
1032
1033 /*
1034 * Allocate space to the bitmap and summary files, as necessary.
1035 */
1036 static int
1037 xfs_growfs_rt_alloc_blocks(
1038 struct xfs_rtgroup *rtg,
1039 xfs_rfsblock_t nrblocks,
1040 xfs_agblock_t rextsize,
1041 xfs_extlen_t *nrbmblocks)
1042 {
1043 struct xfs_mount *mp = rtg_mount(rtg);
1044 struct xfs_inode *rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP];
1045 struct xfs_inode *rsumip = rtg->rtg_inodes[XFS_RTGI_SUMMARY];
1046 xfs_extlen_t orbmblocks = 0;
1047 xfs_extlen_t orsumblocks = 0;
1048 struct xfs_mount *nmp;
1049 int error = 0;
1050
1051 nmp = xfs_growfs_rt_alloc_fake_mount(mp, nrblocks, rextsize);
1052 if (!nmp)
1053 return -ENOMEM;
1054 *nrbmblocks = nmp->m_sb.sb_rbmblocks;
1055
1056 if (xfs_has_rtgroups(mp)) {
1057 /*
1058 * For file systems with the rtgroups feature, the RT bitmap and
1059 * summary are always fully allocated, which means that we never
1060 * need to grow the existing files.
1061 *
1062 * But we have to be careful to only fill the bitmap until the
1063 * end of the actually used range.
1064 */
1065 if (rtg_rgno(rtg) == nmp->m_sb.sb_rgcount - 1)
1066 *nrbmblocks = xfs_rtbitmap_blockcount_len(nmp,
1067 xfs_last_rtgroup_extents(nmp));
1068
1069 if (mp->m_sb.sb_rgcount &&
1070 rtg_rgno(rtg) == mp->m_sb.sb_rgcount - 1)
1071 goto out_free;
1072 } else {
1073 /*
1074 * Get the old block counts for bitmap and summary inodes.
1075 * These can't change since other growfs callers are locked out.
1076 */
1077 orbmblocks = XFS_B_TO_FSB(mp, rbmip->i_disk_size);
1078 orsumblocks = XFS_B_TO_FSB(mp, rsumip->i_disk_size);
1079 }
1080
1081 error = xfs_rtfile_initialize_blocks(rtg, XFS_RTGI_BITMAP, orbmblocks,
1082 nmp->m_sb.sb_rbmblocks, NULL);
1083 if (error)
1084 goto out_free;
1085 error = xfs_rtfile_initialize_blocks(rtg, XFS_RTGI_SUMMARY, orsumblocks,
1086 nmp->m_rsumblocks, NULL);
1087 out_free:
1088 kfree(nmp);
1089 return error;
1090 }
1091
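/*
 * Grow a single realtime group: make sure its metadata inodes exist, extend
 * the bitmap and summary files, and then add the new space one bitmap block
 * at a time.
 */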
1092 static int
1093 xfs_growfs_rtg(
1094 struct xfs_mount *mp,
1095 xfs_rgnumber_t rgno,
1096 xfs_rfsblock_t nrblocks,
1097 xfs_agblock_t rextsize)
1098 {
1099 uint8_t *old_rsum_cache = NULL;
1100 xfs_extlen_t bmblocks;
1101 xfs_fileoff_t bmbno;
1102 struct xfs_rtgroup *rtg;
1103 unsigned int i;
1104 int error;
1105
1106 rtg = xfs_rtgroup_grab(mp, rgno);
1107 if (!rtg)
1108 return -EINVAL;
1109
1110 for (i = 0; i < XFS_RTGI_MAX; i++) {
1111 error = xfs_rtginode_ensure(rtg, i);
1112 if (error)
1113 goto out_rele;
1114 }
1115
1116 error = xfs_growfs_rt_alloc_blocks(rtg, nrblocks, rextsize, &bmblocks);
1117 if (error)
1118 goto out_rele;
1119
1120 if (bmblocks != rtg_mount(rtg)->m_sb.sb_rbmblocks) {
1121 old_rsum_cache = rtg->rtg_rsum_cache;
1122 error = xfs_alloc_rsum_cache(rtg, bmblocks);
1123 if (error)
1124 goto out_rele;
1125 }
1126
1127 for (bmbno = xfs_last_rt_bmblock(rtg); bmbno < bmblocks; bmbno++) {
1128 error = xfs_growfs_rt_bmblock(rtg, nrblocks, rextsize, bmbno);
1129 if (error)
1130 goto out_error;
1131 }
1132
1133 if (old_rsum_cache)
1134 kvfree(old_rsum_cache);
1135 xfs_rtgroup_rele(rtg);
1136 return 0;
1137
1138 out_error:
1139 /*
1140 * Reset rtg_extents to the old value if adding more blocks failed.
1141 */
1142 xfs_rtgroup_calc_geometry(mp, rtg, rtg_rgno(rtg), mp->m_sb.sb_rgcount,
1143 mp->m_sb.sb_rextents);
1144 if (old_rsum_cache) {
1145 kvfree(rtg->rtg_rsum_cache);
1146 rtg->rtg_rsum_cache = old_rsum_cache;
1147 }
1148 out_rele:
1149 xfs_rtgroup_rele(rtg);
1150 return error;
1151 }
1152
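/* Check that the proposed post-grow realtime geometry fits within the log. */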
1153 static int
1154 xfs_growfs_check_rtgeom(
1155 const struct xfs_mount *mp,
1156 xfs_rfsblock_t rblocks,
1157 xfs_extlen_t rextsize)
1158 {
1159 struct xfs_mount *nmp;
1160 int error = 0;
1161
1162 nmp = xfs_growfs_rt_alloc_fake_mount(mp, rblocks, rextsize);
1163 if (!nmp)
1164 return -ENOMEM;
1165
1166 /*
1167 * New summary size can't be more than half the size of the log. This
1168 * prevents us from getting a log overflow, since we'll log basically
1169 * the whole summary file at once.
1170 */
1171 if (nmp->m_rsumblocks > (mp->m_sb.sb_logblocks >> 1))
1172 error = -EINVAL;
1173
1174 kfree(nmp);
1175 return error;
1176 }
1177
1178 /*
1179 * Compute the new number of rt groups and ensure that /rtgroups exists.
1180 *
1181 * Changing the rtgroup size is not allowed (even if the rt volume hasn't yet
1182 * been initialized) because the userspace ABI doesn't support it.
1183 */
1184 static int
1185 xfs_growfs_rt_prep_groups(
1186 struct xfs_mount *mp,
1187 xfs_rfsblock_t rblocks,
1188 xfs_extlen_t rextsize,
1189 xfs_rgnumber_t *new_rgcount)
1190 {
1191 int error;
1192
1193 *new_rgcount = howmany_64(rblocks, mp->m_sb.sb_rgextents * rextsize);
1194 if (*new_rgcount > XFS_MAX_RGNUMBER)
1195 return -EINVAL;
1196
1197 /* Make sure the /rtgroups dir has been created */
1198 if (!mp->m_rtdirip) {
1199 struct xfs_trans *tp;
1200
1201 error = xfs_trans_alloc_empty(mp, &tp);
1202 if (error)
1203 return error;
1204 error = xfs_rtginode_load_parent(tp);
1205 xfs_trans_cancel(tp);
1206
1207 if (error == -ENOENT)
1208 error = xfs_rtginode_mkdir_parent(mp);
1209 if (error)
1210 return error;
1211 }
1212
1213 return 0;
1214 }
1215
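/*
 * Decide whether to extend the existing last realtime group before adding
 * any new groups.
 */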
1216 static bool
1217 xfs_grow_last_rtg(
1218 struct xfs_mount *mp)
1219 {
1220 if (!xfs_has_rtgroups(mp))
1221 return true;
1222 if (mp->m_sb.sb_rgcount == 0)
1223 return false;
1224 return xfs_rtgroup_extents(mp, mp->m_sb.sb_rgcount - 1) <=
1225 mp->m_sb.sb_rgextents;
1226 }
1227
1228 /*
1229 * Grow the realtime area of the filesystem.
1230 */
1231 int
1232 xfs_growfs_rt(
1233 struct xfs_mount *mp,
1234 struct xfs_growfs_rt *in)
1235 {
1236 xfs_rgnumber_t old_rgcount = mp->m_sb.sb_rgcount;
1237 xfs_rgnumber_t new_rgcount = 1;
1238 xfs_rgnumber_t rgno;
1239 struct xfs_buf *bp;
1240 xfs_agblock_t old_rextsize = mp->m_sb.sb_rextsize;
1241 int error;
1242
1243 if (!capable(CAP_SYS_ADMIN))
1244 return -EPERM;
1245
1246 /* Needs to have been mounted with an rt device. */
1247 if (!XFS_IS_REALTIME_MOUNT(mp))
1248 return -EINVAL;
1249
1250 if (!mutex_trylock(&mp->m_growlock))
1251 return -EWOULDBLOCK;
1252
1253 /* Shrink not supported. */
1254 error = -EINVAL;
1255 if (in->newblocks <= mp->m_sb.sb_rblocks)
1256 goto out_unlock;
1257 /* Can only change rt extent size when adding rt volume. */
1258 if (mp->m_sb.sb_rblocks > 0 && in->extsize != mp->m_sb.sb_rextsize)
1259 goto out_unlock;
1260
1261 /* Range check the extent size. */
1262 if (XFS_FSB_TO_B(mp, in->extsize) > XFS_MAX_RTEXTSIZE ||
1263 XFS_FSB_TO_B(mp, in->extsize) < XFS_MIN_RTEXTSIZE)
1264 goto out_unlock;
1265
1266 /* Unsupported realtime features. */
1267 error = -EOPNOTSUPP;
1268 if (xfs_has_quota(mp) && !xfs_has_rtgroups(mp))
1269 goto out_unlock;
1270 if (xfs_has_rmapbt(mp) || xfs_has_reflink(mp))
1271 goto out_unlock;
1272
1273 error = xfs_sb_validate_fsb_count(&mp->m_sb, in->newblocks);
1274 if (error)
1275 goto out_unlock;
1276 /*
1277 * Read in the last block of the device, make sure it exists.
1278 */
1279 error = xfs_buf_read_uncached(mp->m_rtdev_targp,
1280 XFS_FSB_TO_BB(mp, in->newblocks - 1),
1281 XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
1282 if (error)
1283 goto out_unlock;
1284 xfs_buf_relse(bp);
1285
1286 /*
1287 * Calculate new parameters. These are the final values to be reached.
1288 */
1289 error = -EINVAL;
1290 if (in->newblocks < in->extsize)
1291 goto out_unlock;
1292
1293 /* Make sure the new fs size won't cause problems with the log. */
1294 error = xfs_growfs_check_rtgeom(mp, in->newblocks, in->extsize);
1295 if (error)
1296 goto out_unlock;
1297
1298 if (xfs_has_rtgroups(mp)) {
1299 error = xfs_growfs_rt_prep_groups(mp, in->newblocks,
1300 in->extsize, &new_rgcount);
1301 if (error)
1302 goto out_unlock;
1303 }
1304
1305 if (xfs_grow_last_rtg(mp)) {
1306 error = xfs_growfs_rtg(mp, old_rgcount - 1, in->newblocks,
1307 in->extsize);
1308 if (error)
1309 goto out_unlock;
1310 }
1311
1312 for (rgno = old_rgcount; rgno < new_rgcount; rgno++) {
1313 xfs_rtbxlen_t rextents = div_u64(in->newblocks, in->extsize);
1314
1315 error = xfs_rtgroup_alloc(mp, rgno, new_rgcount, rextents);
1316 if (error)
1317 goto out_unlock;
1318
1319 error = xfs_growfs_rtg(mp, rgno, in->newblocks, in->extsize);
1320 if (error) {
1321 struct xfs_rtgroup *rtg;
1322
1323 rtg = xfs_rtgroup_grab(mp, rgno);
1324 if (!WARN_ON_ONCE(!rtg)) {
1325 xfs_rtunmount_rtg(rtg);
1326 xfs_rtgroup_rele(rtg);
1327 xfs_rtgroup_free(mp, rgno);
1328 }
1329 break;
1330 }
1331 }
1332
1333 if (!error && old_rextsize != in->extsize)
1334 error = xfs_growfs_rt_fixup_extsize(mp);
1335
1336 /*
1337 * Update secondary superblocks now the physical grow has completed.
1338 *
1339 * Also do this in case of an error as we might have already
1340 * successfully updated one or more RTGs and incremented sb_rgcount.
1341 */
1342 if (!xfs_is_shutdown(mp)) {
1343 int error2 = xfs_update_secondary_sbs(mp);
1344
1345 if (!error)
1346 error = error2;
1347 }
1348
1349 out_unlock:
1350 mutex_unlock(&mp->m_growlock);
1351 return error;
1352 }
1353
1354 /* Read the realtime superblock and attach it to the mount. */
1355 int
1356 xfs_rtmount_readsb(
1357 struct xfs_mount *mp)
1358 {
1359 struct xfs_buf *bp;
1360 int error;
1361
1362 if (!xfs_has_rtsb(mp))
1363 return 0;
1364 if (mp->m_sb.sb_rblocks == 0)
1365 return 0;
1366 if (mp->m_rtdev_targp == NULL) {
1367 xfs_warn(mp,
1368 "Filesystem has a realtime volume, use rtdev=device option");
1369 return -ENODEV;
1370 }
1371
1372 /* m_blkbb_log is not set up yet */
1373 error = xfs_buf_read_uncached(mp->m_rtdev_targp, XFS_RTSB_DADDR,
1374 mp->m_sb.sb_blocksize >> BBSHIFT, XBF_NO_IOACCT, &bp,
1375 &xfs_rtsb_buf_ops);
1376 if (error) {
1377 xfs_warn(mp, "rt sb validate failed with error %d.", error);
1378 /* bad CRC means corrupted metadata */
1379 if (error == -EFSBADCRC)
1380 error = -EFSCORRUPTED;
1381 return error;
1382 }
1383
1384 mp->m_rtsb_bp = bp;
1385 xfs_buf_unlock(bp);
1386 return 0;
1387 }
1388
1389 /* Detach the realtime superblock from the mount and free it. */
1390 void
1391 xfs_rtmount_freesb(
1392 struct xfs_mount *mp)
1393 {
1394 struct xfs_buf *bp = mp->m_rtsb_bp;
1395
1396 if (!bp)
1397 return;
1398
1399 xfs_buf_lock(bp);
1400 mp->m_rtsb_bp = NULL;
1401 xfs_buf_relse(bp);
1402 }
1403
1404 /*
1405 * Initialize realtime fields in the mount structure.
1406 */
1407 int /* error */
1408 xfs_rtmount_init(
1409 struct xfs_mount *mp) /* file system mount structure */
1410 {
1411 struct xfs_buf *bp; /* buffer for last block of subvolume */
1412 xfs_daddr_t d; /* address of last block of subvolume */
1413 int error;
1414
1415 if (mp->m_sb.sb_rblocks == 0)
1416 return 0;
1417 if (mp->m_rtdev_targp == NULL) {
1418 xfs_warn(mp,
1419 "Filesystem has a realtime volume, use rtdev=device option");
1420 return -ENODEV;
1421 }
1422
1423 mp->m_rsumblocks = xfs_rtsummary_blockcount(mp, &mp->m_rsumlevels);
1424
1425 /*
1426 * Check that the realtime section is an ok size.
1427 */
1428 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks);
1429 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_rblocks) {
1430 xfs_warn(mp, "realtime mount -- %llu != %llu",
1431 (unsigned long long) XFS_BB_TO_FSB(mp, d),
1432 (unsigned long long) mp->m_sb.sb_rblocks);
1433 return -EFBIG;
1434 }
1435 error = xfs_buf_read_uncached(mp->m_rtdev_targp,
1436 d - XFS_FSB_TO_BB(mp, 1),
1437 XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
1438 if (error) {
1439 xfs_warn(mp, "realtime device size check failed");
1440 return error;
1441 }
1442 xfs_buf_relse(bp);
1443 return 0;
1444 }
1445
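/* Accumulate the length of each free extent record into the caller's total. */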
1446 static int
1447 xfs_rtalloc_count_frextent(
1448 struct xfs_rtgroup *rtg,
1449 struct xfs_trans *tp,
1450 const struct xfs_rtalloc_rec *rec,
1451 void *priv)
1452 {
1453 uint64_t *valp = priv;
1454
1455 *valp += rec->ar_extcount;
1456 return 0;
1457 }
1458
1459 /*
1460 * Reinitialize the number of free realtime extents from the realtime bitmap.
1461 * Callers must ensure that there is no other activity in the filesystem.
1462 */
1463 int
1464 xfs_rtalloc_reinit_frextents(
1465 struct xfs_mount *mp)
1466 {
1467 uint64_t val = 0;
1468 int error;
1469
1470 struct xfs_rtgroup *rtg = NULL;
1471
1472 while ((rtg = xfs_rtgroup_next(mp, rtg))) {
1473 xfs_rtgroup_lock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
1474 error = xfs_rtalloc_query_all(rtg, NULL,
1475 xfs_rtalloc_count_frextent, &val);
1476 xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
1477 if (error) {
1478 xfs_rtgroup_rele(rtg);
1479 return error;
1480 }
1481 }
1482
1483 spin_lock(&mp->m_sb_lock);
1484 mp->m_sb.sb_frextents = val;
1485 spin_unlock(&mp->m_sb_lock);
1486 percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents);
1487 return 0;
1488 }
1489
1490 /*
1491 * Read in the bmbt of the rt metadata inodes so that we never have to load them
1492 * at runtime. This enables the use of shared ILOCKs for rtbitmap scans. Use
1493 * an empty transaction to avoid deadlocking on loops in the bmbt.
1494 */
1495 static inline int
1496 xfs_rtmount_iread_extents(
1497 struct xfs_trans *tp,
1498 struct xfs_inode *ip)
1499 {
1500 int error;
1501
1502 xfs_ilock(ip, XFS_ILOCK_EXCL);
1503
1504 error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1505 if (error)
1506 goto out_unlock;
1507
1508 if (xfs_inode_has_attr_fork(ip)) {
1509 error = xfs_iread_extents(tp, ip, XFS_ATTR_FORK);
1510 if (error)
1511 goto out_unlock;
1512 }
1513
1514 out_unlock:
1515 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1516 return error;
1517 }
1518
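/*
 * Load the metadata inodes of a realtime group, pre-read their extent maps,
 * and set up the summary cache.
 */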
1519 static int
1520 xfs_rtmount_rtg(
1521 struct xfs_mount *mp,
1522 struct xfs_trans *tp,
1523 struct xfs_rtgroup *rtg)
1524 {
1525 int error, i;
1526
1527 for (i = 0; i < XFS_RTGI_MAX; i++) {
1528 error = xfs_rtginode_load(rtg, i, tp);
1529 if (error)
1530 return error;
1531
1532 if (rtg->rtg_inodes[i]) {
1533 error = xfs_rtmount_iread_extents(tp,
1534 rtg->rtg_inodes[i]);
1535 if (error)
1536 return error;
1537 }
1538 }
1539
1540 return xfs_alloc_rsum_cache(rtg, mp->m_sb.sb_rbmblocks);
1541 }
1542
1543 /*
1544 * Get the bitmap and summary inodes and the summary cache into the mount
1545 * structure at mount time.
1546 */
1547 int
1548 xfs_rtmount_inodes(
1549 struct xfs_mount *mp)
1550 {
1551 struct xfs_trans *tp;
1552 struct xfs_rtgroup *rtg = NULL;
1553 int error;
1554
1555 error = xfs_trans_alloc_empty(mp, &tp);
1556 if (error)
1557 return error;
1558
1559 if (xfs_has_rtgroups(mp) && mp->m_sb.sb_rgcount > 0) {
1560 error = xfs_rtginode_load_parent(tp);
1561 if (error)
1562 goto out_cancel;
1563 }
1564
1565 while ((rtg = xfs_rtgroup_next(mp, rtg))) {
1566 error = xfs_rtmount_rtg(mp, tp, rtg);
1567 if (error) {
1568 xfs_rtgroup_rele(rtg);
1569 xfs_rtunmount_inodes(mp);
1570 break;
1571 }
1572 }
1573
1574 out_cancel:
1575 xfs_trans_cancel(tp);
1576 return error;
1577 }
1578
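/* Tear down the realtime metadata inodes of all groups at unmount time. */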
1579 void
1580 xfs_rtunmount_inodes(
1581 struct xfs_mount *mp)
1582 {
1583 struct xfs_rtgroup *rtg = NULL;
1584
1585 while ((rtg = xfs_rtgroup_next(mp, rtg)))
1586 xfs_rtunmount_rtg(rtg);
1587 xfs_rtginode_irele(&mp->m_rtdirip);
1588 }
1589
1590 /*
1591 * Pick an extent for allocation at the start of a new realtime file.
1592 * Use the sequence number stored in the atime field of the bitmap inode.
1593 * Translate this to a fraction of the rtextents, and return the product
1594 * of rtextents and the fraction.
1595 * The fraction sequence is 0, 1/2, 1/4, 3/4, 1/8, ..., 7/8, 1/16, ...
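 * For example, seq = 5 gives log2 = 2 and resid = 1, so the chosen start is
 * 3/8 of the way through the rtextents.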
1596 */
1597 static xfs_rtxnum_t
1598 xfs_rtpick_extent(
1599 struct xfs_rtgroup *rtg,
1600 struct xfs_trans *tp,
1601 xfs_rtxlen_t len) /* allocation length (rtextents) */
1602 {
1603 struct xfs_mount *mp = rtg_mount(rtg);
1604 struct xfs_inode *rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP];
1605 xfs_rtxnum_t b = 0; /* result rtext */
1606 int log2; /* log of sequence number */
1607 uint64_t resid; /* residual after log removed */
1608 uint64_t seq; /* sequence number of file creation */
1609 struct timespec64 ts; /* timespec in inode */
1610
1611 xfs_assert_ilocked(rbmip, XFS_ILOCK_EXCL);
1612
1613 ts = inode_get_atime(VFS_I(rbmip));
1614 if (!(rbmip->i_diflags & XFS_DIFLAG_NEWRTBM)) {
1615 rbmip->i_diflags |= XFS_DIFLAG_NEWRTBM;
1616 seq = 0;
1617 } else {
1618 seq = ts.tv_sec;
1619 }
1620 log2 = xfs_highbit64(seq);
1621 if (log2 != -1) {
1622 resid = seq - (1ULL << log2);
1623 b = (mp->m_sb.sb_rextents * ((resid << 1) + 1ULL)) >>
1624 (log2 + 1);
1625 if (b >= mp->m_sb.sb_rextents)
1626 div64_u64_rem(b, mp->m_sb.sb_rextents, &b);
1627 if (b + len > mp->m_sb.sb_rextents)
1628 b = mp->m_sb.sb_rextents - len;
1629 }
1630 ts.tv_sec = seq + 1;
1631 inode_set_atime_to_ts(VFS_I(rbmip), ts);
1632 xfs_trans_log_inode(tp, rbmip, XFS_ILOG_CORE);
1633 return b;
1634 }
1635
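/*
 * Round the minimum and maximum allocation lengths to multiples of prod (the
 * extent size hint in rtextents), falling back to no alignment if that would
 * make the range empty.
 */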
1636 static void
1637 xfs_rtalloc_align_minmax(
1638 xfs_rtxlen_t *raminlen,
1639 xfs_rtxlen_t *ramaxlen,
1640 xfs_rtxlen_t *prod)
1641 {
1642 xfs_rtxlen_t newmaxlen = *ramaxlen;
1643 xfs_rtxlen_t newminlen = *raminlen;
1644 xfs_rtxlen_t slack;
1645
1646 slack = newmaxlen % *prod;
1647 if (slack)
1648 newmaxlen -= slack;
1649 slack = newminlen % *prod;
1650 if (slack)
1651 newminlen += *prod - slack;
1652
1653 /*
1654 * If adjusting for extent size hint alignment produces an invalid
1655 * min/max len combination, go ahead without it.
1656 */
1657 if (newmaxlen < newminlen) {
1658 *prod = 1;
1659 return;
1660 }
1661 *ramaxlen = newmaxlen;
1662 *raminlen = newminlen;
1663 }
1664
1665 /* Given a free extent, find any part of it that isn't busy, if possible. */
1666 STATIC bool
1667 xfs_rtalloc_check_busy(
1668 struct xfs_rtalloc_args *args,
1669 xfs_rtxnum_t start,
1670 xfs_rtxlen_t minlen_rtx,
1671 xfs_rtxlen_t maxlen_rtx,
1672 xfs_rtxlen_t len_rtx,
1673 xfs_rtxlen_t prod,
1674 xfs_rtxnum_t rtx,
1675 xfs_rtxlen_t *reslen,
1676 xfs_rtxnum_t *resrtx,
1677 unsigned *busy_gen)
1678 {
1679 struct xfs_rtgroup *rtg = args->rtg;
1680 struct xfs_mount *mp = rtg_mount(rtg);
1681 xfs_agblock_t rgbno = xfs_rtx_to_rgbno(rtg, rtx);
1682 xfs_rgblock_t min_rgbno = xfs_rtx_to_rgbno(rtg, start);
1683 xfs_extlen_t minlen = xfs_rtxlen_to_extlen(mp, minlen_rtx);
1684 xfs_extlen_t len = xfs_rtxlen_to_extlen(mp, len_rtx);
1685 xfs_extlen_t diff;
1686 bool busy;
1687
1688 busy = xfs_extent_busy_trim(rtg_group(rtg), minlen,
1689 xfs_rtxlen_to_extlen(mp, maxlen_rtx), &rgbno, &len,
1690 busy_gen);
1691
1692 /*
1693 * If we have a largish extent that happens to start before min_rgbno,
1694 * see if we can shift it into range...
1695 */
1696 if (rgbno < min_rgbno && rgbno + len > min_rgbno) {
1697 diff = min_rgbno - rgbno;
1698 if (len > diff) {
1699 rgbno += diff;
1700 len -= diff;
1701 }
1702 }
1703
1704 if (prod > 1 && len >= minlen) {
1705 xfs_rgblock_t aligned_rgbno = roundup(rgbno, prod);
1706
1707 diff = aligned_rgbno - rgbno;
1708
1709 *resrtx = xfs_rgbno_to_rtx(mp, aligned_rgbno);
1710 *reslen = xfs_extlen_to_rtxlen(mp,
1711 diff >= len ? 0 : len - diff);
1712 } else {
1713 *resrtx = xfs_rgbno_to_rtx(mp, rgbno);
1714 *reslen = xfs_extlen_to_rtxlen(mp, len);
1715 }
1716
1717 return busy;
1718 }
1719
1720 /*
1721 * Adjust the given free extent so that it isn't busy, or flush the log and
1722 * wait for the space to become unbusy. Only needed for rtgroups.
1723 */
1724 STATIC int
1725 xfs_rtallocate_adjust_for_busy(
1726 struct xfs_rtalloc_args *args,
1727 xfs_rtxnum_t start,
1728 xfs_rtxlen_t minlen,
1729 xfs_rtxlen_t maxlen,
1730 xfs_rtxlen_t *len,
1731 xfs_rtxlen_t prod,
1732 xfs_rtxnum_t *rtx)
1733 {
1734 xfs_rtxnum_t resrtx;
1735 xfs_rtxlen_t reslen;
1736 unsigned busy_gen;
1737 bool busy;
1738 int error;
1739
1740 again:
1741 busy = xfs_rtalloc_check_busy(args, start, minlen, maxlen, *len, prod,
1742 *rtx, &reslen, &resrtx, &busy_gen);
1743 if (!busy)
1744 return 0;
1745
1746 if (reslen < minlen || (start != 0 && resrtx != *rtx)) {
1747 /*
1748 * Enough of the extent was busy that we cannot satisfy the
1749 * allocation, or this is a near allocation and the start of
1750 * the extent is busy. Flush the log and wait for the busy
1751 * situation to resolve.
1752 */
1753 trace_xfs_rtalloc_extent_busy(args->rtg, start, minlen, maxlen,
1754 *len, prod, *rtx, busy_gen);
1755
1756 error = xfs_extent_busy_flush(args->tp, rtg_group(args->rtg),
1757 busy_gen, 0);
1758 if (error)
1759 return error;
1760
1761 goto again;
1762 }
1763
1764 /* Some of the free space wasn't busy, hand that back to the caller. */
1765 trace_xfs_rtalloc_extent_busy_trim(args->rtg, *rtx, *len, resrtx,
1766 reslen);
1767 *len = reslen;
1768 *rtx = resrtx;
1769
1770 return 0;
1771 }
1772
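/*
 * Try to allocate an extent from a single realtime group, honoring the block
 * number hint if one was given and falling back to a by-size search.
 */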
1773 static int
1774 xfs_rtallocate_rtg(
1775 struct xfs_trans *tp,
1776 xfs_rgnumber_t rgno,
1777 xfs_rtblock_t bno_hint,
1778 xfs_rtxlen_t minlen,
1779 xfs_rtxlen_t maxlen,
1780 xfs_rtxlen_t prod,
1781 bool wasdel,
1782 bool initial_user_data,
1783 bool *rtlocked,
1784 xfs_rtblock_t *bno,
1785 xfs_extlen_t *blen)
1786 {
1787 struct xfs_rtalloc_args args = {
1788 .mp = tp->t_mountp,
1789 .tp = tp,
1790 };
1791 xfs_rtxnum_t start = 0;
1792 xfs_rtxnum_t rtx;
1793 xfs_rtxlen_t len = 0;
1794 int error = 0;
1795
1796 args.rtg = xfs_rtgroup_grab(args.mp, rgno);
1797 if (!args.rtg)
1798 return -ENOSPC;
1799
1800 /*
1801 * We need to lock out modifications to both the RT bitmap and summary
1802 * inodes for finding free space in xfs_rtallocate_extent_{near,size}
1803 * and join the bitmap and summary inodes for the actual allocation
1804 * down in xfs_rtallocate_range.
1805 *
1806 * For RTG-enabled file systems we don't want to join the inodes to the
1807 * transaction until we are committed to allocating from this
1808 * RTG so that only one inode of each type is locked at a time.
1809 *
1810 * But for pre-RTG file systems we already need to join the bitmap
1811 * inode to the transaction for xfs_rtpick_extent, which bumps the
1812 * sequence number in it, so we'll have to join the inode to the
1813 * transaction early here.
1814 *
1815 * This is all a bit messy, but at least the mess is contained in
1816 * this function.
1817 */
1818 if (!*rtlocked) {
1819 xfs_rtgroup_lock(args.rtg, XFS_RTGLOCK_BITMAP);
1820 if (!xfs_has_rtgroups(args.mp))
1821 xfs_rtgroup_trans_join(tp, args.rtg,
1822 XFS_RTGLOCK_BITMAP);
1823 *rtlocked = true;
1824 }
1825
1826 /*
1827 * For an allocation to an empty file at offset 0, pick an extent that
1828 * will space things out in the rt area.
1829 */
1830 if (bno_hint != NULLFSBLOCK)
1831 start = xfs_rtb_to_rtx(args.mp, bno_hint);
1832 else if (!xfs_has_rtgroups(args.mp) && initial_user_data)
1833 start = xfs_rtpick_extent(args.rtg, tp, maxlen);
1834
1835 if (start) {
1836 error = xfs_rtallocate_extent_near(&args, start, minlen, maxlen,
1837 &len, prod, &rtx);
1838 /*
1839 * If we can't allocate near a specific rt extent, try again
1840 * without locality criteria.
1841 */
1842 if (error == -ENOSPC) {
1843 xfs_rtbuf_cache_relse(&args);
1844 error = 0;
1845 }
1846 }
1847
1848 if (!error) {
1849 error = xfs_rtallocate_extent_size(&args, minlen, maxlen, &len,
1850 prod, &rtx);
1851 }
1852
1853 if (error) {
1854 if (xfs_has_rtgroups(args.mp))
1855 goto out_unlock;
1856 goto out_release;
1857 }
1858
1859 if (xfs_has_rtgroups(args.mp)) {
1860 error = xfs_rtallocate_adjust_for_busy(&args, start, minlen,
1861 maxlen, &len, prod, &rtx);
1862 if (error)
1863 goto out_unlock;
1864
1865 xfs_rtgroup_trans_join(tp, args.rtg, XFS_RTGLOCK_BITMAP);
1866 }
1867
1868 error = xfs_rtallocate_range(&args, rtx, len);
1869 if (error)
1870 goto out_release;
1871
1872 xfs_trans_mod_sb(tp, wasdel ?
1873 XFS_TRANS_SB_RES_FREXTENTS : XFS_TRANS_SB_FREXTENTS,
1874 -(long)len);
1875 *bno = xfs_rtx_to_rtb(args.rtg, rtx);
1876 *blen = xfs_rtxlen_to_extlen(args.mp, len);
1877
1878 out_release:
1879 xfs_rtgroup_rele(args.rtg);
1880 xfs_rtbuf_cache_relse(&args);
1881 return error;
1882 out_unlock:
1883 xfs_rtgroup_unlock(args.rtg, XFS_RTGLOCK_BITMAP);
1884 *rtlocked = false;
1885 goto out_release;
1886 }
1887
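/*
 * Iterate over all realtime groups, starting from the hint or a rotor, until
 * one of them satisfies the allocation request.
 */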
1888 static int
1889 xfs_rtallocate_rtgs(
1890 struct xfs_trans *tp,
1891 xfs_fsblock_t bno_hint,
1892 xfs_rtxlen_t minlen,
1893 xfs_rtxlen_t maxlen,
1894 xfs_rtxlen_t prod,
1895 bool wasdel,
1896 bool initial_user_data,
1897 xfs_rtblock_t *bno,
1898 xfs_extlen_t *blen)
1899 {
1900 struct xfs_mount *mp = tp->t_mountp;
1901 xfs_rgnumber_t start_rgno, rgno;
1902 int error;
1903
1904 /*
1905 * For now this just blindly iterates over the RTGs for an initial
1906 * allocation. We could try to keep an in-memory rtg_longest member
1907 * to avoid the locking when just looking for big enough free space,
1908 * but for now this keeps things simple.
1909 */
1910 if (bno_hint != NULLFSBLOCK)
1911 start_rgno = xfs_rtb_to_rgno(mp, bno_hint);
1912 else
1913 start_rgno = (atomic_inc_return(&mp->m_rtgrotor) - 1) %
1914 mp->m_sb.sb_rgcount;
1915
1916 rgno = start_rgno;
1917 do {
1918 bool rtlocked = false;
1919
1920 error = xfs_rtallocate_rtg(tp, rgno, bno_hint, minlen, maxlen,
1921 prod, wasdel, initial_user_data, &rtlocked,
1922 bno, blen);
1923 if (error != -ENOSPC)
1924 return error;
1925 ASSERT(!rtlocked);
1926
1927 if (++rgno == mp->m_sb.sb_rgcount)
1928 rgno = 0;
1929 bno_hint = NULLFSBLOCK;
1930 } while (rgno != start_rgno);
1931
1932 return -ENOSPC;
1933 }
1934
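/*
 * Compute the aligned request length, minimum length, and product factor (all
 * in rtextents) for a realtime allocation, applying the extent size hint
 * unless noalign is set.
 */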
1935 static int
1936 xfs_rtallocate_align(
1937 struct xfs_bmalloca *ap,
1938 xfs_rtxlen_t *ralen,
1939 xfs_rtxlen_t *raminlen,
1940 xfs_rtxlen_t *prod,
1941 bool *noalign)
1942 {
1943 struct xfs_mount *mp = ap->ip->i_mount;
1944 xfs_fileoff_t orig_offset = ap->offset;
1945 xfs_extlen_t minlen = mp->m_sb.sb_rextsize;
1946 xfs_extlen_t align; /* minimum allocation alignment */
1947 xfs_extlen_t mod; /* product factor for allocators */
1948 int error;
1949
1950 if (*noalign) {
1951 align = mp->m_sb.sb_rextsize;
1952 } else {
1953 align = xfs_get_extsz_hint(ap->ip);
1954 if (!align)
1955 align = 1;
1956 if (align == mp->m_sb.sb_rextsize)
1957 *noalign = true;
1958 }
1959
1960 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 1,
1961 ap->eof, 0, ap->conv, &ap->offset, &ap->length);
1962 if (error)
1963 return error;
1964 ASSERT(ap->length);
1965 ASSERT(xfs_extlen_to_rtxmod(mp, ap->length) == 0);
1966
1967 /*
1968 * If we shifted the file offset downward to satisfy an extent size
1969 * hint, increase minlen by that amount so that the allocator won't
1970 * give us an allocation that's too short to cover at least one of the
1971 * blocks that the caller asked for.
1972 */
1973 if (ap->offset != orig_offset)
1974 minlen += orig_offset - ap->offset;
1975
1976 /*
1977 * Set ralen to be the actual requested length in rtextents.
1978 *
1979 * If the old value was close enough to XFS_BMBT_MAX_EXTLEN that
1980 * we rounded up to it, cut it back so it's valid again.
1981 * Note that if it's a really large request (bigger than
1982 * XFS_BMBT_MAX_EXTLEN), we don't hear about that number, and can't
1983 * adjust the starting point to match it.
1984 */
1985 *ralen = xfs_extlen_to_rtxlen(mp, min(ap->length, XFS_MAX_BMBT_EXTLEN));
1986 *raminlen = max_t(xfs_rtxlen_t, 1, xfs_extlen_to_rtxlen(mp, minlen));
1987 ASSERT(*raminlen > 0);
1988 ASSERT(*raminlen <= *ralen);
1989
1990 /*
1991 * Only bother calculating a real prod factor if offset & length are
1992 * perfectly aligned, otherwise it will just get us in trouble.
1993 */
1994 div_u64_rem(ap->offset, align, &mod);
1995 if (mod || ap->length % align)
1996 *prod = 1;
1997 else
1998 *prod = xfs_extlen_to_rtxlen(mp, align);
1999
2000 if (*prod > 1)
2001 xfs_rtalloc_align_minmax(raminlen, ralen, prod);
2002 return 0;
2003 }
2004
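/*
 * Allocate realtime space for a bmap mapping, retrying without extent size
 * hint alignment if the aligned request cannot be satisfied.
 */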
2005 int
2006 xfs_bmap_rtalloc(
2007 struct xfs_bmalloca *ap)
2008 {
2009 xfs_fileoff_t orig_offset = ap->offset;
2010 xfs_rtxlen_t prod = 0; /* product factor for allocators */
2011 xfs_rtxlen_t ralen = 0; /* realtime allocation length */
2012 xfs_rtblock_t bno_hint = NULLRTBLOCK;
2013 xfs_extlen_t orig_length = ap->length;
2014 xfs_rtxlen_t raminlen;
2015 bool rtlocked = false;
2016 bool noalign = false;
2017 bool initial_user_data =
2018 ap->datatype & XFS_ALLOC_INITIAL_USER_DATA;
2019 int error;
2020
2021 retry:
2022 error = xfs_rtallocate_align(ap, &ralen, &raminlen, &prod, &noalign);
2023 if (error)
2024 return error;
2025
2026 if (xfs_bmap_adjacent(ap))
2027 bno_hint = ap->blkno;
2028
2029 if (xfs_has_rtgroups(ap->ip->i_mount)) {
2030 error = xfs_rtallocate_rtgs(ap->tp, bno_hint, raminlen, ralen,
2031 prod, ap->wasdel, initial_user_data,
2032 &ap->blkno, &ap->length);
2033 } else {
2034 error = xfs_rtallocate_rtg(ap->tp, 0, bno_hint, raminlen, ralen,
2035 prod, ap->wasdel, initial_user_data,
2036 &rtlocked, &ap->blkno, &ap->length);
2037 }
2038
2039 if (error == -ENOSPC) {
2040 if (!noalign) {
2041 /*
2042 * We previously enlarged the request length to try to
2043 * satisfy an extent size hint. The allocator didn't
2044 * return anything, so reset the parameters to the
2045 * original values and try again without alignment
2046 * criteria.
2047 */
2048 ap->offset = orig_offset;
2049 ap->length = orig_length;
2050 noalign = true;
2051 goto retry;
2052 }
2053
2054 ap->blkno = NULLFSBLOCK;
2055 ap->length = 0;
2056 return 0;
2057 }
2058 if (error)
2059 return error;
2060
2061 xfs_bmap_alloc_account(ap);
2062 return 0;
2063 }
2064