1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_btree.h"
16 #include "xfs_ialloc.h"
17 #include "xfs_ialloc_btree.h"
18 #include "xfs_alloc.h"
19 #include "xfs_errortag.h"
20 #include "xfs_error.h"
21 #include "xfs_bmap.h"
22 #include "xfs_trans.h"
23 #include "xfs_buf_item.h"
24 #include "xfs_icreate_item.h"
25 #include "xfs_icache.h"
26 #include "xfs_trace.h"
27 #include "xfs_log.h"
28 #include "xfs_rmap.h"
29 #include "xfs_ag.h"
30 #include "xfs_health.h"
31
32 /*
33 * Lookup a record by ino in the btree given by cur.
34 */
35 int /* error */
36 xfs_inobt_lookup(
37 struct xfs_btree_cur *cur, /* btree cursor */
38 xfs_agino_t ino, /* starting inode of chunk */
39 xfs_lookup_t dir, /* <=, >=, == */
40 int *stat) /* success/failure */
41 {
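/*
 * Only ir_startino acts as the btree lookup key here; the remaining
 * fields are zeroed so the cursor's record is well defined.
 */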
42 cur->bc_rec.i.ir_startino = ino;
43 cur->bc_rec.i.ir_holemask = 0;
44 cur->bc_rec.i.ir_count = 0;
45 cur->bc_rec.i.ir_freecount = 0;
46 cur->bc_rec.i.ir_free = 0;
47 return xfs_btree_lookup(cur, dir, stat);
48 }
49
50 /*
51 * Update the record referred to by cur to the value given.
52 * This either works (return 0) or gets an EFSCORRUPTED error.
53 */
54 STATIC int /* error */
55 xfs_inobt_update(
56 struct xfs_btree_cur *cur, /* btree cursor */
57 xfs_inobt_rec_incore_t *irec) /* btree record */
58 {
59 union xfs_btree_rec rec;
60
61 rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino);
62 if (xfs_has_sparseinodes(cur->bc_mp)) {
63 rec.inobt.ir_u.sp.ir_holemask = cpu_to_be16(irec->ir_holemask);
64 rec.inobt.ir_u.sp.ir_count = irec->ir_count;
65 rec.inobt.ir_u.sp.ir_freecount = irec->ir_freecount;
66 } else {
67 /* ir_holemask/ir_count not supported on-disk */
68 rec.inobt.ir_u.f.ir_freecount = cpu_to_be32(irec->ir_freecount);
69 }
70 rec.inobt.ir_free = cpu_to_be64(irec->ir_free);
71 return xfs_btree_update(cur, &rec);
72 }
73
74 /* Convert on-disk btree record to incore inobt record. */
75 void
76 xfs_inobt_btrec_to_irec(
77 struct xfs_mount *mp,
78 const union xfs_btree_rec *rec,
79 struct xfs_inobt_rec_incore *irec)
80 {
81 irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino);
82 if (xfs_has_sparseinodes(mp)) {
83 irec->ir_holemask = be16_to_cpu(rec->inobt.ir_u.sp.ir_holemask);
84 irec->ir_count = rec->inobt.ir_u.sp.ir_count;
85 irec->ir_freecount = rec->inobt.ir_u.sp.ir_freecount;
86 } else {
87 /*
88 * ir_holemask/ir_count not supported on-disk. Fill in hardcoded
89 * values for full inode chunks.
90 */
91 irec->ir_holemask = XFS_INOBT_HOLEMASK_FULL;
92 irec->ir_count = XFS_INODES_PER_CHUNK;
93 irec->ir_freecount =
94 be32_to_cpu(rec->inobt.ir_u.f.ir_freecount);
95 }
96 irec->ir_free = be64_to_cpu(rec->inobt.ir_free);
97 }
98
99 /* Compute the freecount of an incore inode record. */
100 uint8_t
101 xfs_inobt_rec_freecount(
102 const struct xfs_inobt_rec_incore *irec)
103 {
104 uint64_t realfree = irec->ir_free;
105
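/* Ignore free bits that fall within unallocated (hole) regions. */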
106 if (xfs_inobt_issparse(irec->ir_holemask))
107 realfree &= xfs_inobt_irec_to_allocmask(irec);
108 return hweight64(realfree);
109 }
110
111 /* Simple checks for inode records. */
112 xfs_failaddr_t
113 xfs_inobt_check_irec(
114 struct xfs_perag *pag,
115 const struct xfs_inobt_rec_incore *irec)
116 {
117 /* Record has to be properly aligned within the AG. */
118 if (!xfs_verify_agino(pag, irec->ir_startino))
119 return __this_address;
120 if (!xfs_verify_agino(pag,
121 irec->ir_startino + XFS_INODES_PER_CHUNK - 1))
122 return __this_address;
123 if (irec->ir_count < XFS_INODES_PER_HOLEMASK_BIT ||
124 irec->ir_count > XFS_INODES_PER_CHUNK)
125 return __this_address;
126 if (irec->ir_freecount > XFS_INODES_PER_CHUNK)
127 return __this_address;
128
129 if (xfs_inobt_rec_freecount(irec) != irec->ir_freecount)
130 return __this_address;
131
132 return NULL;
133 }
134
135 static inline int
136 xfs_inobt_complain_bad_rec(
137 struct xfs_btree_cur *cur,
138 xfs_failaddr_t fa,
139 const struct xfs_inobt_rec_incore *irec)
140 {
141 struct xfs_mount *mp = cur->bc_mp;
142
143 xfs_warn(mp,
144 "%sbt record corruption in AG %d detected at %pS!",
145 cur->bc_ops->name, cur->bc_ag.pag->pag_agno, fa);
146 xfs_warn(mp,
147 "start inode 0x%x, count 0x%x, free 0x%x freemask 0x%llx, holemask 0x%x",
148 irec->ir_startino, irec->ir_count, irec->ir_freecount,
149 irec->ir_free, irec->ir_holemask);
150 xfs_btree_mark_sick(cur);
151 return -EFSCORRUPTED;
152 }
153
154 /*
155 * Get the data from the pointed-to record.
156 */
157 int
158 xfs_inobt_get_rec(
159 struct xfs_btree_cur *cur,
160 struct xfs_inobt_rec_incore *irec,
161 int *stat)
162 {
163 struct xfs_mount *mp = cur->bc_mp;
164 union xfs_btree_rec *rec;
165 xfs_failaddr_t fa;
166 int error;
167
168 error = xfs_btree_get_rec(cur, &rec, stat);
169 if (error || *stat == 0)
170 return error;
171
172 xfs_inobt_btrec_to_irec(mp, rec, irec);
173 fa = xfs_inobt_check_irec(cur->bc_ag.pag, irec);
174 if (fa)
175 return xfs_inobt_complain_bad_rec(cur, fa, irec);
176
177 return 0;
178 }
179
180 /*
181 * Insert a single inobt record. Cursor must already point to desired location.
182 */
183 int
184 xfs_inobt_insert_rec(
185 struct xfs_btree_cur *cur,
186 uint16_t holemask,
187 uint8_t count,
188 int32_t freecount,
189 xfs_inofree_t free,
190 int *stat)
191 {
192 cur->bc_rec.i.ir_holemask = holemask;
193 cur->bc_rec.i.ir_count = count;
194 cur->bc_rec.i.ir_freecount = freecount;
195 cur->bc_rec.i.ir_free = free;
196 return xfs_btree_insert(cur, stat);
197 }
198
199 /*
200 * Insert records describing a newly allocated inode chunk into the inobt.
201 */
202 STATIC int
203 xfs_inobt_insert(
204 struct xfs_perag *pag,
205 struct xfs_trans *tp,
206 struct xfs_buf *agbp,
207 xfs_agino_t newino,
208 xfs_agino_t newlen,
209 bool is_finobt)
210 {
211 struct xfs_btree_cur *cur;
212 xfs_agino_t thisino;
213 int i;
214 int error;
215
216 if (is_finobt)
217 cur = xfs_finobt_init_cursor(pag, tp, agbp);
218 else
219 cur = xfs_inobt_init_cursor(pag, tp, agbp);
220
221 for (thisino = newino;
222 thisino < newino + newlen;
223 thisino += XFS_INODES_PER_CHUNK) {
224 error = xfs_inobt_lookup(cur, thisino, XFS_LOOKUP_EQ, &i);
225 if (error) {
226 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
227 return error;
228 }
229 ASSERT(i == 0);
230
231 error = xfs_inobt_insert_rec(cur, XFS_INOBT_HOLEMASK_FULL,
232 XFS_INODES_PER_CHUNK,
233 XFS_INODES_PER_CHUNK,
234 XFS_INOBT_ALL_FREE, &i);
235 if (error) {
236 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
237 return error;
238 }
239 ASSERT(i == 1);
240 }
241
242 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
243
244 return 0;
245 }
246
247 /*
248 * Verify that the number of free inodes in the AGI is correct.
249 */
250 #ifdef DEBUG
251 static int
252 xfs_check_agi_freecount(
253 struct xfs_btree_cur *cur)
254 {
255 if (cur->bc_nlevels == 1) {
256 xfs_inobt_rec_incore_t rec;
257 int freecount = 0;
258 int error;
259 int i;
260
261 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
262 if (error)
263 return error;
264
265 do {
266 error = xfs_inobt_get_rec(cur, &rec, &i);
267 if (error)
268 return error;
269
270 if (i) {
271 freecount += rec.ir_freecount;
272 error = xfs_btree_increment(cur, 0, &i);
273 if (error)
274 return error;
275 }
276 } while (i == 1);
277
278 if (!xfs_is_shutdown(cur->bc_mp))
279 ASSERT(freecount == cur->bc_ag.pag->pagi_freecount);
280 }
281 return 0;
282 }
283 #else
284 #define xfs_check_agi_freecount(cur) 0
285 #endif
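/*
 * In non-DEBUG builds the macro above evaluates to 0, so call sites can
 * use it unconditionally, e.g.:
 *
 *	error = xfs_check_agi_freecount(cur);
 *	if (error)
 *		goto error0;
 */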
286
287 /*
288 * Initialise a new set of inodes. When called without a transaction context
289 * (e.g. from recovery) we initiate a delayed write of the inode buffers rather
290 * than logging them (which in a transaction context puts them into the AIL
291 * for writeback rather than the xfsbufd queue).
292 */
293 int
294 xfs_ialloc_inode_init(
295 struct xfs_mount *mp,
296 struct xfs_trans *tp,
297 struct list_head *buffer_list,
298 int icount,
299 xfs_agnumber_t agno,
300 xfs_agblock_t agbno,
301 xfs_agblock_t length,
302 unsigned int gen)
303 {
304 struct xfs_buf *fbuf;
305 struct xfs_dinode *free;
306 int nbufs;
307 int version;
308 int i, j;
309 xfs_daddr_t d;
310 xfs_ino_t ino = 0;
311 int error;
312
313 /*
314 * Loop over the new block(s), filling in the inodes. For small block
315 * sizes, manipulate the inodes in buffers which are multiples of the
316 * block size.
317 */
318 nbufs = length / M_IGEO(mp)->blocks_per_cluster;
319
320 /*
321 * Figure out what version number to use in the inodes we create. If
322 * the superblock version has caught up to the one that supports the new
323 * inode format, then use the new inode version. Otherwise use the old
324 * version so that old kernels will continue to be able to use the file
325 * system.
326 *
327 * For v3 inodes, we also need to write the inode number into the inode,
328 * so calculate the first inode number of the chunk here as
329 * XFS_AGB_TO_AGINO() only works within a filesystem block, not
330 * across multiple filesystem blocks (such as a cluster) and so cannot
331 * be used in the cluster buffer loop below.
332 *
333 * Further, because we are writing the inode directly into the buffer
334 * and calculating a CRC on the entire inode, we have to log the entire
335 * inode so that the entire range the CRC covers is present in the log.
336 * That means for v3 inode we log the entire buffer rather than just the
337 * inode cores.
338 */
339 if (xfs_has_v3inodes(mp)) {
340 version = 3;
341 ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno));
342
343 /*
344 * log the initialisation that is about to take place as a
345 * logical operation. This means the transaction does not
346 * need to log the physical changes to the inode buffers as log
347 * recovery will know what initialisation is actually needed.
348 * Hence we only need to log the buffers as "ordered" buffers so
349 * they track in the AIL as if they were physically logged.
350 */
351 if (tp)
352 xfs_icreate_log(tp, agno, agbno, icount,
353 mp->m_sb.sb_inodesize, length, gen);
354 } else
355 version = 2;
356
357 for (j = 0; j < nbufs; j++) {
358 /*
359 * Get the block.
360 */
361 d = XFS_AGB_TO_DADDR(mp, agno, agbno +
362 (j * M_IGEO(mp)->blocks_per_cluster));
363 error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
364 mp->m_bsize * M_IGEO(mp)->blocks_per_cluster,
365 XBF_UNMAPPED, &fbuf);
366 if (error)
367 return error;
368
369 /* Initialize the inode buffers and log them appropriately. */
370 fbuf->b_ops = &xfs_inode_buf_ops;
371 xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
372 for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) {
373 int ioffset = i << mp->m_sb.sb_inodelog;
374
375 free = xfs_make_iptr(mp, fbuf, i);
376 free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
377 free->di_version = version;
378 free->di_gen = cpu_to_be32(gen);
379 free->di_next_unlinked = cpu_to_be32(NULLAGINO);
380
381 if (version == 3) {
382 free->di_ino = cpu_to_be64(ino);
383 ino++;
384 uuid_copy(&free->di_uuid,
385 &mp->m_sb.sb_meta_uuid);
386 xfs_dinode_calc_crc(mp, free);
387 } else if (tp) {
388 /* just log the inode core */
389 xfs_trans_log_buf(tp, fbuf, ioffset,
390 ioffset + XFS_DINODE_SIZE(mp) - 1);
391 }
392 }
393
394 if (tp) {
395 /*
396 * Mark the buffer as an inode allocation buffer so it
397 * sticks in the AIL at the point of this allocation
398 * transaction. This ensures they are on disk before
399 * the tail of the log can be moved past this
400 * transaction (i.e. by preventing relogging from moving
401 * it forward in the log).
402 */
403 xfs_trans_inode_alloc_buf(tp, fbuf);
404 if (version == 3) {
405 /*
406 * Mark the buffer as ordered so that it is not
407 * physically logged in the transaction but is
408 * still tracked in the AIL as part of the
409 * transaction, pinning the log appropriately.
410 */
411 xfs_trans_ordered_buf(tp, fbuf);
412 }
413 } else {
414 fbuf->b_flags |= XBF_DONE;
415 xfs_buf_delwri_queue(fbuf, buffer_list);
416 xfs_buf_relse(fbuf);
417 }
418 }
419 return 0;
420 }
421
422 /*
423 * Align startino and allocmask for a recently allocated sparse chunk such that
424 * they are fit for insertion (or merge) into the on-disk inode btrees.
425 *
426 * Background:
427 *
428 * When enabled, sparse inode support increases the inode alignment from cluster
429 * size to inode chunk size. This means that the minimum range between two
430 * non-adjacent inode records in the inobt is large enough for a full inode
431 * record. This allows for cluster sized, cluster aligned block allocation
432 * without need to worry about whether the resulting inode record overlaps with
433 * another record in the tree. Without this basic rule, we would have to deal
434 * with the consequences of overlap by potentially undoing recent allocations in
435 * the inode allocation codepath.
436 *
437 * Because of this alignment rule (which is enforced on mount), there are two
438 * inobt possibilities for newly allocated sparse chunks. One is that the
439 * aligned inode record for the chunk covers a range of inodes not already
440 * covered in the inobt (i.e., it is safe to insert a new sparse record). The
441 * other is that a record already exists at the aligned startino that considers
442 * the newly allocated range as sparse. In the latter case, record content is
443 * merged in hope that sparse inode chunks fill to full chunks over time.
444 */
445 STATIC void
446 xfs_align_sparse_ino(
447 struct xfs_mount *mp,
448 xfs_agino_t *startino,
449 uint16_t *allocmask)
450 {
451 xfs_agblock_t agbno;
452 xfs_agblock_t mod;
453 int offset;
454
455 agbno = XFS_AGINO_TO_AGBNO(mp, *startino);
456 mod = agbno % mp->m_sb.sb_inoalignmt;
457 if (!mod)
458 return;
459
460 /* calculate the inode offset and align startino */
461 offset = XFS_AGB_TO_AGINO(mp, mod);
462 *startino -= offset;
463
464 /*
465 * Since startino has been aligned down, left shift allocmask such that
466 * it continues to represent the same physical inodes relative to the
467 * new startino.
468 */
469 *allocmask <<= offset / XFS_INODES_PER_HOLEMASK_BIT;
470 }
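/*
 * Worked example, assuming the common geometry of 64 inodes per chunk and
 * XFS_INODES_PER_HOLEMASK_BIT == 4: if the allocated agbno maps 32 inodes
 * past the chunk-aligned startino, offset is 32, startino is pulled back
 * by 32 and allocmask is shifted left by 32 / 4 = 8 bits so it still
 * describes the same physical inodes.
 */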
471
472 /*
473 * Determine whether the source inode record can merge into the target. Both
474 * records must be sparse, the inode ranges must match and there must be no
475 * allocation overlap between the records.
476 */
477 STATIC bool
478 __xfs_inobt_can_merge(
479 struct xfs_inobt_rec_incore *trec, /* tgt record */
480 struct xfs_inobt_rec_incore *srec) /* src record */
481 {
482 uint64_t talloc;
483 uint64_t salloc;
484
485 /* records must cover the same inode range */
486 if (trec->ir_startino != srec->ir_startino)
487 return false;
488
489 /* both records must be sparse */
490 if (!xfs_inobt_issparse(trec->ir_holemask) ||
491 !xfs_inobt_issparse(srec->ir_holemask))
492 return false;
493
494 /* both records must track some inodes */
495 if (!trec->ir_count || !srec->ir_count)
496 return false;
497
498 /* can't exceed capacity of a full record */
499 if (trec->ir_count + srec->ir_count > XFS_INODES_PER_CHUNK)
500 return false;
501
502 /* verify there is no allocation overlap */
503 talloc = xfs_inobt_irec_to_allocmask(trec);
504 salloc = xfs_inobt_irec_to_allocmask(srec);
505 if (talloc & salloc)
506 return false;
507
508 return true;
509 }
510
511 /*
512 * Merge the source inode record into the target. The caller must call
513 * __xfs_inobt_can_merge() to ensure the merge is valid.
514 */
515 STATIC void
516 __xfs_inobt_rec_merge(
517 struct xfs_inobt_rec_incore *trec, /* target */
518 struct xfs_inobt_rec_incore *srec) /* src */
519 {
520 ASSERT(trec->ir_startino == srec->ir_startino);
521
522 /* combine the counts */
523 trec->ir_count += srec->ir_count;
524 trec->ir_freecount += srec->ir_freecount;
525
526 /*
527 * Merge the holemask and free mask. For both fields, 0 bits refer to
528 * allocated inodes. We combine the allocated ranges with bitwise AND.
529 */
530 trec->ir_holemask &= srec->ir_holemask;
531 trec->ir_free &= srec->ir_free;
532 }
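/*
 * Illustrative example: since 0 bits mean "allocated" in both masks, two
 * sparse records covering complementary halves of a chunk (holemasks
 * 0xff00 and 0x00ff) merge to holemask 0x0000, i.e. a fully allocated
 * chunk; ir_free combines the same way via bitwise AND.
 */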
533
534 /*
535 * Insert a new sparse inode chunk into the associated inode allocation btree.
536 * The inode record for the sparse chunk is pre-aligned to a startino that
537 * should match any pre-existing sparse inode record in the tree. This allows
538 * sparse chunks to fill over time.
539 *
540 * If no preexisting record exists, the provided record is inserted.
541 * If there is a preexisting record, the provided record is merged with the
542 * existing record and updated in place. The merged record is returned in nrec.
543 *
544 * It is considered corruption if a merge is requested and not possible. Given
545 * the sparse inode alignment constraints, this should never happen.
546 */
547 STATIC int
548 xfs_inobt_insert_sprec(
549 struct xfs_perag *pag,
550 struct xfs_trans *tp,
551 struct xfs_buf *agbp,
552 struct xfs_inobt_rec_incore *nrec) /* in/out: new/merged rec. */
553 {
554 struct xfs_mount *mp = pag->pag_mount;
555 struct xfs_btree_cur *cur;
556 int error;
557 int i;
558 struct xfs_inobt_rec_incore rec;
559
560 cur = xfs_inobt_init_cursor(pag, tp, agbp);
561
562 /* the new record is pre-aligned so we know where to look */
563 error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i);
564 if (error)
565 goto error;
566 /* if nothing there, insert a new record and return */
567 if (i == 0) {
568 error = xfs_inobt_insert_rec(cur, nrec->ir_holemask,
569 nrec->ir_count, nrec->ir_freecount,
570 nrec->ir_free, &i);
571 if (error)
572 goto error;
573 if (XFS_IS_CORRUPT(mp, i != 1)) {
574 xfs_btree_mark_sick(cur);
575 error = -EFSCORRUPTED;
576 goto error;
577 }
578
579 goto out;
580 }
581
582 /*
583 * A record exists at this startino. Merge the records.
584 */
585 error = xfs_inobt_get_rec(cur, &rec, &i);
586 if (error)
587 goto error;
588 if (XFS_IS_CORRUPT(mp, i != 1)) {
589 xfs_btree_mark_sick(cur);
590 error = -EFSCORRUPTED;
591 goto error;
592 }
593 if (XFS_IS_CORRUPT(mp, rec.ir_startino != nrec->ir_startino)) {
594 xfs_btree_mark_sick(cur);
595 error = -EFSCORRUPTED;
596 goto error;
597 }
598
599 /*
600 * This should never fail. If we have coexisting records that
601 * cannot merge, something is seriously wrong.
602 */
603 if (XFS_IS_CORRUPT(mp, !__xfs_inobt_can_merge(nrec, &rec))) {
604 xfs_btree_mark_sick(cur);
605 error = -EFSCORRUPTED;
606 goto error;
607 }
608
609 trace_xfs_irec_merge_pre(mp, pag->pag_agno, rec.ir_startino,
610 rec.ir_holemask, nrec->ir_startino,
611 nrec->ir_holemask);
612
613 /* merge to nrec to output the updated record */
614 __xfs_inobt_rec_merge(nrec, &rec);
615
616 trace_xfs_irec_merge_post(mp, pag->pag_agno, nrec->ir_startino,
617 nrec->ir_holemask);
618
619 error = xfs_inobt_rec_check_count(mp, nrec);
620 if (error)
621 goto error;
622
623 error = xfs_inobt_update(cur, nrec);
624 if (error)
625 goto error;
626
627 out:
628 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
629 return 0;
630 error:
631 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
632 return error;
633 }
634
635 /*
636 * Insert a new sparse inode chunk into the free inode btree. The inode
637 * record for the sparse chunk is pre-aligned to a startino that should match
638 * any pre-existing sparse inode record in the tree. This allows sparse chunks
639 * to fill over time.
640 *
641 * The new record is always inserted, overwriting a pre-existing record if
642 * there is one.
643 */
644 STATIC int
645 xfs_finobt_insert_sprec(
646 struct xfs_perag *pag,
647 struct xfs_trans *tp,
648 struct xfs_buf *agbp,
649 struct xfs_inobt_rec_incore *nrec) /* in/out: new rec. */
650 {
651 struct xfs_mount *mp = pag->pag_mount;
652 struct xfs_btree_cur *cur;
653 int error;
654 int i;
655
656 cur = xfs_finobt_init_cursor(pag, tp, agbp);
657
658 /* the new record is pre-aligned so we know where to look */
659 error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i);
660 if (error)
661 goto error;
662 /* if nothing there, insert a new record and return */
663 if (i == 0) {
664 error = xfs_inobt_insert_rec(cur, nrec->ir_holemask,
665 nrec->ir_count, nrec->ir_freecount,
666 nrec->ir_free, &i);
667 if (error)
668 goto error;
669 if (XFS_IS_CORRUPT(mp, i != 1)) {
670 xfs_btree_mark_sick(cur);
671 error = -EFSCORRUPTED;
672 goto error;
673 }
674 } else {
675 error = xfs_inobt_update(cur, nrec);
676 if (error)
677 goto error;
678 }
679
680 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
681 return 0;
682 error:
683 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
684 return error;
685 }
686
687
688 /*
689 * Allocate new inodes in the allocation group specified by agbp. Returns 0 if
690 * inodes were allocated in this AG; -EAGAIN if there was no space in this AG so
691 * the caller knows it can try another AG, a hard -ENOSPC when over the maximum
692 * inode count threshold, or the usual negative error code for other errors.
693 */
694 STATIC int
695 xfs_ialloc_ag_alloc(
696 struct xfs_perag *pag,
697 struct xfs_trans *tp,
698 struct xfs_buf *agbp)
699 {
700 struct xfs_agi *agi;
701 struct xfs_alloc_arg args;
702 int error;
703 xfs_agino_t newino; /* new first inode's number */
704 xfs_agino_t newlen; /* new number of inodes */
705 int isaligned = 0; /* inode allocation at stripe unit boundary */
706 struct xfs_inobt_rec_incore rec;
707 struct xfs_ino_geometry *igeo = M_IGEO(tp->t_mountp);
708 uint16_t allocmask = (uint16_t) -1; /* init. to full chunk */
711 int do_sparse = 0;
712
713 memset(&args, 0, sizeof(args));
714 args.tp = tp;
715 args.mp = tp->t_mountp;
716 args.fsbno = NULLFSBLOCK;
717 args.oinfo = XFS_RMAP_OINFO_INODES;
718 args.pag = pag;
719
720 #ifdef DEBUG
721 /* randomly do sparse inode allocations */
722 if (xfs_has_sparseinodes(tp->t_mountp) &&
723 igeo->ialloc_min_blks < igeo->ialloc_blks)
724 do_sparse = get_random_u32_below(2);
725 #endif
726
727 /*
728 * Locking will ensure that we don't have two callers in here
729 * at one time.
730 */
731 newlen = igeo->ialloc_inos;
732 if (igeo->maxicount &&
733 percpu_counter_read_positive(&args.mp->m_icount) + newlen >
734 igeo->maxicount)
735 return -ENOSPC;
736 args.minlen = args.maxlen = igeo->ialloc_blks;
737 /*
738 * First try to allocate inodes contiguous with the last-allocated
739 * chunk of inodes. If the filesystem is striped, this will fill
740 * an entire stripe unit with inodes.
741 */
742 agi = agbp->b_addr;
743 newino = be32_to_cpu(agi->agi_newino);
744 args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
745 igeo->ialloc_blks;
746 if (do_sparse)
747 goto sparse_alloc;
748 if (likely(newino != NULLAGINO &&
749 (args.agbno < be32_to_cpu(agi->agi_length)))) {
750 args.prod = 1;
751
752 /*
753 * We need to take into account alignment here to ensure that
754 * we don't modify the free list if we fail to have an exact
755 * block. If we don't have an exact match, and every other
756 * allocation attempt fails, we'll end up cancelling
757 * a dirty transaction and shutting down.
758 *
759 * For an exact allocation, alignment must be 1,
760 * however we need to take cluster alignment into account when
761 * fixing up the freelist. Use the minalignslop field to
762 * indicate that extra blocks might be required for alignment,
763 * but not to use them in the actual exact allocation.
764 */
765 args.alignment = 1;
766 args.minalignslop = igeo->cluster_align - 1;
767
768 /* Allow space for the inode btree to split. */
769 args.minleft = igeo->inobt_maxlevels;
770 error = xfs_alloc_vextent_exact_bno(&args,
771 XFS_AGB_TO_FSB(args.mp, pag->pag_agno,
772 args.agbno));
773 if (error)
774 return error;
775
776 /*
777 * This request might have dirtied the transaction if the AG can
778 * satisfy the request, but the exact block was not available.
779 * If the allocation did fail, subsequent requests will relax
780 * the exact agbno requirement and increase the alignment
781 * instead. It is critical that the total size of the request
782 * (len + alignment + slop) does not increase from this point
783 * on, so reset minalignslop to ensure it is not included in
784 * subsequent requests.
785 */
786 args.minalignslop = 0;
787 }
788
789 if (unlikely(args.fsbno == NULLFSBLOCK)) {
790 /*
791 * Set the alignment for the allocation.
792 * If stripe alignment is turned on then align at stripe unit
793 * boundary.
794 * If the cluster size is smaller than a filesystem block
795 * then we're doing I/O for inodes in filesystem block size
796 * pieces, so don't need alignment anyway.
797 */
798 isaligned = 0;
799 if (igeo->ialloc_align) {
800 ASSERT(!xfs_has_noalign(args.mp));
801 args.alignment = args.mp->m_dalign;
802 isaligned = 1;
803 } else
804 args.alignment = igeo->cluster_align;
805 /*
806 * Allocate a fixed-size extent of inodes.
807 */
808 args.prod = 1;
809 /*
810 * Allow space for the inode btree to split.
811 */
812 args.minleft = igeo->inobt_maxlevels;
813 error = xfs_alloc_vextent_near_bno(&args,
814 XFS_AGB_TO_FSB(args.mp, pag->pag_agno,
815 be32_to_cpu(agi->agi_root)));
816 if (error)
817 return error;
818 }
819
820 /*
821 * If stripe alignment is turned on, then try again with cluster
822 * alignment.
823 */
824 if (isaligned && args.fsbno == NULLFSBLOCK) {
825 args.alignment = igeo->cluster_align;
826 error = xfs_alloc_vextent_near_bno(&args,
827 XFS_AGB_TO_FSB(args.mp, pag->pag_agno,
828 be32_to_cpu(agi->agi_root)));
829 if (error)
830 return error;
831 }
832
833 /*
834 * Finally, try a sparse allocation if the filesystem supports it and
835 * the sparse allocation length is smaller than a full chunk.
836 */
837 if (xfs_has_sparseinodes(args.mp) &&
838 igeo->ialloc_min_blks < igeo->ialloc_blks &&
839 args.fsbno == NULLFSBLOCK) {
840 sparse_alloc:
841 args.alignment = args.mp->m_sb.sb_spino_align;
842 args.prod = 1;
843
844 args.minlen = igeo->ialloc_min_blks;
845 args.maxlen = args.minlen;
846
847 /*
848 * The inode record will be aligned to full chunk size. We must
849 * prevent sparse allocation from AG boundaries that result in
850 * invalid inode records, such as records that start at agbno 0
851 * or extend beyond the AG.
852 *
853 * Set min agbno to the first aligned, non-zero agbno and max to
854 * the last aligned agbno that is at least one full chunk from
855 * the end of the AG.
856 */
857 args.min_agbno = args.mp->m_sb.sb_inoalignmt;
858 args.max_agbno = round_down(args.mp->m_sb.sb_agblocks,
859 args.mp->m_sb.sb_inoalignmt) -
860 igeo->ialloc_blks;
861
862 error = xfs_alloc_vextent_near_bno(&args,
863 XFS_AGB_TO_FSB(args.mp, pag->pag_agno,
864 be32_to_cpu(agi->agi_root)));
865 if (error)
866 return error;
867
868 newlen = XFS_AGB_TO_AGINO(args.mp, args.len);
869 ASSERT(newlen <= XFS_INODES_PER_CHUNK);
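/* One allocmask bit per XFS_INODES_PER_HOLEMASK_BIT allocated inodes. */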
870 allocmask = (1 << (newlen / XFS_INODES_PER_HOLEMASK_BIT)) - 1;
871 }
872
873 if (args.fsbno == NULLFSBLOCK)
874 return -EAGAIN;
875
876 ASSERT(args.len == args.minlen);
877
878 /*
879 * Stamp and write the inode buffers.
880 *
881 * Seed the new inode cluster with a random generation number. This
882 * prevents short-term reuse of generation numbers if a chunk is
883 * freed and then immediately reallocated. We use random numbers
884 * rather than a linear progression to prevent the next generation
885 * number from being easily guessable.
886 */
887 error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, pag->pag_agno,
888 args.agbno, args.len, get_random_u32());
889
890 if (error)
891 return error;
892 /*
893 * Convert the results.
894 */
895 newino = XFS_AGB_TO_AGINO(args.mp, args.agbno);
896
897 if (xfs_inobt_issparse(~allocmask)) {
898 /*
899 * We've allocated a sparse chunk. Align the startino and mask.
900 */
901 xfs_align_sparse_ino(args.mp, &newino, &allocmask);
902
903 rec.ir_startino = newino;
904 rec.ir_holemask = ~allocmask;
905 rec.ir_count = newlen;
906 rec.ir_freecount = newlen;
907 rec.ir_free = XFS_INOBT_ALL_FREE;
908
909 /*
910 * Insert the sparse record into the inobt and allow for a merge
911 * if necessary. If a merge does occur, rec is updated to the
912 * merged record.
913 */
914 error = xfs_inobt_insert_sprec(pag, tp, agbp, &rec);
915 if (error == -EFSCORRUPTED) {
916 xfs_alert(args.mp,
917 "invalid sparse inode record: ino 0x%llx holemask 0x%x count %u",
918 XFS_AGINO_TO_INO(args.mp, pag->pag_agno,
919 rec.ir_startino),
920 rec.ir_holemask, rec.ir_count);
921 xfs_force_shutdown(args.mp, SHUTDOWN_CORRUPT_INCORE);
922 }
923 if (error)
924 return error;
925
926 /*
927 * We can't merge the part we've just allocated as for the inobt
928 * due to finobt semantics. The original record may or may not
929 * exist independent of whether physical inodes exist in this
930 * sparse chunk.
931 *
932 * We must update the finobt record based on the inobt record.
933 * rec contains the fully merged and up to date inobt record
934 * from the previous call. xfs_finobt_insert_sprec() replaces
935 * any existing record with this one.
936 */
937 if (xfs_has_finobt(args.mp)) {
938 error = xfs_finobt_insert_sprec(pag, tp, agbp, &rec);
939 if (error)
940 return error;
941 }
942 } else {
943 /* full chunk - insert new records to both btrees */
944 error = xfs_inobt_insert(pag, tp, agbp, newino, newlen, false);
945 if (error)
946 return error;
947
948 if (xfs_has_finobt(args.mp)) {
949 error = xfs_inobt_insert(pag, tp, agbp, newino,
950 newlen, true);
951 if (error)
952 return error;
953 }
954 }
955
956 /*
957 * Update AGI counts and newino.
958 */
959 be32_add_cpu(&agi->agi_count, newlen);
960 be32_add_cpu(&agi->agi_freecount, newlen);
961 pag->pagi_freecount += newlen;
962 pag->pagi_count += newlen;
963 agi->agi_newino = cpu_to_be32(newino);
964
965 /*
966 * Log allocation group header fields
967 */
968 xfs_ialloc_log_agi(tp, agbp,
969 XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO);
970 /*
971 * Modify/log superblock values for inode count and inode free count.
972 */
973 xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen);
974 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen);
975 return 0;
976 }
977
978 /*
979 * Try to retrieve the next record to the left/right from the current one.
980 */
981 STATIC int
982 xfs_ialloc_next_rec(
983 struct xfs_btree_cur *cur,
984 xfs_inobt_rec_incore_t *rec,
985 int *done,
986 int left)
987 {
988 int error;
989 int i;
990
991 if (left)
992 error = xfs_btree_decrement(cur, 0, &i);
993 else
994 error = xfs_btree_increment(cur, 0, &i);
995
996 if (error)
997 return error;
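/* i == 0 means the cursor ran off that end of the btree, so flag done. */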
998 *done = !i;
999 if (i) {
1000 error = xfs_inobt_get_rec(cur, rec, &i);
1001 if (error)
1002 return error;
1003 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
1004 xfs_btree_mark_sick(cur);
1005 return -EFSCORRUPTED;
1006 }
1007 }
1008
1009 return 0;
1010 }
1011
1012 STATIC int
1013 xfs_ialloc_get_rec(
1014 struct xfs_btree_cur *cur,
1015 xfs_agino_t agino,
1016 xfs_inobt_rec_incore_t *rec,
1017 int *done)
1018 {
1019 int error;
1020 int i;
1021
1022 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i);
1023 if (error)
1024 return error;
1025 *done = !i;
1026 if (i) {
1027 error = xfs_inobt_get_rec(cur, rec, &i);
1028 if (error)
1029 return error;
1030 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
1031 xfs_btree_mark_sick(cur);
1032 return -EFSCORRUPTED;
1033 }
1034 }
1035
1036 return 0;
1037 }
1038
1039 /*
1040 * Return the offset of the first free inode in the record. If the inode chunk
1041 * is sparsely allocated, we convert the record holemask to inode granularity
1042 * and mask off the unallocated regions from the inode free mask.
1043 */
1044 STATIC int
1045 xfs_inobt_first_free_inode(
1046 struct xfs_inobt_rec_incore *rec)
1047 {
1048 xfs_inofree_t realfree;
1049
1050 /* if there are no holes, return the first available offset */
1051 if (!xfs_inobt_issparse(rec->ir_holemask))
1052 return xfs_lowbit64(rec->ir_free);
1053
1054 realfree = xfs_inobt_irec_to_allocmask(rec);
1055 realfree &= rec->ir_free;
1056
1057 return xfs_lowbit64(realfree);
1058 }
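/*
 * Example (hypothetical sparse record): if only the first 32 inodes are
 * physically allocated but ir_free has bits set across the whole chunk,
 * masking with the allocmask first prevents returning an offset inside
 * the unallocated tail.
 */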
1059
1060 /*
1061 * If this AG has corrupt inodes, check if allocating this inode would fail
1062 * with corruption errors. Returns 0 if we're clear, or EAGAIN to try again
1063 * somewhere else.
1064 */
1065 static int
1066 xfs_dialloc_check_ino(
1067 struct xfs_perag *pag,
1068 struct xfs_trans *tp,
1069 xfs_ino_t ino)
1070 {
1071 struct xfs_imap imap;
1072 struct xfs_buf *bp;
1073 int error;
1074
1075 error = xfs_imap(pag, tp, ino, &imap, 0);
1076 if (error)
1077 return -EAGAIN;
1078
1079 error = xfs_imap_to_bp(pag->pag_mount, tp, &imap, &bp);
1080 if (error)
1081 return -EAGAIN;
1082
1083 xfs_trans_brelse(tp, bp);
1084 return 0;
1085 }
1086
1087 /*
1088 * Allocate an inode using the inobt-only algorithm.
1089 */
1090 STATIC int
1091 xfs_dialloc_ag_inobt(
1092 struct xfs_perag *pag,
1093 struct xfs_trans *tp,
1094 struct xfs_buf *agbp,
1095 xfs_ino_t parent,
1096 xfs_ino_t *inop)
1097 {
1098 struct xfs_mount *mp = tp->t_mountp;
1099 struct xfs_agi *agi = agbp->b_addr;
1100 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
1101 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
1102 struct xfs_btree_cur *cur, *tcur;
1103 struct xfs_inobt_rec_incore rec, trec;
1104 xfs_ino_t ino;
1105 int error;
1106 int offset;
1107 int i, j;
1108 int searchdistance = 10;
1109
1110 ASSERT(xfs_perag_initialised_agi(pag));
1111 ASSERT(xfs_perag_allows_inodes(pag));
1112 ASSERT(pag->pagi_freecount > 0);
1113
1114 restart_pagno:
1115 cur = xfs_inobt_init_cursor(pag, tp, agbp);
1116 /*
1117 * If pagino is 0 (this is the root inode allocation) use newino.
1118 * This must work because we've just allocated some.
1119 */
1120 if (!pagino)
1121 pagino = be32_to_cpu(agi->agi_newino);
1122
1123 error = xfs_check_agi_freecount(cur);
1124 if (error)
1125 goto error0;
1126
1127 /*
1128 * If in the same AG as the parent, try to get near the parent.
1129 */
1130 if (pagno == pag->pag_agno) {
1131 int doneleft; /* done, to the left */
1132 int doneright; /* done, to the right */
1133
1134 error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
1135 if (error)
1136 goto error0;
1137 if (XFS_IS_CORRUPT(mp, i != 1)) {
1138 xfs_btree_mark_sick(cur);
1139 error = -EFSCORRUPTED;
1140 goto error0;
1141 }
1142
1143 error = xfs_inobt_get_rec(cur, &rec, &j);
1144 if (error)
1145 goto error0;
1146 if (XFS_IS_CORRUPT(mp, j != 1)) {
1147 xfs_btree_mark_sick(cur);
1148 error = -EFSCORRUPTED;
1149 goto error0;
1150 }
1151
1152 if (rec.ir_freecount > 0) {
1153 /*
1154 * Found a free inode in the same chunk
1155 * as the parent, done.
1156 */
1157 goto alloc_inode;
1158 }
1159
1160
1161 /*
1162 * In the same AG as parent, but parent's chunk is full.
1163 */
1164
1165 /* duplicate the cursor, search left & right simultaneously */
1166 error = xfs_btree_dup_cursor(cur, &tcur);
1167 if (error)
1168 goto error0;
1169
1170 /*
1171 * Skip to last blocks looked up if same parent inode.
1172 */
1173 if (pagino != NULLAGINO &&
1174 pag->pagl_pagino == pagino &&
1175 pag->pagl_leftrec != NULLAGINO &&
1176 pag->pagl_rightrec != NULLAGINO) {
1177 error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec,
1178 &trec, &doneleft);
1179 if (error)
1180 goto error1;
1181
1182 error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec,
1183 &rec, &doneright);
1184 if (error)
1185 goto error1;
1186 } else {
1187 /* search left with tcur, back up 1 record */
1188 error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1);
1189 if (error)
1190 goto error1;
1191
1192 /* search right with cur, go forward 1 record. */
1193 error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0);
1194 if (error)
1195 goto error1;
1196 }
1197
1198 /*
1199 * Loop until we find an inode chunk with a free inode.
1200 */
1201 while (--searchdistance > 0 && (!doneleft || !doneright)) {
1202 int useleft; /* using left inode chunk this time */
1203
1204 /*
* Figure out the closer chunk if both are valid: compare the
* gap from the left chunk's last inode up to pagino against
* the gap from pagino to the right chunk's first inode.
*/
1205 if (!doneleft && !doneright) {
1206 useleft = pagino -
1207 (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) <
1208 rec.ir_startino - pagino;
1209 } else {
1210 useleft = !doneleft;
1211 }
1212
1213 /* free inodes to the left? */
1214 if (useleft && trec.ir_freecount) {
1215 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1216 cur = tcur;
1217
1218 pag->pagl_leftrec = trec.ir_startino;
1219 pag->pagl_rightrec = rec.ir_startino;
1220 pag->pagl_pagino = pagino;
1221 rec = trec;
1222 goto alloc_inode;
1223 }
1224
1225 /* free inodes to the right? */
1226 if (!useleft && rec.ir_freecount) {
1227 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1228
1229 pag->pagl_leftrec = trec.ir_startino;
1230 pag->pagl_rightrec = rec.ir_startino;
1231 pag->pagl_pagino = pagino;
1232 goto alloc_inode;
1233 }
1234
1235 /* get next record to check */
1236 if (useleft) {
1237 error = xfs_ialloc_next_rec(tcur, &trec,
1238 &doneleft, 1);
1239 } else {
1240 error = xfs_ialloc_next_rec(cur, &rec,
1241 &doneright, 0);
1242 }
1243 if (error)
1244 goto error1;
1245 }
1246
1247 if (searchdistance <= 0) {
1248 /*
1249 * Not in range - save last search
1250 * location and allocate a new inode
1251 */
1252 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1253 pag->pagl_leftrec = trec.ir_startino;
1254 pag->pagl_rightrec = rec.ir_startino;
1255 pag->pagl_pagino = pagino;
1256
1257 } else {
1258 /*
1259 * We've reached the end of the btree. Because
1260 * we only search a small chunk of the
1261 * btree on each pass, there are obviously free
1262 * inodes closer to the parent inode than we
1263 * are now. Restart the search again.
1264 */
1265 pag->pagl_pagino = NULLAGINO;
1266 pag->pagl_leftrec = NULLAGINO;
1267 pag->pagl_rightrec = NULLAGINO;
1268 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1269 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1270 goto restart_pagno;
1271 }
1272 }
1273
1274 /*
1275 * In a different AG from the parent.
1276 * See if the most recently allocated block has any free.
1277 */
1278 if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
1279 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
1280 XFS_LOOKUP_EQ, &i);
1281 if (error)
1282 goto error0;
1283
1284 if (i == 1) {
1285 error = xfs_inobt_get_rec(cur, &rec, &j);
1286 if (error)
1287 goto error0;
1288
1289 if (j == 1 && rec.ir_freecount > 0) {
1290 /*
1291 * The last chunk allocated in the group
1292 * still has a free inode.
1293 */
1294 goto alloc_inode;
1295 }
1296 }
1297 }
1298
1299 /*
1300 * None left in the last group, search the whole AG
1301 */
1302 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
1303 if (error)
1304 goto error0;
1305 if (XFS_IS_CORRUPT(mp, i != 1)) {
1306 xfs_btree_mark_sick(cur);
1307 error = -EFSCORRUPTED;
1308 goto error0;
1309 }
1310
1311 for (;;) {
1312 error = xfs_inobt_get_rec(cur, &rec, &i);
1313 if (error)
1314 goto error0;
1315 if (XFS_IS_CORRUPT(mp, i != 1)) {
1316 xfs_btree_mark_sick(cur);
1317 error = -EFSCORRUPTED;
1318 goto error0;
1319 }
1320 if (rec.ir_freecount > 0)
1321 break;
1322 error = xfs_btree_increment(cur, 0, &i);
1323 if (error)
1324 goto error0;
1325 if (XFS_IS_CORRUPT(mp, i != 1)) {
1326 xfs_btree_mark_sick(cur);
1327 error = -EFSCORRUPTED;
1328 goto error0;
1329 }
1330 }
1331
1332 alloc_inode:
1333 offset = xfs_inobt_first_free_inode(&rec);
1334 ASSERT(offset >= 0);
1335 ASSERT(offset < XFS_INODES_PER_CHUNK);
1336 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
1337 XFS_INODES_PER_CHUNK) == 0);
1338 ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset);
1339
1340 if (xfs_ag_has_sickness(pag, XFS_SICK_AG_INODES)) {
1341 error = xfs_dialloc_check_ino(pag, tp, ino);
1342 if (error)
1343 goto error0;
1344 }
1345
1346 rec.ir_free &= ~XFS_INOBT_MASK(offset);
1347 rec.ir_freecount--;
1348 error = xfs_inobt_update(cur, &rec);
1349 if (error)
1350 goto error0;
1351 be32_add_cpu(&agi->agi_freecount, -1);
1352 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
1353 pag->pagi_freecount--;
1354
1355 error = xfs_check_agi_freecount(cur);
1356 if (error)
1357 goto error0;
1358
1359 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1360 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
1361 *inop = ino;
1362 return 0;
1363 error1:
1364 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
1365 error0:
1366 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
1367 return error;
1368 }
1369
1370 /*
1371 * Use the free inode btree to allocate an inode based on distance from the
1372 * parent. Note that the provided cursor may be deleted and replaced.
1373 */
1374 STATIC int
1375 xfs_dialloc_ag_finobt_near(
1376 xfs_agino_t pagino,
1377 struct xfs_btree_cur **ocur,
1378 struct xfs_inobt_rec_incore *rec)
1379 {
1380 struct xfs_btree_cur *lcur = *ocur; /* left search cursor */
1381 struct xfs_btree_cur *rcur; /* right search cursor */
1382 struct xfs_inobt_rec_incore rrec;
1383 int error;
1384 int i, j;
1385
1386 error = xfs_inobt_lookup(lcur, pagino, XFS_LOOKUP_LE, &i);
1387 if (error)
1388 return error;
1389
1390 if (i == 1) {
1391 error = xfs_inobt_get_rec(lcur, rec, &i);
1392 if (error)
1393 return error;
1394 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1)) {
1395 xfs_btree_mark_sick(lcur);
1396 return -EFSCORRUPTED;
1397 }
1398
1399 /*
1400 * See if we've landed in the parent inode record. The finobt
1401 * only tracks chunks with at least one free inode, so record
1402 * existence is enough.
1403 */
1404 if (pagino >= rec->ir_startino &&
1405 pagino < (rec->ir_startino + XFS_INODES_PER_CHUNK))
1406 return 0;
1407 }
1408
1409 error = xfs_btree_dup_cursor(lcur, &rcur);
1410 if (error)
1411 return error;
1412
1413 error = xfs_inobt_lookup(rcur, pagino, XFS_LOOKUP_GE, &j);
1414 if (error)
1415 goto error_rcur;
1416 if (j == 1) {
1417 error = xfs_inobt_get_rec(rcur, &rrec, &j);
1418 if (error)
1419 goto error_rcur;
1420 if (XFS_IS_CORRUPT(lcur->bc_mp, j != 1)) {
1421 xfs_btree_mark_sick(lcur);
1422 error = -EFSCORRUPTED;
1423 goto error_rcur;
1424 }
1425 }
1426
1427 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1 && j != 1)) {
1428 xfs_btree_mark_sick(lcur);
1429 error = -EFSCORRUPTED;
1430 goto error_rcur;
1431 }
1432 if (i == 1 && j == 1) {
1433 /*
1434 * Both the left and right records are valid. Choose the closer
1435 * inode chunk to the target.
1436 */
1437 if ((pagino - rec->ir_startino + XFS_INODES_PER_CHUNK - 1) >
1438 (rrec.ir_startino - pagino)) {
1439 *rec = rrec;
1440 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
1441 *ocur = rcur;
1442 } else {
1443 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
1444 }
1445 } else if (j == 1) {
1446 /* only the right record is valid */
1447 *rec = rrec;
1448 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
1449 *ocur = rcur;
1450 } else if (i == 1) {
1451 /* only the left record is valid */
1452 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
1453 }
1454
1455 return 0;
1456
1457 error_rcur:
1458 xfs_btree_del_cursor(rcur, XFS_BTREE_ERROR);
1459 return error;
1460 }
1461
1462 /*
1463 * Use the free inode btree to find a free inode based on a newino hint. If
1464 * the hint is NULL, find the first free inode in the AG.
1465 */
1466 STATIC int
1467 xfs_dialloc_ag_finobt_newino(
1468 struct xfs_agi *agi,
1469 struct xfs_btree_cur *cur,
1470 struct xfs_inobt_rec_incore *rec)
1471 {
1472 int error;
1473 int i;
1474
1475 if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
1476 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
1477 XFS_LOOKUP_EQ, &i);
1478 if (error)
1479 return error;
1480 if (i == 1) {
1481 error = xfs_inobt_get_rec(cur, rec, &i);
1482 if (error)
1483 return error;
1484 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
1485 xfs_btree_mark_sick(cur);
1486 return -EFSCORRUPTED;
1487 }
1488 return 0;
1489 }
1490 }
1491
1492 /*
1493 * Find the first inode available in the AG.
1494 */
1495 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
1496 if (error)
1497 return error;
1498 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
1499 xfs_btree_mark_sick(cur);
1500 return -EFSCORRUPTED;
1501 }
1502
1503 error = xfs_inobt_get_rec(cur, rec, &i);
1504 if (error)
1505 return error;
1506 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
1507 xfs_btree_mark_sick(cur);
1508 return -EFSCORRUPTED;
1509 }
1510
1511 return 0;
1512 }
1513
1514 /*
1515 * Update the inobt based on a modification made to the finobt. Also ensure that
1516 * the records from both trees are equivalent post-modification.
1517 */
1518 STATIC int
1519 xfs_dialloc_ag_update_inobt(
1520 struct xfs_btree_cur *cur, /* inobt cursor */
1521 struct xfs_inobt_rec_incore *frec, /* finobt record */
1522 int offset) /* inode offset */
1523 {
1524 struct xfs_inobt_rec_incore rec;
1525 int error;
1526 int i;
1527
1528 error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i);
1529 if (error)
1530 return error;
1531 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
1532 xfs_btree_mark_sick(cur);
1533 return -EFSCORRUPTED;
1534 }
1535
1536 error = xfs_inobt_get_rec(cur, &rec, &i);
1537 if (error)
1538 return error;
1539 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
1540 xfs_btree_mark_sick(cur);
1541 return -EFSCORRUPTED;
1542 }
1543 ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) %
1544 XFS_INODES_PER_CHUNK) == 0);
1545
1546 rec.ir_free &= ~XFS_INOBT_MASK(offset);
1547 rec.ir_freecount--;
1548
1549 if (XFS_IS_CORRUPT(cur->bc_mp,
1550 rec.ir_free != frec->ir_free ||
1551 rec.ir_freecount != frec->ir_freecount)) {
1552 xfs_btree_mark_sick(cur);
1553 return -EFSCORRUPTED;
1554 }
1555
1556 return xfs_inobt_update(cur, &rec);
1557 }
1558
1559 /*
1560 * Allocate an inode using the free inode btree, if available. Otherwise, fall
1561 * back to the inobt search algorithm.
1562 *
1563 * The caller selected an AG for us, and made sure that free inodes are
1564 * available.
1565 */
1566 static int
1567 xfs_dialloc_ag(
1568 struct xfs_perag *pag,
1569 struct xfs_trans *tp,
1570 struct xfs_buf *agbp,
1571 xfs_ino_t parent,
1572 xfs_ino_t *inop)
1573 {
1574 struct xfs_mount *mp = tp->t_mountp;
1575 struct xfs_agi *agi = agbp->b_addr;
1576 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
1577 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
1578 struct xfs_btree_cur *cur; /* finobt cursor */
1579 struct xfs_btree_cur *icur; /* inobt cursor */
1580 struct xfs_inobt_rec_incore rec;
1581 xfs_ino_t ino;
1582 int error;
1583 int offset;
1584 int i;
1585
1586 if (!xfs_has_finobt(mp))
1587 return xfs_dialloc_ag_inobt(pag, tp, agbp, parent, inop);
1588
1589 /*
1590 * If pagino is 0 (this is the root inode allocation) use newino.
1591 * This must work because we've just allocated some.
1592 */
1593 if (!pagino)
1594 pagino = be32_to_cpu(agi->agi_newino);
1595
1596 cur = xfs_finobt_init_cursor(pag, tp, agbp);
1597
1598 error = xfs_check_agi_freecount(cur);
1599 if (error)
1600 goto error_cur;
1601
1602 /*
1603 * The search algorithm depends on whether we're in the same AG as the
1604 * parent. If so, find the closest available inode to the parent. If
1605 * not, consider the agi hint or find the first free inode in the AG.
1606 */
1607 if (pag->pag_agno == pagno)
1608 error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec);
1609 else
1610 error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec);
1611 if (error)
1612 goto error_cur;
1613
1614 offset = xfs_inobt_first_free_inode(&rec);
1615 ASSERT(offset >= 0);
1616 ASSERT(offset < XFS_INODES_PER_CHUNK);
1617 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
1618 XFS_INODES_PER_CHUNK) == 0);
1619 ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset);
1620
1621 if (xfs_ag_has_sickness(pag, XFS_SICK_AG_INODES)) {
1622 error = xfs_dialloc_check_ino(pag, tp, ino);
1623 if (error)
1624 goto error_cur;
1625 }
1626
1627 /*
1628 * Modify or remove the finobt record.
1629 */
1630 rec.ir_free &= ~XFS_INOBT_MASK(offset);
1631 rec.ir_freecount--;
1632 if (rec.ir_freecount)
1633 error = xfs_inobt_update(cur, &rec);
1634 else
1635 error = xfs_btree_delete(cur, &i);
1636 if (error)
1637 goto error_cur;
1638
1639 /*
1640 * The finobt has now been updated appropriately. We haven't updated the
1641 * agi and superblock yet, so we can create an inobt cursor and validate
1642 * the original freecount. If all is well, make the equivalent update to
1643 * the inobt using the finobt record and offset information.
1644 */
1645 icur = xfs_inobt_init_cursor(pag, tp, agbp);
1646
1647 error = xfs_check_agi_freecount(icur);
1648 if (error)
1649 goto error_icur;
1650
1651 error = xfs_dialloc_ag_update_inobt(icur, &rec, offset);
1652 if (error)
1653 goto error_icur;
1654
1655 /*
1656 * Both trees have now been updated. We must update the perag and
1657 * superblock before we can check the freecount for each btree.
1658 */
1659 be32_add_cpu(&agi->agi_freecount, -1);
1660 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
1661 pag->pagi_freecount--;
1662
1663 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
1664
1665 error = xfs_check_agi_freecount(icur);
1666 if (error)
1667 goto error_icur;
1668 error = xfs_check_agi_freecount(cur);
1669 if (error)
1670 goto error_icur;
1671
1672 xfs_btree_del_cursor(icur, XFS_BTREE_NOERROR);
1673 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1674 *inop = ino;
1675 return 0;
1676
1677 error_icur:
1678 xfs_btree_del_cursor(icur, XFS_BTREE_ERROR);
1679 error_cur:
1680 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
1681 return error;
1682 }
1683
1684 static int
1685 xfs_dialloc_roll(
1686 struct xfs_trans **tpp,
1687 struct xfs_buf *agibp)
1688 {
1689 struct xfs_trans *tp = *tpp;
1690 struct xfs_dquot_acct *dqinfo;
1691 int error;
1692
1693 /*
1694 * Hold on to the agibp across the commit so no other allocation can
1695 * come in and take the free inodes we just allocated for our caller.
1696 */
1697 xfs_trans_bhold(tp, agibp);
1698
1699 /*
1700 * We want the quota changes to be associated with the next transaction,
1701 * NOT this one. So, detach the dqinfo from this and attach it to the
1702 * next transaction.
1703 */
1704 dqinfo = tp->t_dqinfo;
1705 tp->t_dqinfo = NULL;
1706
1707 error = xfs_trans_roll(&tp);
1708
1709 /* Re-attach the quota info that we detached from prev trx. */
1710 tp->t_dqinfo = dqinfo;
1711
1712 /*
1713 * Join the buffer even on commit error so that the buffer is released
1714 * when the caller cancels the transaction and doesn't have to handle
1715 * this error case specially.
1716 */
1717 xfs_trans_bjoin(tp, agibp);
1718 *tpp = tp;
1719 return error;
1720 }
1721
1722 static bool
1723 xfs_dialloc_good_ag(
1724 struct xfs_perag *pag,
1725 struct xfs_trans *tp,
1726 umode_t mode,
1727 int flags,
1728 bool ok_alloc)
1729 {
1730 struct xfs_mount *mp = tp->t_mountp;
1731 xfs_extlen_t ineed;
1732 xfs_extlen_t longest = 0;
1733 int needspace;
1734 int error;
1735
1736 if (!pag)
1737 return false;
1738 if (!xfs_perag_allows_inodes(pag))
1739 return false;
1740
1741 if (!xfs_perag_initialised_agi(pag)) {
1742 error = xfs_ialloc_read_agi(pag, tp, 0, NULL);
1743 if (error)
1744 return false;
1745 }
1746
1747 if (pag->pagi_freecount)
1748 return true;
1749 if (!ok_alloc)
1750 return false;
1751
1752 if (!xfs_perag_initialised_agf(pag)) {
1753 error = xfs_alloc_read_agf(pag, tp, flags, NULL);
1754 if (error)
1755 return false;
1756 }
1757
1758 /*
1759 * Check that there is enough free space for the file plus a chunk of
1760 * inodes if we need to allocate some. If this is the first pass across
1761 * the AGs, take into account the potential space needed for alignment
1762 * of inode chunks when checking the longest contiguous free space in
1763 * the AG - this prevents us from getting ENOSPC because we have free
1764 * space larger than ialloc_blks but alignment constraints prevent us
1765 * from using it.
1766 *
1767 * If we can't find an AG with space for full alignment slack to be
1768 * taken into account, we must be near ENOSPC in all AGs. Hence we
1769 * don't include alignment for the second pass and so if we fail
1770 * allocation due to alignment issues then it is most likely a real
1771 * ENOSPC condition.
1772 *
1773 * XXX(dgc): this calculation is now bogus thanks to the per-ag
1774 * reservations that xfs_alloc_fix_freelist() now does via
1775 * xfs_alloc_space_available(). When the AG fills up, pagf_freeblks will
1776 * be more than large enough for the check below to succeed, but
1777 * xfs_alloc_space_available() will fail because of the non-zero
1778 * metadata reservation and hence we won't actually be able to allocate
1779 * more inodes in this AG. We do soooo much unnecessary work near ENOSPC
1780 * because of this.
1781 */
1782 ineed = M_IGEO(mp)->ialloc_min_blks;
1783 if (flags && ineed > 1)
1784 ineed += M_IGEO(mp)->cluster_align;
1785 longest = pag->pagf_longest;
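/* No recorded longest extent: treat a non-empty AGFL as one free block. */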
1786 if (!longest)
1787 longest = pag->pagf_flcount > 0;
1788 needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);
1789
1790 if (pag->pagf_freeblks < needspace + ineed || longest < ineed)
1791 return false;
1792 return true;
1793 }
1794
1795 static int
1796 xfs_dialloc_try_ag(
1797 struct xfs_perag *pag,
1798 struct xfs_trans **tpp,
1799 xfs_ino_t parent,
1800 xfs_ino_t *new_ino,
1801 bool ok_alloc)
1802 {
1803 struct xfs_buf *agbp;
1804 xfs_ino_t ino;
1805 int error;
1806
1807 /*
1808 * Then read in the AGI buffer and recheck with the AGI buffer
1809 * lock held.
1810 */
1811 error = xfs_ialloc_read_agi(pag, *tpp, 0, &agbp);
1812 if (error)
1813 return error;
1814
1815 if (!pag->pagi_freecount) {
1816 if (!ok_alloc) {
1817 error = -EAGAIN;
1818 goto out_release;
1819 }
1820
1821 error = xfs_ialloc_ag_alloc(pag, *tpp, agbp);
1822 if (error < 0)
1823 goto out_release;
1824
1825 /*
1826 * We successfully allocated space for an inode cluster in this
1827 * AG. Roll the transaction so that we can allocate one of the
1828 * new inodes.
1829 */
1830 ASSERT(pag->pagi_freecount > 0);
1831 error = xfs_dialloc_roll(tpp, agbp);
1832 if (error)
1833 goto out_release;
1834 }
1835
1836 /* Allocate an inode in the found AG */
1837 error = xfs_dialloc_ag(pag, *tpp, agbp, parent, &ino);
1838 if (!error)
1839 *new_ino = ino;
1840 return error;
1841
1842 out_release:
1843 xfs_trans_brelse(*tpp, agbp);
1844 return error;
1845 }
1846
1847 /*
1848 * Allocate an on-disk inode.
1849 *
1850 * Mode is used to tell whether the new inode is a directory and hence where to
1851 * locate it. The on-disk inode that is allocated will be returned in @new_ino
1852 * on success, otherwise an error will be set to indicate the failure (e.g.
1853 * -ENOSPC).
1854 */
1855 int
1856 xfs_dialloc(
1857 struct xfs_trans **tpp,
1858 const struct xfs_icreate_args *args,
1859 xfs_ino_t *new_ino)
1860 {
1861 struct xfs_mount *mp = (*tpp)->t_mountp;
1862 xfs_ino_t parent = args->pip ? args->pip->i_ino : 0;
1863 umode_t mode = args->mode & S_IFMT;
1864 xfs_agnumber_t agno;
1865 int error = 0;
1866 xfs_agnumber_t start_agno;
1867 struct xfs_perag *pag;
1868 struct xfs_ino_geometry *igeo = M_IGEO(mp);
1869 bool ok_alloc = true;
1870 bool low_space = false;
1871 int flags;
1872 xfs_ino_t ino = NULLFSINO;
1873
1874 /*
1875 * Directories, symlinks, and regular files frequently allocate at least
1876 * one block, so factor that potential expansion when we examine whether
1877 * an AG has enough space for file creation.
1878 */
1879 if (S_ISDIR(mode))
1880 start_agno = (atomic_inc_return(&mp->m_agirotor) - 1) %
1881 mp->m_maxagi;
1882 else {
1883 start_agno = XFS_INO_TO_AGNO(mp, parent);
1884 if (start_agno >= mp->m_maxagi)
1885 start_agno = 0;
1886 }
1887
1888 /*
1889 * If we have already hit the ceiling of inode blocks then clear
1890 * ok_alloc so we only scan the existing agi structures for a free
1891 * inode rather than allocating new inode chunks.
1892 *
1893 * Read a rough value of mp->m_icount via percpu_counter_read_positive,
1894 * which sacrifices precision for performance.
1895 */
1896 if (igeo->maxicount &&
1897 percpu_counter_read_positive(&mp->m_icount) + igeo->ialloc_inos
1898 > igeo->maxicount) {
1899 ok_alloc = false;
1900 }
1901
1902 /*
1903 * If we are near ENOSPC, we want to prefer allocation from AGs that
1904 * have free inodes in them rather than use up free space allocating new
1905 * inode chunks. Hence we turn off chunk allocation for the first
1906 * non-blocking pass through the AGs so we consume the free inodes we
1907 * can allocate immediately, and then allow chunk allocation on the
1908 * second pass if we fail to find an AG with free inodes in it.
1909 */
1910 if (percpu_counter_read_positive(&mp->m_fdblocks) <
1911 mp->m_low_space[XFS_LOWSP_1_PCNT]) {
1912 ok_alloc = false;
1913 low_space = true;
1914 }
1915
1916 /*
1917 * Loop until we find an allocation group that either has free inodes
1918 * or in which we can allocate some inodes. Iterate through the
1919 * allocation groups upward, wrapping at the end.
1920 */
1921 flags = XFS_ALLOC_FLAG_TRYLOCK;
1922 retry:
1923 for_each_perag_wrap_at(mp, start_agno, mp->m_maxagi, agno, pag) {
1924 if (xfs_dialloc_good_ag(pag, *tpp, mode, flags, ok_alloc)) {
1925 error = xfs_dialloc_try_ag(pag, tpp, parent,
1926 &ino, ok_alloc);
1927 if (error != -EAGAIN)
1928 break;
1929 error = 0;
1930 }
1931
1932 if (xfs_is_shutdown(mp)) {
1933 error = -EFSCORRUPTED;
1934 break;
1935 }
1936 }
1937 if (pag)
1938 xfs_perag_rele(pag);
1939 if (error)
1940 return error;
1941 if (ino == NULLFSINO) {
1942 if (flags) {
1943 flags = 0;
1944 if (low_space)
1945 ok_alloc = true;
1946 goto retry;
1947 }
1948 return -ENOSPC;
1949 }
1950
1951 /*
1952 * Protect against obviously corrupt allocation btree records. Later
1953 * xfs_iget checks will catch re-allocation of other active in-memory
1954 * and on-disk inodes. If we don't catch reallocating the parent inode
1955 * here we will deadlock in xfs_iget() so we have to do these checks
1956 * first.
1957 */
1958 if (ino == parent || !xfs_verify_dir_ino(mp, ino)) {
1959 xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
1960 xfs_agno_mark_sick(mp, XFS_INO_TO_AGNO(mp, ino),
1961 XFS_SICK_AG_INOBT);
1962 return -EFSCORRUPTED;
1963 }
1964
1965 *new_ino = ino;
1966 return 0;
1967 }
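/*
 * Minimal usage sketch (hypothetical caller, error handling elided). The
 * transaction is passed by reference because allocating a new inode chunk
 * may roll it:
 *
 *	xfs_ino_t	ino;
 *	int		error;
 *
 *	error = xfs_dialloc(&tp, &args, &ino);
 *	if (error)
 *		return error;
 *	// ino is now a free on-disk inode number; initialise the new
 *	// inode against it before committing tp.
 */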
1968
1969 /*
1970 * Free the blocks of an inode chunk. We must consider that the inode chunk
1971 * might be sparse and only free the regions that are allocated as part of the
1972 * chunk.
1973 */
1974 static int
1975 xfs_difree_inode_chunk(
1976 struct xfs_trans *tp,
1977 xfs_agnumber_t agno,
1978 struct xfs_inobt_rec_incore *rec)
1979 {
1980 struct xfs_mount *mp = tp->t_mountp;
1981 xfs_agblock_t sagbno = XFS_AGINO_TO_AGBNO(mp,
1982 rec->ir_startino);
1983 int startidx, endidx;
1984 int nextbit;
1985 xfs_agblock_t agbno;
1986 int contigblk;
1987 DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS);
1988
1989 if (!xfs_inobt_issparse(rec->ir_holemask)) {
1990 /* not sparse, calculate extent info directly */
1991 return xfs_free_extent_later(tp,
1992 XFS_AGB_TO_FSB(mp, agno, sagbno),
1993 M_IGEO(mp)->ialloc_blks, &XFS_RMAP_OINFO_INODES,
1994 XFS_AG_RESV_NONE, 0);
1995 }
1996
1997 /* holemask is only 16-bits (fits in an unsigned long) */
1998 ASSERT(sizeof(rec->ir_holemask) <= sizeof(holemask[0]));
1999 holemask[0] = rec->ir_holemask;
2000
2001 /*
2002 * Find contiguous ranges of zeroes (i.e., allocated regions) in the
2003 * holemask and convert the start/end index of each range to an extent.
2004 * We start with the start and end index both pointing at the first 0 in
2005 * the mask.
2006 */
2007 startidx = endidx = find_first_zero_bit(holemask,
2008 XFS_INOBT_HOLEMASK_BITS);
2009 nextbit = startidx + 1;
2010 while (startidx < XFS_INOBT_HOLEMASK_BITS) {
2011 int error;
2012
2013 nextbit = find_next_zero_bit(holemask, XFS_INOBT_HOLEMASK_BITS,
2014 nextbit);
2015 /*
2016 * If the next zero bit is contiguous, update the end index of
2017 * the current range and continue.
2018 */
2019 if (nextbit != XFS_INOBT_HOLEMASK_BITS &&
2020 nextbit == endidx + 1) {
2021 endidx = nextbit;
2022 goto next;
2023 }
2024
2025 /*
2026 * nextbit is not contiguous with the current end index. Convert
2027 * the current start/end to an extent and add it to the free
2028 * list.
2029 */
2030 agbno = sagbno + (startidx * XFS_INODES_PER_HOLEMASK_BIT) /
2031 mp->m_sb.sb_inopblock;
2032 contigblk = ((endidx - startidx + 1) *
2033 XFS_INODES_PER_HOLEMASK_BIT) /
2034 mp->m_sb.sb_inopblock;
2035
2036 ASSERT(agbno % mp->m_sb.sb_spino_align == 0);
2037 ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
2038 error = xfs_free_extent_later(tp,
2039 XFS_AGB_TO_FSB(mp, agno, agbno), contigblk,
2040 &XFS_RMAP_OINFO_INODES, XFS_AG_RESV_NONE, 0);
2041 if (error)
2042 return error;
2043
2044 /* reset range to current bit and carry on... */
2045 startidx = endidx = nextbit;
2046
2047 next:
2048 nextbit++;
2049 }
2050 return 0;
2051 }
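/*
 * Worked example for the sparse path above, assuming 64-inode chunks,
 * XFS_INODES_PER_HOLEMASK_BIT = 4 and sb_inopblock = 16: a holemask of
 * 0x00ff marks inodes 0-31 as holes, leaving bits 8-15 allocated.
 * find_first_zero_bit() returns startidx = 8, the loop extends endidx to
 * 15, and the single freed extent is
 *
 *	agbno     = sagbno + (8 * 4) / 16 = sagbno + 2
 *	contigblk = (8 * 4) / 16          = 2 blocks
 */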
2052
2053 STATIC int
2054 xfs_difree_inobt(
2055 struct xfs_perag *pag,
2056 struct xfs_trans *tp,
2057 struct xfs_buf *agbp,
2058 xfs_agino_t agino,
2059 struct xfs_icluster *xic,
2060 struct xfs_inobt_rec_incore *orec)
2061 {
2062 struct xfs_mount *mp = pag->pag_mount;
2063 struct xfs_agi *agi = agbp->b_addr;
2064 struct xfs_btree_cur *cur;
2065 struct xfs_inobt_rec_incore rec;
2066 int ilen;
2067 int error;
2068 int i;
2069 int off;
2070
2071 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
2072 ASSERT(XFS_AGINO_TO_AGBNO(mp, agino) < be32_to_cpu(agi->agi_length));
2073
2074 /*
2075 * Initialize the cursor.
2076 */
2077 cur = xfs_inobt_init_cursor(pag, tp, agbp);
2078
2079 error = xfs_check_agi_freecount(cur);
2080 if (error)
2081 goto error0;
2082
2083 /*
2084 * Look for the entry describing this inode.
2085 */
2086 if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) {
2087 xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.",
2088 __func__, error);
2089 goto error0;
2090 }
2091 if (XFS_IS_CORRUPT(mp, i != 1)) {
2092 xfs_btree_mark_sick(cur);
2093 error = -EFSCORRUPTED;
2094 goto error0;
2095 }
2096 error = xfs_inobt_get_rec(cur, &rec, &i);
2097 if (error) {
2098 xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.",
2099 __func__, error);
2100 goto error0;
2101 }
2102 if (XFS_IS_CORRUPT(mp, i != 1)) {
2103 xfs_btree_mark_sick(cur);
2104 error = -EFSCORRUPTED;
2105 goto error0;
2106 }
2107 /*
2108 * Get the offset in the inode chunk.
2109 */
2110 off = agino - rec.ir_startino;
2111 ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK);
2112 ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off)));
2113 /*
2114 * Mark the inode free & increment the count.
2115 */
2116 rec.ir_free |= XFS_INOBT_MASK(off);
2117 rec.ir_freecount++;
2118
2119 /*
2120 * When an inode chunk is free, it becomes eligible for removal. Don't
2121 * remove the chunk if the block size is large enough for multiple inode
2122 * chunks (that might not be free).
2123 */
2124 if (!xfs_has_ikeep(mp) && rec.ir_free == XFS_INOBT_ALL_FREE &&
2125 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
2126 xic->deleted = true;
2127 xic->first_ino = XFS_AGINO_TO_INO(mp, pag->pag_agno,
2128 rec.ir_startino);
2129 xic->alloc = xfs_inobt_irec_to_allocmask(&rec);
2130
2131 /*
2132 * Remove the inode cluster from the AGI B+Tree, adjust the
2133 * AGI and Superblock inode counts, and mark the disk space
2134 * to be freed when the transaction is committed.
2135 */
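/*
 * Note: the inode being freed here was still allocated, so it is not
 * yet reflected in agi_freecount. Removing the whole chunk therefore
 * drops the free counts by only ilen - 1, while the inode counts drop
 * by the full ilen.
 */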
2136 ilen = rec.ir_freecount;
2137 be32_add_cpu(&agi->agi_count, -ilen);
2138 be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
2139 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
2140 pag->pagi_freecount -= ilen - 1;
2141 pag->pagi_count -= ilen;
2142 xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen);
2143 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1));
2144
2145 if ((error = xfs_btree_delete(cur, &i))) {
2146 xfs_warn(mp, "%s: xfs_btree_delete returned error %d.",
2147 __func__, error);
2148 goto error0;
2149 }
2150
2151 error = xfs_difree_inode_chunk(tp, pag->pag_agno, &rec);
2152 if (error)
2153 goto error0;
2154 } else {
2155 xic->deleted = false;
2156
2157 error = xfs_inobt_update(cur, &rec);
2158 if (error) {
2159 xfs_warn(mp, "%s: xfs_inobt_update returned error %d.",
2160 __func__, error);
2161 goto error0;
2162 }
2163
2164 /*
2165 * Change the inode free counts and log the ag/sb changes.
2166 */
2167 be32_add_cpu(&agi->agi_freecount, 1);
2168 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
2169 pag->pagi_freecount++;
2170 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
2171 }
2172
2173 error = xfs_check_agi_freecount(cur);
2174 if (error)
2175 goto error0;
2176
2177 *orec = rec;
2178 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
2179 return 0;
2180
2181 error0:
2182 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
2183 return error;
2184 }
2185
2186 /*
2187 * Free an inode in the free inode btree.
2188 */
2189 STATIC int
2190 xfs_difree_finobt(
2191 struct xfs_perag *pag,
2192 struct xfs_trans *tp,
2193 struct xfs_buf *agbp,
2194 xfs_agino_t agino,
2195 struct xfs_inobt_rec_incore *ibtrec) /* inobt record */
2196 {
2197 struct xfs_mount *mp = pag->pag_mount;
2198 struct xfs_btree_cur *cur;
2199 struct xfs_inobt_rec_incore rec;
2200 int offset = agino - ibtrec->ir_startino;
2201 int error;
2202 int i;
2203
2204 cur = xfs_finobt_init_cursor(pag, tp, agbp);
2205
2206 error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i);
2207 if (error)
2208 goto error;
2209 if (i == 0) {
2210 /*
2211 * If the record does not exist in the finobt, we must have just
2212 * freed an inode in a previously fully allocated chunk. If not,
2213 * something is out of sync.
2214 */
2215 if (XFS_IS_CORRUPT(mp, ibtrec->ir_freecount != 1)) {
2216 xfs_btree_mark_sick(cur);
2217 error = -EFSCORRUPTED;
2218 goto error;
2219 }
2220
2221 error = xfs_inobt_insert_rec(cur, ibtrec->ir_holemask,
2222 ibtrec->ir_count,
2223 ibtrec->ir_freecount,
2224 ibtrec->ir_free, &i);
2225 if (error)
2226 goto error;
2227 ASSERT(i == 1);
2228
2229 goto out;
2230 }
2231
2232 /*
2233 * Read and update the existing record. We could just copy the ibtrec
2234 * across here, but that would defeat the purpose of having redundant
2235 * metadata. By making the modifications independently, we can catch
2236 * corruptions that we wouldn't see if we just copied from one record
2237 * to another.
2238 */
2239 error = xfs_inobt_get_rec(cur, &rec, &i);
2240 if (error)
2241 goto error;
2242 if (XFS_IS_CORRUPT(mp, i != 1)) {
2243 xfs_btree_mark_sick(cur);
2244 error = -EFSCORRUPTED;
2245 goto error;
2246 }
2247
2248 rec.ir_free |= XFS_INOBT_MASK(offset);
2249 rec.ir_freecount++;
2250
2251 if (XFS_IS_CORRUPT(mp,
2252 rec.ir_free != ibtrec->ir_free ||
2253 rec.ir_freecount != ibtrec->ir_freecount)) {
2254 xfs_btree_mark_sick(cur);
2255 error = -EFSCORRUPTED;
2256 goto error;
2257 }
2258
2259 /*
2260 * The content of inobt records should always match between the inobt
2261 * and finobt. The lifecycle of records in the finobt is different from
2262 * the inobt in that the finobt only tracks records with at least one
2263 * free inode. Hence, if all of the inodes are free and we aren't
2264 * keeping inode chunks permanently on disk, remove the record.
2265 * Otherwise, update the record with the new information.
2266 *
2267 * Note that we currently can't free chunks when the block size is large
2268 * enough for multiple chunks. Leave the finobt record to remain in sync
2269 * with the inobt.
2270 */
2271 if (!xfs_has_ikeep(mp) && rec.ir_free == XFS_INOBT_ALL_FREE &&
2272 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
2273 error = xfs_btree_delete(cur, &i);
2274 if (error)
2275 goto error;
2276 ASSERT(i == 1);
2277 } else {
2278 error = xfs_inobt_update(cur, &rec);
2279 if (error)
2280 goto error;
2281 }
2282
2283 out:
2284 error = xfs_check_agi_freecount(cur);
2285 if (error)
2286 goto error;
2287
2288 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
2289 return 0;
2290
2291 error:
2292 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
2293 return error;
2294 }
2295
2296 /*
2297 * Free a disk inode. This carefully avoids touching the incore inode;
2298 * all incore manipulations are the caller's responsibility.
2299 * The on-disk inode is not changed by this operation; only the
2300 * btree (free inode mask) is changed.
2301 */
2302 int
2303 xfs_difree(
2304 struct xfs_trans *tp,
2305 struct xfs_perag *pag,
2306 xfs_ino_t inode,
2307 struct xfs_icluster *xic)
2308 {
2309 /* REFERENCED */
2310 xfs_agblock_t agbno; /* block number containing inode */
2311 struct xfs_buf *agbp; /* buffer for allocation group header */
2312 xfs_agino_t agino; /* allocation group inode number */
2313 int error; /* error return value */
2314 struct xfs_mount *mp = tp->t_mountp;
2315 struct xfs_inobt_rec_incore rec;/* btree record */
2316
2317 /*
2318 * Break up inode number into its components.
2319 */
2320 if (pag->pag_agno != XFS_INO_TO_AGNO(mp, inode)) {
2321 xfs_warn(mp, "%s: agno != pag->pag_agno (%d != %d).",
2322 __func__, XFS_INO_TO_AGNO(mp, inode), pag->pag_agno);
2323 ASSERT(0);
2324 return -EINVAL;
2325 }
2326 agino = XFS_INO_TO_AGINO(mp, inode);
2327 if (inode != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
2328 xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).",
2329 __func__, (unsigned long long)inode,
2330 (unsigned long long)XFS_AGINO_TO_INO(mp, pag->pag_agno, agino));
2331 ASSERT(0);
2332 return -EINVAL;
2333 }
2334 agbno = XFS_AGINO_TO_AGBNO(mp, agino);
2335 if (agbno >= mp->m_sb.sb_agblocks) {
2336 xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).",
2337 __func__, agbno, mp->m_sb.sb_agblocks);
2338 ASSERT(0);
2339 return -EINVAL;
2340 }
2341 /*
2342 * Get the allocation group header.
2343 */
2344 error = xfs_ialloc_read_agi(pag, tp, 0, &agbp);
2345 if (error) {
2346 xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.",
2347 __func__, error);
2348 return error;
2349 }
2350
2351 /*
2352 * Fix up the inode allocation btree.
2353 */
2354 error = xfs_difree_inobt(pag, tp, agbp, agino, xic, &rec);
2355 if (error)
2356 goto error0;
2357
2358 /*
2359 * Fix up the free inode btree.
2360 */
2361 if (xfs_has_finobt(mp)) {
2362 error = xfs_difree_finobt(pag, tp, agbp, agino, &rec);
2363 if (error)
2364 goto error0;
2365 }
2366
2367 return 0;
2368
2369 error0:
2370 return error;
2371 }
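/*
 * The checks above simply invert the decomposition macros. With an
 * illustrative geometry of 16 inodes per block (sb_inopblog = 4) and
 * sb_agblklog = 20, inode number 0x2400042 splits as
 *
 *	agno   = ino >> (4 + 20) = 2
 *	agino  = ino & 0xffffff  = 0x400042
 *	agbno  = agino >> 4      = 0x40004
 *	offset = agino & 15      = 2 (inode slot within the block)
 */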
2372
2373 STATIC int
2374 xfs_imap_lookup(
2375 struct xfs_perag *pag,
2376 struct xfs_trans *tp,
2377 xfs_agino_t agino,
2378 xfs_agblock_t agbno,
2379 xfs_agblock_t *chunk_agbno,
2380 xfs_agblock_t *offset_agbno,
2381 int flags)
2382 {
2383 struct xfs_mount *mp = pag->pag_mount;
2384 struct xfs_inobt_rec_incore rec;
2385 struct xfs_btree_cur *cur;
2386 struct xfs_buf *agbp;
2387 int error;
2388 int i;
2389
2390 error = xfs_ialloc_read_agi(pag, tp, 0, &agbp);
2391 if (error) {
2392 xfs_alert(mp,
2393 "%s: xfs_ialloc_read_agi() returned error %d, agno %d",
2394 __func__, error, pag->pag_agno);
2395 return error;
2396 }
2397
2398 /*
2399 * Lookup the inode record for the given agino. If the record cannot be
2400 * found, then it's an invalid inode number and we should abort. Once
2401 * we have a record, we need to ensure it contains the inode number
2402 * we are looking up.
2403 */
2404 cur = xfs_inobt_init_cursor(pag, tp, agbp);
2405 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
2406 if (!error) {
2407 if (i)
2408 error = xfs_inobt_get_rec(cur, &rec, &i);
2409 if (!error && i == 0)
2410 error = -EINVAL;
2411 }
2412
2413 xfs_trans_brelse(tp, agbp);
2414 xfs_btree_del_cursor(cur, error);
2415 if (error)
2416 return error;
2417
2418 /* check that the returned record contains the required inode */
2419 if (rec.ir_startino > agino ||
2420 rec.ir_startino + M_IGEO(mp)->ialloc_inos <= agino)
2421 return -EINVAL;
2422
2423 /* for untrusted inodes check it is allocated first */
2424 if ((flags & XFS_IGET_UNTRUSTED) &&
2425 (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
2426 return -EINVAL;
2427
2428 *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino);
2429 *offset_agbno = agbno - *chunk_agbno;
2430 return 0;
2431 }
2432
2433 /*
2434 * Return the location of the inode in imap, for mapping it into a buffer.
2435 */
2436 int
2437 xfs_imap(
2438 struct xfs_perag *pag,
2439 struct xfs_trans *tp,
2440 xfs_ino_t ino, /* inode to locate */
2441 struct xfs_imap *imap, /* location map structure */
2442 uint flags) /* flags for inode btree lookup */
2443 {
2444 struct xfs_mount *mp = pag->pag_mount;
2445 xfs_agblock_t agbno; /* block number of inode in the alloc group */
2446 xfs_agino_t agino; /* inode number within alloc group */
2447 xfs_agblock_t chunk_agbno; /* first block in inode chunk */
2448 xfs_agblock_t cluster_agbno; /* first block in inode cluster */
2449 int error; /* error code */
2450 int offset; /* index of inode in its buffer */
2451 xfs_agblock_t offset_agbno; /* blks from chunk start to inode */
2452
2453 ASSERT(ino != NULLFSINO);
2454
2455 /*
2456 * Split up the inode number into its parts.
2457 */
2458 agino = XFS_INO_TO_AGINO(mp, ino);
2459 agbno = XFS_AGINO_TO_AGBNO(mp, agino);
2460 if (agbno >= mp->m_sb.sb_agblocks ||
2461 ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
2462 error = -EINVAL;
2463 #ifdef DEBUG
2464 /*
2465 * Don't output diagnostic information for untrusted inodes
2466 * as they can be invalid without implying corruption.
2467 */
2468 if (flags & XFS_IGET_UNTRUSTED)
2469 return error;
2470 if (agbno >= mp->m_sb.sb_agblocks) {
2471 xfs_alert(mp,
2472 "%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)",
2473 __func__, (unsigned long long)agbno,
2474 (unsigned long)mp->m_sb.sb_agblocks);
2475 }
2476 if (ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
2477 xfs_alert(mp,
2478 "%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)",
2479 __func__, ino,
2480 XFS_AGINO_TO_INO(mp, pag->pag_agno, agino));
2481 }
2482 xfs_stack_trace();
2483 #endif /* DEBUG */
2484 return error;
2485 }
2486
2487 /*
2488 * For bulkstat and handle lookups, we have an untrusted inode number
2489 * that we have to verify is valid. We cannot do this just by reading
2490 * the inode buffer as it may have been unlinked and removed leaving
2491 * inodes in stale state on disk. Hence we have to do a btree lookup
2492 * in all cases where an untrusted inode number is passed.
2493 */
2494 if (flags & XFS_IGET_UNTRUSTED) {
2495 error = xfs_imap_lookup(pag, tp, agino, agbno,
2496 &chunk_agbno, &offset_agbno, flags);
2497 if (error)
2498 return error;
2499 goto out_map;
2500 }
2501
2502 /*
2503 * If the inode cluster size is the same as the blocksize or
2504 * smaller, we can get to the buffer with simple arithmetic.
2505 */
2506 if (M_IGEO(mp)->blocks_per_cluster == 1) {
2507 offset = XFS_INO_TO_OFFSET(mp, ino);
2508 ASSERT(offset < mp->m_sb.sb_inopblock);
2509
2510 imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, agbno);
2511 imap->im_len = XFS_FSB_TO_BB(mp, 1);
2512 imap->im_boffset = (unsigned short)(offset <<
2513 mp->m_sb.sb_inodelog);
2514 return 0;
2515 }
2516
2517 /*
2518 * If the inode chunks are aligned then use simple arithmetic to
2519 * find the location. Otherwise we have to do a btree
2520 * lookup to find it.
2521 */
2522 if (M_IGEO(mp)->inoalign_mask) {
2523 offset_agbno = agbno & M_IGEO(mp)->inoalign_mask;
2524 chunk_agbno = agbno - offset_agbno;
2525 } else {
2526 error = xfs_imap_lookup(pag, tp, agino, agbno,
2527 &chunk_agbno, &offset_agbno, flags);
2528 if (error)
2529 return error;
2530 }
2531
2532 out_map:
2533 ASSERT(agbno >= chunk_agbno);
2534 cluster_agbno = chunk_agbno +
2535 ((offset_agbno / M_IGEO(mp)->blocks_per_cluster) *
2536 M_IGEO(mp)->blocks_per_cluster);
2537 offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
2538 XFS_INO_TO_OFFSET(mp, ino);
2539
2540 imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, cluster_agbno);
2541 imap->im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
2542 imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog);
2543
2544 /*
2545 * If the inode number maps to a block outside the bounds
2546 * of the file system then return an error rather than calling
2547 * read_buf and panicking when we get an error from the
2548 * driver.
2549 */
2550 if ((imap->im_blkno + imap->im_len) >
2551 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
2552 xfs_alert(mp,
2553 "%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)",
2554 __func__, (unsigned long long) imap->im_blkno,
2555 (unsigned long long) imap->im_len,
2556 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
2557 return -EINVAL;
2558 }
2559 return 0;
2560 }
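/*
 * Worked example for the mapping above, assuming blocks_per_cluster = 4,
 * sb_inopblock = 16, chunk_agbno = 100 and agbno = 106: offset_agbno is
 * 6, so the inode sits in the second cluster of the chunk:
 *
 *	cluster_agbno = 100 + (6 / 4) * 4 = 104
 *	offset        = (106 - 104) * 16 + XFS_INO_TO_OFFSET(mp, ino)
 *
 * im_blkno is then the daddr of block 104, and im_boffset is offset
 * shifted into bytes within the four-block cluster buffer.
 */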
2561
2562 /*
2563 * Log specified fields for the ag hdr (inode section). The growth of the agi
2564 * structure over time requires that we interpret the buffer as two logical
2565 * regions delineated by the end of the unlinked list. This is due to the size
2566 * of the hash table and its location in the middle of the agi.
2567 *
2568 * For example, a request to log a field before agi_unlinked and a field after
2569 * agi_unlinked could cause us to log the entire hash table and use an excessive
2570 * amount of log space. To avoid this behavior, log the region up through
2571 * agi_unlinked in one call and the region after agi_unlinked through the end of
2572 * the structure in another.
2573 */
2574 void
2575 xfs_ialloc_log_agi(
2576 struct xfs_trans *tp,
2577 struct xfs_buf *bp,
2578 uint32_t fields)
2579 {
2580 int first; /* first byte number */
2581 int last; /* last byte number */
2582 static const short offsets[] = { /* field starting offsets */
2583 /* keep in sync with bit definitions */
2584 offsetof(xfs_agi_t, agi_magicnum),
2585 offsetof(xfs_agi_t, agi_versionnum),
2586 offsetof(xfs_agi_t, agi_seqno),
2587 offsetof(xfs_agi_t, agi_length),
2588 offsetof(xfs_agi_t, agi_count),
2589 offsetof(xfs_agi_t, agi_root),
2590 offsetof(xfs_agi_t, agi_level),
2591 offsetof(xfs_agi_t, agi_freecount),
2592 offsetof(xfs_agi_t, agi_newino),
2593 offsetof(xfs_agi_t, agi_dirino),
2594 offsetof(xfs_agi_t, agi_unlinked),
2595 offsetof(xfs_agi_t, agi_free_root),
2596 offsetof(xfs_agi_t, agi_free_level),
2597 offsetof(xfs_agi_t, agi_iblocks),
2598 sizeof(xfs_agi_t)
2599 };
2600 #ifdef DEBUG
2601 struct xfs_agi *agi = bp->b_addr;
2602
2603 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
2604 #endif
2605
2606 /*
2607 * Compute byte offsets for the first and last fields in the first
2608 * region and log the agi buffer. This only logs up through
2609 * agi_unlinked.
2610 */
2611 if (fields & XFS_AGI_ALL_BITS_R1) {
2612 xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R1,
2613 &first, &last);
2614 xfs_trans_log_buf(tp, bp, first, last);
2615 }
2616
2617 /*
2618 * Mask off the bits in the first region and calculate the first and
2619 * last field offsets for any bits in the second region.
2620 */
2621 fields &= ~XFS_AGI_ALL_BITS_R1;
2622 if (fields) {
2623 xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R2,
2624 &first, &last);
2625 xfs_trans_log_buf(tp, bp, first, last);
2626 }
2627 }
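/*
 * For example, logging XFS_AGI_COUNT | XFS_AGI_FREE_ROOT dirties two
 * small byte ranges, one on each side of the unlinked hash table,
 * instead of one huge range spanning it:
 *
 *	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREE_ROOT);
 */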
2628
2629 static xfs_failaddr_t
2630 xfs_agi_verify(
2631 struct xfs_buf *bp)
2632 {
2633 struct xfs_mount *mp = bp->b_mount;
2634 struct xfs_agi *agi = bp->b_addr;
2635 xfs_failaddr_t fa;
2636 uint32_t agi_seqno = be32_to_cpu(agi->agi_seqno);
2637 uint32_t agi_length = be32_to_cpu(agi->agi_length);
2638 int i;
2639
2640 if (xfs_has_crc(mp)) {
2641 if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid))
2642 return __this_address;
2643 if (!xfs_log_check_lsn(mp, be64_to_cpu(agi->agi_lsn)))
2644 return __this_address;
2645 }
2646
2647 /*
2648 * Validate the magic number of the agi block.
2649 */
2650 if (!xfs_verify_magic(bp, agi->agi_magicnum))
2651 return __this_address;
2652 if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)))
2653 return __this_address;
2654
2655 fa = xfs_validate_ag_length(bp, agi_seqno, agi_length);
2656 if (fa)
2657 return fa;
2658
2659 if (be32_to_cpu(agi->agi_level) < 1 ||
2660 be32_to_cpu(agi->agi_level) > M_IGEO(mp)->inobt_maxlevels)
2661 return __this_address;
2662
2663 if (xfs_has_finobt(mp) &&
2664 (be32_to_cpu(agi->agi_free_level) < 1 ||
2665 be32_to_cpu(agi->agi_free_level) > M_IGEO(mp)->inobt_maxlevels))
2666 return __this_address;
2667
2668 for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
2669 if (agi->agi_unlinked[i] == cpu_to_be32(NULLAGINO))
2670 continue;
2671 if (!xfs_verify_ino(mp, be32_to_cpu(agi->agi_unlinked[i])))
2672 return __this_address;
2673 }
2674
2675 return NULL;
2676 }
2677
2678 static void
2679 xfs_agi_read_verify(
2680 struct xfs_buf *bp)
2681 {
2682 struct xfs_mount *mp = bp->b_mount;
2683 xfs_failaddr_t fa;
2684
2685 if (xfs_has_crc(mp) &&
2686 !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF))
2687 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
2688 else {
2689 fa = xfs_agi_verify(bp);
2690 if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_IALLOC_READ_AGI))
2691 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2692 }
2693 }
2694
2695 static void
2696 xfs_agi_write_verify(
2697 struct xfs_buf *bp)
2698 {
2699 struct xfs_mount *mp = bp->b_mount;
2700 struct xfs_buf_log_item *bip = bp->b_log_item;
2701 struct xfs_agi *agi = bp->b_addr;
2702 xfs_failaddr_t fa;
2703
2704 fa = xfs_agi_verify(bp);
2705 if (fa) {
2706 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2707 return;
2708 }
2709
2710 if (!xfs_has_crc(mp))
2711 return;
2712
2713 if (bip)
2714 agi->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn);
2715 xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF);
2716 }
2717
2718 const struct xfs_buf_ops xfs_agi_buf_ops = {
2719 .name = "xfs_agi",
2720 .magic = { cpu_to_be32(XFS_AGI_MAGIC), cpu_to_be32(XFS_AGI_MAGIC) },
2721 .verify_read = xfs_agi_read_verify,
2722 .verify_write = xfs_agi_write_verify,
2723 .verify_struct = xfs_agi_verify,
2724 };
2725
2726 /*
2727 * Read in the allocation group header (inode allocation section)
2728 */
2729 int
2730 xfs_read_agi(
2731 struct xfs_perag *pag,
2732 struct xfs_trans *tp,
2733 xfs_buf_flags_t flags,
2734 struct xfs_buf **agibpp)
2735 {
2736 struct xfs_mount *mp = pag->pag_mount;
2737 int error;
2738
2739 trace_xfs_read_agi(pag->pag_mount, pag->pag_agno);
2740
2741 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
2742 XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGI_DADDR(mp)),
2743 XFS_FSS_TO_BB(mp, 1), flags, agibpp, &xfs_agi_buf_ops);
2744 if (xfs_metadata_is_sick(error))
2745 xfs_ag_mark_sick(pag, XFS_SICK_AG_AGI);
2746 if (error)
2747 return error;
2748 if (tp)
2749 xfs_trans_buf_set_type(tp, *agibpp, XFS_BLFT_AGI_BUF);
2750
2751 xfs_buf_set_ref(*agibpp, XFS_AGI_REF);
2752 return 0;
2753 }
2754
2755 /*
2756 * Read in the agi and initialise the per-ag data. If the caller supplies
2757 * @agibpp, return the locked AGI buffer to them; otherwise release it.
2758 */
2759 int
2760 xfs_ialloc_read_agi(
2761 struct xfs_perag *pag,
2762 struct xfs_trans *tp,
2763 int flags,
2764 struct xfs_buf **agibpp)
2765 {
2766 struct xfs_buf *agibp;
2767 struct xfs_agi *agi;
2768 int error;
2769
2770 trace_xfs_ialloc_read_agi(pag->pag_mount, pag->pag_agno);
2771
2772 error = xfs_read_agi(pag, tp,
2773 (flags & XFS_IALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
2774 &agibp);
2775 if (error)
2776 return error;
2777
2778 agi = agibp->b_addr;
2779 if (!xfs_perag_initialised_agi(pag)) {
2780 pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
2781 pag->pagi_count = be32_to_cpu(agi->agi_count);
2782 set_bit(XFS_AGSTATE_AGI_INIT, &pag->pag_opstate);
2783 }
2784
2785 /*
2786 * It's possible for these to be out of sync if
2787 * we are in the middle of a forced shutdown.
2788 */
2789 ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
2790 xfs_is_shutdown(pag->pag_mount));
2791 if (agibpp)
2792 *agibpp = agibp;
2793 else
2794 xfs_trans_brelse(tp, agibp);
2795 return 0;
2796 }
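/*
 * Callers that only need the perag counters primed can pass a NULL
 * @agibpp, e.g.:
 *
 *	error = xfs_ialloc_read_agi(pag, tp, 0, NULL);
 *
 * which reads the AGI, initialises pagi_freecount/pagi_count and then
 * releases the buffer again.
 */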
2797
2798 /* How many inodes are backed by inode clusters ondisk? */
2799 STATIC int
2800 xfs_ialloc_count_ondisk(
2801 struct xfs_btree_cur *cur,
2802 xfs_agino_t low,
2803 xfs_agino_t high,
2804 unsigned int *allocated)
2805 {
2806 struct xfs_inobt_rec_incore irec;
2807 unsigned int ret = 0;
2808 int has_record;
2809 int error;
2810
2811 error = xfs_inobt_lookup(cur, low, XFS_LOOKUP_LE, &has_record);
2812 if (error)
2813 return error;
2814
2815 while (has_record) {
2816 unsigned int i, hole_idx;
2817
2818 error = xfs_inobt_get_rec(cur, &irec, &has_record);
2819 if (error)
2820 return error;
2821 if (irec.ir_startino > high)
2822 break;
2823
2824 for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
2825 if (irec.ir_startino + i < low)
2826 continue;
2827 if (irec.ir_startino + i > high)
2828 break;
2829
2830 hole_idx = i / XFS_INODES_PER_HOLEMASK_BIT;
2831 if (!(irec.ir_holemask & (1U << hole_idx)))
2832 ret++;
2833 }
2834
2835 error = xfs_btree_increment(cur, 0, &has_record);
2836 if (error)
2837 return error;
2838 }
2839
2840 *allocated = ret;
2841 return 0;
2842 }
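/*
 * Each holemask bit in the loop above governs XFS_INODES_PER_HOLEMASK_BIT
 * consecutive inodes (four, with 64-inode chunks and a 16-bit mask). For
 * example, chunk-relative inode i = 13 maps to hole_idx = 13 / 4 = 3 and
 * is counted as allocated only if bit 3 of ir_holemask is clear.
 */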
2843
2844 /* Is there an inode record covering a given extent? */
2845 int
2846 xfs_ialloc_has_inodes_at_extent(
2847 struct xfs_btree_cur *cur,
2848 xfs_agblock_t bno,
2849 xfs_extlen_t len,
2850 enum xbtree_recpacking *outcome)
2851 {
2852 xfs_agino_t agino;
2853 xfs_agino_t last_agino;
2854 unsigned int allocated;
2855 int error;
2856
2857 agino = XFS_AGB_TO_AGINO(cur->bc_mp, bno);
2858 last_agino = XFS_AGB_TO_AGINO(cur->bc_mp, bno + len) - 1;
2859
2860 error = xfs_ialloc_count_ondisk(cur, agino, last_agino, &allocated);
2861 if (error)
2862 return error;
2863
2864 if (allocated == 0)
2865 *outcome = XBTREE_RECPACKING_EMPTY;
2866 else if (allocated == last_agino - agino + 1)
2867 *outcome = XBTREE_RECPACKING_FULL;
2868 else
2869 *outcome = XBTREE_RECPACKING_SPARSE;
2870 return 0;
2871 }
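/*
 * For example, with 8 inodes per block a 2-block extent covers 16 inode
 * numbers: allocated == 0 reports XBTREE_RECPACKING_EMPTY, allocated == 16
 * reports XBTREE_RECPACKING_FULL, and anything in between reports
 * XBTREE_RECPACKING_SPARSE.
 */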
2872
2873 struct xfs_ialloc_count_inodes {
2874 xfs_agino_t count;
2875 xfs_agino_t freecount;
2876 };
2877
2878 /* Record inode counts across all inobt records. */
2879 STATIC int
2880 xfs_ialloc_count_inodes_rec(
2881 struct xfs_btree_cur *cur,
2882 const union xfs_btree_rec *rec,
2883 void *priv)
2884 {
2885 struct xfs_inobt_rec_incore irec;
2886 struct xfs_ialloc_count_inodes *ci = priv;
2887 xfs_failaddr_t fa;
2888
2889 xfs_inobt_btrec_to_irec(cur->bc_mp, rec, &irec);
2890 fa = xfs_inobt_check_irec(cur->bc_ag.pag, &irec);
2891 if (fa)
2892 return xfs_inobt_complain_bad_rec(cur, fa, &irec);
2893
2894 ci->count += irec.ir_count;
2895 ci->freecount += irec.ir_freecount;
2896
2897 return 0;
2898 }
2899
2900 /* Count allocated and free inodes under an inobt. */
2901 int
2902 xfs_ialloc_count_inodes(
2903 struct xfs_btree_cur *cur,
2904 xfs_agino_t *count,
2905 xfs_agino_t *freecount)
2906 {
2907 struct xfs_ialloc_count_inodes ci = {0};
2908 int error;
2909
2910 ASSERT(xfs_btree_is_ino(cur->bc_ops));
2911 error = xfs_btree_query_all(cur, xfs_ialloc_count_inodes_rec, &ci);
2912 if (error)
2913 return error;
2914
2915 *count = ci.count;
2916 *freecount = ci.freecount;
2917 return 0;
2918 }
2919
2920 /*
2921 * Initialize inode-related geometry information.
2922 *
2923 * Compute the inode btree min and max levels and set maxicount.
2924 *
2925 * Set the inode cluster size. This may still be overridden by the file
2926 * system block size if it is larger than the chosen cluster size.
2927 *
2928 * For v5 filesystems, scale the cluster size with the inode size to keep a
2929 * constant ratio of inodes per cluster buffer, but only if mkfs has set the
2930 * inode alignment value appropriately for larger cluster sizes.
2931 *
2932 * Then compute the inode cluster alignment information.
2933 */
2934 void
2935 xfs_ialloc_setup_geometry(
2936 struct xfs_mount *mp)
2937 {
2938 struct xfs_sb *sbp = &mp->m_sb;
2939 struct xfs_ino_geometry *igeo = M_IGEO(mp);
2940 uint64_t icount;
2941 uint inodes;
2942
2943 igeo->new_diflags2 = 0;
2944 if (xfs_has_bigtime(mp))
2945 igeo->new_diflags2 |= XFS_DIFLAG2_BIGTIME;
2946 if (xfs_has_large_extent_counts(mp))
2947 igeo->new_diflags2 |= XFS_DIFLAG2_NREXT64;
2948
2949 /* Compute inode btree geometry. */
2950 igeo->agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
2951 igeo->inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, true);
2952 igeo->inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, false);
2953 igeo->inobt_mnr[0] = igeo->inobt_mxr[0] / 2;
2954 igeo->inobt_mnr[1] = igeo->inobt_mxr[1] / 2;
2955
2956 igeo->ialloc_inos = max_t(uint16_t, XFS_INODES_PER_CHUNK,
2957 sbp->sb_inopblock);
2958 igeo->ialloc_blks = igeo->ialloc_inos >> sbp->sb_inopblog;
2959
2960 if (sbp->sb_spino_align)
2961 igeo->ialloc_min_blks = sbp->sb_spino_align;
2962 else
2963 igeo->ialloc_min_blks = igeo->ialloc_blks;
2964
2965 /* Compute and fill in value of m_ino_geo.inobt_maxlevels. */
2966 inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG;
2967 igeo->inobt_maxlevels = xfs_btree_compute_maxlevels(igeo->inobt_mnr,
2968 inodes);
2969 ASSERT(igeo->inobt_maxlevels <= xfs_iallocbt_maxlevels_ondisk());
2970
2971 /*
2972 * Set the maximum inode count for this filesystem, being careful not
2973 * to use obviously garbage sb_inopblog/sb_inopblock values. Regular
2974 * users should never get here due to failing sb verification, but
2975 * certain users (xfs_db) need to be usable even with corrupt metadata.
2976 */
2977 if (sbp->sb_imax_pct && igeo->ialloc_blks) {
2978 /*
2979 * Make sure the maximum inode count is a multiple
2980 * of the units we allocate inodes in.
2981 */
2982 icount = sbp->sb_dblocks * sbp->sb_imax_pct;
2983 do_div(icount, 100);
2984 do_div(icount, igeo->ialloc_blks);
2985 igeo->maxicount = XFS_FSB_TO_INO(mp,
2986 icount * igeo->ialloc_blks);
2987 } else {
2988 igeo->maxicount = 0;
2989 }
2990
2991 /*
2992 * Compute the desired size of an inode cluster buffer size, which
2993 * starts at 8K and (on v5 filesystems) scales up with larger inode
2994 * sizes.
2995 *
2996 * Preserve the desired inode cluster size because the sparse inodes
2997 * feature uses that desired size (not the actual size) to compute the
2998 * sparse inode alignment. The mount code validates this value, so we
2999 * cannot change the behavior.
3000 */
3001 igeo->inode_cluster_size_raw = XFS_INODE_BIG_CLUSTER_SIZE;
3002 if (xfs_has_v3inodes(mp)) {
3003 int new_size = igeo->inode_cluster_size_raw;
3004
3005 new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
3006 if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
3007 igeo->inode_cluster_size_raw = new_size;
3008 }
3009
3010 /* Calculate inode cluster ratios. */
3011 if (igeo->inode_cluster_size_raw > mp->m_sb.sb_blocksize)
3012 igeo->blocks_per_cluster = XFS_B_TO_FSBT(mp,
3013 igeo->inode_cluster_size_raw);
3014 else
3015 igeo->blocks_per_cluster = 1;
3016 igeo->inode_cluster_size = XFS_FSB_TO_B(mp, igeo->blocks_per_cluster);
3017 igeo->inodes_per_cluster = XFS_FSB_TO_INO(mp, igeo->blocks_per_cluster);
3018
3019 /* Calculate inode cluster alignment. */
3020 if (xfs_has_align(mp) &&
3021 mp->m_sb.sb_inoalignmt >= igeo->blocks_per_cluster)
3022 igeo->cluster_align = mp->m_sb.sb_inoalignmt;
3023 else
3024 igeo->cluster_align = 1;
3025 igeo->inoalign_mask = igeo->cluster_align - 1;
3026 igeo->cluster_align_inodes = XFS_FSB_TO_INO(mp, igeo->cluster_align);
3027
3028 /*
3029 * If we are using stripe alignment, check whether
3030 * the stripe unit is a multiple of the inode alignment
3031 */
3032 if (mp->m_dalign && igeo->inoalign_mask &&
3033 !(mp->m_dalign & igeo->inoalign_mask))
3034 igeo->ialloc_align = mp->m_dalign;
3035 else
3036 igeo->ialloc_align = 0;
3037
3038 if (mp->m_sb.sb_blocksize > PAGE_SIZE)
3039 igeo->min_folio_order = mp->m_sb.sb_blocklog - PAGE_SHIFT;
3040 else
3041 igeo->min_folio_order = 0;
3042 }
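/*
 * Illustrative geometry (assumed values): a v5 filesystem with 4096-byte
 * blocks, 512-byte inodes and sb_inoalignmt = 8 works out as
 *
 *	sb_inopblock       = 4096 / 512       = 8
 *	ialloc_inos        = max(64, 8)       = 64 inodes per chunk
 *	ialloc_blks        = 64 / 8           = 8 blocks per chunk
 *	raw cluster size   = 8192 * (512/256) = 16384 bytes
 *	blocks_per_cluster = 16384 / 4096     = 4
 *	inodes_per_cluster = 4 * 8            = 32
 *	cluster_align      = 8 (sb_inoalignmt >= blocks_per_cluster)
 */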
3043
3044 /* Compute the location of the root directory inode that is laid out by mkfs. */
3045 xfs_ino_t
3046 xfs_ialloc_calc_rootino(
3047 struct xfs_mount *mp,
3048 int sunit)
3049 {
3050 struct xfs_ino_geometry *igeo = M_IGEO(mp);
3051 xfs_agblock_t first_bno;
3052
3053 /*
3054 * Pre-calculate the geometry of AG 0. We know what it looks like
3055 * because libxfs knows how to create allocation groups now.
3056 *
3057 * first_bno is the first block in which mkfs could possibly have
3058 * allocated the root directory inode, once we factor in the metadata
3059 * that mkfs formats before it. Namely, the four AG headers...
3060 */
3061 first_bno = howmany(4 * mp->m_sb.sb_sectsize, mp->m_sb.sb_blocksize);
3062
3063 /* ...the two free space btree roots... */
3064 first_bno += 2;
3065
3066 /* ...the inode btree root... */
3067 first_bno += 1;
3068
3069 /* ...the initial AGFL... */
3070 first_bno += xfs_alloc_min_freelist(mp, NULL);
3071
3072 /* ...the free inode btree root... */
3073 if (xfs_has_finobt(mp))
3074 first_bno++;
3075
3076 /* ...the reverse mapping btree root... */
3077 if (xfs_has_rmapbt(mp))
3078 first_bno++;
3079
3080 /* ...the reference count btree... */
3081 if (xfs_has_reflink(mp))
3082 first_bno++;
3083
3084 /*
3085 * ...and the log, if it is allocated in the first allocation group.
3086 *
3087 * This can happen with filesystems that only have a single
3088 * allocation group, or very odd geometries created by old mkfs
3089 * versions on very small filesystems.
3090 */
3091 if (xfs_ag_contains_log(mp, 0))
3092 first_bno += mp->m_sb.sb_logblocks;
3093
3094 /*
3095 * Now round first_bno up to whatever allocation alignment is given
3096 * by the filesystem or was passed in.
3097 */
3098 if (xfs_has_dalign(mp) && igeo->ialloc_align > 0)
3099 first_bno = roundup(first_bno, sunit);
3100 else if (xfs_has_align(mp) &&
3101 mp->m_sb.sb_inoalignmt > 1)
3102 first_bno = roundup(first_bno, mp->m_sb.sb_inoalignmt);
3103
3104 return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno));
3105 }
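/*
 * Illustrative walk-through (assumed geometry: 512-byte sectors,
 * 4096-byte blocks, no finobt/rmapbt/reflink, log in another AG): the
 * four AG headers share one block (howmany(4 * 512, 4096) = 1), the two
 * free space btree roots and the inobt root add three more, so the root
 * inode lands at block 4 + xfs_alloc_min_freelist(mp, NULL), rounded up
 * to the inode alignment if one is in force.
 */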
3106
3107 /*
3108 * Ensure there are not sparse inode clusters that cross the new EOAG.
3109 *
3110 * This is a no-op for non-spinode filesystems since clusters are always fully
3111 * allocated and checking the bnobt suffices. However, a spinode filesystem
3112 * could have a record where the upper inodes are free blocks. If those blocks
3113 * were removed from the filesystem, the inode record would extend beyond EOAG,
3114 * which will be flagged as corruption.
3115 */
3116 int
3117 xfs_ialloc_check_shrink(
3118 struct xfs_perag *pag,
3119 struct xfs_trans *tp,
3120 struct xfs_buf *agibp,
3121 xfs_agblock_t new_length)
3122 {
3123 struct xfs_inobt_rec_incore rec;
3124 struct xfs_btree_cur *cur;
3125 xfs_agino_t agino;
3126 int has;
3127 int error;
3128
3129 if (!xfs_has_sparseinodes(pag->pag_mount))
3130 return 0;
3131
3132 cur = xfs_inobt_init_cursor(pag, tp, agibp);
3133
3134 /* Look up the inobt record that would correspond to the new EOFS. */
3135 agino = XFS_AGB_TO_AGINO(pag->pag_mount, new_length);
3136 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has);
3137 if (error || !has)
3138 goto out;
3139
3140 error = xfs_inobt_get_rec(cur, &rec, &has);
3141 if (error)
3142 goto out;
3143
3144 if (!has) {
3145 xfs_ag_mark_sick(pag, XFS_SICK_AG_INOBT);
3146 error = -EFSCORRUPTED;
3147 goto out;
3148 }
3149
3150 /* If the record covers inodes that would be beyond EOFS, bail out. */
3151 if (rec.ir_startino + XFS_INODES_PER_CHUNK > agino) {
3152 error = -ENOSPC;
3153 goto out;
3154 }
3155 out:
3156 xfs_btree_del_cursor(cur, error);
3157 return error;
3158 }
3159