// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtbitmap.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_iomap.h"
#include "xfs_health.h"
#include "xfs_bmap_item.h"
#include "xfs_symlink_remote.h"
#include "xfs_inode_util.h"
#include "xfs_rtgroup.h"
#include "xfs_zone_alloc.h"

struct kmem_cache		*xfs_bmap_intent_cache;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	uint64_t	maxblocks;	/* max blocks at this level */
	xfs_extnum_t	maxleafents;	/* max leaf entries possible */
	int		level;		/* btree level */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a fork, hence the maximum number of
	 * leaf entries, is controlled by the size of the on-disk extent count.
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that the
	 * fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted with
	 * ATTR2 and then mounted back with ATTR1, keeping the i_forkoff's fixed
	 * but probably at various positions. Therefore, for both ATTR1 and
	 * ATTR2 we have to assume the worst case scenario of a minimum size
	 * available.
	 */
	maxleafents = xfs_iext_max_nextents(xfs_has_large_extent_counts(mp),
				whichfork);
	if (whichfork == XFS_DATA_FORK)
		sz = xfs_bmdr_space_calc(MINDBTPTRS);
	else
		sz = xfs_bmdr_space_calc(MINABTPTRS);

	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = howmany_64(maxleafents, minleafrecs);
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = howmany_64(maxblocks, minnoderecs);
	}
	mp->m_bm_maxlevels[whichfork] = level;
	ASSERT(mp->m_bm_maxlevels[whichfork] <= xfs_bmbt_maxlevels_ondisk());
}
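
/*
 * Illustrative walk through the sizing loop above (hypothetical numbers,
 * not real on-disk geometry): with maxleafents = 1000, minleafrecs = 10,
 * minnoderecs = 5 and maxrootrecs = 9, the leaf level needs
 * howmany(1000, 10) = 100 blocks; 100 > 9, so the next level needs
 * howmany(100, 5) = 20 blocks, then 4, which fits in the root, giving
 * m_bm_maxlevels = 4 (leaf, two node levels, inode root).
 */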

unsigned int
xfs_bmap_compute_attr_offset(
	struct xfs_mount	*mp)
{
	if (mp->m_sb.sb_inodesize == 256)
		return XFS_LITINO(mp) - xfs_bmdr_space_calc(MINABTPTRS);
	return xfs_bmdr_space_calc(6 * MINABTPTRS);
}

STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

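/*
 * Position the cursor at the first record in the tree.  This works by
 * doing an XFS_LOOKUP_GE search for the all-zeroes key: every real key
 * compares >= to it, so the lookup lands on the lowest record present.
 */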
STATIC int				/* error */
xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = 0;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
		ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_BTREE &&
		ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Update the record referred to by cur to the value given by irec
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}

/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
xfs_filblks_t
xfs_bmap_worst_indlen(
	struct xfs_inode	*ip,	/* incore inode pointer */
	xfs_filblks_t		len)	/* delayed extent length */
{
	struct xfs_mount	*mp = ip->i_mount;
	int			maxrecs = mp->m_bmap_dmxr[0];
	int			level;
	xfs_filblks_t		rval;

	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}
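
/*
 * Worked example for the function above (hypothetical geometry): with
 * len = 1000, leaf maxrecs = 10, node maxrecs = 8 and 5 max levels, the
 * loop needs ceil(1000/10) = 100 leaf blocks, then ceil(100/8) = 13,
 * then ceil(13/8) = 2, then ceil(2/8) = 1; once a level fits in one
 * block each remaining level adds one more block, so the worst case is
 * 100 + 13 + 2 + 1 + 1 = 117 indirect blocks.
 */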

/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	if (ip->i_df.if_format == XFS_DINODE_FMT_DEV)
		return roundup(sizeof(xfs_dev_t), 8);
	return M_IGEO(ip->i_mount)->attr_fork_offset;
}

/*
 * Helper routine to reset inode i_forkoff field when switching attribute fork
 * from local to extent format - we reset it where possible to make space
 * available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_df.if_format != XFS_DINODE_FMT_DEV &&
	    ip->i_df.if_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_forkoff)
			ip->i_forkoff = dfl_forkoff;
	}
}

static int
xfs_bmap_read_buf(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	xfs_fsblock_t		fsbno,		/* file system block number */
	struct xfs_buf		**bpp)		/* buffer for fsbno */
{
	struct xfs_buf		*bp;		/* return value */
	int			error;

	if (!xfs_verify_fsbno(mp, fsbno))
		return -EFSCORRUPTED;
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, fsbno), mp->m_bsize, 0, &bp,
			&xfs_bmbt_buf_ops);
	if (!error) {
		xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
		*bpp = bp;
	}
	return error;
}

#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < cur->bc_maxlevels; i++) {
		if (!cur->bc_levels[i].bp)
			break;
		if (xfs_buf_daddr(cur->bc_levels[i].bp) == bno)
			return cur->bc_levels[i].bp;
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    xfs_buf_daddr(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}

STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = xfs_bmbt_key_addr(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = xfs_bmap_broot_ptr_addr(mp, block, i, sz);
		else
			pp = xfs_bmbt_ptr_addr(mp, block, i, dmxr);

		for (j = i + 1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = xfs_bmap_broot_ptr_addr(mp, block, j, sz);
			else
				thispa = xfs_bmbt_ptr_addr(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %lld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}

/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	struct xfs_btree_cur	*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	struct xfs_buf		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i = 0, j;	/* index into the extents list */
	int			level;	/* btree level, for checking */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
		return;

	/* skip large extent count inodes */
	if (ip->i_df.if_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = xfs_bmap_broot_ptr_addr(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_bmap_read_buf(mp, NULL, bno, &bp);
			if (xfs_metadata_is_sick(error))
				xfs_btree_mark_sick(cur);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = xfs_bmbt_ptr_addr(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto error0;
		}
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;


		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */

		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */

		ep = xfs_bmbt_rec_addr(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = xfs_bmbt_rec_addr(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_bmap_read_buf(mp, NULL, bno, &bp);
			if (xfs_metadata_is_sick(error))
				xfs_btree_mark_sick(cur);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %llu extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}

/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	uint32_t		flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;	/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}

#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
#endif /* DEBUG */

/*
 * Inode fork format manipulation functions
 */

/*
 * Convert the inode format to extent format if it currently is in btree format,
 * but the extent list is small enough that it fits into the extent format.
 *
 * Since the extents are already in-core, all we have to do is give up the space
 * for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*rblock = ifp->if_broot;
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	struct xfs_buf		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	__be64			*pp;	/* ptr to block address */
	struct xfs_owner_info	oinfo;

	/* check if we actually need the extent format first: */
	if (!xfs_bmap_wants_extents(ip, whichfork))
		return 0;

	ASSERT(cur);
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, false) == 1);

	pp = xfs_bmap_broot_ptr_addr(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
#ifdef DEBUG
	if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_verify_fsbno(mp, cbno))) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}
#endif
	error = xfs_bmap_read_buf(mp, tp, cbno, &cbp);
	if (xfs_metadata_is_sick(error))
		xfs_btree_mark_sick(cur);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;

	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo,
			XFS_AG_RESV_NONE, 0);
	if (error)
		return error;

	ip->i_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_levels[0].bp == cbp)
		cur->bc_levels[0].bp = NULL;
	xfs_bmap_broot_realloc(ip, whichfork, 0);
	ASSERT(ifp->if_broot == NULL);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}
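
/*
 * Note that the child leaf freed above is not released synchronously:
 * xfs_free_extent_later() queues a deferred free so the block goes away
 * when the deferred ops for this transaction are finished, keeping the
 * free correctly ordered against the rest of the conversion.
 */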

/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int				/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	**curp,	/* cursor returned to caller */
	int			wasdel,	/* converting a delayed alloc */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	struct xfs_buf		*abp;		/* buffer for ablock */
	struct xfs_alloc_arg	args;		/* allocation arguments */
	struct xfs_bmbt_rec	*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	struct xfs_bmbt_key	*kp;		/* root block key pointer */
	struct xfs_mount	*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	block = xfs_bmap_broot_realloc(ip, whichfork, 1);

	/*
	 * Fill in the root.
	 */
	xfs_bmbt_init_block(ip, block, NULL, 1, 1);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	if (wasdel)
		cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	ifp->if_format = XFS_DINODE_FMT_BTREE;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);

	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	error = xfs_alloc_vextent_start_ag(&args,
				XFS_INO_TO_FSB(mp, ip->i_ino));
	if (error)
		goto out_root_realloc;

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	cur->bc_bmap.allocated++;
	ip->i_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, args.fsbno),
			mp->m_bsize, 0, &abp);
	if (error)
		goto out_unreserve_dquot;

	/*
	 * Fill in the child block.
	 */
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_bmbt_init_block(ip, ablock, abp, 0, 0);

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = xfs_bmbt_rec_addr(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == ifp->if_nextents);
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = xfs_bmbt_key_addr(mp, block, 1);
	arp = xfs_bmbt_rec_addr(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = xfs_bmbt_ptr_addr(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

out_unreserve_dquot:
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	xfs_bmap_broot_realloc(ip, whichfork, 0);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}
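
/*
 * Rough picture of the result of a successful conversion above: a
 * two-level tree whose incore root (if_broot) holds a single key/pointer
 * record pointing at the newly allocated leaf block, which in turn holds
 * every incore extent that is not delalloc.
 */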

/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(ifp->if_nextents == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_data = NULL;
	ifp->if_height = 0;
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}


int					/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp, void *priv),
	void		*priv)
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	struct xfs_ifork *ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	struct xfs_buf	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;
	struct xfs_iext_cursor icur;

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);

	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent_start_ag(&args,
			XFS_INO_TO_FSB(args.mp, ip->i_ino));
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(args.mp, args.fsbno),
			args.mp->m_bsize, 0, &bp);
	if (error)
		goto done;

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
	 */
	init_fn(tp, bp, ip, ifp, priv);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
	flags |= XFS_ILOG_CORE;

	ifp->if_data = NULL;
	ifp->if_height = 0;

	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	ifp->if_nextents = 1;
	ip->i_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}
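
/*
 * Example caller: xfs_bmap_add_attrfork_local() below passes
 * xfs_symlink_local_to_remote() as init_fn when converting an inline
 * symlink, since only the symlink code knows the remote block format and
 * hence how much of the buffer to log.
 */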

/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_btree_block	*block = ip->i_df.if_broot;
	struct xfs_btree_cur	*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;

	if (xfs_bmap_bmdr_space(block) <= xfs_inode_data_fork_size(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		if (XFS_IS_CORRUPT(mp, stat != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_bmap.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
	    xfs_inode_data_fork_size(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_bmap.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_da_args	dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= xfs_inode_data_fork_size(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		dargs.owner = ip->i_ino;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
				XFS_DATA_FORK, xfs_symlink_local_to_remote,
				NULL);

	/* should only be called for types that support local format data */
	ASSERT(0);
	xfs_bmap_mark_sick(ip, XFS_ATTR_FORK);
	return -EFSCORRUPTED;
}

/*
 * Set an inode attr fork offset based on the format of the data fork.
 */
static int
xfs_bmap_set_attrforkoff(
	struct xfs_inode	*ip,
	int			size)
{
	int			default_size = xfs_default_attroffset(ip) >> 3;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_forkoff = default_size;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_forkoff)
			ip->i_forkoff = default_size;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}

	return 0;
}

/*
 * Convert inode from non-attributed to attributed. Caller must hold the
 * ILOCK_EXCL and the file cannot have an attr fork.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	struct xfs_mount	*mp = tp->t_mountp;
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	if (!xfs_is_metadir_inode(ip))
		ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
	ASSERT(!xfs_inode_has_attr_fork(ip));

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_bmap_set_attrforkoff(ip, size);
	if (error)
		return error;

	xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
	logflags = 0;
	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		return error;
	if (!xfs_has_attr(mp)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_has_attr(mp)) {
			xfs_add_attr(mp);
			xfs_add_attr2(mp);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	return 0;
}
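
/*
 * Sketch of a typical caller sequence (illustrative, not a verbatim
 * caller): allocate a transaction with enough reservation for the
 * conversion, take ILOCK_EXCL, then:
 *
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_bmap_add_attrfork(tp, ip, size, rsvd);
 *
 * and commit; the first successful call on a filesystem also sets the
 * superblock attr feature bits as done above.
 */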

/*
 * Internal and external extent tree search functions.
 */

struct xfs_iread_state {
	struct xfs_iext_cursor	icur;
	xfs_extnum_t		loaded;
};

int
xfs_bmap_complain_bad_rec(
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_failaddr_t			fa,
	const struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount		*mp = ip->i_mount;
	const char			*forkname;

	switch (whichfork) {
	case XFS_DATA_FORK:	forkname = "data"; break;
	case XFS_ATTR_FORK:	forkname = "attr"; break;
	case XFS_COW_FORK:	forkname = "CoW"; break;
	default:		forkname = "???"; break;
	}

	xfs_warn(mp,
 "Bmap BTree record corruption in inode 0x%llx %s fork detected at %pS!",
				ip->i_ino, forkname, fa);
	xfs_warn(mp,
		"Offset 0x%llx, start block 0x%llx, block count 0x%llx state 0x%x",
		irec->br_startoff, irec->br_startblock, irec->br_blockcount,
		irec->br_state);

	return -EFSCORRUPTED;
}

/* Stuff every bmbt record from this block into the incore extent map. */
static int
xfs_iread_bmbt_block(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xfs_iread_state	*ir = priv;
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_ino.ip;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	struct xfs_bmbt_rec	*frp;
	xfs_extnum_t		num_recs;
	xfs_extnum_t		j;
	int			whichfork = cur->bc_ino.whichfork;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	block = xfs_btree_get_block(cur, level, &bp);

	/* Abort if we find more records than nextents. */
	num_recs = xfs_btree_get_numrecs(block);
	if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) {
		xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
				(unsigned long long)ip->i_ino);
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
				sizeof(*block), __this_address);
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	/* Copy records into the incore cache. */
	frp = xfs_bmbt_rec_addr(mp, block, 1);
	for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
		struct xfs_bmbt_irec	new;
		xfs_failaddr_t		fa;

		xfs_bmbt_disk_get_all(frp, &new);
		fa = xfs_bmap_validate_extent(ip, whichfork, &new);
		if (fa) {
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					"xfs_iread_extents(2)", frp,
					sizeof(*frp), fa);
			xfs_bmap_mark_sick(ip, whichfork);
			return xfs_bmap_complain_bad_rec(ip, whichfork, fa,
					&new);
		}
		xfs_iext_insert(ip, &ir->icur, &new,
				xfs_bmap_fork_to_state(whichfork));
		trace_xfs_read_extent(ip, &ir->icur,
				xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
		xfs_iext_next(ifp, &ir->icur);
	}

	return 0;
}

/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_iread_state	ir;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_cur	*cur;
	int			error;

	if (!xfs_need_iread_extents(ifp))
		return 0;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);

	ir.loaded = 0;
	xfs_iext_first(ifp, &ir.icur);
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
			XFS_BTREE_VISIT_RECORDS, &ir);
	xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) {
		xfs_bmap_mark_sick(ip, whichfork);
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(ir.loaded == xfs_iext_count(ifp));
	/*
	 * Use release semantics so that we can use acquire semantics in
	 * xfs_need_iread_extents and be guaranteed to see a valid mapping tree
	 * after that load.
	 */
	smp_store_release(&ifp->if_needextents, 0);
	return 0;
out:
	if (xfs_metadata_is_sick(error))
		xfs_bmap_mark_sick(ip, whichfork);
	xfs_iext_destroy(ifp);
	return error;
}
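
/*
 * The matching acquire for the release above is in
 * xfs_need_iread_extents(), which loads if_needextents with
 * smp_load_acquire(); any thread that observes the 0 stored here is
 * therefore also guaranteed to observe the fully populated extent tree.
 */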

/*
 * Returns the relative block number of the first unused block(s) in the given
 * fork with at least "len" logically contiguous blocks free. This is the
 * lowest-address hole if the fork has holes, else the first block past the end
 * of fork. Return 0 if the fork is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_extlen_t		len,		/* size of hole to find */
	xfs_fileoff_t		*first_unused,	/* unused block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		lastaddr = 0;
	xfs_fileoff_t		lowest, max;
	int			error;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}

	ASSERT(xfs_ifork_has_extents(ifp));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	lowest = max = *first_unused;
	for_each_xfs_iext(ifp, &icur, &got) {
		/*
		 * See if the hole before this extent will work.
		 */
		if (got.br_startoff >= lowest + len &&
		    got.br_startoff - max >= len)
			break;
		lastaddr = got.br_startoff + got.br_blockcount;
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}

	*first_unused = max;
	return 0;
}
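
/*
 * Example for the function above: with mappings at [0, 10) and [15, 20)
 * and len = 3, the hole at offset 10 is 5 blocks long, which is enough,
 * so *first_unused is set to 10.  With len = 6 no hole qualifies and the
 * loop falls off the end, returning the first block past EOF, 20.
 */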

/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error;

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		ASSERT(0);
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
		*last_block = 0;
	return 0;
}

int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_iext_cursor	icur;
	int			error;

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	xfs_iext_last(ifp, &icur);
	if (!xfs_iext_get_extent(ifp, &icur, rec))
		*is_empty = 1;
	else
		*is_empty = 0;
	return 0;
}

/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = false;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = true;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}

/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp))) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Extent tree manipulation functions used during allocation.
 */

static inline bool
xfs_bmap_same_rtgroup(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*left,
	struct xfs_bmbt_irec	*right)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (xfs_ifork_is_realtime(ip, whichfork) && xfs_has_rtgroups(mp)) {
		if (xfs_rtb_to_rgno(mp, left->br_startblock) !=
		    xfs_rtb_to_rgno(mp, right->br_startblock))
			return false;
	}

	return true;
}
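
/*
 * A mapping must not span a realtime allocation group boundary, so the
 * helper above makes the merge logic treat two otherwise contiguous
 * realtime mappings that land in different rtgroups as discontiguous.
 */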
1434
1435 /*
1436 * Convert a delayed allocation to a real allocation.
1437 */
1438 STATIC int /* error */
xfs_bmap_add_extent_delay_real(struct xfs_bmalloca * bma,int whichfork)1439 xfs_bmap_add_extent_delay_real(
1440 struct xfs_bmalloca *bma,
1441 int whichfork)
1442 {
1443 struct xfs_mount *mp = bma->ip->i_mount;
1444 struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork);
1445 struct xfs_bmbt_irec *new = &bma->got;
1446 int error; /* error return value */
1447 int i; /* temp state */
1448 xfs_fileoff_t new_endoff; /* end offset of new entry */
1449 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
1450 /* left is 0, right is 1, prev is 2 */
1451 int rval=0; /* return value (logging flags) */
1452 uint32_t state = xfs_bmap_fork_to_state(whichfork);
1453 xfs_filblks_t da_new; /* new count del alloc blocks used */
1454 xfs_filblks_t da_old; /* old count del alloc blocks used */
1455 xfs_filblks_t temp=0; /* value for da_new calculations */
1456 int tmp_rval; /* partial logging flags */
1457 struct xfs_bmbt_irec old;
1458
1459 ASSERT(whichfork != XFS_ATTR_FORK);
1460 ASSERT(!isnullstartblock(new->br_startblock));
1461 ASSERT(!bma->cur || (bma->cur->bc_flags & XFS_BTREE_BMBT_WASDEL));
1462
1463 XFS_STATS_INC(mp, xs_add_exlist);
1464
1465 #define LEFT r[0]
1466 #define RIGHT r[1]
1467 #define PREV r[2]
1468
1469 /*
1470 * Set up a bunch of variables to make the tests simpler.
1471 */
1472 xfs_iext_get_extent(ifp, &bma->icur, &PREV);
1473 new_endoff = new->br_startoff + new->br_blockcount;
1474 ASSERT(isnullstartblock(PREV.br_startblock));
1475 ASSERT(PREV.br_startoff <= new->br_startoff);
1476 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1477
1478 da_old = startblockval(PREV.br_startblock);
1479 da_new = 0;
1480
1481 /*
1482 * Set flags determining what part of the previous delayed allocation
1483 * extent is being replaced by a real allocation.
1484 */
1485 if (PREV.br_startoff == new->br_startoff)
1486 state |= BMAP_LEFT_FILLING;
1487 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1488 state |= BMAP_RIGHT_FILLING;
1489
1490 /*
1491 * Check and set flags if this segment has a left neighbor.
1492 * Don't set contiguous if the combined extent would be too large.
1493 */
1494 if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
1495 state |= BMAP_LEFT_VALID;
1496 if (isnullstartblock(LEFT.br_startblock))
1497 state |= BMAP_LEFT_DELAY;
1498 }
1499
1500 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1501 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1502 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1503 LEFT.br_state == new->br_state &&
1504 LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
1505 xfs_bmap_same_rtgroup(bma->ip, whichfork, &LEFT, new))
1506 state |= BMAP_LEFT_CONTIG;
1507
1508 /*
1509 * Check and set flags if this segment has a right neighbor.
1510 * Don't set contiguous if the combined extent would be too large.
1511 * Also check for all-three-contiguous being too large.
1512 */
1513 if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
1514 state |= BMAP_RIGHT_VALID;
1515 if (isnullstartblock(RIGHT.br_startblock))
1516 state |= BMAP_RIGHT_DELAY;
1517 }
1518
1519 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1520 new_endoff == RIGHT.br_startoff &&
1521 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1522 new->br_state == RIGHT.br_state &&
1523 new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
1524 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1525 BMAP_RIGHT_FILLING)) !=
1526 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1527 BMAP_RIGHT_FILLING) ||
1528 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1529 <= XFS_MAX_BMBT_EXTLEN) &&
1530 xfs_bmap_same_rtgroup(bma->ip, whichfork, new, &RIGHT))
1531 state |= BMAP_RIGHT_CONTIG;
1532
1533 error = 0;
1534 /*
1535 * Switch out based on the FILLING and CONTIG state bits.
1536 */
1537 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1538 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1539 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1540 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1541 /*
1542 * Filling in all of a previously delayed allocation extent.
1543 * The left and right neighbors are both contiguous with new.
1544 */
1545 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
1546
1547 xfs_iext_remove(bma->ip, &bma->icur, state);
1548 xfs_iext_remove(bma->ip, &bma->icur, state);
1549 xfs_iext_prev(ifp, &bma->icur);
1550 xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1551 ifp->if_nextents--;
1552
1553 if (bma->cur == NULL)
1554 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1555 else {
1556 rval = XFS_ILOG_CORE;
1557 error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
1558 if (error)
1559 goto done;
1560 if (XFS_IS_CORRUPT(mp, i != 1)) {
1561 xfs_btree_mark_sick(bma->cur);
1562 error = -EFSCORRUPTED;
1563 goto done;
1564 }
1565 error = xfs_btree_delete(bma->cur, &i);
1566 if (error)
1567 goto done;
1568 if (XFS_IS_CORRUPT(mp, i != 1)) {
1569 xfs_btree_mark_sick(bma->cur);
1570 error = -EFSCORRUPTED;
1571 goto done;
1572 }
1573 error = xfs_btree_decrement(bma->cur, 0, &i);
1574 if (error)
1575 goto done;
1576 if (XFS_IS_CORRUPT(mp, i != 1)) {
1577 xfs_btree_mark_sick(bma->cur);
1578 error = -EFSCORRUPTED;
1579 goto done;
1580 }
1581 error = xfs_bmbt_update(bma->cur, &LEFT);
1582 if (error)
1583 goto done;
1584 }
1585 ASSERT(da_new <= da_old);
1586 break;
1587
1588 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1589 /*
1590 * Filling in all of a previously delayed allocation extent.
1591 * The left neighbor is contiguous, the right is not.
1592 */
1593 old = LEFT;
1594 LEFT.br_blockcount += PREV.br_blockcount;
1595
1596 xfs_iext_remove(bma->ip, &bma->icur, state);
1597 xfs_iext_prev(ifp, &bma->icur);
1598 xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1599
1600 if (bma->cur == NULL)
1601 rval = XFS_ILOG_DEXT;
1602 else {
1603 rval = 0;
1604 error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1605 if (error)
1606 goto done;
1607 if (XFS_IS_CORRUPT(mp, i != 1)) {
1608 xfs_btree_mark_sick(bma->cur);
1609 error = -EFSCORRUPTED;
1610 goto done;
1611 }
1612 error = xfs_bmbt_update(bma->cur, &LEFT);
1613 if (error)
1614 goto done;
1615 }
1616 ASSERT(da_new <= da_old);
1617 break;
1618
1619 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1620 /*
1621 * Filling in all of a previously delayed allocation extent.
1622 * The right neighbor is contiguous, the left is not. Take care
1623 * with delay -> unwritten extent allocation here because the
1624 * delalloc record we are overwriting is always written.
1625 */
1626 PREV.br_startblock = new->br_startblock;
1627 PREV.br_blockcount += RIGHT.br_blockcount;
1628 PREV.br_state = new->br_state;
1629
1630 xfs_iext_next(ifp, &bma->icur);
1631 xfs_iext_remove(bma->ip, &bma->icur, state);
1632 xfs_iext_prev(ifp, &bma->icur);
1633 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1634
1635 if (bma->cur == NULL)
1636 rval = XFS_ILOG_DEXT;
1637 else {
1638 rval = 0;
1639 error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
1640 if (error)
1641 goto done;
1642 if (XFS_IS_CORRUPT(mp, i != 1)) {
1643 xfs_btree_mark_sick(bma->cur);
1644 error = -EFSCORRUPTED;
1645 goto done;
1646 }
1647 error = xfs_bmbt_update(bma->cur, &PREV);
1648 if (error)
1649 goto done;
1650 }
1651 ASSERT(da_new <= da_old);
1652 break;
1653
1654 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1655 /*
1656 * Filling in all of a previously delayed allocation extent.
1657 * Neither the left nor right neighbors are contiguous with
1658 * the new one.
1659 */
1660 PREV.br_startblock = new->br_startblock;
1661 PREV.br_state = new->br_state;
1662 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1663 ifp->if_nextents++;
1664
1665 if (bma->cur == NULL)
1666 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1667 else {
1668 rval = XFS_ILOG_CORE;
1669 error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1670 if (error)
1671 goto done;
1672 if (XFS_IS_CORRUPT(mp, i != 0)) {
1673 xfs_btree_mark_sick(bma->cur);
1674 error = -EFSCORRUPTED;
1675 goto done;
1676 }
1677 error = xfs_btree_insert(bma->cur, &i);
1678 if (error)
1679 goto done;
1680 if (XFS_IS_CORRUPT(mp, i != 1)) {
1681 xfs_btree_mark_sick(bma->cur);
1682 error = -EFSCORRUPTED;
1683 goto done;
1684 }
1685 }
1686 ASSERT(da_new <= da_old);
1687 break;
1688
1689 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1690 /*
1691 * Filling in the first part of a previous delayed allocation.
1692 * The left neighbor is contiguous.
1693 */
1694 old = LEFT;
1695 temp = PREV.br_blockcount - new->br_blockcount;
1696 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1697 startblockval(PREV.br_startblock));
1698
1699 LEFT.br_blockcount += new->br_blockcount;
1700
1701 PREV.br_blockcount = temp;
1702 PREV.br_startoff += new->br_blockcount;
1703 PREV.br_startblock = nullstartblock(da_new);
1704
1705 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1706 xfs_iext_prev(ifp, &bma->icur);
1707 xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1708
1709 if (bma->cur == NULL)
1710 rval = XFS_ILOG_DEXT;
1711 else {
1712 rval = 0;
1713 error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1714 if (error)
1715 goto done;
1716 if (XFS_IS_CORRUPT(mp, i != 1)) {
1717 xfs_btree_mark_sick(bma->cur);
1718 error = -EFSCORRUPTED;
1719 goto done;
1720 }
1721 error = xfs_bmbt_update(bma->cur, &LEFT);
1722 if (error)
1723 goto done;
1724 }
1725 ASSERT(da_new <= da_old);
1726 break;
1727
1728 case BMAP_LEFT_FILLING:
1729 /*
1730 * Filling in the first part of a previous delayed allocation.
1731 * The left neighbor is not contiguous.
1732 */
1733 xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1734 ifp->if_nextents++;
1735
1736 if (bma->cur == NULL)
1737 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1738 else {
1739 rval = XFS_ILOG_CORE;
1740 error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1741 if (error)
1742 goto done;
1743 if (XFS_IS_CORRUPT(mp, i != 0)) {
1744 xfs_btree_mark_sick(bma->cur);
1745 error = -EFSCORRUPTED;
1746 goto done;
1747 }
1748 error = xfs_btree_insert(bma->cur, &i);
1749 if (error)
1750 goto done;
1751 if (XFS_IS_CORRUPT(mp, i != 1)) {
1752 xfs_btree_mark_sick(bma->cur);
1753 error = -EFSCORRUPTED;
1754 goto done;
1755 }
1756 }
1757
1758 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1759 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1760 &bma->cur, 1, &tmp_rval, whichfork);
1761 rval |= tmp_rval;
1762 if (error)
1763 goto done;
1764 }
1765
1766 temp = PREV.br_blockcount - new->br_blockcount;
1767 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1768 startblockval(PREV.br_startblock) -
1769 (bma->cur ? bma->cur->bc_bmap.allocated : 0));
1770
1771 PREV.br_startoff = new_endoff;
1772 PREV.br_blockcount = temp;
1773 PREV.br_startblock = nullstartblock(da_new);
1774 xfs_iext_next(ifp, &bma->icur);
1775 xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1776 xfs_iext_prev(ifp, &bma->icur);
1777 break;
1778
1779 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1780 /*
1781 * Filling in the last part of a previous delayed allocation.
1782 * The right neighbor is contiguous with the new allocation.
1783 */
1784 old = RIGHT;
1785 RIGHT.br_startoff = new->br_startoff;
1786 RIGHT.br_startblock = new->br_startblock;
1787 RIGHT.br_blockcount += new->br_blockcount;
1788
1789 if (bma->cur == NULL)
1790 rval = XFS_ILOG_DEXT;
1791 else {
1792 rval = 0;
1793 error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1794 if (error)
1795 goto done;
1796 if (XFS_IS_CORRUPT(mp, i != 1)) {
1797 xfs_btree_mark_sick(bma->cur);
1798 error = -EFSCORRUPTED;
1799 goto done;
1800 }
1801 error = xfs_bmbt_update(bma->cur, &RIGHT);
1802 if (error)
1803 goto done;
1804 }
1805
1806 temp = PREV.br_blockcount - new->br_blockcount;
1807 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1808 startblockval(PREV.br_startblock));
1809
1810 PREV.br_blockcount = temp;
1811 PREV.br_startblock = nullstartblock(da_new);
1812
1813 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1814 xfs_iext_next(ifp, &bma->icur);
1815 xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
1816 ASSERT(da_new <= da_old);
1817 break;
1818
1819 case BMAP_RIGHT_FILLING:
1820 /*
1821 * Filling in the last part of a previous delayed allocation.
1822 * The right neighbor is not contiguous.
1823 */
1824 xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1825 ifp->if_nextents++;
1826
1827 if (bma->cur == NULL)
1828 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1829 else {
1830 rval = XFS_ILOG_CORE;
1831 error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1832 if (error)
1833 goto done;
1834 if (XFS_IS_CORRUPT(mp, i != 0)) {
1835 xfs_btree_mark_sick(bma->cur);
1836 error = -EFSCORRUPTED;
1837 goto done;
1838 }
1839 error = xfs_btree_insert(bma->cur, &i);
1840 if (error)
1841 goto done;
1842 if (XFS_IS_CORRUPT(mp, i != 1)) {
1843 xfs_btree_mark_sick(bma->cur);
1844 error = -EFSCORRUPTED;
1845 goto done;
1846 }
1847 }
1848
1849 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1850 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1851 &bma->cur, 1, &tmp_rval, whichfork);
1852 rval |= tmp_rval;
1853 if (error)
1854 goto done;
1855 }
1856
1857 temp = PREV.br_blockcount - new->br_blockcount;
1858 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1859 startblockval(PREV.br_startblock) -
1860 (bma->cur ? bma->cur->bc_bmap.allocated : 0));
1861
1862 PREV.br_startblock = nullstartblock(da_new);
1863 PREV.br_blockcount = temp;
1864 xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1865 xfs_iext_next(ifp, &bma->icur);
1866 ASSERT(da_new <= da_old);
1867 break;
1868
1869 case 0:
1870 /*
1871 * Filling in the middle part of a previous delayed allocation.
1872 * Contiguity is impossible here.
1873 * This case is avoided almost all the time.
1874 *
1875 * We start with a delayed allocation:
1876 *
1877 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
1878 * PREV @ idx
1879 *
1880 * and we are allocating:
1881 * +rrrrrrrrrrrrrrrrr+
1882 * new
1883 *
1884 * and we set it up for insertion as:
1885 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
1886 * new
1887 * PREV @ idx LEFT RIGHT
1888 * inserted at idx + 1
1889 */
1890 old = PREV;
1891
1892 /* LEFT is the new middle */
1893 LEFT = *new;
1894
1895 /* RIGHT is the new right */
1896 RIGHT.br_state = PREV.br_state;
1897 RIGHT.br_startoff = new_endoff;
1898 RIGHT.br_blockcount =
1899 PREV.br_startoff + PREV.br_blockcount - new_endoff;
1900 RIGHT.br_startblock =
1901 nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1902 RIGHT.br_blockcount));
1903
1904 /* truncate PREV */
1905 PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
1906 PREV.br_startblock =
1907 nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1908 PREV.br_blockcount));
1909 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1910
1911 xfs_iext_next(ifp, &bma->icur);
1912 xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
1913 xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
1914 ifp->if_nextents++;
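 /*
  * Only the new middle extent is a real on-disk mapping; the two
  * delalloc remnants remain in-core only, so the on-disk extent
  * count rises by just one.
  */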
1915
1916 if (bma->cur == NULL)
1917 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1918 else {
1919 rval = XFS_ILOG_CORE;
1920 error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1921 if (error)
1922 goto done;
1923 if (XFS_IS_CORRUPT(mp, i != 0)) {
1924 xfs_btree_mark_sick(bma->cur);
1925 error = -EFSCORRUPTED;
1926 goto done;
1927 }
1928 error = xfs_btree_insert(bma->cur, &i);
1929 if (error)
1930 goto done;
1931 if (XFS_IS_CORRUPT(mp, i != 1)) {
1932 xfs_btree_mark_sick(bma->cur);
1933 error = -EFSCORRUPTED;
1934 goto done;
1935 }
1936 }
1937
1938 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1939 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1940 &bma->cur, 1, &tmp_rval, whichfork);
1941 rval |= tmp_rval;
1942 if (error)
1943 goto done;
1944 }
1945
1946 da_new = startblockval(PREV.br_startblock) +
1947 startblockval(RIGHT.br_startblock);
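 /*
  * Splitting one delalloc extent into two remnants can make the
  * combined worst-case indirect-block reservation exceed what the
  * original extent held, so da_new may be larger than da_old here;
  * the fdblocks adjustment at the end of this function funds or
  * returns the difference.
  */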
1948 break;
1949
1950 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1951 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1952 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1953 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1954 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1955 case BMAP_LEFT_CONTIG:
1956 case BMAP_RIGHT_CONTIG:
1957 /*
1958 * These cases are all impossible.
1959 */
1960 ASSERT(0);
1961 }
1962
1963 /* add reverse mapping unless caller opted out */
1964 if (!(bma->flags & XFS_BMAPI_NORMAP))
1965 xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);
1966
1967 /* convert to a btree if necessary */
1968 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1969 int tmp_logflags; /* partial log flag return val */
1970
1971 ASSERT(bma->cur == NULL);
1972 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1973 &bma->cur, da_old > 0, &tmp_logflags,
1974 whichfork);
1975 bma->logflags |= tmp_logflags;
1976 if (error)
1977 goto done;
1978 }
1979
1980 if (da_new != da_old)
1981 xfs_mod_delalloc(bma->ip, 0, (int64_t)da_new - da_old);
1982
1983 if (bma->cur) {
1984 da_new += bma->cur->bc_bmap.allocated;
1985 bma->cur->bc_bmap.allocated = 0;
1986 }
1987
1988 /* adjust for changes in reserved delayed indirect blocks */
1989 if (da_new < da_old)
1990 xfs_add_fdblocks(mp, da_old - da_new);
1991 else if (da_new > da_old)
1992 error = xfs_dec_fdblocks(mp, da_new - da_old, true);
1993
1994 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
1995 done:
1996 if (whichfork != XFS_COW_FORK)
1997 bma->logflags |= rval;
1998 return error;
1999 #undef LEFT
2000 #undef RIGHT
2001 #undef PREV
2002 }
2003
2004 /*
2005 * Convert an unwritten allocation to a real allocation or vice versa.
2006 */
2007 int /* error */
2008 xfs_bmap_add_extent_unwritten_real(
2009 struct xfs_trans *tp,
2010 xfs_inode_t *ip, /* incore inode pointer */
2011 int whichfork,
2012 struct xfs_iext_cursor *icur,
2013 struct xfs_btree_cur **curp, /* if *curp is null, not a btree */
2014 xfs_bmbt_irec_t *new, /* new data to add to file extents */
2015 int *logflagsp) /* inode logging flags */
2016 {
2017 struct xfs_btree_cur *cur; /* btree cursor */
2018 int error; /* error return value */
2019 int i; /* temp state */
2020 struct xfs_ifork *ifp; /* inode fork pointer */
2021 xfs_fileoff_t new_endoff; /* end offset of new entry */
2022 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
2023 /* left is 0, right is 1, prev is 2 */
2024 int rval=0; /* return value (logging flags) */
2025 uint32_t state = xfs_bmap_fork_to_state(whichfork);
2026 struct xfs_mount *mp = ip->i_mount;
2027 struct xfs_bmbt_irec old;
2028
2029 *logflagsp = 0;
2030
2031 cur = *curp;
2032 ifp = xfs_ifork_ptr(ip, whichfork);
2033
2034 ASSERT(!isnullstartblock(new->br_startblock));
2035
2036 XFS_STATS_INC(mp, xs_add_exlist);
2037
2038 #define LEFT r[0]
2039 #define RIGHT r[1]
2040 #define PREV r[2]
2041
2042 /*
2043 * Set up a bunch of variables to make the tests simpler.
2044 */
2045 error = 0;
2046 xfs_iext_get_extent(ifp, icur, &PREV);
2047 ASSERT(new->br_state != PREV.br_state);
2048 new_endoff = new->br_startoff + new->br_blockcount;
2049 ASSERT(PREV.br_startoff <= new->br_startoff);
2050 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2051
2052 /*
2053 * Set flags determining what part of the previous oldext allocation
2054 * extent is being replaced by a newext allocation.
2055 */
2056 if (PREV.br_startoff == new->br_startoff)
2057 state |= BMAP_LEFT_FILLING;
2058 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2059 state |= BMAP_RIGHT_FILLING;
2060
2061 /*
2062 * Check and set flags if this segment has a left neighbor.
2063 * Don't set contiguous if the combined extent would be too large.
2064 */
2065 if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
2066 state |= BMAP_LEFT_VALID;
2067 if (isnullstartblock(LEFT.br_startblock))
2068 state |= BMAP_LEFT_DELAY;
2069 }
2070
2071 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2072 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2073 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2074 LEFT.br_state == new->br_state &&
2075 LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2076 xfs_bmap_same_rtgroup(ip, whichfork, &LEFT, new))
2077 state |= BMAP_LEFT_CONTIG;
2078
2079 /*
2080 * Check and set flags if this segment has a right neighbor.
2081 * Don't set contiguous if the combined extent would be too large.
2082 * Also check for all-three-contiguous being too large.
2083 */
2084 if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
2085 state |= BMAP_RIGHT_VALID;
2086 if (isnullstartblock(RIGHT.br_startblock))
2087 state |= BMAP_RIGHT_DELAY;
2088 }
2089
2090 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2091 new_endoff == RIGHT.br_startoff &&
2092 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2093 new->br_state == RIGHT.br_state &&
2094 new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2095 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2096 BMAP_RIGHT_FILLING)) !=
2097 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2098 BMAP_RIGHT_FILLING) ||
2099 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2100 <= XFS_MAX_BMBT_EXTLEN) &&
2101 xfs_bmap_same_rtgroup(ip, whichfork, new, &RIGHT))
2102 state |= BMAP_RIGHT_CONTIG;
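 /*
  * The three-extent length check above only matters when LEFT is
  * already contiguous and both ends of PREV are being converted,
  * since that is the only path that merges LEFT, PREV and RIGHT into
  * a single record.
  */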
2103
2104 /*
2105 * Switch out based on the FILLING and CONTIG state bits.
2106 */
2107 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2108 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2109 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2110 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2111 /*
2112 * Setting all of a previous oldext extent to newext.
2113 * The left and right neighbors are both contiguous with new.
2114 */
2115 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
2116
2117 xfs_iext_remove(ip, icur, state);
2118 xfs_iext_remove(ip, icur, state);
2119 xfs_iext_prev(ifp, icur);
2120 xfs_iext_update_extent(ip, state, icur, &LEFT);
2121 ifp->if_nextents -= 2;
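 /*
  * If a bmap btree exists, remove two records and widen the third:
  * delete at RIGHT, step back and delete PREV, then step back once
  * more and rewrite LEFT to span all three extents.
  */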
2122 if (cur == NULL)
2123 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2124 else {
2125 rval = XFS_ILOG_CORE;
2126 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2127 if (error)
2128 goto done;
2129 if (XFS_IS_CORRUPT(mp, i != 1)) {
2130 xfs_btree_mark_sick(cur);
2131 error = -EFSCORRUPTED;
2132 goto done;
2133 }
2134 if ((error = xfs_btree_delete(cur, &i)))
2135 goto done;
2136 if (XFS_IS_CORRUPT(mp, i != 1)) {
2137 xfs_btree_mark_sick(cur);
2138 error = -EFSCORRUPTED;
2139 goto done;
2140 }
2141 if ((error = xfs_btree_decrement(cur, 0, &i)))
2142 goto done;
2143 if (XFS_IS_CORRUPT(mp, i != 1)) {
2144 xfs_btree_mark_sick(cur);
2145 error = -EFSCORRUPTED;
2146 goto done;
2147 }
2148 if ((error = xfs_btree_delete(cur, &i)))
2149 goto done;
2150 if (XFS_IS_CORRUPT(mp, i != 1)) {
2151 xfs_btree_mark_sick(cur);
2152 error = -EFSCORRUPTED;
2153 goto done;
2154 }
2155 if ((error = xfs_btree_decrement(cur, 0, &i)))
2156 goto done;
2157 if (XFS_IS_CORRUPT(mp, i != 1)) {
2158 xfs_btree_mark_sick(cur);
2159 error = -EFSCORRUPTED;
2160 goto done;
2161 }
2162 error = xfs_bmbt_update(cur, &LEFT);
2163 if (error)
2164 goto done;
2165 }
2166 break;
2167
2168 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2169 /*
2170 * Setting all of a previous oldext extent to newext.
2171 * The left neighbor is contiguous, the right is not.
2172 */
2173 LEFT.br_blockcount += PREV.br_blockcount;
2174
2175 xfs_iext_remove(ip, icur, state);
2176 xfs_iext_prev(ifp, icur);
2177 xfs_iext_update_extent(ip, state, icur, &LEFT);
2178 ifp->if_nextents--;
2179 if (cur == NULL)
2180 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2181 else {
2182 rval = XFS_ILOG_CORE;
2183 error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
2184 if (error)
2185 goto done;
2186 if (XFS_IS_CORRUPT(mp, i != 1)) {
2187 xfs_btree_mark_sick(cur);
2188 error = -EFSCORRUPTED;
2189 goto done;
2190 }
2191 if ((error = xfs_btree_delete(cur, &i)))
2192 goto done;
2193 if (XFS_IS_CORRUPT(mp, i != 1)) {
2194 xfs_btree_mark_sick(cur);
2195 error = -EFSCORRUPTED;
2196 goto done;
2197 }
2198 if ((error = xfs_btree_decrement(cur, 0, &i)))
2199 goto done;
2200 if (XFS_IS_CORRUPT(mp, i != 1)) {
2201 xfs_btree_mark_sick(cur);
2202 error = -EFSCORRUPTED;
2203 goto done;
2204 }
2205 error = xfs_bmbt_update(cur, &LEFT);
2206 if (error)
2207 goto done;
2208 }
2209 break;
2210
2211 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2212 /*
2213 * Setting all of a previous oldext extent to newext.
2214 * The right neighbor is contiguous, the left is not.
2215 */
2216 PREV.br_blockcount += RIGHT.br_blockcount;
2217 PREV.br_state = new->br_state;
2218
2219 xfs_iext_next(ifp, icur);
2220 xfs_iext_remove(ip, icur, state);
2221 xfs_iext_prev(ifp, icur);
2222 xfs_iext_update_extent(ip, state, icur, &PREV);
2223 ifp->if_nextents--;
2224
2225 if (cur == NULL)
2226 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2227 else {
2228 rval = XFS_ILOG_CORE;
2229 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2230 if (error)
2231 goto done;
2232 if (XFS_IS_CORRUPT(mp, i != 1)) {
2233 xfs_btree_mark_sick(cur);
2234 error = -EFSCORRUPTED;
2235 goto done;
2236 }
2237 if ((error = xfs_btree_delete(cur, &i)))
2238 goto done;
2239 if (XFS_IS_CORRUPT(mp, i != 1)) {
2240 xfs_btree_mark_sick(cur);
2241 error = -EFSCORRUPTED;
2242 goto done;
2243 }
2244 if ((error = xfs_btree_decrement(cur, 0, &i)))
2245 goto done;
2246 if (XFS_IS_CORRUPT(mp, i != 1)) {
2247 xfs_btree_mark_sick(cur);
2248 error = -EFSCORRUPTED;
2249 goto done;
2250 }
2251 error = xfs_bmbt_update(cur, &PREV);
2252 if (error)
2253 goto done;
2254 }
2255 break;
2256
2257 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2258 /*
2259 * Setting all of a previous oldext extent to newext.
2260 * Neither the left nor right neighbors are contiguous with
2261 * the new one.
2262 */
2263 PREV.br_state = new->br_state;
2264 xfs_iext_update_extent(ip, state, icur, &PREV);
2265
2266 if (cur == NULL)
2267 rval = XFS_ILOG_DEXT;
2268 else {
2269 rval = 0;
2270 error = xfs_bmbt_lookup_eq(cur, new, &i);
2271 if (error)
2272 goto done;
2273 if (XFS_IS_CORRUPT(mp, i != 1)) {
2274 xfs_btree_mark_sick(cur);
2275 error = -EFSCORRUPTED;
2276 goto done;
2277 }
2278 error = xfs_bmbt_update(cur, &PREV);
2279 if (error)
2280 goto done;
2281 }
2282 break;
2283
2284 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2285 /*
2286 * Setting the first part of a previous oldext extent to newext.
2287 * The left neighbor is contiguous.
2288 */
2289 LEFT.br_blockcount += new->br_blockcount;
2290
2291 old = PREV;
2292 PREV.br_startoff += new->br_blockcount;
2293 PREV.br_startblock += new->br_blockcount;
2294 PREV.br_blockcount -= new->br_blockcount;
2295
2296 xfs_iext_update_extent(ip, state, icur, &PREV);
2297 xfs_iext_prev(ifp, icur);
2298 xfs_iext_update_extent(ip, state, icur, &LEFT);
2299
2300 if (cur == NULL)
2301 rval = XFS_ILOG_DEXT;
2302 else {
2303 rval = 0;
2304 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2305 if (error)
2306 goto done;
2307 if (XFS_IS_CORRUPT(mp, i != 1)) {
2308 xfs_btree_mark_sick(cur);
2309 error = -EFSCORRUPTED;
2310 goto done;
2311 }
2312 error = xfs_bmbt_update(cur, &PREV);
2313 if (error)
2314 goto done;
2315 error = xfs_btree_decrement(cur, 0, &i);
2316 if (error)
2317 goto done;
2318 error = xfs_bmbt_update(cur, &LEFT);
2319 if (error)
2320 goto done;
2321 }
2322 break;
2323
2324 case BMAP_LEFT_FILLING:
2325 /*
2326 * Setting the first part of a previous oldext extent to newext.
2327 * The left neighbor is not contiguous.
2328 */
2329 old = PREV;
2330 PREV.br_startoff += new->br_blockcount;
2331 PREV.br_startblock += new->br_blockcount;
2332 PREV.br_blockcount -= new->br_blockcount;
2333
2334 xfs_iext_update_extent(ip, state, icur, &PREV);
2335 xfs_iext_insert(ip, icur, new, state);
2336 ifp->if_nextents++;
2337
2338 if (cur == NULL)
2339 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2340 else {
2341 rval = XFS_ILOG_CORE;
2342 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2343 if (error)
2344 goto done;
2345 if (XFS_IS_CORRUPT(mp, i != 1)) {
2346 xfs_btree_mark_sick(cur);
2347 error = -EFSCORRUPTED;
2348 goto done;
2349 }
2350 error = xfs_bmbt_update(cur, &PREV);
2351 if (error)
2352 goto done;
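 /* stage the new record in the cursor for xfs_btree_insert() */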
2353 cur->bc_rec.b = *new;
2354 if ((error = xfs_btree_insert(cur, &i)))
2355 goto done;
2356 if (XFS_IS_CORRUPT(mp, i != 1)) {
2357 xfs_btree_mark_sick(cur);
2358 error = -EFSCORRUPTED;
2359 goto done;
2360 }
2361 }
2362 break;
2363
2364 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2365 /*
2366 * Setting the last part of a previous oldext extent to newext.
2367 * The right neighbor is contiguous with the new allocation.
2368 */
2369 old = PREV;
2370 PREV.br_blockcount -= new->br_blockcount;
2371
2372 RIGHT.br_startoff = new->br_startoff;
2373 RIGHT.br_startblock = new->br_startblock;
2374 RIGHT.br_blockcount += new->br_blockcount;
2375
2376 xfs_iext_update_extent(ip, state, icur, &PREV);
2377 xfs_iext_next(ifp, icur);
2378 xfs_iext_update_extent(ip, state, icur, &RIGHT);
2379
2380 if (cur == NULL)
2381 rval = XFS_ILOG_DEXT;
2382 else {
2383 rval = 0;
2384 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2385 if (error)
2386 goto done;
2387 if (XFS_IS_CORRUPT(mp, i != 1)) {
2388 xfs_btree_mark_sick(cur);
2389 error = -EFSCORRUPTED;
2390 goto done;
2391 }
2392 error = xfs_bmbt_update(cur, &PREV);
2393 if (error)
2394 goto done;
2395 error = xfs_btree_increment(cur, 0, &i);
2396 if (error)
2397 goto done;
2398 error = xfs_bmbt_update(cur, &RIGHT);
2399 if (error)
2400 goto done;
2401 }
2402 break;
2403
2404 case BMAP_RIGHT_FILLING:
2405 /*
2406 * Setting the last part of a previous oldext extent to newext.
2407 * The right neighbor is not contiguous.
2408 */
2409 old = PREV;
2410 PREV.br_blockcount -= new->br_blockcount;
2411
2412 xfs_iext_update_extent(ip, state, icur, &PREV);
2413 xfs_iext_next(ifp, icur);
2414 xfs_iext_insert(ip, icur, new, state);
2415 ifp->if_nextents++;
2416
2417 if (cur == NULL)
2418 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2419 else {
2420 rval = XFS_ILOG_CORE;
2421 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2422 if (error)
2423 goto done;
2424 if (XFS_IS_CORRUPT(mp, i != 1)) {
2425 xfs_btree_mark_sick(cur);
2426 error = -EFSCORRUPTED;
2427 goto done;
2428 }
2429 error = xfs_bmbt_update(cur, &PREV);
2430 if (error)
2431 goto done;
2432 error = xfs_bmbt_lookup_eq(cur, new, &i);
2433 if (error)
2434 goto done;
2435 if (XFS_IS_CORRUPT(mp, i != 0)) {
2436 xfs_btree_mark_sick(cur);
2437 error = -EFSCORRUPTED;
2438 goto done;
2439 }
2440 if ((error = xfs_btree_insert(cur, &i)))
2441 goto done;
2442 if (XFS_IS_CORRUPT(mp, i != 1)) {
2443 xfs_btree_mark_sick(cur);
2444 error = -EFSCORRUPTED;
2445 goto done;
2446 }
2447 }
2448 break;
2449
2450 case 0:
2451 /*
2452 * Setting the middle part of a previous oldext extent to
2453 * newext. Contiguity is impossible here.
2454 * One extent becomes three extents.
2455 */
2456 old = PREV;
2457 PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
2458
2459 r[0] = *new;
2460 r[1].br_startoff = new_endoff;
2461 r[1].br_blockcount =
2462 old.br_startoff + old.br_blockcount - new_endoff;
2463 r[1].br_startblock = new->br_startblock + new->br_blockcount;
2464 r[1].br_state = PREV.br_state;
2465
2466 xfs_iext_update_extent(ip, state, icur, &PREV);
2467 xfs_iext_next(ifp, icur);
2468 xfs_iext_insert(ip, icur, &r[1], state);
2469 xfs_iext_insert(ip, icur, &r[0], state);
2470 ifp->if_nextents += 2;
2471
2472 if (cur == NULL)
2473 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2474 else {
2475 rval = XFS_ILOG_CORE;
2476 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2477 if (error)
2478 goto done;
2479 if (XFS_IS_CORRUPT(mp, i != 1)) {
2480 xfs_btree_mark_sick(cur);
2481 error = -EFSCORRUPTED;
2482 goto done;
2483 }
2484 /* new right extent - oldext */
2485 error = xfs_bmbt_update(cur, &r[1]);
2486 if (error)
2487 goto done;
2488 /* new left extent - oldext */
2489 cur->bc_rec.b = PREV;
2490 if ((error = xfs_btree_insert(cur, &i)))
2491 goto done;
2492 if (XFS_IS_CORRUPT(mp, i != 1)) {
2493 xfs_btree_mark_sick(cur);
2494 error = -EFSCORRUPTED;
2495 goto done;
2496 }
2497 /*
2498 * Reset the cursor to the position of the new extent
2499 * we are about to insert as we can't trust it after
2500 * the previous insert.
2501 */
2502 error = xfs_bmbt_lookup_eq(cur, new, &i);
2503 if (error)
2504 goto done;
2505 if (XFS_IS_CORRUPT(mp, i != 0)) {
2506 xfs_btree_mark_sick(cur);
2507 error = -EFSCORRUPTED;
2508 goto done;
2509 }
2510 /* new middle extent - newext */
2511 if ((error = xfs_btree_insert(cur, &i)))
2512 goto done;
2513 if (XFS_IS_CORRUPT(mp, i != 1)) {
2514 xfs_btree_mark_sick(cur);
2515 error = -EFSCORRUPTED;
2516 goto done;
2517 }
2518 }
2519 break;
2520
2521 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2522 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2523 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2524 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2525 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2526 case BMAP_LEFT_CONTIG:
2527 case BMAP_RIGHT_CONTIG:
2528 /*
2529 * These cases are all impossible.
2530 */
2531 ASSERT(0);
2532 }
2533
2534 /* update reverse mappings */
2535 xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);
2536
2537 /* convert to a btree if necessary */
2538 if (xfs_bmap_needs_btree(ip, whichfork)) {
2539 int tmp_logflags; /* partial log flag return val */
2540
2541 ASSERT(cur == NULL);
2542 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
2543 &tmp_logflags, whichfork);
2544 *logflagsp |= tmp_logflags;
2545 if (error)
2546 goto done;
2547 }
2548
2549 /* clear out the allocated field, done with it now in any case. */
2550 if (cur) {
2551 cur->bc_bmap.allocated = 0;
2552 *curp = cur;
2553 }
2554
2555 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
2556 done:
2557 *logflagsp |= rval;
2558 return error;
2559 #undef LEFT
2560 #undef RIGHT
2561 #undef PREV
2562 }
2563
2564 /*
2565 * Convert a hole to a real allocation.
2566 */
2567 STATIC int /* error */
2568 xfs_bmap_add_extent_hole_real(
2569 struct xfs_trans *tp,
2570 struct xfs_inode *ip,
2571 int whichfork,
2572 struct xfs_iext_cursor *icur,
2573 struct xfs_btree_cur **curp,
2574 struct xfs_bmbt_irec *new,
2575 int *logflagsp,
2576 uint32_t flags)
2577 {
2578 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
2579 struct xfs_mount *mp = ip->i_mount;
2580 struct xfs_btree_cur *cur = *curp;
2581 int error; /* error return value */
2582 int i; /* temp state */
2583 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2584 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2585 int rval=0; /* return value (logging flags) */
2586 uint32_t state = xfs_bmap_fork_to_state(whichfork);
2587 struct xfs_bmbt_irec old;
2588
2589 ASSERT(!isnullstartblock(new->br_startblock));
2590 ASSERT(!cur || !(cur->bc_flags & XFS_BTREE_BMBT_WASDEL));
2591
2592 XFS_STATS_INC(mp, xs_add_exlist);
2593
2594 /*
2595 * Check and set flags if this segment has a left neighbor.
2596 */
2597 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2598 state |= BMAP_LEFT_VALID;
2599 if (isnullstartblock(left.br_startblock))
2600 state |= BMAP_LEFT_DELAY;
2601 }
2602
2603 /*
2604 * Check and set flags if this segment has a current value.
2605 * Not true if we're inserting into the "hole" at eof.
2606 */
2607 if (xfs_iext_get_extent(ifp, icur, &right)) {
2608 state |= BMAP_RIGHT_VALID;
2609 if (isnullstartblock(right.br_startblock))
2610 state |= BMAP_RIGHT_DELAY;
2611 }
2612
2613 /*
2614 * We're inserting a real allocation between "left" and "right".
2615 * Set the contiguity flags. Don't let extents get too large.
2616 */
2617 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2618 left.br_startoff + left.br_blockcount == new->br_startoff &&
2619 left.br_startblock + left.br_blockcount == new->br_startblock &&
2620 left.br_state == new->br_state &&
2621 left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2622 xfs_bmap_same_rtgroup(ip, whichfork, &left, new))
2623 state |= BMAP_LEFT_CONTIG;
2624
2625 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2626 new->br_startoff + new->br_blockcount == right.br_startoff &&
2627 new->br_startblock + new->br_blockcount == right.br_startblock &&
2628 new->br_state == right.br_state &&
2629 new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2630 (!(state & BMAP_LEFT_CONTIG) ||
2631 left.br_blockcount + new->br_blockcount +
2632 right.br_blockcount <= XFS_MAX_BMBT_EXTLEN) &&
2633 xfs_bmap_same_rtgroup(ip, whichfork, new, &right))
2634 state |= BMAP_RIGHT_CONTIG;
2635
2636 error = 0;
2637 /*
2638 * Select which case we're in here, and implement it.
2639 */
2640 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2641 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2642 /*
2643 * New allocation is contiguous with real allocations on the
2644 * left and on the right.
2645 * Merge all three into a single extent record.
2646 */
2647 left.br_blockcount += new->br_blockcount + right.br_blockcount;
2648
2649 xfs_iext_remove(ip, icur, state);
2650 xfs_iext_prev(ifp, icur);
2651 xfs_iext_update_extent(ip, state, icur, &left);
2652 ifp->if_nextents--;
2653
2654 if (cur == NULL) {
2655 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2656 } else {
2657 rval = XFS_ILOG_CORE;
2658 error = xfs_bmbt_lookup_eq(cur, &right, &i);
2659 if (error)
2660 goto done;
2661 if (XFS_IS_CORRUPT(mp, i != 1)) {
2662 xfs_btree_mark_sick(cur);
2663 error = -EFSCORRUPTED;
2664 goto done;
2665 }
2666 error = xfs_btree_delete(cur, &i);
2667 if (error)
2668 goto done;
2669 if (XFS_IS_CORRUPT(mp, i != 1)) {
2670 xfs_btree_mark_sick(cur);
2671 error = -EFSCORRUPTED;
2672 goto done;
2673 }
2674 error = xfs_btree_decrement(cur, 0, &i);
2675 if (error)
2676 goto done;
2677 if (XFS_IS_CORRUPT(mp, i != 1)) {
2678 xfs_btree_mark_sick(cur);
2679 error = -EFSCORRUPTED;
2680 goto done;
2681 }
2682 error = xfs_bmbt_update(cur, &left);
2683 if (error)
2684 goto done;
2685 }
2686 break;
2687
2688 case BMAP_LEFT_CONTIG:
2689 /*
2690 * New allocation is contiguous with a real allocation
2691 * on the left.
2692 * Merge the new allocation with the left neighbor.
2693 */
2694 old = left;
2695 left.br_blockcount += new->br_blockcount;
2696
2697 xfs_iext_prev(ifp, icur);
2698 xfs_iext_update_extent(ip, state, icur, &left);
2699
2700 if (cur == NULL) {
2701 rval = xfs_ilog_fext(whichfork);
2702 } else {
2703 rval = 0;
2704 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2705 if (error)
2706 goto done;
2707 if (XFS_IS_CORRUPT(mp, i != 1)) {
2708 xfs_btree_mark_sick(cur);
2709 error = -EFSCORRUPTED;
2710 goto done;
2711 }
2712 error = xfs_bmbt_update(cur, &left);
2713 if (error)
2714 goto done;
2715 }
2716 break;
2717
2718 case BMAP_RIGHT_CONTIG:
2719 /*
2720 * New allocation is contiguous with a real allocation
2721 * on the right.
2722 * Merge the new allocation with the right neighbor.
2723 */
2724 old = right;
2725
2726 right.br_startoff = new->br_startoff;
2727 right.br_startblock = new->br_startblock;
2728 right.br_blockcount += new->br_blockcount;
2729 xfs_iext_update_extent(ip, state, icur, &right);
2730
2731 if (cur == NULL) {
2732 rval = xfs_ilog_fext(whichfork);
2733 } else {
2734 rval = 0;
2735 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2736 if (error)
2737 goto done;
2738 if (XFS_IS_CORRUPT(mp, i != 1)) {
2739 xfs_btree_mark_sick(cur);
2740 error = -EFSCORRUPTED;
2741 goto done;
2742 }
2743 error = xfs_bmbt_update(cur, &right);
2744 if (error)
2745 goto done;
2746 }
2747 break;
2748
2749 case 0:
2750 /*
2751 * New allocation is not contiguous with another
2752 * real allocation.
2753 * Insert a new entry.
2754 */
2755 xfs_iext_insert(ip, icur, new, state);
2756 ifp->if_nextents++;
2757
2758 if (cur == NULL) {
2759 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2760 } else {
2761 rval = XFS_ILOG_CORE;
2762 error = xfs_bmbt_lookup_eq(cur, new, &i);
2763 if (error)
2764 goto done;
2765 if (XFS_IS_CORRUPT(mp, i != 0)) {
2766 xfs_btree_mark_sick(cur);
2767 error = -EFSCORRUPTED;
2768 goto done;
2769 }
2770 error = xfs_btree_insert(cur, &i);
2771 if (error)
2772 goto done;
2773 if (XFS_IS_CORRUPT(mp, i != 1)) {
2774 xfs_btree_mark_sick(cur);
2775 error = -EFSCORRUPTED;
2776 goto done;
2777 }
2778 }
2779 break;
2780 }
2781
2782 /* add reverse mapping unless caller opted out */
2783 if (!(flags & XFS_BMAPI_NORMAP))
2784 xfs_rmap_map_extent(tp, ip, whichfork, new);
2785
2786 /* convert to a btree if necessary */
2787 if (xfs_bmap_needs_btree(ip, whichfork)) {
2788 int tmp_logflags; /* partial log flag return val */
2789
2790 ASSERT(cur == NULL);
2791 error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
2792 &tmp_logflags, whichfork);
2793 *logflagsp |= tmp_logflags;
2794 cur = *curp;
2795 if (error)
2796 goto done;
2797 }
2798
2799 /* clear out the allocated field, done with it now in any case. */
2800 if (cur)
2801 cur->bc_bmap.allocated = 0;
2802
2803 xfs_bmap_check_leaf_extents(cur, ip, whichfork);
2804 done:
2805 *logflagsp |= rval;
2806 return error;
2807 }
2808
2809 /*
2810 * Functions used in the extent read, allocate and remove paths
2811 */
2812
2813 /*
2814 * Adjust the size of the new extent based on i_extsize and rt extsize.
2815 */
2816 int
2817 xfs_bmap_extsize_align(
2818 xfs_mount_t *mp,
2819 xfs_bmbt_irec_t *gotp, /* next extent pointer */
2820 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
2821 xfs_extlen_t extsz, /* align to this extent size */
2822 int rt, /* is this a realtime inode? */
2823 int eof, /* is extent at end-of-file? */
2824 int delay, /* creating delalloc extent? */
2825 int convert, /* overwriting unwritten extent? */
2826 xfs_fileoff_t *offp, /* in/out: aligned offset */
2827 xfs_extlen_t *lenp) /* in/out: aligned length */
2828 {
2829 xfs_fileoff_t orig_off; /* original offset */
2830 xfs_extlen_t orig_alen; /* original length */
2831 xfs_fileoff_t orig_end; /* original off+len */
2832 xfs_fileoff_t nexto; /* next file offset */
2833 xfs_fileoff_t prevo; /* previous file offset */
2834 xfs_fileoff_t align_off; /* temp for offset */
2835 xfs_extlen_t align_alen; /* temp for length */
2836 xfs_extlen_t temp; /* temp for calculations */
2837
2838 if (convert)
2839 return 0;
2840
2841 orig_off = align_off = *offp;
2842 orig_alen = align_alen = *lenp;
2843 orig_end = orig_off + orig_alen;
2844
2845 /*
2846 * If this request overlaps an existing extent, then don't
2847 * attempt to perform any additional alignment.
2848 */
2849 if (!delay && !eof &&
2850 (orig_off >= gotp->br_startoff) &&
2851 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2852 return 0;
2853 }
2854
2855 /*
2856 * If the file offset is unaligned vs. the extent size
2857 * we need to align it. This will be possible unless
2858 * the file was previously written with a kernel that didn't
2859 * perform this alignment, or if a truncate shot us in the
2860 * foot.
2861 */
2862 div_u64_rem(orig_off, extsz, &temp);
2863 if (temp) {
2864 align_alen += temp;
2865 align_off -= temp;
2866 }
2867
2868 /* Same adjustment for the end of the requested area. */
2869 temp = (align_alen % extsz);
2870 if (temp)
2871 align_alen += extsz - temp;
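 /*
  * Illustrative example: with extsz = 16, a request for 10 blocks at
  * offset 21 first becomes offset 16/length 15 above, and the end
  * rounding then extends it to length 16, i.e. one aligned extsz
  * chunk covering the whole original range.
  */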
2872
2873 /*
2874 * For large extent hint sizes, the aligned extent might be larger than
2875 * XFS_MAX_BMBT_EXTLEN. In that case, reduce the size by an extsz so
2876 * that it pulls the length back under XFS_MAX_BMBT_EXTLEN. The outer
2877 * allocation loops handle short allocation just fine, so it is safe to
2878 * do this. We only want to do it when we are forced to, though, because
2879 * it means more allocation operations are required.
2880 */
2881 while (align_alen > XFS_MAX_BMBT_EXTLEN)
2882 align_alen -= extsz;
2883 ASSERT(align_alen <= XFS_MAX_BMBT_EXTLEN);
2884
2885 /*
2886 * If the previous block overlaps with this proposed allocation
2887 * then move the start forward without adjusting the length.
2888 */
2889 if (prevp->br_startoff != NULLFILEOFF) {
2890 if (prevp->br_startblock == HOLESTARTBLOCK)
2891 prevo = prevp->br_startoff;
2892 else
2893 prevo = prevp->br_startoff + prevp->br_blockcount;
2894 } else
2895 prevo = 0;
2896 if (align_off != orig_off && align_off < prevo)
2897 align_off = prevo;
2898 /*
2899 * If the next block overlaps with this proposed allocation
2900 * then move the start back without adjusting the length,
2901 * but not before offset 0.
2902 * This may of course make the start overlap previous block,
2903 * and if we hit the offset 0 limit then the next block
2904 * can still overlap too.
2905 */
2906 if (!eof && gotp->br_startoff != NULLFILEOFF) {
2907 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
2908 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
2909 nexto = gotp->br_startoff + gotp->br_blockcount;
2910 else
2911 nexto = gotp->br_startoff;
2912 } else
2913 nexto = NULLFILEOFF;
2914 if (!eof &&
2915 align_off + align_alen != orig_end &&
2916 align_off + align_alen > nexto)
2917 align_off = nexto > align_alen ? nexto - align_alen : 0;
2918 /*
2919 * If we're now overlapping the next or previous extent that
2920 * means we can't fit an extsz piece in this hole. Just move
2921 * the start forward to the first valid spot and set
2922 * the length so we hit the end.
2923 */
2924 if (align_off != orig_off && align_off < prevo)
2925 align_off = prevo;
2926 if (align_off + align_alen != orig_end &&
2927 align_off + align_alen > nexto &&
2928 nexto != NULLFILEOFF) {
2929 ASSERT(nexto > prevo);
2930 align_alen = nexto - align_off;
2931 }
2932
2933 /*
2934 * If realtime, and the result isn't a multiple of the realtime
2935 * extent size we need to remove blocks until it is.
2936 */
2937 if (rt && (temp = xfs_extlen_to_rtxmod(mp, align_alen))) {
2938 /*
2939 * We're not covering the original request, or
2940 * we won't be able to once we fix the length.
2941 */
2942 if (orig_off < align_off ||
2943 orig_end > align_off + align_alen ||
2944 align_alen - temp < orig_alen)
2945 return -EINVAL;
2946 /*
2947 * Try to fix it by moving the start up.
2948 */
2949 if (align_off + temp <= orig_off) {
2950 align_alen -= temp;
2951 align_off += temp;
2952 }
2953 /*
2954 * Try to fix it by moving the end in.
2955 */
2956 else if (align_off + align_alen - temp >= orig_end)
2957 align_alen -= temp;
2958 /*
2959 * Set the start to the minimum then trim the length.
2960 */
2961 else {
2962 align_alen -= orig_off - align_off;
2963 align_off = orig_off;
2964 align_alen -= xfs_extlen_to_rtxmod(mp, align_alen);
2965 }
2966 /*
2967 * Result doesn't cover the request, fail it.
2968 */
2969 if (orig_off < align_off || orig_end > align_off + align_alen)
2970 return -EINVAL;
2971 } else {
2972 ASSERT(orig_off >= align_off);
2973 /* see XFS_MAX_BMBT_EXTLEN handling above */
2974 ASSERT(orig_end <= align_off + align_alen ||
2975 align_alen + extsz > XFS_MAX_BMBT_EXTLEN);
2976 }
2977
2978 #ifdef DEBUG
2979 if (!eof && gotp->br_startoff != NULLFILEOFF)
2980 ASSERT(align_off + align_alen <= gotp->br_startoff);
2981 if (prevp->br_startoff != NULLFILEOFF)
2982 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
2983 #endif
2984
2985 *lenp = align_alen;
2986 *offp = align_off;
2987 return 0;
2988 }
2989
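/*
 * Decide whether candidate block number x is a usable locality target: it
 * must lie within the bounds of the filesystem and, where space is divided
 * into groups (AGs or realtime groups), in the same group as neighbour y.
 */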
2990 static inline bool
2991 xfs_bmap_adjacent_valid(
2992 struct xfs_bmalloca *ap,
2993 xfs_fsblock_t x,
2994 xfs_fsblock_t y)
2995 {
2996 struct xfs_mount *mp = ap->ip->i_mount;
2997
2998 if (XFS_IS_REALTIME_INODE(ap->ip) &&
2999 (ap->datatype & XFS_ALLOC_USERDATA)) {
3000 if (!xfs_has_rtgroups(mp))
3001 return x < mp->m_sb.sb_rblocks;
3002
3003 return xfs_rtb_to_rgno(mp, x) == xfs_rtb_to_rgno(mp, y) &&
3004 xfs_rtb_to_rgno(mp, x) < mp->m_sb.sb_rgcount &&
3005 xfs_rtb_to_rtx(mp, x) < mp->m_sb.sb_rgextents;
3007 }
3008
3009 return XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) &&
3010 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount &&
3011 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks;
3012 }
3013
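/*
 * Heuristic scale factor: a gap of up to XFS_ALLOC_GAP_UNITS times the
 * requested length is still considered small enough to aim the allocation
 * just past it, preserving exact file-offset adjacency.
 */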
3014 #define XFS_ALLOC_GAP_UNITS 4
3015
3016 /* returns true if ap->blkno was modified */
3017 bool
3018 xfs_bmap_adjacent(
3019 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3020 {
3021 xfs_fsblock_t adjust; /* adjustment to block numbers */
3022
3023 /*
3024 * If allocating at eof, and there's a previous real block,
3025 * try to use its last block as our starting point.
3026 */
3027 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3028 !isnullstartblock(ap->prev.br_startblock) &&
3029 xfs_bmap_adjacent_valid(ap,
3030 ap->prev.br_startblock + ap->prev.br_blockcount,
3031 ap->prev.br_startblock)) {
3032 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3033 /*
3034 * Adjust for the gap between prevp and us.
3035 */
3036 adjust = ap->offset -
3037 (ap->prev.br_startoff + ap->prev.br_blockcount);
3038 if (adjust && xfs_bmap_adjacent_valid(ap, ap->blkno + adjust,
3039 ap->prev.br_startblock))
3040 ap->blkno += adjust;
3041 return true;
3042 }
3043 /*
3044 * If not at eof, then compare the two neighbor blocks.
3045 * Figure out whether either one gives us a good starting point,
3046 * and pick the better one.
3047 */
3048 if (!ap->eof) {
3049 xfs_fsblock_t gotbno; /* right side block number */
3050 xfs_fsblock_t gotdiff=0; /* right side difference */
3051 xfs_fsblock_t prevbno; /* left side block number */
3052 xfs_fsblock_t prevdiff=0; /* left side difference */
3053
3054 /*
3055 * If there's a previous (left) block, select a requested
3056 * start block based on it.
3057 */
3058 if (ap->prev.br_startoff != NULLFILEOFF &&
3059 !isnullstartblock(ap->prev.br_startblock) &&
3060 (prevbno = ap->prev.br_startblock +
3061 ap->prev.br_blockcount) &&
3062 xfs_bmap_adjacent_valid(ap, prevbno,
3063 ap->prev.br_startblock)) {
3064 /*
3065 * Calculate gap to end of previous block.
3066 */
3067 adjust = prevdiff = ap->offset -
3068 (ap->prev.br_startoff +
3069 ap->prev.br_blockcount);
3070 /*
3071 * Figure the startblock based on the previous block's
3072 * end and the gap size.
3073 * Heuristic!
3074 * If the gap is large relative to the piece we're
3075 * allocating, or using it gives us an invalid block
3076 * number, then just use the end of the previous block.
3077 */
3078 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3079 xfs_bmap_adjacent_valid(ap, prevbno + prevdiff,
3080 ap->prev.br_startblock))
3081 prevbno += adjust;
3082 else
3083 prevdiff += adjust;
3084 }
3085 /*
3086 * No previous block or can't follow it, just default.
3087 */
3088 else
3089 prevbno = NULLFSBLOCK;
3090 /*
3091 * If there's a following (right) block, select a requested
3092 * start block based on it.
3093 */
3094 if (!isnullstartblock(ap->got.br_startblock)) {
3095 /*
3096 * Calculate gap to start of next block.
3097 */
3098 adjust = gotdiff = ap->got.br_startoff - ap->offset;
3099 /*
3100 * Figure the startblock based on the next block's
3101 * start and the gap size.
3102 */
3103 gotbno = ap->got.br_startblock;
3104 /*
3105 * Heuristic!
3106 * If the gap is large relative to the piece we're
3107 * allocating, or using it gives us an invalid block
3108 * number, then just use the start of the next block
3109 * offset by our length.
3110 */
3111 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3112 xfs_bmap_adjacent_valid(ap, gotbno - gotdiff,
3113 gotbno))
3114 gotbno -= adjust;
3115 else if (xfs_bmap_adjacent_valid(ap, gotbno - ap->length,
3116 gotbno)) {
3117 gotbno -= ap->length;
3118 gotdiff += adjust - ap->length;
3119 } else
3120 gotdiff += adjust;
3121 }
3122 /*
3123 * No next block, just default.
3124 */
3125 else
3126 gotbno = NULLFSBLOCK;
3127 /*
3128 * If both valid, pick the better one, else the only good
3129 * one, else ap->blkno is already set (to 0 or the inode block).
3130 */
3131 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) {
3132 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3133 return true;
3134 }
3135 if (prevbno != NULLFSBLOCK) {
3136 ap->blkno = prevbno;
3137 return true;
3138 }
3139 if (gotbno != NULLFSBLOCK) {
3140 ap->blkno = gotbno;
3141 return true;
3142 }
3143 }
3144
3145 return false;
3146 }
3147
3148 int
3149 xfs_bmap_longest_free_extent(
3150 struct xfs_perag *pag,
3151 struct xfs_trans *tp,
3152 xfs_extlen_t *blen)
3153 {
3154 xfs_extlen_t longest;
3155 int error = 0;
3156
3157 if (!xfs_perag_initialised_agf(pag)) {
3158 error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_TRYLOCK,
3159 NULL);
3160 if (error)
3161 return error;
3162 }
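 /*
  * Note that the AGF read above uses a trylock, so it can fail with
  * -EAGAIN when the AG is busy; AG iteration callers treat -EAGAIN as
  * "skip this AG" rather than a fatal error.
  */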
3163
3164 longest = xfs_alloc_longest_free_extent(pag,
3165 xfs_alloc_min_freelist(pag_mount(pag), pag),
3166 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3167 if (*blen < longest)
3168 *blen = longest;
3169
3170 return 0;
3171 }
3172
3173 static xfs_extlen_t
3174 xfs_bmap_select_minlen(
3175 struct xfs_bmalloca *ap,
3176 struct xfs_alloc_arg *args,
3177 xfs_extlen_t blen)
3178 {
3180 /*
3181 * Since we used XFS_ALLOC_FLAG_TRYLOCK in _longest_free_extent(), busy
3182 * AGs were skipped, so enough contiguous free space may still exist.
3183 */
3184 if (blen < ap->minlen)
3185 return ap->minlen;
3186
3187 /*
3188 * If the best seen length is less than the request length,
3189 * use the best as the minimum, otherwise we've got the maxlen we
3190 * were asked for.
3191 */
3192 if (blen < args->maxlen)
3193 return blen;
3194 return args->maxlen;
3195 }
3196
3197 static int
3198 xfs_bmap_btalloc_select_lengths(
3199 struct xfs_bmalloca *ap,
3200 struct xfs_alloc_arg *args,
3201 xfs_extlen_t *blen)
3202 {
3203 struct xfs_mount *mp = args->mp;
3204 struct xfs_perag *pag;
3205 xfs_agnumber_t agno, startag;
3206 int error = 0;
3207
3208 if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3209 args->total = ap->minlen;
3210 args->minlen = ap->minlen;
3211 return 0;
3212 }
3213
3214 args->total = ap->total;
3215 startag = XFS_FSB_TO_AGNO(mp, ap->blkno);
3216 if (startag == NULLAGNUMBER)
3217 startag = 0;
3218
3219 *blen = 0;
3220 for_each_perag_wrap(mp, startag, agno, pag) {
3221 error = xfs_bmap_longest_free_extent(pag, args->tp, blen);
3222 if (error && error != -EAGAIN)
3223 break;
3224 error = 0;
3225 if (*blen >= args->maxlen)
3226 break;
3227 }
3228 if (pag)
3229 xfs_perag_rele(pag);
3230
3231 args->minlen = xfs_bmap_select_minlen(ap, args, *blen);
3232 return error;
3233 }
3234
3235 /* Update all inode and quota accounting for the allocation we just did. */
3236 void
3237 xfs_bmap_alloc_account(
3238 struct xfs_bmalloca *ap)
3239 {
3240 bool isrt = XFS_IS_REALTIME_INODE(ap->ip) &&
3241 !(ap->flags & XFS_BMAPI_ATTRFORK);
3242 uint fld;
3243
3244 if (ap->flags & XFS_BMAPI_COWFORK) {
3245 /*
3246 * COW fork blocks are in-core only and thus are treated as
3247 * in-core quota reservation (like delalloc blocks) even when
3248 * converted to real blocks. The quota reservation is not
3249 * accounted to disk until blocks are remapped to the data
3250 * fork. So if these blocks were previously delalloc, we
3251 * already have quota reservation and there's nothing to do
3252 * yet.
3253 */
3254 if (ap->wasdel) {
3255 xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0);
3256 return;
3257 }
3258
3259 /*
3260 * Otherwise, we've allocated blocks in a hole. The transaction
3261 * has acquired in-core quota reservation for this extent.
3262 * Rather than account these as real blocks, however, we reduce
3263 * the transaction quota reservation based on the allocation.
3264 * This essentially transfers the transaction quota reservation
3265 * to that of a delalloc extent.
3266 */
3267 ap->ip->i_delayed_blks += ap->length;
3268 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, isrt ?
3269 XFS_TRANS_DQ_RES_RTBLKS : XFS_TRANS_DQ_RES_BLKS,
3270 -(long)ap->length);
3271 return;
3272 }
3273
3274 /* data/attr fork only */
3275 ap->ip->i_nblocks += ap->length;
3276 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3277 if (ap->wasdel) {
3278 ap->ip->i_delayed_blks -= ap->length;
3279 xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0);
3280 fld = isrt ? XFS_TRANS_DQ_DELRTBCOUNT : XFS_TRANS_DQ_DELBCOUNT;
3281 } else {
3282 fld = isrt ? XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
3283 }
3284
3285 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, fld, ap->length);
3286 }
3287
3288 static int
3289 xfs_bmap_compute_alignments(
3290 struct xfs_bmalloca *ap,
3291 struct xfs_alloc_arg *args)
3292 {
3293 struct xfs_mount *mp = args->mp;
3294 xfs_extlen_t align = 0; /* minimum allocation alignment */
3295 int stripe_align = 0;
3296
3297 /* stripe alignment for allocation is determined by mount parameters */
3298 if (mp->m_swidth && xfs_has_swalloc(mp))
3299 stripe_align = mp->m_swidth;
3300 else if (mp->m_dalign)
3301 stripe_align = mp->m_dalign;
3302
3303 if (ap->flags & XFS_BMAPI_COWFORK)
3304 align = xfs_get_cowextsz_hint(ap->ip);
3305 else if (ap->datatype & XFS_ALLOC_USERDATA)
3306 align = xfs_get_extsz_hint(ap->ip);
3307
3308 /* Try to align start block to any minimum allocation alignment */
3309 if (align > 1 && (ap->flags & XFS_BMAPI_EXTSZALIGN))
3310 args->alignment = align;
3311
3312 if (align) {
3313 if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0,
3314 ap->eof, 0, ap->conv, &ap->offset,
3315 &ap->length))
3316 ASSERT(0);
3317 ASSERT(ap->length);
3318 }
3319
3320 /* apply extent size hints if obtained earlier */
3321 if (align) {
3322 args->prod = align;
3323 div_u64_rem(ap->offset, args->prod, &args->mod);
3324 if (args->mod)
3325 args->mod = args->prod - args->mod;
3326 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3327 args->prod = 1;
3328 args->mod = 0;
3329 } else {
3330 args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3331 div_u64_rem(ap->offset, args->prod, &args->mod);
3332 if (args->mod)
3333 args->mod = args->prod - args->mod;
3334 }
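 /*
  * The prod/mod pair asks the allocator to trim the length so that its
  * remainder modulo prod equals mod, which makes ap->offset plus the
  * allocated length land on an alignment (or page size) boundary.
  */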
3335
3336 return stripe_align;
3337 }
3338
3339 static void
3340 xfs_bmap_process_allocated_extent(
3341 struct xfs_bmalloca *ap,
3342 struct xfs_alloc_arg *args,
3343 xfs_fileoff_t orig_offset,
3344 xfs_extlen_t orig_length)
3345 {
3346 ap->blkno = args->fsbno;
3347 ap->length = args->len;
3348 /*
3349 * If the extent size hint is active, we tried to round the
3350 * caller's allocation request offset down to extsz and the
3351 * length up to another extsz boundary. If we found a free
3352 * extent we mapped it in starting at this new offset. If the
3353 * newly mapped space isn't long enough to cover any of the
3354 * range of offsets that was originally requested, move the
3355 * mapping up so that we can fill as much of the caller's
3356 * original request as possible. Free space is apparently
3357 * very fragmented so we're unlikely to be able to satisfy the
3358 * hints anyway.
3359 */
3360 if (ap->length <= orig_length)
3361 ap->offset = orig_offset;
3362 else if (ap->offset + ap->length < orig_offset + orig_length)
3363 ap->offset = orig_offset + orig_length - ap->length;
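 /*
  * Illustrative example: a request for blocks 100-109 rounded out to
  * offset 96/length 16 that only received 12 blocks is moved up to
  * offset 98, so the mapping still ends at block 109 and covers the
  * whole of the original request.
  */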
3364 xfs_bmap_alloc_account(ap);
3365 }
3366
3367 static int
3368 xfs_bmap_exact_minlen_extent_alloc(
3369 struct xfs_bmalloca *ap,
3370 struct xfs_alloc_arg *args)
3371 {
3372 if (ap->minlen != 1) {
3373 args->fsbno = NULLFSBLOCK;
3374 return 0;
3375 }
3376
3377 args->alloc_minlen_only = 1;
3378 args->minlen = args->maxlen = ap->minlen;
3379 args->total = ap->total;
3380
3381 /*
3382 * Unlike the longest extent available in an AG, we don't track
3383 * the length of an AG's shortest extent.
3384 * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug only knob and
3385 * hence we can afford to start traversing from the 0th AG since
3386 * we need not be concerned about a drop in performance in
3387 * "debug only" code paths.
3388 */
3389 ap->blkno = XFS_AGB_TO_FSB(ap->ip->i_mount, 0, 0);
3390
3391 /*
3392 * Call xfs_bmap_btalloc_low_space here as it first does a "normal" AG
3393 * iteration and then drops args->total to args->minlen, which might be
3394 * required to find an allocation for the transaction reservation when
3395 * the file system is very full.
3396 */
3397 return xfs_bmap_btalloc_low_space(ap, args);
3398 }
3399
3400 /*
3401 * If we are not low on available data blocks and we are allocating at
3402 * EOF, optimise allocation for contiguous file extension and/or stripe
3403 * alignment of the new extent.
3404 *
3405 * NOTE: ap->aeof is only set if the allocation length is >= the
3406 * stripe unit and the allocation offset is at the end of file.
3407 */
3408 static int
3409 xfs_bmap_btalloc_at_eof(
3410 struct xfs_bmalloca *ap,
3411 struct xfs_alloc_arg *args,
3412 xfs_extlen_t blen,
3413 int stripe_align,
3414 bool ag_only)
3415 {
3416 struct xfs_mount *mp = args->mp;
3417 struct xfs_perag *caller_pag = args->pag;
3418 int error;
3419
3420 /*
3421 * If there are already extents in the file, and xfs_bmap_adjacent() has
3422 * given a better blkno, try an exact EOF block allocation to extend the
3423 * file as a contiguous extent. If that fails, or it's the first
3424 * allocation in a file, just try for a stripe aligned allocation.
3425 */
3426 if (ap->eof) {
3427 xfs_extlen_t nextminlen = 0;
3428
3429 /*
3430 * Compute the minlen+alignment for the next case. Set slop so
3431 * that the value of minlen+alignment+slop doesn't go up between
3432 * the calls.
3433 */
3434 args->alignment = 1;
3435 if (blen > stripe_align && blen <= args->maxlen)
3436 nextminlen = blen - stripe_align;
3437 else
3438 nextminlen = args->minlen;
3439 if (nextminlen + stripe_align > args->minlen + 1)
3440 args->minalignslop = nextminlen + stripe_align -
3441 args->minlen - 1;
3442 else
3443 args->minalignslop = 0;
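 /*
  * The slop reserves enough extra space during this unaligned
  * exact-bno attempt that the aligned retry below (with minlen set
  * to nextminlen) can never need more free space than was accounted
  * for here: an aligned allocation needs at most nextminlen +
  * stripe_align - 1 contiguous blocks.
  */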
3444
3445 if (!caller_pag)
3446 args->pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, ap->blkno));
3447 error = xfs_alloc_vextent_exact_bno(args, ap->blkno);
3448 if (!caller_pag) {
3449 xfs_perag_put(args->pag);
3450 args->pag = NULL;
3451 }
3452 if (error)
3453 return error;
3454
3455 if (args->fsbno != NULLFSBLOCK)
3456 return 0;
3457 /*
3458 * Exact allocation failed. Reset to try an aligned allocation
3459 * according to the original allocation specification.
3460 */
3461 args->alignment = stripe_align;
3462 args->minlen = nextminlen;
3463 args->minalignslop = 0;
3464 } else {
3465 /*
3466 * Adjust minlen to try and preserve alignment if we
3467 * can't guarantee an aligned maxlen extent.
3468 */
3469 args->alignment = stripe_align;
3470 if (blen > args->alignment &&
3471 blen <= args->maxlen + args->alignment)
3472 args->minlen = blen - args->alignment;
3473 args->minalignslop = 0;
3474 }
3475
3476 if (ag_only) {
3477 error = xfs_alloc_vextent_near_bno(args, ap->blkno);
3478 } else {
3479 args->pag = NULL;
3480 error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3481 ASSERT(args->pag == NULL);
3482 args->pag = caller_pag;
3483 }
3484 if (error)
3485 return error;
3486
3487 if (args->fsbno != NULLFSBLOCK)
3488 return 0;
3489
3490 /*
3491 * Allocation failed, so restore the allocation args to their
3492 * original non-aligned state so the caller can proceed on allocation
3493 * failure as if this function was never called.
3494 */
3495 args->alignment = 1;
3496 return 0;
3497 }
3498
3499 /*
3500 * We have failed multiple allocation attempts so now are in a low space
3501 * allocation situation. Try a locality-first, minimum length allocation
3502 * across the full filesystem whilst still maintaining the necessary total
3503 * block reservation requirements.
3504 *
3505 * If that fails, we are now critically low on space, so perform a last resort
3506 * allocation attempt: no reserve, no locality, blocking, minimum length, full
3507 * filesystem free space scan. We also indicate to future allocations in this
3508 * transaction that we are critically low on space so they don't waste time on
3509 * allocation modes that are unlikely to succeed.
3510 */
3511 int
3512 xfs_bmap_btalloc_low_space(
3513 struct xfs_bmalloca *ap,
3514 struct xfs_alloc_arg *args)
3515 {
3516 int error;
3517
3518 if (args->minlen > ap->minlen) {
3519 args->minlen = ap->minlen;
3520 error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3521 if (error || args->fsbno != NULLFSBLOCK)
3522 return error;
3523 }
3524
3525 /* Last ditch attempt before failure is declared. */
3526 args->total = ap->minlen;
3527 error = xfs_alloc_vextent_first_ag(args, 0);
3528 if (error)
3529 return error;
3530 ap->tp->t_flags |= XFS_TRANS_LOWMODE;
3531 return 0;
3532 }
3533
3534 static int
3535 xfs_bmap_btalloc_filestreams(
3536 struct xfs_bmalloca *ap,
3537 struct xfs_alloc_arg *args,
3538 int stripe_align)
3539 {
3540 xfs_extlen_t blen = 0;
3541 int error = 0;
3542
3544 error = xfs_filestream_select_ag(ap, args, &blen);
3545 if (error)
3546 return error;
3547 ASSERT(args->pag);
3548
3549 /*
3550 * If we are in low space mode, then optimal allocation will fail so
3551 * prepare for minimal allocation and jump to the low space algorithm
3552 * immediately.
3553 */
3554 if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3555 args->minlen = ap->minlen;
3556 ASSERT(args->fsbno == NULLFSBLOCK);
3557 goto out_low_space;
3558 }
3559
3560 args->minlen = xfs_bmap_select_minlen(ap, args, blen);
3561 if (ap->aeof)
3562 error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
3563 true);
3564
3565 if (!error && args->fsbno == NULLFSBLOCK)
3566 error = xfs_alloc_vextent_near_bno(args, ap->blkno);
3567
3568 out_low_space:
3569 /*
3570 * We are now done with the perag reference for the filestreams
3571 * association provided by xfs_filestream_select_ag(). Release it now as
3572 * we've either succeeded, had a fatal error or we are out of space and
3573 * need to do a full filesystem scan for free space which will take its
3574 * own references.
3575 */
3576 xfs_perag_rele(args->pag);
3577 args->pag = NULL;
3578 if (error || args->fsbno != NULLFSBLOCK)
3579 return error;
3580
3581 return xfs_bmap_btalloc_low_space(ap, args);
3582 }
3583
3584 static int
3585 xfs_bmap_btalloc_best_length(
3586 struct xfs_bmalloca *ap,
3587 struct xfs_alloc_arg *args,
3588 int stripe_align)
3589 {
3590 xfs_extlen_t blen = 0;
3591 int error;
3592
3593 ap->blkno = XFS_INO_TO_FSB(args->mp, ap->ip->i_ino);
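 /*
  * If xfs_bmap_adjacent() cannot suggest a locality target there is
  * nothing to extend contiguously, so don't treat this as an EOF
  * allocation.
  */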
3594 if (!xfs_bmap_adjacent(ap))
3595 ap->eof = false;
3596
3597 /*
3598 * Search for an allocation group with a single extent large enough for
3599 * the request. If one isn't found, then adjust the minimum allocation
3600 * size to the largest space found.
3601 */
3602 error = xfs_bmap_btalloc_select_lengths(ap, args, &blen);
3603 if (error)
3604 return error;
3605
3606 /*
3607 * Don't attempt optimal EOF allocation if previous allocations barely
3608 * succeeded due to being near ENOSPC. It is highly unlikely we'll get
3609 * optimal or even aligned allocations in this case, so don't waste time
3610 * trying.
3611 */
3612 if (ap->aeof && !(ap->tp->t_flags & XFS_TRANS_LOWMODE)) {
3613 error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
3614 false);
3615 if (error || args->fsbno != NULLFSBLOCK)
3616 return error;
3617 }
3618
3619 error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3620 if (error || args->fsbno != NULLFSBLOCK)
3621 return error;
3622
3623 return xfs_bmap_btalloc_low_space(ap, args);
3624 }
3625
3626 static int
3627 xfs_bmap_btalloc(
3628 struct xfs_bmalloca *ap)
3629 {
3630 struct xfs_mount *mp = ap->ip->i_mount;
3631 struct xfs_alloc_arg args = {
3632 .tp = ap->tp,
3633 .mp = mp,
3634 .fsbno = NULLFSBLOCK,
3635 .oinfo = XFS_RMAP_OINFO_SKIP_UPDATE,
3636 .minleft = ap->minleft,
3637 .wasdel = ap->wasdel,
3638 .resv = XFS_AG_RESV_NONE,
3639 .datatype = ap->datatype,
3640 .alignment = 1,
3641 .minalignslop = 0,
3642 };
3643 xfs_fileoff_t orig_offset;
3644 xfs_extlen_t orig_length;
3645 int error;
3646 int stripe_align;
3647
3648 ASSERT(ap->length);
3649 orig_offset = ap->offset;
3650 orig_length = ap->length;
3651
3652 stripe_align = xfs_bmap_compute_alignments(ap, &args);
3653
3654 /* Trim the allocation back to the maximum an AG can fit. */
3655 args.maxlen = min(ap->length, mp->m_ag_max_usable);
3656
3657 if (unlikely(XFS_TEST_ERROR(mp, XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
3658 error = xfs_bmap_exact_minlen_extent_alloc(ap, &args);
3659 else if ((ap->datatype & XFS_ALLOC_USERDATA) &&
3660 xfs_inode_is_filestream(ap->ip))
3661 error = xfs_bmap_btalloc_filestreams(ap, &args, stripe_align);
3662 else
3663 error = xfs_bmap_btalloc_best_length(ap, &args, stripe_align);
3664 if (error)
3665 return error;
3666
3667 if (args.fsbno != NULLFSBLOCK) {
3668 xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
3669 orig_length);
3670 } else {
3671 ap->blkno = NULLFSBLOCK;
3672 ap->length = 0;
3673 }
3674 return 0;
3675 }
3676
3677 /* Trim extent to fit a logical block range. */
3678 void
3679 xfs_trim_extent(
3680 struct xfs_bmbt_irec *irec,
3681 xfs_fileoff_t bno,
3682 xfs_filblks_t len)
3683 {
3684 xfs_fileoff_t distance;
3685 xfs_fileoff_t end = bno + len;
3686
3687 if (irec->br_startoff + irec->br_blockcount <= bno ||
3688 irec->br_startoff >= end) {
3689 irec->br_blockcount = 0;
3690 return;
3691 }
3692
3693 if (irec->br_startoff < bno) {
3694 distance = bno - irec->br_startoff;
3695 if (isnullstartblock(irec->br_startblock))
3696 irec->br_startblock = DELAYSTARTBLOCK;
3697 if (irec->br_startblock != DELAYSTARTBLOCK &&
3698 irec->br_startblock != HOLESTARTBLOCK)
3699 irec->br_startblock += distance;
3700 irec->br_startoff += distance;
3701 irec->br_blockcount -= distance;
3702 }
3703
3704 if (end < irec->br_startoff + irec->br_blockcount) {
3705 distance = irec->br_startoff + irec->br_blockcount - end;
3706 irec->br_blockcount -= distance;
3707 }
3708 }
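/*
 * A worked example of the trim above, with illustrative numbers: an irec
 * with br_startoff = 10 and br_blockcount = 10 trimmed to bno = 12,
 * len = 4 comes back as br_startoff = 12 and br_blockcount = 4, with
 * br_startblock advanced by 2 blocks unless the extent is a hole or a
 * delalloc reservation.
 */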
3709
3710 /*
3711 * Trim the returned map to the required bounds
3712 */
3713 STATIC void
3714 xfs_bmapi_trim_map(
3715 struct xfs_bmbt_irec *mval,
3716 struct xfs_bmbt_irec *got,
3717 xfs_fileoff_t *bno,
3718 xfs_filblks_t len,
3719 xfs_fileoff_t obno,
3720 xfs_fileoff_t end,
3721 int n,
3722 uint32_t flags)
3723 {
3724 if ((flags & XFS_BMAPI_ENTIRE) ||
3725 got->br_startoff + got->br_blockcount <= obno) {
3726 *mval = *got;
3727 if (isnullstartblock(got->br_startblock))
3728 mval->br_startblock = DELAYSTARTBLOCK;
3729 return;
3730 }
3731
3732 if (obno > *bno)
3733 *bno = obno;
3734 ASSERT((*bno >= obno) || (n == 0));
3735 ASSERT(*bno < end);
3736 mval->br_startoff = *bno;
3737 if (isnullstartblock(got->br_startblock))
3738 mval->br_startblock = DELAYSTARTBLOCK;
3739 else
3740 mval->br_startblock = got->br_startblock +
3741 (*bno - got->br_startoff);
3742 /*
3743 * Return the minimum of what we got and what was asked for as
3744 * the length. We can use the len variable here because it is
3745 * modified below and we could have been through here before if
3746 * the first part of the allocation didn't overlap what was
3747 * asked for.
3748 */
3749 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3750 got->br_blockcount - (*bno - got->br_startoff));
3751 mval->br_state = got->br_state;
3752 ASSERT(mval->br_blockcount <= len);
3753 return;
3754 }
3755
3756 /*
3757 * Update and validate the extent map to return
3758 */
3759 STATIC void
3760 xfs_bmapi_update_map(
3761 struct xfs_bmbt_irec **map,
3762 xfs_fileoff_t *bno,
3763 xfs_filblks_t *len,
3764 xfs_fileoff_t obno,
3765 xfs_fileoff_t end,
3766 int *n,
3767 uint32_t flags)
3768 {
3769 xfs_bmbt_irec_t *mval = *map;
3770
3771 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3772 ((mval->br_startoff + mval->br_blockcount) <= end));
3773 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3774 (mval->br_startoff < obno));
3775
3776 *bno = mval->br_startoff + mval->br_blockcount;
3777 *len = end - *bno;
3778 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3779 /* update previous map with new information */
3780 ASSERT(mval->br_startblock == mval[-1].br_startblock);
3781 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3782 ASSERT(mval->br_state == mval[-1].br_state);
3783 mval[-1].br_blockcount = mval->br_blockcount;
3784 mval[-1].br_state = mval->br_state;
3785 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3786 mval[-1].br_startblock != DELAYSTARTBLOCK &&
3787 mval[-1].br_startblock != HOLESTARTBLOCK &&
3788 mval->br_startblock == mval[-1].br_startblock +
3789 mval[-1].br_blockcount &&
3790 mval[-1].br_state == mval->br_state) {
3791 ASSERT(mval->br_startoff ==
3792 mval[-1].br_startoff + mval[-1].br_blockcount);
3793 mval[-1].br_blockcount += mval->br_blockcount;
3794 } else if (*n > 0 &&
3795 mval->br_startblock == DELAYSTARTBLOCK &&
3796 mval[-1].br_startblock == DELAYSTARTBLOCK &&
3797 mval->br_startoff ==
3798 mval[-1].br_startoff + mval[-1].br_blockcount) {
3799 mval[-1].br_blockcount += mval->br_blockcount;
3800 mval[-1].br_state = mval->br_state;
3801 } else if (!((*n == 0) &&
3802 ((mval->br_startoff + mval->br_blockcount) <=
3803 obno))) {
3804 mval++;
3805 (*n)++;
3806 }
3807 *map = mval;
3808 }
3809
3810 /*
3811 * Map file blocks to filesystem blocks without allocation.
3812 */
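/*
 * Typical caller-side pattern, as a sketch (offset_fsb and the locking
 * context are hypothetical caller state, not part of this function):
 *
 *	struct xfs_bmbt_irec	imap;
 *	int			nimaps = 1;
 *
 *	xfs_ilock(ip, XFS_ILOCK_SHARED);
 *	error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimaps, 0);
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *
 * Holes are reported with br_startblock == HOLESTARTBLOCK rather than as
 * an error, so callers must inspect the returned mapping, not just the
 * return value.
 */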
3813 int
3814 xfs_bmapi_read(
3815 struct xfs_inode *ip,
3816 xfs_fileoff_t bno,
3817 xfs_filblks_t len,
3818 struct xfs_bmbt_irec *mval,
3819 int *nmap,
3820 uint32_t flags)
3821 {
3822 struct xfs_mount *mp = ip->i_mount;
3823 int whichfork = xfs_bmapi_whichfork(flags);
3824 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
3825 struct xfs_bmbt_irec got;
3826 xfs_fileoff_t obno;
3827 xfs_fileoff_t end;
3828 struct xfs_iext_cursor icur;
3829 int error;
3830 bool eof = false;
3831 int n = 0;
3832
3833 ASSERT(*nmap >= 1);
3834 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE)));
3835 xfs_assert_ilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);
3836
3837 if (WARN_ON_ONCE(!ifp)) {
3838 xfs_bmap_mark_sick(ip, whichfork);
3839 return -EFSCORRUPTED;
3840 }
3841
3842 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
3843 XFS_TEST_ERROR(mp, XFS_ERRTAG_BMAPIFORMAT)) {
3844 xfs_bmap_mark_sick(ip, whichfork);
3845 return -EFSCORRUPTED;
3846 }
3847
3848 if (xfs_is_shutdown(mp))
3849 return -EIO;
3850
3851 XFS_STATS_INC(mp, xs_blk_mapr);
3852
3853 error = xfs_iread_extents(NULL, ip, whichfork);
3854 if (error)
3855 return error;
3856
3857 if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
3858 eof = true;
3859 end = bno + len;
3860 obno = bno;
3861
3862 while (bno < end && n < *nmap) {
3863 /* Reading past eof, act as though there's a hole up to end. */
3864 if (eof)
3865 got.br_startoff = end;
3866 if (got.br_startoff > bno) {
3867 /* Reading in a hole. */
3868 mval->br_startoff = bno;
3869 mval->br_startblock = HOLESTARTBLOCK;
3870 mval->br_blockcount =
3871 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
3872 mval->br_state = XFS_EXT_NORM;
3873 bno += mval->br_blockcount;
3874 len -= mval->br_blockcount;
3875 mval++;
3876 n++;
3877 continue;
3878 }
3879
3880 /* set up the extent map to return. */
3881 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
3882 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
3883
3884 /* If we're done, stop now. */
3885 if (bno >= end || n >= *nmap)
3886 break;
3887
3888 /* Else go on to the next record. */
3889 if (!xfs_iext_next_extent(ifp, &icur, &got))
3890 eof = true;
3891 }
3892 *nmap = n;
3893 return 0;
3894 }
3895
3896 static int
3897 xfs_bmapi_allocate(
3898 struct xfs_bmalloca *bma)
3899 {
3900 struct xfs_mount *mp = bma->ip->i_mount;
3901 int whichfork = xfs_bmapi_whichfork(bma->flags);
3902 struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork);
3903 int error;
3904
3905 ASSERT(bma->length > 0);
3906 ASSERT(bma->length <= XFS_MAX_BMBT_EXTLEN);
3907
3908 if (bma->flags & XFS_BMAPI_CONTIG)
3909 bma->minlen = bma->length;
3910 else
3911 bma->minlen = 1;
3912
3913 if (!(bma->flags & XFS_BMAPI_METADATA)) {
3914 /*
3915 * For the data and COW fork, the first data in the file is
3916 * treated differently to all other allocations. For the
3917 * attribute fork, we only need to ensure the allocated range
3918 * is not on the busy list.
3919 */
3920 bma->datatype = XFS_ALLOC_NOBUSY;
3921 if (whichfork == XFS_DATA_FORK || whichfork == XFS_COW_FORK) {
3922 bma->datatype |= XFS_ALLOC_USERDATA;
3923 if (bma->offset == 0)
3924 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
3925
3926 if (mp->m_dalign && bma->length >= mp->m_dalign) {
3927 error = xfs_bmap_isaeof(bma, whichfork);
3928 if (error)
3929 return error;
3930 }
3931 }
3932 }
3933
3934 if ((bma->datatype & XFS_ALLOC_USERDATA) &&
3935 XFS_IS_REALTIME_INODE(bma->ip))
3936 error = xfs_bmap_rtalloc(bma);
3937 else
3938 error = xfs_bmap_btalloc(bma);
3939 if (error)
3940 return error;
3941 if (bma->blkno == NULLFSBLOCK)
3942 return -ENOSPC;
3943
3944 if (WARN_ON_ONCE(!xfs_valid_startblock(bma->ip, bma->blkno))) {
3945 xfs_bmap_mark_sick(bma->ip, whichfork);
3946 return -EFSCORRUPTED;
3947 }
3948
3949 if (bma->flags & XFS_BMAPI_ZERO) {
3950 error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
3951 if (error)
3952 return error;
3953 }
3954
3955 if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur)
3956 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
3957 /*
3958 * Bump the number of extents we've allocated
3959 * in this call.
3960 */
3961 bma->nallocs++;
3962
3963 if (bma->cur && bma->wasdel)
3964 bma->cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
3965
3966 bma->got.br_startoff = bma->offset;
3967 bma->got.br_startblock = bma->blkno;
3968 bma->got.br_blockcount = bma->length;
3969 bma->got.br_state = XFS_EXT_NORM;
3970
3971 if (bma->flags & XFS_BMAPI_PREALLOC)
3972 bma->got.br_state = XFS_EXT_UNWRITTEN;
3973
3974 if (bma->wasdel)
3975 error = xfs_bmap_add_extent_delay_real(bma, whichfork);
3976 else
3977 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
3978 whichfork, &bma->icur, &bma->cur, &bma->got,
3979 &bma->logflags, bma->flags);
3980 if (error)
3981 return error;
3982
3983 /*
3984 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
3985 * or xfs_bmap_add_extent_hole_real might have merged it into one of
3986 * the neighbouring ones.
3987 */
3988 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
3989
3990 ASSERT(bma->got.br_startoff <= bma->offset);
3991 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
3992 bma->offset + bma->length);
3993 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
3994 bma->got.br_state == XFS_EXT_UNWRITTEN);
3995 return 0;
3996 }
3997
3998 STATIC int
3999 xfs_bmapi_convert_unwritten(
4000 struct xfs_bmalloca *bma,
4001 struct xfs_bmbt_irec *mval,
4002 xfs_filblks_t len,
4003 uint32_t flags)
4004 {
4005 int whichfork = xfs_bmapi_whichfork(flags);
4006 struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork);
4007 int tmp_logflags = 0;
4008 int error;
4009
4010 /* check if we need to do unwritten->real conversion */
4011 if (mval->br_state == XFS_EXT_UNWRITTEN &&
4012 (flags & XFS_BMAPI_PREALLOC))
4013 return 0;
4014
4015 /* check if we need to do real->unwritten conversion */
4016 if (mval->br_state == XFS_EXT_NORM &&
4017 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4018 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4019 return 0;
4020
4021 /*
4022 * Modify (by adding) the state flag, if writing.
4023 */
4024 ASSERT(mval->br_blockcount <= len);
4025 if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) {
4026 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4027 bma->ip, whichfork);
4028 }
4029 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4030 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4031
4032 /*
4033 * Before insertion into the bmbt, zero the range being converted
4034 * if required.
4035 */
4036 if (flags & XFS_BMAPI_ZERO) {
4037 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4038 mval->br_blockcount);
4039 if (error)
4040 return error;
4041 }
4042
4043 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4044 &bma->icur, &bma->cur, mval, &tmp_logflags);
4045 /*
4046 * Log the inode core unconditionally in the unwritten extent conversion
4047 * path because the conversion might not have done so (e.g., if the
4048 * extent count hasn't changed). We need to make sure the inode is dirty
4049 * in the transaction for the sake of fsync(), even if nothing has
4050 * changed, because fsync() will not force the log for this transaction
4051 * unless it sees the inode pinned.
4052 *
4053 * Note: If we're only converting cow fork extents, there aren't
4054 * any on-disk updates to make, so we don't need to log anything.
4055 */
4056 if (whichfork != XFS_COW_FORK)
4057 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4058 if (error)
4059 return error;
4060
4061 /*
4062 * Update our extent pointer, given that
4063 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4064 * of the neighbouring ones.
4065 */
4066 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4067
4068 /*
4069 * We may have combined previously unwritten space with written space,
4070 * so generate another request.
4071 */
4072 if (mval->br_blockcount < len)
4073 return -EAGAIN;
4074 return 0;
4075 }
4076
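/*
 * Compute the minimum number of free blocks that must remain in the AG
 * after an allocation so that follow-up bmap btree updates cannot fail:
 * nothing if the transaction is already committed to a specific AG, one
 * block for a possible extents-to-btree conversion, or, for btree format
 * forks, one block per level plus one so that a full split up to and
 * including a new root can always be accommodated.
 */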
4077 xfs_extlen_t
4078 xfs_bmapi_minleft(
4079 struct xfs_trans *tp,
4080 struct xfs_inode *ip,
4081 int fork)
4082 {
4083 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, fork);
4084
4085 if (tp && tp->t_highest_agno != NULLAGNUMBER)
4086 return 0;
4087 if (ifp->if_format != XFS_DINODE_FMT_BTREE)
4088 return 1;
4089 return be16_to_cpu(ifp->if_broot->bb_level) + 1;
4090 }
4091
4092 /*
4093 * Log whatever the flags say, even if error. Otherwise we might miss detecting
4094 * a case where the data is changed, there's an error, and it's not logged so we
4095 * don't shut down when we should. Don't bother logging extents/btree changes if
4096 * we converted to the other format.
4097 */
4098 static void
4099 xfs_bmapi_finish(
4100 struct xfs_bmalloca *bma,
4101 int whichfork,
4102 int error)
4103 {
4104 struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork);
4105
4106 if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
4107 ifp->if_format != XFS_DINODE_FMT_EXTENTS)
4108 bma->logflags &= ~xfs_ilog_fext(whichfork);
4109 else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
4110 ifp->if_format != XFS_DINODE_FMT_BTREE)
4111 bma->logflags &= ~xfs_ilog_fbroot(whichfork);
4112
4113 if (bma->logflags)
4114 xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
4115 if (bma->cur)
4116 xfs_btree_del_cursor(bma->cur, error);
4117 }
4118
4119 /*
4120 * Map file blocks to filesystem blocks, and allocate blocks or convert the
4121 * extent state if necessary. Detailed behaviour is controlled by the flags
4122 * parameter. Only allocates blocks from a single allocation group, to avoid
4123 * locking problems.
4124 *
4125 * Returns 0 on success and places the extent mappings in mval. nmaps is used
4126 * as an input/output parameter where the caller specifies the maximum number
4127 * of mappings that may be returned and xfs_bmapi_write passes back the number
4128 * of mappings (including existing mappings) it found.
4129 *
4130 * Returns a negative error code on failure, including -ENOSPC when it could not
4131 * allocate any blocks and -ENOSR when it did allocate blocks to convert a
4132 * delalloc range, but those blocks were before the passed in range.
4133 */
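/*
 * Call-pattern sketch; offset_fsb, count_fsb and resblks stand in for
 * hypothetical caller state, and the transaction setup, inode locking and
 * commit are elided:
 *
 *	struct xfs_bmbt_irec	imap;
 *	int			nimaps = 1;
 *
 *	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
 *			XFS_BMAPI_PREALLOC, resblks, &imap, &nimaps);
 *
 * On success nimaps is at least 1 and imap describes the first mapping,
 * which may be an existing extent rather than a newly allocated one.
 */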
4134 int
4135 xfs_bmapi_write(
4136 struct xfs_trans *tp, /* transaction pointer */
4137 struct xfs_inode *ip, /* incore inode */
4138 xfs_fileoff_t bno, /* starting file offs. mapped */
4139 xfs_filblks_t len, /* length to map in file */
4140 uint32_t flags, /* XFS_BMAPI_... */
4141 xfs_extlen_t total, /* total blocks needed */
4142 struct xfs_bmbt_irec *mval, /* output: map values */
4143 int *nmap) /* i/o: mval size/count */
4144 {
4145 struct xfs_bmalloca bma = {
4146 .tp = tp,
4147 .ip = ip,
4148 .total = total,
4149 };
4150 struct xfs_mount *mp = ip->i_mount;
4151 int whichfork = xfs_bmapi_whichfork(flags);
4152 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
4153 xfs_fileoff_t end; /* end of mapped file region */
4154 bool eof = false; /* after the end of extents */
4155 int error; /* error return */
4156 int n; /* current extent index */
4157 xfs_fileoff_t obno; /* old block number (offset) */
4158
4159 #ifdef DEBUG
4160 xfs_fileoff_t orig_bno; /* original block number value */
4161 int orig_flags; /* original flags arg value */
4162 xfs_filblks_t orig_len; /* original value of len arg */
4163 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4164 int orig_nmap; /* original value of *nmap */
4165
4166 orig_bno = bno;
4167 orig_len = len;
4168 orig_flags = flags;
4169 orig_mval = mval;
4170 orig_nmap = *nmap;
4171 #endif
4172
4173 ASSERT(*nmap >= 1);
4174 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4175 ASSERT(tp != NULL);
4176 ASSERT(len > 0);
4177 ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL);
4178 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
4179 ASSERT(!(flags & XFS_BMAPI_REMAP));
4180
4181 /* zeroing is currently only for data extents, not metadata */
4182 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4183 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4184 /*
4185 * we can allocate unwritten extents or pre-zero allocated blocks,
4186 * but it makes no sense to do both at once. This would result in
4187 * zeroing the unwritten extent twice, while still leaving it an
4188 * unwritten extent....
4189 */
4190 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4191 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4192
4193 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4194 XFS_TEST_ERROR(mp, XFS_ERRTAG_BMAPIFORMAT)) {
4195 xfs_bmap_mark_sick(ip, whichfork);
4196 return -EFSCORRUPTED;
4197 }
4198
4199 if (xfs_is_shutdown(mp))
4200 return -EIO;
4201
4202 XFS_STATS_INC(mp, xs_blk_mapw);
4203
4204 error = xfs_iread_extents(tp, ip, whichfork);
4205 if (error)
4206 goto error0;
4207
4208 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
4209 eof = true;
4210 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4211 bma.prev.br_startoff = NULLFILEOFF;
4212 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4213
4214 n = 0;
4215 end = bno + len;
4216 obno = bno;
4217 while (bno < end && n < *nmap) {
4218 bool need_alloc = false, wasdelay = false;
4219
4220 /* in hole or beyond EOF? */
4221 if (eof || bma.got.br_startoff > bno) {
4222 /*
4223 * CoW fork conversions should /never/ hit EOF or
4224 * holes. There should always be something for us
4225 * to work on.
4226 */
4227 ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4228 (flags & XFS_BMAPI_COWFORK)));
4229
4230 need_alloc = true;
4231 } else if (isnullstartblock(bma.got.br_startblock)) {
4232 wasdelay = true;
4233 }
4234
4235 /*
4236 * First, deal with the hole before the allocated space
4237 * that we found, if any.
4238 */
4239 if (need_alloc || wasdelay) {
4240 bma.eof = eof;
4241 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4242 bma.wasdel = wasdelay;
4243 bma.offset = bno;
4244 bma.flags = flags;
4245
4246 /*
4247 * There's a 32/64 bit type mismatch between the
4248 * allocation length request (which can be 64 bits in
4249 * length) and the bma length request, which is
4250 * xfs_extlen_t and therefore 32 bits. Hence we have to
4251 * be careful and do the min() using the larger type to
4252 * avoid overflows.
4253 */
4254 bma.length = XFS_FILBLKS_MIN(len, XFS_MAX_BMBT_EXTLEN);
4255
4256 if (wasdelay) {
4257 bma.length = XFS_FILBLKS_MIN(bma.length,
4258 bma.got.br_blockcount -
4259 (bno - bma.got.br_startoff));
4260 } else {
4261 if (!eof)
4262 bma.length = XFS_FILBLKS_MIN(bma.length,
4263 bma.got.br_startoff - bno);
4264 }
4265
4266 ASSERT(bma.length > 0);
4267 error = xfs_bmapi_allocate(&bma);
4268 if (error) {
4269 /*
4270 * If we already allocated space in a previous
4271 * iteration, return what we got so far when
4272 * running out of space.
4273 */
4274 if (error == -ENOSPC && bma.nallocs)
4275 break;
4276 goto error0;
4277 }
4278
4279 /*
4280 * If this is a CoW allocation, record the data in
4281 * the refcount btree for orphan recovery.
4282 */
4283 if (whichfork == XFS_COW_FORK)
4284 xfs_refcount_alloc_cow_extent(tp,
4285 XFS_IS_REALTIME_INODE(ip),
4286 bma.blkno, bma.length);
4287 }
4288
4289 /* Deal with the allocated space we found. */
4290 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4291 end, n, flags);
4292
4293 /* Execute unwritten extent conversion if necessary */
4294 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4295 if (error == -EAGAIN)
4296 continue;
4297 if (error)
4298 goto error0;
4299
4300 /* update the extent map to return */
4301 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4302
4303 /*
4304 * If we're done, stop now. Stop when we've allocated
4305 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4306 * the transaction may get too big.
4307 */
4308 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4309 break;
4310
4311 /* Else go on to the next record. */
4312 bma.prev = bma.got;
4313 if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
4314 eof = true;
4315 }
4316
4317 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4318 whichfork);
4319 if (error)
4320 goto error0;
4321
4322 ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE ||
4323 ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
4324 xfs_bmapi_finish(&bma, whichfork, 0);
4325 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4326 orig_nmap, n);
4327
4328 /*
4329 * When converting delayed allocations, xfs_bmapi_allocate ignores
4330 * the passed in bno and always converts from the start of the found
4331 * delalloc extent.
4332 *
4333 * To avoid a successful return with *nmap set to 0, return the magic
4334 * -ENOSR error code for this particular case so that the caller can
4335 * handle it.
4336 */
4337 if (!n) {
4338 ASSERT(bma.nallocs >= *nmap);
4339 return -ENOSR;
4340 }
4341 *nmap = n;
4342 return 0;
4343 error0:
4344 xfs_bmapi_finish(&bma, whichfork, error);
4345 return error;
4346 }
4347
4348 /*
4349 * Convert an existing delalloc extent to real blocks based on file offset. This
4350 * attempts to allocate the entire delalloc extent and may require multiple
4351 * invocations to allocate the target offset if a large enough physical extent
4352 * is not available.
4353 */
4354 static int
4355 xfs_bmapi_convert_one_delalloc(
4356 struct xfs_inode *ip,
4357 int whichfork,
4358 xfs_off_t offset,
4359 struct iomap *iomap,
4360 unsigned int *seq)
4361 {
4362 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
4363 struct xfs_mount *mp = ip->i_mount;
4364 xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
4365 struct xfs_bmalloca bma = { NULL };
4366 uint16_t flags = 0;
4367 struct xfs_trans *tp;
4368 int error;
4369
4370 if (whichfork == XFS_COW_FORK)
4371 flags |= IOMAP_F_SHARED;
4372
4373 /*
4374 * Space for the extent and indirect blocks was reserved when the
4375 * delalloc extent was created so there's no need to do so here.
4376 */
4377 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
4378 XFS_TRANS_RESERVE, &tp);
4379 if (error)
4380 return error;
4381
4382 xfs_ilock(ip, XFS_ILOCK_EXCL);
4383 xfs_trans_ijoin(tp, ip, 0);
4384
4385 error = xfs_iext_count_extend(tp, ip, whichfork,
4386 XFS_IEXT_ADD_NOSPLIT_CNT);
4387 if (error)
4388 goto out_trans_cancel;
4389
4390 if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
4391 bma.got.br_startoff > offset_fsb) {
4392 /*
4393 * No extent found in the range we are trying to convert. This
4394 * should only happen for the COW fork, where another thread
4395 * might have moved the extent to the data fork in the meantime.
4396 */
4397 WARN_ON_ONCE(whichfork != XFS_COW_FORK);
4398 error = -EAGAIN;
4399 goto out_trans_cancel;
4400 }
4401
4402 /*
4403 * If we find a real extent here we raced with another thread converting
4404 * the extent. Just return the real extent at this offset.
4405 */
4406 if (!isnullstartblock(bma.got.br_startblock)) {
4407 xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
4408 xfs_iomap_inode_sequence(ip, flags));
4409 if (seq)
4410 *seq = READ_ONCE(ifp->if_seq);
4411 goto out_trans_cancel;
4412 }
4413
4414 bma.tp = tp;
4415 bma.ip = ip;
4416 bma.wasdel = true;
4417 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4418
4419 /*
4420 * Always convert from the start of the delalloc extent, even if that
4421 * is outside the passed in range, in order to create large contiguous
4422 * extents on disk.
4423 */
4424 bma.offset = bma.got.br_startoff;
4425 bma.length = bma.got.br_blockcount;
4426
4427 /*
4428 * When we're converting the delalloc reservations backing dirty pages
4429 * in the page cache, we must be careful about how we create the new
4430 * extents:
4431 *
4432 * New CoW fork extents are created unwritten, turned into real extents
4433 * when we're about to write the data to disk, and mapped into the data
4434 * fork after the write finishes. End of story.
4435 *
4436 * New data fork extents must be mapped in as unwritten and converted
4437 * to real extents after the write succeeds to avoid exposing stale
4438 * disk contents if we crash.
4439 */
4440 bma.flags = XFS_BMAPI_PREALLOC;
4441 if (whichfork == XFS_COW_FORK)
4442 bma.flags |= XFS_BMAPI_COWFORK;
4443
4444 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4445 bma.prev.br_startoff = NULLFILEOFF;
4446
4447 error = xfs_bmapi_allocate(&bma);
4448 if (error)
4449 goto out_finish;
4450
4451 XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
4452 XFS_STATS_INC(mp, xs_xstrat_quick);
4453
4454 ASSERT(!isnullstartblock(bma.got.br_startblock));
4455 xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
4456 xfs_iomap_inode_sequence(ip, flags));
4457 if (seq)
4458 *seq = READ_ONCE(ifp->if_seq);
4459
4460 if (whichfork == XFS_COW_FORK)
4461 xfs_refcount_alloc_cow_extent(tp, XFS_IS_REALTIME_INODE(ip),
4462 bma.blkno, bma.length);
4463
4464 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4465 whichfork);
4466 if (error)
4467 goto out_finish;
4468
4469 xfs_bmapi_finish(&bma, whichfork, 0);
4470 error = xfs_trans_commit(tp);
4471 xfs_iunlock(ip, XFS_ILOCK_EXCL);
4472 return error;
4473
4474 out_finish:
4475 xfs_bmapi_finish(&bma, whichfork, error);
4476 out_trans_cancel:
4477 xfs_trans_cancel(tp);
4478 xfs_iunlock(ip, XFS_ILOCK_EXCL);
4479 return error;
4480 }
4481
4482 /*
4483 * Pass in a delalloc extent and convert it to real extents; return the real
4484 * extent that maps offset_fsb in iomap.
4485 */
4486 int
4487 xfs_bmapi_convert_delalloc(
4488 struct xfs_inode *ip,
4489 int whichfork,
4490 loff_t offset,
4491 struct iomap *iomap,
4492 unsigned int *seq)
4493 {
4494 int error;
4495
4496 /*
4497 * Attempt to allocate whatever delalloc extent currently backs offset
4498 * and put the result into iomap. Allocate in a loop because it may
4499 * take several attempts to allocate real blocks for a contiguous
4500 * delalloc extent if free space is sufficiently fragmented.
4501 */
4502 do {
4503 error = xfs_bmapi_convert_one_delalloc(ip, whichfork, offset,
4504 iomap, seq);
4505 if (error)
4506 return error;
4507 } while (iomap->offset + iomap->length <= offset);
4508
4509 return 0;
4510 }
4511
4512 int
4513 xfs_bmapi_remap(
4514 struct xfs_trans *tp,
4515 struct xfs_inode *ip,
4516 xfs_fileoff_t bno,
4517 xfs_filblks_t len,
4518 xfs_fsblock_t startblock,
4519 uint32_t flags)
4520 {
4521 struct xfs_mount *mp = ip->i_mount;
4522 struct xfs_ifork *ifp;
4523 struct xfs_btree_cur *cur = NULL;
4524 struct xfs_bmbt_irec got;
4525 struct xfs_iext_cursor icur;
4526 int whichfork = xfs_bmapi_whichfork(flags);
4527 int logflags = 0, error;
4528
4529 ifp = xfs_ifork_ptr(ip, whichfork);
4530 ASSERT(len > 0);
4531 ASSERT(len <= (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN);
4532 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
4533 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
4534 XFS_BMAPI_NORMAP)));
4535 ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
4536 (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));
4537
4538 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4539 XFS_TEST_ERROR(mp, XFS_ERRTAG_BMAPIFORMAT)) {
4540 xfs_bmap_mark_sick(ip, whichfork);
4541 return -EFSCORRUPTED;
4542 }
4543
4544 if (xfs_is_shutdown(mp))
4545 return -EIO;
4546
4547 error = xfs_iread_extents(tp, ip, whichfork);
4548 if (error)
4549 return error;
4550
4551 if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
4552 /* make sure we only reflink into a hole. */
4553 ASSERT(got.br_startoff > bno);
4554 ASSERT(got.br_startoff - bno >= len);
4555 }
4556
4557 ip->i_nblocks += len;
4558 ip->i_delayed_blks -= len; /* see xfs_bmap_defer_add */
4559 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
4560
4561 if (ifp->if_format == XFS_DINODE_FMT_BTREE)
4562 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
4563
4564 got.br_startoff = bno;
4565 got.br_startblock = startblock;
4566 got.br_blockcount = len;
4567 if (flags & XFS_BMAPI_PREALLOC)
4568 got.br_state = XFS_EXT_UNWRITTEN;
4569 else
4570 got.br_state = XFS_EXT_NORM;
4571
4572 error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
4573 &cur, &got, &logflags, flags);
4574 if (error)
4575 goto error0;
4576
4577 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);
4578
4579 error0:
4580 if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS)
4581 logflags &= ~XFS_ILOG_DEXT;
4582 else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE)
4583 logflags &= ~XFS_ILOG_DBROOT;
4584
4585 if (logflags)
4586 xfs_trans_log_inode(tp, ip, logflags);
4587 if (cur)
4588 xfs_btree_del_cursor(cur, error);
4589 return error;
4590 }
4591
4592 /*
4593 * When a delalloc extent is split (e.g., due to a hole punch), the original
4594 * indlen reservation must be shared across the two new extents that are left
4595 * behind.
4596 *
4597 * Given the original reservation and the worst case indlen for the two new
4598 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4599 * reservation fairly across the two new extents. Note that this function
4600 * only distributes the existing reservation: stealing extra blocks from a
4601 * deleted extent to make up a deficiency (e.g., if ores == 1) is the
4602 * caller's responsibility, see xfs_bmap_del_extent_delay().
4603 */
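/*
 * Worked example with illustrative numbers: ores = 5, *indlen1 = 4 and
 * *indlen2 = 3, so nres = 7 and we are two blocks short. resfactor =
 * (5 * 100) / 7 = 71, giving len1 = (4 * 71) / 100 = 2 and
 * len2 = (3 * 71) / 100 = 2. The loop below then hands the remaining
 * single block to len1, for a final 3/2 split that adds up to exactly
 * the original reservation.
 */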
4604 static void
4605 xfs_bmap_split_indlen(
4606 xfs_filblks_t ores, /* original res. */
4607 xfs_filblks_t *indlen1, /* ext1 worst indlen */
4608 xfs_filblks_t *indlen2) /* ext2 worst indlen */
4609 {
4610 xfs_filblks_t len1 = *indlen1;
4611 xfs_filblks_t len2 = *indlen2;
4612 xfs_filblks_t nres = len1 + len2; /* new total res. */
4613 xfs_filblks_t resfactor;
4614
4615 /*
4616 * We can't meet the total required reservation for the two extents.
4617 * Calculate the percent of the overall shortage between both extents
4618 * and apply this percentage to each of the requested indlen values.
4619 * This distributes the shortage fairly and reduces the chances that one
4620 * of the two extents is left with nothing when extents are repeatedly
4621 * split.
4622 */
4623 resfactor = (ores * 100);
4624 do_div(resfactor, nres);
4625 len1 *= resfactor;
4626 do_div(len1, 100);
4627 len2 *= resfactor;
4628 do_div(len2, 100);
4629 ASSERT(len1 + len2 <= ores);
4630 ASSERT(len1 < *indlen1 && len2 < *indlen2);
4631
4632 /*
4633 * Hand out the remainder to each extent. If one of the two reservations
4634 * is zero, we want to make sure that one gets a block first. The loop
4635 * below starts with len1, so hand len2 a block right off the bat if it
4636 * is zero.
4637 */
4638 ores -= (len1 + len2);
4639 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4640 if (ores && !len2 && *indlen2) {
4641 len2++;
4642 ores--;
4643 }
4644 while (ores) {
4645 if (len1 < *indlen1) {
4646 len1++;
4647 ores--;
4648 }
4649 if (!ores)
4650 break;
4651 if (len2 < *indlen2) {
4652 len2++;
4653 ores--;
4654 }
4655 }
4656
4657 *indlen1 = len1;
4658 *indlen2 = len2;
4659 }
4660
4661 void
4662 xfs_bmap_del_extent_delay(
4663 struct xfs_inode *ip,
4664 int whichfork,
4665 struct xfs_iext_cursor *icur,
4666 struct xfs_bmbt_irec *got,
4667 struct xfs_bmbt_irec *del,
4668 uint32_t bflags) /* bmapi flags */
4669 {
4670 struct xfs_mount *mp = ip->i_mount;
4671 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
4672 struct xfs_bmbt_irec new;
4673 int64_t da_old, da_new, da_diff = 0;
4674 xfs_fileoff_t del_endoff, got_endoff;
4675 xfs_filblks_t got_indlen, new_indlen, stolen = 0;
4676 uint32_t state = xfs_bmap_fork_to_state(whichfork);
4677 uint64_t fdblocks;
4678 bool isrt;
4679
4680 XFS_STATS_INC(mp, xs_del_exlist);
4681
4682 isrt = xfs_ifork_is_realtime(ip, whichfork);
4683 del_endoff = del->br_startoff + del->br_blockcount;
4684 got_endoff = got->br_startoff + got->br_blockcount;
4685 da_old = startblockval(got->br_startblock);
4686 da_new = 0;
4687
4688 ASSERT(del->br_blockcount > 0);
4689 ASSERT(got->br_startoff <= del->br_startoff);
4690 ASSERT(got_endoff >= del_endoff);
4691
4692 /*
4693 * Update the inode delalloc counter now and wait to update the
4694 * sb counters as we might have to borrow some blocks for the
4695 * indirect block accounting.
4696 */
4697 xfs_quota_unreserve_blkres(ip, del->br_blockcount);
4698 ip->i_delayed_blks -= del->br_blockcount;
4699
4700 if (got->br_startoff == del->br_startoff)
4701 state |= BMAP_LEFT_FILLING;
4702 if (got_endoff == del_endoff)
4703 state |= BMAP_RIGHT_FILLING;
4704
4705 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4706 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4707 /*
4708 * Matches the whole extent. Delete the entry.
4709 */
4710 xfs_iext_remove(ip, icur, state);
4711 xfs_iext_prev(ifp, icur);
4712 break;
4713 case BMAP_LEFT_FILLING:
4714 /*
4715 * Deleting the first part of the extent.
4716 */
4717 got->br_startoff = del_endoff;
4718 got->br_blockcount -= del->br_blockcount;
4719 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4720 got->br_blockcount), da_old);
4721 got->br_startblock = nullstartblock((int)da_new);
4722 xfs_iext_update_extent(ip, state, icur, got);
4723 break;
4724 case BMAP_RIGHT_FILLING:
4725 /*
4726 * Deleting the last part of the extent.
4727 */
4728 got->br_blockcount = got->br_blockcount - del->br_blockcount;
4729 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4730 got->br_blockcount), da_old);
4731 got->br_startblock = nullstartblock((int)da_new);
4732 xfs_iext_update_extent(ip, state, icur, got);
4733 break;
4734 case 0:
4735 /*
4736 * Deleting the middle of the extent.
4737 *
4738 * Distribute the original indlen reservation across the two new
4739 * extents. Steal blocks from the deleted extent if necessary.
4740 * Stealing blocks simply fudges the fdblocks accounting below.
4741 * Warn if either of the new indlen reservations is zero as this
4742 * can lead to delalloc problems.
4743 */
4744 got->br_blockcount = del->br_startoff - got->br_startoff;
4745 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4746
4747 new.br_blockcount = got_endoff - del_endoff;
4748 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4749
4750 WARN_ON_ONCE(!got_indlen || !new_indlen);
4751 /*
4752 * Steal as many blocks as we can to try and satisfy the worst
4753 * case indlen for both new extents.
4754 *
4755 * However, we can't just steal reservations from the data
4756 * blocks if this is an RT inode, as the data and metadata
4757 * blocks come from different pools. We'll have to live with an
4758 * under-filled indirect reservation in this case.
4759 */
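/*
 * E.g. (illustrative numbers): if da_old = 4 but the two worst-case
 * indlens sum to 6, a non-RT inode steals min(2, del->br_blockcount)
 * blocks from the deleted range, so when enough blocks are available
 * the full 6-block reservation is met and no split is needed.
 */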
4760 da_new = got_indlen + new_indlen;
4761 if (da_new > da_old && !isrt) {
4762 stolen = XFS_FILBLKS_MIN(da_new - da_old,
4763 del->br_blockcount);
4764 da_old += stolen;
4765 }
4766 if (da_new > da_old)
4767 xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen);
4768 da_new = got_indlen + new_indlen;
4769
4770 got->br_startblock = nullstartblock((int)got_indlen);
4771
4772 new.br_startoff = del_endoff;
4773 new.br_state = got->br_state;
4774 new.br_startblock = nullstartblock((int)new_indlen);
4775
4776 xfs_iext_update_extent(ip, state, icur, got);
4777 xfs_iext_next(ifp, icur);
4778 xfs_iext_insert(ip, icur, &new, state);
4779
4780 del->br_blockcount -= stolen;
4781 break;
4782 }
4783
4784 ASSERT(da_old >= da_new);
4785 da_diff = da_old - da_new;
4786 fdblocks = da_diff;
4787
4788 if (bflags & XFS_BMAPI_REMAP) {
4789 ;
4790 } else if (isrt) {
4791 xfs_rtbxlen_t rtxlen;
4792
4793 rtxlen = xfs_blen_to_rtbxlen(mp, del->br_blockcount);
4794 if (xfs_is_zoned_inode(ip))
4795 xfs_zoned_add_available(mp, rtxlen);
4796 xfs_add_frextents(mp, rtxlen);
4797 } else {
4798 fdblocks += del->br_blockcount;
4799 }
4800
4801 xfs_add_fdblocks(mp, fdblocks);
4802 xfs_mod_delalloc(ip, -(int64_t)del->br_blockcount, -da_diff);
4803 }
4804
4805 void
4806 xfs_bmap_del_extent_cow(
4807 struct xfs_inode *ip,
4808 struct xfs_iext_cursor *icur,
4809 struct xfs_bmbt_irec *got,
4810 struct xfs_bmbt_irec *del)
4811 {
4812 struct xfs_mount *mp = ip->i_mount;
4813 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
4814 struct xfs_bmbt_irec new;
4815 xfs_fileoff_t del_endoff, got_endoff;
4816 uint32_t state = BMAP_COWFORK;
4817
4818 XFS_STATS_INC(mp, xs_del_exlist);
4819
4820 del_endoff = del->br_startoff + del->br_blockcount;
4821 got_endoff = got->br_startoff + got->br_blockcount;
4822
4823 ASSERT(del->br_blockcount > 0);
4824 ASSERT(got->br_startoff <= del->br_startoff);
4825 ASSERT(got_endoff >= del_endoff);
4826 ASSERT(!isnullstartblock(got->br_startblock));
4827
4828 if (got->br_startoff == del->br_startoff)
4829 state |= BMAP_LEFT_FILLING;
4830 if (got_endoff == del_endoff)
4831 state |= BMAP_RIGHT_FILLING;
4832
4833 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4834 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4835 /*
4836 * Matches the whole extent. Delete the entry.
4837 */
4838 xfs_iext_remove(ip, icur, state);
4839 xfs_iext_prev(ifp, icur);
4840 break;
4841 case BMAP_LEFT_FILLING:
4842 /*
4843 * Deleting the first part of the extent.
4844 */
4845 got->br_startoff = del_endoff;
4846 got->br_blockcount -= del->br_blockcount;
4847 got->br_startblock = del->br_startblock + del->br_blockcount;
4848 xfs_iext_update_extent(ip, state, icur, got);
4849 break;
4850 case BMAP_RIGHT_FILLING:
4851 /*
4852 * Deleting the last part of the extent.
4853 */
4854 got->br_blockcount -= del->br_blockcount;
4855 xfs_iext_update_extent(ip, state, icur, got);
4856 break;
4857 case 0:
4858 /*
4859 * Deleting the middle of the extent.
4860 */
4861 got->br_blockcount = del->br_startoff - got->br_startoff;
4862
4863 new.br_startoff = del_endoff;
4864 new.br_blockcount = got_endoff - del_endoff;
4865 new.br_state = got->br_state;
4866 new.br_startblock = del->br_startblock + del->br_blockcount;
4867
4868 xfs_iext_update_extent(ip, state, icur, got);
4869 xfs_iext_next(ifp, icur);
4870 xfs_iext_insert(ip, icur, &new, state);
4871 break;
4872 }
4873 ip->i_delayed_blks -= del->br_blockcount;
4874 }
4875
4876 static int
4877 xfs_bmap_free_rtblocks(
4878 struct xfs_trans *tp,
4879 struct xfs_bmbt_irec *del)
4880 {
4881 struct xfs_rtgroup *rtg;
4882 int error;
4883
4884 rtg = xfs_rtgroup_grab(tp->t_mountp, 0);
4885 if (!rtg)
4886 return -EIO;
4887
4888 /*
4889 * Ensure the bitmap and summary inodes are locked and joined to the
4890 * transaction before modifying them.
4891 */
4892 if (!(tp->t_flags & XFS_TRANS_RTBITMAP_LOCKED)) {
4893 tp->t_flags |= XFS_TRANS_RTBITMAP_LOCKED;
4894 xfs_rtgroup_lock(rtg, XFS_RTGLOCK_BITMAP);
4895 xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_BITMAP);
4896 }
4897
4898 error = xfs_rtfree_blocks(tp, rtg, del->br_startblock,
4899 del->br_blockcount);
4900 xfs_rtgroup_rele(rtg);
4901 return error;
4902 }
4903
4904 /*
4905 * Called by __xfs_bunmapi() to update file extent records and the btree
4906 * after removing space.
4907 */
4908 STATIC int /* error */
4909 xfs_bmap_del_extent_real(
4910 xfs_inode_t *ip, /* incore inode pointer */
4911 xfs_trans_t *tp, /* current transaction pointer */
4912 struct xfs_iext_cursor *icur,
4913 struct xfs_btree_cur *cur, /* if null, not a btree */
4914 xfs_bmbt_irec_t *del, /* data to remove from extents */
4915 int *logflagsp, /* inode logging flags */
4916 int whichfork, /* data or attr fork */
4917 uint32_t bflags) /* bmapi flags */
4918 {
4919 xfs_fsblock_t del_endblock=0; /* first block past del */
4920 xfs_fileoff_t del_endoff; /* first offset past del */
4921 int error = 0; /* error return value */
4922 struct xfs_bmbt_irec got; /* current extent entry */
4923 xfs_fileoff_t got_endoff; /* first offset past got */
4924 int i; /* temp state */
4925 struct xfs_ifork *ifp; /* inode fork pointer */
4926 xfs_mount_t *mp; /* mount structure */
4927 xfs_filblks_t nblks; /* quota/sb block count */
4928 xfs_bmbt_irec_t new; /* new record to be inserted */
4929 /* REFERENCED */
4930 uint qfield; /* quota field to update */
4931 uint32_t state = xfs_bmap_fork_to_state(whichfork);
4932 struct xfs_bmbt_irec old;
4933
4934 *logflagsp = 0;
4935
4936 mp = ip->i_mount;
4937 XFS_STATS_INC(mp, xs_del_exlist);
4938
4939 ifp = xfs_ifork_ptr(ip, whichfork);
4940 ASSERT(del->br_blockcount > 0);
4941 xfs_iext_get_extent(ifp, icur, &got);
4942 ASSERT(got.br_startoff <= del->br_startoff);
4943 del_endoff = del->br_startoff + del->br_blockcount;
4944 got_endoff = got.br_startoff + got.br_blockcount;
4945 ASSERT(got_endoff >= del_endoff);
4946 ASSERT(!isnullstartblock(got.br_startblock));
4947 qfield = 0;
4948
4949 /*
4950 * If it's the case where the directory code is running with no block
4951 * reservation, and the deleted block is in the middle of its extent,
4952 * and the resulting insert of an extent would cause transformation to
4953 * btree format, then reject it. The calling code will then swap blocks
4954 * around instead. We have to do this now, rather than waiting for the
4955 * conversion to btree format, since the transaction will be dirty then.
4956 */
4957 if (tp->t_blk_res == 0 &&
4958 ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
4959 ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) &&
4960 del->br_startoff > got.br_startoff && del_endoff < got_endoff)
4961 return -ENOSPC;
4962
4963 *logflagsp = XFS_ILOG_CORE;
4964 if (xfs_ifork_is_realtime(ip, whichfork))
4965 qfield = XFS_TRANS_DQ_RTBCOUNT;
4966 else
4967 qfield = XFS_TRANS_DQ_BCOUNT;
4968 nblks = del->br_blockcount;
4969
4970 del_endblock = del->br_startblock + del->br_blockcount;
4971 if (cur) {
4972 error = xfs_bmbt_lookup_eq(cur, &got, &i);
4973 if (error)
4974 return error;
4975 if (XFS_IS_CORRUPT(mp, i != 1)) {
4976 xfs_btree_mark_sick(cur);
4977 return -EFSCORRUPTED;
4978 }
4979 }
4980
4981 if (got.br_startoff == del->br_startoff)
4982 state |= BMAP_LEFT_FILLING;
4983 if (got_endoff == del_endoff)
4984 state |= BMAP_RIGHT_FILLING;
4985
4986 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4987 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4988 /*
4989 * Matches the whole extent. Delete the entry.
4990 */
4991 xfs_iext_remove(ip, icur, state);
4992 xfs_iext_prev(ifp, icur);
4993 ifp->if_nextents--;
4994
4995 *logflagsp |= XFS_ILOG_CORE;
4996 if (!cur) {
4997 *logflagsp |= xfs_ilog_fext(whichfork);
4998 break;
4999 }
5000 if ((error = xfs_btree_delete(cur, &i)))
5001 return error;
5002 if (XFS_IS_CORRUPT(mp, i != 1)) {
5003 xfs_btree_mark_sick(cur);
5004 return -EFSCORRUPTED;
5005 }
5006 break;
5007 case BMAP_LEFT_FILLING:
5008 /*
5009 * Deleting the first part of the extent.
5010 */
5011 got.br_startoff = del_endoff;
5012 got.br_startblock = del_endblock;
5013 got.br_blockcount -= del->br_blockcount;
5014 xfs_iext_update_extent(ip, state, icur, &got);
5015 if (!cur) {
5016 *logflagsp |= xfs_ilog_fext(whichfork);
5017 break;
5018 }
5019 error = xfs_bmbt_update(cur, &got);
5020 if (error)
5021 return error;
5022 break;
5023 case BMAP_RIGHT_FILLING:
5024 /*
5025 * Deleting the last part of the extent.
5026 */
5027 got.br_blockcount -= del->br_blockcount;
5028 xfs_iext_update_extent(ip, state, icur, &got);
5029 if (!cur) {
5030 *logflagsp |= xfs_ilog_fext(whichfork);
5031 break;
5032 }
5033 error = xfs_bmbt_update(cur, &got);
5034 if (error)
5035 return error;
5036 break;
5037 case 0:
5038 /*
5039 * Deleting the middle of the extent.
5040 */
5041
5042 old = got;
5043
5044 got.br_blockcount = del->br_startoff - got.br_startoff;
5045 xfs_iext_update_extent(ip, state, icur, &got);
5046
5047 new.br_startoff = del_endoff;
5048 new.br_blockcount = got_endoff - del_endoff;
5049 new.br_state = got.br_state;
5050 new.br_startblock = del_endblock;
5051
5052 *logflagsp |= XFS_ILOG_CORE;
5053 if (cur) {
5054 error = xfs_bmbt_update(cur, &got);
5055 if (error)
5056 return error;
5057 error = xfs_btree_increment(cur, 0, &i);
5058 if (error)
5059 return error;
5060 cur->bc_rec.b = new;
5061 error = xfs_btree_insert(cur, &i);
5062 if (error && error != -ENOSPC)
5063 return error;
5064 /*
5065 * If we get no-space back from the btree insert, it tried a
5066 * split, and we have a zero block reservation. Fix up
5067 * our state and return the error.
5068 */
5069 if (error == -ENOSPC) {
5070 /*
5071 * Reset the cursor, don't trust it after any
5072 * insert operation.
5073 */
5074 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5075 if (error)
5076 return error;
5077 if (XFS_IS_CORRUPT(mp, i != 1)) {
5078 xfs_btree_mark_sick(cur);
5079 return -EFSCORRUPTED;
5080 }
5081 /*
5082 * Update the btree record back
5083 * to the original value.
5084 */
5085 error = xfs_bmbt_update(cur, &old);
5086 if (error)
5087 return error;
5088 /*
5089 * Reset the extent record back
5090 * to the original value.
5091 */
5092 xfs_iext_update_extent(ip, state, icur, &old);
5093 *logflagsp = 0;
5094 return -ENOSPC;
5095 }
5096 if (XFS_IS_CORRUPT(mp, i != 1)) {
5097 xfs_btree_mark_sick(cur);
5098 return -EFSCORRUPTED;
5099 }
5100 } else
5101 *logflagsp |= xfs_ilog_fext(whichfork);
5102
5103 ifp->if_nextents++;
5104 xfs_iext_next(ifp, icur);
5105 xfs_iext_insert(ip, icur, &new, state);
5106 break;
5107 }
5108
5109 /* remove reverse mapping */
5110 xfs_rmap_unmap_extent(tp, ip, whichfork, del);
5111
5112 /*
5113 * If we need to, add to list of extents to delete.
5114 */
5115 if (!(bflags & XFS_BMAPI_REMAP)) {
5116 bool isrt = xfs_ifork_is_realtime(ip, whichfork);
5117
5118 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5119 xfs_refcount_decrease_extent(tp, isrt, del);
5120 } else if (isrt && !xfs_has_rtgroups(mp)) {
5121 error = xfs_bmap_free_rtblocks(tp, del);
5122 } else {
5123 unsigned int efi_flags = 0;
5124
5125 if ((bflags & XFS_BMAPI_NODISCARD) ||
5126 del->br_state == XFS_EXT_UNWRITTEN)
5127 efi_flags |= XFS_FREE_EXTENT_SKIP_DISCARD;
5128
5129 /*
5130 * Historically, we did not use EFIs to free realtime
5131 * extents. However, when reverse mapping is enabled,
5132 * we must maintain the same order of operations as the
5133 * data device, which is: Remove the file mapping,
5134 * remove the reverse mapping, and then free the
5135 * blocks. Reflink for realtime volumes requires the
5136 * same sort of ordering. Both features rely on
5137 * rtgroups, so let's gate rt EFI usage on rtgroups.
5138 */
5139 if (isrt)
5140 efi_flags |= XFS_FREE_EXTENT_REALTIME;
5141
5142 error = xfs_free_extent_later(tp, del->br_startblock,
5143 del->br_blockcount, NULL,
5144 XFS_AG_RESV_NONE, efi_flags);
5145 }
5146 if (error)
5147 return error;
5148 }
5149
5150 /*
5151 * Adjust inode # blocks in the file.
5152 */
5153 if (nblks)
5154 ip->i_nblocks -= nblks;
5155 /*
5156 * Adjust quota data.
5157 */
5158 if (qfield && !(bflags & XFS_BMAPI_REMAP))
5159 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5160
5161 return 0;
5162 }
5163
5164 /*
5165 * Unmap (remove) blocks from a file.
5166 * If nexts is nonzero then the number of extents to remove is limited to
5167 * that value. If not all extents in the block range can be removed then
5168 * *rlen is set to the remaining length to unmap.
5169 */
5170 static int
5171 __xfs_bunmapi(
5172 struct xfs_trans *tp, /* transaction pointer */
5173 struct xfs_inode *ip, /* incore inode */
5174 xfs_fileoff_t start, /* first file offset deleted */
5175 xfs_filblks_t *rlen, /* i/o: amount remaining */
5176 uint32_t flags, /* misc flags */
5177 xfs_extnum_t nexts) /* number of extents max */
5178 {
5179 struct xfs_btree_cur *cur; /* bmap btree cursor */
5180 struct xfs_bmbt_irec del; /* extent being deleted */
5181 int error; /* error return value */
5182 xfs_extnum_t extno; /* extent number in list */
5183 struct xfs_bmbt_irec got; /* current extent record */
5184 struct xfs_ifork *ifp; /* inode fork pointer */
5185 int isrt; /* freeing in rt area */
5186 int logflags; /* transaction logging flags */
5187 xfs_extlen_t mod; /* rt extent offset */
5188 struct xfs_mount *mp = ip->i_mount;
5189 int tmp_logflags; /* partial logging flags */
5190 int wasdel; /* was a delayed alloc extent */
5191 int whichfork; /* data or attribute fork */
5192 xfs_filblks_t len = *rlen; /* length to unmap in file */
5193 xfs_fileoff_t end;
5194 struct xfs_iext_cursor icur;
5195 bool done = false;
5196
5197 trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
5198
5199 whichfork = xfs_bmapi_whichfork(flags);
5200 ASSERT(whichfork != XFS_COW_FORK);
5201 ifp = xfs_ifork_ptr(ip, whichfork);
5202 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp))) {
5203 xfs_bmap_mark_sick(ip, whichfork);
5204 return -EFSCORRUPTED;
5205 }
5206 if (xfs_is_shutdown(mp))
5207 return -EIO;
5208
5209 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
5210 ASSERT(len > 0);
5211 ASSERT(nexts >= 0);
5212
5213 error = xfs_iread_extents(tp, ip, whichfork);
5214 if (error)
5215 return error;
5216
5217 if (xfs_iext_count(ifp) == 0) {
5218 *rlen = 0;
5219 return 0;
5220 }
5221 XFS_STATS_INC(mp, xs_blk_unmap);
5222 isrt = xfs_ifork_is_realtime(ip, whichfork);
5223 end = start + len;
5224
5225 if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
5226 *rlen = 0;
5227 return 0;
5228 }
5229 end--;
5230
5231 logflags = 0;
5232 if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
5233 ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
5234 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5235 } else
5236 cur = NULL;
5237
5238 extno = 0;
5239 while (end != (xfs_fileoff_t)-1 && end >= start &&
5240 (nexts == 0 || extno < nexts)) {
5241 /*
5242 * Is the found extent after a hole in which end lives?
5243 * Just back up to the previous extent, if so.
5244 */
5245 if (got.br_startoff > end &&
5246 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5247 done = true;
5248 break;
5249 }
5250 /*
5251 * Is the last block of this extent before the range
5252 * we're supposed to delete? If so, we're done.
5253 */
5254 end = XFS_FILEOFF_MIN(end,
5255 got.br_startoff + got.br_blockcount - 1);
5256 if (end < start)
5257 break;
5258 /*
5259 * Then deal with the (possibly delayed) allocated space
5260 * we found.
5261 */
5262 del = got;
		wasdel = isnullstartblock(del.br_startblock);

		if (got.br_startoff < start) {
			del.br_startoff = start;
			del.br_blockcount -= start - got.br_startoff;
			if (!wasdel)
				del.br_startblock += start - got.br_startoff;
		}
		if (del.br_startoff + del.br_blockcount > end + 1)
			del.br_blockcount = end + 1 - del.br_startoff;

		if (!isrt || (flags & XFS_BMAPI_REMAP))
			goto delete;

		mod = xfs_rtb_to_rtxoff(mp,
				del.br_startblock + del.br_blockcount);
		if (mod) {
			/*
			 * Realtime extent not lined up at the end.
			 * The extent could have been split into written
			 * and unwritten pieces, or we could just be
			 * unmapping part of it.  But we can't really
			 * get rid of part of a realtime extent.
			 */
			if (del.br_state == XFS_EXT_UNWRITTEN) {
				/*
				 * This piece is unwritten, or we're not
				 * using unwritten extents.  Skip over it.
				 */
				ASSERT((flags & XFS_BMAPI_REMAP) || end >= mod);
				end -= mod > del.br_blockcount ?
					del.br_blockcount : mod;
				if (end < got.br_startoff &&
				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
					done = true;
					break;
				}
				continue;
			}
			/*
			 * It's written, turn it unwritten.
			 * This is better than zeroing it.
			 */
			ASSERT(del.br_state == XFS_EXT_NORM);
			ASSERT(tp->t_blk_res > 0);
			/*
			 * If this spans a realtime extent boundary,
			 * chop it back to the start of the one we end at.
			 */
			if (del.br_blockcount > mod) {
				del.br_startoff += del.br_blockcount - mod;
				del.br_startblock += del.br_blockcount - mod;
				del.br_blockcount = mod;
			}
			del.br_state = XFS_EXT_UNWRITTEN;
			error = xfs_bmap_add_extent_unwritten_real(tp, ip,
					whichfork, &icur, &cur, &del,
					&logflags);
			if (error)
				goto error0;
			goto nodelete;
		}

		mod = xfs_rtb_to_rtxoff(mp, del.br_startblock);
		if (mod) {
			xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;

			/*
			 * Realtime extent is lined up at the end but not
			 * at the front.  We'll get rid of full extents if
			 * we can.
			 */
			if (del.br_blockcount > off) {
				del.br_blockcount -= off;
				del.br_startoff += off;
				del.br_startblock += off;
			} else if (del.br_startoff == start &&
				   (del.br_state == XFS_EXT_UNWRITTEN ||
				    tp->t_blk_res == 0)) {
				/*
				 * Can't make it unwritten.  There isn't
				 * a full extent here so just skip it.
				 */
				ASSERT(end >= del.br_blockcount);
				end -= del.br_blockcount;
				if (got.br_startoff > end &&
				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
					done = true;
					break;
				}
				continue;
			} else if (del.br_state == XFS_EXT_UNWRITTEN) {
				struct xfs_bmbt_irec	prev;
				xfs_fileoff_t		unwrite_start;

				/*
				 * This one is already unwritten.
				 * It must have a written left neighbor.
				 * Unwrite the killed part of that one and
				 * try again.
				 */
				if (!xfs_iext_prev_extent(ifp, &icur, &prev))
					ASSERT(0);
				ASSERT(prev.br_state == XFS_EXT_NORM);
				ASSERT(!isnullstartblock(prev.br_startblock));
				ASSERT(del.br_startblock ==
				       prev.br_startblock + prev.br_blockcount);
				unwrite_start = max3(start,
						     del.br_startoff - mod,
						     prev.br_startoff);
				mod = unwrite_start - prev.br_startoff;
				prev.br_startoff = unwrite_start;
				prev.br_startblock += mod;
				prev.br_blockcount -= mod;
				prev.br_state = XFS_EXT_UNWRITTEN;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, whichfork, &icur, &cur,
						&prev, &logflags);
				if (error)
					goto error0;
				goto nodelete;
			} else {
				ASSERT(del.br_state == XFS_EXT_NORM);
				del.br_state = XFS_EXT_UNWRITTEN;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, whichfork, &icur, &cur,
						&del, &logflags);
				if (error)
					goto error0;
				goto nodelete;
			}
		}

delete:
		if (wasdel) {
			xfs_bmap_del_extent_delay(ip, whichfork, &icur, &got,
					&del, flags);
		} else {
			error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
					&del, &tmp_logflags, whichfork,
					flags);
			logflags |= tmp_logflags;
			if (error)
				goto error0;
		}

		end = del.br_startoff - 1;
nodelete:
		/*
		 * If not done, go on to the next (previous) record.
		 */
		if (end != (xfs_fileoff_t)-1 && end >= start) {
			if (!xfs_iext_get_extent(ifp, &icur, &got) ||
			    (got.br_startoff > end &&
			     !xfs_iext_prev_extent(ifp, &icur, &got))) {
				done = true;
				break;
			}
			extno++;
		}
	}
	if (done || end == (xfs_fileoff_t)-1 || end < start)
		*rlen = 0;
	else
		*rlen = end - start + 1;

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	} else {
		error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
			whichfork);
	}

error0:
	/*
	 * Log everything.  Do this after conversion; there's no point in
	 * logging the extent records if we've converted to btree format.
	 */
	if ((logflags & xfs_ilog_fext(whichfork)) &&
	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
		logflags &= ~xfs_ilog_fext(whichfork);
	else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
		 ifp->if_format != XFS_DINODE_FMT_BTREE)
		logflags &= ~xfs_ilog_fbroot(whichfork);
	/*
	 * Log the inode even in the error case; if the transaction is
	 * dirty we'll need to shut down the filesystem.
	 */
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (cur) {
		if (!error)
			cur->bc_bmap.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}
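
/*
 * Illustrative sketch, not part of XFS: the realtime branches above key off
 * the offset of a block number within its realtime extent, obtained via
 * xfs_rtb_to_rtxoff().  A self-contained userspace stand-in, assuming the
 * simplified model that this offset is just "block % rextsize" and using
 * hypothetical names and plain integer types throughout:
 */
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical, simplified mapping; the real code uses struct xfs_bmbt_irec. */
struct example_extent {
	uint64_t	startblock;	/* first block of the mapping */
	uint64_t	blockcount;	/* number of blocks mapped */
};

/*
 * Return true if deleting @del would leave a partially covered realtime
 * extent at the tail, i.e. the end of the range is not aligned to
 * @rextsize (the realtime extent size in blocks).
 */
static bool
example_rt_tail_unaligned(const struct example_extent *del, uint64_t rextsize)
{
	return (del->startblock + del->blockcount) % rextsize != 0;
}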

/* Unmap a range of a file. */
int
xfs_bunmapi(
	xfs_trans_t		*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	uint32_t		flags,
	xfs_extnum_t		nexts,
	int			*done)
{
	int			error;

	error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
	*done = (len == 0);
	return error;
}

/*
 * Determine whether an extent shift can be accomplished by a merge with the
 * extent that precedes the target hole of the shift.
 */
STATIC bool
xfs_bmse_can_merge(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*left,	/* preceding extent */
	struct xfs_bmbt_irec	*got,	/* current extent to shift */
	xfs_fileoff_t		shift)	/* shift fsb */
{
	xfs_fileoff_t		startoff;

	startoff = got->br_startoff - shift;

	/*
	 * The extent, once shifted, must be adjacent in-file and on-disk with
	 * the preceding extent.
	 */
	if ((left->br_startoff + left->br_blockcount != startoff) ||
	    (left->br_startblock + left->br_blockcount != got->br_startblock) ||
	    (left->br_state != got->br_state) ||
	    (left->br_blockcount + got->br_blockcount > XFS_MAX_BMBT_EXTLEN) ||
	    !xfs_bmap_same_rtgroup(ip, whichfork, left, got))
		return false;

	return true;
}
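
/*
 * Illustrative sketch, not part of XFS: the same "can these two mappings
 * become one record?" test expressed over a simplified extent, with
 * hypothetical names.  The real check above also compares unwritten state
 * and realtime group membership, which this sketch folds into one flags
 * field.
 */
#include <stdbool.h>
#include <stdint.h>

struct example_mapping {
	uint64_t	fileoff;	/* file offset, in blocks */
	uint64_t	diskblock;	/* disk block backing fileoff */
	uint64_t	count;		/* length, in blocks */
	unsigned int	flags;		/* stand-in for state/group checks */
};

static bool
example_can_merge(
	const struct example_mapping	*left,
	const struct example_mapping	*right,
	uint64_t			shift,
	uint64_t			max_extlen)
{
	uint64_t	new_fileoff = right->fileoff - shift;

	return left->fileoff + left->count == new_fileoff &&	/* in-file */
	       left->diskblock + left->count == right->diskblock && /* on-disk */
	       left->flags == right->flags &&			/* same state */
	       left->count + right->count <= max_extlen;	/* one record */
}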

/*
 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
 * hole in the file. If an extent shift would result in the extent being fully
 * adjacent to the extent that currently precedes the hole, we can merge with
 * the preceding extent rather than do the shift.
 *
 * This function assumes the caller has verified a shift-by-merge is possible
 * with the provided extents via xfs_bmse_can_merge().
 */
STATIC int
xfs_bmse_merge(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fileoff_t		shift,		/* shift fsb */
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,		/* extent to shift */
	struct xfs_bmbt_irec	*left,		/* preceding extent */
	struct xfs_btree_cur	*cur,
	int			*logflags)	/* output */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	new;
	xfs_filblks_t		blockcount;
	int			error, i;
	struct xfs_mount	*mp = ip->i_mount;

	blockcount = left->br_blockcount + got->br_blockcount;

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	ASSERT(xfs_bmse_can_merge(ip, whichfork, left, got, shift));

	new = *left;
	new.br_blockcount = blockcount;

	/*
	 * Update the on-disk extent count, the btree if necessary and log the
	 * inode.
	 */
	ifp->if_nextents--;
	*logflags |= XFS_ILOG_CORE;
	if (!cur) {
		*logflags |= XFS_ILOG_DEXT;
		goto done;
	}

	/* lookup and remove the extent to merge */
	error = xfs_bmbt_lookup_eq(cur, got, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}

	error = xfs_btree_delete(cur, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}

	/* lookup and update size of the previous extent */
	error = xfs_bmbt_lookup_eq(cur, left, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}

	error = xfs_bmbt_update(cur, &new);
	if (error)
		return error;

	/* change to extent format if required after extent removal */
	error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
	if (error)
		return error;

done:
	xfs_iext_remove(ip, icur, 0);
	xfs_iext_prev(ifp, icur);
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			&new);

	/* update reverse mapping. rmap functions merge the rmaps for us */
	xfs_rmap_unmap_extent(tp, ip, whichfork, got);
	memcpy(&new, got, sizeof(new));
	new.br_startoff = left->br_startoff + left->br_blockcount;
	xfs_rmap_map_extent(tp, ip, whichfork, &new);
	return 0;
}

static int
xfs_bmap_shift_update_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,
	struct xfs_btree_cur	*cur,
	int			*logflags,
	xfs_fileoff_t		startoff)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	prev = *got;
	int			error, i;

	*logflags |= XFS_ILOG_CORE;

	got->br_startoff = startoff;

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &prev, &i);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			return -EFSCORRUPTED;
		}

		error = xfs_bmbt_update(cur, got);
		if (error)
			return error;
	} else {
		*logflags |= XFS_ILOG_DEXT;
	}

	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			got);

	/* update reverse mapping */
	xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
	xfs_rmap_map_extent(tp, ip, whichfork, got);
	return 0;
}

int
xfs_bmap_collapse_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, prev;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);

	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}
	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
		xfs_bmap_mark_sick(ip, whichfork);
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	new_startoff = got.br_startoff - offset_shift_fsb;
	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
			error = -EINVAL;
			goto del_cursor;
		}

		if (xfs_bmse_can_merge(ip, whichfork, &prev, &got,
				offset_shift_fsb)) {
			error = xfs_bmse_merge(tp, ip, whichfork,
					offset_shift_fsb, &icur, &got, &prev,
					cur, &logflags);
			if (error)
				goto del_cursor;
			goto done;
		}
	} else {
		if (got.br_startoff < offset_shift_fsb) {
			error = -EINVAL;
			goto del_cursor;
		}
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

done:
	if (!xfs_iext_next_extent(ifp, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
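
/*
 * Illustrative sketch, not part of XFS: what a whole collapse amounts to
 * once the per-extent machinery above is stripped away.  A self-contained
 * userspace stand-in over an array of extents with hypothetical names; the
 * real xfs_bmse_can_merge() also requires on-disk adjacency and matching
 * state, which this sketch omits, and the real caller runs one transaction
 * per extent rather than a single loop.
 */
#include <stddef.h>
#include <stdint.h>

struct example_ext {
	uint64_t	fileoff;	/* file offset, in blocks */
	uint64_t	count;		/* length, in blocks */
};

/*
 * Shift every extent in ext[0..n) left by @shift file blocks, merging an
 * extent into its predecessor when the shift makes them contiguous in the
 * file.  Assumes the first @shift blocks are already a hole.  Returns the
 * new extent count.
 */
static size_t
example_collapse(struct example_ext *ext, size_t n, uint64_t shift)
{
	size_t	out = 0;

	for (size_t i = 0; i < n; i++) {
		uint64_t	newoff = ext[i].fileoff - shift;

		if (out > 0 &&
		    ext[out - 1].fileoff + ext[out - 1].count == newoff) {
			/* lands flush against the previous extent: merge */
			ext[out - 1].count += ext[i].count;
		} else {
			ext[out].fileoff = newoff;
			ext[out].count = ext[i].count;
			out++;
		}
	}
	return out;
}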

/* Make sure we won't be right-shifting an extent past the maximum bound. */
int
xfs_bmap_can_insert_extents(
	struct xfs_inode	*ip,
	xfs_fileoff_t		off,
	xfs_fileoff_t		shift)
{
	struct xfs_bmbt_irec	got;
	int			is_empty;
	int			error = 0;

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL);

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
	if (!error && !is_empty && got.br_startoff >= off &&
	    ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
		error = -EINVAL;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}
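
/*
 * Illustrative sketch, not part of XFS: the wraparound test above works
 * because on-disk file offsets are narrower than their C type, so a shift
 * past the maximum makes the masked sum move backwards.  The same idiom as
 * a self-contained stand-in, with a 54-bit mask playing the role of
 * BMBT_STARTOFF_MASK:
 */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_STARTOFF_MASK	((1ULL << 54) - 1)	/* 54-bit offsets */

static bool
example_shift_overflows(uint64_t startoff, uint64_t shift)
{
	/* if the masked result went down, the shift wrapped past the max */
	return ((startoff + shift) & EXAMPLE_STARTOFF_MASK) < startoff;
}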

int
xfs_bmap_insert_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done,
	xfs_fileoff_t		stop_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, next;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);

	if (*next_fsb == NULLFSBLOCK) {
		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
		    stop_fsb > got.br_startoff) {
			*done = true;
			goto del_cursor;
		}
	} else {
		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
			*done = true;
			goto del_cursor;
		}
	}
	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
		xfs_bmap_mark_sick(ip, whichfork);
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
		xfs_bmap_mark_sick(ip, whichfork);
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	new_startoff = got.br_startoff + offset_shift_fsb;
	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
		if (new_startoff + got.br_blockcount > next.br_startoff) {
			error = -EINVAL;
			goto del_cursor;
		}

		/*
		 * Unlike a left shift (which involves a hole punch), a right
		 * shift does not modify extent neighbors in any way.  We
		 * should never find mergeable extents in this scenario.
		 * Check anyway and warn if we encounter two extents that
		 * could be one.
		 */
		if (xfs_bmse_can_merge(ip, whichfork, &got, &next,
				offset_shift_fsb))
			WARN_ON_ONCE(1);
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
	    stop_fsb >= got.br_startoff + got.br_blockcount) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}

/*
 * Split an extent into two extents at the block given by split_fsb, such
 * that split_fsb becomes the first block of the new extent.  If split_fsb
 * lies in a hole or at the first block of an extent, there is nothing to
 * split, so just return 0.
 */
int
xfs_bmap_split_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got;
	struct xfs_bmbt_irec	new; /* split extent */
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fsblock_t		gotblkcnt; /* new block count for got */
	struct xfs_iext_cursor	icur;
	int			error = 0;
	int			logflags = 0;
	int			i = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	/* Read in all the extents */
	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	/*
	 * If there are no extents, or split_fsb lies in a hole, we are done.
	 */
	if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
	    got.br_startoff >= split_fsb)
		return 0;

	gotblkcnt = split_fsb - got.br_startoff;
	new.br_startoff = split_fsb;
	new.br_startblock = got.br_startblock + gotblkcnt;
	new.br_blockcount = got.br_blockcount - gotblkcnt;
	new.br_state = got.br_state;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		error = xfs_bmbt_lookup_eq(cur, &got, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
	}

	got.br_blockcount = gotblkcnt;
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
			&got);

	logflags = XFS_ILOG_CORE;
	if (cur) {
		error = xfs_bmbt_update(cur, &got);
		if (error)
			goto del_cursor;
	} else
		logflags |= XFS_ILOG_DEXT;

	/* Add new extent */
	xfs_iext_next(ifp, &icur);
	xfs_iext_insert(ip, &icur, &new, 0);
	ifp->if_nextents++;

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &new, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 0)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
	}

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int tmp_logflags; /* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	}

del_cursor:
	if (cur) {
		cur->bc_bmap.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
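
/*
 * Illustrative sketch, not part of XFS: the arithmetic above that carves
 * one mapping into two, over a simplified record with hypothetical names.
 * The real code additionally updates the incore extent list and the bmap
 * btree, which this sketch omits.
 */
#include <stdint.h>

struct example_map {
	uint64_t	fileoff;	/* file offset of the mapping */
	uint64_t	diskblock;	/* disk block backing fileoff */
	uint64_t	count;		/* length, in blocks */
};

/*
 * Split @m at file offset @split_off, which must lie strictly inside the
 * mapping.  On return, @m is the left piece and @right the remainder.
 */
static void
example_split(struct example_map *m, struct example_map *right,
		uint64_t split_off)
{
	uint64_t	left_count = split_off - m->fileoff;

	right->fileoff = split_off;
	right->diskblock = m->diskblock + left_count;
	right->count = m->count - left_count;
	m->count = left_count;
}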

/* Record a bmap intent. */
static inline void
__xfs_bmap_add(
	struct xfs_trans		*tp,
	enum xfs_bmap_intent_type	type,
	struct xfs_inode		*ip,
	int				whichfork,
	struct xfs_bmbt_irec		*bmap)
{
	struct xfs_bmap_intent		*bi;

	if ((whichfork != XFS_DATA_FORK && whichfork != XFS_ATTR_FORK) ||
	    bmap->br_startblock == HOLESTARTBLOCK ||
	    bmap->br_startblock == DELAYSTARTBLOCK)
		return;

	bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);
	INIT_LIST_HEAD(&bi->bi_list);
	bi->bi_type = type;
	bi->bi_owner = ip;
	bi->bi_whichfork = whichfork;
	bi->bi_bmap = *bmap;

	xfs_bmap_defer_add(tp, bi);
}

/* Map an extent into a file. */
void
xfs_bmap_map_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*PREV)
{
	__xfs_bmap_add(tp, XFS_BMAP_MAP, ip, whichfork, PREV);
}

/* Unmap an extent out of a file. */
void
xfs_bmap_unmap_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*PREV)
{
	__xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, whichfork, PREV);
}
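
/*
 * Illustrative sketch, not part of XFS: the intents queued above follow a
 * generic "record the intention now, act on it later" pattern.  A
 * self-contained userspace stand-in with hypothetical names, using a singly
 * linked list in place of the kernel's list_head and the deferred-ops
 * machinery:
 */
#include <stdint.h>
#include <stdlib.h>

enum example_op { EXAMPLE_MAP, EXAMPLE_UNMAP };

struct example_intent {
	struct example_intent	*next;
	enum example_op		op;
	uint64_t		fileoff;	/* file offset, in blocks */
	uint64_t		count;		/* length, in blocks */
};

/* Queue one intent for later processing; returns 0, or -1 on ENOMEM. */
static int
example_defer_add(struct example_intent **head, enum example_op op,
		uint64_t fileoff, uint64_t count)
{
	struct example_intent	*bi = malloc(sizeof(*bi));

	if (!bi)
		return -1;
	bi->op = op;
	bi->fileoff = fileoff;
	bi->count = count;
	bi->next = *head;	/* LIFO order is fine for a sketch */
	*head = bi;
	return 0;
}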

/*
 * Process one of the deferred bmap operations.  We pass back the
 * btree cursor to maintain our lock on the bmapbt between calls.
 */
int
xfs_bmap_finish_one(
	struct xfs_trans		*tp,
	struct xfs_bmap_intent		*bi)
{
	struct xfs_bmbt_irec		*bmap = &bi->bi_bmap;
	int				error = 0;
	int				flags = 0;

	if (bi->bi_whichfork == XFS_ATTR_FORK)
		flags |= XFS_BMAPI_ATTRFORK;

	ASSERT(tp->t_highest_agno == NULLAGNUMBER);

	trace_xfs_bmap_deferred(bi);

	if (XFS_TEST_ERROR(tp->t_mountp, XFS_ERRTAG_BMAP_FINISH_ONE))
		return -EIO;

	switch (bi->bi_type) {
	case XFS_BMAP_MAP:
		if (bi->bi_bmap.br_state == XFS_EXT_UNWRITTEN)
			flags |= XFS_BMAPI_PREALLOC;
		error = xfs_bmapi_remap(tp, bi->bi_owner, bmap->br_startoff,
				bmap->br_blockcount, bmap->br_startblock,
				flags);
		bmap->br_blockcount = 0;
		break;
	case XFS_BMAP_UNMAP:
		error = __xfs_bunmapi(tp, bi->bi_owner, bmap->br_startoff,
				&bmap->br_blockcount, flags | XFS_BMAPI_REMAP,
				1);
		break;
	default:
		ASSERT(0);
		xfs_bmap_mark_sick(bi->bi_owner, bi->bi_whichfork);
		error = -EFSCORRUPTED;
	}

	return error;
}

/* Check that an extent does not have invalid flags or bad ranges. */
xfs_failaddr_t
xfs_bmap_validate_extent_raw(
	struct xfs_mount	*mp,
	bool			rtfile,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
		return __this_address;

	if (rtfile && whichfork == XFS_DATA_FORK) {
		if (!xfs_verify_rtbext(mp, irec->br_startblock,
					   irec->br_blockcount))
			return __this_address;
	} else {
		if (!xfs_verify_fsbext(mp, irec->br_startblock,
					   irec->br_blockcount))
			return __this_address;
	}
	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
		return __this_address;
	return NULL;
}
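
/*
 * Illustrative sketch, not part of XFS: the verifiers called above all
 * reduce to an overflow-safe "does [start, start + len) fit on the
 * device?" test.  A hypothetical stand-in showing the overflow-safe form:
 */
#include <stdbool.h>
#include <stdint.h>

static bool
example_range_ok(uint64_t start, uint64_t len, uint64_t nblocks)
{
	if (len == 0)
		return false;		/* zero-length extents are invalid */
	if (start >= nblocks)
		return false;		/* starts past the end of the device */
	return len <= nblocks - start;	/* fits entirely, no wraparound */
}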

int __init
xfs_bmap_intent_init_cache(void)
{
	xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent",
			sizeof(struct xfs_bmap_intent),
			0, 0, NULL);

	return xfs_bmap_intent_cache != NULL ? 0 : -ENOMEM;
}

void
xfs_bmap_intent_destroy_cache(void)
{
	kmem_cache_destroy(xfs_bmap_intent_cache);
	xfs_bmap_intent_cache = NULL;
}

/* Check that an inode's extent does not have invalid flags or bad ranges. */
xfs_failaddr_t
xfs_bmap_validate_extent(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	return xfs_bmap_validate_extent_raw(ip->i_mount,
			XFS_IS_REALTIME_INODE(ip), whichfork, irec);
}

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define XFS_ITRUNC_MAX_EXTENTS	2

/*
 * Unmap every extent in part of an inode's fork.  We don't do any higher level
 * invalidation work at all.
 */
int
xfs_bunmapi_range(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	uint32_t		flags,
	xfs_fileoff_t		startoff,
	xfs_fileoff_t		endoff)
{
	xfs_filblks_t		unmap_len = endoff - startoff + 1;
	int			error = 0;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);

	while (unmap_len > 0) {
		ASSERT((*tpp)->t_highest_agno == NULLAGNUMBER);
		error = __xfs_bunmapi(*tpp, ip, startoff, &unmap_len, flags,
				XFS_ITRUNC_MAX_EXTENTS);
		if (error)
			goto out;

		/* free the just unmapped extents */
		error = xfs_defer_finish(tpp);
		if (error)
			goto out;
		cond_resched();
	}
out:
	return error;
}
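
/*
 * Illustrative sketch, not part of XFS: the loop above caps how much work
 * lands in a single transaction by unmapping at most a fixed number of
 * extents, then finishing the deferred frees before going around again.
 * The same batching pattern as a stand-in, with a hypothetical
 * process_batch() playing the role of one __xfs_bunmapi() plus
 * xfs_defer_finish() round trip:
 */
#include <stdint.h>

/* Consume up to @max_items of @remaining; returns the amount done. */
static uint64_t
process_batch(uint64_t remaining, uint64_t max_items)
{
	return remaining < max_items ? remaining : max_items;
}

static void
example_bounded_loop(uint64_t total_items)
{
	uint64_t	remaining = total_items;

	while (remaining > 0) {
		/* bound per-iteration work so no one "transaction" balloons */
		remaining -= process_batch(remaining, 2 /* max per batch */);
	}
}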

struct xfs_bmap_query_range {
	xfs_bmap_query_range_fn	fn;
	void			*priv;
};

/* Format btree record and pass to our callback. */
STATIC int
xfs_bmap_query_range_helper(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*rec,
	void				*priv)
{
	struct xfs_bmap_query_range	*query = priv;
	struct xfs_bmbt_irec		irec;
	xfs_failaddr_t			fa;

	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
	fa = xfs_bmap_validate_extent(cur->bc_ino.ip, cur->bc_ino.whichfork,
			&irec);
	if (fa) {
		xfs_btree_mark_sick(cur);
		return xfs_bmap_complain_bad_rec(cur->bc_ino.ip,
				cur->bc_ino.whichfork, fa, &irec);
	}

	return query->fn(cur, &irec, query->priv);
}

/* Find all bmaps. */
int
xfs_bmap_query_all(
	struct xfs_btree_cur		*cur,
	xfs_bmap_query_range_fn		fn,
	void				*priv)
{
	struct xfs_bmap_query_range	query = {
		.priv = priv,
		.fn = fn,
	};

	return xfs_btree_query_all(cur, xfs_bmap_query_range_helper, &query);
}
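
/*
 * Illustrative sketch, not part of XFS: xfs_bmap_query_all() above wraps
 * the caller's function pointer and private data in a stack struct so the
 * generic btree walker only needs a single void *priv argument.  The same
 * adapter pattern as a self-contained stand-in with hypothetical names:
 */
typedef int (*example_fn)(int item, void *priv);

struct example_adapter {
	example_fn	fn;
	void		*priv;
};

/* Generic walker: knows nothing about the caller's callback signature. */
static int
example_walk(const int *items, int n, int (*helper)(int, void *), void *priv)
{
	for (int i = 0; i < n; i++) {
		int	error = helper(items[i], priv);

		if (error)
			return error;
	}
	return 0;
}

/* Helper unwraps the adapter and invokes the caller's callback. */
static int
example_helper(int item, void *priv)
{
	struct example_adapter	*a = priv;

	return a->fn(item, a->priv);
}

static int
example_query_all(const int *items, int n, example_fn fn, void *priv)
{
	struct example_adapter	a = { .fn = fn, .priv = priv };

	return example_walk(items, n, example_helper, &a);
}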

/* Helper function to extract extent size hint from inode */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	/*
	 * No point in aligning allocations if we need to COW to actually
	 * write to them.
	 */
	if (!xfs_is_always_cow_inode(ip) &&
	    (ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
		return ip->i_extsize;
	if (XFS_IS_REALTIME_INODE(ip) &&
	    ip->i_mount->m_sb.sb_rextsize > 1)
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}

/*
 * Helper function to extract CoW extent size hint from inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two.  If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
	struct xfs_inode	*ip)
{
	xfs_extlen_t		a, b;

	a = 0;
	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
		a = ip->i_cowextsize;
	if (XFS_IS_REALTIME_INODE(ip)) {
		b = 0;
		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
			b = ip->i_extsize;
	} else {
		b = xfs_get_extsz_hint(ip);
	}

	a = max(a, b);
	if (a == 0)
		return XFS_DEFAULT_COWEXTSZ_HINT;
	return a;
}
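
/*
 * Illustrative sketch, not part of XFS: the CoW hint selection above is a
 * "take the larger of two hints, fall back to a default when both are
 * unset" policy.  A hypothetical stand-in with the hints passed directly
 * and an arbitrary stand-in default:
 */
#include <stdint.h>

#define EXAMPLE_DEFAULT_HINT	32	/* stand-in for the built-in default */

static uint32_t
example_pick_hint(uint32_t cow_hint, uint32_t extsz_hint)
{
	uint32_t	hint = cow_hint > extsz_hint ? cow_hint : extsz_hint;

	return hint ? hint : EXAMPLE_DEFAULT_HINT;
}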