1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2014 Red Hat, Inc.
4 * All Rights Reserved.
5 */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_mount.h"
14 #include "xfs_sb.h"
15 #include "xfs_defer.h"
16 #include "xfs_btree.h"
17 #include "xfs_trans.h"
18 #include "xfs_alloc.h"
19 #include "xfs_rmap.h"
20 #include "xfs_rmap_btree.h"
21 #include "xfs_trace.h"
22 #include "xfs_errortag.h"
23 #include "xfs_error.h"
24 #include "xfs_inode.h"
25 #include "xfs_ag.h"
26 #include "xfs_health.h"
27 #include "xfs_rmap_item.h"
28
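/* Cache for deferred rmap update intents (struct xfs_rmap_intent). */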
29 struct kmem_cache *xfs_rmap_intent_cache;
30
31 /*
32 * Lookup the first record less than or equal to [bno, len, owner, offset]
33 * in the btree given by cur.
34 */
35 int
36 xfs_rmap_lookup_le(
37 struct xfs_btree_cur *cur,
38 xfs_agblock_t bno,
39 uint64_t owner,
40 uint64_t offset,
41 unsigned int flags,
42 struct xfs_rmap_irec *irec,
43 int *stat)
44 {
45 int get_stat = 0;
46 int error;
47
48 cur->bc_rec.r.rm_startblock = bno;
49 cur->bc_rec.r.rm_blockcount = 0;
50 cur->bc_rec.r.rm_owner = owner;
51 cur->bc_rec.r.rm_offset = offset;
52 cur->bc_rec.r.rm_flags = flags;
53
54 error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
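	/* Callers pass a NULL irec when they only need the cursor positioned. */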
55 if (error || !(*stat) || !irec)
56 return error;
57
58 error = xfs_rmap_get_rec(cur, irec, &get_stat);
59 if (error)
60 return error;
61 if (!get_stat) {
62 xfs_btree_mark_sick(cur);
63 return -EFSCORRUPTED;
64 }
65
66 return 0;
67 }
68
69 /*
70 * Lookup the record exactly matching [bno, len, owner, offset]
71 * in the btree given by cur.
72 */
73 int
74 xfs_rmap_lookup_eq(
75 struct xfs_btree_cur *cur,
76 xfs_agblock_t bno,
77 xfs_extlen_t len,
78 uint64_t owner,
79 uint64_t offset,
80 unsigned int flags,
81 int *stat)
82 {
83 cur->bc_rec.r.rm_startblock = bno;
84 cur->bc_rec.r.rm_blockcount = len;
85 cur->bc_rec.r.rm_owner = owner;
86 cur->bc_rec.r.rm_offset = offset;
87 cur->bc_rec.r.rm_flags = flags;
88 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
89 }
90
91 /*
92 * Update the record referred to by cur to the value given
93 * by [bno, len, owner, offset].
94 * This either works (return 0) or gets an EFSCORRUPTED error.
95 */
96 STATIC int
97 xfs_rmap_update(
98 struct xfs_btree_cur *cur,
99 struct xfs_rmap_irec *irec)
100 {
101 union xfs_btree_rec rec;
102 int error;
103
104 trace_xfs_rmap_update(cur, irec->rm_startblock, irec->rm_blockcount,
105 irec->rm_owner, irec->rm_offset, irec->rm_flags);
106
107 rec.rmap.rm_startblock = cpu_to_be32(irec->rm_startblock);
108 rec.rmap.rm_blockcount = cpu_to_be32(irec->rm_blockcount);
109 rec.rmap.rm_owner = cpu_to_be64(irec->rm_owner);
110 rec.rmap.rm_offset = cpu_to_be64(
111 xfs_rmap_irec_offset_pack(irec));
112 error = xfs_btree_update(cur, &rec);
113 if (error)
114 trace_xfs_rmap_update_error(cur, error, _RET_IP_);
115 return error;
116 }
117
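/* Insert a new reverse mapping record; the record must not already exist. */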
118 int
119 xfs_rmap_insert(
120 struct xfs_btree_cur *rcur,
121 xfs_agblock_t agbno,
122 xfs_extlen_t len,
123 uint64_t owner,
124 uint64_t offset,
125 unsigned int flags)
126 {
127 int i;
128 int error;
129
130 trace_xfs_rmap_insert(rcur, agbno, len, owner, offset, flags);
131
132 error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
133 if (error)
134 goto done;
135 if (XFS_IS_CORRUPT(rcur->bc_mp, i != 0)) {
136 xfs_btree_mark_sick(rcur);
137 error = -EFSCORRUPTED;
138 goto done;
139 }
140
141 rcur->bc_rec.r.rm_startblock = agbno;
142 rcur->bc_rec.r.rm_blockcount = len;
143 rcur->bc_rec.r.rm_owner = owner;
144 rcur->bc_rec.r.rm_offset = offset;
145 rcur->bc_rec.r.rm_flags = flags;
146 error = xfs_btree_insert(rcur, &i);
147 if (error)
148 goto done;
149 if (XFS_IS_CORRUPT(rcur->bc_mp, i != 1)) {
150 xfs_btree_mark_sick(rcur);
151 error = -EFSCORRUPTED;
152 goto done;
153 }
154 done:
155 if (error)
156 trace_xfs_rmap_insert_error(rcur, error, _RET_IP_);
157 return error;
158 }
159
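/* Remove a reverse mapping record; the exact record must exist in the tree. */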
160 STATIC int
161 xfs_rmap_delete(
162 struct xfs_btree_cur *rcur,
163 xfs_agblock_t agbno,
164 xfs_extlen_t len,
165 uint64_t owner,
166 uint64_t offset,
167 unsigned int flags)
168 {
169 int i;
170 int error;
171
172 trace_xfs_rmap_delete(rcur, agbno, len, owner, offset, flags);
173
174 error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
175 if (error)
176 goto done;
177 if (XFS_IS_CORRUPT(rcur->bc_mp, i != 1)) {
178 xfs_btree_mark_sick(rcur);
179 error = -EFSCORRUPTED;
180 goto done;
181 }
182
183 error = xfs_btree_delete(rcur, &i);
184 if (error)
185 goto done;
186 if (XFS_IS_CORRUPT(rcur->bc_mp, i != 1)) {
187 xfs_btree_mark_sick(rcur);
188 error = -EFSCORRUPTED;
189 goto done;
190 }
191 done:
192 if (error)
193 trace_xfs_rmap_delete_error(rcur, error, _RET_IP_);
194 return error;
195 }
196
197 /* Convert an internal btree record to an rmap record. */
198 xfs_failaddr_t
199 xfs_rmap_btrec_to_irec(
200 const union xfs_btree_rec *rec,
201 struct xfs_rmap_irec *irec)
202 {
203 irec->rm_startblock = be32_to_cpu(rec->rmap.rm_startblock);
204 irec->rm_blockcount = be32_to_cpu(rec->rmap.rm_blockcount);
205 irec->rm_owner = be64_to_cpu(rec->rmap.rm_owner);
206 return xfs_rmap_irec_offset_unpack(be64_to_cpu(rec->rmap.rm_offset),
207 irec);
208 }
209
210 /* Simple checks for rmap records. */
211 xfs_failaddr_t
212 xfs_rmap_check_irec(
213 struct xfs_perag *pag,
214 const struct xfs_rmap_irec *irec)
215 {
216 struct xfs_mount *mp = pag->pag_mount;
217 bool is_inode;
218 bool is_unwritten;
219 bool is_bmbt;
220 bool is_attr;
221
222 if (irec->rm_blockcount == 0)
223 return __this_address;
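	/*
	 * Records in the AG header area must be the single static OWN_FS
	 * record that covers all of the header blocks.
	 */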
224 if (irec->rm_startblock <= XFS_AGFL_BLOCK(mp)) {
225 if (irec->rm_owner != XFS_RMAP_OWN_FS)
226 return __this_address;
227 if (irec->rm_blockcount != XFS_AGFL_BLOCK(mp) + 1)
228 return __this_address;
229 } else {
230 /* check for valid extent range, including overflow */
231 if (!xfs_verify_agbext(pag, irec->rm_startblock,
232 irec->rm_blockcount))
233 return __this_address;
234 }
235
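	/*
	 * The owner must be a plausible inode number or one of the static
	 * metadata owner codes.
	 */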
236 if (!(xfs_verify_ino(mp, irec->rm_owner) ||
237 (irec->rm_owner <= XFS_RMAP_OWN_FS &&
238 irec->rm_owner >= XFS_RMAP_OWN_MIN)))
239 return __this_address;
240
241 /* Check flags. */
242 is_inode = !XFS_RMAP_NON_INODE_OWNER(irec->rm_owner);
243 is_bmbt = irec->rm_flags & XFS_RMAP_BMBT_BLOCK;
244 is_attr = irec->rm_flags & XFS_RMAP_ATTR_FORK;
245 is_unwritten = irec->rm_flags & XFS_RMAP_UNWRITTEN;
246
247 if (is_bmbt && irec->rm_offset != 0)
248 return __this_address;
249
250 if (!is_inode && irec->rm_offset != 0)
251 return __this_address;
252
253 if (is_unwritten && (is_bmbt || !is_inode || is_attr))
254 return __this_address;
255
256 if (!is_inode && (is_bmbt || is_unwritten || is_attr))
257 return __this_address;
258
259 /* Check for a valid fork offset, if applicable. */
260 if (is_inode && !is_bmbt &&
261 !xfs_verify_fileext(mp, irec->rm_offset, irec->rm_blockcount))
262 return __this_address;
263
264 return NULL;
265 }
266
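/*
 * Check a record against the rules above, using the perag that matches the
 * cursor type (in-memory or on-disk rmap btree).
 */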
267 static inline xfs_failaddr_t
268 xfs_rmap_check_btrec(
269 struct xfs_btree_cur *cur,
270 const struct xfs_rmap_irec *irec)
271 {
272 if (xfs_btree_is_mem_rmap(cur->bc_ops))
273 return xfs_rmap_check_irec(cur->bc_mem.pag, irec);
274 return xfs_rmap_check_irec(cur->bc_ag.pag, irec);
275 }
276
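/*
 * Warn about a corrupt rmap record, mark the btree sick, and return
 * -EFSCORRUPTED to the caller.
 */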
277 static inline int
278 xfs_rmap_complain_bad_rec(
279 struct xfs_btree_cur *cur,
280 xfs_failaddr_t fa,
281 const struct xfs_rmap_irec *irec)
282 {
283 struct xfs_mount *mp = cur->bc_mp;
284
285 if (xfs_btree_is_mem_rmap(cur->bc_ops))
286 xfs_warn(mp,
287 "In-Memory Reverse Mapping BTree record corruption detected at %pS!", fa);
288 else
289 xfs_warn(mp,
290 "Reverse Mapping BTree record corruption in AG %d detected at %pS!",
291 cur->bc_ag.pag->pag_agno, fa);
292 xfs_warn(mp,
293 "Owner 0x%llx, flags 0x%x, start block 0x%x block count 0x%x",
294 irec->rm_owner, irec->rm_flags, irec->rm_startblock,
295 irec->rm_blockcount);
296 xfs_btree_mark_sick(cur);
297 return -EFSCORRUPTED;
298 }
299
300 /*
301 * Get the data from the pointed-to record.
302 */
303 int
304 xfs_rmap_get_rec(
305 struct xfs_btree_cur *cur,
306 struct xfs_rmap_irec *irec,
307 int *stat)
308 {
309 union xfs_btree_rec *rec;
310 xfs_failaddr_t fa;
311 int error;
312
313 error = xfs_btree_get_rec(cur, &rec, stat);
314 if (error || !*stat)
315 return error;
316
317 fa = xfs_rmap_btrec_to_irec(rec, irec);
318 if (!fa)
319 fa = xfs_rmap_check_btrec(cur, irec);
320 if (fa)
321 return xfs_rmap_complain_bad_rec(cur, fa, irec);
322
323 return 0;
324 }
325
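/*
 * Query context for the neighbor lookups below: the key to search around and
 * the caller's buffer that receives the matching record.
 */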
326 struct xfs_find_left_neighbor_info {
327 struct xfs_rmap_irec high;
328 struct xfs_rmap_irec *irec;
329 };
330
331 /* For each rmap given, figure out if it matches the key we want. */
332 STATIC int
333 xfs_rmap_find_left_neighbor_helper(
334 struct xfs_btree_cur *cur,
335 const struct xfs_rmap_irec *rec,
336 void *priv)
337 {
338 struct xfs_find_left_neighbor_info *info = priv;
339
340 trace_xfs_rmap_find_left_neighbor_candidate(cur, rec->rm_startblock,
341 rec->rm_blockcount, rec->rm_owner, rec->rm_offset,
342 rec->rm_flags);
343
344 if (rec->rm_owner != info->high.rm_owner)
345 return 0;
346 if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) &&
347 !(rec->rm_flags & XFS_RMAP_BMBT_BLOCK) &&
348 rec->rm_offset + rec->rm_blockcount - 1 != info->high.rm_offset)
349 return 0;
350
351 *info->irec = *rec;
352 return -ECANCELED;
353 }
354
355 /*
356 * Find the record to the left of the given extent, being careful only to
357 * return a match with the same owner and adjacent physical and logical
358 * block ranges.
359 */
360 STATIC int
361 xfs_rmap_find_left_neighbor(
362 struct xfs_btree_cur *cur,
363 xfs_agblock_t bno,
364 uint64_t owner,
365 uint64_t offset,
366 unsigned int flags,
367 struct xfs_rmap_irec *irec,
368 int *stat)
369 {
370 struct xfs_find_left_neighbor_info info;
371 int found = 0;
372 int error;
373
374 *stat = 0;
375 if (bno == 0)
376 return 0;
377 info.high.rm_startblock = bno - 1;
378 info.high.rm_owner = owner;
379 if (!XFS_RMAP_NON_INODE_OWNER(owner) &&
380 !(flags & XFS_RMAP_BMBT_BLOCK)) {
381 if (offset == 0)
382 return 0;
383 info.high.rm_offset = offset - 1;
384 } else
385 info.high.rm_offset = 0;
386 info.high.rm_flags = flags;
387 info.high.rm_blockcount = 0;
388 info.irec = irec;
389
390 trace_xfs_rmap_find_left_neighbor_query(cur, bno, 0, owner, offset,
391 flags);
392
393 /*
394 * Historically, we always used the range query to walk every reverse
395 * mapping that could possibly overlap the key that the caller asked
396 * for, and filter out the ones that don't. That is very slow when
397 * there are a lot of records.
398 *
399 * However, there are two scenarios where the classic btree search can
400 * produce correct results -- if the index contains a record that is an
401 * exact match for the lookup key; and if there are no other records
402 * between the record we want and the key we supplied.
403 *
404 * As an optimization, try a non-overlapped lookup first. This makes
405 * extent conversion and remap operations run a bit faster if the
406 * physical extents aren't being shared. If we don't find what we
407 * want, we fall back to the overlapped query.
408 */
409 error = xfs_rmap_lookup_le(cur, bno, owner, offset, flags, irec,
410 &found);
411 if (error)
412 return error;
413 if (found)
414 error = xfs_rmap_find_left_neighbor_helper(cur, irec, &info);
415 if (!error)
416 error = xfs_rmap_query_range(cur, &info.high, &info.high,
417 xfs_rmap_find_left_neighbor_helper, &info);
418 if (error != -ECANCELED)
419 return error;
420
421 *stat = 1;
422 trace_xfs_rmap_find_left_neighbor_result(cur, irec->rm_startblock,
423 irec->rm_blockcount, irec->rm_owner, irec->rm_offset,
424 irec->rm_flags);
425 return 0;
426 }
427
428 /* For each rmap given, figure out if it matches the key we want. */
429 STATIC int
430 xfs_rmap_lookup_le_range_helper(
431 struct xfs_btree_cur *cur,
432 const struct xfs_rmap_irec *rec,
433 void *priv)
434 {
435 struct xfs_find_left_neighbor_info *info = priv;
436
437 trace_xfs_rmap_lookup_le_range_candidate(cur, rec->rm_startblock,
438 rec->rm_blockcount, rec->rm_owner, rec->rm_offset,
439 rec->rm_flags);
440
441 if (rec->rm_owner != info->high.rm_owner)
442 return 0;
443 if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) &&
444 !(rec->rm_flags & XFS_RMAP_BMBT_BLOCK) &&
445 (rec->rm_offset > info->high.rm_offset ||
446 rec->rm_offset + rec->rm_blockcount <= info->high.rm_offset))
447 return 0;
448
449 *info->irec = *rec;
450 return -ECANCELED;
451 }
452
453 /*
454 * Find the record to the left of the given extent, being careful only to
455 * return a match with the same owner and overlapping physical and logical
456 * block ranges. This is the overlapping-interval version of
457 * xfs_rmap_lookup_le.
458 */
459 int
460 xfs_rmap_lookup_le_range(
461 struct xfs_btree_cur *cur,
462 xfs_agblock_t bno,
463 uint64_t owner,
464 uint64_t offset,
465 unsigned int flags,
466 struct xfs_rmap_irec *irec,
467 int *stat)
468 {
469 struct xfs_find_left_neighbor_info info;
470 int found = 0;
471 int error;
472
473 info.high.rm_startblock = bno;
474 info.high.rm_owner = owner;
475 if (!XFS_RMAP_NON_INODE_OWNER(owner) && !(flags & XFS_RMAP_BMBT_BLOCK))
476 info.high.rm_offset = offset;
477 else
478 info.high.rm_offset = 0;
479 info.high.rm_flags = flags;
480 info.high.rm_blockcount = 0;
481 *stat = 0;
482 info.irec = irec;
483
484 trace_xfs_rmap_lookup_le_range(cur, bno, 0, owner, offset, flags);
485
486 /*
487 * Historically, we always used the range query to walk every reverse
488 * mapping that could possibly overlap the key that the caller asked
489 * for, and filter out the ones that don't. That is very slow when
490 * there are a lot of records.
491 *
492 * However, there are two scenarios where the classic btree search can
493 * produce correct results -- if the index contains a record that is an
494 * exact match for the lookup key; and if there are no other records
495 * between the record we want and the key we supplied.
496 *
497 * As an optimization, try a non-overlapped lookup first. This makes
498 * scrub run much faster on most filesystems because bmbt records are
499 * usually an exact match for rmap records. If we don't find what we
500 * want, we fall back to the overlapped query.
501 */
502 error = xfs_rmap_lookup_le(cur, bno, owner, offset, flags, irec,
503 &found);
504 if (error)
505 return error;
506 if (found)
507 error = xfs_rmap_lookup_le_range_helper(cur, irec, &info);
508 if (!error)
509 error = xfs_rmap_query_range(cur, &info.high, &info.high,
510 xfs_rmap_lookup_le_range_helper, &info);
511 if (error != -ECANCELED)
512 return error;
513
514 *stat = 1;
515 trace_xfs_rmap_lookup_le_range_result(cur, irec->rm_startblock,
516 irec->rm_blockcount, irec->rm_owner, irec->rm_offset,
517 irec->rm_flags);
518 return 0;
519 }
520
521 /*
522 * Perform all the relevant owner checks for a removal op. If we're doing an
523 * unknown-owner removal then we have no owner information to check.
524 */
525 static int
526 xfs_rmap_free_check_owner(
527 struct xfs_btree_cur *cur,
528 uint64_t ltoff,
529 struct xfs_rmap_irec *rec,
530 xfs_filblks_t len,
531 uint64_t owner,
532 uint64_t offset,
533 unsigned int flags)
534 {
535 struct xfs_mount *mp = cur->bc_mp;
536 int error = 0;
537
538 if (owner == XFS_RMAP_OWN_UNKNOWN)
539 return 0;
540
541 /* Make sure the unwritten flag matches. */
542 if (XFS_IS_CORRUPT(mp,
543 (flags & XFS_RMAP_UNWRITTEN) !=
544 (rec->rm_flags & XFS_RMAP_UNWRITTEN))) {
545 xfs_btree_mark_sick(cur);
546 error = -EFSCORRUPTED;
547 goto out;
548 }
549
550 /* Make sure the owner matches what we expect to find in the tree. */
551 if (XFS_IS_CORRUPT(mp, owner != rec->rm_owner)) {
552 xfs_btree_mark_sick(cur);
553 error = -EFSCORRUPTED;
554 goto out;
555 }
556
557 /* Check the offset, if necessary. */
558 if (XFS_RMAP_NON_INODE_OWNER(owner))
559 goto out;
560
561 if (flags & XFS_RMAP_BMBT_BLOCK) {
562 if (XFS_IS_CORRUPT(mp,
563 !(rec->rm_flags & XFS_RMAP_BMBT_BLOCK))) {
564 xfs_btree_mark_sick(cur);
565 error = -EFSCORRUPTED;
566 goto out;
567 }
568 } else {
569 if (XFS_IS_CORRUPT(mp, rec->rm_offset > offset)) {
570 xfs_btree_mark_sick(cur);
571 error = -EFSCORRUPTED;
572 goto out;
573 }
574 if (XFS_IS_CORRUPT(mp,
575 offset + len > ltoff + rec->rm_blockcount)) {
576 xfs_btree_mark_sick(cur);
577 error = -EFSCORRUPTED;
578 goto out;
579 }
580 }
581
582 out:
583 return error;
584 }
585
586 /*
587 * Find the extent in the rmap btree and remove it.
588 *
589 * The record we find should always be an exact match for the extent that we're
590 * looking for, since we insert them into the btree without modification.
591 *
592 * Special Case #1: when growing the filesystem, we "free" an extent when
593 * growing the last AG. This extent is new space and so it is not tracked as
594 * used space in the btree. The growfs code will pass in an owner of
595 * XFS_RMAP_OWN_NULL to indicate that it expects there to be no owner of this
596 * extent. We verify this by checking that the extent lookup results in a record
597 * that does not overlap the range being freed.
598 *
599 * Special Case #2: EFIs do not record the owner of the extent, so when
600 * recovering EFIs from the log we pass in XFS_RMAP_OWN_UNKNOWN to tell the rmap
601 * btree to ignore the owner (i.e. wildcard match) so we don't trigger
602 * corruption checks during log recovery.
603 */
604 STATIC int
605 xfs_rmap_unmap(
606 struct xfs_btree_cur *cur,
607 xfs_agblock_t bno,
608 xfs_extlen_t len,
609 bool unwritten,
610 const struct xfs_owner_info *oinfo)
611 {
612 struct xfs_mount *mp = cur->bc_mp;
613 struct xfs_rmap_irec ltrec;
614 uint64_t ltoff;
615 int error = 0;
616 int i;
617 uint64_t owner;
618 uint64_t offset;
619 unsigned int flags;
620 bool ignore_off;
621
622 xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
623 ignore_off = XFS_RMAP_NON_INODE_OWNER(owner) ||
624 (flags & XFS_RMAP_BMBT_BLOCK);
625 if (unwritten)
626 flags |= XFS_RMAP_UNWRITTEN;
627 trace_xfs_rmap_unmap(cur, bno, len, unwritten, oinfo);
628
629 /*
630 * We should always have a left record because there's a static record
631 * for the AG headers at rm_startblock == 0 created by mkfs/growfs that
632 * will not ever be removed from the tree.
633 */
634 error = xfs_rmap_lookup_le(cur, bno, owner, offset, flags, <rec, &i);
635 if (error)
636 goto out_error;
637 if (XFS_IS_CORRUPT(mp, i != 1)) {
638 xfs_btree_mark_sick(cur);
639 error = -EFSCORRUPTED;
640 goto out_error;
641 }
642
643 trace_xfs_rmap_lookup_le_range_result(cur, ltrec.rm_startblock,
644 ltrec.rm_blockcount, ltrec.rm_owner, ltrec.rm_offset,
645 ltrec.rm_flags);
646 ltoff = ltrec.rm_offset;
647
648 /*
649 * For growfs, the incoming extent must be beyond the left record we
650 * just found as it is new space and won't be used by anyone. This is
651 * just a corruption check as we don't actually do anything with this
652 * extent. Note that we need to use >= instead of > because it might
653 * be the case that the "left" extent goes all the way to EOFS.
654 */
655 if (owner == XFS_RMAP_OWN_NULL) {
656 if (XFS_IS_CORRUPT(mp,
657 bno <
658 ltrec.rm_startblock + ltrec.rm_blockcount)) {
659 xfs_btree_mark_sick(cur);
660 error = -EFSCORRUPTED;
661 goto out_error;
662 }
663 goto out_done;
664 }
665
666 /*
667 * If we're doing an unknown-owner removal for EFI recovery, we expect
668 * to find the full range in the rmapbt or nothing at all. If we
669 * don't find any rmaps overlapping either end of the range, we're
670 * done. Hopefully this means that the EFI creator already queued
671 * (and finished) a RUI to remove the rmap.
672 */
673 if (owner == XFS_RMAP_OWN_UNKNOWN &&
674 ltrec.rm_startblock + ltrec.rm_blockcount <= bno) {
675 struct xfs_rmap_irec rtrec;
676
677 error = xfs_btree_increment(cur, 0, &i);
678 if (error)
679 goto out_error;
680 if (i == 0)
681 goto out_done;
682 error = xfs_rmap_get_rec(cur, &rtrec, &i);
683 if (error)
684 goto out_error;
685 if (XFS_IS_CORRUPT(mp, i != 1)) {
686 xfs_btree_mark_sick(cur);
687 error = -EFSCORRUPTED;
688 goto out_error;
689 }
690 if (rtrec.rm_startblock >= bno + len)
691 goto out_done;
692 }
693
694 /* Make sure the extent we found covers the entire freeing range. */
695 if (XFS_IS_CORRUPT(mp,
696 ltrec.rm_startblock > bno ||
697 ltrec.rm_startblock + ltrec.rm_blockcount <
698 bno + len)) {
699 xfs_btree_mark_sick(cur);
700 error = -EFSCORRUPTED;
701 goto out_error;
702 }
703
704 /* Check owner information. */
705 error = xfs_rmap_free_check_owner(cur, ltoff, <rec, len, owner,
706 offset, flags);
707 if (error)
708 goto out_error;
709
710 if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
711 /* exact match, simply remove the record from rmap tree */
712 trace_xfs_rmap_delete(cur, ltrec.rm_startblock,
713 ltrec.rm_blockcount, ltrec.rm_owner,
714 ltrec.rm_offset, ltrec.rm_flags);
715 error = xfs_btree_delete(cur, &i);
716 if (error)
717 goto out_error;
718 if (XFS_IS_CORRUPT(mp, i != 1)) {
719 xfs_btree_mark_sick(cur);
720 error = -EFSCORRUPTED;
721 goto out_error;
722 }
723 } else if (ltrec.rm_startblock == bno) {
724 /*
725 * overlap left hand side of extent: move the start, trim the
726 * length and update the current record.
727 *
728 * ltbno ltlen
729 * Orig: |oooooooooooooooooooo|
730 * Freeing: |fffffffff|
731 * Result: |rrrrrrrrrr|
732 * bno len
733 */
734 ltrec.rm_startblock += len;
735 ltrec.rm_blockcount -= len;
736 if (!ignore_off)
737 ltrec.rm_offset += len;
738 error = xfs_rmap_update(cur, <rec);
739 if (error)
740 goto out_error;
741 } else if (ltrec.rm_startblock + ltrec.rm_blockcount == bno + len) {
742 /*
743 * overlap right hand side of extent: trim the length and update
744 * the current record.
745 *
746 * ltbno ltlen
747 * Orig: |oooooooooooooooooooo|
748 * Freeing: |fffffffff|
749 * Result: |rrrrrrrrrr|
750 * bno len
751 */
752 ltrec.rm_blockcount -= len;
753 error = xfs_rmap_update(cur, <rec);
754 if (error)
755 goto out_error;
756 } else {
757
758 /*
759 * overlap middle of extent: trim the length of the existing
760 * record to the length of the new left-extent size, increment
761 * the insertion position so we can insert a new record
762 * containing the remaining right-extent space.
763 *
764 * ltbno ltlen
765 * Orig: |oooooooooooooooooooo|
766 * Freeing: |fffffffff|
767 * Result: |rrrrr| |rrrr|
768 * bno len
769 */
770 xfs_extlen_t orig_len = ltrec.rm_blockcount;
771
772 ltrec.rm_blockcount = bno - ltrec.rm_startblock;
773 error = xfs_rmap_update(cur, <rec);
774 if (error)
775 goto out_error;
776
777 error = xfs_btree_increment(cur, 0, &i);
778 if (error)
779 goto out_error;
780
781 cur->bc_rec.r.rm_startblock = bno + len;
782 cur->bc_rec.r.rm_blockcount = orig_len - len -
783 ltrec.rm_blockcount;
784 cur->bc_rec.r.rm_owner = ltrec.rm_owner;
785 if (ignore_off)
786 cur->bc_rec.r.rm_offset = 0;
787 else
788 cur->bc_rec.r.rm_offset = offset + len;
789 cur->bc_rec.r.rm_flags = flags;
790 trace_xfs_rmap_insert(cur, cur->bc_rec.r.rm_startblock,
791 cur->bc_rec.r.rm_blockcount,
792 cur->bc_rec.r.rm_owner,
793 cur->bc_rec.r.rm_offset,
794 cur->bc_rec.r.rm_flags);
795 error = xfs_btree_insert(cur, &i);
796 if (error)
797 goto out_error;
798 }
799
800 out_done:
801 trace_xfs_rmap_unmap_done(cur, bno, len, unwritten, oinfo);
802 out_error:
803 if (error)
804 trace_xfs_rmap_unmap_error(cur, error, _RET_IP_);
805 return error;
806 }
807
808 #ifdef CONFIG_XFS_LIVE_HOOKS
809 /*
810 * Use a static key here to reduce the overhead of rmapbt live updates. If
811 * the compiler supports jump labels, the static branch will be replaced by a
812 * nop sled when there are no hook users. Online fsck is currently the only
813 * caller, so this is a reasonable tradeoff.
814 *
815 * Note: Patching the kernel code requires taking the cpu hotplug lock. Other
816 * parts of the kernel allocate memory with that lock held, which means that
817 * XFS callers cannot hold any locks that might be used by memory reclaim or
818 * writeback when calling the static_branch_{inc,dec} functions.
819 */
820 DEFINE_STATIC_XFS_HOOK_SWITCH(xfs_rmap_hooks_switch);
821
822 void
823 xfs_rmap_hook_disable(void)
824 {
825 xfs_hooks_switch_off(&xfs_rmap_hooks_switch);
826 }
827
828 void
829 xfs_rmap_hook_enable(void)
830 {
831 xfs_hooks_switch_on(&xfs_rmap_hooks_switch);
832 }
833
834 /* Call downstream hooks for a reverse mapping update. */
835 static inline void
836 xfs_rmap_update_hook(
837 struct xfs_trans *tp,
838 struct xfs_perag *pag,
839 enum xfs_rmap_intent_type op,
840 xfs_agblock_t startblock,
841 xfs_extlen_t blockcount,
842 bool unwritten,
843 const struct xfs_owner_info *oinfo)
844 {
845 if (xfs_hooks_switched_on(&xfs_rmap_hooks_switch)) {
846 struct xfs_rmap_update_params p = {
847 .startblock = startblock,
848 .blockcount = blockcount,
849 .unwritten = unwritten,
850 .oinfo = *oinfo, /* struct copy */
851 };
852
853 if (pag)
854 xfs_hooks_call(&pag->pag_rmap_update_hooks, op, &p);
855 }
856 }
857
858 /* Call the specified function during a reverse mapping update. */
859 int
860 xfs_rmap_hook_add(
861 struct xfs_perag *pag,
862 struct xfs_rmap_hook *hook)
863 {
864 return xfs_hooks_add(&pag->pag_rmap_update_hooks, &hook->rmap_hook);
865 }
866
867 /* Stop calling the specified function during a reverse mapping update. */
868 void
869 xfs_rmap_hook_del(
870 struct xfs_perag *pag,
871 struct xfs_rmap_hook *hook)
872 {
873 xfs_hooks_del(&pag->pag_rmap_update_hooks, &hook->rmap_hook);
874 }
875
876 /* Configure rmap update hook functions. */
877 void
878 xfs_rmap_hook_setup(
879 struct xfs_rmap_hook *hook,
880 notifier_fn_t mod_fn)
881 {
882 xfs_hook_setup(&hook->rmap_hook, mod_fn);
883 }
884 #else
885 # define xfs_rmap_update_hook(t, p, o, s, b, u, oi) do { } while (0)
886 #endif /* CONFIG_XFS_LIVE_HOOKS */
887
888 /*
889 * Remove a reference to an extent in the rmap btree.
890 */
891 int
892 xfs_rmap_free(
893 struct xfs_trans *tp,
894 struct xfs_buf *agbp,
895 struct xfs_perag *pag,
896 xfs_agblock_t bno,
897 xfs_extlen_t len,
898 const struct xfs_owner_info *oinfo)
899 {
900 struct xfs_mount *mp = tp->t_mountp;
901 struct xfs_btree_cur *cur;
902 int error;
903
904 if (!xfs_has_rmapbt(mp))
905 return 0;
906
907 cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
908 xfs_rmap_update_hook(tp, pag, XFS_RMAP_UNMAP, bno, len, false, oinfo);
909 error = xfs_rmap_unmap(cur, bno, len, false, oinfo);
910
911 xfs_btree_del_cursor(cur, error);
912 return error;
913 }
914
915 /*
916 * A mergeable rmap must have the same owner and the same values for
917 * the unwritten, attr_fork, and bmbt flags. The startblock and
918 * offset are checked separately.
919 */
920 static bool
921 xfs_rmap_is_mergeable(
922 struct xfs_rmap_irec *irec,
923 uint64_t owner,
924 unsigned int flags)
925 {
926 if (irec->rm_owner == XFS_RMAP_OWN_NULL)
927 return false;
928 if (irec->rm_owner != owner)
929 return false;
930 if ((flags & XFS_RMAP_UNWRITTEN) ^
931 (irec->rm_flags & XFS_RMAP_UNWRITTEN))
932 return false;
933 if ((flags & XFS_RMAP_ATTR_FORK) ^
934 (irec->rm_flags & XFS_RMAP_ATTR_FORK))
935 return false;
936 if ((flags & XFS_RMAP_BMBT_BLOCK) ^
937 (irec->rm_flags & XFS_RMAP_BMBT_BLOCK))
938 return false;
939 return true;
940 }
941
942 /*
943 * When we allocate a new block, the first thing we do is add a reference to
944 * the extent in the rmap btree. This takes the form of a [agbno, length,
945 * owner, offset] record. Flags are encoded in the high bits of the offset
946 * field.
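 *
 * For example (hypothetical values): mapping four AG blocks starting at
 * agbno 100 to the data fork of inode 131 at file offset 8 would add the
 * record (rm_startblock=100, rm_blockcount=4, rm_owner=131, rm_offset=8)
 * with no flag bits set in the packed offset field.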
947 */
948 STATIC int
949 xfs_rmap_map(
950 struct xfs_btree_cur *cur,
951 xfs_agblock_t bno,
952 xfs_extlen_t len,
953 bool unwritten,
954 const struct xfs_owner_info *oinfo)
955 {
956 struct xfs_mount *mp = cur->bc_mp;
957 struct xfs_rmap_irec ltrec;
958 struct xfs_rmap_irec gtrec;
959 int have_gt;
960 int have_lt;
961 int error = 0;
962 int i;
963 uint64_t owner;
964 uint64_t offset;
965 unsigned int flags = 0;
966 bool ignore_off;
967
968 xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
969 ASSERT(owner != 0);
970 ignore_off = XFS_RMAP_NON_INODE_OWNER(owner) ||
971 (flags & XFS_RMAP_BMBT_BLOCK);
972 if (unwritten)
973 flags |= XFS_RMAP_UNWRITTEN;
974 trace_xfs_rmap_map(cur, bno, len, unwritten, oinfo);
975 ASSERT(!xfs_rmap_should_skip_owner_update(oinfo));
976
977 /*
978 * For the initial lookup, look for an exact match or the left-adjacent
979 * record for our insertion point. This will also give us the record for
980 * start block contiguity tests.
981 */
982 error = xfs_rmap_lookup_le(cur, bno, owner, offset, flags, <rec,
983 &have_lt);
984 if (error)
985 goto out_error;
986 if (have_lt) {
987 trace_xfs_rmap_lookup_le_range_result(cur, ltrec.rm_startblock,
988 ltrec.rm_blockcount, ltrec.rm_owner,
989 ltrec.rm_offset, ltrec.rm_flags);
990
991 if (!xfs_rmap_is_mergeable(<rec, owner, flags))
992 have_lt = 0;
993 }
994
995 if (XFS_IS_CORRUPT(mp,
996 have_lt != 0 &&
997 ltrec.rm_startblock + ltrec.rm_blockcount > bno)) {
998 xfs_btree_mark_sick(cur);
999 error = -EFSCORRUPTED;
1000 goto out_error;
1001 }
1002
1003 /*
1004 * Increment the cursor to see if we have a right-adjacent record to our
1005 * insertion point. This will give us the record for end block
1006 * contiguity tests.
1007 */
1008 error = xfs_btree_increment(cur, 0, &have_gt);
1009 if (error)
1010 goto out_error;
1011 if (have_gt) {
1012 error = xfs_rmap_get_rec(cur, >rec, &have_gt);
1013 if (error)
1014 goto out_error;
1015 if (XFS_IS_CORRUPT(mp, have_gt != 1)) {
1016 xfs_btree_mark_sick(cur);
1017 error = -EFSCORRUPTED;
1018 goto out_error;
1019 }
1020 if (XFS_IS_CORRUPT(mp, bno + len > gtrec.rm_startblock)) {
1021 xfs_btree_mark_sick(cur);
1022 error = -EFSCORRUPTED;
1023 goto out_error;
1024 }
1025 trace_xfs_rmap_find_right_neighbor_result(cur,
1026 gtrec.rm_startblock, gtrec.rm_blockcount,
1027 gtrec.rm_owner, gtrec.rm_offset,
1028 gtrec.rm_flags);
1029 if (!xfs_rmap_is_mergeable(>rec, owner, flags))
1030 have_gt = 0;
1031 }
1032
1033 /*
1034 * Note: cursor currently points one record to the right of ltrec, even
1035 * if there is no record in the tree to the right.
1036 */
1037 if (have_lt &&
1038 ltrec.rm_startblock + ltrec.rm_blockcount == bno &&
1039 (ignore_off || ltrec.rm_offset + ltrec.rm_blockcount == offset)) {
1040 /*
1041 * left edge contiguous, merge into left record.
1042 *
1043 * ltbno ltlen
1044 * orig: |ooooooooo|
1045 * adding: |aaaaaaaaa|
1046 * result: |rrrrrrrrrrrrrrrrrrr|
1047 * bno len
1048 */
1049 ltrec.rm_blockcount += len;
1050 if (have_gt &&
1051 bno + len == gtrec.rm_startblock &&
1052 (ignore_off || offset + len == gtrec.rm_offset) &&
1053 (unsigned long)ltrec.rm_blockcount + len +
1054 gtrec.rm_blockcount <= XFS_RMAP_LEN_MAX) {
1055 /*
1056 * right edge also contiguous, delete right record
1057 * and merge into left record.
1058 *
1059 * ltbno ltlen gtbno gtlen
1060 * orig: |ooooooooo| |ooooooooo|
1061 * adding: |aaaaaaaaa|
1062 * result: |rrrrrrrrrrrrrrrrrrrrrrrrrrrrr|
1063 */
1064 ltrec.rm_blockcount += gtrec.rm_blockcount;
1065 trace_xfs_rmap_delete(cur, gtrec.rm_startblock,
1066 gtrec.rm_blockcount, gtrec.rm_owner,
1067 gtrec.rm_offset, gtrec.rm_flags);
1068 error = xfs_btree_delete(cur, &i);
1069 if (error)
1070 goto out_error;
1071 if (XFS_IS_CORRUPT(mp, i != 1)) {
1072 xfs_btree_mark_sick(cur);
1073 error = -EFSCORRUPTED;
1074 goto out_error;
1075 }
1076 }
1077
1078 /* point the cursor back to the left record and update */
1079 error = xfs_btree_decrement(cur, 0, &have_gt);
1080 if (error)
1081 goto out_error;
1082 error = xfs_rmap_update(cur, <rec);
1083 if (error)
1084 goto out_error;
1085 } else if (have_gt &&
1086 bno + len == gtrec.rm_startblock &&
1087 (ignore_off || offset + len == gtrec.rm_offset)) {
1088 /*
1089 * right edge contiguous, merge into right record.
1090 *
1091 * gtbno gtlen
1092 * Orig: |ooooooooo|
1093 * adding: |aaaaaaaaa|
1094 * Result: |rrrrrrrrrrrrrrrrrrr|
1095 * bno len
1096 */
1097 gtrec.rm_startblock = bno;
1098 gtrec.rm_blockcount += len;
1099 if (!ignore_off)
1100 gtrec.rm_offset = offset;
1101 error = xfs_rmap_update(cur, >rec);
1102 if (error)
1103 goto out_error;
1104 } else {
1105 /*
1106 * no contiguous edge with identical owner, insert
1107 * new record at current cursor position.
1108 */
1109 cur->bc_rec.r.rm_startblock = bno;
1110 cur->bc_rec.r.rm_blockcount = len;
1111 cur->bc_rec.r.rm_owner = owner;
1112 cur->bc_rec.r.rm_offset = offset;
1113 cur->bc_rec.r.rm_flags = flags;
1114 trace_xfs_rmap_insert(cur, bno, len, owner, offset, flags);
1115 error = xfs_btree_insert(cur, &i);
1116 if (error)
1117 goto out_error;
1118 if (XFS_IS_CORRUPT(mp, i != 1)) {
1119 xfs_btree_mark_sick(cur);
1120 error = -EFSCORRUPTED;
1121 goto out_error;
1122 }
1123 }
1124
1125 trace_xfs_rmap_map_done(cur, bno, len, unwritten, oinfo);
1126 out_error:
1127 if (error)
1128 trace_xfs_rmap_map_error(cur, error, _RET_IP_);
1129 return error;
1130 }
1131
1132 /*
1133 * Add a reference to an extent in the rmap btree.
1134 */
1135 int
1136 xfs_rmap_alloc(
1137 struct xfs_trans *tp,
1138 struct xfs_buf *agbp,
1139 struct xfs_perag *pag,
1140 xfs_agblock_t bno,
1141 xfs_extlen_t len,
1142 const struct xfs_owner_info *oinfo)
1143 {
1144 struct xfs_mount *mp = tp->t_mountp;
1145 struct xfs_btree_cur *cur;
1146 int error;
1147
1148 if (!xfs_has_rmapbt(mp))
1149 return 0;
1150
1151 cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
1152 xfs_rmap_update_hook(tp, pag, XFS_RMAP_MAP, bno, len, false, oinfo);
1153 error = xfs_rmap_map(cur, bno, len, false, oinfo);
1154
1155 xfs_btree_del_cursor(cur, error);
1156 return error;
1157 }
1158
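/*
 * State bits used by the convert functions below to describe how the range
 * being converted lines up with the existing record (does it fill the left
 * or right end?) and with its neighbors (are they valid and contiguous?).
 */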
1159 #define RMAP_LEFT_CONTIG (1 << 0)
1160 #define RMAP_RIGHT_CONTIG (1 << 1)
1161 #define RMAP_LEFT_FILLING (1 << 2)
1162 #define RMAP_RIGHT_FILLING (1 << 3)
1163 #define RMAP_LEFT_VALID (1 << 6)
1164 #define RMAP_RIGHT_VALID (1 << 7)
1165
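/* Aliases into the r[] neighbor extent array declared by the convert functions. */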
1166 #define LEFT r[0]
1167 #define RIGHT r[1]
1168 #define PREV r[2]
1169 #define NEW r[3]
1170
1171 /*
1172 * Convert an unwritten extent to a real extent or vice versa.
1173 * Does not handle overlapping extents.
1174 */
1175 STATIC int
1176 xfs_rmap_convert(
1177 struct xfs_btree_cur *cur,
1178 xfs_agblock_t bno,
1179 xfs_extlen_t len,
1180 bool unwritten,
1181 const struct xfs_owner_info *oinfo)
1182 {
1183 struct xfs_mount *mp = cur->bc_mp;
1184 struct xfs_rmap_irec r[4]; /* neighbor extent entries */
1185 /* left is 0, right is 1, */
1186 /* prev is 2, new is 3 */
1187 uint64_t owner;
1188 uint64_t offset;
1189 uint64_t new_endoff;
1190 unsigned int oldext;
1191 unsigned int newext;
1192 unsigned int flags = 0;
1193 int i;
1194 int state = 0;
1195 int error;
1196
1197 xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
1198 ASSERT(!(XFS_RMAP_NON_INODE_OWNER(owner) ||
1199 (flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))));
1200 oldext = unwritten ? XFS_RMAP_UNWRITTEN : 0;
1201 new_endoff = offset + len;
1202 trace_xfs_rmap_convert(cur, bno, len, unwritten, oinfo);
1203
1204 /*
1205 * For the initial lookup, look for an exact match or the left-adjacent
1206 * record for our insertion point. This will also give us the record for
1207 * start block contiguity tests.
1208 */
1209 error = xfs_rmap_lookup_le(cur, bno, owner, offset, oldext, &PREV, &i);
1210 if (error)
1211 goto done;
1212 if (XFS_IS_CORRUPT(mp, i != 1)) {
1213 xfs_btree_mark_sick(cur);
1214 error = -EFSCORRUPTED;
1215 goto done;
1216 }
1217
1218 trace_xfs_rmap_lookup_le_range_result(cur, PREV.rm_startblock,
1219 PREV.rm_blockcount, PREV.rm_owner, PREV.rm_offset,
1220 PREV.rm_flags);
1221
1222 ASSERT(PREV.rm_offset <= offset);
1223 ASSERT(PREV.rm_offset + PREV.rm_blockcount >= new_endoff);
1224 ASSERT((PREV.rm_flags & XFS_RMAP_UNWRITTEN) == oldext);
1225 newext = ~oldext & XFS_RMAP_UNWRITTEN;
1226
1227 /*
1228 * Set flags determining what part of the previous oldext allocation
1229 * extent is being replaced by a newext allocation.
1230 */
1231 if (PREV.rm_offset == offset)
1232 state |= RMAP_LEFT_FILLING;
1233 if (PREV.rm_offset + PREV.rm_blockcount == new_endoff)
1234 state |= RMAP_RIGHT_FILLING;
1235
1236 /*
1237 * Decrement the cursor to see if we have a left-adjacent record to our
1238 * insertion point. This will give us the record for end block
1239 * contiguity tests.
1240 */
1241 error = xfs_btree_decrement(cur, 0, &i);
1242 if (error)
1243 goto done;
1244 if (i) {
1245 state |= RMAP_LEFT_VALID;
1246 error = xfs_rmap_get_rec(cur, &LEFT, &i);
1247 if (error)
1248 goto done;
1249 if (XFS_IS_CORRUPT(mp, i != 1)) {
1250 xfs_btree_mark_sick(cur);
1251 error = -EFSCORRUPTED;
1252 goto done;
1253 }
1254 if (XFS_IS_CORRUPT(mp,
1255 LEFT.rm_startblock + LEFT.rm_blockcount >
1256 bno)) {
1257 xfs_btree_mark_sick(cur);
1258 error = -EFSCORRUPTED;
1259 goto done;
1260 }
1261 trace_xfs_rmap_find_left_neighbor_result(cur,
1262 LEFT.rm_startblock, LEFT.rm_blockcount,
1263 LEFT.rm_owner, LEFT.rm_offset, LEFT.rm_flags);
1264 if (LEFT.rm_startblock + LEFT.rm_blockcount == bno &&
1265 LEFT.rm_offset + LEFT.rm_blockcount == offset &&
1266 xfs_rmap_is_mergeable(&LEFT, owner, newext))
1267 state |= RMAP_LEFT_CONTIG;
1268 }
1269
1270 /*
1271 * Increment the cursor to see if we have a right-adjacent record to our
1272 * insertion point. This will give us the record for end block
1273 * contiguity tests.
1274 */
1275 error = xfs_btree_increment(cur, 0, &i);
1276 if (error)
1277 goto done;
1278 if (XFS_IS_CORRUPT(mp, i != 1)) {
1279 xfs_btree_mark_sick(cur);
1280 error = -EFSCORRUPTED;
1281 goto done;
1282 }
1283 error = xfs_btree_increment(cur, 0, &i);
1284 if (error)
1285 goto done;
1286 if (i) {
1287 state |= RMAP_RIGHT_VALID;
1288 error = xfs_rmap_get_rec(cur, &RIGHT, &i);
1289 if (error)
1290 goto done;
1291 if (XFS_IS_CORRUPT(mp, i != 1)) {
1292 xfs_btree_mark_sick(cur);
1293 error = -EFSCORRUPTED;
1294 goto done;
1295 }
1296 if (XFS_IS_CORRUPT(mp, bno + len > RIGHT.rm_startblock)) {
1297 xfs_btree_mark_sick(cur);
1298 error = -EFSCORRUPTED;
1299 goto done;
1300 }
1301 trace_xfs_rmap_find_right_neighbor_result(cur,
1302 RIGHT.rm_startblock, RIGHT.rm_blockcount,
1303 RIGHT.rm_owner, RIGHT.rm_offset,
1304 RIGHT.rm_flags);
1305 if (bno + len == RIGHT.rm_startblock &&
1306 offset + len == RIGHT.rm_offset &&
1307 xfs_rmap_is_mergeable(&RIGHT, owner, newext))
1308 state |= RMAP_RIGHT_CONTIG;
1309 }
1310
1311 /* check that left + prev + right is not too long */
1312 if ((state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1313 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) ==
1314 (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1315 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG) &&
1316 (unsigned long)LEFT.rm_blockcount + len +
1317 RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
1318 state &= ~RMAP_RIGHT_CONTIG;
1319
1320 trace_xfs_rmap_convert_state(cur, state, _RET_IP_);
1321
1322 /* reset the cursor back to PREV */
1323 error = xfs_rmap_lookup_le(cur, bno, owner, offset, oldext, NULL, &i);
1324 if (error)
1325 goto done;
1326 if (XFS_IS_CORRUPT(mp, i != 1)) {
1327 xfs_btree_mark_sick(cur);
1328 error = -EFSCORRUPTED;
1329 goto done;
1330 }
1331
1332 /*
1333 * Switch out based on the FILLING and CONTIG state bits.
1334 */
1335 switch (state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1336 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) {
1337 case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1338 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
1339 /*
1340 * Setting all of a previous oldext extent to newext.
1341 * The left and right neighbors are both contiguous with new.
1342 */
1343 error = xfs_btree_increment(cur, 0, &i);
1344 if (error)
1345 goto done;
1346 if (XFS_IS_CORRUPT(mp, i != 1)) {
1347 xfs_btree_mark_sick(cur);
1348 error = -EFSCORRUPTED;
1349 goto done;
1350 }
1351 trace_xfs_rmap_delete(cur, RIGHT.rm_startblock,
1352 RIGHT.rm_blockcount, RIGHT.rm_owner,
1353 RIGHT.rm_offset, RIGHT.rm_flags);
1354 error = xfs_btree_delete(cur, &i);
1355 if (error)
1356 goto done;
1357 if (XFS_IS_CORRUPT(mp, i != 1)) {
1358 xfs_btree_mark_sick(cur);
1359 error = -EFSCORRUPTED;
1360 goto done;
1361 }
1362 error = xfs_btree_decrement(cur, 0, &i);
1363 if (error)
1364 goto done;
1365 if (XFS_IS_CORRUPT(mp, i != 1)) {
1366 xfs_btree_mark_sick(cur);
1367 error = -EFSCORRUPTED;
1368 goto done;
1369 }
1370 trace_xfs_rmap_delete(cur, PREV.rm_startblock,
1371 PREV.rm_blockcount, PREV.rm_owner,
1372 PREV.rm_offset, PREV.rm_flags);
1373 error = xfs_btree_delete(cur, &i);
1374 if (error)
1375 goto done;
1376 if (XFS_IS_CORRUPT(mp, i != 1)) {
1377 xfs_btree_mark_sick(cur);
1378 error = -EFSCORRUPTED;
1379 goto done;
1380 }
1381 error = xfs_btree_decrement(cur, 0, &i);
1382 if (error)
1383 goto done;
1384 if (XFS_IS_CORRUPT(mp, i != 1)) {
1385 xfs_btree_mark_sick(cur);
1386 error = -EFSCORRUPTED;
1387 goto done;
1388 }
1389 NEW = LEFT;
1390 NEW.rm_blockcount += PREV.rm_blockcount + RIGHT.rm_blockcount;
1391 error = xfs_rmap_update(cur, &NEW);
1392 if (error)
1393 goto done;
1394 break;
1395
1396 case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
1397 /*
1398 * Setting all of a previous oldext extent to newext.
1399 * The left neighbor is contiguous, the right is not.
1400 */
1401 trace_xfs_rmap_delete(cur, PREV.rm_startblock,
1402 PREV.rm_blockcount, PREV.rm_owner,
1403 PREV.rm_offset, PREV.rm_flags);
1404 error = xfs_btree_delete(cur, &i);
1405 if (error)
1406 goto done;
1407 if (XFS_IS_CORRUPT(mp, i != 1)) {
1408 xfs_btree_mark_sick(cur);
1409 error = -EFSCORRUPTED;
1410 goto done;
1411 }
1412 error = xfs_btree_decrement(cur, 0, &i);
1413 if (error)
1414 goto done;
1415 if (XFS_IS_CORRUPT(mp, i != 1)) {
1416 xfs_btree_mark_sick(cur);
1417 error = -EFSCORRUPTED;
1418 goto done;
1419 }
1420 NEW = LEFT;
1421 NEW.rm_blockcount += PREV.rm_blockcount;
1422 error = xfs_rmap_update(cur, &NEW);
1423 if (error)
1424 goto done;
1425 break;
1426
1427 case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
1428 /*
1429 * Setting all of a previous oldext extent to newext.
1430 * The right neighbor is contiguous, the left is not.
1431 */
1432 error = xfs_btree_increment(cur, 0, &i);
1433 if (error)
1434 goto done;
1435 if (XFS_IS_CORRUPT(mp, i != 1)) {
1436 xfs_btree_mark_sick(cur);
1437 error = -EFSCORRUPTED;
1438 goto done;
1439 }
1440 trace_xfs_rmap_delete(cur, RIGHT.rm_startblock,
1441 RIGHT.rm_blockcount, RIGHT.rm_owner,
1442 RIGHT.rm_offset, RIGHT.rm_flags);
1443 error = xfs_btree_delete(cur, &i);
1444 if (error)
1445 goto done;
1446 if (XFS_IS_CORRUPT(mp, i != 1)) {
1447 xfs_btree_mark_sick(cur);
1448 error = -EFSCORRUPTED;
1449 goto done;
1450 }
1451 error = xfs_btree_decrement(cur, 0, &i);
1452 if (error)
1453 goto done;
1454 if (XFS_IS_CORRUPT(mp, i != 1)) {
1455 xfs_btree_mark_sick(cur);
1456 error = -EFSCORRUPTED;
1457 goto done;
1458 }
1459 NEW = PREV;
1460 NEW.rm_blockcount = len + RIGHT.rm_blockcount;
1461 NEW.rm_flags = newext;
1462 error = xfs_rmap_update(cur, &NEW);
1463 if (error)
1464 goto done;
1465 break;
1466
1467 case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING:
1468 /*
1469 * Setting all of a previous oldext extent to newext.
1470 * Neither the left nor right neighbors are contiguous with
1471 * the new one.
1472 */
1473 NEW = PREV;
1474 NEW.rm_flags = newext;
1475 error = xfs_rmap_update(cur, &NEW);
1476 if (error)
1477 goto done;
1478 break;
1479
1480 case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG:
1481 /*
1482 * Setting the first part of a previous oldext extent to newext.
1483 * The left neighbor is contiguous.
1484 */
1485 NEW = PREV;
1486 NEW.rm_offset += len;
1487 NEW.rm_startblock += len;
1488 NEW.rm_blockcount -= len;
1489 error = xfs_rmap_update(cur, &NEW);
1490 if (error)
1491 goto done;
1492 error = xfs_btree_decrement(cur, 0, &i);
1493 if (error)
1494 goto done;
1495 NEW = LEFT;
1496 NEW.rm_blockcount += len;
1497 error = xfs_rmap_update(cur, &NEW);
1498 if (error)
1499 goto done;
1500 break;
1501
1502 case RMAP_LEFT_FILLING:
1503 /*
1504 * Setting the first part of a previous oldext extent to newext.
1505 * The left neighbor is not contiguous.
1506 */
1507 NEW = PREV;
1508 NEW.rm_startblock += len;
1509 NEW.rm_offset += len;
1510 NEW.rm_blockcount -= len;
1511 error = xfs_rmap_update(cur, &NEW);
1512 if (error)
1513 goto done;
1514 NEW.rm_startblock = bno;
1515 NEW.rm_owner = owner;
1516 NEW.rm_offset = offset;
1517 NEW.rm_blockcount = len;
1518 NEW.rm_flags = newext;
1519 cur->bc_rec.r = NEW;
1520 trace_xfs_rmap_insert(cur, bno, len, owner, offset, newext);
1521 error = xfs_btree_insert(cur, &i);
1522 if (error)
1523 goto done;
1524 if (XFS_IS_CORRUPT(mp, i != 1)) {
1525 xfs_btree_mark_sick(cur);
1526 error = -EFSCORRUPTED;
1527 goto done;
1528 }
1529 break;
1530
1531 case RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
1532 /*
1533 * Setting the last part of a previous oldext extent to newext.
1534 * The right neighbor is contiguous with the new allocation.
1535 */
1536 NEW = PREV;
1537 NEW.rm_blockcount -= len;
1538 error = xfs_rmap_update(cur, &NEW);
1539 if (error)
1540 goto done;
1541 error = xfs_btree_increment(cur, 0, &i);
1542 if (error)
1543 goto done;
1544 NEW = RIGHT;
1545 NEW.rm_offset = offset;
1546 NEW.rm_startblock = bno;
1547 NEW.rm_blockcount += len;
1548 error = xfs_rmap_update(cur, &NEW);
1549 if (error)
1550 goto done;
1551 break;
1552
1553 case RMAP_RIGHT_FILLING:
1554 /*
1555 * Setting the last part of a previous oldext extent to newext.
1556 * The right neighbor is not contiguous.
1557 */
1558 NEW = PREV;
1559 NEW.rm_blockcount -= len;
1560 error = xfs_rmap_update(cur, &NEW);
1561 if (error)
1562 goto done;
1563 error = xfs_rmap_lookup_eq(cur, bno, len, owner, offset,
1564 oldext, &i);
1565 if (error)
1566 goto done;
1567 if (XFS_IS_CORRUPT(mp, i != 0)) {
1568 xfs_btree_mark_sick(cur);
1569 error = -EFSCORRUPTED;
1570 goto done;
1571 }
1572 NEW.rm_startblock = bno;
1573 NEW.rm_owner = owner;
1574 NEW.rm_offset = offset;
1575 NEW.rm_blockcount = len;
1576 NEW.rm_flags = newext;
1577 cur->bc_rec.r = NEW;
1578 trace_xfs_rmap_insert(cur, bno, len, owner, offset, newext);
1579 error = xfs_btree_insert(cur, &i);
1580 if (error)
1581 goto done;
1582 if (XFS_IS_CORRUPT(mp, i != 1)) {
1583 xfs_btree_mark_sick(cur);
1584 error = -EFSCORRUPTED;
1585 goto done;
1586 }
1587 break;
1588
1589 case 0:
1590 /*
1591 * Setting the middle part of a previous oldext extent to
1592 * newext. Contiguity is impossible here.
1593 * One extent becomes three extents.
1594 */
1595 /* new right extent - oldext */
1596 NEW.rm_startblock = bno + len;
1597 NEW.rm_owner = owner;
1598 NEW.rm_offset = new_endoff;
1599 NEW.rm_blockcount = PREV.rm_offset + PREV.rm_blockcount -
1600 new_endoff;
1601 NEW.rm_flags = PREV.rm_flags;
1602 error = xfs_rmap_update(cur, &NEW);
1603 if (error)
1604 goto done;
1605 /* new left extent - oldext */
1606 NEW = PREV;
1607 NEW.rm_blockcount = offset - PREV.rm_offset;
1608 cur->bc_rec.r = NEW;
1609 trace_xfs_rmap_insert(cur, NEW.rm_startblock,
1610 NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset,
1611 NEW.rm_flags);
1612 error = xfs_btree_insert(cur, &i);
1613 if (error)
1614 goto done;
1615 if (XFS_IS_CORRUPT(mp, i != 1)) {
1616 xfs_btree_mark_sick(cur);
1617 error = -EFSCORRUPTED;
1618 goto done;
1619 }
1620 /*
1621 * Reset the cursor to the position of the new extent
1622 * we are about to insert as we can't trust it after
1623 * the previous insert.
1624 */
1625 error = xfs_rmap_lookup_eq(cur, bno, len, owner, offset,
1626 oldext, &i);
1627 if (error)
1628 goto done;
1629 if (XFS_IS_CORRUPT(mp, i != 0)) {
1630 xfs_btree_mark_sick(cur);
1631 error = -EFSCORRUPTED;
1632 goto done;
1633 }
1634 /* new middle extent - newext */
1635 cur->bc_rec.r.rm_flags &= ~XFS_RMAP_UNWRITTEN;
1636 cur->bc_rec.r.rm_flags |= newext;
1637 trace_xfs_rmap_insert(cur, bno, len, owner, offset, newext);
1638 error = xfs_btree_insert(cur, &i);
1639 if (error)
1640 goto done;
1641 if (XFS_IS_CORRUPT(mp, i != 1)) {
1642 xfs_btree_mark_sick(cur);
1643 error = -EFSCORRUPTED;
1644 goto done;
1645 }
1646 break;
1647
1648 case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
1649 case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
1650 case RMAP_LEFT_FILLING | RMAP_RIGHT_CONTIG:
1651 case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
1652 case RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
1653 case RMAP_LEFT_CONTIG:
1654 case RMAP_RIGHT_CONTIG:
1655 /*
1656 * These cases are all impossible.
1657 */
1658 ASSERT(0);
1659 }
1660
1661 trace_xfs_rmap_convert_done(cur, bno, len, unwritten, oinfo);
1662 done:
1663 if (error)
1664 trace_xfs_rmap_convert_error(cur, error, _RET_IP_);
1665 return error;
1666 }
1667
1668 /*
1669 * Convert an unwritten extent to a real extent or vice versa. If there is no
1670 * possibility of overlapping extents, delegate to the simpler convert
1671 * function.
1672 */
1673 STATIC int
1674 xfs_rmap_convert_shared(
1675 struct xfs_btree_cur *cur,
1676 xfs_agblock_t bno,
1677 xfs_extlen_t len,
1678 bool unwritten,
1679 const struct xfs_owner_info *oinfo)
1680 {
1681 struct xfs_mount *mp = cur->bc_mp;
1682 struct xfs_rmap_irec r[4]; /* neighbor extent entries */
1683 /* left is 0, right is 1, */
1684 /* prev is 2, new is 3 */
1685 uint64_t owner;
1686 uint64_t offset;
1687 uint64_t new_endoff;
1688 unsigned int oldext;
1689 unsigned int newext;
1690 unsigned int flags = 0;
1691 int i;
1692 int state = 0;
1693 int error;
1694
1695 xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
1696 ASSERT(!(XFS_RMAP_NON_INODE_OWNER(owner) ||
1697 (flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))));
1698 oldext = unwritten ? XFS_RMAP_UNWRITTEN : 0;
1699 new_endoff = offset + len;
1700 trace_xfs_rmap_convert(cur, bno, len, unwritten, oinfo);
1701
1702 /*
1703 * For the initial lookup, look for an exact match or the left-adjacent
1704 * record for our insertion point. This will also give us the record for
1705 * start block contiguity tests.
1706 */
1707 error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, oldext,
1708 &PREV, &i);
1709 if (error)
1710 goto done;
1711 if (XFS_IS_CORRUPT(mp, i != 1)) {
1712 xfs_btree_mark_sick(cur);
1713 error = -EFSCORRUPTED;
1714 goto done;
1715 }
1716
1717 ASSERT(PREV.rm_offset <= offset);
1718 ASSERT(PREV.rm_offset + PREV.rm_blockcount >= new_endoff);
1719 ASSERT((PREV.rm_flags & XFS_RMAP_UNWRITTEN) == oldext);
1720 newext = ~oldext & XFS_RMAP_UNWRITTEN;
1721
1722 /*
1723 * Set flags determining what part of the previous oldext allocation
1724 * extent is being replaced by a newext allocation.
1725 */
1726 if (PREV.rm_offset == offset)
1727 state |= RMAP_LEFT_FILLING;
1728 if (PREV.rm_offset + PREV.rm_blockcount == new_endoff)
1729 state |= RMAP_RIGHT_FILLING;
1730
1731 /* Is there a left record that abuts our range? */
1732 error = xfs_rmap_find_left_neighbor(cur, bno, owner, offset, newext,
1733 &LEFT, &i);
1734 if (error)
1735 goto done;
1736 if (i) {
1737 state |= RMAP_LEFT_VALID;
1738 if (XFS_IS_CORRUPT(mp,
1739 LEFT.rm_startblock + LEFT.rm_blockcount >
1740 bno)) {
1741 xfs_btree_mark_sick(cur);
1742 error = -EFSCORRUPTED;
1743 goto done;
1744 }
1745 if (xfs_rmap_is_mergeable(&LEFT, owner, newext))
1746 state |= RMAP_LEFT_CONTIG;
1747 }
1748
1749 /* Is there a right record that abuts our range? */
1750 error = xfs_rmap_lookup_eq(cur, bno + len, len, owner, offset + len,
1751 newext, &i);
1752 if (error)
1753 goto done;
1754 if (i) {
1755 state |= RMAP_RIGHT_VALID;
1756 error = xfs_rmap_get_rec(cur, &RIGHT, &i);
1757 if (error)
1758 goto done;
1759 if (XFS_IS_CORRUPT(mp, i != 1)) {
1760 xfs_btree_mark_sick(cur);
1761 error = -EFSCORRUPTED;
1762 goto done;
1763 }
1764 if (XFS_IS_CORRUPT(mp, bno + len > RIGHT.rm_startblock)) {
1765 xfs_btree_mark_sick(cur);
1766 error = -EFSCORRUPTED;
1767 goto done;
1768 }
1769 trace_xfs_rmap_find_right_neighbor_result(cur,
1770 RIGHT.rm_startblock, RIGHT.rm_blockcount,
1771 RIGHT.rm_owner, RIGHT.rm_offset,
1772 RIGHT.rm_flags);
1773 if (xfs_rmap_is_mergeable(&RIGHT, owner, newext))
1774 state |= RMAP_RIGHT_CONTIG;
1775 }
1776
1777 /* check that left + prev + right is not too long */
1778 if ((state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1779 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) ==
1780 (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1781 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG) &&
1782 (unsigned long)LEFT.rm_blockcount + len +
1783 RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
1784 state &= ~RMAP_RIGHT_CONTIG;
1785
1786 trace_xfs_rmap_convert_state(cur, state, _RET_IP_);
1787 /*
1788 * Switch out based on the FILLING and CONTIG state bits.
1789 */
1790 switch (state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1791 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) {
1792 case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1793 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
1794 /*
1795 * Setting all of a previous oldext extent to newext.
1796 * The left and right neighbors are both contiguous with new.
1797 */
1798 error = xfs_rmap_delete(cur, RIGHT.rm_startblock,
1799 RIGHT.rm_blockcount, RIGHT.rm_owner,
1800 RIGHT.rm_offset, RIGHT.rm_flags);
1801 if (error)
1802 goto done;
1803 error = xfs_rmap_delete(cur, PREV.rm_startblock,
1804 PREV.rm_blockcount, PREV.rm_owner,
1805 PREV.rm_offset, PREV.rm_flags);
1806 if (error)
1807 goto done;
1808 NEW = LEFT;
1809 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
1810 NEW.rm_blockcount, NEW.rm_owner,
1811 NEW.rm_offset, NEW.rm_flags, &i);
1812 if (error)
1813 goto done;
1814 if (XFS_IS_CORRUPT(mp, i != 1)) {
1815 xfs_btree_mark_sick(cur);
1816 error = -EFSCORRUPTED;
1817 goto done;
1818 }
1819 NEW.rm_blockcount += PREV.rm_blockcount + RIGHT.rm_blockcount;
1820 error = xfs_rmap_update(cur, &NEW);
1821 if (error)
1822 goto done;
1823 break;
1824
1825 case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
1826 /*
1827 * Setting all of a previous oldext extent to newext.
1828 * The left neighbor is contiguous, the right is not.
1829 */
1830 error = xfs_rmap_delete(cur, PREV.rm_startblock,
1831 PREV.rm_blockcount, PREV.rm_owner,
1832 PREV.rm_offset, PREV.rm_flags);
1833 if (error)
1834 goto done;
1835 NEW = LEFT;
1836 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
1837 NEW.rm_blockcount, NEW.rm_owner,
1838 NEW.rm_offset, NEW.rm_flags, &i);
1839 if (error)
1840 goto done;
1841 if (XFS_IS_CORRUPT(mp, i != 1)) {
1842 xfs_btree_mark_sick(cur);
1843 error = -EFSCORRUPTED;
1844 goto done;
1845 }
1846 NEW.rm_blockcount += PREV.rm_blockcount;
1847 error = xfs_rmap_update(cur, &NEW);
1848 if (error)
1849 goto done;
1850 break;
1851
1852 case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
1853 /*
1854 * Setting all of a previous oldext extent to newext.
1855 * The right neighbor is contiguous, the left is not.
1856 */
1857 error = xfs_rmap_delete(cur, RIGHT.rm_startblock,
1858 RIGHT.rm_blockcount, RIGHT.rm_owner,
1859 RIGHT.rm_offset, RIGHT.rm_flags);
1860 if (error)
1861 goto done;
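/* Extend PREV to absorb RIGHT and switch PREV over to RIGHT's flags. */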
1862 NEW = PREV;
1863 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
1864 NEW.rm_blockcount, NEW.rm_owner,
1865 NEW.rm_offset, NEW.rm_flags, &i);
1866 if (error)
1867 goto done;
1868 if (XFS_IS_CORRUPT(mp, i != 1)) {
1869 xfs_btree_mark_sick(cur);
1870 error = -EFSCORRUPTED;
1871 goto done;
1872 }
1873 NEW.rm_blockcount += RIGHT.rm_blockcount;
1874 NEW.rm_flags = RIGHT.rm_flags;
1875 error = xfs_rmap_update(cur, &NEW);
1876 if (error)
1877 goto done;
1878 break;
1879
1880 case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING:
1881 /*
1882 * Setting all of a previous oldext extent to newext.
1883 * Neither the left nor right neighbors are contiguous with
1884 * the new one.
1885 */
1886 NEW = PREV;
1887 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
1888 NEW.rm_blockcount, NEW.rm_owner,
1889 NEW.rm_offset, NEW.rm_flags, &i);
1890 if (error)
1891 goto done;
1892 if (XFS_IS_CORRUPT(mp, i != 1)) {
1893 xfs_btree_mark_sick(cur);
1894 error = -EFSCORRUPTED;
1895 goto done;
1896 }
1897 NEW.rm_flags = newext;
1898 error = xfs_rmap_update(cur, &NEW);
1899 if (error)
1900 goto done;
1901 break;
1902
1903 case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG:
1904 /*
1905 * Setting the first part of a previous oldext extent to newext.
1906 * The left neighbor is contiguous.
1907 */
1908 NEW = PREV;
1909 error = xfs_rmap_delete(cur, NEW.rm_startblock,
1910 NEW.rm_blockcount, NEW.rm_owner,
1911 NEW.rm_offset, NEW.rm_flags);
1912 if (error)
1913 goto done;
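/* Re-insert the unconverted remainder of PREV at its new start. */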
1914 NEW.rm_offset += len;
1915 NEW.rm_startblock += len;
1916 NEW.rm_blockcount -= len;
1917 error = xfs_rmap_insert(cur, NEW.rm_startblock,
1918 NEW.rm_blockcount, NEW.rm_owner,
1919 NEW.rm_offset, NEW.rm_flags);
1920 if (error)
1921 goto done;
1922 NEW = LEFT;
1923 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
1924 NEW.rm_blockcount, NEW.rm_owner,
1925 NEW.rm_offset, NEW.rm_flags, &i);
1926 if (error)
1927 goto done;
1928 if (XFS_IS_CORRUPT(mp, i != 1)) {
1929 xfs_btree_mark_sick(cur);
1930 error = -EFSCORRUPTED;
1931 goto done;
1932 }
1933 NEW.rm_blockcount += len;
1934 error = xfs_rmap_update(cur, &NEW);
1935 if (error)
1936 goto done;
1937 break;
1938
1939 case RMAP_LEFT_FILLING:
1940 /*
1941 * Setting the first part of a previous oldext extent to newext.
1942 * The left neighbor is not contiguous.
1943 */
1944 NEW = PREV;
1945 error = xfs_rmap_delete(cur, NEW.rm_startblock,
1946 NEW.rm_blockcount, NEW.rm_owner,
1947 NEW.rm_offset, NEW.rm_flags);
1948 if (error)
1949 goto done;
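/* Re-insert the shortened PREV, then add a record for the converted range. */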
1950 NEW.rm_offset += len;
1951 NEW.rm_startblock += len;
1952 NEW.rm_blockcount -= len;
1953 error = xfs_rmap_insert(cur, NEW.rm_startblock,
1954 NEW.rm_blockcount, NEW.rm_owner,
1955 NEW.rm_offset, NEW.rm_flags);
1956 if (error)
1957 goto done;
1958 error = xfs_rmap_insert(cur, bno, len, owner, offset, newext);
1959 if (error)
1960 goto done;
1961 break;
1962
1963 case RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
1964 /*
1965 * Setting the last part of a previous oldext extent to newext.
1966 * The right neighbor is contiguous with the new allocation.
1967 */
1968 NEW = PREV;
1969 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
1970 NEW.rm_blockcount, NEW.rm_owner,
1971 NEW.rm_offset, NEW.rm_flags, &i);
1972 if (error)
1973 goto done;
1974 if (XFS_IS_CORRUPT(mp, i != 1)) {
1975 xfs_btree_mark_sick(cur);
1976 error = -EFSCORRUPTED;
1977 goto done;
1978 }
1979 NEW.rm_blockcount = offset - NEW.rm_offset;
1980 error = xfs_rmap_update(cur, &NEW);
1981 if (error)
1982 goto done;
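/*
 * Extend RIGHT backwards over the converted range.  The start block is
 * a key field, so delete the old record and insert the updated one.
 */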
1983 NEW = RIGHT;
1984 error = xfs_rmap_delete(cur, NEW.rm_startblock,
1985 NEW.rm_blockcount, NEW.rm_owner,
1986 NEW.rm_offset, NEW.rm_flags);
1987 if (error)
1988 goto done;
1989 NEW.rm_offset = offset;
1990 NEW.rm_startblock = bno;
1991 NEW.rm_blockcount += len;
1992 error = xfs_rmap_insert(cur, NEW.rm_startblock,
1993 NEW.rm_blockcount, NEW.rm_owner,
1994 NEW.rm_offset, NEW.rm_flags);
1995 if (error)
1996 goto done;
1997 break;
1998
1999 case RMAP_RIGHT_FILLING:
2000 /*
2001 * Setting the last part of a previous oldext extent to newext.
2002 * The right neighbor is not contiguous.
2003 */
2004 NEW = PREV;
2005 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
2006 NEW.rm_blockcount, NEW.rm_owner,
2007 NEW.rm_offset, NEW.rm_flags, &i);
2008 if (error)
2009 goto done;
2010 if (XFS_IS_CORRUPT(mp, i != 1)) {
2011 xfs_btree_mark_sick(cur);
2012 error = -EFSCORRUPTED;
2013 goto done;
2014 }
2015 NEW.rm_blockcount -= len;
2016 error = xfs_rmap_update(cur, &NEW);
2017 if (error)
2018 goto done;
2019 error = xfs_rmap_insert(cur, bno, len, owner, offset, newext);
2020 if (error)
2021 goto done;
2022 break;
2023
2024 case 0:
2025 /*
2026 * Setting the middle part of a previous oldext extent to
2027 * newext. Contiguity is impossible here.
2028 * One extent becomes three extents.
2029 */
2030 /* new right extent - oldext */
2031 NEW.rm_startblock = bno + len;
2032 NEW.rm_owner = owner;
2033 NEW.rm_offset = new_endoff;
2034 NEW.rm_blockcount = PREV.rm_offset + PREV.rm_blockcount -
2035 new_endoff;
2036 NEW.rm_flags = PREV.rm_flags;
2037 error = xfs_rmap_insert(cur, NEW.rm_startblock,
2038 NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset,
2039 NEW.rm_flags);
2040 if (error)
2041 goto done;
2042 /* new left extent - oldext */
2043 NEW = PREV;
2044 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
2045 NEW.rm_blockcount, NEW.rm_owner,
2046 NEW.rm_offset, NEW.rm_flags, &i);
2047 if (error)
2048 goto done;
2049 if (XFS_IS_CORRUPT(mp, i != 1)) {
2050 xfs_btree_mark_sick(cur);
2051 error = -EFSCORRUPTED;
2052 goto done;
2053 }
2054 NEW.rm_blockcount = offset - NEW.rm_offset;
2055 error = xfs_rmap_update(cur, &NEW);
2056 if (error)
2057 goto done;
2058 /* new middle extent - newext */
2059 NEW.rm_startblock = bno;
2060 NEW.rm_blockcount = len;
2061 NEW.rm_owner = owner;
2062 NEW.rm_offset = offset;
2063 NEW.rm_flags = newext;
2064 error = xfs_rmap_insert(cur, NEW.rm_startblock,
2065 NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset,
2066 NEW.rm_flags);
2067 if (error)
2068 goto done;
2069 break;
2070
2071 case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
2072 case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
2073 case RMAP_LEFT_FILLING | RMAP_RIGHT_CONTIG:
2074 case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
2075 case RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
2076 case RMAP_LEFT_CONTIG:
2077 case RMAP_RIGHT_CONTIG:
2078 /*
2079 * These cases are all impossible.
2080 */
2081 ASSERT(0);
2082 }
2083
2084 trace_xfs_rmap_convert_done(cur, bno, len, unwritten, oinfo);
2085 done:
2086 if (error)
2087 trace_xfs_rmap_convert_error(cur, error, _RET_IP_);
2088 return error;
2089 }
2090
2091 #undef NEW
2092 #undef LEFT
2093 #undef RIGHT
2094 #undef PREV
2095
2096 /*
2097 * Find an extent in the rmap btree and unmap it. For rmap extent types that
2098 * can overlap (data fork rmaps on reflink filesystems) we must be careful
2099 * that the prev/next records in the btree might belong to another owner.
2100 * Therefore we must use delete+insert to alter any of the key fields.
2101 *
2102 * For every other situation there can only be one owner for a given extent,
2103 * so we can call the regular _free function.
2104 */
2105 STATIC int
2106 xfs_rmap_unmap_shared(
2107 struct xfs_btree_cur *cur,
2108 xfs_agblock_t bno,
2109 xfs_extlen_t len,
2110 bool unwritten,
2111 const struct xfs_owner_info *oinfo)
2112 {
2113 struct xfs_mount *mp = cur->bc_mp;
2114 struct xfs_rmap_irec ltrec;
2115 uint64_t ltoff;
2116 int error = 0;
2117 int i;
2118 uint64_t owner;
2119 uint64_t offset;
2120 unsigned int flags;
2121
2122 xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
2123 if (unwritten)
2124 flags |= XFS_RMAP_UNWRITTEN;
2125 trace_xfs_rmap_unmap(cur, bno, len, unwritten, oinfo);
2126
2127 /*
2128 * We should always have a left record because there's a static record
2129 * for the AG headers at rm_startblock == 0 created by mkfs/growfs that
2130 * will not ever be removed from the tree.
2131 */
2132 error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, flags,
2133 <rec, &i);
2134 if (error)
2135 goto out_error;
2136 if (XFS_IS_CORRUPT(mp, i != 1)) {
2137 xfs_btree_mark_sick(cur);
2138 error = -EFSCORRUPTED;
2139 goto out_error;
2140 }
2141 ltoff = ltrec.rm_offset;
2142
2143 /* Make sure the extent we found covers the entire freeing range. */
2144 if (XFS_IS_CORRUPT(mp,
2145 ltrec.rm_startblock > bno ||
2146 ltrec.rm_startblock + ltrec.rm_blockcount <
2147 bno + len)) {
2148 xfs_btree_mark_sick(cur);
2149 error = -EFSCORRUPTED;
2150 goto out_error;
2151 }
2152
2153 /* Make sure the owner matches what we expect to find in the tree. */
2154 if (XFS_IS_CORRUPT(mp, owner != ltrec.rm_owner)) {
2155 xfs_btree_mark_sick(cur);
2156 error = -EFSCORRUPTED;
2157 goto out_error;
2158 }
2159
2160 /* Make sure the unwritten flag matches. */
2161 if (XFS_IS_CORRUPT(mp,
2162 (flags & XFS_RMAP_UNWRITTEN) !=
2163 (ltrec.rm_flags & XFS_RMAP_UNWRITTEN))) {
2164 xfs_btree_mark_sick(cur);
2165 error = -EFSCORRUPTED;
2166 goto out_error;
2167 }
2168
2169 /* Check the offset. */
2170 if (XFS_IS_CORRUPT(mp, ltrec.rm_offset > offset)) {
2171 xfs_btree_mark_sick(cur);
2172 error = -EFSCORRUPTED;
2173 goto out_error;
2174 }
2175 if (XFS_IS_CORRUPT(mp, offset > ltoff + ltrec.rm_blockcount)) {
2176 xfs_btree_mark_sick(cur);
2177 error = -EFSCORRUPTED;
2178 goto out_error;
2179 }
2180
2181 if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
2182 /* Exact match, simply remove the record from rmap tree. */
2183 error = xfs_rmap_delete(cur, ltrec.rm_startblock,
2184 ltrec.rm_blockcount, ltrec.rm_owner,
2185 ltrec.rm_offset, ltrec.rm_flags);
2186 if (error)
2187 goto out_error;
2188 } else if (ltrec.rm_startblock == bno) {
2189 /*
2190 * Overlap left hand side of extent: move the start, trim the
2191 * length, and delete + re-insert the record at its new position.
2192 *
2193 * ltbno ltlen
2194 * Orig: |oooooooooooooooooooo|
2195 * Freeing: |fffffffff|
2196 * Result: |rrrrrrrrrr|
2197 * bno len
2198 */
2199
2200 /* Delete prev rmap. */
2201 error = xfs_rmap_delete(cur, ltrec.rm_startblock,
2202 ltrec.rm_blockcount, ltrec.rm_owner,
2203 ltrec.rm_offset, ltrec.rm_flags);
2204 if (error)
2205 goto out_error;
2206
2207 /* Add an rmap at the new offset. */
2208 ltrec.rm_startblock += len;
2209 ltrec.rm_blockcount -= len;
2210 ltrec.rm_offset += len;
2211 error = xfs_rmap_insert(cur, ltrec.rm_startblock,
2212 ltrec.rm_blockcount, ltrec.rm_owner,
2213 ltrec.rm_offset, ltrec.rm_flags);
2214 if (error)
2215 goto out_error;
2216 } else if (ltrec.rm_startblock + ltrec.rm_blockcount == bno + len) {
2217 /*
2218 * Overlap right hand side of extent: trim the length and
2219 * update the current record.
2220 *
2221 * ltbno ltlen
2222 * Orig: |oooooooooooooooooooo|
2223 * Freeing: |fffffffff|
2224 * Result: |rrrrrrrrrr|
2225 * bno len
2226 */
2227 error = xfs_rmap_lookup_eq(cur, ltrec.rm_startblock,
2228 ltrec.rm_blockcount, ltrec.rm_owner,
2229 ltrec.rm_offset, ltrec.rm_flags, &i);
2230 if (error)
2231 goto out_error;
2232 if (XFS_IS_CORRUPT(mp, i != 1)) {
2233 xfs_btree_mark_sick(cur);
2234 error = -EFSCORRUPTED;
2235 goto out_error;
2236 }
2237 ltrec.rm_blockcount -= len;
2238 error = xfs_rmap_update(cur, <rec);
2239 if (error)
2240 goto out_error;
2241 } else {
2242 /*
2243 * Overlap middle of extent: trim the length of the existing
2244 * record down to the new left-extent size, then insert a new
2245 * record covering the remaining right-extent space beyond the
2246 * freed range.
2247 *
2248 * ltbno ltlen
2249 * Orig: |oooooooooooooooooooo|
2250 * Freeing: |fffffffff|
2251 * Result: |rrrrr| |rrrr|
2252 * bno len
2253 */
2254 xfs_extlen_t orig_len = ltrec.rm_blockcount;
2255
2256 /* Shrink the left side of the rmap */
2257 error = xfs_rmap_lookup_eq(cur, ltrec.rm_startblock,
2258 ltrec.rm_blockcount, ltrec.rm_owner,
2259 ltrec.rm_offset, ltrec.rm_flags, &i);
2260 if (error)
2261 goto out_error;
2262 if (XFS_IS_CORRUPT(mp, i != 1)) {
2263 xfs_btree_mark_sick(cur);
2264 error = -EFSCORRUPTED;
2265 goto out_error;
2266 }
2267 ltrec.rm_blockcount = bno - ltrec.rm_startblock;
2268 error = xfs_rmap_update(cur, <rec);
2269 if (error)
2270 goto out_error;
2271
2272 /* Add an rmap at the new offset */
2273 error = xfs_rmap_insert(cur, bno + len,
2274 orig_len - len - ltrec.rm_blockcount,
2275 ltrec.rm_owner, offset + len,
2276 ltrec.rm_flags);
2277 if (error)
2278 goto out_error;
2279 }
2280
2281 trace_xfs_rmap_unmap_done(cur, bno, len, unwritten, oinfo);
2282 out_error:
2283 if (error)
2284 trace_xfs_rmap_unmap_error(cur, error, _RET_IP_);
2285 return error;
2286 }
2287
2288 /*
2289 * Find an extent in the rmap btree and map it. For rmap extent types that
2290 * can overlap (data fork rmaps on reflink filesystems) we must be careful
2291 * that the prev/next records in the btree might belong to another owner.
2292 * Therefore we must use delete+insert to alter any of the key fields.
2293 *
2294 * For every other situation there can only be one owner for a given extent,
2295 * so we can call the regular _alloc function.
2296 */
2297 STATIC int
2298 xfs_rmap_map_shared(
2299 struct xfs_btree_cur *cur,
2300 xfs_agblock_t bno,
2301 xfs_extlen_t len,
2302 bool unwritten,
2303 const struct xfs_owner_info *oinfo)
2304 {
2305 struct xfs_mount *mp = cur->bc_mp;
2306 struct xfs_rmap_irec ltrec;
2307 struct xfs_rmap_irec gtrec;
2308 int have_gt;
2309 int have_lt;
2310 int error = 0;
2311 int i;
2312 uint64_t owner;
2313 uint64_t offset;
2314 unsigned int flags = 0;
2315
2316 xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
2317 if (unwritten)
2318 flags |= XFS_RMAP_UNWRITTEN;
2319 trace_xfs_rmap_map(cur, bno, len, unwritten, oinfo);
2320
2321 /* Is there a left record that abuts our range? */
2322 error = xfs_rmap_find_left_neighbor(cur, bno, owner, offset, flags,
2323 <rec, &have_lt);
2324 if (error)
2325 goto out_error;
2326 if (have_lt &&
2327 !xfs_rmap_is_mergeable(<rec, owner, flags))
2328 have_lt = 0;
2329
2330 /* Is there a right record that abuts our range? */
2331 error = xfs_rmap_lookup_eq(cur, bno + len, len, owner, offset + len,
2332 flags, &have_gt);
2333 if (error)
2334 goto out_error;
2335 if (have_gt) {
2336 error = xfs_rmap_get_rec(cur, >rec, &have_gt);
2337 if (error)
2338 goto out_error;
2339 if (XFS_IS_CORRUPT(mp, have_gt != 1)) {
2340 xfs_btree_mark_sick(cur);
2341 error = -EFSCORRUPTED;
2342 goto out_error;
2343 }
2344 trace_xfs_rmap_find_right_neighbor_result(cur,
2345 gtrec.rm_startblock, gtrec.rm_blockcount,
2346 gtrec.rm_owner, gtrec.rm_offset,
2347 gtrec.rm_flags);
2348
2349 if (!xfs_rmap_is_mergeable(>rec, owner, flags))
2350 have_gt = 0;
2351 }
2352
2353 if (have_lt &&
2354 ltrec.rm_startblock + ltrec.rm_blockcount == bno &&
2355 ltrec.rm_offset + ltrec.rm_blockcount == offset) {
2356 /*
2357 * Left edge contiguous, merge into left record.
2358 *
2359 * ltbno ltlen
2360 * orig: |ooooooooo|
2361 * adding: |aaaaaaaaa|
2362 * result: |rrrrrrrrrrrrrrrrrrr|
2363 * bno len
2364 */
2365 ltrec.rm_blockcount += len;
2366 if (have_gt &&
2367 bno + len == gtrec.rm_startblock &&
2368 offset + len == gtrec.rm_offset) {
2369 /*
2370 * Right edge also contiguous, delete right record
2371 * and merge into left record.
2372 *
2373 * ltbno ltlen gtbno gtlen
2374 * orig: |ooooooooo| |ooooooooo|
2375 * adding: |aaaaaaaaa|
2376 * result: |rrrrrrrrrrrrrrrrrrrrrrrrrrrrr|
2377 */
2378 ltrec.rm_blockcount += gtrec.rm_blockcount;
2379 error = xfs_rmap_delete(cur, gtrec.rm_startblock,
2380 gtrec.rm_blockcount, gtrec.rm_owner,
2381 gtrec.rm_offset, gtrec.rm_flags);
2382 if (error)
2383 goto out_error;
2384 }
2385
2386 /* Point the cursor back to the left record and update. */
2387 error = xfs_rmap_lookup_eq(cur, ltrec.rm_startblock,
2388 ltrec.rm_blockcount, ltrec.rm_owner,
2389 ltrec.rm_offset, ltrec.rm_flags, &i);
2390 if (error)
2391 goto out_error;
2392 if (XFS_IS_CORRUPT(mp, i != 1)) {
2393 xfs_btree_mark_sick(cur);
2394 error = -EFSCORRUPTED;
2395 goto out_error;
2396 }
2397
2398 error = xfs_rmap_update(cur, <rec);
2399 if (error)
2400 goto out_error;
2401 } else if (have_gt &&
2402 bno + len == gtrec.rm_startblock &&
2403 offset + len == gtrec.rm_offset) {
2404 /*
2405 * Right edge contiguous, merge into right record.
2406 *
2407 * gtbno gtlen
2408 * Orig: |ooooooooo|
2409 * adding: |aaaaaaaaa|
2410 * Result: |rrrrrrrrrrrrrrrrrrr|
2411 * bno len
2412 */
2413 /* Delete the old record. */
2414 error = xfs_rmap_delete(cur, gtrec.rm_startblock,
2415 gtrec.rm_blockcount, gtrec.rm_owner,
2416 gtrec.rm_offset, gtrec.rm_flags);
2417 if (error)
2418 goto out_error;
2419
2420 /* Move the start and re-add it. */
2421 gtrec.rm_startblock = bno;
2422 gtrec.rm_blockcount += len;
2423 gtrec.rm_offset = offset;
2424 error = xfs_rmap_insert(cur, gtrec.rm_startblock,
2425 gtrec.rm_blockcount, gtrec.rm_owner,
2426 gtrec.rm_offset, gtrec.rm_flags);
2427 if (error)
2428 goto out_error;
2429 } else {
2430 /*
2431 * No contiguous edge with identical owner, insert
2432 * new record at current cursor position.
2433 */
2434 error = xfs_rmap_insert(cur, bno, len, owner, offset, flags);
2435 if (error)
2436 goto out_error;
2437 }
2438
2439 trace_xfs_rmap_map_done(cur, bno, len, unwritten, oinfo);
2440 out_error:
2441 if (error)
2442 trace_xfs_rmap_map_error(cur, error, _RET_IP_);
2443 return error;
2444 }
2445
2446 /* Insert a raw rmap into the rmapbt. */
2447 int
2448 xfs_rmap_map_raw(
2449 struct xfs_btree_cur *cur,
2450 struct xfs_rmap_irec *rmap)
2451 {
2452 struct xfs_owner_info oinfo;
2453
2454 xfs_owner_info_pack(&oinfo, rmap->rm_owner, rmap->rm_offset,
2455 rmap->rm_flags);
2456
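/*
 * Attr fork, bmbt, and unwritten mappings, as well as non-inode owners,
 * can never be shared, so use the simple mapper; written data fork
 * mappings may overlap with other owners on reflink filesystems and
 * need the shared variant.
 */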
2457 if ((rmap->rm_flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK |
2458 XFS_RMAP_UNWRITTEN)) ||
2459 XFS_RMAP_NON_INODE_OWNER(rmap->rm_owner))
2460 return xfs_rmap_map(cur, rmap->rm_startblock,
2461 rmap->rm_blockcount,
2462 rmap->rm_flags & XFS_RMAP_UNWRITTEN,
2463 &oinfo);
2464
2465 return xfs_rmap_map_shared(cur, rmap->rm_startblock,
2466 rmap->rm_blockcount,
2467 rmap->rm_flags & XFS_RMAP_UNWRITTEN,
2468 &oinfo);
2469 }
2470
2471 struct xfs_rmap_query_range_info {
2472 xfs_rmap_query_range_fn fn;
2473 void *priv;
2474 };
2475
2476 /* Format btree record and pass to our callback. */
2477 STATIC int
2478 xfs_rmap_query_range_helper(
2479 struct xfs_btree_cur *cur,
2480 const union xfs_btree_rec *rec,
2481 void *priv)
2482 {
2483 struct xfs_rmap_query_range_info *query = priv;
2484 struct xfs_rmap_irec irec;
2485 xfs_failaddr_t fa;
2486
2487 fa = xfs_rmap_btrec_to_irec(rec, &irec);
2488 if (!fa)
2489 fa = xfs_rmap_check_btrec(cur, &irec);
2490 if (fa)
2491 return xfs_rmap_complain_bad_rec(cur, fa, &irec);
2492
2493 return query->fn(cur, &irec, query->priv);
2494 }
2495
2496 /* Find all rmaps between two keys. */
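/*
 * The low and high keys bound the query; fields left at zero (low) or
 * all-ones (high) effectively act as wildcards.  Illustrative sketch
 * only (my_fn and my_priv are hypothetical):
 *
 *	struct xfs_rmap_irec	low = { .rm_startblock = bno };
 *	struct xfs_rmap_irec	high;
 *
 *	memset(&high, 0xFF, sizeof(high));
 *	high.rm_startblock = bno + len - 1;
 *	error = xfs_rmap_query_range(cur, &low, &high, my_fn, my_priv);
 *
 * The callback runs once per record in the range and may return a
 * negative errno (conventionally -ECANCELED) to stop the walk early.
 */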
2497 int
2498 xfs_rmap_query_range(
2499 struct xfs_btree_cur *cur,
2500 const struct xfs_rmap_irec *low_rec,
2501 const struct xfs_rmap_irec *high_rec,
2502 xfs_rmap_query_range_fn fn,
2503 void *priv)
2504 {
2505 union xfs_btree_irec low_brec = { .r = *low_rec };
2506 union xfs_btree_irec high_brec = { .r = *high_rec };
2507 struct xfs_rmap_query_range_info query = { .priv = priv, .fn = fn };
2508
2509 return xfs_btree_query_range(cur, &low_brec, &high_brec,
2510 xfs_rmap_query_range_helper, &query);
2511 }
2512
2513 /* Find all rmaps. */
2514 int
2515 xfs_rmap_query_all(
2516 struct xfs_btree_cur *cur,
2517 xfs_rmap_query_range_fn fn,
2518 void *priv)
2519 {
2520 struct xfs_rmap_query_range_info query;
2521
2522 query.priv = priv;
2523 query.fn = fn;
2524 return xfs_btree_query_all(cur, xfs_rmap_query_range_helper, &query);
2525 }
2526
2527 /* Commit an rmap operation into the ondisk tree. */
2528 int
2529 __xfs_rmap_finish_intent(
2530 struct xfs_btree_cur *rcur,
2531 enum xfs_rmap_intent_type op,
2532 xfs_agblock_t bno,
2533 xfs_extlen_t len,
2534 const struct xfs_owner_info *oinfo,
2535 bool unwritten)
2536 {
2537 switch (op) {
2538 case XFS_RMAP_ALLOC:
2539 case XFS_RMAP_MAP:
2540 return xfs_rmap_map(rcur, bno, len, unwritten, oinfo);
2541 case XFS_RMAP_MAP_SHARED:
2542 return xfs_rmap_map_shared(rcur, bno, len, unwritten, oinfo);
2543 case XFS_RMAP_FREE:
2544 case XFS_RMAP_UNMAP:
2545 return xfs_rmap_unmap(rcur, bno, len, unwritten, oinfo);
2546 case XFS_RMAP_UNMAP_SHARED:
2547 return xfs_rmap_unmap_shared(rcur, bno, len, unwritten, oinfo);
2548 case XFS_RMAP_CONVERT:
2549 return xfs_rmap_convert(rcur, bno, len, !unwritten, oinfo);
2550 case XFS_RMAP_CONVERT_SHARED:
2551 return xfs_rmap_convert_shared(rcur, bno, len, !unwritten,
2552 oinfo);
2553 default:
2554 ASSERT(0);
2555 return -EFSCORRUPTED;
2556 }
2557 }
2558
2559 /*
2560 * Process one of the deferred rmap operations. We pass back the
2561 * btree cursor to maintain our lock on the rmapbt between calls.
2562 * This saves time and eliminates a buffer deadlock between the
2563 * superblock and the AGF because we'll always grab them in the same
2564 * order.
2565 */
2566 int
2567 xfs_rmap_finish_one(
2568 struct xfs_trans *tp,
2569 struct xfs_rmap_intent *ri,
2570 struct xfs_btree_cur **pcur)
2571 {
2572 struct xfs_owner_info oinfo;
2573 struct xfs_mount *mp = tp->t_mountp;
2574 struct xfs_btree_cur *rcur = *pcur;
2575 struct xfs_buf *agbp = NULL;
2576 xfs_agblock_t bno;
2577 bool unwritten;
2578 int error = 0;
2579
2580 trace_xfs_rmap_deferred(mp, ri);
2581
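/* Error injection point so that tests can force a failure here. */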
2582 if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_RMAP_FINISH_ONE))
2583 return -EIO;
2584
2585 /*
2586 * If we haven't gotten a cursor or the cursor AG doesn't match
2587 * the startblock, get one now.
2588 */
2589 if (rcur != NULL && rcur->bc_ag.pag != ri->ri_pag) {
2590 xfs_btree_del_cursor(rcur, 0);
2591 rcur = NULL;
2592 *pcur = NULL;
2593 }
2594 if (rcur == NULL) {
2595 /*
2596 * Refresh the freelist before we start changing the
2597 * rmapbt, because a shape change could cause us to
2598 * allocate blocks.
2599 */
2600 error = xfs_free_extent_fix_freelist(tp, ri->ri_pag, &agbp);
2601 if (error) {
2602 xfs_ag_mark_sick(ri->ri_pag, XFS_SICK_AG_AGFL);
2603 return error;
2604 }
2605 if (XFS_IS_CORRUPT(tp->t_mountp, !agbp)) {
2606 xfs_ag_mark_sick(ri->ri_pag, XFS_SICK_AG_AGFL);
2607 return -EFSCORRUPTED;
2608 }
2609
2610 *pcur = rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, ri->ri_pag);
2611 }
2612
2613 xfs_rmap_ino_owner(&oinfo, ri->ri_owner, ri->ri_whichfork,
2614 ri->ri_bmap.br_startoff);
2615 unwritten = ri->ri_bmap.br_state == XFS_EXT_UNWRITTEN;
2616 bno = XFS_FSB_TO_AGBNO(rcur->bc_mp, ri->ri_bmap.br_startblock);
2617
2618 error = __xfs_rmap_finish_intent(rcur, ri->ri_type, bno,
2619 ri->ri_bmap.br_blockcount, &oinfo, unwritten);
2620 if (error)
2621 return error;
2622
2623 xfs_rmap_update_hook(tp, ri->ri_pag, ri->ri_type, bno,
2624 ri->ri_bmap.br_blockcount, unwritten, &oinfo);
2625 return 0;
2626 }
2627
2628 /*
2629 * Don't defer an rmap if we aren't an rmap filesystem.
2630 */
2631 static bool
2632 xfs_rmap_update_is_needed(
2633 struct xfs_mount *mp,
2634 int whichfork)
2635 {
2636 return xfs_has_rmapbt(mp) && whichfork != XFS_COW_FORK;
2637 }
2638
2639 /*
2640 * Record an rmap intent; the list is kept sorted first by AG and then by
2641 * increasing age.
2642 */
2643 static void
2644 __xfs_rmap_add(
2645 struct xfs_trans *tp,
2646 enum xfs_rmap_intent_type type,
2647 uint64_t owner,
2648 int whichfork,
2649 struct xfs_bmbt_irec *bmap)
2650 {
2651 struct xfs_rmap_intent *ri;
2652
2653 ri = kmem_cache_alloc(xfs_rmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);
2654 INIT_LIST_HEAD(&ri->ri_list);
2655 ri->ri_type = type;
2656 ri->ri_owner = owner;
2657 ri->ri_whichfork = whichfork;
2658 ri->ri_bmap = *bmap;
2659
2660 xfs_rmap_defer_add(tp, ri);
2661 }
2662
2663 /* Map an extent into a file. */
2664 void
2665 xfs_rmap_map_extent(
2666 struct xfs_trans *tp,
2667 struct xfs_inode *ip,
2668 int whichfork,
2669 struct xfs_bmbt_irec *PREV)
2670 {
2671 enum xfs_rmap_intent_type type = XFS_RMAP_MAP;
2672
2673 if (!xfs_rmap_update_is_needed(tp->t_mountp, whichfork))
2674 return;
2675
2676 if (whichfork != XFS_ATTR_FORK && xfs_is_reflink_inode(ip))
2677 type = XFS_RMAP_MAP_SHARED;
2678
2679 __xfs_rmap_add(tp, type, ip->i_ino, whichfork, PREV);
2680 }
2681
2682 /* Unmap an extent out of a file. */
2683 void
2684 xfs_rmap_unmap_extent(
2685 struct xfs_trans *tp,
2686 struct xfs_inode *ip,
2687 int whichfork,
2688 struct xfs_bmbt_irec *PREV)
2689 {
2690 enum xfs_rmap_intent_type type = XFS_RMAP_UNMAP;
2691
2692 if (!xfs_rmap_update_is_needed(tp->t_mountp, whichfork))
2693 return;
2694
2695 if (whichfork != XFS_ATTR_FORK && xfs_is_reflink_inode(ip))
2696 type = XFS_RMAP_UNMAP_SHARED;
2697
2698 __xfs_rmap_add(tp, type, ip->i_ino, whichfork, PREV);
2699 }
2700
2701 /*
2702 * Convert a data fork extent from unwritten to real or vice versa.
2703 *
2704 * Note that tp can be NULL here as no transaction is used for COW fork
2705 * unwritten conversion.
2706 */
2707 void
2708 xfs_rmap_convert_extent(
2709 struct xfs_mount *mp,
2710 struct xfs_trans *tp,
2711 struct xfs_inode *ip,
2712 int whichfork,
2713 struct xfs_bmbt_irec *PREV)
2714 {
2715 enum xfs_rmap_intent_type type = XFS_RMAP_CONVERT;
2716
2717 if (!xfs_rmap_update_is_needed(mp, whichfork))
2718 return;
2719
2720 if (whichfork != XFS_ATTR_FORK && xfs_is_reflink_inode(ip))
2721 type = XFS_RMAP_CONVERT_SHARED;
2722
2723 __xfs_rmap_add(tp, type, ip->i_ino, whichfork, PREV);
2724 }
2725
2726 /* Schedule the creation of an rmap for non-file data. */
2727 void
2728 xfs_rmap_alloc_extent(
2729 struct xfs_trans *tp,
2730 xfs_agnumber_t agno,
2731 xfs_agblock_t bno,
2732 xfs_extlen_t len,
2733 uint64_t owner)
2734 {
2735 struct xfs_bmbt_irec bmap;
2736
2737 if (!xfs_rmap_update_is_needed(tp->t_mountp, XFS_DATA_FORK))
2738 return;
2739
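/* Non-file metadata has no meaningful file offset or unwritten state. */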
2740 bmap.br_startblock = XFS_AGB_TO_FSB(tp->t_mountp, agno, bno);
2741 bmap.br_blockcount = len;
2742 bmap.br_startoff = 0;
2743 bmap.br_state = XFS_EXT_NORM;
2744
2745 __xfs_rmap_add(tp, XFS_RMAP_ALLOC, owner, XFS_DATA_FORK, &bmap);
2746 }
2747
2748 /* Schedule the deletion of an rmap for non-file data. */
2749 void
2750 xfs_rmap_free_extent(
2751 struct xfs_trans *tp,
2752 xfs_agnumber_t agno,
2753 xfs_agblock_t bno,
2754 xfs_extlen_t len,
2755 uint64_t owner)
2756 {
2757 struct xfs_bmbt_irec bmap;
2758
2759 if (!xfs_rmap_update_is_needed(tp->t_mountp, XFS_DATA_FORK))
2760 return;
2761
2762 bmap.br_startblock = XFS_AGB_TO_FSB(tp->t_mountp, agno, bno);
2763 bmap.br_blockcount = len;
2764 bmap.br_startoff = 0;
2765 bmap.br_state = XFS_EXT_NORM;
2766
2767 __xfs_rmap_add(tp, XFS_RMAP_FREE, owner, XFS_DATA_FORK, &bmap);
2768 }
2769
2770 /* Compare rmap records. Returns -1 if a < b, 1 if a > b, and 0 if equal. */
2771 int
2772 xfs_rmap_compare(
2773 const struct xfs_rmap_irec *a,
2774 const struct xfs_rmap_irec *b)
2775 {
2776 __u64 oa;
2777 __u64 ob;
2778
2779 oa = xfs_rmap_irec_offset_pack(a);
2780 ob = xfs_rmap_irec_offset_pack(b);
2781
2782 if (a->rm_startblock < b->rm_startblock)
2783 return -1;
2784 else if (a->rm_startblock > b->rm_startblock)
2785 return 1;
2786 else if (a->rm_owner < b->rm_owner)
2787 return -1;
2788 else if (a->rm_owner > b->rm_owner)
2789 return 1;
2790 else if (oa < ob)
2791 return -1;
2792 else if (oa > ob)
2793 return 1;
2794 else
2795 return 0;
2796 }
2797
2798 /*
2799 * Scan the physical storage part of the keyspace of the reverse mapping index
2800 * and tell us if the area has no records, is fully mapped by records, or is
2801 * partially filled.
2802 */
2803 int
2804 xfs_rmap_has_records(
2805 struct xfs_btree_cur *cur,
2806 xfs_agblock_t bno,
2807 xfs_extlen_t len,
2808 enum xbtree_recpacking *outcome)
2809 {
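/*
 * Mask the key comparison to rm_startblock only, so that coverage is
 * judged purely by physical blocks regardless of owner or offset.
 */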
2810 union xfs_btree_key mask = {
2811 .rmap.rm_startblock = cpu_to_be32(-1U),
2812 };
2813 union xfs_btree_irec low;
2814 union xfs_btree_irec high;
2815
2816 memset(&low, 0, sizeof(low));
2817 low.r.rm_startblock = bno;
2818 memset(&high, 0xFF, sizeof(high));
2819 high.r.rm_startblock = bno + len - 1;
2820
2821 return xfs_btree_has_records(cur, &low, &high, &mask, outcome);
2822 }
2823
2824 struct xfs_rmap_ownercount {
2825 /* Owner that we're looking for. */
2826 struct xfs_rmap_irec good;
2827
2828 /* rmap search keys */
2829 struct xfs_rmap_irec low;
2830 struct xfs_rmap_irec high;
2831
2832 struct xfs_rmap_matches *results;
2833
2834 /* Stop early if we find a nonmatch? */
2835 bool stop_on_nonmatch;
2836 };
2837
2838 /* Does this rmap represent space that can have multiple owners? */
2839 static inline bool
2840 xfs_rmap_shareable(
2841 struct xfs_mount *mp,
2842 const struct xfs_rmap_irec *rmap)
2843 {
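/* Only data fork blocks of inodes on a reflink filesystem can be shared. */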
2844 if (!xfs_has_reflink(mp))
2845 return false;
2846 if (XFS_RMAP_NON_INODE_OWNER(rmap->rm_owner))
2847 return false;
2848 if (rmap->rm_flags & (XFS_RMAP_ATTR_FORK |
2849 XFS_RMAP_BMBT_BLOCK))
2850 return false;
2851 return true;
2852 }
2853
2854 static inline void
2855 xfs_rmap_ownercount_init(
2856 struct xfs_rmap_ownercount *roc,
2857 xfs_agblock_t bno,
2858 xfs_extlen_t len,
2859 const struct xfs_owner_info *oinfo,
2860 struct xfs_rmap_matches *results)
2861 {
2862 memset(roc, 0, sizeof(*roc));
2863 roc->results = results;
2864
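/* Query keys span only the physical range; owner and offset are wildcards. */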
2865 roc->low.rm_startblock = bno;
2866 memset(&roc->high, 0xFF, sizeof(roc->high));
2867 roc->high.rm_startblock = bno + len - 1;
2868
2869 memset(results, 0, sizeof(*results));
2870 roc->good.rm_startblock = bno;
2871 roc->good.rm_blockcount = len;
2872 roc->good.rm_owner = oinfo->oi_owner;
2873 roc->good.rm_offset = oinfo->oi_offset;
2874 if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK)
2875 roc->good.rm_flags |= XFS_RMAP_ATTR_FORK;
2876 if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK)
2877 roc->good.rm_flags |= XFS_RMAP_BMBT_BLOCK;
2878 }
2879
2880 /* Figure out if this is a match for the owner. */
2881 STATIC int
2882 xfs_rmap_count_owners_helper(
2883 struct xfs_btree_cur *cur,
2884 const struct xfs_rmap_irec *rec,
2885 void *priv)
2886 {
2887 struct xfs_rmap_ownercount *roc = priv;
2888 struct xfs_rmap_irec check = *rec;
2889 unsigned int keyflags;
2890 bool filedata;
2891 int64_t delta;
2892
2893 filedata = !XFS_RMAP_NON_INODE_OWNER(check.rm_owner) &&
2894 !(check.rm_flags & XFS_RMAP_BMBT_BLOCK);
2895
2896 /* Trim the part of check that comes before the comparison range. */
2897 delta = (int64_t)roc->good.rm_startblock - check.rm_startblock;
2898 if (delta > 0) {
2899 check.rm_startblock += delta;
2900 check.rm_blockcount -= delta;
2901 if (filedata)
2902 check.rm_offset += delta;
2903 }
2904
2905 /* Trim the part of check that comes after the comparison range. */
2906 delta = (check.rm_startblock + check.rm_blockcount) -
2907 (roc->good.rm_startblock + roc->good.rm_blockcount);
2908 if (delta > 0)
2909 check.rm_blockcount -= delta;
2910
2911 /* Don't care about unwritten status for establishing ownership. */
2912 keyflags = check.rm_flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK);
2913
2914 if (check.rm_startblock == roc->good.rm_startblock &&
2915 check.rm_blockcount == roc->good.rm_blockcount &&
2916 check.rm_owner == roc->good.rm_owner &&
2917 check.rm_offset == roc->good.rm_offset &&
2918 keyflags == roc->good.rm_flags) {
2919 roc->results->matches++;
2920 } else {
2921 roc->results->non_owner_matches++;
2922 if (xfs_rmap_shareable(cur->bc_mp, &roc->good) ^
2923 xfs_rmap_shareable(cur->bc_mp, &check))
2924 roc->results->bad_non_owner_matches++;
2925 }
2926
2927 if (roc->results->non_owner_matches && roc->stop_on_nonmatch)
2928 return -ECANCELED;
2929
2930 return 0;
2931 }
2932
2933 /* Count the number of owners and non-owners of this range of blocks. */
2934 int
2935 xfs_rmap_count_owners(
2936 struct xfs_btree_cur *cur,
2937 xfs_agblock_t bno,
2938 xfs_extlen_t len,
2939 const struct xfs_owner_info *oinfo,
2940 struct xfs_rmap_matches *results)
2941 {
2942 struct xfs_rmap_ownercount roc;
2943 int error;
2944
2945 xfs_rmap_ownercount_init(&roc, bno, len, oinfo, results);
2946 error = xfs_rmap_query_range(cur, &roc.low, &roc.high,
2947 xfs_rmap_count_owners_helper, &roc);
2948 if (error)
2949 return error;
2950
2951 /*
2952 * There can't be any non-owner rmaps that conflict with the given
2953 * owner if we didn't find any rmaps matching the owner.
2954 */
2955 if (!results->matches)
2956 results->bad_non_owner_matches = 0;
2957
2958 return 0;
2959 }
2960
2961 /*
2962 * Given an extent and some owner info, can we find records overlapping
2963 * the extent whose owner info does not match the given owner?
2964 */
2965 int
2966 xfs_rmap_has_other_keys(
2967 struct xfs_btree_cur *cur,
2968 xfs_agblock_t bno,
2969 xfs_extlen_t len,
2970 const struct xfs_owner_info *oinfo,
2971 bool *has_other)
2972 {
2973 struct xfs_rmap_matches res;
2974 struct xfs_rmap_ownercount roc;
2975 int error;
2976
2977 xfs_rmap_ownercount_init(&roc, bno, len, oinfo, &res);
2978 roc.stop_on_nonmatch = true;
2979
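/*
 * The helper bails out with -ECANCELED as soon as it finds a record
 * that does not belong to the given owner.
 */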
2980 error = xfs_rmap_query_range(cur, &roc.low, &roc.high,
2981 xfs_rmap_count_owners_helper, &roc);
2982 if (error == -ECANCELED) {
2983 *has_other = true;
2984 return 0;
2985 }
2986 if (error)
2987 return error;
2988
2989 *has_other = false;
2990 return 0;
2991 }
2992
2993 const struct xfs_owner_info XFS_RMAP_OINFO_SKIP_UPDATE = {
2994 .oi_owner = XFS_RMAP_OWN_NULL,
2995 };
2996 const struct xfs_owner_info XFS_RMAP_OINFO_ANY_OWNER = {
2997 .oi_owner = XFS_RMAP_OWN_UNKNOWN,
2998 };
2999 const struct xfs_owner_info XFS_RMAP_OINFO_FS = {
3000 .oi_owner = XFS_RMAP_OWN_FS,
3001 };
3002 const struct xfs_owner_info XFS_RMAP_OINFO_LOG = {
3003 .oi_owner = XFS_RMAP_OWN_LOG,
3004 };
3005 const struct xfs_owner_info XFS_RMAP_OINFO_AG = {
3006 .oi_owner = XFS_RMAP_OWN_AG,
3007 };
3008 const struct xfs_owner_info XFS_RMAP_OINFO_INOBT = {
3009 .oi_owner = XFS_RMAP_OWN_INOBT,
3010 };
3011 const struct xfs_owner_info XFS_RMAP_OINFO_INODES = {
3012 .oi_owner = XFS_RMAP_OWN_INODES,
3013 };
3014 const struct xfs_owner_info XFS_RMAP_OINFO_REFC = {
3015 .oi_owner = XFS_RMAP_OWN_REFC,
3016 };
3017 const struct xfs_owner_info XFS_RMAP_OINFO_COW = {
3018 .oi_owner = XFS_RMAP_OWN_COW,
3019 };
3020
3021 int __init
3022 xfs_rmap_intent_init_cache(void)
3023 {
3024 xfs_rmap_intent_cache = kmem_cache_create("xfs_rmap_intent",
3025 sizeof(struct xfs_rmap_intent),
3026 0, 0, NULL);
3027
3028 return xfs_rmap_intent_cache != NULL ? 0 : -ENOMEM;
3029 }
3030
3031 void
3032 xfs_rmap_intent_destroy_cache(void)
3033 {
3034 kmem_cache_destroy(xfs_rmap_intent_cache);
3035 xfs_rmap_intent_cache = NULL;
3036 }
3037