// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010, 2023 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_discard.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_health.h"
#include "xfs_rtbitmap.h"

/*
 * Notes on an efficient, low latency fstrim algorithm
 *
 * We need to walk the filesystem free space and issue discards on the free
 * space that meets the search criteria (size and location). We cannot issue
 * discards on extents that might be in use, or are so recently in use they are
 * still marked as busy. To serialise against extent state changes whilst we
 * are gathering extents to trim, we must hold the AGF lock to lock out other
 * allocations and extent free operations that might change extent state.
 *
 * However, we cannot just hold the AGF for the entire AG free space walk whilst
 * we issue discards on each free space that is found. Storage devices can have
 * extremely slow discard implementations (e.g. ceph RBD) and so walking a
 * couple of million free extents and issuing synchronous discards on each
 * extent can take a *long* time. Whilst we are doing this walk, nothing else
 * can access the AGF, and we can stall transactions and hence the log whilst
 * modifications wait for the AGF lock to be released. This can lead to hung
 * tasks kicking the hung task timer and rebooting the system. This is bad.
 *
 * Hence we need to take a leaf from the bulkstat playbook. It takes the AGI
 * lock, gathers a range of inode cluster buffers that are allocated, drops the
 * AGI lock and then reads all the inode cluster buffers and processes them. It
 * loops doing this, using a cursor to keep track of where it is up to in the AG
 * for each iteration to restart the INOBT lookup from.
 *
 * We can't do this exactly with free space - once we drop the AGF lock, the
 * state of the free extent is out of our control and we cannot run a discard
 * safely on it in this situation. Unless, of course, we've marked the free
 * extent as busy and undergoing a discard operation whilst we held the AGF
 * locked.
 *
 * This is exactly how online discard works - free extents are marked busy when
 * they are freed, and once the extent free has been committed to the journal,
 * the busy extent record is marked as "undergoing discard" and the discard is
 * then issued on the free extent. Once the discard completes, the busy extent
 * record is removed and the extent is able to be allocated again.
 *
 * In the context of fstrim, if we find a free extent we need to discard, we
 * don't have to discard it immediately. All we need to do is record that free
 * extent as being busy and under discard, and all the allocation routines will
 * now avoid trying to allocate it. Hence if we mark the extent as busy under
 * the AGF lock, we can safely discard it without holding the AGF lock because
 * nothing will attempt to allocate that free space until the discard completes.
 *
 * This also allows us to issue discards asynchronously like we do with online
 * discard, and so for fast devices fstrim will run much faster as we can have
 * multiple discard operations in flight at once, as well as pipeline the free
 * extent search so that it overlaps with in-flight discard IO.
 */

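/*
 * Workqueue used to defer discard bio completion processing to task context;
 * see xfs_discard_endio(), which punts busy extent clearing here so that
 * pagb_lock does not need to be IRQ safe.
 */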
struct workqueue_struct *xfs_discard_wq;

static void
xfs_discard_endio_work(
	struct work_struct	*work)
{
	struct xfs_busy_extents	*extents =
		container_of(work, struct xfs_busy_extents, endio_work);

	xfs_extent_busy_clear(extents->mount, &extents->extent_list, false);
	kfree(extents->owner);
}

/*
 * Queue up the actual completion to a thread to avoid IRQ-safe locking for
 * pagb_lock.
 */
static void
xfs_discard_endio(
	struct bio		*bio)
{
	struct xfs_busy_extents	*extents = bio->bi_private;

	INIT_WORK(&extents->endio_work, xfs_discard_endio_work);
	queue_work(xfs_discard_wq, &extents->endio_work);
	bio_put(bio);
}

/*
 * Walk the discard list and issue discards on all the busy extents in the
 * list. We plug and chain the bios so that we only need a single completion
 * call to clear all the busy extents once the discards are complete.
 */
int
xfs_discard_extents(
	struct xfs_mount	*mp,
	struct xfs_busy_extents	*extents)
{
	struct xfs_extent_busy	*busyp;
	struct bio		*bio = NULL;
	struct blk_plug		plug;
	int			error = 0;

	blk_start_plug(&plug);
	list_for_each_entry(busyp, &extents->extent_list, list) {
		trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
				busyp->length);

		error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
				XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
				XFS_FSB_TO_BB(mp, busyp->length),
				GFP_KERNEL, &bio);
		if (error && error != -EOPNOTSUPP) {
			xfs_info(mp,
				"discard failed for extent [0x%llx,%u], error %d",
				(unsigned long long)busyp->bno,
				busyp->length,
				error);
			break;
		}
	}

	if (bio) {
		bio->bi_private = extents;
		bio->bi_end_io = xfs_discard_endio;
		submit_bio(bio);
	} else {
		xfs_discard_endio_work(&extents->endio_work);
	}
	blk_finish_plug(&plug);

	return error;
}

struct xfs_trim_cur {
	xfs_agblock_t	start;
	xfs_extlen_t	count;
	xfs_agblock_t	end;
	xfs_extlen_t	minlen;
	bool		by_bno;
};

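/*
 * Gather up to a batch worth of free extents from the by-bno or by-size btree
 * while holding the AGF locked. Extents that fit the trim criteria are marked
 * busy and "under discard" so they cannot be reallocated, then added to the
 * list for the caller to discard once the AGF has been unlocked. The cursor is
 * updated so the next call resumes where this one left off; tcur->count is set
 * to zero when there is nothing left to search.
 */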
static int
xfs_trim_gather_extents(
	struct xfs_perag	*pag,
	struct xfs_trim_cur	*tcur,
	struct xfs_busy_extents	*extents,
	uint64_t		*blocks_trimmed)
{
	struct xfs_mount	*mp = pag->pag_mount;
	struct xfs_trans	*tp;
	struct xfs_btree_cur	*cur;
	struct xfs_buf		*agbp;
	int			error;
	int			i;
	int			batch = 100;

	/*
	 * Force out the log. This means any transactions that might have freed
	 * space before we take the AGF buffer lock are now on disk, and the
	 * volatile disk cache is flushed.
	 */
	xfs_log_force(mp, XFS_LOG_SYNC);

	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
	if (error)
		goto out_trans_cancel;

	if (tcur->by_bno) {
		/* sub-AG discard request always starts at tcur->start */
		cur = xfs_bnobt_init_cursor(mp, tp, agbp, pag);
		error = xfs_alloc_lookup_le(cur, tcur->start, 0, &i);
		if (!error && !i)
			error = xfs_alloc_lookup_ge(cur, tcur->start, 0, &i);
	} else if (tcur->start == 0) {
		/* first time through a by-len starts with max length */
		cur = xfs_cntbt_init_cursor(mp, tp, agbp, pag);
		error = xfs_alloc_lookup_ge(cur, 0, tcur->count, &i);
	} else {
		/* nth time through a by-len starts where we left off */
		cur = xfs_cntbt_init_cursor(mp, tp, agbp, pag);
		error = xfs_alloc_lookup_le(cur, tcur->start, tcur->count, &i);
	}
	if (error)
		goto out_del_cursor;
	if (i == 0) {
		/* nothing of that length left in the AG, we are done */
		tcur->count = 0;
		goto out_del_cursor;
	}

	/*
	 * Loop until we are done with all extents that are large
	 * enough to be worth discarding or we hit batch limits.
	 */
	while (i) {
		xfs_agblock_t	fbno;
		xfs_extlen_t	flen;

		error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
		if (error)
			break;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			break;
		}

		if (--batch <= 0) {
			/*
			 * Update the cursor to point at this extent so we
			 * restart the next batch from this extent.
			 */
			tcur->start = fbno;
			tcur->count = flen;
			break;
		}

		/*
		 * If the extent is entirely outside of the range we are
		 * supposed to trim, skip it. Do not bother to trim down
		 * partially overlapping ranges for now.
		 */
		if (fbno + flen < tcur->start) {
			trace_xfs_discard_exclude(mp, pag->pag_agno, fbno, flen);
			goto next_extent;
		}
		if (fbno > tcur->end) {
			trace_xfs_discard_exclude(mp, pag->pag_agno, fbno, flen);
			if (tcur->by_bno) {
				tcur->count = 0;
				break;
			}
			goto next_extent;
		}

		/* Trim the extent returned to the range we want. */
		if (fbno < tcur->start) {
			flen -= tcur->start - fbno;
			fbno = tcur->start;
		}
		if (fbno + flen > tcur->end + 1)
			flen = tcur->end - fbno + 1;

		/* Too small? Give up. */
		if (flen < tcur->minlen) {
			trace_xfs_discard_toosmall(mp, pag->pag_agno, fbno, flen);
			if (tcur->by_bno)
				goto next_extent;
			tcur->count = 0;
			break;
		}

		/*
		 * If any blocks in the range are still busy, skip the
		 * discard and try again the next time.
		 */
		if (xfs_extent_busy_search(mp, pag, fbno, flen)) {
			trace_xfs_discard_busy(mp, pag->pag_agno, fbno, flen);
			goto next_extent;
		}

		xfs_extent_busy_insert_discard(pag, fbno, flen,
				&extents->extent_list);
		*blocks_trimmed += flen;
next_extent:
		if (tcur->by_bno)
			error = xfs_btree_increment(cur, 0, &i);
		else
			error = xfs_btree_decrement(cur, 0, &i);
		if (error)
			break;

		/*
		 * If there are no more records in the tree, we are done. Set
		 * the cursor block count to 0 to indicate to the caller that
		 * there are no more extents to search.
		 */
		if (i == 0)
			tcur->count = 0;
	}

	/*
	 * If there was an error, release all the gathered busy extents because
	 * we aren't going to issue a discard on them any more.
	 */
	if (error)
		xfs_extent_busy_clear(mp, &extents->extent_list, false);
out_del_cursor:
	xfs_btree_del_cursor(cur, error);
out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}

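/*
 * Stop the trim loop early if a fatal signal is pending or the task is being
 * frozen (e.g. for system suspend), so a long-running FITRIM does not block
 * those operations indefinitely.
 */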
static bool
xfs_trim_should_stop(void)
{
	return fatal_signal_pending(current) || freezing(current);
}

/*
 * Iterate the free list gathering extents and discarding them. We need a
 * cursor for the repeated iteration of the gather/discard loop, so use the
 * longest extent we found in the last batch as the key to start the next.
 */
static int
xfs_trim_perag_extents(
	struct xfs_perag	*pag,
	xfs_agblock_t		start,
	xfs_agblock_t		end,
	xfs_extlen_t		minlen,
	uint64_t		*blocks_trimmed)
{
	struct xfs_trim_cur	tcur = {
		.start		= start,
		.count		= pag->pagf_longest,
		.end		= end,
		.minlen		= minlen,
	};
	int			error = 0;

	if (start != 0 || end != pag->block_count)
		tcur.by_bno = true;

	do {
		struct xfs_busy_extents	*extents;

		extents = kzalloc(sizeof(*extents), GFP_KERNEL);
		if (!extents) {
			error = -ENOMEM;
			break;
		}

		extents->mount = pag->pag_mount;
		extents->owner = extents;
		INIT_LIST_HEAD(&extents->extent_list);

		error = xfs_trim_gather_extents(pag, &tcur, extents,
				blocks_trimmed);
		if (error) {
			kfree(extents);
			break;
		}

		/*
		 * We hand the extent list to the discard function here so the
		 * discarded extents can be removed from the busy extent list.
		 * This allows the discards to run asynchronously with
		 * gathering the next round of extents to discard.
		 *
		 * However, we must ensure that we do not reference the extent
		 * list after this function call, as it may have been freed by
		 * the time control returns to us.
		 */
		error = xfs_discard_extents(pag->pag_mount, extents);
		if (error)
			break;

		if (xfs_trim_should_stop())
			break;

	} while (tcur.count != 0);

	return error;
}

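/*
 * Trim the free space on the data device that falls within the given daddr
 * range, walking each AG that the range touches and clamping the per-AG
 * request to the AG boundaries.
 */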
static int
xfs_trim_datadev_extents(
	struct xfs_mount	*mp,
	xfs_daddr_t		start,
	xfs_daddr_t		end,
	xfs_extlen_t		minlen,
	uint64_t		*blocks_trimmed)
{
	xfs_agnumber_t		start_agno, end_agno;
	xfs_agblock_t		start_agbno, end_agbno;
	xfs_daddr_t		ddev_end;
	struct xfs_perag	*pag;
	int			last_error = 0, error;

	ddev_end = min_t(xfs_daddr_t, end,
			XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1);

	start_agno = xfs_daddr_to_agno(mp, start);
	start_agbno = xfs_daddr_to_agbno(mp, start);
	end_agno = xfs_daddr_to_agno(mp, ddev_end);
	end_agbno = xfs_daddr_to_agbno(mp, ddev_end);

	for_each_perag_range(mp, start_agno, end_agno, pag) {
		xfs_agblock_t	agend = pag->block_count;

		if (start_agno == end_agno)
			agend = end_agbno;
		error = xfs_trim_perag_extents(pag, start_agbno, agend, minlen,
				blocks_trimmed);
		if (error)
			last_error = error;

		if (xfs_trim_should_stop()) {
			xfs_perag_rele(pag);
			break;
		}
		start_agbno = 0;
	}

	return last_error;
}

#ifdef CONFIG_XFS_RT
struct xfs_trim_rtdev {
	/* list of rt extents to free */
	struct list_head	extent_list;

	/* pointer to count of blocks trimmed */
	uint64_t		*blocks_trimmed;

	/* minimum length that caller allows us to trim */
	xfs_rtblock_t		minlen_fsb;

	/* restart point for the rtbitmap walk */
	xfs_rtxnum_t		restart_rtx;

	/* stopping point for the current rtbitmap walk */
	xfs_rtxnum_t		stop_rtx;
};

struct xfs_rtx_busy {
	struct list_head	list;
	xfs_rtblock_t		bno;
	xfs_rtblock_t		length;
};

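/*
 * Free all entries on the gathered rt extent list. This runs both once the
 * discard bios for a batch have been built and on error paths where no
 * discard will be issued.
 */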
static void
xfs_discard_free_rtdev_extents(
	struct xfs_trim_rtdev	*tr)
{
	struct xfs_rtx_busy	*busyp, *n;

	list_for_each_entry_safe(busyp, n, &tr->extent_list, list) {
		list_del_init(&busyp->list);
		kfree(busyp);
	}
}

/*
 * Walk the gathered rt extent list and issue discards on each extent. We plug
 * and chain the bios so that we only need a single submit_bio_wait() call to
 * wait for all the discards to complete.
 */
static int
xfs_discard_rtdev_extents(
	struct xfs_mount	*mp,
	struct xfs_trim_rtdev	*tr)
{
	struct block_device	*bdev = mp->m_rtdev_targp->bt_bdev;
	struct xfs_rtx_busy	*busyp;
	struct bio		*bio = NULL;
	struct blk_plug		plug;
	xfs_rtblock_t		start = NULLRTBLOCK, length = 0;
	int			error = 0;

	blk_start_plug(&plug);
	list_for_each_entry(busyp, &tr->extent_list, list) {
		if (start == NULLRTBLOCK)
			start = busyp->bno;
		length += busyp->length;

		trace_xfs_discard_rtextent(mp, busyp->bno, busyp->length);

		error = __blkdev_issue_discard(bdev,
				XFS_FSB_TO_BB(mp, busyp->bno),
				XFS_FSB_TO_BB(mp, busyp->length),
				GFP_NOFS, &bio);
		if (error)
			break;
	}
	xfs_discard_free_rtdev_extents(tr);

	if (bio) {
		error = submit_bio_wait(bio);
		if (error == -EOPNOTSUPP)
			error = 0;
		if (error)
			xfs_info(mp,
				"discard failed for rtextent [0x%llx,%llu], error %d",
				(unsigned long long)start,
				(unsigned long long)length,
				error);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return error;
}

static int
xfs_trim_gather_rtextent(
	struct xfs_mount		*mp,
	struct xfs_trans		*tp,
	const struct xfs_rtalloc_rec	*rec,
	void				*priv)
{
	struct xfs_trim_rtdev		*tr = priv;
	struct xfs_rtx_busy		*busyp;
	xfs_rtblock_t			rbno, rlen;

	if (rec->ar_startext > tr->stop_rtx) {
		/*
		 * If we've scanned a large number of rtbitmap blocks, update
		 * the cursor to point at this extent so we restart the next
		 * batch from this extent.
		 */
		tr->restart_rtx = rec->ar_startext;
		return -ECANCELED;
	}

	rbno = xfs_rtx_to_rtb(mp, rec->ar_startext);
	rlen = xfs_rtx_to_rtb(mp, rec->ar_extcount);

	/* Ignore too small. */
	if (rlen < tr->minlen_fsb) {
		trace_xfs_discard_rttoosmall(mp, rbno, rlen);
		return 0;
	}

	busyp = kzalloc(sizeof(struct xfs_rtx_busy), GFP_KERNEL);
	if (!busyp)
		return -ENOMEM;

	busyp->bno = rbno;
	busyp->length = rlen;
	INIT_LIST_HEAD(&busyp->list);
	list_add_tail(&busyp->list, &tr->extent_list);
	*tr->blocks_trimmed += rlen;

	tr->restart_rtx = rec->ar_startext + rec->ar_extcount;
	return 0;
}

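/*
 * Trim the realtime device. The incoming daddr range is shifted down to the
 * rt device and converted to rt extents, then the free ranges in the rtbitmap
 * are walked in batches under the rtbitmap lock, with each batch discarded
 * synchronously before the walk resumes from the recorded restart point.
 */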
static int
xfs_trim_rtdev_extents(
	struct xfs_mount	*mp,
	xfs_daddr_t		start,
	xfs_daddr_t		end,
	xfs_daddr_t		minlen,
	uint64_t		*blocks_trimmed)
{
	struct xfs_rtalloc_rec	low = { };
	struct xfs_rtalloc_rec	high = { };
	struct xfs_trim_rtdev	tr = {
		.blocks_trimmed	= blocks_trimmed,
		.minlen_fsb	= XFS_BB_TO_FSB(mp, minlen),
	};
	struct xfs_trans	*tp;
	xfs_daddr_t		rtdev_daddr;
	int			error;

	INIT_LIST_HEAD(&tr.extent_list);

	/* Shift the start and end downwards to match the rt device. */
	rtdev_daddr = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (start > rtdev_daddr)
		start -= rtdev_daddr;
	else
		start = 0;

	if (end <= rtdev_daddr)
		return 0;
	end -= rtdev_daddr;

	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	end = min_t(xfs_daddr_t, end,
			XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks) - 1);

	/* Convert the rt blocks to rt extents */
	low.ar_startext = xfs_rtb_to_rtxup(mp, XFS_BB_TO_FSB(mp, start));
	high.ar_startext = xfs_rtb_to_rtx(mp, XFS_BB_TO_FSBT(mp, end));

	/*
	 * Walk the free ranges between low and high. The query_range function
	 * trims the extents returned.
	 */
	do {
		tr.stop_rtx = low.ar_startext + (mp->m_sb.sb_blocksize * NBBY);
		xfs_rtbitmap_lock_shared(mp, XFS_RBMLOCK_BITMAP);
		error = xfs_rtalloc_query_range(mp, tp, &low, &high,
				xfs_trim_gather_rtextent, &tr);

		if (error == -ECANCELED)
			error = 0;
		if (error) {
			xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
			xfs_discard_free_rtdev_extents(&tr);
			break;
		}

		if (list_empty(&tr.extent_list)) {
			xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
			break;
		}

		error = xfs_discard_rtdev_extents(mp, &tr);
		xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
		if (error)
			break;

		low.ar_startext = tr.restart_rtx;
	} while (!xfs_trim_should_stop() && low.ar_startext <= high.ar_startext);

	xfs_trans_cancel(tp);
	return error;
}
#else
# define xfs_trim_rtdev_extents(m,s,e,n,b)	(-EOPNOTSUPP)
#endif /* CONFIG_XFS_RT */

/*
 * Trim a range of the filesystem.
 *
 * Note: the parameters passed from userspace are byte ranges into the
 * filesystem, which do not match the format we use for filesystem block
 * addressing. FSB addressing is sparse (AGNO|AGBNO), while the incoming format
 * is a linear address range. Hence we need to use DADDR based conversions and
 * comparisons for determining the correct offset and regions to trim.
 *
 * The realtime device is mapped into the FITRIM "address space" immediately
 * after the data device.
 */
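/*
 * For reference, userspace reaches this through the FITRIM ioctl; a minimal
 * (hypothetical) caller trimming the whole filesystem looks roughly like:
 *
 *	struct fstrim_range range = {
 *		.start	= 0,
 *		.len	= ULLONG_MAX,
 *		.minlen	= 0,
 *	};
 *
 *	if (ioctl(fd, FITRIM, &range) == 0)
 *		printf("trimmed %llu bytes\n", range.len);
 *
 * On return, range.len is rewritten with the number of bytes actually
 * trimmed.
 */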
int
xfs_ioc_trim(
	struct xfs_mount		*mp,
	struct fstrim_range __user	*urange)
{
	unsigned int		granularity =
		bdev_discard_granularity(mp->m_ddev_targp->bt_bdev);
	struct block_device	*rt_bdev = NULL;
	struct fstrim_range	range;
	xfs_daddr_t		start, end;
	xfs_extlen_t		minlen;
	xfs_rfsblock_t		max_blocks;
	uint64_t		blocks_trimmed = 0;
	int			error, last_error = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (mp->m_rtdev_targp &&
	    bdev_max_discard_sectors(mp->m_rtdev_targp->bt_bdev))
		rt_bdev = mp->m_rtdev_targp->bt_bdev;
	if (!bdev_max_discard_sectors(mp->m_ddev_targp->bt_bdev) && !rt_bdev)
		return -EOPNOTSUPP;

	if (rt_bdev)
		granularity = max(granularity,
				bdev_discard_granularity(rt_bdev));

	/*
	 * We haven't recovered the log, so we cannot use our bnobt-guided
	 * storage zapping commands.
	 */
	if (xfs_has_norecovery(mp))
		return -EROFS;

	if (copy_from_user(&range, urange, sizeof(range)))
		return -EFAULT;

	range.minlen = max_t(u64, granularity, range.minlen);
	minlen = XFS_B_TO_FSB(mp, range.minlen);

	/*
	 * Truncating down the len isn't actually quite correct, but using
	 * BBTOB would mean we trivially get overflows for values
	 * of ULLONG_MAX or slightly lower.  And ULLONG_MAX is the default
	 * used by the fstrim application.  In the end it really doesn't
	 * matter as trimming blocks is an advisory interface.
	 */
	max_blocks = mp->m_sb.sb_dblocks + mp->m_sb.sb_rblocks;
	if (range.start >= XFS_FSB_TO_B(mp, max_blocks) ||
	    range.minlen > XFS_FSB_TO_B(mp, mp->m_ag_max_usable) ||
	    range.len < mp->m_sb.sb_blocksize)
		return -EINVAL;

	start = BTOBB(range.start);
	end = start + BTOBBT(range.len) - 1;

	if (bdev_max_discard_sectors(mp->m_ddev_targp->bt_bdev)) {
		error = xfs_trim_datadev_extents(mp, start, end, minlen,
				&blocks_trimmed);
		if (error)
			last_error = error;
	}

	if (rt_bdev && !xfs_trim_should_stop()) {
		error = xfs_trim_rtdev_extents(mp, start, end, minlen,
				&blocks_trimmed);
		if (error)
			last_error = error;
	}

	if (last_error)
		return last_error;

	range.len = XFS_FSB_TO_B(mp, blocks_trimmed);
	if (copy_to_user(urange, &range, sizeof(range)))
		return -EFAULT;
	return 0;
}