/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Koji Sato.
 * Revised by Ryusuke Konishi.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "sufile.h"

#include <trace/events/nilfs2.h>

/**
 * struct nilfs_sufile_info - on-memory private data of sufile
 * @mi: on-memory private data of metadata file
 * @ncleansegs: number of clean segments
 * @allocmin: lower limit of allocatable segment range
 * @allocmax: upper limit of allocatable segment range
 */
struct nilfs_sufile_info {
	struct nilfs_mdt_info mi;
	unsigned long ncleansegs;	/* number of clean segments */
	__u64 allocmin;		/* lower limit of allocatable segment range */
	__u64 allocmax;		/* upper limit of allocatable segment range */
};

static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}

static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}

static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

	do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
	return (unsigned long)t;
}

static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

	return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}

static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
				     __u64 max)
{
	return min_t(unsigned long,
		     nilfs_sufile_segment_usages_per_block(sufile) -
		     nilfs_sufile_get_offset(sufile, curr),
		     max - curr + 1);
}

static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
				     struct buffer_head *bh, void *kaddr)
{
	return kaddr + bh_offset(bh) +
		nilfs_sufile_get_offset(sufile, segnum) *
		NILFS_MDT(sufile)->mi_entry_size;
}

static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
				     int create, struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile,
				   nilfs_sufile_get_blkoff(sufile, segnum),
				   create, NULL, bhp);
}

static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
						   __u64 segnum)
{
	return nilfs_mdt_delete_block(sufile,
				      nilfs_sufile_get_blkoff(sufile, segnum));
}
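
/*
 * Worked example (illustrative only): with the default 16-byte segment
 * usage entry and a 4096-byte block, 256 entries fit in a block, and the
 * 24-byte sufile header makes mi_first_entry_offset work out to 2.  Under
 * those assumptions, segment 300 maps to
 *
 *	blkoff = (300 + 2) / 256 = 1
 *	offset = (300 + 2) % 256 = 46
 *
 * i.e. its usage entry lives 46 * 16 bytes into the second sufile block,
 * which is exactly the quotient/remainder pair that
 * nilfs_sufile_get_blkoff() and nilfs_sufile_get_offset() compute with
 * do_div().
 */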
static void
nilfs_sufile_mod_counter(struct buffer_head *header_bh,
			 u64 ncleanadd, u64 ndirtyadd)
{
	struct nilfs_sufile_header *header;
	void *kaddr;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
}

/**
 * nilfs_sufile_get_ncleansegs - return the number of clean segments
 * @sufile: inode of segment usage file
 */
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
{
	return NILFS_SUI(sufile)->ncleansegs;
}

/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store number of modified segments on @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 * @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			printk(KERN_WARNING
			       "%s: invalid segment number: %llu\n", __func__,
			       (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

out_header:
	n = seg - segnumv;
	brelse(header_bh);
out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
out:
	if (ndone)
		*ndone = n;
	return ret;
}
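
/*
 * Usage sketch (illustrative only): callers batch per-segment updates so
 * that segments sharing a sufile block reuse a single buffer lookup.  The
 * inline wrappers in sufile.h are built this way; for example, freeing a
 * vector of segments boils down to
 *
 *	size_t ndone;
 *	int err = nilfs_sufile_updatev(sufile, segnumv, nsegs, 0, &ndone,
 *				       nilfs_sufile_do_free);
 *
 * where @create is zero because the usage blocks of in-use segments must
 * already exist; a hole would surface as -ENOENT.
 */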
int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		printk(KERN_WARNING "%s: invalid segment number: %llu\n",
		       __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_set_alloc_range - limit range of segment to be allocated
 * @sufile: inode of segment usage file
 * @start: minimum segment number of allocatable region (inclusive)
 * @end: maximum segment number of allocatable region (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-ERANGE - invalid segment region
 */
int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
{
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	__u64 nsegs;
	int ret = -ERANGE;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	nsegs = nilfs_sufile_get_nsegments(sufile);

	if (start <= end && end < nsegs) {
		sui->allocmin = start;
		sui->allocmax = end;
		ret = 0;
	}
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
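
/*
 * Usage sketch (illustrative only): a caller such as the online resize
 * path can narrow the allocatable region before shrinking the segment
 * array, so that no new segment is handed out beyond the new end.
 * Assuming a target of `newnsegs' segments:
 *
 *	err = nilfs_sufile_set_alloc_range(sufile, 0, newnsegs - 1);
 *
 * Passing start > end or end >= nsegments fails with -ERANGE and leaves
 * the previous range intact.
 */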
/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump.  On error,
 * one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, nsus, cnt;
	int ret, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr);

	nsegments = nilfs_sufile_get_nsegments(sufile);
	maxsegnum = sui->allocmax;
	segnum = last_alloc + 1;
	if (segnum < sui->allocmin || segnum > sui->allocmax)
		segnum = sui->allocmin;

	for (cnt = 0; cnt < nsegments; cnt += nsus) {
		if (segnum > maxsegnum) {
			if (cnt < sui->allocmax - sui->allocmin + 1) {
				/*
				 * wrap around in the limited region.
				 * if allocation started from
				 * sui->allocmin, this never happens.
				 */
				segnum = sui->allocmin;
				maxsegnum = last_alloc;
			} else if (segnum > sui->allocmin &&
				   sui->allocmax + 1 < nsegments) {
				segnum = sui->allocmax + 1;
				maxsegnum = nsegments - 1;
			} else if (sui->allocmin > 0) {
				segnum = 0;
				maxsegnum = sui->allocmin - 1;
			} else {
				break; /* never happens */
			}
		}
		trace_nilfs2_segment_usage_check(sufile, segnum, cnt);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_dirty(su);
			kunmap_atomic(kaddr);

			kaddr = kmap_atomic(header_bh->b_page);
			header = kaddr + bh_offset(header_bh);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr);

			sui->ncleansegs--;
			mark_buffer_dirty(header_bh);
			mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;

			trace_nilfs2_segment_usage_allocated(sufile, segnum);

			goto out_header;
		}

		kunmap_atomic(kaddr);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

out_header:
	brelse(header_bh);

out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
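
/*
 * Scan-order sketch for the loop above (illustrative numbers): with
 * allocmin = 100, allocmax = 199 and sh_last_alloc = 150, the search
 * visits
 *
 *	151..199, then wraps to 100..150	(limited region)
 *
 * and only if the limited region cannot satisfy the request does it fall
 * back to 200..nsegments-1 and finally 0..99, mirroring the maxsegnum
 * adjustments in the branches above.
 */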
void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		printk(KERN_WARNING "%s: segment %llu must be clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, -1, 1);
	NILFS_SUI(sufile)->ncleansegs--;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		kunmap_atomic(kaddr);
		return;
	}
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		printk(KERN_WARNING "%s: segment %llu is already clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr);
	mark_buffer_dirty(su_bh);

	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);

	trace_nilfs2_segment_usage_freed(sufile, segnum);
}

/**
 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
 * @sufile: inode of segment usage file
 * @segnum: segment number
 */
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
	struct buffer_head *bh;
	int ret;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (!ret) {
		mark_buffer_dirty(bh);
		nilfs_mdt_mark_dirty(sufile);
		brelse(bh);
	}
	return ret;
}

/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (optional; zero leaves it unchanged)
 */
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	WARN_ON(nilfs_segment_usage_error(su));
	if (modtime)
		su->su_lastmod = cpu_to_le64(modtime);
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
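
/*
 * Usage sketch (illustrative only): the segment constructor records how
 * many live blocks a freshly written segment holds, together with its
 * write time (get_seconds() here is purely illustrative; the real caller
 * passes the segment summary's ctime):
 *
 *	err = nilfs_sufile_set_segment_usage(sufile, segnum, nblocks,
 *					     get_seconds());
 *
 * A @modtime of zero keeps the previously recorded su_lastmod.
 */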
/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr);
	brelse(header_bh);

out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
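
/*
 * Usage sketch (illustrative only): an ioctl handler such as the one for
 * NILFS_IOCTL_SUSTAT fills a struct nilfs_sustat for userspace tools
 * roughly like this (argp being a hypothetical user pointer):
 *
 *	struct nilfs_sustat sustat;
 *	err = nilfs_sufile_get_stat(sufile, &sustat);
 *	if (!err && copy_to_user(argp, &sustat, sizeof(sustat)))
 *		err = -EFAULT;
 */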
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_atomic(kaddr);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr);

	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

/**
 * nilfs_sufile_truncate_range - truncate range of segment array
 * @sufile: inode of segment usage file
 * @start: start segment number (inclusive)
 * @end: end segment number (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid number of segments specified
 *
 * %-EBUSY - Dirty or active segments are present in the range
 */
static int nilfs_sufile_truncate_range(struct inode *sufile,
				       __u64 start, __u64 end)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su, *su2;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	unsigned long segusages_per_block;
	unsigned long nsegs, ncleaned;
	__u64 segnum;
	void *kaddr;
	ssize_t n, nc;
	int ret;
	int j;

	nsegs = nilfs_sufile_get_nsegments(sufile);

	ret = -EINVAL;
	if (start > end || start >= nsegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	ncleaned = 0;

	for (segnum = start; segnum <= end; segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  end - segnum + 1);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_header;
			/* hole */
			continue;
		}
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		su2 = su;
		for (j = 0; j < n; j++, su = (void *)su + susz) {
			if ((le32_to_cpu(su->su_flags) &
			     ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
			    nilfs_segment_is_active(nilfs, segnum + j)) {
				ret = -EBUSY;
				kunmap_atomic(kaddr);
				brelse(su_bh);
				goto out_header;
			}
		}
		nc = 0;
		for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
			if (nilfs_segment_usage_error(su)) {
				nilfs_segment_usage_set_clean(su);
				nc++;
			}
		}
		kunmap_atomic(kaddr);
		if (nc > 0) {
			mark_buffer_dirty(su_bh);
			ncleaned += nc;
		}
		brelse(su_bh);

		if (n == segusages_per_block) {
			/* make hole */
			nilfs_sufile_delete_segment_usage_block(sufile, segnum);
		}
	}
	ret = 0;

out_header:
	if (ncleaned > 0) {
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
		nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
		nilfs_mdt_mark_dirty(sufile);
	}
	brelse(header_bh);
out:
	return ret;
}
/**
 * nilfs_sufile_resize - resize segment array
 * @sufile: inode of segment usage file
 * @newnsegs: new number of segments
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - Enough free space is not left for shrinking
 *
 * %-EBUSY - Dirty or active segments exist in the region to be truncated
 */
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	void *kaddr;
	unsigned long nsegs, nrsvsegs;
	int ret = 0;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	nsegs = nilfs_sufile_get_nsegments(sufile);
	if (nsegs == newnsegs)
		goto out;

	ret = -ENOSPC;
	nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
	if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	if (newnsegs > nsegs) {
		sui->ncleansegs += newnsegs - nsegs;
	} else /* newnsegs < nsegs */ {
		ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
		if (ret < 0)
			goto out_header;

		sui->ncleansegs -= nsegs - newnsegs;
	}

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(sufile);
	nilfs_set_nsegments(nilfs, newnsegs);

out_header:
	brelse(header_bh);
out:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
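
/*
 * Worked example for the shrink check above (illustrative numbers): with
 * nsegs = 1000, newnsegs = 800 and nilfs_nrsvsegs() yielding 40 reserved
 * segments for the new size, shrinking needs
 *
 *	nsegs - newnsegs + nrsvsegs = 1000 - 800 + 40 = 240
 *
 * clean segments, because the 200 truncated segments must all be clean and
 * 40 clean segments must remain reserved afterwards; anything less fails
 * with -ENOSPC before the truncation is attempted.
 */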
/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Description: nilfs_sufile_get_suinfo() copies segment usage information
 * for up to @nsi segments, starting at @segnum, into the @buf array.
 * Segments falling in a hole block are reported as zero-filled entries.
 *
 * Return Value: On success, the number of segment usage entries stored in
 * @buf is returned.  On error, one of the following negative error codes
 * is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned int sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr);
		brelse(su_bh);
	}
	ret = nsegs;

out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
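
/*
 * Usage sketch (illustrative only): a GET_SUINFO-style caller iterates in
 * batches, advancing the start segment by the number of entries returned
 * (BATCH is a hypothetical batch size):
 *
 *	ssize_t n = nilfs_sufile_get_suinfo(sufile, segnum, buf,
 *					    sizeof(struct nilfs_suinfo),
 *					    BATCH);
 *	if (n > 0)
 *		segnum += n;
 */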
/**
 * nilfs_sufile_set_suinfo - sets segment usage info
 * @sufile: inode of segment usage file
 * @buf: array of suinfo_update
 * @supsz: byte size of suinfo_update
 * @nsup: size of suinfo_update array
 *
 * Description: Takes an array of nilfs_suinfo_update structs and updates
 * segment usage accordingly.  Only the fields indicated by the sup_flags
 * are updated.
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
 */
ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
				unsigned int supsz, size_t nsup)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh, *bh;
	struct nilfs_suinfo_update *sup, *supend = buf + supsz * nsup;
	struct nilfs_segment_usage *su;
	void *kaddr;
	unsigned long blkoff, prev_blkoff;
	int cleansi, cleansu, dirtysi, dirtysu;
	long ncleaned = 0, ndirtied = 0;
	int ret = 0;

	if (unlikely(nsup == 0))
		return ret;

	for (sup = buf; sup < supend; sup = (void *)sup + supsz) {
		if (sup->sup_segnum >= nilfs->ns_nsegments
			|| (sup->sup_flags &
				(~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS))
			|| (nilfs_suinfo_update_nblocks(sup) &&
				sup->sup_sui.sui_nblocks >
				nilfs->ns_blocks_per_segment))
			return -EINVAL;
	}

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	sup = buf;
	blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
	ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		kaddr = kmap_atomic(bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, sup->sup_segnum, bh, kaddr);

		if (nilfs_suinfo_update_lastmod(sup))
			su->su_lastmod = cpu_to_le64(sup->sup_sui.sui_lastmod);

		if (nilfs_suinfo_update_nblocks(sup))
			su->su_nblocks = cpu_to_le32(sup->sup_sui.sui_nblocks);

		if (nilfs_suinfo_update_flags(sup)) {
			/*
			 * Active flag is a virtual flag projected by running
			 * nilfs kernel code - drop it not to write it to
			 * disk.
			 */
			sup->sup_sui.sui_flags &=
					~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);

			cleansi = nilfs_suinfo_clean(&sup->sup_sui);
			cleansu = nilfs_segment_usage_clean(su);
			dirtysi = nilfs_suinfo_dirty(&sup->sup_sui);
			dirtysu = nilfs_segment_usage_dirty(su);

			if (cleansi && !cleansu)
				++ncleaned;
			else if (!cleansi && cleansu)
				--ncleaned;

			if (dirtysi && !dirtysu)
				++ndirtied;
			else if (!dirtysi && dirtysu)
				--ndirtied;

			su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags);
		}

		kunmap_atomic(kaddr);

		sup = (void *)sup + supsz;
		if (sup >= supend)
			break;

		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		mark_buffer_dirty(bh);
		put_bh(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_mark;
	}
	mark_buffer_dirty(bh);
	put_bh(bh);

out_mark:
	if (ncleaned || ndirtied) {
		nilfs_sufile_mod_counter(header_bh, (u64)ncleaned,
					 (u64)ndirtied);
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
	}
	nilfs_mdt_mark_dirty(sufile);
out_header:
	put_bh(header_bh);
out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
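
/*
 * Usage sketch (illustrative only): to mark a single segment dirty via
 * this interface, a caller would prepare one update record:
 *
 *	struct nilfs_suinfo_update sup = {
 *		.sup_segnum = segnum,
 *		.sup_flags = 1UL << NILFS_SUINFO_UPDATE_FLAGS,
 *		.sup_sui.sui_flags = 1UL << NILFS_SEGMENT_USAGE_DIRTY,
 *	};
 *	err = nilfs_sufile_set_suinfo(sufile, &sup, sizeof(sup), 1);
 */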
/**
 * nilfs_sufile_trim_fs() - trim ioctl handle function
 * @sufile: inode of segment usage file
 * @range: fstrim_range structure
 *
 * start: first byte to trim
 * len: number of bytes to trim from start
 * minlen: minimum extent length in bytes
 *
 * Description: nilfs_sufile_trim_fs() goes through all segments containing
 * bytes from start to start + len.  start is rounded up to the next block
 * boundary and start + len is rounded down.  For each clean segment,
 * blkdev_issue_discard() is invoked.
 *
 * Return Value: On success, 0 is returned.  On error, a negative error
 * code is returned.
 */
int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	size_t n, i, susz = NILFS_MDT(sufile)->mi_entry_size;
	sector_t seg_start, seg_end, start_block, end_block;
	sector_t start = 0, nblocks = 0;
	u64 segnum, segnum_end, minlen, len, max_blocks, ndiscarded = 0;
	int ret = 0;
	unsigned int sects_per_block;

	sects_per_block = (1 << nilfs->ns_blocksize_bits) /
			bdev_logical_block_size(nilfs->ns_bdev);
	len = range->len >> nilfs->ns_blocksize_bits;
	minlen = range->minlen >> nilfs->ns_blocksize_bits;
	max_blocks = ((u64)nilfs->ns_nsegments * nilfs->ns_blocks_per_segment);

	if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits)
		return -EINVAL;

	start_block = (range->start + nilfs->ns_blocksize - 1) >>
			nilfs->ns_blocksize_bits;

	/*
	 * range->len can be very large (actually, it is set to
	 * ULLONG_MAX by default) - truncate upper end of the range
	 * carefully so as not to overflow.
	 */
	if (max_blocks - start_block < len)
		end_block = max_blocks - 1;
	else
		end_block = start_block + len - 1;

	segnum = nilfs_get_segnum_of_block(nilfs, start_block);
	segnum_end = nilfs_get_segnum_of_block(nilfs, end_block);

	down_read(&NILFS_MDT(sufile)->mi_sem);

	while (segnum <= segnum_end) {
		n = nilfs_sufile_segment_usages_in_block(sufile, segnum,
							 segnum_end);

		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_sem;
			/* hole */
			segnum += n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
							  su_bh, kaddr);
		for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) {
			if (!nilfs_segment_usage_clean(su))
				continue;

			nilfs_get_segment_range(nilfs, segnum, &seg_start,
						&seg_end);

			if (!nblocks) {
				/* start new extent */
				start = seg_start;
				nblocks = seg_end - seg_start + 1;
				continue;
			}

			if (start + nblocks == seg_start) {
				/* add to previous extent */
				nblocks += seg_end - seg_start + 1;
				continue;
			}

			/* discard previous extent */
			if (start < start_block) {
				nblocks -= start_block - start;
				start = start_block;
			}

			if (nblocks >= minlen) {
				kunmap_atomic(kaddr);

				ret = blkdev_issue_discard(nilfs->ns_bdev,
						start * sects_per_block,
						nblocks * sects_per_block,
						GFP_NOFS, 0);
				if (ret < 0) {
					put_bh(su_bh);
					goto out_sem;
				}

				ndiscarded += nblocks;
				kaddr = kmap_atomic(su_bh->b_page);
				su = nilfs_sufile_block_get_segment_usage(
					sufile, segnum, su_bh, kaddr);
			}

			/* start new extent */
			start = seg_start;
			nblocks = seg_end - seg_start + 1;
		}
		kunmap_atomic(kaddr);
		put_bh(su_bh);
	}

	if (nblocks) {
		/* discard last extent */
		if (start < start_block) {
			nblocks -= start_block - start;
			start = start_block;
		}
		if (start + nblocks > end_block + 1)
			nblocks = end_block - start + 1;

		if (nblocks >= minlen) {
			ret = blkdev_issue_discard(nilfs->ns_bdev,
					start * sects_per_block,
					nblocks * sects_per_block,
					GFP_NOFS, 0);
			if (!ret)
				ndiscarded += nblocks;
		}
	}

out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);

	range->len = ndiscarded << nilfs->ns_blocksize_bits;
	return ret;
}
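
/*
 * Worked example for the unit conversions above (illustrative numbers):
 * with a 4096-byte filesystem block size and 512-byte logical sectors,
 * sects_per_block is 8, so an extent of nblocks filesystem blocks
 * starting at filesystem block start is discarded as
 *
 *	blkdev_issue_discard(bdev, start * 8, nblocks * 8, GFP_NOFS, 0);
 *
 * range->start is rounded up and the range end rounded down to block
 * boundaries, so partial blocks at either end are never trimmed.
 */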
/**
 * nilfs_sufile_read - read or get sufile inode
 * @sb: super block instance
 * @susize: size of a segment usage entry
 * @raw_inode: on-disk sufile inode
 * @inodep: buffer to store the inode
 */
int nilfs_sufile_read(struct super_block *sb, size_t susize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *sufile;
	struct nilfs_sufile_info *sui;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	void *kaddr;
	int err;

	if (susize > sb->s_blocksize) {
		printk(KERN_ERR
		       "NILFS: too large segment usage size: %zu bytes.\n",
		       susize);
		return -EINVAL;
	} else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) {
		printk(KERN_ERR
		       "NILFS: too small segment usage size: %zu bytes.\n",
		       susize);
		return -EINVAL;
	}

	sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
	if (unlikely(!sufile))
		return -ENOMEM;
	if (!(sufile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(sufile, susize,
				 sizeof(struct nilfs_sufile_header));

	err = nilfs_read_inode_common(sufile, raw_inode);
	if (err)
		goto failed;

	err = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (err)
		goto failed;

	sui = NILFS_SUI(sufile);
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	kunmap_atomic(kaddr);
	brelse(header_bh);

	sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
	sui->allocmin = 0;

	unlock_new_inode(sufile);
out:
	*inodep = sufile;
	return 0;
failed:
	iget_failed(sufile);
	return err;
}