/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 * Revised by Ryusuke Konishi <ryusuke@osrg.net>.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "sufile.h"

/**
 * struct nilfs_sufile_info - on-memory private data of sufile
 * @mi: on-memory private data of metadata file
 * @ncleansegs: number of clean segments
 * @allocmin: lower limit of allocatable segment range
 * @allocmax: upper limit of allocatable segment range
 */
struct nilfs_sufile_info {
	struct nilfs_mdt_info mi;
	unsigned long ncleansegs;/* number of clean segments */
	__u64 allocmin;		/* lower limit of allocatable segment range */
	__u64 allocmax;		/* upper limit of allocatable segment range */
};

static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}

static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}

/* Block offset within the sufile holding the usage entry of @segnum */
static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
	do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
	return (unsigned long)t;
}

/* Index of the usage entry of @segnum within its sufile block */
static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
	return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}

static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
				     __u64 max)
{
	return min_t(unsigned long,
		     nilfs_sufile_segment_usages_per_block(sufile) -
		     nilfs_sufile_get_offset(sufile, curr),
		     max - curr + 1);
}

static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
				     struct buffer_head *bh, void *kaddr)
{
	return kaddr + bh_offset(bh) +
		nilfs_sufile_get_offset(sufile, segnum) *
		NILFS_MDT(sufile)->mi_entry_size;
}

static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
				     int create, struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile,
				   nilfs_sufile_get_blkoff(sufile, segnum),
				   create, NULL, bhp);
}
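
/*
 * Illustrative sketch (editorial note, not part of the original file):
 * readers and writers of segment usage entries throughout this file
 * follow the same pattern as the helpers above -- look up the block
 * covering @segnum, map its page, and locate the entry within it.
 * Error handling is omitted here for brevity:
 *
 *	struct buffer_head *bh;
 *	struct nilfs_segment_usage *su;
 *	void *kaddr;
 *
 *	if (!nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh)) {
 *		kaddr = kmap_atomic(bh->b_page);
 *		su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
 *							  bh, kaddr);
 *		... inspect or modify *su ...
 *		kunmap_atomic(kaddr);
 *		brelse(bh);
 *	}
 */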

static int
nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
					__u64 segnum)
{
	return nilfs_mdt_delete_block(sufile,
				      nilfs_sufile_get_blkoff(sufile, segnum));
}

static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
				     u64 ncleanadd, u64 ndirtyadd)
{
	struct nilfs_sufile_header *header;
	void *kaddr;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
}

/**
 * nilfs_sufile_get_ncleansegs - return the number of clean segments
 * @sufile: inode of segment usage file
 */
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
{
	return NILFS_SUI(sufile)->ncleansegs;
}

/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store number of modified segments on @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 * @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			printk(KERN_WARNING
			       "%s: invalid segment number: %llu\n", __func__,
			       (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

 out_header:
	n = seg - segnumv;
	brelse(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
	if (ndone)
		*ndone = n;
	return ret;
}

int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		printk(KERN_WARNING "%s: invalid segment number: %llu\n",
		       __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
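
/*
 * Illustrative sketch (editorial note, not part of the original file):
 * callers pair nilfs_sufile_update()/nilfs_sufile_updatev() with the
 * nilfs_sufile_do_*() primitives defined later in this file.  The
 * inline wrappers in sufile.h are built along these lines:
 *
 *	static inline int nilfs_sufile_scrap(struct inode *sufile,
 *					     __u64 segnum)
 *	{
 *		return nilfs_sufile_update(sufile, segnum, 1,
 *					   nilfs_sufile_do_scrap);
 *	}
 */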

/**
 * nilfs_sufile_set_alloc_range - limit range of segments to be allocated
 * @sufile: inode of segment usage file
 * @start: minimum segment number of allocatable region (inclusive)
 * @end: maximum segment number of allocatable region (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-ERANGE - invalid segment region
 */
int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
{
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	__u64 nsegs;
	int ret = -ERANGE;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	nsegs = nilfs_sufile_get_nsegments(sufile);

	if (start <= end && end < nsegs) {
		sui->allocmin = start;
		sui->allocmax = end;
		ret = 0;
	}
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
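
/*
 * Illustrative sketch (editorial note, not part of the original file):
 * a caller that wants new segments to be taken only from the first half
 * of the device, e.g. while preparing to shrink the filesystem, could
 * restrict the allocator like this:
 *
 *	__u64 nsegs = nilfs_sufile_get_nsegments(sufile);
 *	int err = nilfs_sufile_set_alloc_range(sufile, 0, nsegs / 2 - 1);
 *	if (err)
 *		... -ERANGE: empty or out-of-bounds region ...
 */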

/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump.  On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, ncleansegs, nsus, cnt;
	int ret, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr);

	nsegments = nilfs_sufile_get_nsegments(sufile);
	maxsegnum = sui->allocmax;
	segnum = last_alloc + 1;
	if (segnum < sui->allocmin || segnum > sui->allocmax)
		segnum = sui->allocmin;

	for (cnt = 0; cnt < nsegments; cnt += nsus) {
		if (segnum > maxsegnum) {
			if (cnt < sui->allocmax - sui->allocmin + 1) {
				/*
				 * wrap around in the limited region.
				 * if allocation started from
				 * sui->allocmin, this never happens.
				 */
				segnum = sui->allocmin;
				maxsegnum = last_alloc;
			} else if (segnum > sui->allocmin &&
				   sui->allocmax + 1 < nsegments) {
				segnum = sui->allocmax + 1;
				maxsegnum = nsegments - 1;
			} else if (sui->allocmin > 0) {
				segnum = 0;
				maxsegnum = sui->allocmin - 1;
			} else {
				break; /* never happens */
			}
		}
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_dirty(su);
			kunmap_atomic(kaddr);

			kaddr = kmap_atomic(header_bh->b_page);
			header = kaddr + bh_offset(header_bh);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr);

			sui->ncleansegs--;
			mark_buffer_dirty(header_bh);
			mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;
			goto out_header;
		}

		kunmap_atomic(kaddr);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
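
/*
 * Illustrative sketch (editorial note, not part of the original file):
 * the segment constructor obtains its next write target roughly like
 * this, with error handling reduced to the -ENOSPC case:
 *
 *	__u64 nextnum;
 *	int err = nilfs_sufile_alloc(sufile, &nextnum);
 *	if (err == -ENOSPC)
 *		... no clean segment left; trigger GC or fail ...
 */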

void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		printk(KERN_WARNING "%s: segment %llu must be clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, -1, 1);
	NILFS_SUI(sufile)->ncleansegs--;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		kunmap_atomic(kaddr);
		return;
	}
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0,
				 dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		printk(KERN_WARNING "%s: segment %llu is already clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr);
	mark_buffer_dirty(su_bh);

	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);
}

/**
 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
 * @sufile: inode of segment usage file
 * @segnum: segment number
 */
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
	struct buffer_head *bh;
	int ret;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (!ret) {
		mark_buffer_dirty(bh);
		nilfs_mdt_mark_dirty(sufile);
		brelse(bh);
	}
	return ret;
}

/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (option)
 */
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	WARN_ON(nilfs_segment_usage_error(su));
	if (modtime)
		su->su_lastmod = cpu_to_le64(modtime);
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
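
/*
 * Illustrative sketch (editorial note, not part of the original file):
 * after writing out a log, the log writer records the live block count
 * and the commit time of each written segment along these lines, where
 * seg_ctime is an illustrative name for the log's commit time:
 *
 *	err = nilfs_sufile_set_segment_usage(sufile, segnum, live_blocks,
 *					     seg_ctime);
 */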

/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_atomic(kaddr);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr);

	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
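
/*
 * Illustrative sketch (editorial note, not part of the original file):
 * callers such as the statfs path can derive free-space figures from
 * these statistics, e.g.:
 *
 *	struct nilfs_sustat sustat;
 *
 *	if (!nilfs_sufile_get_stat(sufile, &sustat))
 *		... sustat.ss_ncleansegs * blocks-per-segment bounds the
 *		    number of freely writable blocks ...
 */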

/**
 * nilfs_sufile_truncate_range - truncate range of segment array
 * @sufile: inode of segment usage file
 * @start: start segment number (inclusive)
 * @end: end segment number (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid number of segments specified
 *
 * %-EBUSY - Dirty or active segments are present in the range
 */
static int nilfs_sufile_truncate_range(struct inode *sufile,
				       __u64 start, __u64 end)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su, *su2;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	unsigned long segusages_per_block;
	unsigned long nsegs, ncleaned;
	__u64 segnum;
	void *kaddr;
	ssize_t n, nc;
	int ret;
	int j;

	nsegs = nilfs_sufile_get_nsegments(sufile);

	ret = -EINVAL;
	if (start > end || start >= nsegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	ncleaned = 0;

	for (segnum = start; segnum <= end; segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  end - segnum + 1);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_header;
			/* hole */
			continue;
		}
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		su2 = su;
		for (j = 0; j < n; j++, su = (void *)su + susz) {
			if ((le32_to_cpu(su->su_flags) &
			     ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
			    nilfs_segment_is_active(nilfs, segnum + j)) {
				ret = -EBUSY;
				kunmap_atomic(kaddr);
				brelse(su_bh);
				goto out_header;
			}
		}
		nc = 0;
		for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
			if (nilfs_segment_usage_error(su)) {
				nilfs_segment_usage_set_clean(su);
				nc++;
			}
		}
		kunmap_atomic(kaddr);
		if (nc > 0) {
			mark_buffer_dirty(su_bh);
			ncleaned += nc;
		}
		brelse(su_bh);

		if (n == segusages_per_block) {
			/* make hole */
			nilfs_sufile_delete_segment_usage_block(sufile, segnum);
		}
	}
	ret = 0;

 out_header:
	if (ncleaned > 0) {
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
		nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
		nilfs_mdt_mark_dirty(sufile);
	}
	brelse(header_bh);
 out:
	return ret;
}

/**
 * nilfs_sufile_resize - resize segment array
 * @sufile: inode of segment usage file
 * @newnsegs: new number of segments
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - Enough free space is not left for shrinking
 *
 * %-EBUSY - Dirty or active segments exist in the region to be truncated
 */
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	void *kaddr;
	unsigned long nsegs, nrsvsegs;
	int ret = 0;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	nsegs = nilfs_sufile_get_nsegments(sufile);
	if (nsegs == newnsegs)
		goto out;

	ret = -ENOSPC;
	nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
	if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	if (newnsegs > nsegs) {
		sui->ncleansegs += newnsegs - nsegs;
	} else /* newnsegs < nsegs */ {
		ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
		if (ret < 0)
			goto out_header;

		sui->ncleansegs -= nsegs - newnsegs;
	}

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(sufile);
	nilfs_set_nsegments(nilfs, newnsegs);

 out_header:
	brelse(header_bh);
 out:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_get_suinfo - get an array of segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Description: nilfs_sufile_get_suinfo() copies the usage information of
 * up to @nsi segments, starting from @segnum, into the @buf array.
 *
 * Return Value: On success, the number of segment usage entries stored in
 * @buf is returned.  On error, one of the following negative error codes
 * is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
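
/*
 * Illustrative sketch (editorial note, not part of the original file):
 * this routine backs the GET_SUINFO ioctl used by the userland cleaner;
 * a kernel-side caller reading a batch of entries would look like:
 *
 *	struct nilfs_suinfo si[8];
 *	ssize_t n = nilfs_sufile_get_suinfo(sufile, segnum, si,
 *					    sizeof(si[0]), ARRAY_SIZE(si));
 *	... when n >= 0, the first n elements of si[] are valid ...
 */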

/**
 * nilfs_sufile_set_suinfo - sets segment usage info
 * @sufile: inode of segment usage file
 * @buf: array of suinfo_update
 * @supsz: byte size of suinfo_update
 * @nsup: size of suinfo_update array
 *
 * Description: Takes an array of nilfs_suinfo_update structs and updates
 * segment usage accordingly.  Only the fields indicated by the sup_flags
 * are updated.
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
 */
ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
				unsigned supsz, size_t nsup)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh, *bh;
	struct nilfs_suinfo_update *sup, *supend = buf + supsz * nsup;
	struct nilfs_segment_usage *su;
	void *kaddr;
	unsigned long blkoff, prev_blkoff;
	int cleansi, cleansu, dirtysi, dirtysu;
	long ncleaned = 0, ndirtied = 0;
	int ret = 0;

	if (unlikely(nsup == 0))
		return ret;

	for (sup = buf; sup < supend; sup = (void *)sup + supsz) {
		if (sup->sup_segnum >= nilfs->ns_nsegments
			|| (sup->sup_flags &
				(~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS))
			|| (nilfs_suinfo_update_nblocks(sup) &&
				sup->sup_sui.sui_nblocks >
				nilfs->ns_blocks_per_segment))
			return -EINVAL;
	}

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	sup = buf;
	blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
	ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		kaddr = kmap_atomic(bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, sup->sup_segnum, bh, kaddr);

		if (nilfs_suinfo_update_lastmod(sup))
			su->su_lastmod = cpu_to_le64(sup->sup_sui.sui_lastmod);

		if (nilfs_suinfo_update_nblocks(sup))
			su->su_nblocks = cpu_to_le32(sup->sup_sui.sui_nblocks);

		if (nilfs_suinfo_update_flags(sup)) {
			/*
			 * Active flag is a virtual flag projected by running
			 * nilfs kernel code - drop it not to write it to
			 * disk.
			 */
			sup->sup_sui.sui_flags &=
					~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);

			cleansi = nilfs_suinfo_clean(&sup->sup_sui);
			cleansu = nilfs_segment_usage_clean(su);
			dirtysi = nilfs_suinfo_dirty(&sup->sup_sui);
			dirtysu = nilfs_segment_usage_dirty(su);

			if (cleansi && !cleansu)
				++ncleaned;
			else if (!cleansi && cleansu)
				--ncleaned;

			if (dirtysi && !dirtysu)
				++ndirtied;
			else if (!dirtysi && dirtysu)
				--ndirtied;

			su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags);
		}

		kunmap_atomic(kaddr);

		sup = (void *)sup + supsz;
		if (sup >= supend)
			break;

		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		mark_buffer_dirty(bh);
		put_bh(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_mark;
	}
	mark_buffer_dirty(bh);
	put_bh(bh);

 out_mark:
	if (ncleaned || ndirtied) {
		nilfs_sufile_mod_counter(header_bh, (u64)ncleaned,
					 (u64)ndirtied);
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
	}
	nilfs_mdt_mark_dirty(sufile);
 out_header:
	put_bh(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
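
/*
 * Illustrative sketch (editorial note, not part of the original file):
 * this routine backs the SET_SUINFO ioctl.  Assuming the suinfo_update
 * field flags from nilfs2_fs.h, a single update that only rewrites the
 * live block count could be built like this:
 *
 *	struct nilfs_suinfo_update sup = {
 *		.sup_segnum = segnum,
 *		.sup_flags = 1UL << NILFS_SUINFO_UPDATE_NBLOCKS,
 *		.sup_sui.sui_nblocks = nblocks,
 *	};
 *	ret = nilfs_sufile_set_suinfo(sufile, &sup, sizeof(sup), 1);
 */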

/**
 * nilfs_sufile_trim_fs() - trim ioctl handler function
 * @sufile: inode of segment usage file
 * @range: fstrim_range structure
 *
 * start: first byte to trim
 * len: number of bytes to trim from start
 * minlen: minimum extent length in bytes
 *
 * Description: nilfs_sufile_trim_fs() goes through all segments containing
 * bytes from start to start+len.  start is rounded up to the next block
 * boundary and start+len is rounded down.  For each clean segment, the
 * blkdev_issue_discard function is invoked.
 *
 * Return Value: On success, 0 is returned.  On error, a negative error
 * code is returned.
 */
int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	size_t n, i, susz = NILFS_MDT(sufile)->mi_entry_size;
	sector_t seg_start, seg_end, start_block, end_block;
	sector_t start = 0, nblocks = 0;
	u64 segnum, segnum_end, minlen, len, max_blocks, ndiscarded = 0;
	int ret = 0;
	unsigned int sects_per_block;

	sects_per_block = (1 << nilfs->ns_blocksize_bits) /
			bdev_logical_block_size(nilfs->ns_bdev);
	len = range->len >> nilfs->ns_blocksize_bits;
	minlen = range->minlen >> nilfs->ns_blocksize_bits;
	max_blocks = ((u64)nilfs->ns_nsegments * nilfs->ns_blocks_per_segment);

	if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits)
		return -EINVAL;

	start_block = (range->start + nilfs->ns_blocksize - 1) >>
			nilfs->ns_blocksize_bits;

	/*
	 * range->len can be very large (actually, it is set to
	 * ULLONG_MAX by default) - truncate upper end of the range
	 * carefully so as not to overflow.
	 */
	if (max_blocks - start_block < len)
		end_block = max_blocks - 1;
	else
		end_block = start_block + len - 1;

	segnum = nilfs_get_segnum_of_block(nilfs, start_block);
	segnum_end = nilfs_get_segnum_of_block(nilfs, end_block);

	down_read(&NILFS_MDT(sufile)->mi_sem);

	while (segnum <= segnum_end) {
		n = nilfs_sufile_segment_usages_in_block(sufile, segnum,
				segnum_end);

		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_sem;
			/* hole */
			segnum += n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
				su_bh, kaddr);
		for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) {
			if (!nilfs_segment_usage_clean(su))
				continue;

			nilfs_get_segment_range(nilfs, segnum, &seg_start,
						&seg_end);

			if (!nblocks) {
				/* start new extent */
				start = seg_start;
				nblocks = seg_end - seg_start + 1;
				continue;
			}

			if (start + nblocks == seg_start) {
				/* add to previous extent */
				nblocks += seg_end - seg_start + 1;
				continue;
			}

			/* discard previous extent */
			if (start < start_block) {
				nblocks -= start_block - start;
				start = start_block;
			}

			if (nblocks >= minlen) {
				kunmap_atomic(kaddr);

				ret = blkdev_issue_discard(nilfs->ns_bdev,
						start * sects_per_block,
						nblocks * sects_per_block,
						GFP_NOFS, 0);
				if (ret < 0) {
					put_bh(su_bh);
					goto out_sem;
				}

				ndiscarded += nblocks;
				kaddr = kmap_atomic(su_bh->b_page);
				su = nilfs_sufile_block_get_segment_usage(
					sufile, segnum, su_bh, kaddr);
			}

			/* start new extent */
			start = seg_start;
			nblocks = seg_end - seg_start + 1;
		}
		kunmap_atomic(kaddr);
		put_bh(su_bh);
	}

	if (nblocks) {
		/* discard last extent */
		if (start < start_block) {
			nblocks -= start_block - start;
			start = start_block;
		}
		if (start + nblocks > end_block + 1)
			nblocks = end_block - start + 1;

		if (nblocks >= minlen) {
			ret = blkdev_issue_discard(nilfs->ns_bdev,
					start * sects_per_block,
					nblocks * sects_per_block,
					GFP_NOFS, 0);
			if (!ret)
				ndiscarded += nblocks;
		}
	}

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);

	range->len = ndiscarded << nilfs->ns_blocksize_bits;
	return ret;
}

/**
 * nilfs_sufile_read - read or get sufile inode
 * @sb: super block instance
 * @susize: size of a segment usage entry
 * @raw_inode: on-disk sufile inode
 * @inodep: buffer to store the inode
 *
 * Return Value: On success, 0 is returned and the sufile inode is stored
 * in the place pointed by @inodep.  On error, a negative error code is
 * returned.
 */
int nilfs_sufile_read(struct super_block *sb, size_t susize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *sufile;
	struct nilfs_sufile_info *sui;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	void *kaddr;
	int err;

	if (susize > sb->s_blocksize) {
		printk(KERN_ERR
		       "NILFS: too large segment usage size: %zu bytes.\n",
		       susize);
		return -EINVAL;
	} else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) {
		printk(KERN_ERR
		       "NILFS: too small segment usage size: %zu bytes.\n",
		       susize);
		return -EINVAL;
	}

	sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
	if (unlikely(!sufile))
		return -ENOMEM;
	if (!(sufile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(sufile, susize,
				 sizeof(struct nilfs_sufile_header));

	err = nilfs_read_inode_common(sufile, raw_inode);
	if (err)
		goto failed;

	err = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (err)
		goto failed;

	sui = NILFS_SUI(sufile);
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	kunmap_atomic(kaddr);
	brelse(header_bh);

	sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
	sui->allocmin = 0;

	unlock_new_inode(sufile);
 out:
	*inodep = sufile;
	return 0;
 failed:
	iget_failed(sufile);
	return err;
}
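
/*
 * Illustrative sketch (editorial note, not part of the original file):
 * the FITRIM ioctl handler drives nilfs_sufile_trim_fs() roughly like
 * this; on return, range.len holds the number of bytes actually
 * discarded:
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = ULLONG_MAX,
 *		.minlen = 0,
 *	};
 *	ret = nilfs_sufile_trim_fs(sufile, &range);
 */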