/*
 * cpfile.c - NILFS checkpoint file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "cpfile.h"


static inline unsigned long
nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile)
{
	return NILFS_MDT(cpfile)->mi_entries_per_block;
}

/* block number from the beginning of the file */
static unsigned long
nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno)
{
	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
	do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
	return (unsigned long)tcno;
}

/* offset in block */
static unsigned long
nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno)
{
	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
	return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
}

static unsigned long
nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile,
				  __u64 curr,
				  __u64 max)
{
	return min_t(__u64,
		     nilfs_cpfile_checkpoints_per_block(cpfile) -
		     nilfs_cpfile_get_offset(cpfile, curr),
		     max - curr);
}

static inline int nilfs_cpfile_is_in_first(const struct inode *cpfile,
					   __u64 cno)
{
	return nilfs_cpfile_get_blkoff(cpfile, cno) == 0;
}

static unsigned int
nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile,
					 struct buffer_head *bh,
					 void *kaddr,
					 unsigned int n)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	unsigned int count;

	count = le32_to_cpu(cp->cp_checkpoints_count) + n;
	cp->cp_checkpoints_count = cpu_to_le32(count);
	return count;
}

static unsigned int
nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile,
					 struct buffer_head *bh,
					 void *kaddr,
					 unsigned int n)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	unsigned int count;

	WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
	count = le32_to_cpu(cp->cp_checkpoints_count) - n;
	cp->cp_checkpoints_count = cpu_to_le32(count);
	return count;
}

static inline struct nilfs_cpfile_header *
nilfs_cpfile_block_get_header(const struct inode *cpfile,
			      struct buffer_head *bh,
			      void *kaddr)
{
	return kaddr + bh_offset(bh);
}

static struct nilfs_checkpoint *
nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno,
				  struct buffer_head *bh,
				  void *kaddr)
{
	return kaddr + bh_offset(bh) + nilfs_cpfile_get_offset(cpfile, cno) *
		NILFS_MDT(cpfile)->mi_entry_size;
}
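
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a checkpoint number maps to a block and an in-block offset with the
 * helpers above.  The concrete values below are assumptions for the
 * example only.
 *
 * Suppose mi_first_entry_offset = 2 and mi_entries_per_block = 16.
 * For cno = 31:
 *	tcno   = 31 + 2 - 1 = 32
 *	blkoff = 32 / 16    = 2		(nilfs_cpfile_get_blkoff)
 *	offset = 32 %% 16    = 0	(nilfs_cpfile_get_offset)
 * so checkpoint 31 would be the first entry of the third cpfile block.
 */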

static void nilfs_cpfile_block_init(struct inode *cpfile,
				    struct buffer_head *bh,
				    void *kaddr)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	int n = nilfs_cpfile_checkpoints_per_block(cpfile);

	while (n-- > 0) {
		nilfs_checkpoint_set_invalid(cp);
		cp = (void *)cp + cpsz;
	}
}

static inline int nilfs_cpfile_get_header_block(struct inode *cpfile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp);
}

static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
						    __u64 cno,
						    int create,
						    struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(cpfile,
				   nilfs_cpfile_get_blkoff(cpfile, cno),
				   create, nilfs_cpfile_block_init, bhp);
}

static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
							__u64 cno)
{
	return nilfs_mdt_delete_block(cpfile,
				      nilfs_cpfile_get_blkoff(cpfile, cno));
}

/**
 * nilfs_cpfile_get_checkpoint - get a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @create: create flag
 * @cpp: pointer to a checkpoint
 * @bhp: pointer to a buffer head
 *
 * Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint
 * specified by @cno. A new checkpoint will be created if @cno is the current
 * checkpoint number and @create is nonzero.
 *
 * Return Value: On success, 0 is returned, and the checkpoint and the
 * buffer head of the buffer on which the checkpoint is located are stored in
 * the places pointed to by @cpp and @bhp, respectively. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 *
 * %-EINVAL - invalid checkpoint.
 */
int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
				__u64 cno,
				int create,
				struct nilfs_checkpoint **cpp,
				struct buffer_head **bhp)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) ||
		     (cno < nilfs_mdt_cno(cpfile) && create)))
		return -EINVAL;

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, create, &cp_bh);
	if (ret < 0)
		goto out_header;
	kaddr = kmap(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		if (!create) {
			kunmap(cp_bh->b_page);
			brelse(cp_bh);
			ret = -ENOENT;
			goto out_header;
		}
		/* a newly-created checkpoint */
		nilfs_checkpoint_clear_invalid(cp);
		if (!nilfs_cpfile_is_in_first(cpfile, cno))
			nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
								 kaddr, 1);
		nilfs_mdt_mark_buffer_dirty(cp_bh);

		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, 1);
		kunmap_atomic(kaddr, KM_USER0);
		nilfs_mdt_mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
	}

	if (cpp != NULL)
		*cpp = cp;
	*bhp = cp_bh;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_put_checkpoint - put a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @bh: buffer head
 *
 * Description: nilfs_cpfile_put_checkpoint() releases the checkpoint
 * specified by @cno. @bh must be the buffer head which has been returned by
 * a previous call to nilfs_cpfile_get_checkpoint() with @cno.
 */
void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno,
				 struct buffer_head *bh)
{
	kunmap(bh->b_page);
	brelse(bh);
}
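
/*
 * Illustrative usage sketch (editor's addition): looking up an existing
 * checkpoint and releasing it again.  The variable names below are
 * assumptions for the example only; per the description above, a new
 * checkpoint is only created when @cno is the current checkpoint number
 * and @create is nonzero.
 *
 *	struct nilfs_checkpoint *cp;
 *	struct buffer_head *bh;
 *	__u64 nblocks;
 *	int err;
 *
 *	err = nilfs_cpfile_get_checkpoint(cpfile, cno, 0, &cp, &bh);
 *	if (!err) {
 *		nblocks = le64_to_cpu(cp->cp_blocks_count);
 *		nilfs_cpfile_put_checkpoint(cpfile, cno, bh);
 *	}
 */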

/**
 * nilfs_cpfile_delete_checkpoints - delete checkpoints
 * @cpfile: inode of checkpoint file
 * @start: start checkpoint number
 * @end: end checkpoint number
 *
 * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in
 * the range from @start to @end, excluding @end itself. Checkpoints that
 * have already been deleted are ignored.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - invalid checkpoints.
 */
int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
				    __u64 start,
				    __u64 end)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cno;
	void *kaddr;
	unsigned long tnicps;
	int ret, ncps, nicps, count, i;

	if (unlikely(start == 0 || start > end)) {
		printk(KERN_ERR "%s: invalid range of checkpoint numbers: "
		       "[%llu, %llu)\n", __func__,
		       (unsigned long long)start, (unsigned long long)end);
		return -EINVAL;
	}

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	tnicps = 0;

	for (cno = start; cno < end; cno += ncps) {
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end);
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_header;
			/* skip hole */
			ret = 0;
			continue;
		}

		kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, cno, cp_bh, kaddr);
		nicps = 0;
		for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
			WARN_ON(nilfs_checkpoint_snapshot(cp));
			if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_checkpoint_set_invalid(cp);
				nicps++;
			}
		}
		if (nicps > 0) {
			tnicps += nicps;
			nilfs_mdt_mark_buffer_dirty(cp_bh);
			nilfs_mdt_mark_dirty(cpfile);
			if (!nilfs_cpfile_is_in_first(cpfile, cno) &&
			    (count = nilfs_cpfile_block_sub_valid_checkpoints(
				    cpfile, cp_bh, kaddr, nicps)) == 0) {
				/* make hole */
				kunmap_atomic(kaddr, KM_USER0);
				brelse(cp_bh);
				ret = nilfs_cpfile_delete_checkpoint_block(
					cpfile, cno);
				if (ret == 0)
					continue;
				printk(KERN_ERR "%s: cannot delete block\n",
				       __func__);
				goto out_header;
			}
		}

		kunmap_atomic(kaddr, KM_USER0);
		brelse(cp_bh);
	}

	if (tnicps > 0) {
		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
		nilfs_mdt_mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
		kunmap_atomic(kaddr, KM_USER0);
	}

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
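
/*
 * Illustrative usage sketch (editor's addition): the range is half-open,
 * so deleting a single checkpoint "cno" and deleting everything older than
 * "cno" would look like the calls below.  The variable names are
 * assumptions for the example only; snapshots are expected to have been
 * cleared beforehand (see the WARN_ON in the loop above).
 *
 *	err = nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
 *	err = nilfs_cpfile_delete_checkpoints(cpfile, 1, cno);
 */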

static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile,
					      struct nilfs_checkpoint *cp,
					      struct nilfs_cpinfo *ci)
{
	ci->ci_flags = le32_to_cpu(cp->cp_flags);
	ci->ci_cno = le64_to_cpu(cp->cp_cno);
	ci->ci_create = le64_to_cpu(cp->cp_create);
	ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc);
	ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count);
	ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count);
	ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
}

static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned cisz, size_t nci)
{
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	struct buffer_head *bh;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop;
	void *kaddr;
	int n, ret;
	int ncps, i;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	for (n = 0; cno < cur_cno && n < nci; cno += ncps) {
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno);
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			continue; /* skip hole */
		}

		kaddr = kmap_atomic(bh->b_page, KM_USER0);
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
		for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
			if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp,
								  ci);
				ci = (void *)ci + cisz;
				n++;
			}
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(bh);
	}

	ret = n;
	if (n > 0) {
		ci = (void *)ci - cisz;
		*cnop = ci->ci_cno + 1;
	}

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned cisz, size_t nci)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	__u64 curr = *cnop, next;
	unsigned long curr_blkoff, next_blkoff;
	void *kaddr;
	int n = 0, ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	if (curr == 0) {
		ret = nilfs_cpfile_get_header_block(cpfile, &bh);
		if (ret < 0)
			goto out;
		kaddr = kmap_atomic(bh->b_page, KM_USER0);
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
		kunmap_atomic(kaddr, KM_USER0);
		brelse(bh);
		if (curr == 0) {
			ret = 0;
			goto out;
		}
	} else if (unlikely(curr == ~(__u64)0)) {
		ret = 0;
		goto out;
	}

	curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);
	if (unlikely(ret < 0)) {
		if (ret == -ENOENT)
			ret = 0; /* No snapshots (started from a hole block) */
		goto out;
	}
	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	while (n < nci) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
		curr = ~(__u64)0; /* Terminator */
		if (unlikely(nilfs_checkpoint_invalid(cp) ||
			     !nilfs_checkpoint_snapshot(cp)))
			break;
		nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci);
		ci = (void *)ci + cisz;
		n++;
		next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
		if (next == 0)
			break; /* reach end of the snapshot list */

		next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
		if (curr_blkoff != next_blkoff) {
			kunmap_atomic(kaddr, KM_USER0);
			brelse(bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
								0, &bh);
			if (unlikely(ret < 0)) {
				WARN_ON(ret == -ENOENT);
				goto out;
			}
			kaddr = kmap_atomic(bh->b_page, KM_USER0);
		}
		curr = next;
		curr_blkoff = next_blkoff;
	}
	kunmap_atomic(kaddr, KM_USER0);
	brelse(bh);
	*cnop = curr;
	ret = n;

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_get_cpinfo - get information on checkpoints
 * @cpfile: inode of checkpoint file
 * @cnop: place to pass the checkpoint number to start from and to return
 *        the next checkpoint number
 * @mode: %NILFS_CHECKPOINT or %NILFS_SNAPSHOT
 * @buf: buffer in which checkpoint information is stored
 * @cisz: byte size of one entry in @buf
 * @nci: maximum number of entries to store in @buf
 */
ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
				void *buf, unsigned cisz, size_t nci)
{
	switch (mode) {
	case NILFS_CHECKPOINT:
		return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci);
	case NILFS_SNAPSHOT:
		return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci);
	default:
		return -EINVAL;
	}
}
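
/*
 * Illustrative usage sketch (editor's addition): iterating over all valid
 * checkpoints with nilfs_cpfile_get_cpinfo().  The buffer size below is an
 * assumption for the example only; *cnop is advanced by each call, so the
 * loop simply repeats while a full buffer keeps coming back.
 *
 *	struct nilfs_cpinfo ci[16];
 *	__u64 cno = 1;
 *	ssize_t n;
 *
 *	do {
 *		n = nilfs_cpfile_get_cpinfo(cpfile, &cno, NILFS_CHECKPOINT,
 *					    ci, sizeof(ci[0]), 16);
 *		if (n < 0)
 *			break;			(error)
 *		... use ci[0] .. ci[n - 1] here ...
 *	} while (n == 16);
 */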

/**
 * nilfs_cpfile_delete_checkpoint - delete a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number to delete
 */
int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
{
	struct nilfs_cpinfo ci;
	__u64 tcno = cno;
	ssize_t nci;

	nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1);
	if (nci < 0)
		return nci;
	else if (nci == 0 || ci.ci_cno != cno)
		return -ENOENT;
	else if (nilfs_cpinfo_snapshot(&ci))
		return -EBUSY;

	return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
}

static struct nilfs_snapshot_list *
nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile,
				     __u64 cno,
				     struct buffer_head *bh,
				     void *kaddr)
{
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;

	if (cno != 0) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
		list = &cp->cp_snapshot_list;
	} else {
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		list = &header->ch_snapshot_list;
	}
	return list;
}
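
/*
 * Note (editor's addition): snapshots are chained on a doubly linked list
 * whose head is ch_snapshot_list in the cpfile header; entries live in each
 * snapshot's cp_snapshot_list, and checkpoint number 0 denotes the header
 * in ssl_next/ssl_prev.  nilfs_cpfile_set_snapshot() walks the list from
 * its tail so the list stays sorted by checkpoint number.  A hypothetical
 * list holding snapshots 3 and 7 would be linked roughly as:
 *
 *	header.ssl_next = 3,  header.ssl_prev = 7
 *	cp(3).ssl_next  = 7,  cp(3).ssl_prev  = 0
 *	cp(7).ssl_next  = 0,  cp(7).ssl_prev  = 3
 */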

static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 curr, prev;
	unsigned long curr_blkoff, prev_blkoff;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}
	if (nilfs_checkpoint_snapshot(cp)) {
		ret = 0;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}
	kunmap_atomic(kaddr, KM_USER0);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	list = &header->ch_snapshot_list;
	curr_bh = header_bh;
	get_bh(curr_bh);
	curr = 0;
	curr_blkoff = 0;
	prev = le64_to_cpu(list->ssl_prev);
	while (prev > cno) {
		prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
		curr = prev;
		if (curr_blkoff != prev_blkoff) {
			kunmap_atomic(kaddr, KM_USER0);
			brelse(curr_bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
								0, &curr_bh);
			if (ret < 0)
				goto out_header;
			kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
		}
		curr_blkoff = prev_blkoff;
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, curr, curr_bh, kaddr);
		list = &cp->cp_snapshot_list;
		prev = le64_to_cpu(list->ssl_prev);
	}
	kunmap_atomic(kaddr, KM_USER0);

	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_curr;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, curr, curr_bh, kaddr);
	list->ssl_prev = cpu_to_le64(cno);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
	nilfs_checkpoint_set_snapshot(cp);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(cno);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, 1);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(prev_bh);
	nilfs_mdt_mark_buffer_dirty(curr_bh);
	nilfs_mdt_mark_buffer_dirty(cp_bh);
	nilfs_mdt_mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_curr:
	brelse(curr_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *next_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 next, prev;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}
	if (!nilfs_checkpoint_snapshot(cp)) {
		ret = 0;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}

	list = &cp->cp_snapshot_list;
	next = le64_to_cpu(list->ssl_next);
	prev = le64_to_cpu(list->ssl_prev);
	kunmap_atomic(kaddr, KM_USER0);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	if (next != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0,
							&next_bh);
		if (ret < 0)
			goto out_header;
	} else {
		next_bh = header_bh;
		get_bh(next_bh);
	}
	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_next;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	kaddr = kmap_atomic(next_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, next, next_bh, kaddr);
	list->ssl_prev = cpu_to_le64(prev);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(next);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
	nilfs_checkpoint_clear_snapshot(cp);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, -1);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(next_bh);
	nilfs_mdt_mark_buffer_dirty(prev_bh);
	nilfs_mdt_mark_buffer_dirty(cp_bh);
	nilfs_mdt_mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_next:
	brelse(next_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_is_snapshot - determine whether a checkpoint is a snapshot
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 *
 * Description: nilfs_cpfile_is_snapshot() checks whether the checkpoint
 * specified by @cno is a snapshot.
 *
 * Return Value: On success, 1 is returned if the checkpoint specified by
 * @cno is a snapshot, or 0 if not. On error, one of the following negative
 * error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *bh;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
	if (ret < 0)
		goto out;
	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
	ret = nilfs_checkpoint_snapshot(cp);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(bh);

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_change_cpmode - change checkpoint mode
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @mode: mode of checkpoint
 *
 * Description: nilfs_cpfile_change_cpmode() changes the mode of the
 * checkpoint specified by @cno. The mode @mode is NILFS_CHECKPOINT or
 * NILFS_SNAPSHOT.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
{
	struct the_nilfs *nilfs;
	int ret;

	nilfs = NILFS_MDT(cpfile)->mi_nilfs;

	switch (mode) {
	case NILFS_CHECKPOINT:
		/*
		 * Check for protecting existing snapshot mounts:
		 * ns_mount_mutex is used to make this operation atomic and
		 * exclusive with a new mount job. Though it doesn't cover
		 * umount, it's enough for the purpose.
		 */
		mutex_lock(&nilfs->ns_mount_mutex);
		if (nilfs_checkpoint_is_mounted(nilfs, cno, 1)) {
			/* Current implementation does not have to protect
			   plain read-only mounts since they are exclusive
			   with a read/write mount and are protected from the
			   cleaner. */
			ret = -EBUSY;
		} else
			ret = nilfs_cpfile_clear_snapshot(cpfile, cno);
		mutex_unlock(&nilfs->ns_mount_mutex);
		return ret;
	case NILFS_SNAPSHOT:
		return nilfs_cpfile_set_snapshot(cpfile, cno);
	default:
		return -EINVAL;
	}
}
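
/*
 * Illustrative usage sketch (editor's addition): turning a checkpoint into
 * a snapshot and back again with nilfs_cpfile_change_cpmode().  The error
 * handling is an assumption for the example only; note that clearing the
 * snapshot flag fails with -EBUSY while the snapshot is mounted.
 *
 *	err = nilfs_cpfile_change_cpmode(cpfile, cno, NILFS_SNAPSHOT);
 *	...
 *	err = nilfs_cpfile_change_cpmode(cpfile, cno, NILFS_CHECKPOINT);
 */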

/**
 * nilfs_cpfile_get_stat - get checkpoint statistics
 * @cpfile: inode of checkpoint file
 * @cpstat: pointer to a structure of checkpoint statistics
 *
 * Description: nilfs_cpfile_get_stat() returns information about checkpoints.
 *
 * Return Value: On success, 0 is returned, and checkpoint information is
 * stored in the place pointed to by @cpstat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
	cpstat->cs_cno = nilfs_mdt_cno(cpfile);
	cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
	cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(bh);

 out_sem:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
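
/*
 * Illustrative usage sketch (editor's addition): reading the checkpoint
 * statistics.  The printk format and variable name are assumptions for the
 * example only.
 *
 *	struct nilfs_cpstat cpstat;
 *
 *	if (nilfs_cpfile_get_stat(cpfile, &cpstat) == 0)
 *		printk(KERN_DEBUG "cno=%llu ncps=%llu nsss=%llu\n",
 *		       (unsigned long long)cpstat.cs_cno,
 *		       (unsigned long long)cpstat.cs_ncps,
 *		       (unsigned long long)cpstat.cs_nsss);
 */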