/*
 * fs/libfs.c
 * Library for filesystem writers.
 */

#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/vfs.h>
#include <linux/mutex.h>
#include <linux/exportfs.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>

#include <asm/uaccess.h>

int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
		   struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	generic_fillattr(inode, stat);
	stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9);
	return 0;
}

int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	buf->f_type = dentry->d_sb->s_magic;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	return 0;
}

/*
 * Retaining negative dentries for an in-memory filesystem just wastes
 * memory and lookup time: arrange for them to be deleted immediately.
 */
static int simple_delete_dentry(struct dentry *dentry)
{
	return 1;
}

/*
 * Lookup the data. This is trivial - if the dentry didn't already
 * exist, we know it is negative. Set d_op to delete negative dentries.
 */
struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
	static const struct dentry_operations simple_dentry_operations = {
		.d_delete = simple_delete_dentry,
	};

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);
	dentry->d_op = &simple_dentry_operations;
	d_add(dentry, NULL);
	return NULL;
}

int simple_sync_file(struct file *file, struct dentry *dentry, int datasync)
{
	return 0;
}

int dcache_dir_open(struct inode *inode, struct file *file)
{
	static struct qstr cursor_name = {.len = 1, .name = "."};

	file->private_data = d_alloc(file->f_path.dentry, &cursor_name);

	return file->private_data ? 0 : -ENOMEM;
}

int dcache_dir_close(struct inode *inode, struct file *file)
{
	dput(file->private_data);
	return 0;
}

loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
{
	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
	switch (origin) {
	case 1:
		offset += file->f_pos;
		/* fall through */
	case 0:
		if (offset >= 0)
			break;
		/* fall through */
	default:
		mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
		return -EINVAL;
	}
	if (offset != file->f_pos) {
		file->f_pos = offset;
		if (file->f_pos >= 2) {
			struct list_head *p;
			struct dentry *cursor = file->private_data;
			loff_t n = file->f_pos - 2;

			spin_lock(&dcache_lock);
			list_del(&cursor->d_u.d_child);
			p = file->f_path.dentry->d_subdirs.next;
			while (n && p != &file->f_path.dentry->d_subdirs) {
				struct dentry *next;
				next = list_entry(p, struct dentry, d_u.d_child);
				if (!d_unhashed(next) && next->d_inode)
					n--;
				p = p->next;
			}
			list_add_tail(&cursor->d_u.d_child, p);
			spin_unlock(&dcache_lock);
		}
	}
	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
	return offset;
}

/* Relationship between i_mode and the DT_xxx types */
static inline unsigned char dt_type(struct inode *inode)
{
	return (inode->i_mode >> 12) & 15;
}

/*
 * Directory is locked and all positive dentries in it are safe, since
 * for ramfs-type trees they can't go away without unlink() or rmdir(),
 * both impossible due to the lock on directory.
 */

int dcache_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct dentry *dentry = filp->f_path.dentry;
	struct dentry *cursor = filp->private_data;
	struct list_head *p, *q = &cursor->d_u.d_child;
	ino_t ino;
	int i = filp->f_pos;

	switch (i) {
	case 0:
		ino = dentry->d_inode->i_ino;
		if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
			break;
		filp->f_pos++;
		i++;
		/* fallthrough */
	case 1:
		ino = parent_ino(dentry);
		if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
			break;
		filp->f_pos++;
		i++;
		/* fallthrough */
	default:
		spin_lock(&dcache_lock);
		if (filp->f_pos == 2)
			list_move(q, &dentry->d_subdirs);

		for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
			struct dentry *next;
			next = list_entry(p, struct dentry, d_u.d_child);
			if (d_unhashed(next) || !next->d_inode)
				continue;

			spin_unlock(&dcache_lock);
			if (filldir(dirent, next->d_name.name,
				    next->d_name.len, filp->f_pos,
				    next->d_inode->i_ino,
				    dt_type(next->d_inode)) < 0)
				return 0;
			spin_lock(&dcache_lock);
			/* next is still alive */
			list_move(q, p);
			p = q;
			filp->f_pos++;
		}
		spin_unlock(&dcache_lock);
	}
	return 0;
}

ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos)
{
	return -EISDIR;
}

const struct file_operations simple_dir_operations = {
	.open		= dcache_dir_open,
	.release	= dcache_dir_close,
	.llseek		= dcache_dir_lseek,
	.read		= generic_read_dir,
	.readdir	= dcache_readdir,
	.fsync		= simple_sync_file,
};

const struct inode_operations simple_dir_inode_operations = {
	.lookup		= simple_lookup,
};

static const struct super_operations simple_super_operations = {
	.statfs		= simple_statfs,
};
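/*
 * Illustrative note (not part of this file's API): a pseudo-filesystem that
 * keeps its whole tree in the dcache typically points each directory inode
 * at the two vectors above when it creates the inode, e.g.
 *
 *	inode->i_op  = &simple_dir_inode_operations;
 *	inode->i_fop = &simple_dir_operations;
 *
 * so that lookup, readdir and llseek on its directories all come from libfs.
 */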
/*
 * Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that
 * will never be mountable)
 */
int get_sb_pseudo(struct file_system_type *fs_type, char *name,
	const struct super_operations *ops, unsigned long magic,
	struct vfsmount *mnt)
{
	struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
	struct dentry *dentry;
	struct inode *root;
	struct qstr d_name = {.name = name, .len = strlen(name)};

	if (IS_ERR(s))
		return PTR_ERR(s);

	s->s_flags = MS_NOUSER;
	s->s_maxbytes = MAX_LFS_FILESIZE;
	s->s_blocksize = PAGE_SIZE;
	s->s_blocksize_bits = PAGE_SHIFT;
	s->s_magic = magic;
	s->s_op = ops ? ops : &simple_super_operations;
	s->s_time_gran = 1;
	root = new_inode(s);
	if (!root)
		goto Enomem;
	/*
	 * since this is the first inode, make it number 1. New inodes created
	 * after this must take care not to collide with it (by passing
	 * max_reserved of 1 to iunique).
	 */
	root->i_ino = 1;
	root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
	root->i_atime = root->i_mtime = root->i_ctime = CURRENT_TIME;
	dentry = d_alloc(NULL, &d_name);
	if (!dentry) {
		iput(root);
		goto Enomem;
	}
	dentry->d_sb = s;
	dentry->d_parent = dentry;
	d_instantiate(dentry, root);
	s->s_root = dentry;
	s->s_flags |= MS_ACTIVE;
	simple_set_mnt(mnt, s);
	return 0;

Enomem:
	deactivate_locked_super(s);
	return -ENOMEM;
}

int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;

	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	atomic_inc(&inode->i_count);
	dget(dentry);
	d_instantiate(dentry, inode);
	return 0;
}

static inline int simple_positive(struct dentry *dentry)
{
	return dentry->d_inode && !d_unhashed(dentry);
}

int simple_empty(struct dentry *dentry)
{
	struct dentry *child;
	int ret = 0;

	spin_lock(&dcache_lock);
	list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child)
		if (simple_positive(child))
			goto out;
	ret = 1;
out:
	spin_unlock(&dcache_lock);
	return ret;
}

int simple_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

int simple_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	simple_unlink(dir, dentry);
	drop_nlink(dir);
	return 0;
}

int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
		  struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(old_dentry->d_inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		simple_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime =
		new_dir->i_mtime = inode->i_ctime = CURRENT_TIME;

	return 0;
}

int simple_readpage(struct file *file, struct page *page)
{
	clear_highpage(page);
	flush_dcache_page(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

int simple_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct page *page;
	pgoff_t index;

	index = pos >> PAGE_CACHE_SHIFT;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	*pagep = page;

	if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
		unsigned from = pos & (PAGE_CACHE_SIZE - 1);

		zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE);
	}
	return 0;
}

/**
 * simple_write_end - .write_end helper for non-block-device FSes
 * @file: see .write_end of address_space_operations
 * @mapping: "
 * @pos: "
 * @len: "
 * @copied: "
 * @page: "
 * @fsdata: "
 *
 * simple_write_end does the minimum needed for updating a page after writing is
 * done. It has the same API signature as the .write_end of the
 * address_space_operations vector, so it can just be set onto .write_end for
 * FSes that don't need any other processing. i_mutex is assumed to be held.
 * Block based filesystems should use generic_write_end().
 * NOTE: Even though i_size might get updated by this function, mark_inode_dirty
 * is not called, so a filesystem that actually does store data in .write_inode
 * should extend on what's done here with a call to mark_inode_dirty() in the
 * case that i_size has changed.
 */
int simple_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	loff_t last_pos = pos + copied;

	/* zero the stale part of the page if we did a short copy */
	if (copied < len) {
		unsigned from = pos & (PAGE_CACHE_SIZE - 1);

		zero_user(page, from + copied, len - copied);
	}

	if (!PageUptodate(page))
		SetPageUptodate(page);
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size)
		i_size_write(inode, last_pos);

	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}
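/*
 * Illustrative note (not part of this file): an in-memory filesystem that
 * wants page-cache-backed regular files can usually use the three helpers
 * above unchanged as its address_space_operations, along the lines of what
 * ramfs does:
 *
 *	static const struct address_space_operations example_aops = {
 *		.readpage	= simple_readpage,
 *		.write_begin	= simple_write_begin,
 *		.write_end	= simple_write_end,
 *	};
 *
 * example_aops is a placeholder name; a real filesystem would also choose a
 * suitable .set_page_dirty implementation.
 */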
/*
 * the inodes created here are not hashed. If you use iunique to generate
 * unique inode values later for this filesystem, then you must take care
 * to pass it an appropriate max_reserved value to avoid collisions.
 */
int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files)
{
	struct inode *inode;
	struct dentry *root;
	struct dentry *dentry;
	int i;

	s->s_blocksize = PAGE_CACHE_SIZE;
	s->s_blocksize_bits = PAGE_CACHE_SHIFT;
	s->s_magic = magic;
	s->s_op = &simple_super_operations;
	s->s_time_gran = 1;

	inode = new_inode(s);
	if (!inode)
		return -ENOMEM;
	/*
	 * because the root inode is 1, the files array must not contain an
	 * entry at index 1
	 */
	inode->i_ino = 1;
	inode->i_mode = S_IFDIR | 0755;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	inode->i_nlink = 2;
	root = d_alloc_root(inode);
	if (!root) {
		iput(inode);
		return -ENOMEM;
	}
	for (i = 0; !files->name || files->name[0]; i++, files++) {
		if (!files->name)
			continue;

		/* warn if it tries to conflict with the root inode */
		if (unlikely(i == 1))
			printk(KERN_WARNING "%s: %s passed in a files array "
				"with an index of 1!\n", __func__,
				s->s_type->name);

		dentry = d_alloc_name(root, files->name);
		if (!dentry)
			goto out;
		inode = new_inode(s);
		if (!inode)
			goto out;
		inode->i_mode = S_IFREG | files->mode;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_fop = files->ops;
		inode->i_ino = i;
		d_add(dentry, inode);
	}
	s->s_root = root;
	return 0;
out:
	d_genocide(root);
	dput(root);
	return -ENOMEM;
}
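/*
 * Illustrative note (not part of this file): callers describe the tree with
 * a struct tree_descr array, leaving index 1 unused (reserved for the root
 * inode) and terminating the array with an entry whose name is the empty
 * string, roughly:
 *
 *	static struct tree_descr example_files[] = {
 *		[2] = { "status",   &example_status_ops,   0444 },
 *		[3] = { "register", &example_register_ops, 0200 },
 *		{ "" }
 *	};
 *
 *	err = simple_fill_super(sb, EXAMPLE_MAGIC, example_files);
 *
 * example_files, the two file_operations vectors and EXAMPLE_MAGIC are
 * placeholders for this sketch, not names defined elsewhere in the kernel.
 */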
static DEFINE_SPINLOCK(pin_fs_lock);

int simple_pin_fs(struct file_system_type *type, struct vfsmount **mount, int *count)
{
	struct vfsmount *mnt = NULL;
	spin_lock(&pin_fs_lock);
	if (unlikely(!*mount)) {
		spin_unlock(&pin_fs_lock);
		mnt = vfs_kern_mount(type, 0, type->name, NULL);
		if (IS_ERR(mnt))
			return PTR_ERR(mnt);
		spin_lock(&pin_fs_lock);
		if (!*mount)
			*mount = mnt;
	}
	mntget(*mount);
	++*count;
	spin_unlock(&pin_fs_lock);
	mntput(mnt);
	return 0;
}

void simple_release_fs(struct vfsmount **mount, int *count)
{
	struct vfsmount *mnt;
	spin_lock(&pin_fs_lock);
	mnt = *mount;
	if (!--*count)
		*mount = NULL;
	spin_unlock(&pin_fs_lock);
	mntput(mnt);
}

/**
 * simple_read_from_buffer - copy data from the buffer to user space
 * @to: the user space buffer to read to
 * @count: the maximum number of bytes to read
 * @ppos: the current position in the buffer
 * @from: the buffer to read from
 * @available: the size of the buffer
 *
 * The simple_read_from_buffer() function reads up to @count bytes from the
 * buffer @from at offset @ppos into the user space address starting at @to.
 *
 * On success, the number of bytes read is returned and the offset @ppos is
 * advanced by this number, or a negative value is returned on error.
 **/
ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
				const void *from, size_t available)
{
	loff_t pos = *ppos;
	size_t ret;

	if (pos < 0)
		return -EINVAL;
	if (pos >= available || !count)
		return 0;
	if (count > available - pos)
		count = available - pos;
	ret = copy_to_user(to, from + pos, count);
	if (ret == count)
		return -EFAULT;
	count -= ret;
	*ppos = pos + count;
	return count;
}

/**
 * simple_write_to_buffer - copy data from user space to the buffer
 * @to: the buffer to write to
 * @available: the size of the buffer
 * @ppos: the current position in the buffer
 * @from: the user space buffer to read from
 * @count: the maximum number of bytes to read
 *
 * The simple_write_to_buffer() function reads up to @count bytes from the user
 * space address starting at @from into the buffer @to at offset @ppos.
 *
 * On success, the number of bytes written is returned and the offset @ppos is
 * advanced by this number, or a negative value is returned on error.
 **/
ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
			       const void __user *from, size_t count)
{
	loff_t pos = *ppos;
	size_t res;

	if (pos < 0)
		return -EINVAL;
	if (pos >= available || !count)
		return 0;
	if (count > available - pos)
		count = available - pos;
	res = copy_from_user(to + pos, from, count);
	if (res == count)
		return -EFAULT;
	count -= res;
	*ppos = pos + count;
	return count;
}
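/*
 * Illustrative note (not part of this file): these helpers suit small, fully
 * in-memory files, where a read method collapses to a one-liner, for
 * instance:
 *
 *	static ssize_t example_read(struct file *file, char __user *buf,
 *				    size_t count, loff_t *ppos)
 *	{
 *		return simple_read_from_buffer(buf, count, ppos,
 *					       example_msg, strlen(example_msg));
 *	}
 *
 * example_read and example_msg are placeholders for this sketch.
 */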
/**
 * memory_read_from_buffer - copy data from the buffer
 * @to: the kernel space buffer to read to
 * @count: the maximum number of bytes to read
 * @ppos: the current position in the buffer
 * @from: the buffer to read from
 * @available: the size of the buffer
 *
 * The memory_read_from_buffer() function reads up to @count bytes from the
 * buffer @from at offset @ppos into the kernel space address starting at @to.
 *
 * On success, the number of bytes read is returned and the offset @ppos is
 * advanced by this number, or a negative value is returned on error.
 **/
ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
				const void *from, size_t available)
{
	loff_t pos = *ppos;

	if (pos < 0)
		return -EINVAL;
	if (pos >= available)
		return 0;
	if (count > available - pos)
		count = available - pos;
	memcpy(to, from + pos, count);
	*ppos = pos + count;

	return count;
}

/*
 * Transaction based IO.
 * The file expects a single write which triggers the transaction, and then
 * possibly a read which collects the result - which is stored in a
 * file-local buffer.
 */

void simple_transaction_set(struct file *file, size_t n)
{
	struct simple_transaction_argresp *ar = file->private_data;

	BUG_ON(n > SIMPLE_TRANSACTION_LIMIT);

	/*
	 * The barrier ensures that ar->size will really remain zero until
	 * ar->data is ready for reading.
	 */
	smp_mb();
	ar->size = n;
}

char *simple_transaction_get(struct file *file, const char __user *buf, size_t size)
{
	struct simple_transaction_argresp *ar;
	static DEFINE_SPINLOCK(simple_transaction_lock);

	if (size > SIMPLE_TRANSACTION_LIMIT - 1)
		return ERR_PTR(-EFBIG);

	ar = (struct simple_transaction_argresp *)get_zeroed_page(GFP_KERNEL);
	if (!ar)
		return ERR_PTR(-ENOMEM);

	spin_lock(&simple_transaction_lock);

	/* only one write allowed per open */
	if (file->private_data) {
		spin_unlock(&simple_transaction_lock);
		free_page((unsigned long)ar);
		return ERR_PTR(-EBUSY);
	}

	file->private_data = ar;

	spin_unlock(&simple_transaction_lock);

	if (copy_from_user(ar->data, buf, size))
		return ERR_PTR(-EFAULT);

	return ar->data;
}

ssize_t simple_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
{
	struct simple_transaction_argresp *ar = file->private_data;

	if (!ar)
		return 0;
	return simple_read_from_buffer(buf, size, pos, ar->data, ar->size);
}

int simple_transaction_release(struct inode *inode, struct file *file)
{
	free_page((unsigned long)file->private_data);
	return 0;
}
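/*
 * Illustrative note (not part of this file): a control file built on these
 * helpers wires simple_transaction_read() and simple_transaction_release()
 * into its file_operations and only implements the write side itself,
 * roughly:
 *
 *	static ssize_t example_write(struct file *file, const char __user *buf,
 *				     size_t size, loff_t *pos)
 *	{
 *		char *data = simple_transaction_get(file, buf, size);
 *
 *		if (IS_ERR(data))
 *			return PTR_ERR(data);
 *		.. act on the request in data, write the reply back into it ..
 *		simple_transaction_set(file, reply_len);
 *		return size;
 *	}
 *
 * example_write and reply_len are placeholders; nfsd's nfsctl files follow
 * this pattern.
 */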
/* Simple attribute files */

struct simple_attr {
	int (*get)(void *, u64 *);
	int (*set)(void *, u64);
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};

/* simple_attr_open is called by an actual attribute open file operation
 * to set the attribute specific access operations.
 */
int simple_attr_open(struct inode *inode, struct file *file,
		     int (*get)(void *, u64 *), int (*set)(void *, u64),
		     const char *fmt)
{
	struct simple_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	attr->data = inode->i_private;
	attr->fmt = fmt;
	mutex_init(&attr->mutex);

	file->private_data = attr;

	return nonseekable_open(inode, file);
}

int simple_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

/* read from the buffer that is filled with the get function */
ssize_t simple_attr_read(struct file *file, char __user *buf,
			 size_t len, loff_t *ppos)
{
	struct simple_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;

	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

/* interpret the buffer as a number to call the set function with */
ssize_t simple_attr_write(struct file *file, const char __user *buf,
			  size_t len, loff_t *ppos)
{
	struct simple_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	ret = attr->set(attr->data, val);
	if (ret == 0)
		ret = len; /* on success, claim we got the whole input */
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
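/*
 * Illustrative note (not part of this file): most users reach these helpers
 * through the DEFINE_SIMPLE_ATTRIBUTE() macro in <linux/fs.h>, which builds
 * the matching file_operations from a get/set pair and a printf format:
 *
 *	static int example_get(void *data, u64 *val)
 *	{
 *		*val = *(u32 *)data;
 *		return 0;
 *	}
 *
 *	static int example_set(void *data, u64 val)
 *	{
 *		*(u32 *)data = val;
 *		return 0;
 *	}
 *
 *	DEFINE_SIMPLE_ATTRIBUTE(example_fops, example_get, example_set, "%llu\n");
 *
 * example_get, example_set and example_fops are placeholders for this sketch.
 */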
/**
 * generic_fh_to_dentry - generic helper for the fh_to_dentry export operation
 * @sb: filesystem to do the file handle conversion on
 * @fid: file handle to convert
 * @fh_len: length of the file handle in bytes
 * @fh_type: type of file handle
 * @get_inode: filesystem callback to retrieve inode
 *
 * This function decodes @fid as long as it has one of the well-known
 * Linux filehandle types and calls @get_inode on it to retrieve the
 * inode for the object specified in the file handle.
 */
struct dentry *generic_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type, struct inode *(*get_inode)
			(struct super_block *sb, u64 ino, u32 gen))
{
	struct inode *inode = NULL;

	if (fh_len < 2)
		return NULL;

	switch (fh_type) {
	case FILEID_INO32_GEN:
	case FILEID_INO32_GEN_PARENT:
		inode = get_inode(sb, fid->i32.ino, fid->i32.gen);
		break;
	}

	return d_obtain_alias(inode);
}
EXPORT_SYMBOL_GPL(generic_fh_to_dentry);

/**
 * generic_fh_to_parent - generic helper for the fh_to_parent export operation
 * @sb: filesystem to do the file handle conversion on
 * @fid: file handle to convert
 * @fh_len: length of the file handle in bytes
 * @fh_type: type of file handle
 * @get_inode: filesystem callback to retrieve inode
 *
 * This function decodes @fid as long as it has one of the well-known
 * Linux filehandle types and calls @get_inode on it to retrieve the
 * inode for the _parent_ object specified in the file handle if it
 * is specified in the file handle, or NULL otherwise.
 */
struct dentry *generic_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type, struct inode *(*get_inode)
			(struct super_block *sb, u64 ino, u32 gen))
{
	struct inode *inode = NULL;

	if (fh_len <= 2)
		return NULL;

	switch (fh_type) {
	case FILEID_INO32_GEN_PARENT:
		inode = get_inode(sb, fid->i32.parent_ino,
				  (fh_len > 3 ? fid->i32.parent_gen : 0));
		break;
	}

	return d_obtain_alias(inode);
}
EXPORT_SYMBOL_GPL(generic_fh_to_parent);
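/*
 * Illustrative note (not part of this file): a filesystem exposes these
 * through thin wrappers in its export_operations that supply the
 * inode-lookup callback, roughly:
 *
 *	static struct dentry *example_fh_to_dentry(struct super_block *sb,
 *			struct fid *fid, int fh_len, int fh_type)
 *	{
 *		return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
 *					    example_nfs_get_inode);
 *	}
 *
 *	static const struct export_operations example_export_ops = {
 *		.fh_to_dentry	= example_fh_to_dentry,
 *		.fh_to_parent	= example_fh_to_parent,
 *	};
 *
 * The example_* names are placeholders; ext2's ext2_fh_to_dentry() follows
 * this shape.
 */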
int simple_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0, /* metadata-only; caller takes care of data */
	};
	struct inode *inode = dentry->d_inode;
	int err;
	int ret;

	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY))
		return ret;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		return ret;

	err = sync_inode(inode, &wbc);
	if (ret == 0)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(simple_fsync);

EXPORT_SYMBOL(dcache_dir_close);
EXPORT_SYMBOL(dcache_dir_lseek);
EXPORT_SYMBOL(dcache_dir_open);
EXPORT_SYMBOL(dcache_readdir);
EXPORT_SYMBOL(generic_read_dir);
EXPORT_SYMBOL(get_sb_pseudo);
EXPORT_SYMBOL(simple_write_begin);
EXPORT_SYMBOL(simple_write_end);
EXPORT_SYMBOL(simple_dir_inode_operations);
EXPORT_SYMBOL(simple_dir_operations);
EXPORT_SYMBOL(simple_empty);
EXPORT_SYMBOL(simple_fill_super);
EXPORT_SYMBOL(simple_getattr);
EXPORT_SYMBOL(simple_link);
EXPORT_SYMBOL(simple_lookup);
EXPORT_SYMBOL(simple_pin_fs);
EXPORT_SYMBOL(simple_readpage);
EXPORT_SYMBOL(simple_release_fs);
EXPORT_SYMBOL(simple_rename);
EXPORT_SYMBOL(simple_rmdir);
EXPORT_SYMBOL(simple_statfs);
EXPORT_SYMBOL(simple_sync_file);
EXPORT_SYMBOL(simple_unlink);
EXPORT_SYMBOL(simple_read_from_buffer);
EXPORT_SYMBOL(simple_write_to_buffer);
EXPORT_SYMBOL(memory_read_from_buffer);
EXPORT_SYMBOL(simple_transaction_set);
EXPORT_SYMBOL(simple_transaction_get);
EXPORT_SYMBOL(simple_transaction_read);
EXPORT_SYMBOL(simple_transaction_release);
EXPORT_SYMBOL_GPL(simple_attr_open);
EXPORT_SYMBOL_GPL(simple_attr_release);
EXPORT_SYMBOL_GPL(simple_attr_read);
EXPORT_SYMBOL_GPL(simple_attr_write);