// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS regular file handling primitives including fsync().
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Amagai Yoshiji and Ryusuke Konishi.
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include "nilfs.h"
#include "segment.h"

int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	/*
	 * Called from fsync() system call
	 * This is the only entry point that can catch write and synch
	 * timing for both data blocks and intermediate blocks.
	 *
	 * This function should be implemented when the writeback function
	 * will be implemented.
	 */
	struct the_nilfs *nilfs;
	struct inode *inode = file->f_mapping->host;
	int err = 0;

	if (nilfs_inode_dirty(inode)) {
		if (datasync)
			err = nilfs_construct_dsync_segment(inode->i_sb, inode,
							    start, end);
		else
			err = nilfs_construct_segment(inode->i_sb);
	}

	nilfs = inode->i_sb->s_fs_info;
	if (!err)
		err = nilfs_flush_device(nilfs);

	return err;
}

static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct nilfs_transaction_info ti;
	int ret = 0;

	if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info)))
		return VM_FAULT_SIGBUS; /* -ENOSPC */

	sb_start_pagefault(inode->i_sb);
	lock_page(page);
	if (page->mapping != inode->i_mapping ||
	    page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) {
		unlock_page(page);
		ret = -EFAULT;	/* make the VM retry the fault */
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int fully_mapped = 1;

		bh = head = page_buffers(page);
		do {
			if (!buffer_mapped(bh)) {
				fully_mapped = 0;
				break;
			}
		} while (bh = bh->b_this_page, bh != head);

		if (fully_mapped) {
			SetPageMappedToDisk(page);
			goto mapped;
		}
	}
	unlock_page(page);

	/*
	 * fill hole blocks
	 */
	ret = nilfs_transaction_begin(inode->i_sb, &ti, 1);
	/* never returns -ENOMEM, but may return -ENOSPC */
	if (unlikely(ret))
		goto out;

	file_update_time(vma->vm_file);
	ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
	if (ret) {
		nilfs_transaction_abort(inode->i_sb);
		goto out;
	}
	nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits));
	nilfs_transaction_commit(inode->i_sb);

 mapped:
	wait_for_stable_page(page);
 out:
	sb_end_pagefault(inode->i_sb);
	return vmf_fs_error(ret);
}

static const struct vm_operations_struct nilfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= nilfs_page_mkwrite,
};

static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &nilfs_file_vm_ops;
	return 0;
}

/*
 * We have mostly NULL's here: the current defaults are ok for
 * the nilfs filesystem.
 */
const struct file_operations nilfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.unlocked_ioctl	= nilfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= nilfs_compat_ioctl,
#endif	/* CONFIG_COMPAT */
	.mmap		= nilfs_file_mmap,
	.open		= generic_file_open,
	/* .release	= nilfs_release_file, */
	.fsync		= nilfs_sync_file,
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
};

const struct inode_operations nilfs_file_inode_operations = {
	.setattr	= nilfs_setattr,
	.permission	= nilfs_permission,
	.fiemap		= nilfs_fiemap,
	.fileattr_get	= nilfs_fileattr_get,
	.fileattr_set	= nilfs_fileattr_set,
};

/* end of file */
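
/*
 * Illustrative user-space sketch (not part of this source file): fdatasync()
 * reaches nilfs_sync_file() through the ->fsync file operation with a nonzero
 * datasync argument, so only a dsync segment covering the data range is
 * constructed when the inode is dirty; fsync() passes datasync == 0 and
 * triggers full segment construction instead. The mount point and file name
 * below are assumptions for illustration only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path; adjust to an actual nilfs2 mount. */
	int fd = open("/mnt/nilfs/example.dat",
		      O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "hello", 5) != 5) {
		perror("write");
		close(fd);
		return 1;
	}
	/* datasync path: dsync segment construction for the dirty data range */
	if (fdatasync(fd) < 0)
		perror("fdatasync");
	close(fd);
	return 0;
}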