/*
 * file.c
 *
 * PURPOSE
 *  File handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *  This file is distributed under the terms of the GNU General Public
 *  License (GPL). Copies of the GPL can be obtained from:
 *    ftp://prep.ai.mit.edu/pub/gnu/GPL
 *  Each contributing author retains all rights to their own work.
 *
 *  (C) 1998-1999 Dave Boynton
 *  (C) 1998-2004 Ben Fennema
 *  (C) 1999-2000 Stelias Computing Inc
 *
 * HISTORY
 *
 *  10/02/98 dgb  Attempt to integrate into udf.o
 *  10/07/98      Switched to using generic_readpage, etc., like isofs
 *                And it works!
 *  12/06/98 blf  Added udf_file_read. uses generic_file_read for all cases but
 *                ICBTAG_FLAG_AD_IN_ICB.
 *  04/06/99      64 bit file handling on 32 bit systems taken from ext2 file.c
 *  05/12/99      Preliminary file write support
 */

#include "udfdecl.h"
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/string.h> /* memset */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/pagemap.h>
#include <linux/uio.h>

#include "udf_i.h"
#include "udf_sb.h"

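/*
 * Make a page writable on a write fault: bring the file times up to date,
 * allocate blocks backing the faulting page via udf_get_block() (unless
 * the file data lives in the ICB, where space already exists) and mark
 * the page dirty.
 */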
static vm_fault_t udf_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct address_space *mapping = inode->i_mapping;
	struct page *page = vmf->page;
	loff_t size;
	unsigned int end;
	vm_fault_t ret = VM_FAULT_LOCKED;
	int err;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	filemap_invalidate_lock_shared(mapping);
	lock_page(page);
	size = i_size_read(inode);
	if (page->mapping != inode->i_mapping || page_offset(page) >= size) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	/* Space is already allocated for in-ICB file */
	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
		goto out_dirty;
	if (page->index == size >> PAGE_SHIFT)
		end = size & ~PAGE_MASK;
	else
		end = PAGE_SIZE;
	err = __block_write_begin(page, 0, end, udf_get_block);
	if (!err)
		err = block_commit_write(page, 0, end);
	if (err < 0) {
		unlock_page(page);
		ret = block_page_mkwrite_return(err);
		goto out_unlock;
	}
out_dirty:
	set_page_dirty(page);
	wait_for_stable_page(page);
out_unlock:
	filemap_invalidate_unlock_shared(mapping);
	sb_end_pagefault(inode->i_sb);
	return ret;
}

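/*
 * Read faults are handled by the generic helpers; write faults go
 * through udf_page_mkwrite() to allocate backing blocks.
 */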
static const struct vm_operations_struct udf_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= udf_page_mkwrite,
};

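/*
 * Write to a regular file. If the data currently lives in the ICB and
 * the write would no longer fit there, switch the inode to normal block
 * allocation first. For files that stay in the ICB, keep i_lenAlloc in
 * sync with i_size after a successful write.
 */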
static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t retval;
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct udf_inode_info *iinfo = UDF_I(inode);

	inode_lock(inode);

	retval = generic_write_checks(iocb, from);
	if (retval <= 0)
		goto out;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB &&
	    inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
					iocb->ki_pos + iov_iter_count(from))) {
		filemap_invalidate_lock(inode->i_mapping);
		retval = udf_expand_file_adinicb(inode);
		filemap_invalidate_unlock(inode->i_mapping);
		if (retval)
			goto out;
	}

	retval = __generic_file_write_iter(iocb, from);
out:
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB && retval > 0) {
		down_write(&iinfo->i_data_sem);
		iinfo->i_lenAlloc = inode->i_size;
		up_write(&iinfo->i_data_sem);
	}
	inode_unlock(inode);

	if (retval > 0) {
		mark_inode_dirty(inode);
		retval = generic_write_sync(iocb, retval);
	}

	return retval;
}

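/*
 * UDF-specific ioctls: report the volume identifier and the in-inode
 * extended attribute area, and remap a block through
 * udf_relocate_blocks() (CAP_SYS_ADMIN only).
 */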
long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	long old_block, new_block;
	int result;

	if (file_permission(filp, MAY_READ) != 0) {
		udf_debug("no permission to access inode %lu\n", inode->i_ino);
		return -EPERM;
	}

	if (!arg && ((cmd == UDF_GETVOLIDENT) || (cmd == UDF_GETEASIZE) ||
		     (cmd == UDF_RELOCATE_BLOCKS) || (cmd == UDF_GETEABLOCK))) {
		udf_debug("invalid argument to udf_ioctl\n");
		return -EINVAL;
	}

	switch (cmd) {
	case UDF_GETVOLIDENT:
		if (copy_to_user((char __user *)arg,
				 UDF_SB(inode->i_sb)->s_volume_ident, 32))
			return -EFAULT;
		return 0;
	case UDF_RELOCATE_BLOCKS:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(old_block, (long __user *)arg))
			return -EFAULT;
		result = udf_relocate_blocks(inode->i_sb,
						old_block, &new_block);
		if (result == 0)
			result = put_user(new_block, (long __user *)arg);
		return result;
	case UDF_GETEASIZE:
		return put_user(UDF_I(inode)->i_lenEAttr, (int __user *)arg);
	case UDF_GETEABLOCK:
		return copy_to_user((char __user *)arg,
				    UDF_I(inode)->i_data,
				    UDF_I(inode)->i_lenEAttr) ? -EFAULT : 0;
	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}

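/*
 * Called when a file reference is released. For the last writer, drop
 * preallocated extents and trim the tail extent to match i_size.
 */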
static int udf_release_file(struct inode *inode, struct file *filp)
{
	if (filp->f_mode & FMODE_WRITE &&
	    atomic_read(&inode->i_writecount) == 1) {
		/*
		 * Grab the inode lock to avoid races with writes changing
		 * i_size while we are running.
		 */
		inode_lock(inode);
		down_write(&UDF_I(inode)->i_data_sem);
		udf_discard_prealloc(inode);
		udf_truncate_tail_extent(inode);
		up_write(&UDF_I(inode)->i_data_sem);
		inode_unlock(inode);
	}
	return 0;
}

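/* Hook up udf_file_vm_ops so write faults allocate backing blocks. */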
static int udf_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &udf_file_vm_ops;

	return 0;
}

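/* File operations for regular UDF files */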
const struct file_operations udf_file_operations = {
	.read_iter		= generic_file_read_iter,
	.unlocked_ioctl		= udf_ioctl,
	.open			= generic_file_open,
	.mmap			= udf_file_mmap,
	.write_iter		= udf_file_write_iter,
	.release		= udf_release_file,
	.fsync			= generic_file_fsync,
	.splice_read		= filemap_splice_read,
	.splice_write		= iter_file_splice_write,
	.llseek			= generic_file_llseek,
};

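/*
 * Change file attributes: refuse uid/gid changes that conflict with
 * ownership forced at mount time (UDF_FLAG_UID_SET/UDF_FLAG_GID_SET),
 * resize through udf_setsize(), and propagate mode changes to the UDF
 * extended permission bits.
 */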
static int udf_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		       struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	int error;

	error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_UID) &&
	    UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET) &&
	    !uid_eq(attr->ia_uid, UDF_SB(sb)->s_uid))
		return -EPERM;
	if ((attr->ia_valid & ATTR_GID) &&
	    UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET) &&
	    !gid_eq(attr->ia_gid, UDF_SB(sb)->s_gid))
		return -EPERM;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		error = udf_setsize(inode, attr->ia_size);
		if (error)
			return error;
	}

	if (attr->ia_valid & ATTR_MODE)
		udf_update_extra_perms(inode, attr->ia_mode);

	setattr_copy(&nop_mnt_idmap, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

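/* Inode operations for regular UDF files */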
const struct inode_operations udf_file_inode_operations = {
	.setattr		= udf_setattr,
};
255