xref: /linux/fs/ocfs2/file.c (revision 9d9c1cfec01cdbf24bd9322ed555713a20422115)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * file.c
4  *
5  * File open, close, extend, truncate
6  *
7  * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
8  */
9 
10 #include <linux/capability.h>
11 #include <linux/fs.h>
12 #include <linux/types.h>
13 #include <linux/slab.h>
14 #include <linux/highmem.h>
15 #include <linux/pagemap.h>
16 #include <linux/uio.h>
17 #include <linux/sched.h>
18 #include <linux/splice.h>
19 #include <linux/mount.h>
20 #include <linux/writeback.h>
21 #include <linux/falloc.h>
22 #include <linux/quotaops.h>
23 #include <linux/blkdev.h>
24 #include <linux/backing-dev.h>
25 
26 #include <cluster/masklog.h>
27 
28 #include "ocfs2.h"
29 
30 #include "alloc.h"
31 #include "aops.h"
32 #include "dir.h"
33 #include "dlmglue.h"
34 #include "extent_map.h"
35 #include "file.h"
36 #include "sysfile.h"
37 #include "inode.h"
38 #include "ioctl.h"
39 #include "journal.h"
40 #include "locks.h"
41 #include "mmap.h"
42 #include "suballoc.h"
43 #include "super.h"
44 #include "xattr.h"
45 #include "acl.h"
46 #include "quota.h"
47 #include "refcounttree.h"
48 #include "ocfs2_trace.h"
49 
50 #include "buffer_head_io.h"
51 
ocfs2_init_file_private(struct inode * inode,struct file * file)52 static int ocfs2_init_file_private(struct inode *inode, struct file *file)
53 {
54 	struct ocfs2_file_private *fp;
55 
56 	fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
57 	if (!fp)
58 		return -ENOMEM;
59 
60 	fp->fp_file = file;
61 	mutex_init(&fp->fp_mutex);
62 	ocfs2_file_lock_res_init(&fp->fp_flock, fp);
63 	file->private_data = fp;
64 
65 	return 0;
66 }
67 
ocfs2_free_file_private(struct inode * inode,struct file * file)68 static void ocfs2_free_file_private(struct inode *inode, struct file *file)
69 {
70 	struct ocfs2_file_private *fp = file->private_data;
71 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
72 
73 	if (fp) {
74 		ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
75 		ocfs2_lock_res_free(&fp->fp_flock);
76 		kfree(fp);
77 		file->private_data = NULL;
78 	}
79 }
80 
ocfs2_file_open(struct inode * inode,struct file * file)81 static int ocfs2_file_open(struct inode *inode, struct file *file)
82 {
83 	int status;
84 	int mode = file->f_flags;
85 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
86 
87 	trace_ocfs2_file_open(inode, file, file->f_path.dentry,
88 			      (unsigned long long)oi->ip_blkno,
89 			      file->f_path.dentry->d_name.len,
90 			      file->f_path.dentry->d_name.name, mode);
91 
92 	if (file->f_mode & FMODE_WRITE) {
93 		status = dquot_initialize(inode);
94 		if (status)
95 			goto leave;
96 	}
97 
98 	spin_lock(&oi->ip_lock);
99 
100 	/* Check that the inode hasn't been wiped from disk by another
101 	 * node. If it hasn't then we're safe as long as we hold the
102 	 * spin lock until our increment of open count. */
103 	if (oi->ip_flags & OCFS2_INODE_DELETED) {
104 		spin_unlock(&oi->ip_lock);
105 
106 		status = -ENOENT;
107 		goto leave;
108 	}
109 
110 	if (mode & O_DIRECT)
111 		oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;
112 
113 	oi->ip_open_count++;
114 	spin_unlock(&oi->ip_lock);
115 
116 	status = ocfs2_init_file_private(inode, file);
117 	if (status) {
118 		/*
119 		 * We want to set open count back if we're failing the
120 		 * open.
121 		 */
122 		spin_lock(&oi->ip_lock);
123 		oi->ip_open_count--;
124 		spin_unlock(&oi->ip_lock);
125 	}
126 
127 	file->f_mode |= FMODE_NOWAIT;
128 
129 leave:
130 	return status;
131 }
132 
ocfs2_file_release(struct inode * inode,struct file * file)133 static int ocfs2_file_release(struct inode *inode, struct file *file)
134 {
135 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
136 
137 	spin_lock(&oi->ip_lock);
138 	if (!--oi->ip_open_count)
139 		oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
140 
141 	trace_ocfs2_file_release(inode, file, file->f_path.dentry,
142 				 oi->ip_blkno,
143 				 file->f_path.dentry->d_name.len,
144 				 file->f_path.dentry->d_name.name,
145 				 oi->ip_open_count);
146 	spin_unlock(&oi->ip_lock);
147 
148 	ocfs2_free_file_private(inode, file);
149 
150 	return 0;
151 }
152 
ocfs2_dir_open(struct inode * inode,struct file * file)153 static int ocfs2_dir_open(struct inode *inode, struct file *file)
154 {
155 	return ocfs2_init_file_private(inode, file);
156 }
157 
ocfs2_dir_release(struct inode * inode,struct file * file)158 static int ocfs2_dir_release(struct inode *inode, struct file *file)
159 {
160 	ocfs2_free_file_private(inode, file);
161 	return 0;
162 }
163 
ocfs2_sync_file(struct file * file,loff_t start,loff_t end,int datasync)164 static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
165 			   int datasync)
166 {
167 	int err = 0;
168 	struct inode *inode = file->f_mapping->host;
169 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
170 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
171 	journal_t *journal = osb->journal->j_journal;
172 	int ret;
173 	tid_t commit_tid;
174 	bool needs_barrier = false;
175 
176 	trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
177 			      oi->ip_blkno,
178 			      file->f_path.dentry->d_name.len,
179 			      file->f_path.dentry->d_name.name,
180 			      (unsigned long long)datasync);
181 
182 	if (unlikely(ocfs2_emergency_state(osb)))
183 		return -EROFS;
184 
185 	err = file_write_and_wait_range(file, start, end);
186 	if (err)
187 		return err;
188 
189 	commit_tid = datasync ? oi->i_datasync_tid : oi->i_sync_tid;
190 	if (journal->j_flags & JBD2_BARRIER &&
191 	    !jbd2_trans_will_send_data_barrier(journal, commit_tid))
192 		needs_barrier = true;
193 	err = jbd2_complete_transaction(journal, commit_tid);
194 	if (needs_barrier) {
195 		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
196 		if (!err)
197 			err = ret;
198 	}
199 
200 	if (err)
201 		mlog_errno(err);
202 
203 	return (err < 0) ? -EIO : 0;
204 }
205 
ocfs2_should_update_atime(struct inode * inode,struct vfsmount * vfsmnt)206 int ocfs2_should_update_atime(struct inode *inode,
207 			      struct vfsmount *vfsmnt)
208 {
209 	struct timespec64 now;
210 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
211 
212 	if (unlikely(ocfs2_emergency_state(osb)))
213 		return 0;
214 
215 	if ((inode->i_flags & S_NOATIME) ||
216 	    ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)))
217 		return 0;
218 
219 	/*
220 	 * We can be called with no vfsmnt structure - NFSD will
221 	 * sometimes do this.
222 	 *
223 	 * Note that our action here is different than touch_atime() -
224 	 * if we can't tell whether this is a noatime mount, then we
225 	 * don't know whether to trust the value of s_atime_quantum.
226 	 */
227 	if (vfsmnt == NULL)
228 		return 0;
229 
230 	if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
231 	    ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
232 		return 0;
233 
234 	if (vfsmnt->mnt_flags & MNT_RELATIME) {
235 		struct timespec64 ctime = inode_get_ctime(inode);
236 		struct timespec64 atime = inode_get_atime(inode);
237 		struct timespec64 mtime = inode_get_mtime(inode);
238 
239 		if ((timespec64_compare(&atime, &mtime) <= 0) ||
240 		    (timespec64_compare(&atime, &ctime) <= 0))
241 			return 1;
242 
243 		return 0;
244 	}
245 
246 	now = current_time(inode);
247 	if ((now.tv_sec - inode_get_atime_sec(inode) <= osb->s_atime_quantum))
248 		return 0;
249 	else
250 		return 1;
251 }
252 
ocfs2_update_inode_atime(struct inode * inode,struct buffer_head * bh)253 int ocfs2_update_inode_atime(struct inode *inode,
254 			     struct buffer_head *bh)
255 {
256 	int ret;
257 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
258 	handle_t *handle;
259 	struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;
260 
261 	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
262 	if (IS_ERR(handle)) {
263 		ret = PTR_ERR(handle);
264 		mlog_errno(ret);
265 		goto out;
266 	}
267 
268 	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
269 				      OCFS2_JOURNAL_ACCESS_WRITE);
270 	if (ret) {
271 		mlog_errno(ret);
272 		goto out_commit;
273 	}
274 
275 	/*
276 	 * Don't use ocfs2_mark_inode_dirty() here as we don't always
277 	 * have i_rwsem to guard against concurrent changes to other
278 	 * inode fields.
279 	 */
280 	inode_set_atime_to_ts(inode, current_time(inode));
281 	di->i_atime = cpu_to_le64(inode_get_atime_sec(inode));
282 	di->i_atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode));
283 	ocfs2_update_inode_fsync_trans(handle, inode, 0);
284 	ocfs2_journal_dirty(handle, bh);
285 
286 out_commit:
287 	ocfs2_commit_trans(osb, handle);
288 out:
289 	return ret;
290 }
291 
ocfs2_set_inode_size(handle_t * handle,struct inode * inode,struct buffer_head * fe_bh,u64 new_i_size)292 int ocfs2_set_inode_size(handle_t *handle,
293 				struct inode *inode,
294 				struct buffer_head *fe_bh,
295 				u64 new_i_size)
296 {
297 	int status;
298 
299 	i_size_write(inode, new_i_size);
300 	inode->i_blocks = ocfs2_inode_sector_count(inode);
301 	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
302 
303 	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
304 	if (status < 0) {
305 		mlog_errno(status);
306 		goto bail;
307 	}
308 
309 bail:
310 	return status;
311 }
312 
ocfs2_simple_size_update(struct inode * inode,struct buffer_head * di_bh,u64 new_i_size)313 int ocfs2_simple_size_update(struct inode *inode,
314 			     struct buffer_head *di_bh,
315 			     u64 new_i_size)
316 {
317 	int ret;
318 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
319 	handle_t *handle = NULL;
320 
321 	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
322 	if (IS_ERR(handle)) {
323 		ret = PTR_ERR(handle);
324 		mlog_errno(ret);
325 		goto out;
326 	}
327 
328 	ret = ocfs2_set_inode_size(handle, inode, di_bh,
329 				   new_i_size);
330 	if (ret < 0)
331 		mlog_errno(ret);
332 
333 	ocfs2_update_inode_fsync_trans(handle, inode, 0);
334 	ocfs2_commit_trans(osb, handle);
335 out:
336 	return ret;
337 }
338 
ocfs2_cow_file_pos(struct inode * inode,struct buffer_head * fe_bh,u64 offset)339 static int ocfs2_cow_file_pos(struct inode *inode,
340 			      struct buffer_head *fe_bh,
341 			      u64 offset)
342 {
343 	int status;
344 	u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
345 	unsigned int num_clusters = 0;
346 	unsigned int ext_flags = 0;
347 
348 	/*
349 	 * If the new offset is aligned to the range of the cluster, there is
350 	 * no space for ocfs2_zero_range_for_truncate to fill, so no need to
351 	 * CoW either.
352 	 */
353 	if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
354 		return 0;
355 
356 	status = ocfs2_get_clusters(inode, cpos, &phys,
357 				    &num_clusters, &ext_flags);
358 	if (status) {
359 		mlog_errno(status);
360 		goto out;
361 	}
362 
363 	if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
364 		goto out;
365 
366 	return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
367 
368 out:
369 	return status;
370 }
371 
ocfs2_orphan_for_truncate(struct ocfs2_super * osb,struct inode * inode,struct buffer_head * fe_bh,u64 new_i_size)372 static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
373 				     struct inode *inode,
374 				     struct buffer_head *fe_bh,
375 				     u64 new_i_size)
376 {
377 	int status;
378 	handle_t *handle;
379 	struct ocfs2_dinode *di;
380 	u64 cluster_bytes;
381 
382 	/*
383 	 * We need to CoW the cluster contains the offset if it is reflinked
384 	 * since we will call ocfs2_zero_range_for_truncate later which will
385 	 * write "0" from offset to the end of the cluster.
386 	 */
387 	status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
388 	if (status) {
389 		mlog_errno(status);
390 		return status;
391 	}
392 
393 	/* TODO: This needs to actually orphan the inode in this
394 	 * transaction. */
395 
396 	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
397 	if (IS_ERR(handle)) {
398 		status = PTR_ERR(handle);
399 		mlog_errno(status);
400 		goto out;
401 	}
402 
403 	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
404 					 OCFS2_JOURNAL_ACCESS_WRITE);
405 	if (status < 0) {
406 		mlog_errno(status);
407 		goto out_commit;
408 	}
409 
410 	/*
411 	 * Do this before setting i_size.
412 	 */
413 	cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
414 	status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
415 					       cluster_bytes);
416 	if (status) {
417 		mlog_errno(status);
418 		goto out_commit;
419 	}
420 
421 	i_size_write(inode, new_i_size);
422 	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
423 
424 	di = (struct ocfs2_dinode *) fe_bh->b_data;
425 	di->i_size = cpu_to_le64(new_i_size);
426 	di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime_sec(inode));
427 	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
428 	ocfs2_update_inode_fsync_trans(handle, inode, 0);
429 
430 	ocfs2_journal_dirty(handle, fe_bh);
431 
432 out_commit:
433 	ocfs2_commit_trans(osb, handle);
434 out:
435 	return status;
436 }
437 
ocfs2_truncate_file(struct inode * inode,struct buffer_head * di_bh,u64 new_i_size)438 int ocfs2_truncate_file(struct inode *inode,
439 			       struct buffer_head *di_bh,
440 			       u64 new_i_size)
441 {
442 	int status = 0;
443 	struct ocfs2_dinode *fe = NULL;
444 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
445 
446 	/* We trust di_bh because it comes from ocfs2_inode_lock(), which
447 	 * already validated it */
448 	fe = (struct ocfs2_dinode *) di_bh->b_data;
449 
450 	trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
451 				  (unsigned long long)le64_to_cpu(fe->i_size),
452 				  (unsigned long long)new_i_size);
453 
454 	mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
455 			"Inode %llu, inode i_size = %lld != di "
456 			"i_size = %llu, i_flags = 0x%x\n",
457 			(unsigned long long)OCFS2_I(inode)->ip_blkno,
458 			i_size_read(inode),
459 			(unsigned long long)le64_to_cpu(fe->i_size),
460 			le32_to_cpu(fe->i_flags));
461 
462 	if (new_i_size > le64_to_cpu(fe->i_size)) {
463 		trace_ocfs2_truncate_file_error(
464 			(unsigned long long)le64_to_cpu(fe->i_size),
465 			(unsigned long long)new_i_size);
466 		status = -EINVAL;
467 		mlog_errno(status);
468 		goto bail;
469 	}
470 
471 	down_write(&OCFS2_I(inode)->ip_alloc_sem);
472 
473 	ocfs2_resv_discard(&osb->osb_la_resmap,
474 			   &OCFS2_I(inode)->ip_la_data_resv);
475 
476 	/*
477 	 * The inode lock forced other nodes to sync and drop their
478 	 * pages, which (correctly) happens even if we have a truncate
479 	 * without allocation change - ocfs2 cluster sizes can be much
480 	 * greater than page size, so we have to truncate them
481 	 * anyway.
482 	 */
483 
484 	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
485 		unmap_mapping_range(inode->i_mapping,
486 				    new_i_size + PAGE_SIZE - 1, 0, 1);
487 		truncate_inode_pages(inode->i_mapping, new_i_size);
488 		status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
489 					       i_size_read(inode), 1);
490 		if (status)
491 			mlog_errno(status);
492 
493 		goto bail_unlock_sem;
494 	}
495 
496 	/* alright, we're going to need to do a full blown alloc size
497 	 * change. Orphan the inode so that recovery can complete the
498 	 * truncate if necessary. This does the task of marking
499 	 * i_size. */
500 	status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
501 	if (status < 0) {
502 		mlog_errno(status);
503 		goto bail_unlock_sem;
504 	}
505 
506 	unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
507 	truncate_inode_pages(inode->i_mapping, new_i_size);
508 
509 	status = ocfs2_commit_truncate(osb, inode, di_bh);
510 	if (status < 0) {
511 		mlog_errno(status);
512 		goto bail_unlock_sem;
513 	}
514 
515 	/* TODO: orphan dir cleanup here. */
516 bail_unlock_sem:
517 	up_write(&OCFS2_I(inode)->ip_alloc_sem);
518 
519 bail:
520 	if (!status && OCFS2_I(inode)->ip_clusters == 0)
521 		status = ocfs2_try_remove_refcount_tree(inode, di_bh);
522 
523 	return status;
524 }
525 
526 /*
527  * extend file allocation only here.
528  * we'll update all the disk stuff, and oip->alloc_size
529  *
530  * expect stuff to be locked, a transaction started and enough data /
531  * metadata reservations in the contexts.
532  *
533  * Will return -EAGAIN, and a reason if a restart is needed.
534  * If passed in, *reason will always be set, even in error.
535  */
ocfs2_add_inode_data(struct ocfs2_super * osb,struct inode * inode,u32 * logical_offset,u32 clusters_to_add,int mark_unwritten,struct buffer_head * fe_bh,handle_t * handle,struct ocfs2_alloc_context * data_ac,struct ocfs2_alloc_context * meta_ac,enum ocfs2_alloc_restarted * reason_ret)536 int ocfs2_add_inode_data(struct ocfs2_super *osb,
537 			 struct inode *inode,
538 			 u32 *logical_offset,
539 			 u32 clusters_to_add,
540 			 int mark_unwritten,
541 			 struct buffer_head *fe_bh,
542 			 handle_t *handle,
543 			 struct ocfs2_alloc_context *data_ac,
544 			 struct ocfs2_alloc_context *meta_ac,
545 			 enum ocfs2_alloc_restarted *reason_ret)
546 {
547 	struct ocfs2_extent_tree et;
548 
549 	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
550 	return ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
551 					   clusters_to_add, mark_unwritten,
552 					   data_ac, meta_ac, reason_ret);
553 }
554 
ocfs2_extend_allocation(struct inode * inode,u32 logical_start,u32 clusters_to_add,int mark_unwritten)555 static int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
556 				   u32 clusters_to_add, int mark_unwritten)
557 {
558 	int status = 0;
559 	int restart_func = 0;
560 	int credits;
561 	u32 prev_clusters;
562 	struct buffer_head *bh = NULL;
563 	struct ocfs2_dinode *fe = NULL;
564 	handle_t *handle = NULL;
565 	struct ocfs2_alloc_context *data_ac = NULL;
566 	struct ocfs2_alloc_context *meta_ac = NULL;
567 	enum ocfs2_alloc_restarted why = RESTART_NONE;
568 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
569 	struct ocfs2_extent_tree et;
570 	int did_quota = 0;
571 
572 	/*
573 	 * Unwritten extent only exists for file systems which
574 	 * support holes.
575 	 */
576 	BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));
577 
578 	status = ocfs2_read_inode_block(inode, &bh);
579 	if (status < 0) {
580 		mlog_errno(status);
581 		goto leave;
582 	}
583 	fe = (struct ocfs2_dinode *) bh->b_data;
584 
585 restart_all:
586 	BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
587 
588 	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
589 	status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
590 				       &data_ac, &meta_ac);
591 	if (status) {
592 		mlog_errno(status);
593 		goto leave;
594 	}
595 
596 	credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list);
597 	handle = ocfs2_start_trans(osb, credits);
598 	if (IS_ERR(handle)) {
599 		status = PTR_ERR(handle);
600 		handle = NULL;
601 		mlog_errno(status);
602 		goto leave;
603 	}
604 
605 restarted_transaction:
606 	trace_ocfs2_extend_allocation(
607 		(unsigned long long)OCFS2_I(inode)->ip_blkno,
608 		(unsigned long long)i_size_read(inode),
609 		le32_to_cpu(fe->i_clusters), clusters_to_add,
610 		why, restart_func);
611 
612 	status = dquot_alloc_space_nodirty(inode,
613 			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
614 	if (status)
615 		goto leave;
616 	did_quota = 1;
617 
618 	/* reserve a write to the file entry early on - that we if we
619 	 * run out of credits in the allocation path, we can still
620 	 * update i_size. */
621 	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
622 					 OCFS2_JOURNAL_ACCESS_WRITE);
623 	if (status < 0) {
624 		mlog_errno(status);
625 		goto leave;
626 	}
627 
628 	prev_clusters = OCFS2_I(inode)->ip_clusters;
629 
630 	status = ocfs2_add_inode_data(osb,
631 				      inode,
632 				      &logical_start,
633 				      clusters_to_add,
634 				      mark_unwritten,
635 				      bh,
636 				      handle,
637 				      data_ac,
638 				      meta_ac,
639 				      &why);
640 	if ((status < 0) && (status != -EAGAIN)) {
641 		if (status != -ENOSPC)
642 			mlog_errno(status);
643 		goto leave;
644 	}
645 	ocfs2_update_inode_fsync_trans(handle, inode, 1);
646 	ocfs2_journal_dirty(handle, bh);
647 
648 	spin_lock(&OCFS2_I(inode)->ip_lock);
649 	clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
650 	spin_unlock(&OCFS2_I(inode)->ip_lock);
651 	/* Release unused quota reservation */
652 	dquot_free_space(inode,
653 			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
654 	did_quota = 0;
655 
656 	if (why != RESTART_NONE && clusters_to_add) {
657 		if (why == RESTART_META) {
658 			restart_func = 1;
659 			status = 0;
660 		} else {
661 			BUG_ON(why != RESTART_TRANS);
662 
663 			status = ocfs2_allocate_extend_trans(handle, 1);
664 			if (status < 0) {
665 				/* handle still has to be committed at
666 				 * this point. */
667 				status = -ENOMEM;
668 				mlog_errno(status);
669 				goto leave;
670 			}
671 			goto restarted_transaction;
672 		}
673 	}
674 
675 	trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
676 	     le32_to_cpu(fe->i_clusters),
677 	     (unsigned long long)le64_to_cpu(fe->i_size),
678 	     OCFS2_I(inode)->ip_clusters,
679 	     (unsigned long long)i_size_read(inode));
680 
681 leave:
682 	if (status < 0 && did_quota)
683 		dquot_free_space(inode,
684 			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
685 	if (handle) {
686 		ocfs2_commit_trans(osb, handle);
687 		handle = NULL;
688 	}
689 	if (data_ac) {
690 		ocfs2_free_alloc_context(data_ac);
691 		data_ac = NULL;
692 	}
693 	if (meta_ac) {
694 		ocfs2_free_alloc_context(meta_ac);
695 		meta_ac = NULL;
696 	}
697 	if ((!status) && restart_func) {
698 		restart_func = 0;
699 		goto restart_all;
700 	}
701 	brelse(bh);
702 	bh = NULL;
703 
704 	return status;
705 }
706 
707 /*
708  * While a write will already be ordering the data, a truncate will not.
709  * Thus, we need to explicitly order the zeroed pages.
710  */
ocfs2_zero_start_ordered_transaction(struct inode * inode,struct buffer_head * di_bh,loff_t start_byte,loff_t length)711 static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
712 						      struct buffer_head *di_bh,
713 						      loff_t start_byte,
714 						      loff_t length)
715 {
716 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
717 	handle_t *handle = NULL;
718 	int ret = 0;
719 
720 	if (!ocfs2_should_order_data(inode))
721 		goto out;
722 
723 	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
724 	if (IS_ERR(handle)) {
725 		ret = -ENOMEM;
726 		mlog_errno(ret);
727 		goto out;
728 	}
729 
730 	ret = ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length);
731 	if (ret < 0) {
732 		mlog_errno(ret);
733 		goto out;
734 	}
735 
736 	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
737 				      OCFS2_JOURNAL_ACCESS_WRITE);
738 	if (ret)
739 		mlog_errno(ret);
740 	ocfs2_update_inode_fsync_trans(handle, inode, 1);
741 
742 out:
743 	if (ret) {
744 		if (!IS_ERR(handle))
745 			ocfs2_commit_trans(osb, handle);
746 		handle = ERR_PTR(ret);
747 	}
748 	return handle;
749 }
750 
751 /* Some parts of this taken from generic_cont_expand, which turned out
752  * to be too fragile to do exactly what we need without us having to
753  * worry about recursive locking in ->write_begin() and ->write_end(). */
ocfs2_write_zero_page(struct inode * inode,u64 abs_from,u64 abs_to,struct buffer_head * di_bh)754 static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
755 				 u64 abs_to, struct buffer_head *di_bh)
756 {
757 	struct address_space *mapping = inode->i_mapping;
758 	struct folio *folio;
759 	unsigned long index = abs_from >> PAGE_SHIFT;
760 	handle_t *handle;
761 	int ret = 0;
762 	unsigned zero_from, zero_to, block_start, block_end;
763 	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
764 
765 	BUG_ON(abs_from >= abs_to);
766 	BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
767 	BUG_ON(abs_from & (inode->i_blkbits - 1));
768 
769 	handle = ocfs2_zero_start_ordered_transaction(inode, di_bh,
770 						      abs_from,
771 						      abs_to - abs_from);
772 	if (IS_ERR(handle)) {
773 		ret = PTR_ERR(handle);
774 		goto out;
775 	}
776 
777 	folio = __filemap_get_folio(mapping, index,
778 			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
779 	if (IS_ERR(folio)) {
780 		ret = PTR_ERR(folio);
781 		mlog_errno(ret);
782 		goto out_commit_trans;
783 	}
784 
785 	/* Get the offsets within the folio that we want to zero */
786 	zero_from = offset_in_folio(folio, abs_from);
787 	zero_to = offset_in_folio(folio, abs_to);
788 	if (!zero_to)
789 		zero_to = folio_size(folio);
790 
791 	trace_ocfs2_write_zero_page(
792 			(unsigned long long)OCFS2_I(inode)->ip_blkno,
793 			(unsigned long long)abs_from,
794 			(unsigned long long)abs_to,
795 			index, zero_from, zero_to);
796 
797 	/* We know that zero_from is block aligned */
798 	for (block_start = zero_from; block_start < zero_to;
799 	     block_start = block_end) {
800 		block_end = block_start + i_blocksize(inode);
801 
802 		/*
803 		 * block_start is block-aligned.  Bump it by one to force
804 		 * __block_write_begin and block_commit_write to zero the
805 		 * whole block.
806 		 */
807 		ret = __block_write_begin(folio, block_start + 1, 0,
808 					  ocfs2_get_block);
809 		if (ret < 0) {
810 			mlog_errno(ret);
811 			goto out_unlock;
812 		}
813 
814 
815 		/* must not update i_size! */
816 		block_commit_write(folio, block_start + 1, block_start + 1);
817 	}
818 
819 	/*
820 	 * fs-writeback will release the dirty pages without page lock
821 	 * whose offset are over inode size, the release happens at
822 	 * block_write_full_folio().
823 	 */
824 	i_size_write(inode, abs_to);
825 	inode->i_blocks = ocfs2_inode_sector_count(inode);
826 	di->i_size = cpu_to_le64((u64)i_size_read(inode));
827 	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
828 	di->i_mtime = di->i_ctime = cpu_to_le64(inode_get_mtime_sec(inode));
829 	di->i_ctime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
830 	di->i_mtime_nsec = di->i_ctime_nsec;
831 	if (handle) {
832 		ocfs2_journal_dirty(handle, di_bh);
833 		ocfs2_update_inode_fsync_trans(handle, inode, 1);
834 	}
835 
836 out_unlock:
837 	folio_unlock(folio);
838 	folio_put(folio);
839 out_commit_trans:
840 	if (handle)
841 		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
842 out:
843 	return ret;
844 }
845 
846 /*
847  * Find the next range to zero.  We do this in terms of bytes because
848  * that's what ocfs2_zero_extend() wants, and it is dealing with the
849  * pagecache.  We may return multiple extents.
850  *
851  * zero_start and zero_end are ocfs2_zero_extend()s current idea of what
852  * needs to be zeroed.  range_start and range_end return the next zeroing
853  * range.  A subsequent call should pass the previous range_end as its
854  * zero_start.  If range_end is 0, there's nothing to do.
855  *
856  * Unwritten extents are skipped over.  Refcounted extents are CoWd.
857  */
ocfs2_zero_extend_get_range(struct inode * inode,struct buffer_head * di_bh,u64 zero_start,u64 zero_end,u64 * range_start,u64 * range_end)858 static int ocfs2_zero_extend_get_range(struct inode *inode,
859 				       struct buffer_head *di_bh,
860 				       u64 zero_start, u64 zero_end,
861 				       u64 *range_start, u64 *range_end)
862 {
863 	int rc = 0, needs_cow = 0;
864 	u32 p_cpos, zero_clusters = 0;
865 	u32 zero_cpos =
866 		zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
867 	u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
868 	unsigned int num_clusters = 0;
869 	unsigned int ext_flags = 0;
870 
871 	while (zero_cpos < last_cpos) {
872 		rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
873 					&num_clusters, &ext_flags);
874 		if (rc) {
875 			mlog_errno(rc);
876 			goto out;
877 		}
878 
879 		if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
880 			zero_clusters = num_clusters;
881 			if (ext_flags & OCFS2_EXT_REFCOUNTED)
882 				needs_cow = 1;
883 			break;
884 		}
885 
886 		zero_cpos += num_clusters;
887 	}
888 	if (!zero_clusters) {
889 		*range_end = 0;
890 		goto out;
891 	}
892 
893 	while ((zero_cpos + zero_clusters) < last_cpos) {
894 		rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
895 					&p_cpos, &num_clusters,
896 					&ext_flags);
897 		if (rc) {
898 			mlog_errno(rc);
899 			goto out;
900 		}
901 
902 		if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
903 			break;
904 		if (ext_flags & OCFS2_EXT_REFCOUNTED)
905 			needs_cow = 1;
906 		zero_clusters += num_clusters;
907 	}
908 	if ((zero_cpos + zero_clusters) > last_cpos)
909 		zero_clusters = last_cpos - zero_cpos;
910 
911 	if (needs_cow) {
912 		rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
913 					zero_clusters, UINT_MAX);
914 		if (rc) {
915 			mlog_errno(rc);
916 			goto out;
917 		}
918 	}
919 
920 	*range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
921 	*range_end = ocfs2_clusters_to_bytes(inode->i_sb,
922 					     zero_cpos + zero_clusters);
923 
924 out:
925 	return rc;
926 }
927 
928 /*
929  * Zero one range returned from ocfs2_zero_extend_get_range().  The caller
930  * has made sure that the entire range needs zeroing.
931  */
ocfs2_zero_extend_range(struct inode * inode,u64 range_start,u64 range_end,struct buffer_head * di_bh)932 static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
933 				   u64 range_end, struct buffer_head *di_bh)
934 {
935 	int rc = 0;
936 	u64 next_pos;
937 	u64 zero_pos = range_start;
938 
939 	trace_ocfs2_zero_extend_range(
940 			(unsigned long long)OCFS2_I(inode)->ip_blkno,
941 			(unsigned long long)range_start,
942 			(unsigned long long)range_end);
943 	BUG_ON(range_start >= range_end);
944 
945 	while (zero_pos < range_end) {
946 		next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
947 		if (next_pos > range_end)
948 			next_pos = range_end;
949 		rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
950 		if (rc < 0) {
951 			mlog_errno(rc);
952 			break;
953 		}
954 		zero_pos = next_pos;
955 
956 		/*
957 		 * Very large extends have the potential to lock up
958 		 * the cpu for extended periods of time.
959 		 */
960 		cond_resched();
961 	}
962 
963 	return rc;
964 }
965 
ocfs2_zero_extend(struct inode * inode,struct buffer_head * di_bh,loff_t zero_to_size)966 int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
967 		      loff_t zero_to_size)
968 {
969 	int ret = 0;
970 	u64 zero_start, range_start = 0, range_end = 0;
971 	struct super_block *sb = inode->i_sb;
972 
973 	zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
974 	trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
975 				(unsigned long long)zero_start,
976 				(unsigned long long)i_size_read(inode));
977 	while (zero_start < zero_to_size) {
978 		ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
979 						  zero_to_size,
980 						  &range_start,
981 						  &range_end);
982 		if (ret) {
983 			mlog_errno(ret);
984 			break;
985 		}
986 		if (!range_end)
987 			break;
988 		/* Trim the ends */
989 		if (range_start < zero_start)
990 			range_start = zero_start;
991 		if (range_end > zero_to_size)
992 			range_end = zero_to_size;
993 
994 		ret = ocfs2_zero_extend_range(inode, range_start,
995 					      range_end, di_bh);
996 		if (ret) {
997 			mlog_errno(ret);
998 			break;
999 		}
1000 		zero_start = range_end;
1001 	}
1002 
1003 	return ret;
1004 }
1005 
ocfs2_extend_no_holes(struct inode * inode,struct buffer_head * di_bh,u64 new_i_size,u64 zero_to)1006 int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
1007 			  u64 new_i_size, u64 zero_to)
1008 {
1009 	int ret;
1010 	u32 clusters_to_add;
1011 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
1012 
1013 	/*
1014 	 * Only quota files call this without a bh, and they can't be
1015 	 * refcounted.
1016 	 */
1017 	BUG_ON(!di_bh && ocfs2_is_refcount_inode(inode));
1018 	BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));
1019 
1020 	clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
1021 	if (clusters_to_add < oi->ip_clusters)
1022 		clusters_to_add = 0;
1023 	else
1024 		clusters_to_add -= oi->ip_clusters;
1025 
1026 	if (clusters_to_add) {
1027 		ret = ocfs2_extend_allocation(inode, oi->ip_clusters,
1028 					      clusters_to_add, 0);
1029 		if (ret) {
1030 			mlog_errno(ret);
1031 			goto out;
1032 		}
1033 	}
1034 
1035 	/*
1036 	 * Call this even if we don't add any clusters to the tree. We
1037 	 * still need to zero the area between the old i_size and the
1038 	 * new i_size.
1039 	 */
1040 	ret = ocfs2_zero_extend(inode, di_bh, zero_to);
1041 	if (ret < 0)
1042 		mlog_errno(ret);
1043 
1044 out:
1045 	return ret;
1046 }
1047 
ocfs2_extend_file(struct inode * inode,struct buffer_head * di_bh,u64 new_i_size)1048 static int ocfs2_extend_file(struct inode *inode,
1049 			     struct buffer_head *di_bh,
1050 			     u64 new_i_size)
1051 {
1052 	int ret = 0;
1053 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
1054 
1055 	BUG_ON(!di_bh);
1056 
1057 	/* setattr sometimes calls us like this. */
1058 	if (new_i_size == 0)
1059 		goto out;
1060 
1061 	if (i_size_read(inode) == new_i_size)
1062 		goto out;
1063 	BUG_ON(new_i_size < i_size_read(inode));
1064 
1065 	/*
1066 	 * The alloc sem blocks people in read/write from reading our
1067 	 * allocation until we're done changing it. We depend on
1068 	 * i_rwsem to block other extend/truncate calls while we're
1069 	 * here.  We even have to hold it for sparse files because there
1070 	 * might be some tail zeroing.
1071 	 */
1072 	down_write(&oi->ip_alloc_sem);
1073 
1074 	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1075 		/*
1076 		 * We can optimize small extends by keeping the inodes
1077 		 * inline data.
1078 		 */
1079 		if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
1080 			up_write(&oi->ip_alloc_sem);
1081 			goto out_update_size;
1082 		}
1083 
1084 		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1085 		if (ret) {
1086 			up_write(&oi->ip_alloc_sem);
1087 			mlog_errno(ret);
1088 			goto out;
1089 		}
1090 	}
1091 
1092 	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
1093 		ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
1094 	else
1095 		ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
1096 					    new_i_size);
1097 
1098 	up_write(&oi->ip_alloc_sem);
1099 
1100 	if (ret < 0) {
1101 		mlog_errno(ret);
1102 		goto out;
1103 	}
1104 
1105 out_update_size:
1106 	ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
1107 	if (ret < 0)
1108 		mlog_errno(ret);
1109 
1110 out:
1111 	return ret;
1112 }
1113 
ocfs2_setattr(struct mnt_idmap * idmap,struct dentry * dentry,struct iattr * attr)1114 int ocfs2_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
1115 		  struct iattr *attr)
1116 {
1117 	int status = 0, size_change;
1118 	int inode_locked = 0;
1119 	struct inode *inode = d_inode(dentry);
1120 	struct super_block *sb = inode->i_sb;
1121 	struct ocfs2_super *osb = OCFS2_SB(sb);
1122 	struct buffer_head *bh = NULL;
1123 	handle_t *handle = NULL;
1124 	struct dquot *transfer_to[MAXQUOTAS] = { };
1125 	int qtype;
1126 	int had_lock;
1127 	struct ocfs2_lock_holder oh;
1128 
1129 	trace_ocfs2_setattr(inode, dentry,
1130 			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
1131 			    dentry->d_name.len, dentry->d_name.name,
1132 			    attr->ia_valid,
1133 				attr->ia_valid & ATTR_MODE ? attr->ia_mode : 0,
1134 				attr->ia_valid & ATTR_UID ?
1135 					from_kuid(&init_user_ns, attr->ia_uid) : 0,
1136 				attr->ia_valid & ATTR_GID ?
1137 					from_kgid(&init_user_ns, attr->ia_gid) : 0);
1138 
1139 	status = ocfs2_emergency_state(osb);
1140 	if (unlikely(status)) {
1141 		mlog_errno(status);
1142 		goto bail;
1143 	}
1144 
1145 	/* ensuring we don't even attempt to truncate a symlink */
1146 	if (S_ISLNK(inode->i_mode))
1147 		attr->ia_valid &= ~ATTR_SIZE;
1148 
1149 #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
1150 			   | ATTR_GID | ATTR_UID | ATTR_MODE)
1151 	if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
1152 		return 0;
1153 
1154 	status = setattr_prepare(&nop_mnt_idmap, dentry, attr);
1155 	if (status)
1156 		return status;
1157 
1158 	if (is_quota_modification(&nop_mnt_idmap, inode, attr)) {
1159 		status = dquot_initialize(inode);
1160 		if (status)
1161 			return status;
1162 	}
1163 	size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
1164 	if (size_change) {
1165 		/*
1166 		 * Here we should wait dio to finish before inode lock
1167 		 * to avoid a deadlock between ocfs2_setattr() and
1168 		 * ocfs2_dio_end_io_write()
1169 		 */
1170 		inode_dio_wait(inode);
1171 
1172 		status = ocfs2_rw_lock(inode, 1);
1173 		if (status < 0) {
1174 			mlog_errno(status);
1175 			goto bail;
1176 		}
1177 	}
1178 
1179 	had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh);
1180 	if (had_lock < 0) {
1181 		status = had_lock;
1182 		goto bail_unlock_rw;
1183 	} else if (had_lock) {
1184 		/*
1185 		 * As far as we know, ocfs2_setattr() could only be the first
1186 		 * VFS entry point in the call chain of recursive cluster
1187 		 * locking issue.
1188 		 *
1189 		 * For instance:
1190 		 * chmod_common()
1191 		 *  notify_change()
1192 		 *   ocfs2_setattr()
1193 		 *    posix_acl_chmod()
1194 		 *     ocfs2_iop_get_acl()
1195 		 *
1196 		 * But, we're not 100% sure if it's always true, because the
1197 		 * ordering of the VFS entry points in the call chain is out
1198 		 * of our control. So, we'd better dump the stack here to
1199 		 * catch the other cases of recursive locking.
1200 		 */
1201 		mlog(ML_ERROR, "Another case of recursive locking:\n");
1202 		dump_stack();
1203 	}
1204 	inode_locked = 1;
1205 
1206 	if (size_change) {
1207 		status = inode_newsize_ok(inode, attr->ia_size);
1208 		if (status)
1209 			goto bail_unlock;
1210 
1211 		if (i_size_read(inode) >= attr->ia_size) {
1212 			if (ocfs2_should_order_data(inode)) {
1213 				status = ocfs2_begin_ordered_truncate(inode,
1214 								      attr->ia_size);
1215 				if (status)
1216 					goto bail_unlock;
1217 			}
1218 			status = ocfs2_truncate_file(inode, bh, attr->ia_size);
1219 		} else
1220 			status = ocfs2_extend_file(inode, bh, attr->ia_size);
1221 		if (status < 0) {
1222 			if (status != -ENOSPC)
1223 				mlog_errno(status);
1224 			status = -ENOSPC;
1225 			goto bail_unlock;
1226 		}
1227 	}
1228 
1229 	if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
1230 	    (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
1231 		/*
1232 		 * Gather pointers to quota structures so that allocation /
1233 		 * freeing of quota structures happens here and not inside
1234 		 * dquot_transfer() where we have problems with lock ordering
1235 		 */
1236 		if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)
1237 		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1238 		    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
1239 			transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
1240 			if (IS_ERR(transfer_to[USRQUOTA])) {
1241 				status = PTR_ERR(transfer_to[USRQUOTA]);
1242 				transfer_to[USRQUOTA] = NULL;
1243 				goto bail_unlock;
1244 			}
1245 		}
1246 		if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)
1247 		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1248 		    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
1249 			transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
1250 			if (IS_ERR(transfer_to[GRPQUOTA])) {
1251 				status = PTR_ERR(transfer_to[GRPQUOTA]);
1252 				transfer_to[GRPQUOTA] = NULL;
1253 				goto bail_unlock;
1254 			}
1255 		}
1256 		down_write(&OCFS2_I(inode)->ip_alloc_sem);
1257 		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
1258 					   2 * ocfs2_quota_trans_credits(sb));
1259 		if (IS_ERR(handle)) {
1260 			status = PTR_ERR(handle);
1261 			mlog_errno(status);
1262 			goto bail_unlock_alloc;
1263 		}
1264 		status = __dquot_transfer(inode, transfer_to);
1265 		if (status < 0)
1266 			goto bail_commit;
1267 	} else {
1268 		down_write(&OCFS2_I(inode)->ip_alloc_sem);
1269 		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1270 		if (IS_ERR(handle)) {
1271 			status = PTR_ERR(handle);
1272 			mlog_errno(status);
1273 			goto bail_unlock_alloc;
1274 		}
1275 	}
1276 
1277 	setattr_copy(&nop_mnt_idmap, inode, attr);
1278 	mark_inode_dirty(inode);
1279 
1280 	status = ocfs2_mark_inode_dirty(handle, inode, bh);
1281 	if (status < 0)
1282 		mlog_errno(status);
1283 
1284 bail_commit:
1285 	ocfs2_commit_trans(osb, handle);
1286 bail_unlock_alloc:
1287 	up_write(&OCFS2_I(inode)->ip_alloc_sem);
1288 bail_unlock:
1289 	if (status && inode_locked) {
1290 		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
1291 		inode_locked = 0;
1292 	}
1293 bail_unlock_rw:
1294 	if (size_change)
1295 		ocfs2_rw_unlock(inode, 1);
1296 bail:
1297 
1298 	/* Release quota pointers in case we acquired them */
1299 	for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
1300 		dqput(transfer_to[qtype]);
1301 
1302 	if (!status && attr->ia_valid & ATTR_MODE) {
1303 		status = ocfs2_acl_chmod(inode, bh);
1304 		if (status < 0)
1305 			mlog_errno(status);
1306 	}
1307 	if (inode_locked)
1308 		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
1309 
1310 	brelse(bh);
1311 	return status;
1312 }
1313 
ocfs2_getattr(struct mnt_idmap * idmap,const struct path * path,struct kstat * stat,u32 request_mask,unsigned int flags)1314 int ocfs2_getattr(struct mnt_idmap *idmap, const struct path *path,
1315 		  struct kstat *stat, u32 request_mask, unsigned int flags)
1316 {
1317 	struct inode *inode = d_inode(path->dentry);
1318 	struct super_block *sb = path->dentry->d_sb;
1319 	struct ocfs2_super *osb = sb->s_fs_info;
1320 	int err;
1321 
1322 	err = ocfs2_inode_revalidate(path->dentry);
1323 	if (err) {
1324 		if (err != -ENOENT)
1325 			mlog_errno(err);
1326 		goto bail;
1327 	}
1328 
1329 	generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
1330 	/*
1331 	 * If there is inline data in the inode, the inode will normally not
1332 	 * have data blocks allocated (it may have an external xattr block).
1333 	 * Report at least one sector for such files, so tools like tar, rsync,
1334 	 * others don't incorrectly think the file is completely sparse.
1335 	 */
1336 	if (unlikely(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
1337 		stat->blocks += (stat->size + 511)>>9;
1338 
1339 	/* We set the blksize from the cluster size for performance */
1340 	stat->blksize = osb->s_clustersize;
1341 
1342 bail:
1343 	return err;
1344 }
1345 
ocfs2_permission(struct mnt_idmap * idmap,struct inode * inode,int mask)1346 int ocfs2_permission(struct mnt_idmap *idmap, struct inode *inode,
1347 		     int mask)
1348 {
1349 	int ret, had_lock;
1350 	struct ocfs2_lock_holder oh;
1351 
1352 	if (mask & MAY_NOT_BLOCK)
1353 		return -ECHILD;
1354 
1355 	had_lock = ocfs2_inode_lock_tracker(inode, NULL, 0, &oh);
1356 	if (had_lock < 0) {
1357 		ret = had_lock;
1358 		goto out;
1359 	} else if (had_lock) {
1360 		/* See comments in ocfs2_setattr() for details.
1361 		 * The call chain of this case could be:
1362 		 * do_sys_open()
1363 		 *  may_open()
1364 		 *   inode_permission()
1365 		 *    ocfs2_permission()
1366 		 *     ocfs2_iop_get_acl()
1367 		 */
1368 		mlog(ML_ERROR, "Another case of recursive locking:\n");
1369 		dump_stack();
1370 	}
1371 
1372 	ret = generic_permission(&nop_mnt_idmap, inode, mask);
1373 
1374 	ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
1375 out:
1376 	return ret;
1377 }
1378 
__ocfs2_write_remove_suid(struct inode * inode,struct buffer_head * bh)1379 static int __ocfs2_write_remove_suid(struct inode *inode,
1380 				     struct buffer_head *bh)
1381 {
1382 	int ret;
1383 	handle_t *handle;
1384 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1385 	struct ocfs2_dinode *di;
1386 
1387 	trace_ocfs2_write_remove_suid(
1388 			(unsigned long long)OCFS2_I(inode)->ip_blkno,
1389 			inode->i_mode);
1390 
1391 	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1392 	if (IS_ERR(handle)) {
1393 		ret = PTR_ERR(handle);
1394 		mlog_errno(ret);
1395 		goto out;
1396 	}
1397 
1398 	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
1399 				      OCFS2_JOURNAL_ACCESS_WRITE);
1400 	if (ret < 0) {
1401 		mlog_errno(ret);
1402 		goto out_trans;
1403 	}
1404 
1405 	inode->i_mode &= ~S_ISUID;
1406 	if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
1407 		inode->i_mode &= ~S_ISGID;
1408 
1409 	di = (struct ocfs2_dinode *) bh->b_data;
1410 	di->i_mode = cpu_to_le16(inode->i_mode);
1411 	ocfs2_update_inode_fsync_trans(handle, inode, 0);
1412 
1413 	ocfs2_journal_dirty(handle, bh);
1414 
1415 out_trans:
1416 	ocfs2_commit_trans(osb, handle);
1417 out:
1418 	return ret;
1419 }
1420 
ocfs2_write_remove_suid(struct inode * inode)1421 static int ocfs2_write_remove_suid(struct inode *inode)
1422 {
1423 	int ret;
1424 	struct buffer_head *bh = NULL;
1425 
1426 	ret = ocfs2_read_inode_block(inode, &bh);
1427 	if (ret < 0) {
1428 		mlog_errno(ret);
1429 		goto out;
1430 	}
1431 
1432 	ret =  __ocfs2_write_remove_suid(inode, bh);
1433 out:
1434 	brelse(bh);
1435 	return ret;
1436 }
1437 
1438 /*
1439  * Allocate enough extents to cover the region starting at byte offset
1440  * start for len bytes. Existing extents are skipped, any extents
1441  * added are marked as "unwritten".
1442  */
ocfs2_allocate_unwritten_extents(struct inode * inode,u64 start,u64 len)1443 static int ocfs2_allocate_unwritten_extents(struct inode *inode,
1444 					    u64 start, u64 len)
1445 {
1446 	int ret;
1447 	u32 cpos, phys_cpos, clusters, alloc_size;
1448 	u64 end = start + len;
1449 	struct buffer_head *di_bh = NULL;
1450 
1451 	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1452 		ret = ocfs2_read_inode_block(inode, &di_bh);
1453 		if (ret) {
1454 			mlog_errno(ret);
1455 			goto out;
1456 		}
1457 
1458 		/*
1459 		 * Nothing to do if the requested reservation range
1460 		 * fits within the inode.
1461 		 */
1462 		if (ocfs2_size_fits_inline_data(di_bh, end))
1463 			goto out;
1464 
1465 		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1466 		if (ret) {
1467 			mlog_errno(ret);
1468 			goto out;
1469 		}
1470 	}
1471 
1472 	/*
1473 	 * We consider both start and len to be inclusive.
1474 	 */
1475 	cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
1476 	clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
1477 	clusters -= cpos;
1478 
1479 	while (clusters) {
1480 		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
1481 					 &alloc_size, NULL);
1482 		if (ret) {
1483 			mlog_errno(ret);
1484 			goto out;
1485 		}
1486 
1487 		/*
1488 		 * Hole or existing extent len can be arbitrary, so
1489 		 * cap it to our own allocation request.
1490 		 */
1491 		if (alloc_size > clusters)
1492 			alloc_size = clusters;
1493 
1494 		if (phys_cpos) {
1495 			/*
1496 			 * We already have an allocation at this
1497 			 * region so we can safely skip it.
1498 			 */
1499 			goto next;
1500 		}
1501 
1502 		ret = ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
1503 		if (ret) {
1504 			if (ret != -ENOSPC)
1505 				mlog_errno(ret);
1506 			goto out;
1507 		}
1508 
1509 next:
1510 		cpos += alloc_size;
1511 		clusters -= alloc_size;
1512 	}
1513 
1514 	ret = 0;
1515 out:
1516 
1517 	brelse(di_bh);
1518 	return ret;
1519 }
1520 
1521 /*
1522  * Truncate a byte range, avoiding pages within partial clusters. This
1523  * preserves those pages for the zeroing code to write to.
1524  */
ocfs2_truncate_cluster_pages(struct inode * inode,u64 byte_start,u64 byte_len)1525 static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
1526 					 u64 byte_len)
1527 {
1528 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1529 	loff_t start, end;
1530 	struct address_space *mapping = inode->i_mapping;
1531 
1532 	start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
1533 	end = byte_start + byte_len;
1534 	end = end & ~(osb->s_clustersize - 1);
1535 
1536 	if (start < end) {
1537 		unmap_mapping_range(mapping, start, end - start, 0);
1538 		truncate_inode_pages_range(mapping, start, end - 1);
1539 	}
1540 }
1541 
1542 /*
1543  * zero out partial blocks of one cluster.
1544  *
1545  * start: file offset where zero starts, will be made upper block aligned.
1546  * len: it will be trimmed to the end of current cluster if "start + len"
1547  *      is bigger than it.
1548  */
ocfs2_zeroout_partial_cluster(struct inode * inode,u64 start,u64 len)1549 static int ocfs2_zeroout_partial_cluster(struct inode *inode,
1550 					u64 start, u64 len)
1551 {
1552 	int ret;
1553 	u64 start_block, end_block, nr_blocks;
1554 	u64 p_block, offset;
1555 	u32 cluster, p_cluster, nr_clusters;
1556 	struct super_block *sb = inode->i_sb;
1557 	u64 end = ocfs2_align_bytes_to_clusters(sb, start);
1558 
1559 	if (start + len < end)
1560 		end = start + len;
1561 
1562 	start_block = ocfs2_blocks_for_bytes(sb, start);
1563 	end_block = ocfs2_blocks_for_bytes(sb, end);
1564 	nr_blocks = end_block - start_block;
1565 	if (!nr_blocks)
1566 		return 0;
1567 
1568 	cluster = ocfs2_bytes_to_clusters(sb, start);
1569 	ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
1570 				&nr_clusters, NULL);
1571 	if (ret)
1572 		return ret;
1573 	if (!p_cluster)
1574 		return 0;
1575 
1576 	offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
1577 	p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
1578 	return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
1579 }
1580 
ocfs2_zero_partial_clusters(struct inode * inode,u64 start,u64 len)1581 static int ocfs2_zero_partial_clusters(struct inode *inode,
1582 				       u64 start, u64 len)
1583 {
1584 	int ret = 0;
1585 	u64 tmpend = 0;
1586 	u64 end = start + len;
1587 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1588 	unsigned int csize = osb->s_clustersize;
1589 	handle_t *handle;
1590 	loff_t isize = i_size_read(inode);
1591 
1592 	/*
1593 	 * The "start" and "end" values are NOT necessarily part of
1594 	 * the range whose allocation is being deleted. Rather, this
1595 	 * is what the user passed in with the request. We must zero
1596 	 * partial clusters here. There's no need to worry about
1597 	 * physical allocation - the zeroing code knows to skip holes.
1598 	 */
1599 	trace_ocfs2_zero_partial_clusters(
1600 		(unsigned long long)OCFS2_I(inode)->ip_blkno,
1601 		(unsigned long long)start, (unsigned long long)end);
1602 
1603 	/*
1604 	 * If both edges are on a cluster boundary then there's no
1605 	 * zeroing required as the region is part of the allocation to
1606 	 * be truncated.
1607 	 */
1608 	if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
1609 		goto out;
1610 
1611 	/* No page cache for EOF blocks, issue zero out to disk. */
1612 	if (end > isize) {
1613 		/*
1614 		 * zeroout eof blocks in last cluster starting from
1615 		 * "isize" even "start" > "isize" because it is
1616 		 * complicated to zeroout just at "start" as "start"
1617 		 * may be not aligned with block size, buffer write
1618 		 * would be required to do that, but out of eof buffer
1619 		 * write is not supported.
1620 		 */
1621 		ret = ocfs2_zeroout_partial_cluster(inode, isize,
1622 					end - isize);
1623 		if (ret) {
1624 			mlog_errno(ret);
1625 			goto out;
1626 		}
1627 		if (start >= isize)
1628 			goto out;
1629 		end = isize;
1630 	}
1631 	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1632 	if (IS_ERR(handle)) {
1633 		ret = PTR_ERR(handle);
1634 		mlog_errno(ret);
1635 		goto out;
1636 	}
1637 
1638 	/*
1639 	 * If start is on a cluster boundary and end is somewhere in another
1640 	 * cluster, we have not COWed the cluster starting at start, unless
1641 	 * end is also within the same cluster. So, in this case, we skip this
1642 	 * first call to ocfs2_zero_range_for_truncate() truncate and move on
1643 	 * to the next one.
1644 	 */
1645 	if ((start & (csize - 1)) != 0) {
1646 		/*
1647 		 * We want to get the byte offset of the end of the 1st
1648 		 * cluster.
1649 		 */
1650 		tmpend = (u64)osb->s_clustersize +
1651 			(start & ~(osb->s_clustersize - 1));
1652 		if (tmpend > end)
1653 			tmpend = end;
1654 
1655 		trace_ocfs2_zero_partial_clusters_range1(
1656 			(unsigned long long)start,
1657 			(unsigned long long)tmpend);
1658 
1659 		ret = ocfs2_zero_range_for_truncate(inode, handle, start,
1660 						    tmpend);
1661 		if (ret)
1662 			mlog_errno(ret);
1663 	}
1664 
1665 	if (tmpend < end) {
1666 		/*
1667 		 * This may make start and end equal, but the zeroing
1668 		 * code will skip any work in that case so there's no
1669 		 * need to catch it up here.
1670 		 */
1671 		start = end & ~(osb->s_clustersize - 1);
1672 
1673 		trace_ocfs2_zero_partial_clusters_range2(
1674 			(unsigned long long)start, (unsigned long long)end);
1675 
1676 		ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
1677 		if (ret)
1678 			mlog_errno(ret);
1679 	}
1680 	ocfs2_update_inode_fsync_trans(handle, inode, 1);
1681 
1682 	ocfs2_commit_trans(osb, handle);
1683 out:
1684 	return ret;
1685 }
1686 
ocfs2_find_rec(struct ocfs2_extent_list * el,u32 pos)1687 static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
1688 {
1689 	int i;
1690 	struct ocfs2_extent_rec *rec = NULL;
1691 
1692 	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
1693 
1694 		rec = &el->l_recs[i];
1695 
1696 		if (le32_to_cpu(rec->e_cpos) < pos)
1697 			break;
1698 	}
1699 
1700 	return i;
1701 }
1702 
1703 /*
1704  * Helper to calculate the punching pos and length in one run, we handle the
1705  * following three cases in order:
1706  *
1707  * - remove the entire record
1708  * - remove a partial record
1709  * - no record needs to be removed (hole-punching completed)
1710 */
ocfs2_calc_trunc_pos(struct inode * inode,struct ocfs2_extent_list * el,struct ocfs2_extent_rec * rec,u32 trunc_start,u32 * trunc_cpos,u32 * trunc_len,u32 * trunc_end,u64 * blkno,int * done)1711 static void ocfs2_calc_trunc_pos(struct inode *inode,
1712 				 struct ocfs2_extent_list *el,
1713 				 struct ocfs2_extent_rec *rec,
1714 				 u32 trunc_start, u32 *trunc_cpos,
1715 				 u32 *trunc_len, u32 *trunc_end,
1716 				 u64 *blkno, int *done)
1717 {
1718 	int ret = 0;
1719 	u32 coff, range;
1720 
1721 	range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
1722 
1723 	if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
1724 		/*
1725 		 * remove an entire extent record.
1726 		 */
1727 		*trunc_cpos = le32_to_cpu(rec->e_cpos);
1728 		/*
1729 		 * Skip holes if any.
1730 		 */
1731 		if (range < *trunc_end)
1732 			*trunc_end = range;
1733 		*trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
1734 		*blkno = le64_to_cpu(rec->e_blkno);
1735 		*trunc_end = le32_to_cpu(rec->e_cpos);
1736 	} else if (range > trunc_start) {
1737 		/*
1738 		 * remove a partial extent record, which means we're
1739 		 * removing the last extent record.
1740 		 */
1741 		*trunc_cpos = trunc_start;
1742 		/*
1743 		 * skip hole if any.
1744 		 */
1745 		if (range < *trunc_end)
1746 			*trunc_end = range;
1747 		*trunc_len = *trunc_end - trunc_start;
1748 		coff = trunc_start - le32_to_cpu(rec->e_cpos);
1749 		*blkno = le64_to_cpu(rec->e_blkno) +
1750 				ocfs2_clusters_to_blocks(inode->i_sb, coff);
1751 		*trunc_end = trunc_start;
1752 	} else {
1753 		/*
1754 		 * It may have two following possibilities:
1755 		 *
1756 		 * - last record has been removed
1757 		 * - trunc_start was within a hole
1758 		 *
1759 		 * both two cases mean the completion of hole punching.
1760 		 */
1761 		ret = 1;
1762 	}
1763 
1764 	*done = ret;
1765 }
1766 
ocfs2_remove_inode_range(struct inode * inode,struct buffer_head * di_bh,u64 byte_start,u64 byte_len)1767 int ocfs2_remove_inode_range(struct inode *inode,
1768 			     struct buffer_head *di_bh, u64 byte_start,
1769 			     u64 byte_len)
1770 {
1771 	int ret = 0, flags = 0, done = 0, i;
1772 	u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
1773 	u32 cluster_in_el;
1774 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1775 	struct ocfs2_cached_dealloc_ctxt dealloc;
1776 	struct address_space *mapping = inode->i_mapping;
1777 	struct ocfs2_extent_tree et;
1778 	struct ocfs2_path *path = NULL;
1779 	struct ocfs2_extent_list *el = NULL;
1780 	struct ocfs2_extent_rec *rec = NULL;
1781 	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
1782 	u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);
1783 
1784 	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
1785 	ocfs2_init_dealloc_ctxt(&dealloc);
1786 
1787 	trace_ocfs2_remove_inode_range(
1788 			(unsigned long long)OCFS2_I(inode)->ip_blkno,
1789 			(unsigned long long)byte_start,
1790 			(unsigned long long)byte_len);
1791 
1792 	if (byte_len == 0)
1793 		return 0;
1794 
1795 	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1796 		int id_count = ocfs2_max_inline_data_with_xattr(inode->i_sb, di);
1797 
1798 		if (byte_start > id_count || byte_start + byte_len > id_count) {
1799 			ret = -EINVAL;
1800 			mlog_errno(ret);
1801 			goto out;
1802 		}
1803 
1804 		ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
1805 					    byte_start + byte_len, 0);
1806 		if (ret) {
1807 			mlog_errno(ret);
1808 			goto out;
1809 		}
1810 		/*
1811 		 * There's no need to get fancy with the page cache
1812 		 * truncate of an inline-data inode. We're talking
1813 		 * about less than a page here, which will be cached
1814 		 * in the dinode buffer anyway.
1815 		 */
1816 		unmap_mapping_range(mapping, 0, 0, 0);
1817 		truncate_inode_pages(mapping, 0);
1818 		goto out;
1819 	}
1820 
1821 	/*
1822 	 * For reflinks, we may need to CoW 2 clusters which might be
1823 	 * partially zero'd later, if hole's start and end offset were
1824 	 * within one cluster(means is not exactly aligned to clustersize).
1825 	 */
1826 
1827 	if (ocfs2_is_refcount_inode(inode)) {
1828 		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
1829 		if (ret) {
1830 			mlog_errno(ret);
1831 			goto out;
1832 		}
1833 
1834 		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
1835 		if (ret) {
1836 			mlog_errno(ret);
1837 			goto out;
1838 		}
1839 	}
1840 
1841 	trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
1842 	trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
1843 	cluster_in_el = trunc_end;
1844 
1845 	ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
1846 	if (ret) {
1847 		mlog_errno(ret);
1848 		goto out;
1849 	}
1850 
1851 	path = ocfs2_new_path_from_et(&et);
1852 	if (!path) {
1853 		ret = -ENOMEM;
1854 		mlog_errno(ret);
1855 		goto out;
1856 	}
1857 
1858 	while (trunc_end > trunc_start) {
1859 
1860 		ret = ocfs2_find_path(INODE_CACHE(inode), path,
1861 				      cluster_in_el);
1862 		if (ret) {
1863 			mlog_errno(ret);
1864 			goto out;
1865 		}
1866 
1867 		el = path_leaf_el(path);
1868 
1869 		i = ocfs2_find_rec(el, trunc_end);
1870 		/*
1871 		 * Need to go to previous extent block.
1872 		 */
1873 		if (i < 0) {
1874 			if (path->p_tree_depth == 0)
1875 				break;
1876 
1877 			ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
1878 							    path,
1879 							    &cluster_in_el);
1880 			if (ret) {
1881 				mlog_errno(ret);
1882 				goto out;
1883 			}
1884 
1885 			/*
1886 			 * We've reached the leftmost extent block,
1887 			 * it's safe to leave.
1888 			 */
1889 			if (cluster_in_el == 0)
1890 				break;
1891 
1892 			/*
1893 			 * The 'pos' searched for previous extent block is
1894 			 * always one cluster less than actual trunc_end.
1895 			 */
1896 			trunc_end = cluster_in_el + 1;
1897 
1898 			ocfs2_reinit_path(path, 1);
1899 
1900 			continue;
1901 
1902 		} else
1903 			rec = &el->l_recs[i];
1904 
1905 		ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
1906 				     &trunc_len, &trunc_end, &blkno, &done);
1907 		if (done)
1908 			break;
1909 
1910 		flags = rec->e_flags;
1911 		phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
1912 
1913 		ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
1914 					       phys_cpos, trunc_len, flags,
1915 					       &dealloc, refcount_loc, false);
1916 		if (ret < 0) {
1917 			mlog_errno(ret);
1918 			goto out;
1919 		}
1920 
1921 		cluster_in_el = trunc_end;
1922 
1923 		ocfs2_reinit_path(path, 1);
1924 	}
1925 
1926 	ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);
1927 
1928 out:
1929 	ocfs2_free_path(path);
1930 	ocfs2_schedule_truncate_log_flush(osb, 1);
1931 	ocfs2_run_deallocs(osb, &dealloc);
1932 
1933 	return ret;
1934 }
1935 
1936 /*
1937  * Parts of this function taken from xfs_change_file_space()
1938  */
__ocfs2_change_file_space(struct file * file,struct inode * inode,loff_t f_pos,unsigned int cmd,struct ocfs2_space_resv * sr,int change_size)1939 static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
1940 				     loff_t f_pos, unsigned int cmd,
1941 				     struct ocfs2_space_resv *sr,
1942 				     int change_size)
1943 {
1944 	int ret;
1945 	s64 llen;
1946 	loff_t size, orig_isize;
1947 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1948 	struct buffer_head *di_bh = NULL;
1949 	handle_t *handle;
1950 	unsigned long long max_off = inode->i_sb->s_maxbytes;
1951 
1952 	if (unlikely(ocfs2_emergency_state(osb)))
1953 		return -EROFS;
1954 
1955 	inode_lock(inode);
1956 
1957 	/* Wait all existing dio workers, newcomers will block on i_rwsem */
1958 	inode_dio_wait(inode);
1959 	/*
1960 	 * This prevents concurrent writes on other nodes
1961 	 */
1962 	ret = ocfs2_rw_lock(inode, 1);
1963 	if (ret) {
1964 		mlog_errno(ret);
1965 		goto out;
1966 	}
1967 
1968 	ret = ocfs2_inode_lock(inode, &di_bh, 1);
1969 	if (ret) {
1970 		mlog_errno(ret);
1971 		goto out_rw_unlock;
1972 	}
1973 
1974 	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
1975 		ret = -EPERM;
1976 		goto out_inode_unlock;
1977 	}
1978 
1979 	switch (sr->l_whence) {
1980 	case 0: /*SEEK_SET*/
1981 		break;
1982 	case 1: /*SEEK_CUR*/
1983 		sr->l_start += f_pos;
1984 		break;
1985 	case 2: /*SEEK_END*/
1986 		sr->l_start += i_size_read(inode);
1987 		break;
1988 	default:
1989 		ret = -EINVAL;
1990 		goto out_inode_unlock;
1991 	}
1992 	sr->l_whence = 0;
1993 
1994 	llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;
1995 
1996 	if (sr->l_start < 0
1997 	    || sr->l_start > max_off
1998 	    || (sr->l_start + llen) < 0
1999 	    || (sr->l_start + llen) > max_off) {
2000 		ret = -EINVAL;
2001 		goto out_inode_unlock;
2002 	}
2003 	size = sr->l_start + sr->l_len;
2004 
2005 	if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64 ||
2006 	    cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) {
2007 		if (sr->l_len <= 0) {
2008 			ret = -EINVAL;
2009 			goto out_inode_unlock;
2010 		}
2011 	}
2012 
2013 	if (file && setattr_should_drop_suidgid(&nop_mnt_idmap, file_inode(file))) {
2014 		ret = __ocfs2_write_remove_suid(inode, di_bh);
2015 		if (ret) {
2016 			mlog_errno(ret);
2017 			goto out_inode_unlock;
2018 		}
2019 	}
2020 
2021 	down_write(&OCFS2_I(inode)->ip_alloc_sem);
2022 	switch (cmd) {
2023 	case OCFS2_IOC_RESVSP:
2024 	case OCFS2_IOC_RESVSP64:
2025 		/*
2026 		 * This takes unsigned offsets, but the signed ones we
2027 		 * pass have been checked against overflow above.
2028 		 */
2029 		ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
2030 						       sr->l_len);
2031 		break;
2032 	case OCFS2_IOC_UNRESVSP:
2033 	case OCFS2_IOC_UNRESVSP64:
2034 		ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
2035 					       sr->l_len);
2036 		break;
2037 	default:
2038 		ret = -EINVAL;
2039 	}
2040 
2041 	orig_isize = i_size_read(inode);
2042 	/* zeroout eof blocks in the cluster. */
2043 	if (!ret && change_size && orig_isize < size) {
2044 		ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
2045 					size - orig_isize);
2046 		if (!ret)
2047 			i_size_write(inode, size);
2048 	}
2049 	up_write(&OCFS2_I(inode)->ip_alloc_sem);
2050 	if (ret) {
2051 		mlog_errno(ret);
2052 		goto out_inode_unlock;
2053 	}
2054 
2055 	/*
2056 	 * We update c/mtime for these changes
2057 	 */
2058 	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
2059 	if (IS_ERR(handle)) {
2060 		ret = PTR_ERR(handle);
2061 		mlog_errno(ret);
2062 		goto out_inode_unlock;
2063 	}
2064 
2065 	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
2066 	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
2067 	if (ret < 0)
2068 		mlog_errno(ret);
2069 
2070 	if (file && (file->f_flags & O_SYNC))
2071 		handle->h_sync = 1;
2072 
2073 	ocfs2_commit_trans(osb, handle);
2074 
2075 out_inode_unlock:
2076 	brelse(di_bh);
2077 	ocfs2_inode_unlock(inode, 1);
2078 out_rw_unlock:
2079 	ocfs2_rw_unlock(inode, 1);
2080 
2081 out:
2082 	inode_unlock(inode);
2083 	return ret;
2084 }
2085 
ocfs2_change_file_space(struct file * file,unsigned int cmd,struct ocfs2_space_resv * sr)2086 int ocfs2_change_file_space(struct file *file, unsigned int cmd,
2087 			    struct ocfs2_space_resv *sr)
2088 {
2089 	struct inode *inode = file_inode(file);
2090 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2091 	int ret;
2092 
2093 	if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
2094 	    !ocfs2_writes_unwritten_extents(osb))
2095 		return -ENOTTY;
2096 	else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
2097 		 !ocfs2_sparse_alloc(osb))
2098 		return -ENOTTY;
2099 
2100 	if (!S_ISREG(inode->i_mode))
2101 		return -EINVAL;
2102 
2103 	if (!(file->f_mode & FMODE_WRITE))
2104 		return -EBADF;
2105 
2106 	ret = mnt_want_write_file(file);
2107 	if (ret)
2108 		return ret;
2109 	ret = __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
2110 	mnt_drop_write_file(file);
2111 	return ret;
2112 }
2113 
ocfs2_fallocate(struct file * file,int mode,loff_t offset,loff_t len)2114 static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
2115 			    loff_t len)
2116 {
2117 	struct inode *inode = file_inode(file);
2118 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2119 	struct ocfs2_space_resv sr;
2120 	int change_size = 1;
2121 	int cmd = OCFS2_IOC_RESVSP64;
2122 	int ret = 0;
2123 
2124 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2125 		return -EOPNOTSUPP;
2126 	if (!ocfs2_writes_unwritten_extents(osb))
2127 		return -EOPNOTSUPP;
2128 
2129 	if (mode & FALLOC_FL_KEEP_SIZE) {
2130 		change_size = 0;
2131 	} else {
2132 		ret = inode_newsize_ok(inode, offset + len);
2133 		if (ret)
2134 			return ret;
2135 	}
2136 
2137 	if (mode & FALLOC_FL_PUNCH_HOLE)
2138 		cmd = OCFS2_IOC_UNRESVSP64;
2139 
2140 	sr.l_whence = 0;
2141 	sr.l_start = (s64)offset;
2142 	sr.l_len = (s64)len;
2143 
2144 	return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
2145 					 change_size);
2146 }
2147 
ocfs2_check_range_for_refcount(struct inode * inode,loff_t pos,size_t count)2148 int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
2149 				   size_t count)
2150 {
2151 	int ret = 0;
2152 	unsigned int extent_flags;
2153 	u32 cpos, clusters, extent_len, phys_cpos;
2154 	struct super_block *sb = inode->i_sb;
2155 
2156 	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
2157 	    !ocfs2_is_refcount_inode(inode) ||
2158 	    OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
2159 		return 0;
2160 
2161 	cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
2162 	clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
2163 
2164 	while (clusters) {
2165 		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
2166 					 &extent_flags);
2167 		if (ret < 0) {
2168 			mlog_errno(ret);
2169 			goto out;
2170 		}
2171 
2172 		if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
2173 			ret = 1;
2174 			break;
2175 		}
2176 
2177 		if (extent_len > clusters)
2178 			extent_len = clusters;
2179 
2180 		clusters -= extent_len;
2181 		cpos += extent_len;
2182 	}
2183 out:
2184 	return ret;
2185 }
2186 
ocfs2_is_io_unaligned(struct inode * inode,size_t count,loff_t pos)2187 static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
2188 {
2189 	int blockmask = inode->i_sb->s_blocksize - 1;
2190 	loff_t final_size = pos + count;
2191 
2192 	if ((pos & blockmask) || (final_size & blockmask))
2193 		return 1;
2194 	return 0;
2195 }
2196 
ocfs2_inode_lock_for_extent_tree(struct inode * inode,struct buffer_head ** di_bh,int meta_level,int write_sem,int wait)2197 static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
2198 					    struct buffer_head **di_bh,
2199 					    int meta_level,
2200 					    int write_sem,
2201 					    int wait)
2202 {
2203 	int ret = 0;
2204 
2205 	if (wait)
2206 		ret = ocfs2_inode_lock(inode, di_bh, meta_level);
2207 	else
2208 		ret = ocfs2_try_inode_lock(inode, di_bh, meta_level);
2209 	if (ret < 0)
2210 		goto out;
2211 
2212 	if (wait) {
2213 		if (write_sem)
2214 			down_write(&OCFS2_I(inode)->ip_alloc_sem);
2215 		else
2216 			down_read(&OCFS2_I(inode)->ip_alloc_sem);
2217 	} else {
2218 		if (write_sem)
2219 			ret = down_write_trylock(&OCFS2_I(inode)->ip_alloc_sem);
2220 		else
2221 			ret = down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem);
2222 
2223 		if (!ret) {
2224 			ret = -EAGAIN;
2225 			goto out_unlock;
2226 		}
2227 	}
2228 
2229 	return ret;
2230 
2231 out_unlock:
2232 	brelse(*di_bh);
2233 	*di_bh = NULL;
2234 	ocfs2_inode_unlock(inode, meta_level);
2235 out:
2236 	return ret;
2237 }
2238 
ocfs2_inode_unlock_for_extent_tree(struct inode * inode,struct buffer_head ** di_bh,int meta_level,int write_sem)2239 static void ocfs2_inode_unlock_for_extent_tree(struct inode *inode,
2240 					       struct buffer_head **di_bh,
2241 					       int meta_level,
2242 					       int write_sem)
2243 {
2244 	if (write_sem)
2245 		up_write(&OCFS2_I(inode)->ip_alloc_sem);
2246 	else
2247 		up_read(&OCFS2_I(inode)->ip_alloc_sem);
2248 
2249 	brelse(*di_bh);
2250 	*di_bh = NULL;
2251 
2252 	if (meta_level >= 0)
2253 		ocfs2_inode_unlock(inode, meta_level);
2254 }
2255 
ocfs2_prepare_inode_for_write(struct file * file,loff_t pos,size_t count,int wait)2256 static int ocfs2_prepare_inode_for_write(struct file *file,
2257 					 loff_t pos, size_t count, int wait)
2258 {
2259 	int ret = 0, meta_level = 0, overwrite_io = 0;
2260 	int write_sem = 0;
2261 	struct dentry *dentry = file->f_path.dentry;
2262 	struct inode *inode = d_inode(dentry);
2263 	struct buffer_head *di_bh = NULL;
2264 	u32 cpos;
2265 	u32 clusters;
2266 
2267 	/*
2268 	 * We start with a read level meta lock and only jump to an ex
2269 	 * if we need to make modifications here.
2270 	 */
2271 	for(;;) {
2272 		ret = ocfs2_inode_lock_for_extent_tree(inode,
2273 						       &di_bh,
2274 						       meta_level,
2275 						       write_sem,
2276 						       wait);
2277 		if (ret < 0) {
2278 			if (ret != -EAGAIN)
2279 				mlog_errno(ret);
2280 			goto out;
2281 		}
2282 
2283 		/*
2284 		 * Check if IO will overwrite allocated blocks in case
2285 		 * IOCB_NOWAIT flag is set.
2286 		 */
2287 		if (!wait && !overwrite_io) {
2288 			overwrite_io = 1;
2289 
2290 			ret = ocfs2_overwrite_io(inode, di_bh, pos, count);
2291 			if (ret < 0) {
2292 				if (ret != -EAGAIN)
2293 					mlog_errno(ret);
2294 				goto out_unlock;
2295 			}
2296 		}
2297 
2298 		/* Clear suid / sgid if necessary. We do this here
2299 		 * instead of later in the write path because
2300 		 * remove_suid() calls ->setattr without any hint that
2301 		 * we may have already done our cluster locking. Since
2302 		 * ocfs2_setattr() *must* take cluster locks to
2303 		 * proceed, this will lead us to recursively lock the
2304 		 * inode. There's also the dinode i_size state which
2305 		 * can be lost via setattr during extending writes (we
2306 		 * set inode->i_size at the end of a write. */
2307 		if (setattr_should_drop_suidgid(&nop_mnt_idmap, inode)) {
2308 			if (meta_level == 0) {
2309 				ocfs2_inode_unlock_for_extent_tree(inode,
2310 								   &di_bh,
2311 								   meta_level,
2312 								   write_sem);
2313 				meta_level = 1;
2314 				continue;
2315 			}
2316 
2317 			ret = ocfs2_write_remove_suid(inode);
2318 			if (ret < 0) {
2319 				mlog_errno(ret);
2320 				goto out_unlock;
2321 			}
2322 		}
2323 
2324 		ret = ocfs2_check_range_for_refcount(inode, pos, count);
2325 		if (ret == 1) {
2326 			ocfs2_inode_unlock_for_extent_tree(inode,
2327 							   &di_bh,
2328 							   meta_level,
2329 							   write_sem);
2330 			meta_level = 1;
2331 			write_sem = 1;
2332 			ret = ocfs2_inode_lock_for_extent_tree(inode,
2333 							       &di_bh,
2334 							       meta_level,
2335 							       write_sem,
2336 							       wait);
2337 			if (ret < 0) {
2338 				if (ret != -EAGAIN)
2339 					mlog_errno(ret);
2340 				goto out;
2341 			}
2342 
2343 			cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
2344 			clusters =
2345 				ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
2346 			ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
2347 		}
2348 
2349 		if (ret < 0) {
2350 			if (ret != -EAGAIN)
2351 				mlog_errno(ret);
2352 			goto out_unlock;
2353 		}
2354 
2355 		break;
2356 	}
2357 
2358 out_unlock:
2359 	trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
2360 					    pos, count, wait);
2361 
2362 	ocfs2_inode_unlock_for_extent_tree(inode,
2363 					   &di_bh,
2364 					   meta_level,
2365 					   write_sem);
2366 
2367 out:
2368 	return ret;
2369 }
2370 
ocfs2_file_write_iter(struct kiocb * iocb,struct iov_iter * from)2371 static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
2372 				    struct iov_iter *from)
2373 {
2374 	int rw_level;
2375 	ssize_t written = 0;
2376 	ssize_t ret;
2377 	size_t count = iov_iter_count(from);
2378 	struct file *file = iocb->ki_filp;
2379 	struct inode *inode = file_inode(file);
2380 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2381 	int full_coherency = !(osb->s_mount_opt &
2382 			       OCFS2_MOUNT_COHERENCY_BUFFERED);
2383 	void *saved_ki_complete = NULL;
2384 	int append_write = ((iocb->ki_pos + count) >=
2385 			i_size_read(inode) ? 1 : 0);
2386 	int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
2387 	int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;
2388 
2389 	trace_ocfs2_file_write_iter(inode, file, file->f_path.dentry,
2390 		(unsigned long long)OCFS2_I(inode)->ip_blkno,
2391 		file->f_path.dentry->d_name.len,
2392 		file->f_path.dentry->d_name.name,
2393 		(unsigned int)from->nr_segs);	/* GRRRRR */
2394 
2395 	if (!direct_io && nowait)
2396 		return -EOPNOTSUPP;
2397 
2398 	if (count == 0)
2399 		return 0;
2400 
2401 	if (nowait) {
2402 		if (!inode_trylock(inode))
2403 			return -EAGAIN;
2404 	} else
2405 		inode_lock(inode);
2406 
2407 	ocfs2_iocb_init_rw_locked(iocb);
2408 
2409 	/*
2410 	 * Concurrent O_DIRECT writes are allowed with
2411 	 * mount_option "coherency=buffered".
2412 	 * For append write, we must take rw EX.
2413 	 */
2414 	rw_level = (!direct_io || full_coherency || append_write);
2415 
2416 	if (nowait)
2417 		ret = ocfs2_try_rw_lock(inode, rw_level);
2418 	else
2419 		ret = ocfs2_rw_lock(inode, rw_level);
2420 	if (ret < 0) {
2421 		if (ret != -EAGAIN)
2422 			mlog_errno(ret);
2423 		goto out_mutex;
2424 	}
2425 
2426 	/*
2427 	 * O_DIRECT writes with "coherency=full" need to take EX cluster
2428 	 * inode_lock to guarantee coherency.
2429 	 */
2430 	if (direct_io && full_coherency) {
2431 		/*
2432 		 * We need to take and drop the inode lock to force
2433 		 * other nodes to drop their caches.  Buffered I/O
2434 		 * already does this in write_begin().
2435 		 */
2436 		if (nowait)
2437 			ret = ocfs2_try_inode_lock(inode, NULL, 1);
2438 		else
2439 			ret = ocfs2_inode_lock(inode, NULL, 1);
2440 		if (ret < 0) {
2441 			if (ret != -EAGAIN)
2442 				mlog_errno(ret);
2443 			goto out;
2444 		}
2445 
2446 		ocfs2_inode_unlock(inode, 1);
2447 	}
2448 
2449 	ret = generic_write_checks(iocb, from);
2450 	if (ret <= 0) {
2451 		if (ret)
2452 			mlog_errno(ret);
2453 		goto out;
2454 	}
2455 	count = ret;
2456 
2457 	ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count, !nowait);
2458 	if (ret < 0) {
2459 		if (ret != -EAGAIN)
2460 			mlog_errno(ret);
2461 		goto out;
2462 	}
2463 
2464 	if (direct_io && !is_sync_kiocb(iocb) &&
2465 	    ocfs2_is_io_unaligned(inode, count, iocb->ki_pos)) {
2466 		/*
2467 		 * Make it a sync io if it's an unaligned aio.
2468 		 */
2469 		saved_ki_complete = xchg(&iocb->ki_complete, NULL);
2470 	}
2471 
2472 	/* communicate with ocfs2_dio_end_io */
2473 	ocfs2_iocb_set_rw_locked(iocb, rw_level);
2474 
2475 	written = __generic_file_write_iter(iocb, from);
2476 	/* buffered aio wouldn't have proper lock coverage today */
2477 	BUG_ON(written == -EIOCBQUEUED && !direct_io);
2478 
2479 	/*
2480 	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
2481 	 * function pointer which is called when o_direct io completes so that
2482 	 * it can unlock our rw lock.
2483 	 * Unfortunately there are error cases which call end_io and others
2484 	 * that don't.  so we don't have to unlock the rw_lock if either an
2485 	 * async dio is going to do it in the future or an end_io after an
2486 	 * error has already done it.
2487 	 */
2488 	if ((written == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
2489 		rw_level = -1;
2490 	}
2491 
2492 	if (unlikely(written <= 0))
2493 		goto out;
2494 
2495 	if (((file->f_flags & O_DSYNC) && !direct_io) ||
2496 	    IS_SYNC(inode)) {
2497 		ret = filemap_fdatawrite_range(file->f_mapping,
2498 					       iocb->ki_pos - written,
2499 					       iocb->ki_pos - 1);
2500 		if (ret < 0)
2501 			written = ret;
2502 
2503 		if (!ret) {
2504 			ret = jbd2_journal_force_commit(osb->journal->j_journal);
2505 			if (ret < 0)
2506 				written = ret;
2507 		}
2508 
2509 		if (!ret)
2510 			ret = filemap_fdatawait_range(file->f_mapping,
2511 						      iocb->ki_pos - written,
2512 						      iocb->ki_pos - 1);
2513 	}
2514 
2515 out:
2516 	if (saved_ki_complete)
2517 		xchg(&iocb->ki_complete, saved_ki_complete);
2518 
2519 	if (rw_level != -1)
2520 		ocfs2_rw_unlock(inode, rw_level);
2521 
2522 out_mutex:
2523 	inode_unlock(inode);
2524 
2525 	if (written)
2526 		ret = written;
2527 	return ret;
2528 }
2529 
ocfs2_file_read_iter(struct kiocb * iocb,struct iov_iter * to)2530 static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
2531 				   struct iov_iter *to)
2532 {
2533 	int ret = 0, rw_level = -1, lock_level = 0;
2534 	struct file *filp = iocb->ki_filp;
2535 	struct inode *inode = file_inode(filp);
2536 	int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
2537 	int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;
2538 
2539 	trace_ocfs2_file_read_iter(inode, filp, filp->f_path.dentry,
2540 			(unsigned long long)OCFS2_I(inode)->ip_blkno,
2541 			filp->f_path.dentry->d_name.len,
2542 			filp->f_path.dentry->d_name.name,
2543 			to->nr_segs);	/* GRRRRR */
2544 
2545 
2546 	if (!inode) {
2547 		ret = -EINVAL;
2548 		mlog_errno(ret);
2549 		goto bail;
2550 	}
2551 
2552 	if (!direct_io && nowait)
2553 		return -EOPNOTSUPP;
2554 
2555 	ocfs2_iocb_init_rw_locked(iocb);
2556 
2557 	/*
2558 	 * buffered reads protect themselves in ->read_folio().  O_DIRECT reads
2559 	 * need locks to protect pending reads from racing with truncate.
2560 	 */
2561 	if (direct_io) {
2562 		if (nowait)
2563 			ret = ocfs2_try_rw_lock(inode, 0);
2564 		else
2565 			ret = ocfs2_rw_lock(inode, 0);
2566 
2567 		if (ret < 0) {
2568 			if (ret != -EAGAIN)
2569 				mlog_errno(ret);
2570 			goto bail;
2571 		}
2572 		rw_level = 0;
2573 		/* communicate with ocfs2_dio_end_io */
2574 		ocfs2_iocb_set_rw_locked(iocb, rw_level);
2575 	}
2576 
2577 	/*
2578 	 * We're fine letting folks race truncates and extending
2579 	 * writes with read across the cluster, just like they can
2580 	 * locally. Hence no rw_lock during read.
2581 	 *
2582 	 * Take and drop the meta data lock to update inode fields
2583 	 * like i_size. This allows the checks down below
2584 	 * copy_splice_read() a chance of actually working.
2585 	 */
2586 	ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level,
2587 				     !nowait);
2588 	if (ret < 0) {
2589 		if (ret != -EAGAIN)
2590 			mlog_errno(ret);
2591 		goto bail;
2592 	}
2593 	ocfs2_inode_unlock(inode, lock_level);
2594 
2595 	ret = generic_file_read_iter(iocb, to);
2596 	trace_generic_file_read_iter_ret(ret);
2597 
2598 	/* buffered aio wouldn't have proper lock coverage today */
2599 	BUG_ON(ret == -EIOCBQUEUED && !direct_io);
2600 
2601 	/* see ocfs2_file_write_iter */
2602 	if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
2603 		rw_level = -1;
2604 	}
2605 
2606 bail:
2607 	if (rw_level != -1)
2608 		ocfs2_rw_unlock(inode, rw_level);
2609 
2610 	return ret;
2611 }
2612 
ocfs2_file_splice_read(struct file * in,loff_t * ppos,struct pipe_inode_info * pipe,size_t len,unsigned int flags)2613 static ssize_t ocfs2_file_splice_read(struct file *in, loff_t *ppos,
2614 				      struct pipe_inode_info *pipe,
2615 				      size_t len, unsigned int flags)
2616 {
2617 	struct inode *inode = file_inode(in);
2618 	ssize_t ret = 0;
2619 	int lock_level = 0;
2620 
2621 	trace_ocfs2_file_splice_read(inode, in, in->f_path.dentry,
2622 				     (unsigned long long)OCFS2_I(inode)->ip_blkno,
2623 				     in->f_path.dentry->d_name.len,
2624 				     in->f_path.dentry->d_name.name,
2625 				     flags);
2626 
2627 	/*
2628 	 * We're fine letting folks race truncates and extending writes with
2629 	 * read across the cluster, just like they can locally.  Hence no
2630 	 * rw_lock during read.
2631 	 *
2632 	 * Take and drop the meta data lock to update inode fields like i_size.
2633 	 * This allows the checks down below filemap_splice_read() a chance of
2634 	 * actually working.
2635 	 */
2636 	ret = ocfs2_inode_lock_atime(inode, in->f_path.mnt, &lock_level, 1);
2637 	if (ret < 0) {
2638 		if (ret != -EAGAIN)
2639 			mlog_errno(ret);
2640 		goto bail;
2641 	}
2642 	ocfs2_inode_unlock(inode, lock_level);
2643 
2644 	ret = filemap_splice_read(in, ppos, pipe, len, flags);
2645 	trace_filemap_splice_read_ret(ret);
2646 bail:
2647 	return ret;
2648 }
2649 
2650 /* Refer generic_file_llseek_unlocked() */
ocfs2_file_llseek(struct file * file,loff_t offset,int whence)2651 static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int whence)
2652 {
2653 	struct inode *inode = file->f_mapping->host;
2654 	int ret = 0;
2655 
2656 	inode_lock(inode);
2657 
2658 	switch (whence) {
2659 	case SEEK_SET:
2660 		break;
2661 	case SEEK_END:
2662 		/* SEEK_END requires the OCFS2 inode lock for the file
2663 		 * because it references the file's size.
2664 		 */
2665 		ret = ocfs2_inode_lock(inode, NULL, 0);
2666 		if (ret < 0) {
2667 			mlog_errno(ret);
2668 			goto out;
2669 		}
2670 		offset += i_size_read(inode);
2671 		ocfs2_inode_unlock(inode, 0);
2672 		break;
2673 	case SEEK_CUR:
2674 		if (offset == 0) {
2675 			offset = file->f_pos;
2676 			goto out;
2677 		}
2678 		offset += file->f_pos;
2679 		break;
2680 	case SEEK_DATA:
2681 	case SEEK_HOLE:
2682 		ret = ocfs2_seek_data_hole_offset(file, &offset, whence);
2683 		if (ret)
2684 			goto out;
2685 		break;
2686 	default:
2687 		ret = -EINVAL;
2688 		goto out;
2689 	}
2690 
2691 	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
2692 
2693 out:
2694 	inode_unlock(inode);
2695 	if (ret)
2696 		return ret;
2697 	return offset;
2698 }
2699 
ocfs2_remap_file_range(struct file * file_in,loff_t pos_in,struct file * file_out,loff_t pos_out,loff_t len,unsigned int remap_flags)2700 static loff_t ocfs2_remap_file_range(struct file *file_in, loff_t pos_in,
2701 				     struct file *file_out, loff_t pos_out,
2702 				     loff_t len, unsigned int remap_flags)
2703 {
2704 	struct inode *inode_in = file_inode(file_in);
2705 	struct inode *inode_out = file_inode(file_out);
2706 	struct ocfs2_super *osb = OCFS2_SB(inode_in->i_sb);
2707 	struct buffer_head *in_bh = NULL, *out_bh = NULL;
2708 	bool same_inode = (inode_in == inode_out);
2709 	loff_t remapped = 0;
2710 	ssize_t ret;
2711 
2712 	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
2713 		return -EINVAL;
2714 	if (!ocfs2_refcount_tree(osb))
2715 		return -EOPNOTSUPP;
2716 	if (unlikely(ocfs2_emergency_state(osb)))
2717 		return -EROFS;
2718 
2719 	/* Lock both files against IO */
2720 	ret = ocfs2_reflink_inodes_lock(inode_in, &in_bh, inode_out, &out_bh);
2721 	if (ret)
2722 		return ret;
2723 
2724 	/* Check file eligibility and prepare for block sharing. */
2725 	ret = -EINVAL;
2726 	if ((OCFS2_I(inode_in)->ip_flags & OCFS2_INODE_SYSTEM_FILE) ||
2727 	    (OCFS2_I(inode_out)->ip_flags & OCFS2_INODE_SYSTEM_FILE))
2728 		goto out_unlock;
2729 
2730 	ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
2731 			&len, remap_flags);
2732 	if (ret < 0 || len == 0)
2733 		goto out_unlock;
2734 
2735 	/* Lock out changes to the allocation maps and remap. */
2736 	down_write(&OCFS2_I(inode_in)->ip_alloc_sem);
2737 	if (!same_inode)
2738 		down_write_nested(&OCFS2_I(inode_out)->ip_alloc_sem,
2739 				  SINGLE_DEPTH_NESTING);
2740 
2741 	/* Zap any page cache for the destination file's range. */
2742 	truncate_inode_pages_range(&inode_out->i_data,
2743 				   round_down(pos_out, PAGE_SIZE),
2744 				   round_up(pos_out + len, PAGE_SIZE) - 1);
2745 
2746 	remapped = ocfs2_reflink_remap_blocks(inode_in, in_bh, pos_in,
2747 			inode_out, out_bh, pos_out, len);
2748 	up_write(&OCFS2_I(inode_in)->ip_alloc_sem);
2749 	if (!same_inode)
2750 		up_write(&OCFS2_I(inode_out)->ip_alloc_sem);
2751 	if (remapped < 0) {
2752 		ret = remapped;
2753 		mlog_errno(ret);
2754 		goto out_unlock;
2755 	}
2756 
2757 	/*
2758 	 * Empty the extent map so that we may get the right extent
2759 	 * record from the disk.
2760 	 */
2761 	ocfs2_extent_map_trunc(inode_in, 0);
2762 	ocfs2_extent_map_trunc(inode_out, 0);
2763 
2764 	ret = ocfs2_reflink_update_dest(inode_out, out_bh, pos_out + len);
2765 	if (ret) {
2766 		mlog_errno(ret);
2767 		goto out_unlock;
2768 	}
2769 
2770 out_unlock:
2771 	ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh);
2772 	return remapped > 0 ? remapped : ret;
2773 }
2774 
ocfs2_dir_llseek(struct file * file,loff_t offset,int whence)2775 static loff_t ocfs2_dir_llseek(struct file *file, loff_t offset, int whence)
2776 {
2777 	struct ocfs2_file_private *fp = file->private_data;
2778 
2779 	return generic_llseek_cookie(file, offset, whence, &fp->cookie);
2780 }
2781 
2782 const struct inode_operations ocfs2_file_iops = {
2783 	.setattr	= ocfs2_setattr,
2784 	.getattr	= ocfs2_getattr,
2785 	.permission	= ocfs2_permission,
2786 	.listxattr	= ocfs2_listxattr,
2787 	.fiemap		= ocfs2_fiemap,
2788 	.get_inode_acl	= ocfs2_iop_get_acl,
2789 	.set_acl	= ocfs2_iop_set_acl,
2790 	.fileattr_get	= ocfs2_fileattr_get,
2791 	.fileattr_set	= ocfs2_fileattr_set,
2792 };
2793 
2794 const struct inode_operations ocfs2_special_file_iops = {
2795 	.setattr	= ocfs2_setattr,
2796 	.getattr	= ocfs2_getattr,
2797 	.listxattr	= ocfs2_listxattr,
2798 	.permission	= ocfs2_permission,
2799 	.get_inode_acl	= ocfs2_iop_get_acl,
2800 	.set_acl	= ocfs2_iop_set_acl,
2801 };
2802 
2803 /*
2804  * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
2805  * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
2806  */
2807 const struct file_operations ocfs2_fops = {
2808 	.llseek		= ocfs2_file_llseek,
2809 	.mmap_prepare	= ocfs2_mmap_prepare,
2810 	.fsync		= ocfs2_sync_file,
2811 	.release	= ocfs2_file_release,
2812 	.open		= ocfs2_file_open,
2813 	.read_iter	= ocfs2_file_read_iter,
2814 	.write_iter	= ocfs2_file_write_iter,
2815 	.unlocked_ioctl	= ocfs2_ioctl,
2816 #ifdef CONFIG_COMPAT
2817 	.compat_ioctl   = ocfs2_compat_ioctl,
2818 #endif
2819 	.lock		= ocfs2_lock,
2820 	.flock		= ocfs2_flock,
2821 	.splice_read	= ocfs2_file_splice_read,
2822 	.splice_write	= iter_file_splice_write,
2823 	.fallocate	= ocfs2_fallocate,
2824 	.remap_file_range = ocfs2_remap_file_range,
2825 	.fop_flags	= FOP_ASYNC_LOCK,
2826 };
2827 
2828 WRAP_DIR_ITER(ocfs2_readdir) // FIXME!
2829 const struct file_operations ocfs2_dops = {
2830 	.llseek		= ocfs2_dir_llseek,
2831 	.read		= generic_read_dir,
2832 	.iterate_shared	= shared_ocfs2_readdir,
2833 	.fsync		= ocfs2_sync_file,
2834 	.release	= ocfs2_dir_release,
2835 	.open		= ocfs2_dir_open,
2836 	.unlocked_ioctl	= ocfs2_ioctl,
2837 #ifdef CONFIG_COMPAT
2838 	.compat_ioctl   = ocfs2_compat_ioctl,
2839 #endif
2840 	.lock		= ocfs2_lock,
2841 	.flock		= ocfs2_flock,
2842 	.fop_flags	= FOP_ASYNC_LOCK,
2843 };
2844 
2845 /*
2846  * POSIX-lockless variants of our file_operations.
2847  *
2848  * These will be used if the underlying cluster stack does not support
2849  * posix file locking, if the user passes the "localflocks" mount
2850  * option, or if we have a local-only fs.
2851  *
2852  * ocfs2_flock is in here because all stacks handle UNIX file locks,
2853  * so we still want it in the case of no stack support for
2854  * plocks. Internally, it will do the right thing when asked to ignore
2855  * the cluster.
2856  */
2857 const struct file_operations ocfs2_fops_no_plocks = {
2858 	.llseek		= ocfs2_file_llseek,
2859 	.mmap_prepare	= ocfs2_mmap_prepare,
2860 	.fsync		= ocfs2_sync_file,
2861 	.release	= ocfs2_file_release,
2862 	.open		= ocfs2_file_open,
2863 	.read_iter	= ocfs2_file_read_iter,
2864 	.write_iter	= ocfs2_file_write_iter,
2865 	.unlocked_ioctl	= ocfs2_ioctl,
2866 #ifdef CONFIG_COMPAT
2867 	.compat_ioctl   = ocfs2_compat_ioctl,
2868 #endif
2869 	.flock		= ocfs2_flock,
2870 	.splice_read	= filemap_splice_read,
2871 	.splice_write	= iter_file_splice_write,
2872 	.fallocate	= ocfs2_fallocate,
2873 	.remap_file_range = ocfs2_remap_file_range,
2874 };
2875 
2876 const struct file_operations ocfs2_dops_no_plocks = {
2877 	.llseek		= ocfs2_dir_llseek,
2878 	.read		= generic_read_dir,
2879 	.iterate_shared	= shared_ocfs2_readdir,
2880 	.fsync		= ocfs2_sync_file,
2881 	.release	= ocfs2_dir_release,
2882 	.open		= ocfs2_dir_open,
2883 	.unlocked_ioctl	= ocfs2_ioctl,
2884 #ifdef CONFIG_COMPAT
2885 	.compat_ioctl   = ocfs2_compat_ioctl,
2886 #endif
2887 	.flock		= ocfs2_flock,
2888 };
2889