xref: /linux/fs/ocfs2/aops.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/fs.h>
7 #include <linux/slab.h>
8 #include <linux/highmem.h>
9 #include <linux/pagemap.h>
10 #include <asm/byteorder.h>
11 #include <linux/swap.h>
12 #include <linux/mpage.h>
13 #include <linux/quotaops.h>
14 #include <linux/blkdev.h>
15 #include <linux/uio.h>
16 #include <linux/mm.h>
17 
18 #include <cluster/masklog.h>
19 
20 #include "ocfs2.h"
21 
22 #include "alloc.h"
23 #include "aops.h"
24 #include "dlmglue.h"
25 #include "extent_map.h"
26 #include "file.h"
27 #include "inode.h"
28 #include "journal.h"
29 #include "suballoc.h"
30 #include "super.h"
31 #include "symlink.h"
32 #include "refcounttree.h"
33 #include "ocfs2_trace.h"
34 
35 #include "buffer_head_io.h"
36 #include "dir.h"
37 #include "namei.h"
38 #include "sysfile.h"
39 
40 #define OCFS2_DIO_MARK_EXTENT_BATCH 200
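/*
 * Presumably (its user is outside this excerpt) this bounds how many
 * unwritten extents are marked written per journal transaction when a
 * direct write completes, so that each transaction's credit
 * reservation stays small.
 */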
41 
42 static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
43 				   struct buffer_head *bh_result, int create)
44 {
45 	int err = -EIO;
46 	int status;
47 	struct ocfs2_dinode *fe = NULL;
48 	struct buffer_head *bh = NULL;
49 	struct buffer_head *buffer_cache_bh = NULL;
50 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
51 
52 	trace_ocfs2_symlink_get_block(
53 			(unsigned long long)OCFS2_I(inode)->ip_blkno,
54 			(unsigned long long)iblock, bh_result, create);
55 
56 	BUG_ON(ocfs2_inode_is_fast_symlink(inode));
57 
58 	if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
59 		mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
60 		     (unsigned long long)iblock);
61 		goto bail;
62 	}
63 
64 	status = ocfs2_read_inode_block(inode, &bh);
65 	if (status < 0) {
66 		mlog_errno(status);
67 		goto bail;
68 	}
69 	fe = (struct ocfs2_dinode *) bh->b_data;
70 
71 	if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
72 						    le32_to_cpu(fe->i_clusters))) {
73 		err = -ENOMEM;
74 		mlog(ML_ERROR, "block offset is outside the allocated size: "
75 		     "%llu\n", (unsigned long long)iblock);
76 		goto bail;
77 	}
78 
79 	/* We don't use the page cache to create symlink data, so if
80 	 * need be, copy it over from the buffer cache. */
81 	if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
82 		u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
83 			    iblock;
84 		buffer_cache_bh = sb_getblk(osb->sb, blkno);
85 		if (!buffer_cache_bh) {
86 			err = -ENOMEM;
87 			mlog(ML_ERROR, "couldn't getblock for symlink!\n");
88 			goto bail;
89 		}
90 
91 		/* we haven't locked out transactions, so a commit
92 		 * could've happened. Since we've got a reference on
93 		 * the bh, even if it commits while we're doing the
94 		 * copy, the data is still good. */
95 		if (buffer_jbd(buffer_cache_bh) && ocfs2_inode_is_new(inode)) {
96 			memcpy_to_folio(bh_result->b_folio,
97 					bh_result->b_size * iblock,
98 					buffer_cache_bh->b_data,
99 					bh_result->b_size);
100 			set_buffer_uptodate(bh_result);
101 		}
102 		brelse(buffer_cache_bh);
103 	}
104 
105 	map_bh(bh_result, inode->i_sb,
106 	       le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);
107 
108 	err = 0;
109 
110 bail:
111 	brelse(bh);
112 
113 	return err;
114 }
115 
116 static int ocfs2_lock_get_block(struct inode *inode, sector_t iblock,
117 		    struct buffer_head *bh_result, int create)
118 {
119 	int ret = 0;
120 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
121 
122 	down_read(&oi->ip_alloc_sem);
123 	ret = ocfs2_get_block(inode, iblock, bh_result, create);
124 	up_read(&oi->ip_alloc_sem);
125 
126 	return ret;
127 }
128 
129 int ocfs2_get_block(struct inode *inode, sector_t iblock,
130 		    struct buffer_head *bh_result, int create)
131 {
132 	int err = 0;
133 	unsigned int ext_flags;
134 	u64 max_blocks = bh_result->b_size >> inode->i_blkbits;
135 	u64 p_blkno, count, past_eof;
136 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
137 
138 	trace_ocfs2_get_block((unsigned long long)OCFS2_I(inode)->ip_blkno,
139 			      (unsigned long long)iblock, bh_result, create);
140 
141 	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
142 		mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
143 		     inode, inode->i_ino);
144 
145 	if (S_ISLNK(inode->i_mode)) {
146 		/* this always does I/O for some reason. */
147 		err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
148 		goto bail;
149 	}
150 
151 	err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count,
152 					  &ext_flags);
153 	if (err) {
154 		mlog(ML_ERROR, "get_blocks() failed, inode: 0x%p, "
155 		     "block: %llu\n", inode, (unsigned long long)iblock);
156 		goto bail;
157 	}
158 
159 	if (max_blocks < count)
160 		count = max_blocks;
161 
162 	/*
163 	 * ocfs2 never allocates in this function - the only time we
164 	 * need to use BH_New is when we're extending i_size on a file
165 	 * system which doesn't support holes, in which case BH_New
166 	 * allows __block_write_begin() to zero.
167 	 *
168 	 * If we see this on a sparse file system, then a truncate has
169 	 * raced us and removed the cluster. In this case, we clear
170 	 * the buffers dirty and uptodate bits and let the buffer code
171 	 * ignore it as a hole.
172 	 */
173 	if (create && p_blkno == 0 && ocfs2_sparse_alloc(osb)) {
174 		clear_buffer_dirty(bh_result);
175 		clear_buffer_uptodate(bh_result);
176 		goto bail;
177 	}
178 
179 	/* Treat the unwritten extent as a hole for zeroing purposes. */
180 	if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
181 		map_bh(bh_result, inode->i_sb, p_blkno);
182 
183 	bh_result->b_size = count << inode->i_blkbits;
184 
185 	if (!ocfs2_sparse_alloc(osb)) {
186 		if (p_blkno == 0) {
187 			err = -EIO;
188 			mlog(ML_ERROR,
189 			     "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
190 			     (unsigned long long)iblock,
191 			     (unsigned long long)p_blkno,
192 			     (unsigned long long)OCFS2_I(inode)->ip_blkno);
193 			mlog(ML_ERROR, "Size %llu, clusters %u\n",
			     (unsigned long long)i_size_read(inode),
			     OCFS2_I(inode)->ip_clusters);
194 			dump_stack();
195 			goto bail;
196 		}
197 	}
198 
199 	past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
200 
201 	trace_ocfs2_get_block_end((unsigned long long)OCFS2_I(inode)->ip_blkno,
202 				  (unsigned long long)past_eof);
203 	if (create && (iblock >= past_eof))
204 		set_buffer_new(bh_result);
205 
206 bail:
207 	if (err < 0)
208 		err = -EIO;
209 
210 	return err;
211 }
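/*
 * A worked example of the mapping contract above, assuming a 4K block
 * size (i_blkbits == 12) and a caller that set bh_result->b_size to
 * 8192 bytes:
 *
 *	max_blocks = 8192 >> 12 = 2
 *
 * If ocfs2_extent_map_get_blocks() reports 5 contiguous blocks
 * starting at p_blkno, count is clamped to max_blocks, so the caller
 * gets back a mapping of b_size = 2 << 12 = 8192 bytes - never more
 * than it asked for, possibly less if the extent is shorter.
 */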
212 
213 int ocfs2_read_inline_data(struct inode *inode, struct folio *folio,
214 			   struct buffer_head *di_bh)
215 {
216 	loff_t size;
217 	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
218 
219 	if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) {
220 		ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag\n",
221 			    (unsigned long long)OCFS2_I(inode)->ip_blkno);
222 		return -EROFS;
223 	}
224 
225 	size = i_size_read(inode);
226 
227 	if (size > folio_size(folio) ||
228 	    size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
229 		ocfs2_error(inode->i_sb,
230 			    "Inode %llu with inline data has bad size: %Lu\n",
231 			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
232 			    (unsigned long long)size);
233 		return -EROFS;
234 	}
235 
236 	folio_fill_tail(folio, 0, di->id2.i_data.id_data, size);
237 	folio_mark_uptodate(folio);
238 
239 	return 0;
240 }
241 
242 static int ocfs2_readpage_inline(struct inode *inode, struct folio *folio)
243 {
244 	int ret;
245 	struct buffer_head *di_bh = NULL;
246 
247 	BUG_ON(!folio_test_locked(folio));
248 	BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
249 
250 	ret = ocfs2_read_inode_block(inode, &di_bh);
251 	if (ret) {
252 		mlog_errno(ret);
253 		goto out;
254 	}
255 
256 	ret = ocfs2_read_inline_data(inode, folio, di_bh);
257 out:
258 	folio_unlock(folio);
259 
260 	brelse(di_bh);
261 	return ret;
262 }
263 
264 static int ocfs2_read_folio(struct file *file, struct folio *folio)
265 {
266 	struct inode *inode = folio->mapping->host;
267 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
268 	loff_t start = folio_pos(folio);
269 	int ret, unlock = 1;
270 
271 	trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, folio->index);
272 
273 	ret = ocfs2_inode_lock_with_folio(inode, NULL, 0, folio);
274 	if (ret != 0) {
275 		if (ret == AOP_TRUNCATED_PAGE)
276 			unlock = 0;
277 		mlog_errno(ret);
278 		goto out;
279 	}
280 
281 	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
282 		/*
283 		 * Unlock the folio and cycle ip_alloc_sem so that we don't
284 		 * busyloop waiting for ip_alloc_sem to unlock
285 		 */
286 		ret = AOP_TRUNCATED_PAGE;
287 		folio_unlock(folio);
288 		unlock = 0;
289 		down_read(&oi->ip_alloc_sem);
290 		up_read(&oi->ip_alloc_sem);
291 		goto out_inode_unlock;
292 	}
293 
294 	/*
295 	 * i_size might have just been updated as we grabbed the meta lock.  We
296 	 * might now be discovering a truncate that hit on another node.
297 	 * block_read_full_folio->get_block freaks out if it is asked to read
298 	 * beyond the end of a file, so we check here.  Callers
299 	 * (generic_file_read, vm_ops->fault) are clever enough to check i_size
300 	 * and notice that the folio they just read isn't needed.
301 	 *
302 	 * XXX sys_readahead() seems to get that wrong?
303 	 */
304 	if (start >= i_size_read(inode)) {
305 		folio_zero_segment(folio, 0, folio_size(folio));
306 		folio_mark_uptodate(folio);
307 		ret = 0;
308 		goto out_alloc;
309 	}
310 
311 	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
312 		ret = ocfs2_readpage_inline(inode, folio);
313 	else
314 		ret = block_read_full_folio(folio, ocfs2_get_block);
315 	unlock = 0;
316 
317 out_alloc:
318 	up_read(&oi->ip_alloc_sem);
319 out_inode_unlock:
320 	ocfs2_inode_unlock(inode, 0);
321 out:
322 	if (unlock)
323 		folio_unlock(folio);
324 	return ret;
325 }
326 
327 /*
328  * This is used only for read-ahead. Failures or difficult to handle
329  * situations are safe to ignore.
330  *
331  * Right now, we don't bother with BH_Boundary - in-inode extent lists
332  * are quite large (243 extents on 4k blocks), so most inodes don't
333  * grow out to a tree. If need be, detecting boundary extents could
334  * trivially be added in a future version of ocfs2_get_block().
335  */
336 static void ocfs2_readahead(struct readahead_control *rac)
337 {
338 	int ret;
339 	struct inode *inode = rac->mapping->host;
340 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
341 
342 	/*
343 	 * Use the nonblocking flag for the dlm code to avoid page
344 	 * lock inversion, but don't bother with retrying.
345 	 */
346 	ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
347 	if (ret)
348 		return;
349 
350 	if (down_read_trylock(&oi->ip_alloc_sem) == 0)
351 		goto out_unlock;
352 
353 	/*
354 	 * Don't bother with inline-data. There isn't anything
355 	 * to read-ahead in that case anyway...
356 	 */
357 	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
358 		goto out_up;
359 
360 	/*
361 	 * Check whether a remote node truncated this file - we just
362 	 * drop out in that case as it's not worth handling here.
363 	 */
364 	if (readahead_pos(rac) >= i_size_read(inode))
365 		goto out_up;
366 
367 	mpage_readahead(rac, ocfs2_get_block);
368 
369 out_up:
370 	up_read(&oi->ip_alloc_sem);
371 out_unlock:
372 	ocfs2_inode_unlock(inode, 0);
373 }
374 
375 /* Note: Because we don't support holes, our allocation has
376  * already happened (allocation writes zeros to the file data)
377  * so we don't have to worry about ordered writes in
378  * ocfs2_writepages.
379  *
380  * ->writepages is called during the process of invalidating the page cache
381  * during blocked lock processing.  It can't block on any cluster locks
382  * during block mapping.  It's relying on the fact that the block
383  * mapping can't have disappeared under the dirty pages that it is
384  * being asked to write back.
385  */
386 static int ocfs2_writepages(struct address_space *mapping,
387 		struct writeback_control *wbc)
388 {
389 	return mpage_writepages(mapping, wbc, ocfs2_get_block);
390 }
391 
392 /* Taken from ext3. We don't necessarily need the full blown
393  * functionality yet, but IMHO it's better to cut and paste the whole
394  * thing so we can avoid introducing our own bugs (and easily pick up
395  * their fixes when they happen) --Mark */
396 int walk_page_buffers(	handle_t *handle,
397 			struct buffer_head *head,
398 			unsigned from,
399 			unsigned to,
400 			int *partial,
401 			int (*fn)(	handle_t *handle,
402 					struct buffer_head *bh))
403 {
404 	struct buffer_head *bh;
405 	unsigned block_start, block_end;
406 	unsigned blocksize = head->b_size;
407 	int err, ret = 0;
408 	struct buffer_head *next;
409 
410 	for (	bh = head, block_start = 0;
411 		ret == 0 && (bh != head || !block_start);
412 	    	block_start = block_end, bh = next)
413 	{
414 		next = bh->b_this_page;
415 		block_end = block_start + blocksize;
416 		if (block_end <= from || block_start >= to) {
417 			if (partial && !buffer_uptodate(bh))
418 				*partial = 1;
419 			continue;
420 		}
421 		err = (*fn)(handle, bh);
422 		if (!ret)
423 			ret = err;
424 	}
425 	return ret;
426 }
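/*
 * A hypothetical caller (the names below are illustrative, not from
 * this file) would pass a journalling callback to be applied to every
 * buffer overlapping [from, to):
 *
 *	static int do_journal_get_write_access(handle_t *handle,
 *					       struct buffer_head *bh)
 *	{
 *		return ocfs2_journal_access(handle, ci, bh,
 *					    OCFS2_JOURNAL_ACCESS_WRITE);
 *	}
 *
 *	ret = walk_page_buffers(handle, folio_buffers(folio), from, to,
 *				&partial, do_journal_get_write_access);
 *
 * *partial ends up set if some buffer outside [from, to) was not
 * uptodate, i.e. the folio cannot be marked uptodate as a whole.
 */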
427 
428 static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
429 {
430 	sector_t status;
431 	u64 p_blkno = 0;
432 	int err = 0;
433 	struct inode *inode = mapping->host;
434 
435 	trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno,
436 			 (unsigned long long)block);
437 
438 	/*
439 	 * The swap code (ab-)uses ->bmap to get a block mapping and then
440 	 * bypasses the file system for actual I/O.  We really can't allow
441 	 * that on refcounted inodes, so we have to skip out here.  And yes,
442 	 * 0 is the magic code for a bmap error..
443 	 * 0 is the magic code for a bmap error.
444 	if (ocfs2_is_refcount_inode(inode))
445 		return 0;
446 
447 	/* We don't need to lock journal system files, since they aren't
448 	 * accessed concurrently from multiple nodes.
449 	 */
450 	if (!INODE_JOURNAL(inode)) {
451 		err = ocfs2_inode_lock(inode, NULL, 0);
452 		if (err) {
453 			if (err != -ENOENT)
454 				mlog_errno(err);
455 			goto bail;
456 		}
457 		down_read(&OCFS2_I(inode)->ip_alloc_sem);
458 	}
459 
460 	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
461 		err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
462 						  NULL);
463 
464 	if (!INODE_JOURNAL(inode)) {
465 		up_read(&OCFS2_I(inode)->ip_alloc_sem);
466 		ocfs2_inode_unlock(inode, 0);
467 	}
468 
469 	if (err) {
470 		mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
471 		     (unsigned long long)block);
472 		mlog_errno(err);
473 		goto bail;
474 	}
475 
476 bail:
477 	status = err ? 0 : p_blkno;
478 
479 	return status;
480 }
481 
482 static bool ocfs2_release_folio(struct folio *folio, gfp_t wait)
483 {
484 	if (!folio_buffers(folio))
485 		return false;
486 	return try_to_free_buffers(folio);
487 }
488 
489 static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
490 					    u32 cpos,
491 					    unsigned int *start,
492 					    unsigned int *end)
493 {
494 	unsigned int cluster_start = 0, cluster_end = PAGE_SIZE;
495 
496 	if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) {
497 		unsigned int cpp;
498 
499 		cpp = 1 << (PAGE_SHIFT - osb->s_clustersize_bits);
500 
501 		cluster_start = cpos % cpp;
502 		cluster_start = cluster_start << osb->s_clustersize_bits;
503 
504 		cluster_end = cluster_start + osb->s_clustersize;
505 	}
506 
507 	BUG_ON(cluster_start > PAGE_SIZE);
508 	BUG_ON(cluster_end > PAGE_SIZE);
509 
510 	if (start)
511 		*start = cluster_start;
512 	if (end)
513 		*end = cluster_end;
514 }
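/*
 * Worked example for the function above, assuming 64K pages
 * (PAGE_SHIFT == 16) and 4K clusters (s_clustersize_bits == 12):
 *
 *	cpp           = 1 << (16 - 12) = 16 clusters per page
 *	cpos = 37  -> cluster_start = (37 % 16) << 12 = 5 << 12 = 20480
 *	              cluster_end   = 20480 + 4096        = 24576
 *
 * When the page is not larger than a cluster, the defaults of 0 and
 * PAGE_SIZE are returned instead, i.e. the whole page.
 */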
515 
516 /*
517  * 'from' and 'to' are the region in the page to avoid zeroing.
518  *
519  * If pagesize > clustersize, this function will avoid zeroing outside
520  * of the cluster boundary.
521  *
522  * from == to == 0 is code for "zero the entire cluster region"
523  */
524 static void ocfs2_clear_folio_regions(struct folio *folio,
525 				     struct ocfs2_super *osb, u32 cpos,
526 				     unsigned from, unsigned to)
527 {
528 	void *kaddr;
529 	unsigned int cluster_start, cluster_end;
530 
531 	ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);
532 
533 	kaddr = kmap_local_folio(folio, 0);
534 
535 	if (from || to) {
536 		if (from > cluster_start)
537 			memset(kaddr + cluster_start, 0, from - cluster_start);
538 		if (to < cluster_end)
539 			memset(kaddr + to, 0, cluster_end - to);
540 	} else {
541 		memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
542 	}
543 
544 	kunmap_local(kaddr);
545 }
546 
547 /*
548  * Nonsparse file systems fully allocate before we get to the write
549  * code. This prevents ocfs2_write() from tagging the write as an
550  * allocating one, which means ocfs2_map_folio_blocks() might try to
551  * read-in the blocks at the tail of our file. Avoid reading them by
552  * testing i_size against each block offset.
553  */
554 static int ocfs2_should_read_blk(struct inode *inode, struct folio *folio,
555 				 unsigned int block_start)
556 {
557 	u64 offset = folio_pos(folio) + block_start;
558 
559 	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
560 		return 1;
561 
562 	if (i_size_read(inode) > offset)
563 		return 1;
564 
565 	return 0;
566 }
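/*
 * Example, assuming a non-sparse file system, 1K blocks, a 4K folio at
 * folio_pos() == 4096 and i_size == 5000: the block at block_start 0
 * sits at byte offset 4096 < 5000 and must be read in, while the block
 * at block_start 1024 sits at offset 5120 >= 5000 and can be skipped -
 * nothing past i_size has ever been written on disk.
 */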
567 
568 /*
569  * Some of this taken from __block_write_begin(). We already have our
570  * mapping by now though, and the entire write will be allocating or
571  * it won't, so not much need to use BH_New.
572  *
573  * This will also skip zeroing, which is handled externally.
574  */
575 int ocfs2_map_folio_blocks(struct folio *folio, u64 *p_blkno,
576 			  struct inode *inode, unsigned int from,
577 			  unsigned int to, int new)
578 {
579 	int ret = 0;
580 	struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
581 	unsigned int block_end, block_start;
582 	unsigned int bsize = i_blocksize(inode);
583 
584 	head = folio_buffers(folio);
585 	if (!head)
586 		head = create_empty_buffers(folio, bsize, 0);
587 
588 	for (bh = head, block_start = 0; bh != head || !block_start;
589 	     bh = bh->b_this_page, block_start += bsize) {
590 		block_end = block_start + bsize;
591 
592 		clear_buffer_new(bh);
593 
594 		/*
595 		 * Ignore blocks outside of our i/o range -
596 		 * they may belong to unallocated clusters.
597 		 */
598 		if (block_start >= to || block_end <= from) {
599 			if (folio_test_uptodate(folio))
600 				set_buffer_uptodate(bh);
601 			continue;
602 		}
603 
604 		/*
605 		 * For an allocating write with cluster size >= page
606 		 * size, we always write the entire page.
607 		 */
608 		if (new)
609 			set_buffer_new(bh);
610 
611 		if (!buffer_mapped(bh)) {
612 			map_bh(bh, inode->i_sb, *p_blkno);
613 			clean_bdev_bh_alias(bh);
614 		}
615 
616 		if (folio_test_uptodate(folio)) {
617 			set_buffer_uptodate(bh);
618 		} else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
619 			   !buffer_new(bh) &&
620 			   ocfs2_should_read_blk(inode, folio, block_start) &&
621 			   (block_start < from || block_end > to)) {
622 			bh_read_nowait(bh, 0);
623 			*wait_bh++ = bh;
624 		}
625 
626 		*p_blkno = *p_blkno + 1;
627 	}
628 
629 	/*
630 	 * If we issued read requests - let them complete.
631 	 */
632 	while (wait_bh > wait) {
633 		wait_on_buffer(*--wait_bh);
634 		if (!buffer_uptodate(*wait_bh))
635 			ret = -EIO;
636 	}
637 
638 	if (ret == 0 || !new)
639 		return ret;
640 
641 	/*
642 	 * If we get -EIO above, zero out any newly allocated blocks
643 	 * to avoid exposing stale data.
644 	 */
645 	bh = head;
646 	block_start = 0;
647 	do {
648 		block_end = block_start + bsize;
649 		if (block_end <= from)
650 			goto next_bh;
651 		if (block_start >= to)
652 			break;
653 
654 		folio_zero_range(folio, block_start, bh->b_size);
655 		set_buffer_uptodate(bh);
656 		mark_buffer_dirty(bh);
657 
658 next_bh:
659 		block_start = block_end;
660 		bh = bh->b_this_page;
661 	} while (bh != head);
662 
663 	return ret;
664 }
665 
666 #if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
667 #define OCFS2_MAX_CTXT_PAGES	1
668 #else
669 #define OCFS2_MAX_CTXT_PAGES	(OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE)
670 #endif
671 
672 #define OCFS2_MAX_CLUSTERS_PER_PAGE	(PAGE_SIZE / OCFS2_MIN_CLUSTERSIZE)
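/*
 * With the on-disk limits from ocfs2_fs.h (OCFS2_MAX_CLUSTERSIZE is
 * 1M, OCFS2_MIN_CLUSTERSIZE is 4K), a 4K-page system gets
 * OCFS2_MAX_CTXT_PAGES == 1M / 4K == 256 and
 * OCFS2_MAX_CLUSTERS_PER_PAGE == 4K / 4K == 1, while a 64K-page
 * system gets 16 context pages and up to 16 clusters per page.
 */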
673 
674 struct ocfs2_unwritten_extent {
675 	struct list_head	ue_node;
676 	struct list_head	ue_ip_node;
677 	u32			ue_cpos;
678 	u32			ue_phys;
679 };
680 
681 /*
682  * Describe the state of a single cluster to be written to.
683  */
684 struct ocfs2_write_cluster_desc {
685 	u32		c_cpos;
686 	u32		c_phys;
687 	/*
688 	 * Give this a unique field because c_phys eventually gets
689 	 * filled.
690 	 */
691 	unsigned	c_new;
692 	unsigned	c_clear_unwritten;
693 	unsigned	c_needs_zero;
694 };
695 
696 struct ocfs2_write_ctxt {
697 	/* Logical cluster position / len of write */
698 	u32				w_cpos;
699 	u32				w_clen;
700 
701 	/* First cluster allocated in a nonsparse extend */
702 	u32				w_first_new_cpos;
703 
704 	/* Type of caller. Must be one of buffer, mmap, direct.  */
705 	ocfs2_write_type_t		w_type;
706 
707 	struct ocfs2_write_cluster_desc	w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];
708 
709 	/*
710 	 * This is true if page_size > cluster_size.
711 	 *
712 	 * It triggers a set of special cases during write which might
713 	 * have to deal with allocating writes to partial pages.
714 	 */
715 	unsigned int			w_large_pages;
716 
717 	/*
718 	 * Folios involved in this write.
719 	 *
720 	 * w_target_folio is the folio being written to by the user.
721 	 *
722 	 * w_folios is an array of folios which always contains
723 	 * w_target_folio, and in the case of an allocating write with
724 	 * page_size < cluster size, it will contain zero'd and mapped
725 	 * pages adjacent to w_target_folio which need to be written
726 	 * out so that future reads from that region will get
727 	 * zeros.
728 	 */
729 	unsigned int			w_num_folios;
730 	struct folio			*w_folios[OCFS2_MAX_CTXT_PAGES];
731 	struct folio			*w_target_folio;
732 
733 	/*
734 	 * w_target_locked is used on the page_mkwrite path to indicate that
735 	 * w_target_folio must not be unlocked in ocfs2_write_end_nolock.
736 	 */
737 	unsigned int			w_target_locked:1;
738 
739 	/*
740 	 * ocfs2_write_end() uses this to know what the real range to
741 	 * write in the target should be.
742 	 */
743 	unsigned int			w_target_from;
744 	unsigned int			w_target_to;
745 
746 	/*
747 	 * We could use journal_current_handle() but this is cleaner,
748 	 * IMHO -Mark
749 	 */
750 	handle_t			*w_handle;
751 
752 	struct buffer_head		*w_di_bh;
753 
754 	struct ocfs2_cached_dealloc_ctxt w_dealloc;
755 
756 	struct list_head		w_unwritten_list;
757 	unsigned int			w_unwritten_count;
758 };
759 
760 void ocfs2_unlock_and_free_folios(struct folio **folios, int num_folios)
761 {
762 	int i;
763 
764 	for (i = 0; i < num_folios; i++) {
765 		if (!folios[i])
766 			continue;
767 		folio_unlock(folios[i]);
768 		folio_mark_accessed(folios[i]);
769 		folio_put(folios[i]);
770 	}
771 }
772 
773 static void ocfs2_unlock_folios(struct ocfs2_write_ctxt *wc)
774 {
775 	int i;
776 
777 	/*
778 	 * w_target_locked is only set to true in the page_mkwrite() case.
779 	 * The intent is to allow us to lock the target page from write_begin()
780 	 * to write_end(). The caller must hold a ref on w_target_folio.
781 	 */
782 	if (wc->w_target_locked) {
783 		BUG_ON(!wc->w_target_folio);
784 		for (i = 0; i < wc->w_num_folios; i++) {
785 			if (wc->w_target_folio == wc->w_folios[i]) {
786 				wc->w_folios[i] = NULL;
787 				break;
788 			}
789 		}
790 		folio_mark_accessed(wc->w_target_folio);
791 		folio_put(wc->w_target_folio);
792 	}
793 	ocfs2_unlock_and_free_folios(wc->w_folios, wc->w_num_folios);
794 }
795 
796 static void ocfs2_free_unwritten_list(struct inode *inode,
797 				 struct list_head *head)
798 {
799 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
800 	struct ocfs2_unwritten_extent *ue = NULL, *tmp = NULL;
801 
802 	list_for_each_entry_safe(ue, tmp, head, ue_node) {
803 		list_del(&ue->ue_node);
804 		spin_lock(&oi->ip_lock);
805 		list_del(&ue->ue_ip_node);
806 		spin_unlock(&oi->ip_lock);
807 		kfree(ue);
808 	}
809 }
810 
811 static void ocfs2_free_write_ctxt(struct inode *inode,
812 				  struct ocfs2_write_ctxt *wc)
813 {
814 	ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list);
815 	ocfs2_unlock_folios(wc);
816 	brelse(wc->w_di_bh);
817 	kfree(wc);
818 }
819 
820 static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
821 				  struct ocfs2_super *osb, loff_t pos,
822 				  unsigned len, ocfs2_write_type_t type,
823 				  struct buffer_head *di_bh)
824 {
825 	u32 cend;
826 	struct ocfs2_write_ctxt *wc;
827 
828 	wc = kzalloc_obj(struct ocfs2_write_ctxt, GFP_NOFS);
829 	if (!wc)
830 		return -ENOMEM;
831 
832 	wc->w_cpos = pos >> osb->s_clustersize_bits;
833 	wc->w_first_new_cpos = UINT_MAX;
834 	cend = (pos + len - 1) >> osb->s_clustersize_bits;
835 	wc->w_clen = cend - wc->w_cpos + 1;
836 	get_bh(di_bh);
837 	wc->w_di_bh = di_bh;
838 	wc->w_type = type;
839 
840 	if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits))
841 		wc->w_large_pages = 1;
842 	else
843 		wc->w_large_pages = 0;
844 
845 	ocfs2_init_dealloc_ctxt(&wc->w_dealloc);
846 	INIT_LIST_HEAD(&wc->w_unwritten_list);
847 
848 	*wcp = wc;
849 
850 	return 0;
851 }
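/*
 * Example of the cluster math above, assuming 4K clusters
 * (s_clustersize_bits == 12), pos == 10000 and len == 3000:
 *
 *	w_cpos = 10000 >> 12              = 2
 *	cend   = (10000 + 3000 - 1) >> 12 = 12999 >> 12 = 3
 *	w_clen = 3 - 2 + 1                = 2
 *
 * i.e. the write touches two clusters even though len is less than
 * one cluster, because it straddles a cluster boundary.
 */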
852 
853 /*
854  * If a page has any new buffers, zero them out here, and mark them uptodate
855  * and dirty so they'll be written out (in order to prevent uninitialised
856  * block data from leaking). And clear the new bit.
857  */
858 static void ocfs2_zero_new_buffers(struct folio *folio, size_t from, size_t to)
859 {
860 	unsigned int block_start, block_end;
861 	struct buffer_head *head, *bh;
862 
863 	BUG_ON(!folio_test_locked(folio));
864 	head = folio_buffers(folio);
865 	if (!head)
866 		return;
867 
868 	bh = head;
869 	block_start = 0;
870 	do {
871 		block_end = block_start + bh->b_size;
872 
873 		if (buffer_new(bh)) {
874 			if (block_end > from && block_start < to) {
875 				if (!folio_test_uptodate(folio)) {
876 					unsigned start, end;
877 
878 					start = max(from, block_start);
879 					end = min(to, block_end);
880 
881 					folio_zero_segment(folio, start, end);
882 					set_buffer_uptodate(bh);
883 				}
884 
885 				clear_buffer_new(bh);
886 				mark_buffer_dirty(bh);
887 			}
888 		}
889 
890 		block_start = block_end;
891 		bh = bh->b_this_page;
892 	} while (bh != head);
893 }
894 
895 /*
896  * Only called when we have a failure during allocating write to write
897  * zero's to the newly allocated region.
898  */
899 static void ocfs2_write_failure(struct inode *inode,
900 				struct ocfs2_write_ctxt *wc,
901 				loff_t user_pos, unsigned user_len)
902 {
903 	int i;
904 	unsigned from = user_pos & (PAGE_SIZE - 1),
905 		to = user_pos + user_len;
906 
907 	if (wc->w_target_folio)
908 		ocfs2_zero_new_buffers(wc->w_target_folio, from, to);
909 
910 	for (i = 0; i < wc->w_num_folios; i++) {
911 		struct folio *folio = wc->w_folios[i];
912 
913 		if (folio && folio_buffers(folio)) {
914 			if (ocfs2_should_order_data(inode))
915 				ocfs2_jbd2_inode_add_write(wc->w_handle, inode,
916 							   user_pos, user_len);
917 
918 			block_commit_write(folio, from, to);
919 		}
920 	}
921 }
922 
923 static int ocfs2_prepare_folio_for_write(struct inode *inode, u64 *p_blkno,
924 		struct ocfs2_write_ctxt *wc, struct folio *folio, u32 cpos,
925 		loff_t user_pos, unsigned user_len, int new)
926 {
927 	int ret;
928 	unsigned int map_from = 0, map_to = 0;
929 	unsigned int cluster_start, cluster_end;
930 	unsigned int user_data_from = 0, user_data_to = 0;
931 
932 	ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos,
933 					&cluster_start, &cluster_end);
934 
935 	/* treat the write as new if a hole/lseek spanned across
936 	 * the page boundary.
937 	 */
938 	new = new | ((i_size_read(inode) <= folio_pos(folio)) &&
939 			(folio_pos(folio) <= user_pos));
940 
941 	if (folio == wc->w_target_folio) {
942 		map_from = user_pos & (PAGE_SIZE - 1);
943 		map_to = map_from + user_len;
944 
945 		if (new)
946 			ret = ocfs2_map_folio_blocks(folio, p_blkno, inode,
947 					cluster_start, cluster_end, new);
948 		else
949 			ret = ocfs2_map_folio_blocks(folio, p_blkno, inode,
950 					map_from, map_to, new);
951 		if (ret) {
952 			mlog_errno(ret);
953 			goto out;
954 		}
955 
956 		user_data_from = map_from;
957 		user_data_to = map_to;
958 		if (new) {
959 			map_from = cluster_start;
960 			map_to = cluster_end;
961 		}
962 	} else {
963 		/*
964 		 * If we haven't allocated the new folio yet, we
965 		 * shouldn't be writing it out without copying user
966 		 * data. This is likely a math error from the caller.
967 		 */
968 		BUG_ON(!new);
969 
970 		map_from = cluster_start;
971 		map_to = cluster_end;
972 
973 		ret = ocfs2_map_folio_blocks(folio, p_blkno, inode,
974 				cluster_start, cluster_end, new);
975 		if (ret) {
976 			mlog_errno(ret);
977 			goto out;
978 		}
979 	}
980 
981 	/*
982 	 * Parts of newly allocated folios need to be zero'd.
983 	 *
984 	 * Above, we have also rewritten 'to' and 'from' - as far as
985 	 * the rest of the function is concerned, the entire cluster
986 	 * range inside of a folio needs to be written.
987 	 *
988 	 * We can skip this if the folio is uptodate - it's already
989 	 * been zero'd from being read in as a hole.
990 	 */
991 	if (new && !folio_test_uptodate(folio))
992 		ocfs2_clear_folio_regions(folio, OCFS2_SB(inode->i_sb),
993 					 cpos, user_data_from, user_data_to);
994 
995 	flush_dcache_folio(folio);
996 
997 out:
998 	return ret;
999 }
1000 
1001 /*
1002  * This function will only grab one cluster's worth of pages.
1003  */
1004 static int ocfs2_grab_folios_for_write(struct address_space *mapping,
1005 		struct ocfs2_write_ctxt *wc, u32 cpos, loff_t user_pos,
1006 		unsigned user_len, int new, struct folio *mmap_folio)
1007 {
1008 	int ret = 0, i;
1009 	unsigned long start, target_index, end_index, index;
1010 	struct inode *inode = mapping->host;
1011 	loff_t last_byte;
1012 
1013 	target_index = user_pos >> PAGE_SHIFT;
1014 
1015 	/*
1016 	 * Figure out how many pages we'll be manipulating here. For
1017 	 * a non-allocating write, we just change the one
1018 	 * page. Otherwise, we'll need a whole cluster's worth.  If we're
1019 	 * writing past i_size, we only need enough pages to cover the
1020 	 * last page of the write.
1021 	 */
1022 	if (new) {
1023 		wc->w_num_folios = ocfs2_pages_per_cluster(inode->i_sb);
1024 		start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
1025 		/*
1026 		 * We need the index *past* the last page we could possibly
1027 		 * touch.  This is the page past the end of the write or
1028 		 * i_size, whichever is greater.
1029 		 */
1030 		last_byte = max(user_pos + user_len, i_size_read(inode));
1031 		BUG_ON(last_byte < 1);
1032 		end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1;
1033 		if ((start + wc->w_num_folios) > end_index)
1034 			wc->w_num_folios = end_index - start;
1035 	} else {
1036 		wc->w_num_folios = 1;
1037 		start = target_index;
1038 	}
1039 	end_index = (user_pos + user_len - 1) >> PAGE_SHIFT;
1040 
1041 	for (i = 0; i < wc->w_num_folios; i++) {
1042 		index = start + i;
1043 
1044 		if (index >= target_index && index <= end_index &&
1045 		    wc->w_type == OCFS2_WRITE_MMAP) {
1046 			/*
1047 			 * ocfs2_pagemkwrite() is a little different
1048 			 * and wants us to directly use the page
1049 			 * passed in.
1050 			 */
1051 			folio_lock(mmap_folio);
1052 
1053 			/* Exit and let the caller retry */
1054 			if (mmap_folio->mapping != mapping) {
1055 				WARN_ON(mmap_folio->mapping);
1056 				folio_unlock(mmap_folio);
1057 				ret = -EAGAIN;
1058 				goto out;
1059 			}
1060 
1061 			folio_get(mmap_folio);
1062 			wc->w_folios[i] = mmap_folio;
1063 			wc->w_target_locked = true;
1064 		} else if (index >= target_index && index <= end_index &&
1065 			   wc->w_type == OCFS2_WRITE_DIRECT) {
1066 			/* Direct write has no mapping page. */
1067 			wc->w_folios[i] = NULL;
1068 			continue;
1069 		} else {
1070 			wc->w_folios[i] = __filemap_get_folio(mapping, index,
1071 					FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
1072 					GFP_NOFS);
1073 			if (IS_ERR(wc->w_folios[i])) {
1074 				ret = PTR_ERR(wc->w_folios[i]);
1075 				mlog_errno(ret);
1076 				wc->w_folios[i] = NULL;
1077 				goto out;
1078 			}
1079 		}
1080 		folio_wait_stable(wc->w_folios[i]);
1081 
1082 		if (index == target_index)
1083 			wc->w_target_folio = wc->w_folios[i];
1084 	}
1085 out:
1086 	if (ret)
1087 		wc->w_target_locked = false;
1088 	return ret;
1089 }
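/*
 * Example of the page math above, assuming an allocating write with
 * 32K clusters and 4K pages: ocfs2_pages_per_cluster() gives 8, so up
 * to eight folios are locked, starting at the page index of the
 * cluster containing cpos. If user_pos + user_len and i_size both end
 * inside the cluster, end_index trims w_num_folios so that folios
 * entirely past the last byte we could touch are never instantiated.
 */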
1090 
1091 /*
1092  * Prepare a single cluster of the file for writing.
1093  */
1094 static int ocfs2_write_cluster(struct address_space *mapping,
1095 			       u32 *phys, unsigned int new,
1096 			       unsigned int clear_unwritten,
1097 			       unsigned int should_zero,
1098 			       struct ocfs2_alloc_context *data_ac,
1099 			       struct ocfs2_alloc_context *meta_ac,
1100 			       struct ocfs2_write_ctxt *wc, u32 cpos,
1101 			       loff_t user_pos, unsigned user_len)
1102 {
1103 	int ret, i;
1104 	u64 p_blkno;
1105 	struct inode *inode = mapping->host;
1106 	struct ocfs2_extent_tree et;
1107 	int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
1108 
1109 	if (new) {
1110 		u32 tmp_pos;
1111 
1112 		/*
1113 		 * This is safe to call with the page locks - it won't take
1114 		 * any additional semaphores or cluster locks.
1115 		 */
1116 		tmp_pos = cpos;
1117 		ret = ocfs2_add_inode_data(OCFS2_SB(inode->i_sb), inode,
1118 					   &tmp_pos, 1, !clear_unwritten,
1119 					   wc->w_di_bh, wc->w_handle,
1120 					   data_ac, meta_ac, NULL);
1121 		/*
1122 		 * This shouldn't happen because we must have already
1123 		 * calculated the correct metadata allocation required. The
1124 		 * internal tree allocation code should know how to increase
1125 		 * transaction credits itself.
1126 		 *
1127 		 * If need be, we could handle -EAGAIN for a
1128 		 * RESTART_TRANS here.
1129 		 */
1130 		mlog_bug_on_msg(ret == -EAGAIN,
1131 				"Inode %llu: EAGAIN return during allocation.\n",
1132 				(unsigned long long)OCFS2_I(inode)->ip_blkno);
1133 		if (ret < 0) {
1134 			mlog_errno(ret);
1135 			goto out;
1136 		}
1137 	} else if (clear_unwritten) {
1138 		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
1139 					      wc->w_di_bh);
1140 		ret = ocfs2_mark_extent_written(inode, &et,
1141 						wc->w_handle, cpos, 1, *phys,
1142 						meta_ac, &wc->w_dealloc);
1143 		if (ret < 0) {
1144 			mlog_errno(ret);
1145 			goto out;
1146 		}
1147 	}
1148 
1149 	/*
1150 	 * The only reason this should fail is due to an inability to
1151 	 * find the extent added.
1152 	 */
1153 	ret = ocfs2_get_clusters(inode, cpos, phys, NULL, NULL);
1154 	if (ret < 0) {
1155 		mlog(ML_ERROR, "Get physical blkno failed for inode %llu, "
1156 			    "at logical cluster %u\n",
1157 			    (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos);
1158 		goto out;
1159 	}
1160 
1161 	BUG_ON(*phys == 0);
1162 
1163 	p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *phys);
1164 	if (!should_zero)
1165 		p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1);
1166 
1167 	for (i = 0; i < wc->w_num_folios; i++) {
1168 		int tmpret;
1169 
1170 		/* This is the direct io target page. */
1171 		if (wc->w_folios[i] == NULL) {
1172 			p_blkno += (1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits));
1173 			continue;
1174 		}
1175 
1176 		tmpret = ocfs2_prepare_folio_for_write(inode, &p_blkno, wc,
1177 				wc->w_folios[i], cpos, user_pos, user_len,
1178 				should_zero);
1179 		if (tmpret) {
1180 			mlog_errno(tmpret);
1181 			if (ret == 0)
1182 				ret = tmpret;
1183 		}
1184 	}
1185 
1186 	/*
1187 	 * We only have cleanup to do in case of allocating write.
1188 	 */
1189 	if (ret && new)
1190 		ocfs2_write_failure(inode, wc, user_pos, user_len);
1191 
1192 out:
1193 
1194 	return ret;
1195 }
1196 
1197 static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
1198 				       struct ocfs2_alloc_context *data_ac,
1199 				       struct ocfs2_alloc_context *meta_ac,
1200 				       struct ocfs2_write_ctxt *wc,
1201 				       loff_t pos, unsigned len)
1202 {
1203 	int ret, i;
1204 	loff_t cluster_off;
1205 	unsigned int local_len = len;
1206 	struct ocfs2_write_cluster_desc *desc;
1207 	struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb);
1208 
1209 	for (i = 0; i < wc->w_clen; i++) {
1210 		desc = &wc->w_desc[i];
1211 
1212 		/*
1213 		 * We have to make sure that the total write passed in
1214 		 * doesn't extend past a single cluster.
1215 		 */
1216 		local_len = len;
1217 		cluster_off = pos & (osb->s_clustersize - 1);
1218 		if ((cluster_off + local_len) > osb->s_clustersize)
1219 			local_len = osb->s_clustersize - cluster_off;
1220 
1221 		ret = ocfs2_write_cluster(mapping, &desc->c_phys,
1222 					  desc->c_new,
1223 					  desc->c_clear_unwritten,
1224 					  desc->c_needs_zero,
1225 					  data_ac, meta_ac,
1226 					  wc, desc->c_cpos, pos, local_len);
1227 		if (ret) {
1228 			mlog_errno(ret);
1229 			goto out;
1230 		}
1231 
1232 		len -= local_len;
1233 		pos += local_len;
1234 	}
1235 
1236 	ret = 0;
1237 out:
1238 	return ret;
1239 }
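/*
 * Example of the per-cluster split above, assuming 4K clusters,
 * pos == 3000 and len == 3000:
 *
 *	i == 0: cluster_off = 3000, local_len = 4096 - 3000 = 1096
 *	i == 1: cluster_off = 0,    local_len = 1904
 *
 * so each call to ocfs2_write_cluster() stays within one cluster and
 * the loop advances pos/len accordingly.
 */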
1240 
1241 /*
1242  * ocfs2_write_end() wants to know which parts of the target page it
1243  * should complete the write on. It's easiest to compute them ahead of
1244  * time when a more complete view of the write is available.
1245  */
1246 static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
1247 					struct ocfs2_write_ctxt *wc,
1248 					loff_t pos, unsigned len, int alloc)
1249 {
1250 	struct ocfs2_write_cluster_desc *desc;
1251 
1252 	wc->w_target_from = pos & (PAGE_SIZE - 1);
1253 	wc->w_target_to = wc->w_target_from + len;
1254 
1255 	if (alloc == 0)
1256 		return;
1257 
1258 	/*
1259 	 * Allocating write - we may have different boundaries based
1260 	 * on page size and cluster size.
1261 	 *
1262 	 * NOTE: We can no longer compute one value from the other as
1263 	 * the actual write length and user provided length may be
1264 	 * different.
1265 	 */
1266 
1267 	if (wc->w_large_pages) {
1268 		/*
1269 		 * We only care about the 1st and last cluster within
1270 		 * our range and whether they should be zero'd or not. Either
1271 		 * value may be extended out to the start/end of a
1272 		 * newly allocated cluster.
1273 		 */
1274 		desc = &wc->w_desc[0];
1275 		if (desc->c_needs_zero)
1276 			ocfs2_figure_cluster_boundaries(osb,
1277 							desc->c_cpos,
1278 							&wc->w_target_from,
1279 							NULL);
1280 
1281 		desc = &wc->w_desc[wc->w_clen - 1];
1282 		if (desc->c_needs_zero)
1283 			ocfs2_figure_cluster_boundaries(osb,
1284 							desc->c_cpos,
1285 							NULL,
1286 							&wc->w_target_to);
1287 	} else {
1288 		wc->w_target_from = 0;
1289 		wc->w_target_to = PAGE_SIZE;
1290 	}
1291 }
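/*
 * Example, assuming 4K pages, pos == 5000 and len == 1000: the
 * non-allocating case gives w_target_from == 904 and
 * w_target_to == 1904 within the target page. An allocating write
 * with page size >= cluster size instead widens the range to the
 * whole page (0, PAGE_SIZE); with larger pages, only a first/last
 * descriptor that needs zeroing is widened out to its cluster
 * boundary.
 */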
1292 
1293 /*
1294  * Check if this extent is marked UNWRITTEN by direct io. If so, we need not
1295  * do the zero work, and must not clear UNWRITTEN since it will be cleared
1296  * by the direct io procedure.
1297  * If this is a new extent allocated by direct io, we should mark it in
1298  * the ip_unwritten_list.
1299  */
1300 static int ocfs2_unwritten_check(struct inode *inode,
1301 				 struct ocfs2_write_ctxt *wc,
1302 				 struct ocfs2_write_cluster_desc *desc)
1303 {
1304 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
1305 	struct ocfs2_unwritten_extent *ue = NULL, *new = NULL;
1306 	int ret = 0;
1307 
1308 	if (!desc->c_needs_zero)
1309 		return 0;
1310 
1311 retry:
1312 	spin_lock(&oi->ip_lock);
1313 	/* No need to zero, no matter whether this is a buffered or direct
1314 	 * write: whoever is already zeroing the cluster will clear the
1315 	 * unwritten flag once all of the cluster io has finished. */
1316 	list_for_each_entry(ue, &oi->ip_unwritten_list, ue_ip_node) {
1317 		if (desc->c_cpos == ue->ue_cpos) {
1318 			BUG_ON(desc->c_new);
1319 			desc->c_needs_zero = 0;
1320 			desc->c_clear_unwritten = 0;
1321 			goto unlock;
1322 		}
1323 	}
1324 
1325 	if (wc->w_type != OCFS2_WRITE_DIRECT)
1326 		goto unlock;
1327 
1328 	if (new == NULL) {
1329 		spin_unlock(&oi->ip_lock);
1330 		new = kmalloc_obj(struct ocfs2_unwritten_extent, GFP_NOFS);
1331 		if (new == NULL) {
1332 			ret = -ENOMEM;
1333 			goto out;
1334 		}
1335 		goto retry;
1336 	}
1337 	/* This direct write will do the zeroing. */
1338 	new->ue_cpos = desc->c_cpos;
1339 	new->ue_phys = desc->c_phys;
1340 	desc->c_clear_unwritten = 0;
1341 	list_add_tail(&new->ue_ip_node, &oi->ip_unwritten_list);
1342 	list_add_tail(&new->ue_node, &wc->w_unwritten_list);
1343 	wc->w_unwritten_count++;
1344 	new = NULL;
1345 unlock:
1346 	spin_unlock(&oi->ip_lock);
1347 out:
1348 	kfree(new);
1349 	return ret;
1350 }
1351 
1352 /*
1353  * Populate each single-cluster write descriptor in the write context
1354  * with information about the i/o to be done.
1355  *
1356  * Returns the number of clusters that will have to be allocated, as
1357  * well as a worst case estimate of the number of extent records that
1358  * would have to be created during a write to an unwritten region.
1359  */
1360 static int ocfs2_populate_write_desc(struct inode *inode,
1361 				     struct ocfs2_write_ctxt *wc,
1362 				     unsigned int *clusters_to_alloc,
1363 				     unsigned int *extents_to_split)
1364 {
1365 	int ret;
1366 	struct ocfs2_write_cluster_desc *desc;
1367 	unsigned int num_clusters = 0;
1368 	unsigned int ext_flags = 0;
1369 	u32 phys = 0;
1370 	int i;
1371 
1372 	*clusters_to_alloc = 0;
1373 	*extents_to_split = 0;
1374 
1375 	for (i = 0; i < wc->w_clen; i++) {
1376 		desc = &wc->w_desc[i];
1377 		desc->c_cpos = wc->w_cpos + i;
1378 
1379 		if (num_clusters == 0) {
1380 			/*
1381 			 * Need to look up the next extent record.
1382 			 */
1383 			ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys,
1384 						 &num_clusters, &ext_flags);
1385 			if (ret) {
1386 				mlog_errno(ret);
1387 				goto out;
1388 			}
1389 
1390 			/* We should have already CoWed the refcounted extent. */
1391 			BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
1392 
1393 			/*
1394 			 * Assume worst case - that we're writing in
1395 			 * the middle of the extent.
1396 			 *
1397 			 * We can assume that the write proceeds from
1398 			 * left to right, in which case the extent
1399 			 * insert code is smart enough to coalesce the
1400 			 * next splits into the previous records created.
1401 			 */
1402 			if (ext_flags & OCFS2_EXT_UNWRITTEN)
1403 				*extents_to_split = *extents_to_split + 2;
1404 		} else if (phys) {
1405 			/*
1406 			 * Only increment phys if it doesn't describe
1407 			 * a hole.
1408 			 */
1409 			phys++;
1410 		}
1411 
1412 		/*
1413 		 * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
1414 		 * file that got extended.  w_first_new_cpos tells us
1415 		 * where the newly allocated clusters are so we can
1416 		 * zero them.
1417 		 */
1418 		if (desc->c_cpos >= wc->w_first_new_cpos) {
1419 			BUG_ON(phys == 0);
1420 			desc->c_needs_zero = 1;
1421 		}
1422 
1423 		desc->c_phys = phys;
1424 		if (phys == 0) {
1425 			desc->c_new = 1;
1426 			desc->c_needs_zero = 1;
1427 			desc->c_clear_unwritten = 1;
1428 			*clusters_to_alloc = *clusters_to_alloc + 1;
1429 		}
1430 
1431 		if (ext_flags & OCFS2_EXT_UNWRITTEN) {
1432 			desc->c_clear_unwritten = 1;
1433 			desc->c_needs_zero = 1;
1434 		}
1435 
1436 		ret = ocfs2_unwritten_check(inode, wc, desc);
1437 		if (ret) {
1438 			mlog_errno(ret);
1439 			goto out;
1440 		}
1441 
1442 		num_clusters--;
1443 	}
1444 
1445 	ret = 0;
1446 out:
1447 	return ret;
1448 }
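/*
 * Example of the bookkeeping above: for a two-cluster write where
 * cluster 0 is a hole and cluster 1 lies in an unwritten extent,
 * desc[0] gets c_new/c_needs_zero/c_clear_unwritten set and bumps
 * *clusters_to_alloc to 1, while desc[1] keeps its physical cluster,
 * sets c_needs_zero/c_clear_unwritten, and the unwritten extent
 * record accounts for a worst-case *extents_to_split of 2.
 */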
1449 
1450 static int ocfs2_write_begin_inline(struct address_space *mapping,
1451 				    struct inode *inode,
1452 				    struct ocfs2_write_ctxt *wc)
1453 {
1454 	int ret;
1455 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1456 	struct folio *folio;
1457 	handle_t *handle;
1458 	struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
1459 
1460 	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1461 	if (IS_ERR(handle)) {
1462 		ret = PTR_ERR(handle);
1463 		mlog_errno(ret);
1464 		goto out;
1465 	}
1466 
1467 	folio = __filemap_get_folio(mapping, 0,
1468 			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
1469 	if (IS_ERR(folio)) {
1470 		ocfs2_commit_trans(osb, handle);
1471 		ret = PTR_ERR(folio);
1472 		mlog_errno(ret);
1473 		goto out;
1474 	}
1475 	/*
1476 	 * If we don't set w_num_folios then this folio won't get unlocked
1477 	 * and freed on cleanup of the write context.
1478 	 */
1479 	wc->w_target_folio = folio;
1480 	wc->w_folios[0] = folio;
1481 	wc->w_num_folios = 1;
1482 
1483 	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
1484 				      OCFS2_JOURNAL_ACCESS_WRITE);
1485 	if (ret) {
1486 		ocfs2_commit_trans(osb, handle);
1487 
1488 		mlog_errno(ret);
1489 		goto out;
1490 	}
1491 
1492 	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
1493 		ocfs2_set_inode_data_inline(inode, di);
1494 
1495 	if (!folio_test_uptodate(folio)) {
1496 		ret = ocfs2_read_inline_data(inode, folio, wc->w_di_bh);
1497 		if (ret) {
1498 			ocfs2_commit_trans(osb, handle);
1499 
1500 			goto out;
1501 		}
1502 	}
1503 
1504 	wc->w_handle = handle;
1505 out:
1506 	return ret;
1507 }
1508 
1509 int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
1510 {
1511 	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
1512 
1513 	if (new_size <= le16_to_cpu(di->id2.i_data.id_count))
1514 		return 1;
1515 	return 0;
1516 }
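/*
 * id_count is the inline data capacity recorded in the dinode -
 * roughly the block size minus the dinode header's share. For
 * example, with id_count == 2048, a new_size of 2048 still fits while
 * 2049 forces a conversion to an extent list.
 */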
1517 
1518 static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
1519 		struct inode *inode, loff_t pos, size_t len,
1520 		struct folio *mmap_folio, struct ocfs2_write_ctxt *wc)
1521 {
1522 	int ret, written = 0;
1523 	loff_t end = pos + len;
1524 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
1525 	struct ocfs2_dinode *di = NULL;
1526 
1527 	trace_ocfs2_try_to_write_inline_data((unsigned long long)oi->ip_blkno,
1528 					     len, (unsigned long long)pos,
1529 					     oi->ip_dyn_features);
1530 
1531 	/*
1532 	 * Handle inodes which already have inline data first.
1533 	 */
1534 	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1535 		if (mmap_folio == NULL &&
1536 		    ocfs2_size_fits_inline_data(wc->w_di_bh, end))
1537 			goto do_inline_write;
1538 
1539 		/*
1540 		 * The write won't fit - we have to give this inode an
1541 		 * extent list now.
1542 		 */
1543 		ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh);
1544 		if (ret)
1545 			mlog_errno(ret);
1546 		goto out;
1547 	}
1548 
1549 	/*
1550 	 * Check whether the inode can accept inline data.
1551 	 */
1552 	if (oi->ip_clusters != 0 || i_size_read(inode) != 0)
1553 		return 0;
1554 
1555 	/*
1556 	 * Check whether the write can fit.
1557 	 */
1558 	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
1559 	if (mmap_folio ||
1560 	    end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di))
1561 		return 0;
1562 
1563 do_inline_write:
1564 	ret = ocfs2_write_begin_inline(mapping, inode, wc);
1565 	if (ret) {
1566 		mlog_errno(ret);
1567 		goto out;
1568 	}
1569 
1570 	/*
1571 	 * This signals to the caller that the data can be written
1572 	 * inline.
1573 	 */
1574 	written = 1;
1575 out:
1576 	return written ? written : ret;
1577 }
1578 
1579 /*
1580  * This function only does anything for file systems which can't
1581  * handle sparse files.
1582  *
1583  * What we want to do here is fill in any hole between the current end
1584  * of allocation and the end of our write. That way the rest of the
1585  * write path can treat it as an non-allocating write, which has no
1586  * special case code for sparse/nonsparse files.
1587  */
1588 static int ocfs2_expand_nonsparse_inode(struct inode *inode,
1589 					struct buffer_head *di_bh,
1590 					loff_t pos, unsigned len,
1591 					struct ocfs2_write_ctxt *wc)
1592 {
1593 	int ret;
1594 	loff_t newsize = pos + len;
1595 
1596 	BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
1597 
1598 	if (newsize <= i_size_read(inode))
1599 		return 0;
1600 
1601 	ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos);
1602 	if (ret)
1603 		mlog_errno(ret);
1604 
1605 	/* There is no wc if this is called from the direct io path. */
1606 	if (wc)
1607 		wc->w_first_new_cpos =
1608 			ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
1609 
1610 	return ret;
1611 }
1612 
1613 static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
1614 			   loff_t pos)
1615 {
1616 	int ret = 0;
1617 
1618 	BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
1619 	if (pos > i_size_read(inode))
1620 		ret = ocfs2_zero_extend(inode, di_bh, pos);
1621 
1622 	return ret;
1623 }
1624 
1625 int ocfs2_write_begin_nolock(struct address_space *mapping,
1626 		loff_t pos, unsigned len, ocfs2_write_type_t type,
1627 		struct folio **foliop, void **fsdata,
1628 		struct buffer_head *di_bh, struct folio *mmap_folio)
1629 {
1630 	int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
1631 	unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
1632 	struct ocfs2_write_ctxt *wc;
1633 	struct inode *inode = mapping->host;
1634 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1635 	struct ocfs2_dinode *di;
1636 	struct ocfs2_alloc_context *data_ac = NULL;
1637 	struct ocfs2_alloc_context *meta_ac = NULL;
1638 	handle_t *handle;
1639 	struct ocfs2_extent_tree et;
1640 	int try_free = 1, ret1;
1641 
1642 try_again:
1643 	ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, type, di_bh);
1644 	if (ret) {
1645 		mlog_errno(ret);
1646 		return ret;
1647 	}
1648 
1649 	if (ocfs2_supports_inline_data(osb)) {
1650 		ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
1651 						     mmap_folio, wc);
1652 		if (ret == 1) {
1653 			ret = 0;
1654 			goto success;
1655 		}
1656 		if (ret < 0) {
1657 			mlog_errno(ret);
1658 			goto out;
1659 		}
1660 	}
1661 
1662 	/* Direct io changes i_size late, so do not zero the tail here. */
1663 	if (type != OCFS2_WRITE_DIRECT) {
1664 		if (ocfs2_sparse_alloc(osb))
1665 			ret = ocfs2_zero_tail(inode, di_bh, pos);
1666 		else
1667 			ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,
1668 							   len, wc);
1669 		if (ret) {
1670 			mlog_errno(ret);
1671 			goto out;
1672 		}
1673 	}
1674 
1675 	ret = ocfs2_check_range_for_refcount(inode, pos, len);
1676 	if (ret < 0) {
1677 		mlog_errno(ret);
1678 		goto out;
1679 	} else if (ret == 1) {
1680 		clusters_need = wc->w_clen;
1681 		ret = ocfs2_refcount_cow(inode, di_bh,
1682 					 wc->w_cpos, wc->w_clen, UINT_MAX);
1683 		if (ret) {
1684 			mlog_errno(ret);
1685 			goto out;
1686 		}
1687 	}
1688 
1689 	ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
1690 					&extents_to_split);
1691 	if (ret) {
1692 		mlog_errno(ret);
1693 		goto out;
1694 	}
1695 	clusters_need += clusters_to_alloc;
1696 
1697 	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
1698 
1699 	trace_ocfs2_write_begin_nolock(
1700 			(unsigned long long)OCFS2_I(inode)->ip_blkno,
1701 			(long long)i_size_read(inode),
1702 			le32_to_cpu(di->i_clusters),
1703 			pos, len, type, mmap_folio,
1704 			clusters_to_alloc, extents_to_split);
1705 
1706 	/*
1707 	 * We set w_target_from, w_target_to here so that
1708 	 * ocfs2_write_end() knows which range in the target page to
1709 	 * write out. An allocation requires that we write the entire
1710 	 * cluster range.
1711 	 */
1712 	if (clusters_to_alloc || extents_to_split) {
1713 		/*
1714 		 * XXX: We are stretching the limits of
1715 		 * ocfs2_lock_allocators(). It greatly over-estimates
1716 		 * the work to be done.
1717 		 */
1718 		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
1719 					      wc->w_di_bh);
1720 		ret = ocfs2_lock_allocators(inode, &et,
1721 					    clusters_to_alloc, extents_to_split,
1722 					    &data_ac, &meta_ac);
1723 		if (ret) {
1724 			mlog_errno(ret);
1725 			goto out;
1726 		}
1727 
1728 		if (data_ac)
1729 			data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
1730 
1731 		credits = ocfs2_calc_extend_credits(inode->i_sb,
1732 						    &di->id2.i_list);
1733 	} else if (type == OCFS2_WRITE_DIRECT)
1734 		/* A direct write need not start a trans if no extent is allocated. */
1735 		goto success;
1736 
1737 	/*
1738 	 * We have to zero sparse allocated clusters, unwritten extent clusters,
1739 	 * and non-sparse clusters we just extended.  For non-sparse writes,
1740 	 * we know zeros will only be needed in the first and/or last cluster.
1741 	 */
1742 	if (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
1743 			   wc->w_desc[wc->w_clen - 1].c_needs_zero))
1744 		cluster_of_pages = 1;
1745 	else
1746 		cluster_of_pages = 0;
1747 
1748 	ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);
1749 
1750 	handle = ocfs2_start_trans(osb, credits);
1751 	if (IS_ERR(handle)) {
1752 		ret = PTR_ERR(handle);
1753 		mlog_errno(ret);
1754 		goto out;
1755 	}
1756 
1757 	wc->w_handle = handle;
1758 
1759 	if (clusters_to_alloc) {
1760 		ret = dquot_alloc_space_nodirty(inode,
1761 			ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
1762 		if (ret)
1763 			goto out_commit;
1764 	}
1765 
1766 	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
1767 				      OCFS2_JOURNAL_ACCESS_WRITE);
1768 	if (ret) {
1769 		mlog_errno(ret);
1770 		goto out_quota;
1771 	}
1772 
1773 	/*
1774 	 * Fill our folio array first. That way we've grabbed enough so
1775 	 * that we can zero and flush if we error after adding the
1776 	 * extent.
1777 	 */
1778 	ret = ocfs2_grab_folios_for_write(mapping, wc, wc->w_cpos, pos, len,
1779 			cluster_of_pages, mmap_folio);
1780 	if (ret) {
1781 		/*
1782 		 * ocfs2_grab_folios_for_write() returns -EAGAIN if it
1783 		 * could not lock the target folio. In this case, we exit
1784 		 * with no error and no target folio. This will trigger
1785 		 * the caller, page_mkwrite(), to re-try the operation.
1786 		 */
1787 		if (type == OCFS2_WRITE_MMAP && ret == -EAGAIN) {
1788 			BUG_ON(wc->w_target_folio);
1789 			ret = 0;
1790 			goto out_quota;
1791 		}
1792 
1793 		mlog_errno(ret);
1794 		goto out_quota;
1795 	}
1796 
1797 	ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
1798 					  len);
1799 	if (ret) {
1800 		mlog_errno(ret);
1801 		goto out_quota;
1802 	}
1803 
1804 	if (data_ac)
1805 		ocfs2_free_alloc_context(data_ac);
1806 	if (meta_ac)
1807 		ocfs2_free_alloc_context(meta_ac);
1808 
1809 success:
1810 	if (foliop)
1811 		*foliop = wc->w_target_folio;
1812 	*fsdata = wc;
1813 	return 0;
1814 out_quota:
1815 	if (clusters_to_alloc)
1816 		dquot_free_space(inode,
1817 			  ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
1818 out_commit:
1819 	ocfs2_commit_trans(osb, handle);
1820 
1821 out:
1822 	/*
1823 	 * The mmapped page won't be unlocked in ocfs2_free_write_ctxt(),
1824 	 * even in case of error here like ENOSPC and ENOMEM. So, we need
1825 	 * to unlock the target page manually to prevent deadlocks when
1826 	 * retrying again on ENOSPC, or when returning non-VM_FAULT_LOCKED
1827 	 * to VM code.
1828 	 */
1829 	if (wc->w_target_locked)
1830 		folio_unlock(mmap_folio);
1831 
1832 	ocfs2_free_write_ctxt(inode, wc);
1833 
1834 	if (data_ac) {
1835 		ocfs2_free_alloc_context(data_ac);
1836 		data_ac = NULL;
1837 	}
1838 	if (meta_ac) {
1839 		ocfs2_free_alloc_context(meta_ac);
1840 		meta_ac = NULL;
1841 	}
1842 
1843 	if (ret == -ENOSPC && try_free) {
1844 		/*
1845 		 * Try to free some truncate log so that we can have enough
1846 		 * clusters to allocate.
1847 		 */
1848 		try_free = 0;
1849 
1850 		ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need);
1851 		if (ret1 == 1)
1852 			goto try_again;
1853 
1854 		if (ret1 < 0)
1855 			mlog_errno(ret1);
1856 	}
1857 
1858 	return ret;
1859 }
1860 
1861 static int ocfs2_write_begin(const struct kiocb *iocb,
1862 			     struct address_space *mapping,
1863 			     loff_t pos, unsigned len,
1864 			     struct folio **foliop, void **fsdata)
1865 {
1866 	int ret;
1867 	struct buffer_head *di_bh = NULL;
1868 	struct inode *inode = mapping->host;
1869 
1870 	ret = ocfs2_inode_lock(inode, &di_bh, 1);
1871 	if (ret) {
1872 		mlog_errno(ret);
1873 		return ret;
1874 	}
1875 
1876 	/*
1877 	 * Take alloc sem here to prevent concurrent lookups. That way
1878 	 * the mapping, zeroing and tree manipulation within
1879 	 * ocfs2_write() will be safe against ->read_folio(). This
1880 	 * should also serve to lock out allocation from a shared
1881 	 * writeable region.
1882 	 */
1883 	down_write(&OCFS2_I(inode)->ip_alloc_sem);
1884 
1885 	ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_BUFFER,
1886 				       foliop, fsdata, di_bh, NULL);
1887 	if (ret) {
1888 		mlog_errno(ret);
1889 		goto out_fail;
1890 	}
1891 
1892 	brelse(di_bh);
1893 
1894 	return 0;
1895 
1896 out_fail:
1897 	up_write(&OCFS2_I(inode)->ip_alloc_sem);
1898 
1899 	brelse(di_bh);
1900 	ocfs2_inode_unlock(inode, 1);
1901 
1902 	return ret;
1903 }
1904 
1905 static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
1906 				   unsigned len, unsigned *copied,
1907 				   struct ocfs2_dinode *di,
1908 				   struct ocfs2_write_ctxt *wc)
1909 {
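	/*
	 * If the copy from userspace was short and the target folio was
	 * not previously uptodate, part of the written range may hold
	 * uninitialized data, so report zero bytes copied and let the
	 * caller retry the whole write.
	 */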
1910 	if (unlikely(*copied < len)) {
1911 		if (!folio_test_uptodate(wc->w_target_folio)) {
1912 			*copied = 0;
1913 			return;
1914 		}
1915 	}
1916 
1917 	memcpy_from_folio(di->id2.i_data.id_data + pos, wc->w_target_folio,
1918 			pos, *copied);
1919 
1920 	trace_ocfs2_write_end_inline(
1921 	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
1922 	     (unsigned long long)pos, *copied,
1923 	     le16_to_cpu(di->id2.i_data.id_count),
1924 	     le16_to_cpu(di->i_dyn_features));
1925 }
1926 
1927 int ocfs2_write_end_nolock(struct address_space *mapping, loff_t pos,
1928 		unsigned len, unsigned copied, void *fsdata)
1929 {
1930 	int i, ret;
1931 	size_t from, to, start = pos & (PAGE_SIZE - 1);
1932 	struct inode *inode = mapping->host;
1933 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1934 	struct ocfs2_write_ctxt *wc = fsdata;
1935 	struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
1936 	handle_t *handle = wc->w_handle;
1937 
1938 	BUG_ON(!list_empty(&wc->w_unwritten_list));
1939 
1940 	if (handle) {
1941 		ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
1942 				wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE);
1943 		if (ret) {
1944 			copied = ret;
1945 			mlog_errno(ret);
1946 			goto out;
1947 		}
1948 	}
1949 
1950 	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1951 		ocfs2_write_end_inline(inode, pos, len, &copied, di, wc);
1952 		goto out_write_size;
1953 	}
1954 
1955 	if (unlikely(copied < len) && wc->w_target_folio) {
1956 		loff_t new_isize;
1957 
1958 		if (!folio_test_uptodate(wc->w_target_folio))
1959 			copied = 0;
1960 
1961 		new_isize = max_t(loff_t, i_size_read(inode), pos + copied);
1962 		if (new_isize > folio_pos(wc->w_target_folio))
1963 			ocfs2_zero_new_buffers(wc->w_target_folio, start+copied,
1964 					       start+len);
1965 		else {
1966 			/*
1967 			 * When the folio is fully beyond the new isize (the
1968 			 * data copy failed), do not bother zeroing it.
1969 			 * Invalidate it instead so that writeback does not
1970 			 * get confused and leave the page & buffer dirty
1971 			 * bits in an inconsistent state.
1972 			 */
1973 			block_invalidate_folio(wc->w_target_folio, 0,
1974 					folio_size(wc->w_target_folio));
1975 		}
1976 	}
1977 	if (wc->w_target_folio)
1978 		flush_dcache_folio(wc->w_target_folio);
1979 
1980 	for (i = 0; i < wc->w_num_folios; i++) {
1981 		struct folio *folio = wc->w_folios[i];
1982 
1983 		/* This is the direct io target folio */
1984 		if (folio == NULL)
1985 			continue;
1986 
1987 		if (folio == wc->w_target_folio) {
1988 			from = wc->w_target_from;
1989 			to = wc->w_target_to;
1990 
1991 			BUG_ON(from > folio_size(folio) ||
1992 			       to > folio_size(folio) ||
1993 			       to < from);
1994 		} else {
1995 			/*
1996 			 * Pages adjacent to the target (if any) imply
1997 			 * a hole-filling write in which case we want
1998 			 * to flush their entire range.
1999 			 */
2000 			from = 0;
2001 			to = folio_size(folio);
2002 		}
2003 
2004 		if (folio_buffers(folio)) {
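			/*
			 * In data=ordered mode, register this range with
			 * jbd2 so the file data reaches disk before the
			 * transaction updating the metadata commits.
			 */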
2005 			if (handle && ocfs2_should_order_data(inode)) {
2006 				loff_t start_byte = folio_pos(folio) + from;
2007 				loff_t length = to - from;
2008 				ocfs2_jbd2_inode_add_write(handle, inode,
2009 							   start_byte, length);
2010 			}
2011 			block_commit_write(folio, from, to);
2012 		}
2013 	}
2014 
2015 out_write_size:
2016 	/* Direct I/O does not update i_size here. */
2017 	if (wc->w_type != OCFS2_WRITE_DIRECT) {
2018 		pos += copied;
2019 		if (pos > i_size_read(inode)) {
2020 			i_size_write(inode, pos);
2021 			mark_inode_dirty(inode);
2022 		}
2023 		inode->i_blocks = ocfs2_inode_sector_count(inode);
2024 		di->i_size = cpu_to_le64((u64)i_size_read(inode));
2025 		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
2026 		di->i_mtime = di->i_ctime = cpu_to_le64(inode_get_mtime_sec(inode));
2027 		di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
2028 		if (handle)
2029 			ocfs2_update_inode_fsync_trans(handle, inode, 1);
2030 	}
2031 	if (handle)
2032 		ocfs2_journal_dirty(handle, wc->w_di_bh);
2033 
2034 out:
2035 	/* Unlock the folios before running deallocs, which must take the
2036 	 * j_trans_barrier lock; otherwise we deadlock, since the journal
2037 	 * commit thread holds that lock and asks for the page lock when
2038 	 * flushing data. Keep this here to preserve the unlock order.
2039 	 */
2040 	ocfs2_unlock_folios(wc);
2041 
2042 	if (handle)
2043 		ocfs2_commit_trans(osb, handle);
2044 
2045 	ocfs2_run_deallocs(osb, &wc->w_dealloc);
2046 
2047 	brelse(wc->w_di_bh);
2048 	kfree(wc);
2049 
2050 	return copied;
2051 }
2052 
2053 static int ocfs2_write_end(const struct kiocb *iocb,
2054 			   struct address_space *mapping,
2055 			   loff_t pos, unsigned len, unsigned copied,
2056 			   struct folio *folio, void *fsdata)
2057 {
2058 	int ret;
2059 	struct inode *inode = mapping->host;
2060 
2061 	ret = ocfs2_write_end_nolock(mapping, pos, len, copied, fsdata);
2062 
2063 	up_write(&OCFS2_I(inode)->ip_alloc_sem);
2064 	ocfs2_inode_unlock(inode, 1);
2065 
2066 	return ret;
2067 }
2068 
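/*
 * Per-I/O context for a direct write: allocated on the first
 * ocfs2_dio_wr_get_block() call, carried in the map_bh's b_private, and
 * consumed by ocfs2_dio_end_io_write(), which marks the accumulated
 * unwritten extents as written.
 */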
2069 struct ocfs2_dio_write_ctxt {
2070 	struct list_head	dw_zero_list;
2071 	unsigned		dw_zero_count;
2072 	int			dw_orphaned;
2073 	pid_t			dw_writer_pid;
2074 };
2075 
2076 static struct ocfs2_dio_write_ctxt *
2077 ocfs2_dio_alloc_write_ctx(struct buffer_head *bh, int *alloc)
2078 {
2079 	struct ocfs2_dio_write_ctxt *dwc = NULL;
2080 
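	/*
	 * The dio core reuses one map_bh for every get_block call of this
	 * I/O, so a context stashed in b_private on the first call is seen
	 * by all later calls and is passed to the end_io callback as
	 * "private".
	 */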
2081 	if (bh->b_private)
2082 		return bh->b_private;
2083 
2084 	dwc = kmalloc(sizeof(struct ocfs2_dio_write_ctxt), GFP_NOFS);
2085 	if (dwc == NULL)
2086 		return NULL;
2087 	INIT_LIST_HEAD(&dwc->dw_zero_list);
2088 	dwc->dw_zero_count = 0;
2089 	dwc->dw_orphaned = 0;
2090 	dwc->dw_writer_pid = task_pid_nr(current);
2091 	bh->b_private = dwc;
2092 	*alloc = 1;
2093 
2094 	return dwc;
2095 }
2096 
2097 static void ocfs2_dio_free_write_ctx(struct inode *inode,
2098 				     struct ocfs2_dio_write_ctxt *dwc)
2099 {
2100 	ocfs2_free_unwritten_list(inode, &dwc->dw_zero_list);
2101 	kfree(dwc);
2102 }
2103 
2104 /*
2105  * TODO: Make this into a generic get_blocks function.
2106  *
2107  * From do_direct_io in direct-io.c:
2108  *  "So what we do is to permit the ->get_blocks function to populate
2109  *   bh.b_size with the size of IO which is permitted at this offset and
2110  *   this i_blkbits."
2111  *
2112  * This function is called directly from get_more_blocks in direct-io.c.
2113  *
2114  * called like this: dio->get_blocks(dio->inode, fs_startblk,
2115  * 					fs_count, map_bh, dio->rw == WRITE);
2116  */
2117 static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
2118 			       struct buffer_head *bh_result, int create)
2119 {
2120 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2121 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
2122 	struct ocfs2_write_ctxt *wc;
2123 	struct ocfs2_write_cluster_desc *desc = NULL;
2124 	struct ocfs2_dio_write_ctxt *dwc = NULL;
2125 	struct buffer_head *di_bh = NULL;
2126 	u64 p_blkno;
2127 	unsigned int i_blkbits = inode->i_sb->s_blocksize_bits;
2128 	loff_t pos = iblock << i_blkbits;
2129 	sector_t endblk = (i_size_read(inode) - 1) >> i_blkbits;
2130 	unsigned len, total_len = bh_result->b_size;
2131 	int ret = 0, first_get_block = 0;
2132 
2133 	len = osb->s_clustersize - (pos & (osb->s_clustersize - 1));
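	/*
	 * Space is allocated a cluster at a time, so clamp this mapping to
	 * the end of the cluster containing "pos".
	 */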
2134 	len = min(total_len, len);
2135 
2136 	/*
2137 	 * bh_result->b_size is computed in get_more_blocks() from the write
2138 	 * "pos" and "end". We may need to map twice, with different states:
2139 	 * 1. area inside the file size: do not set NEW;
2140 	 * 2. area beyond the file size: set NEW.
2141 	 *
2142 	 *		   iblock    endblk
2143 	 * |--------|---------|---------|---------
2144 	 * |<-------area in file------->|
2145 	 */
2146 
2147 	if ((iblock <= endblk) &&
2148 	    ((iblock + ((len - 1) >> i_blkbits)) > endblk))
2149 		len = (endblk - iblock + 1) << i_blkbits;
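	/* Any remainder beyond endblk is mapped by a subsequent call. */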
2150 
2151 	mlog(0, "get block of %lu at %llu:%u req %u\n",
2152 			inode->i_ino, pos, len, total_len);
2153 
2154 	/*
2155 	 * We may need to change the file size in ocfs2_dio_end_io_write(),
2156 	 * or add the inode to the orphan dir, so we cannot take the fast
2157 	 * path when the file size is going to change.
2158 	 */
2159 	if (pos + total_len <= i_size_read(inode)) {
2160 
2161 		/* This is the fast path for re-write. */
2162 		ret = ocfs2_lock_get_block(inode, iblock, bh_result, create);
2163 		if (buffer_mapped(bh_result) &&
2164 		    !buffer_new(bh_result) &&
2165 		    ret == 0)
2166 			goto out;
2167 
2168 		/* Clear state set by ocfs2_get_block. */
2169 		bh_result->b_state = 0;
2170 	}
2171 
2172 	dwc = ocfs2_dio_alloc_write_ctx(bh_result, &first_get_block);
2173 	if (unlikely(dwc == NULL)) {
2174 		ret = -ENOMEM;
2175 		mlog_errno(ret);
2176 		goto out;
2177 	}
2178 
2179 	if (ocfs2_clusters_for_bytes(inode->i_sb, pos + total_len) >
2180 	    ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)) &&
2181 	    !dwc->dw_orphaned) {
2182 		/*
2183 		 * When we are going to allocate extents beyond the file
2184 		 * size, add the inode to the orphan dir so we can reclaim
2185 		 * that space if the system crashes during the write.
2186 		 */
2187 		ret = ocfs2_add_inode_to_orphan(osb, inode);
2188 		if (ret < 0) {
2189 			mlog_errno(ret);
2190 			goto out;
2191 		}
2192 		dwc->dw_orphaned = 1;
2193 	}
2194 
2195 	ret = ocfs2_inode_lock(inode, &di_bh, 1);
2196 	if (ret) {
2197 		mlog_errno(ret);
2198 		goto out;
2199 	}
2200 
2201 	down_write(&oi->ip_alloc_sem);
2202 
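	/*
	 * On the first get_block call of this dio, prepare the region
	 * between the old i_size and the write position: sparse file
	 * systems only need the tail of the last allocated block zeroed,
	 * while non-sparse inodes must have their allocation expanded up
	 * to the write position.
	 */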
2203 	if (first_get_block) {
2204 		if (ocfs2_sparse_alloc(osb))
2205 			ret = ocfs2_zero_tail(inode, di_bh, pos);
2206 		else
2207 			ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,
2208 							   total_len, NULL);
2209 		if (ret < 0) {
2210 			mlog_errno(ret);
2211 			goto unlock;
2212 		}
2213 	}
2214 
2215 	ret = ocfs2_write_begin_nolock(inode->i_mapping, pos, len,
2216 				       OCFS2_WRITE_DIRECT, NULL,
2217 				       (void **)&wc, di_bh, NULL);
2218 	if (ret) {
2219 		mlog_errno(ret);
2220 		goto unlock;
2221 	}
2222 
2223 	desc = &wc->w_desc[0];
2224 
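	/*
	 * Translate the descriptor's physical cluster into a block number,
	 * then add iblock's offset within that cluster.
	 */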
2225 	p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, desc->c_phys);
2226 	BUG_ON(p_blkno == 0);
2227 	p_blkno += iblock & (u64)(ocfs2_clusters_to_blocks(inode->i_sb, 1) - 1);
2228 
2229 	map_bh(bh_result, inode->i_sb, p_blkno);
2230 	bh_result->b_size = len;
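	/*
	 * Setting NEW tells the dio core these blocks were just allocated,
	 * so it zeroes whatever parts of them this write does not cover.
	 */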
2231 	if (desc->c_needs_zero)
2232 		set_buffer_new(bh_result);
2233 
2234 	if (iblock > endblk)
2235 		set_buffer_new(bh_result);
2236 
2237 	/* end_io may sleep, which must not happen in IRQ context, so defer
2238 	 * completion to the dio work queue. */
2239 	set_buffer_defer_completion(bh_result);
2240 
2241 	if (!list_empty(&wc->w_unwritten_list)) {
2242 		struct ocfs2_unwritten_extent *ue = NULL;
2243 
2244 		ue = list_first_entry(&wc->w_unwritten_list,
2245 				      struct ocfs2_unwritten_extent,
2246 				      ue_node);
2247 		BUG_ON(ue->ue_cpos != desc->c_cpos);
2248 		/* The physical address may be 0, fill it. */
2249 		ue->ue_phys = desc->c_phys;
2250 
2251 		list_splice_tail_init(&wc->w_unwritten_list, &dwc->dw_zero_list);
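		/*
		 * Hand the unwritten extents over to the dio context so
		 * ocfs2_dio_end_io_write() can mark them written once the
		 * data is on disk.
		 */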
2252 		dwc->dw_zero_count += wc->w_unwritten_count;
2253 	}
2254 
2255 	ret = ocfs2_write_end_nolock(inode->i_mapping, pos, len, len, wc);
2256 	BUG_ON(ret != len);
2257 	ret = 0;
2258 unlock:
2259 	up_write(&oi->ip_alloc_sem);
2260 	ocfs2_inode_unlock(inode, 1);
2261 	brelse(di_bh);
2262 out:
2263 	return ret;
2264 }
2265 
2266 static int ocfs2_dio_end_io_write(struct inode *inode,
2267 				  struct ocfs2_dio_write_ctxt *dwc,
2268 				  loff_t offset,
2269 				  ssize_t bytes)
2270 {
2271 	struct ocfs2_cached_dealloc_ctxt dealloc;
2272 	struct ocfs2_extent_tree et;
2273 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2274 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
2275 	struct ocfs2_unwritten_extent *ue = NULL;
2276 	struct buffer_head *di_bh = NULL;
2277 	struct ocfs2_dinode *di;
2278 	struct ocfs2_alloc_context *data_ac = NULL;
2279 	struct ocfs2_alloc_context *meta_ac = NULL;
2280 	handle_t *handle = NULL;
2281 	loff_t end = offset + bytes;
2282 	int ret = 0, credits = 0, batch = 0;
2283 
2284 	ocfs2_init_dealloc_ctxt(&dealloc);
2285 
2286 	/* We clear unwritten extents, delete the orphan and change i_size
2287 	 * here. If none of these are needed, we can skip all this. */
2288 	if (list_empty(&dwc->dw_zero_list) &&
2289 	    end <= i_size_read(inode) &&
2290 	    !dwc->dw_orphaned)
2291 		goto out;
2292 
2293 	ret = ocfs2_inode_lock(inode, &di_bh, 1);
2294 	if (ret < 0) {
2295 		mlog_errno(ret);
2296 		goto out;
2297 	}
2298 
2299 	down_write(&oi->ip_alloc_sem);
2300 	di = (struct ocfs2_dinode *)di_bh->b_data;
2301 
2302 	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
2303 
2304 	/* Attach dealloc with extent tree in case that we may reuse extents
2305 	 * which are already unlinked from current extent tree due to extent
2306 	 * rotation and merging.
2307 	 */
2308 	et.et_dealloc = &dealloc;
2309 
2310 	ret = ocfs2_lock_allocators(inode, &et, 0, dwc->dw_zero_count*2,
2311 				    &data_ac, &meta_ac);
2312 	if (ret) {
2313 		mlog_errno(ret);
2314 		goto unlock;
2315 	}
2316 
2317 	credits = ocfs2_calc_extend_credits(inode->i_sb, &di->id2.i_list);
2318 
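	/*
	 * Mark the extents written in batches of
	 * OCFS2_DIO_MARK_EXTENT_BATCH, restarting the transaction between
	 * batches so a single handle never outgrows its credits.
	 */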
2319 	list_for_each_entry(ue, &dwc->dw_zero_list, ue_node) {
2320 		if (!handle) {
2321 			handle = ocfs2_start_trans(osb, credits);
2322 			if (IS_ERR(handle)) {
2323 				ret = PTR_ERR(handle);
2324 				mlog_errno(ret);
2325 				goto unlock;
2326 			}
2327 			ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
2328 					OCFS2_JOURNAL_ACCESS_WRITE);
2329 			if (ret) {
2330 				mlog_errno(ret);
2331 				goto commit;
2332 			}
2333 		}
2334 		ret = ocfs2_assure_trans_credits(handle, credits);
2335 		if (ret < 0) {
2336 			mlog_errno(ret);
2337 			goto commit;
2338 		}
2339 		ret = ocfs2_mark_extent_written(inode, &et, handle,
2340 						ue->ue_cpos, 1,
2341 						ue->ue_phys,
2342 						meta_ac, &dealloc);
2343 		if (ret < 0) {
2344 			mlog_errno(ret);
2345 			goto commit;
2346 		}
2347 
2348 		if (++batch == OCFS2_DIO_MARK_EXTENT_BATCH) {
2349 			ocfs2_commit_trans(osb, handle);
2350 			handle = NULL;
2351 			batch = 0;
2352 		}
2353 	}
2354 
2355 	if (end > i_size_read(inode)) {
2356 		if (!handle) {
2357 			handle = ocfs2_start_trans(osb, credits);
2358 			if (IS_ERR(handle)) {
2359 				ret = PTR_ERR(handle);
2360 				mlog_errno(ret);
2361 				goto unlock;
2362 			}
2363 		}
2364 		ret = ocfs2_set_inode_size(handle, inode, di_bh, end);
2365 		if (ret < 0)
2366 			mlog_errno(ret);
2367 	}
2368 
2369 commit:
2370 	if (handle)
2371 		ocfs2_commit_trans(osb, handle);
2372 unlock:
2373 	up_write(&oi->ip_alloc_sem);
2374 
2375 	/* everything looks good, let's start the cleanup */
2376 	if (!ret && dwc->dw_orphaned) {
2377 		BUG_ON(dwc->dw_writer_pid != task_pid_nr(current));
2378 
2379 		ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh, 0, 0);
2380 		if (ret < 0)
2381 			mlog_errno(ret);
2382 	}
2383 	ocfs2_inode_unlock(inode, 1);
2384 	brelse(di_bh);
2385 out:
2386 	if (data_ac)
2387 		ocfs2_free_alloc_context(data_ac);
2388 	if (meta_ac)
2389 		ocfs2_free_alloc_context(meta_ac);
2390 	ocfs2_run_deallocs(osb, &dealloc);
2391 	ocfs2_dio_free_write_ctx(inode, dwc);
2392 
2393 	return ret;
2394 }
2395 
2396 /*
2397  * ocfs2_dio_end_io is called by the dio core when a dio is finished.  We're
2398  * particularly interested in the aio/dio case.  We use the rw_lock DLM lock
2399  * to protect io on one node from truncation on another.
2400  */
2401 static int ocfs2_dio_end_io(struct kiocb *iocb,
2402 			    loff_t offset,
2403 			    ssize_t bytes,
2404 			    void *private)
2405 {
2406 	struct inode *inode = file_inode(iocb->ki_filp);
2407 	int level;
2408 	int ret = 0;
2409 
2410 	/* this io's submitter should not have unlocked this before we could */
2411 	BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
2412 
2413 	if (bytes <= 0)
2414 		mlog_ratelimited(ML_ERROR, "Direct IO failed, bytes = %lld",
2415 				 (long long)bytes);
2416 	if (private) {
2417 		if (bytes > 0)
2418 			ret = ocfs2_dio_end_io_write(inode, private, offset,
2419 						     bytes);
2420 		else
2421 			ocfs2_dio_free_write_ctx(inode, private);
2422 	}
2423 
2424 	ocfs2_iocb_clear_rw_locked(iocb);
2425 
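	/* Drop the rw lock taken at submission, at the level it was taken. */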
2426 	level = ocfs2_iocb_rw_locked_level(iocb);
2427 	ocfs2_rw_unlock(inode, level);
2428 	return ret;
2429 }
2430 
2431 static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
2432 {
2433 	struct file *file = iocb->ki_filp;
2434 	struct inode *inode = file->f_mapping->host;
2435 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2436 	get_block_t *get_block;
2437 
2438 	/*
2439 	 * Fall back to buffered I/O (signalled by returning 0) if we see
2440 	 * an inode without extents.
2441 	 */
2442 	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
2443 		return 0;
2444 
2445 	/* Fall back to buffered I/O if we do not support append dio. */
2446 	if (iocb->ki_pos + iter->count > i_size_read(inode) &&
2447 	    !ocfs2_supports_append_dio(osb))
2448 		return 0;
2449 
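	/*
	 * Reads never allocate, so the plain locked lookup is enough;
	 * writes go through the allocating dio variant.
	 */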
2450 	if (iov_iter_rw(iter) == READ)
2451 		get_block = ocfs2_lock_get_block;
2452 	else
2453 		get_block = ocfs2_dio_wr_get_block;
2454 
2455 	return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
2456 				    iter, get_block,
2457 				    ocfs2_dio_end_io, 0);
2458 }
2459 
2460 const struct address_space_operations ocfs2_aops = {
2461 	.dirty_folio		= block_dirty_folio,
2462 	.read_folio		= ocfs2_read_folio,
2463 	.readahead		= ocfs2_readahead,
2464 	.writepages		= ocfs2_writepages,
2465 	.write_begin		= ocfs2_write_begin,
2466 	.write_end		= ocfs2_write_end,
2467 	.bmap			= ocfs2_bmap,
2468 	.direct_IO		= ocfs2_direct_IO,
2469 	.invalidate_folio	= block_invalidate_folio,
2470 	.release_folio		= ocfs2_release_folio,
2471 	.migrate_folio		= buffer_migrate_folio,
2472 	.is_partially_uptodate	= block_is_partially_uptodate,
2473 	.error_remove_folio	= generic_error_remove_folio,
2474 };
2475