xref: /linux/fs/ntfs/iomap.c (revision cdd4dc3aebeab43a72ce0bc2b5bab6f0a80b97a5)
1*b041ca56SNamjae Jeon // SPDX-License-Identifier: GPL-2.0-or-later
2*b041ca56SNamjae Jeon /*
3*b041ca56SNamjae Jeon  * iomap callack functions
4*b041ca56SNamjae Jeon  *
5*b041ca56SNamjae Jeon  * Copyright (c) 2025 LG Electronics Co., Ltd.
6*b041ca56SNamjae Jeon  */
7*b041ca56SNamjae Jeon 
8*b041ca56SNamjae Jeon #include <linux/writeback.h>
9*b041ca56SNamjae Jeon 
10*b041ca56SNamjae Jeon #include "attrib.h"
11*b041ca56SNamjae Jeon #include "mft.h"
12*b041ca56SNamjae Jeon #include "ntfs.h"
13*b041ca56SNamjae Jeon #include "iomap.h"
14*b041ca56SNamjae Jeon 
/*
 * ntfs_iomap_put_folio_non_resident - zero uninitialized ranges and release
 * @inode:	inode the folio belongs to
 * @pos:	file position of the range just written
 * @len:	number of bytes written at @pos
 * @folio:	folio to unlock and release
 *
 * For a non-resident attribute, bytes between initialized_size and the
 * written range would otherwise expose stale data when the folio is later
 * read.  Zero the parts of the folio that were neither written by this
 * operation nor previously initialized, then unlock and drop the folio.
 */
static void ntfs_iomap_put_folio_non_resident(struct inode *inode, loff_t pos,
					      unsigned int len, struct folio *folio)
{
	struct ntfs_inode *ni = NTFS_I(inode);
	unsigned long sector_size = 1UL << inode->i_blkbits;
	loff_t start_down, end_up, init;

	/* Written range expanded to whole device blocks. */
	start_down = round_down(pos, sector_size);
	end_up = (pos + len - 1) | (sector_size - 1);
	init = ni->initialized_size;

	if (init >= start_down && init <= end_up) {
		/* initialized_size falls inside the block-aligned range. */
		if (init < pos) {
			/*
			 * End of the write inside this folio; 0 means the
			 * write ends exactly on a folio boundary, so use
			 * the folio size instead.
			 */
			loff_t offset = offset_in_folio(folio, pos + len);

			if (offset == 0)
				offset = folio_size(folio);
			/*
			 * Zero the gap [init, pos) before the write and
			 * everything from the end of the write to the end
			 * of the folio.
			 */
			folio_zero_segments(folio,
					    offset_in_folio(folio, init),
					    offset_in_folio(folio, pos),
					    offset,
					    folio_size(folio));

		} else  {
			/*
			 * Write starts at or before initialized_size: only
			 * the tail past max(write end, initialized_size)
			 * needs zeroing.
			 */
			loff_t offset = max_t(loff_t, pos + len, init);

			offset = offset_in_folio(folio, offset);
			if (offset == 0)
				offset = folio_size(folio);
			folio_zero_segment(folio,
					   offset,
					   folio_size(folio));
		}
	} else if (init <= pos) {
		/* initialized_size lies entirely before the written range. */
		loff_t offset = 0, offset2 = offset_in_folio(folio, pos + len);

		/*
		 * Start zeroing at initialized_size only when it sits in
		 * the same folio as @pos; otherwise zero from the folio
		 * start.
		 */
		if ((init >> folio_shift(folio)) == (pos >> folio_shift(folio)))
			offset = offset_in_folio(folio, init);
		if (offset2 == 0)
			offset2 = folio_size(folio);
		folio_zero_segments(folio,
				    offset,
				    offset_in_folio(folio, pos),
				    offset2,
				    folio_size(folio));
	}
	folio_unlock(folio);
	folio_put(folio);
}
64*b041ca56SNamjae Jeon 
65*b041ca56SNamjae Jeon /*
66*b041ca56SNamjae Jeon  * iomap_zero_range is called for an area beyond the initialized size,
67*b041ca56SNamjae Jeon  * garbage values can be read, so zeroing out is needed.
68*b041ca56SNamjae Jeon  */
69*b041ca56SNamjae Jeon static void ntfs_iomap_put_folio(struct inode *inode, loff_t pos,
70*b041ca56SNamjae Jeon 		unsigned int len, struct folio *folio)
71*b041ca56SNamjae Jeon {
72*b041ca56SNamjae Jeon 	if (NInoNonResident(NTFS_I(inode)))
73*b041ca56SNamjae Jeon 		return ntfs_iomap_put_folio_non_resident(inode, pos,
74*b041ca56SNamjae Jeon 							 len, folio);
75*b041ca56SNamjae Jeon 	folio_unlock(folio);
76*b041ca56SNamjae Jeon 	folio_put(folio);
77*b041ca56SNamjae Jeon }
78*b041ca56SNamjae Jeon 
/* Folio write ops used by the buffered write path. */
const struct iomap_write_ops ntfs_iomap_folio_ops = {
	.put_folio = ntfs_iomap_put_folio,
};
82*b041ca56SNamjae Jeon 
/*
 * Map a resident attribute as an IOMAP_INLINE extent.
 *
 * The attribute value lives inside the MFT record, so it is duplicated
 * into a freshly allocated buffer that iomap serves directly; the copy
 * is freed again in ntfs_read_iomap_end().  Offsets at or beyond the
 * valid attribute length are reported as a hole, or as -ENOENT for
 * IOMAP_REPORT (fiemap-style) callers.
 */
static int ntfs_read_iomap_begin_resident(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap)
{
	struct ntfs_inode *base_ni, *ni = NTFS_I(inode);
	struct ntfs_attr_search_ctx *ctx;
	loff_t i_size;
	u32 attr_len;
	int err = 0;
	char *kattr;

	/* Attribute inodes keep their MFT record on the base inode. */
	if (NInoAttr(ni))
		base_ni = ni->ext.base_ntfs_ino;
	else
		base_ni = ni;

	ctx = ntfs_attr_get_search_ctx(base_ni, NULL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err))
		goto out;

	/* Never expose bytes beyond the initialized size. */
	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
	if (unlikely(attr_len > ni->initialized_size))
		attr_len = ni->initialized_size;
	i_size = i_size_read(inode);

	if (unlikely(attr_len > i_size)) {
		/* Race with shrinking truncate. */
		attr_len = i_size;
	}

	if (offset >= attr_len) {
		if (flags & IOMAP_REPORT)
			err = -ENOENT;
		else {
			iomap->type = IOMAP_HOLE;
			iomap->offset = offset;
			iomap->length = length;
		}
		goto out;
	}

	kattr = (u8 *)ctx->attr + le16_to_cpu(ctx->attr->data.resident.value_offset);

	/*
	 * Duplicate the value: the search context (and with it the mapped
	 * MFT record) is released below, so iomap cannot reference the
	 * record's memory directly.
	 */
	iomap->inline_data = kmemdup(kattr, attr_len, GFP_KERNEL);
	if (!iomap->inline_data) {
		err = -ENOMEM;
		goto out;
	}

	/* Inline extents always start at file offset 0. */
	iomap->type = IOMAP_INLINE;
	iomap->offset = 0;
	iomap->length = attr_len;

out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);

	return err;
}
148*b041ca56SNamjae Jeon 
149*b041ca56SNamjae Jeon /*
150*b041ca56SNamjae Jeon  * ntfs_read_iomap_begin_non_resident - map non-resident NTFS file data
151*b041ca56SNamjae Jeon  * @inode:		inode to map
152*b041ca56SNamjae Jeon  * @offset:		file offset to map
153*b041ca56SNamjae Jeon  * @length:		length of mapping
154*b041ca56SNamjae Jeon  * @flags:		IOMAP flags
155*b041ca56SNamjae Jeon  * @iomap:		iomap structure to fill
156*b041ca56SNamjae Jeon  * @need_unwritten:	true if UNWRITTEN extent type is needed
157*b041ca56SNamjae Jeon  *
158*b041ca56SNamjae Jeon  * Map a range of a non-resident NTFS file to an iomap extent.
159*b041ca56SNamjae Jeon  *
160*b041ca56SNamjae Jeon  * NTFS UNWRITTEN extent handling:
161*b041ca56SNamjae Jeon  * ================================
162*b041ca56SNamjae Jeon  * The concept of an unwritten extent in NTFS is slightly different from
163*b041ca56SNamjae Jeon  * that of other filesystems. NTFS conceptually manages only a single
164*b041ca56SNamjae Jeon  * continuous unwritten region, which is strictly defined based on
165*b041ca56SNamjae Jeon  * initialized_size.
166*b041ca56SNamjae Jeon  *
167*b041ca56SNamjae Jeon  * File offset layout:
168*b041ca56SNamjae Jeon  *   0                        initialized_size                   i_size(EOF)
169*b041ca56SNamjae Jeon  *   |----------#0----------|----------#1----------|----------#2----------|
170*b041ca56SNamjae Jeon  *   | Actual data          | Pre-allocated        | Pre-allocated        |
171*b041ca56SNamjae Jeon  *   | (user written)       | (within initialized) | (initialized ~ EOF)  |
172*b041ca56SNamjae Jeon  *   |----------------------|----------------------|----------------------|
173*b041ca56SNamjae Jeon  *   MAPPED                 MAPPED                 UNWRITTEN (conditionally)
174*b041ca56SNamjae Jeon  *
175*b041ca56SNamjae Jeon  * Region #0: User-written data, initialized and valid.
176*b041ca56SNamjae Jeon  * Region #1: Pre-allocated within initialized_size, must be zero-initialized
177*b041ca56SNamjae Jeon  *            by the filesystem before exposure to userspace.
178*b041ca56SNamjae Jeon  * Region #2: Pre-allocated beyond initialized_size, does not need initialization.
179*b041ca56SNamjae Jeon  *
180*b041ca56SNamjae Jeon  * The @need_unwritten parameter controls whether region #2 is mapped as
181*b041ca56SNamjae Jeon  * IOMAP_UNWRITTEN or IOMAP_MAPPED:
182*b041ca56SNamjae Jeon  * - For seek operations (SEEK_DATA/SEEK_HOLE): IOMAP_MAPPED is needed to
183*b041ca56SNamjae Jeon  *   prevent iomap_seek_data from incorrectly interpreting pre-allocated
184*b041ca56SNamjae Jeon  *   space as a hole. Since NTFS does not support multiple unwritten extents,
185*b041ca56SNamjae Jeon  *   all pre-allocated regions should be treated as data, not holes.
186*b041ca56SNamjae Jeon  * - For zero_range operations: IOMAP_MAPPED is needed to be zeroed out.
187*b041ca56SNamjae Jeon  *
188*b041ca56SNamjae Jeon  * Return: 0 on success, negative error code on failure.
189*b041ca56SNamjae Jeon  */
static int ntfs_read_iomap_begin_non_resident(struct inode *inode, loff_t offset,
		loff_t length, unsigned int flags, struct iomap *iomap,
		bool need_unwritten)
{
	struct ntfs_inode *ni = NTFS_I(inode);
	s64 vcn;
	s64 lcn;
	struct runlist_element *rl;
	struct ntfs_volume *vol = ni->vol;
	loff_t vcn_ofs;
	loff_t rl_length;

	vcn = ntfs_bytes_to_cluster(vol, offset);
	vcn_ofs = ntfs_bytes_to_cluster_off(vol, offset);

	/*
	 * NOTE(review): write lock taken for a read mapping — presumably
	 * because ntfs_attr_vcn_to_rl() may map missing runlist fragments
	 * in place; confirm against its implementation.
	 */
	down_write(&ni->runlist.lock);
	rl = ntfs_attr_vcn_to_rl(ni, vcn, &lcn);
	if (IS_ERR(rl)) {
		up_write(&ni->runlist.lock);
		return PTR_ERR(rl);
	}

	/* Sentinel LCNs (< LCN_HOLE / < LCN_ENOENT) denote errors. */
	if (flags & IOMAP_REPORT) {
		if (lcn < LCN_HOLE) {
			up_write(&ni->runlist.lock);
			return -ENOENT;
		}
	} else if (lcn < LCN_ENOENT) {
		up_write(&ni->runlist.lock);
		return -EINVAL;
	}

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = offset;

	if (lcn <= LCN_DELALLOC) {
		/* No backing cluster (yet): delalloc or a hole. */
		if (lcn == LCN_DELALLOC)
			iomap->type = IOMAP_DELALLOC;
		else
			iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
	} else {
		/*
		 * Region #2 (beyond initialized_size, see comment above) is
		 * UNWRITTEN only when the caller asked for it.
		 */
		if (need_unwritten && offset >= ni->initialized_size)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
		iomap->addr = ntfs_cluster_to_bytes(vol, lcn) + vcn_ofs;
	}

	/* Bytes remaining in this run from @vcn onwards. */
	rl_length = ntfs_cluster_to_bytes(vol, rl->length - (vcn - rl->vcn));

	if (rl_length == 0 && rl->lcn > LCN_DELALLOC) {
		ntfs_error(inode->i_sb,
				"runlist(vcn : %lld, length : %lld, lcn : %lld) is corrupted\n",
				rl->vcn, rl->length, rl->lcn);
		up_write(&ni->runlist.lock);
		return -EIO;
	}

	/* Clamp the extent to the end of the current run. */
	if (rl_length && length > rl_length - vcn_ofs)
		iomap->length = rl_length - vcn_ofs;
	else
		iomap->length = length;
	up_write(&ni->runlist.lock);

	/*
	 * Unless zeroing, split a mapped extent that straddles
	 * initialized_size at the (block-rounded) boundary so the tail can
	 * be handled separately on the next iteration.
	 */
	if (!(flags & IOMAP_ZERO) &&
			iomap->type == IOMAP_MAPPED &&
			iomap->offset < ni->initialized_size &&
			iomap->offset + iomap->length > ni->initialized_size) {
		iomap->length = round_up(ni->initialized_size, 1 << inode->i_blkbits) -
			iomap->offset;
	}
	iomap->flags |= IOMAP_F_MERGED;

	return 0;
}
266*b041ca56SNamjae Jeon 
267*b041ca56SNamjae Jeon static int __ntfs_read_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
268*b041ca56SNamjae Jeon 		unsigned int flags, struct iomap *iomap, struct iomap *srcmap,
269*b041ca56SNamjae Jeon 		bool need_unwritten)
270*b041ca56SNamjae Jeon {
271*b041ca56SNamjae Jeon 	if (NInoNonResident(NTFS_I(inode)))
272*b041ca56SNamjae Jeon 		return ntfs_read_iomap_begin_non_resident(inode, offset, length,
273*b041ca56SNamjae Jeon 				flags, iomap, need_unwritten);
274*b041ca56SNamjae Jeon 	return ntfs_read_iomap_begin_resident(inode, offset, length,
275*b041ca56SNamjae Jeon 					     flags, iomap);
276*b041ca56SNamjae Jeon }
277*b041ca56SNamjae Jeon 
278*b041ca56SNamjae Jeon static int ntfs_read_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
279*b041ca56SNamjae Jeon 		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
280*b041ca56SNamjae Jeon {
281*b041ca56SNamjae Jeon 	return __ntfs_read_iomap_begin(inode, offset, length, flags, iomap,
282*b041ca56SNamjae Jeon 			srcmap, true);
283*b041ca56SNamjae Jeon }
284*b041ca56SNamjae Jeon 
285*b041ca56SNamjae Jeon static int ntfs_read_iomap_end(struct inode *inode, loff_t pos, loff_t length,
286*b041ca56SNamjae Jeon 		ssize_t written, unsigned int flags, struct iomap *iomap)
287*b041ca56SNamjae Jeon {
288*b041ca56SNamjae Jeon 	if (iomap->type == IOMAP_INLINE)
289*b041ca56SNamjae Jeon 		kfree(iomap->inline_data);
290*b041ca56SNamjae Jeon 
291*b041ca56SNamjae Jeon 	return written;
292*b041ca56SNamjae Jeon }
293*b041ca56SNamjae Jeon 
/* iomap ops for buffered reads. */
const struct iomap_ops ntfs_read_iomap_ops = {
	.iomap_begin = ntfs_read_iomap_begin,
	.iomap_end = ntfs_read_iomap_end,
};
298*b041ca56SNamjae Jeon 
299*b041ca56SNamjae Jeon /*
300*b041ca56SNamjae Jeon  * Check that the cached iomap still matches the NTFS runlist before
301*b041ca56SNamjae Jeon  * iomap_zero_range() is called. if the runlist changes while iomap is
302*b041ca56SNamjae Jeon  * iterating a cached iomap, iomap_zero_range() may overwrite folios
303*b041ca56SNamjae Jeon  * that have been already written with valid data.
304*b041ca56SNamjae Jeon  */
305*b041ca56SNamjae Jeon static bool ntfs_iomap_valid(struct inode *inode, const struct iomap *iomap)
306*b041ca56SNamjae Jeon {
307*b041ca56SNamjae Jeon 	struct ntfs_inode *ni = NTFS_I(inode);
308*b041ca56SNamjae Jeon 	struct runlist_element *rl;
309*b041ca56SNamjae Jeon 	s64 vcn, lcn;
310*b041ca56SNamjae Jeon 
311*b041ca56SNamjae Jeon 	if (!NInoNonResident(ni))
312*b041ca56SNamjae Jeon 		return false;
313*b041ca56SNamjae Jeon 
314*b041ca56SNamjae Jeon 	vcn = iomap->offset >> ni->vol->cluster_size_bits;
315*b041ca56SNamjae Jeon 
316*b041ca56SNamjae Jeon 	down_read(&ni->runlist.lock);
317*b041ca56SNamjae Jeon 	rl = __ntfs_attr_find_vcn_nolock(&ni->runlist, vcn);
318*b041ca56SNamjae Jeon 	if (IS_ERR(rl)) {
319*b041ca56SNamjae Jeon 		up_read(&ni->runlist.lock);
320*b041ca56SNamjae Jeon 		return false;
321*b041ca56SNamjae Jeon 	}
322*b041ca56SNamjae Jeon 	lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
323*b041ca56SNamjae Jeon 	up_read(&ni->runlist.lock);
324*b041ca56SNamjae Jeon 	return lcn == LCN_DELALLOC;
325*b041ca56SNamjae Jeon }
326*b041ca56SNamjae Jeon 
/* Folio write ops for zero_range: also revalidates the cached mapping. */
static const struct iomap_write_ops ntfs_zero_iomap_folio_ops = {
	.put_folio = ntfs_iomap_put_folio,
	.iomap_valid = ntfs_iomap_valid,
};
331*b041ca56SNamjae Jeon 
332*b041ca56SNamjae Jeon static int ntfs_seek_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
333*b041ca56SNamjae Jeon 		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
334*b041ca56SNamjae Jeon {
335*b041ca56SNamjae Jeon 	return __ntfs_read_iomap_begin(inode, offset, length, flags, iomap,
336*b041ca56SNamjae Jeon 			srcmap, false);
337*b041ca56SNamjae Jeon }
338*b041ca56SNamjae Jeon 
339*b041ca56SNamjae Jeon static int ntfs_zero_read_iomap_end(struct inode *inode, loff_t pos, loff_t length,
340*b041ca56SNamjae Jeon 		ssize_t written, unsigned int flags, struct iomap *iomap)
341*b041ca56SNamjae Jeon {
342*b041ca56SNamjae Jeon 	if ((flags & IOMAP_ZERO) && (iomap->flags & IOMAP_F_STALE))
343*b041ca56SNamjae Jeon 		return -EPERM;
344*b041ca56SNamjae Jeon 	return written;
345*b041ca56SNamjae Jeon }
346*b041ca56SNamjae Jeon 
/* iomap ops for zero_range through the page cache. */
static const struct iomap_ops ntfs_zero_read_iomap_ops = {
	.iomap_begin = ntfs_seek_iomap_begin,
	.iomap_end = ntfs_zero_read_iomap_end,
};
351*b041ca56SNamjae Jeon 
/* iomap ops for SEEK_DATA/SEEK_HOLE. */
const struct iomap_ops ntfs_seek_iomap_ops = {
	.iomap_begin = ntfs_seek_iomap_begin,
	.iomap_end = ntfs_read_iomap_end,
};
356*b041ca56SNamjae Jeon 
357*b041ca56SNamjae Jeon int ntfs_dio_zero_range(struct inode *inode, loff_t offset, loff_t length)
358*b041ca56SNamjae Jeon {
359*b041ca56SNamjae Jeon 	if ((offset | length) & (SECTOR_SIZE - 1))
360*b041ca56SNamjae Jeon 		return -EINVAL;
361*b041ca56SNamjae Jeon 
362*b041ca56SNamjae Jeon 	return  blkdev_issue_zeroout(inode->i_sb->s_bdev,
363*b041ca56SNamjae Jeon 				     offset >> SECTOR_SHIFT,
364*b041ca56SNamjae Jeon 				     length >> SECTOR_SHIFT,
365*b041ca56SNamjae Jeon 				     GFP_NOFS,
366*b041ca56SNamjae Jeon 				     BLKDEV_ZERO_NOUNMAP);
367*b041ca56SNamjae Jeon }
368*b041ca56SNamjae Jeon 
369*b041ca56SNamjae Jeon static int ntfs_zero_range(struct inode *inode, loff_t offset, loff_t length)
370*b041ca56SNamjae Jeon {
371*b041ca56SNamjae Jeon 	return iomap_zero_range(inode,
372*b041ca56SNamjae Jeon 				offset, length,
373*b041ca56SNamjae Jeon 				NULL,
374*b041ca56SNamjae Jeon 				&ntfs_zero_read_iomap_ops,
375*b041ca56SNamjae Jeon 				&ntfs_zero_iomap_folio_ops,
376*b041ca56SNamjae Jeon 				NULL);
377*b041ca56SNamjae Jeon }
378*b041ca56SNamjae Jeon 
379*b041ca56SNamjae Jeon static int ntfs_write_simple_iomap_begin_non_resident(struct inode *inode, loff_t offset,
380*b041ca56SNamjae Jeon 						      loff_t length, struct iomap *iomap)
381*b041ca56SNamjae Jeon {
382*b041ca56SNamjae Jeon 	struct ntfs_inode *ni = NTFS_I(inode);
383*b041ca56SNamjae Jeon 	struct ntfs_volume *vol = ni->vol;
384*b041ca56SNamjae Jeon 	loff_t vcn_ofs, rl_length;
385*b041ca56SNamjae Jeon 	struct runlist_element *rl, *rlc;
386*b041ca56SNamjae Jeon 	bool is_retry = false;
387*b041ca56SNamjae Jeon 	int err;
388*b041ca56SNamjae Jeon 	s64 vcn, lcn;
389*b041ca56SNamjae Jeon 	s64 max_clu_count =
390*b041ca56SNamjae Jeon 		ntfs_bytes_to_cluster(vol, round_up(length, vol->cluster_size));
391*b041ca56SNamjae Jeon 
392*b041ca56SNamjae Jeon 	vcn = ntfs_bytes_to_cluster(vol, offset);
393*b041ca56SNamjae Jeon 	vcn_ofs = ntfs_bytes_to_cluster_off(vol, offset);
394*b041ca56SNamjae Jeon 
395*b041ca56SNamjae Jeon 	down_read(&ni->runlist.lock);
396*b041ca56SNamjae Jeon 	rl = ni->runlist.rl;
397*b041ca56SNamjae Jeon 	if (!rl) {
398*b041ca56SNamjae Jeon 		up_read(&ni->runlist.lock);
399*b041ca56SNamjae Jeon 		err = ntfs_map_runlist(ni, vcn);
400*b041ca56SNamjae Jeon 		if (err) {
401*b041ca56SNamjae Jeon 			mutex_unlock(&ni->mrec_lock);
402*b041ca56SNamjae Jeon 			return -ENOENT;
403*b041ca56SNamjae Jeon 		}
404*b041ca56SNamjae Jeon 		down_read(&ni->runlist.lock);
405*b041ca56SNamjae Jeon 		rl = ni->runlist.rl;
406*b041ca56SNamjae Jeon 	}
407*b041ca56SNamjae Jeon 	up_read(&ni->runlist.lock);
408*b041ca56SNamjae Jeon 
409*b041ca56SNamjae Jeon 	down_write(&ni->runlist.lock);
410*b041ca56SNamjae Jeon remap_rl:
411*b041ca56SNamjae Jeon 	/* Seek to element containing target vcn. */
412*b041ca56SNamjae Jeon 	rl = __ntfs_attr_find_vcn_nolock(&ni->runlist, vcn);
413*b041ca56SNamjae Jeon 	if (IS_ERR(rl)) {
414*b041ca56SNamjae Jeon 		up_write(&ni->runlist.lock);
415*b041ca56SNamjae Jeon 		mutex_unlock(&ni->mrec_lock);
416*b041ca56SNamjae Jeon 		return -EIO;
417*b041ca56SNamjae Jeon 	}
418*b041ca56SNamjae Jeon 	lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
419*b041ca56SNamjae Jeon 
420*b041ca56SNamjae Jeon 	if (lcn <= LCN_RL_NOT_MAPPED && is_retry == false) {
421*b041ca56SNamjae Jeon 		is_retry = true;
422*b041ca56SNamjae Jeon 		if (!ntfs_map_runlist_nolock(ni, vcn, NULL)) {
423*b041ca56SNamjae Jeon 			rl = ni->runlist.rl;
424*b041ca56SNamjae Jeon 			goto remap_rl;
425*b041ca56SNamjae Jeon 		}
426*b041ca56SNamjae Jeon 	}
427*b041ca56SNamjae Jeon 
428*b041ca56SNamjae Jeon 	max_clu_count = min(max_clu_count, rl->length - (vcn - rl->vcn));
429*b041ca56SNamjae Jeon 	if (max_clu_count == 0) {
430*b041ca56SNamjae Jeon 		ntfs_error(inode->i_sb,
431*b041ca56SNamjae Jeon 				"runlist(vcn : %lld, length : %lld) is corrupted\n",
432*b041ca56SNamjae Jeon 				rl->vcn, rl->length);
433*b041ca56SNamjae Jeon 		up_write(&ni->runlist.lock);
434*b041ca56SNamjae Jeon 		mutex_unlock(&ni->mrec_lock);
435*b041ca56SNamjae Jeon 		return -EIO;
436*b041ca56SNamjae Jeon 	}
437*b041ca56SNamjae Jeon 
438*b041ca56SNamjae Jeon 	iomap->bdev = inode->i_sb->s_bdev;
439*b041ca56SNamjae Jeon 	iomap->offset = offset;
440*b041ca56SNamjae Jeon 
441*b041ca56SNamjae Jeon 	if (lcn <= LCN_DELALLOC) {
442*b041ca56SNamjae Jeon 		if (lcn < LCN_DELALLOC) {
443*b041ca56SNamjae Jeon 			max_clu_count =
444*b041ca56SNamjae Jeon 				ntfs_available_clusters_count(vol, max_clu_count);
445*b041ca56SNamjae Jeon 			if (max_clu_count < 0) {
446*b041ca56SNamjae Jeon 				err = max_clu_count;
447*b041ca56SNamjae Jeon 				up_write(&ni->runlist.lock);
448*b041ca56SNamjae Jeon 				mutex_unlock(&ni->mrec_lock);
449*b041ca56SNamjae Jeon 				return err;
450*b041ca56SNamjae Jeon 			}
451*b041ca56SNamjae Jeon 		}
452*b041ca56SNamjae Jeon 
453*b041ca56SNamjae Jeon 		iomap->type = IOMAP_DELALLOC;
454*b041ca56SNamjae Jeon 		iomap->addr = IOMAP_NULL_ADDR;
455*b041ca56SNamjae Jeon 
456*b041ca56SNamjae Jeon 		if (lcn <= LCN_HOLE) {
457*b041ca56SNamjae Jeon 			size_t new_rl_count;
458*b041ca56SNamjae Jeon 
459*b041ca56SNamjae Jeon 			rlc = kmalloc(sizeof(struct runlist_element) * 2,
460*b041ca56SNamjae Jeon 					GFP_NOFS);
461*b041ca56SNamjae Jeon 			if (!rlc) {
462*b041ca56SNamjae Jeon 				up_write(&ni->runlist.lock);
463*b041ca56SNamjae Jeon 				mutex_unlock(&ni->mrec_lock);
464*b041ca56SNamjae Jeon 				return -ENOMEM;
465*b041ca56SNamjae Jeon 			}
466*b041ca56SNamjae Jeon 
467*b041ca56SNamjae Jeon 			rlc->vcn = vcn;
468*b041ca56SNamjae Jeon 			rlc->lcn = LCN_DELALLOC;
469*b041ca56SNamjae Jeon 			rlc->length = max_clu_count;
470*b041ca56SNamjae Jeon 
471*b041ca56SNamjae Jeon 			rlc[1].vcn = vcn + max_clu_count;
472*b041ca56SNamjae Jeon 			rlc[1].lcn = LCN_RL_NOT_MAPPED;
473*b041ca56SNamjae Jeon 			rlc[1].length = 0;
474*b041ca56SNamjae Jeon 
475*b041ca56SNamjae Jeon 			rl = ntfs_runlists_merge(&ni->runlist, rlc, 0,
476*b041ca56SNamjae Jeon 					&new_rl_count);
477*b041ca56SNamjae Jeon 			if (IS_ERR(rl)) {
478*b041ca56SNamjae Jeon 				ntfs_error(vol->sb, "Failed to merge runlists");
479*b041ca56SNamjae Jeon 				up_write(&ni->runlist.lock);
480*b041ca56SNamjae Jeon 				mutex_unlock(&ni->mrec_lock);
481*b041ca56SNamjae Jeon 				kvfree(rlc);
482*b041ca56SNamjae Jeon 				return PTR_ERR(rl);
483*b041ca56SNamjae Jeon 			}
484*b041ca56SNamjae Jeon 
485*b041ca56SNamjae Jeon 			ni->runlist.rl = rl;
486*b041ca56SNamjae Jeon 			ni->runlist.count = new_rl_count;
487*b041ca56SNamjae Jeon 			ni->i_dealloc_clusters += max_clu_count;
488*b041ca56SNamjae Jeon 		}
489*b041ca56SNamjae Jeon 		up_write(&ni->runlist.lock);
490*b041ca56SNamjae Jeon 		mutex_unlock(&ni->mrec_lock);
491*b041ca56SNamjae Jeon 
492*b041ca56SNamjae Jeon 		if (lcn < LCN_DELALLOC)
493*b041ca56SNamjae Jeon 			ntfs_hold_dirty_clusters(vol, max_clu_count);
494*b041ca56SNamjae Jeon 
495*b041ca56SNamjae Jeon 		rl_length = ntfs_cluster_to_bytes(vol, max_clu_count);
496*b041ca56SNamjae Jeon 		if (length > rl_length - vcn_ofs)
497*b041ca56SNamjae Jeon 			iomap->length = rl_length - vcn_ofs;
498*b041ca56SNamjae Jeon 		else
499*b041ca56SNamjae Jeon 			iomap->length = length;
500*b041ca56SNamjae Jeon 
501*b041ca56SNamjae Jeon 		iomap->flags = IOMAP_F_NEW;
502*b041ca56SNamjae Jeon 		if (lcn <= LCN_HOLE) {
503*b041ca56SNamjae Jeon 			loff_t end = offset + length;
504*b041ca56SNamjae Jeon 
505*b041ca56SNamjae Jeon 			if (vcn_ofs || ((vol->cluster_size > iomap->length) &&
506*b041ca56SNamjae Jeon 					end < ni->initialized_size)) {
507*b041ca56SNamjae Jeon 				loff_t z_start, z_end;
508*b041ca56SNamjae Jeon 
509*b041ca56SNamjae Jeon 				z_start = vcn << vol->cluster_size_bits;
510*b041ca56SNamjae Jeon 				z_end = min_t(loff_t, z_start + vol->cluster_size,
511*b041ca56SNamjae Jeon 					      i_size_read(inode));
512*b041ca56SNamjae Jeon 				if (z_end > z_start)
513*b041ca56SNamjae Jeon 					err = ntfs_zero_range(inode,
514*b041ca56SNamjae Jeon 							      z_start,
515*b041ca56SNamjae Jeon 							      z_end - z_start);
516*b041ca56SNamjae Jeon 			}
517*b041ca56SNamjae Jeon 			if ((!err || err == -EPERM) &&
518*b041ca56SNamjae Jeon 			    max_clu_count > 1 &&
519*b041ca56SNamjae Jeon 			    (iomap->length & vol->cluster_size_mask &&
520*b041ca56SNamjae Jeon 			     end < ni->initialized_size)) {
521*b041ca56SNamjae Jeon 				loff_t z_start, z_end;
522*b041ca56SNamjae Jeon 
523*b041ca56SNamjae Jeon 				z_start = (vcn + max_clu_count - 1) <<
524*b041ca56SNamjae Jeon 					vol->cluster_size_bits;
525*b041ca56SNamjae Jeon 				z_end = min_t(loff_t, z_start + vol->cluster_size,
526*b041ca56SNamjae Jeon 					      i_size_read(inode));
527*b041ca56SNamjae Jeon 				if (z_end > z_start)
528*b041ca56SNamjae Jeon 					err = ntfs_zero_range(inode,
529*b041ca56SNamjae Jeon 							      z_start,
530*b041ca56SNamjae Jeon 							      z_end - z_start);
531*b041ca56SNamjae Jeon 			}
532*b041ca56SNamjae Jeon 
533*b041ca56SNamjae Jeon 			if (err == -EPERM)
534*b041ca56SNamjae Jeon 				err = 0;
535*b041ca56SNamjae Jeon 			if (err) {
536*b041ca56SNamjae Jeon 				ntfs_release_dirty_clusters(vol, max_clu_count);
537*b041ca56SNamjae Jeon 				return err;
538*b041ca56SNamjae Jeon 			}
539*b041ca56SNamjae Jeon 		}
540*b041ca56SNamjae Jeon 	} else {
541*b041ca56SNamjae Jeon 		up_write(&ni->runlist.lock);
542*b041ca56SNamjae Jeon 		mutex_unlock(&ni->mrec_lock);
543*b041ca56SNamjae Jeon 
544*b041ca56SNamjae Jeon 		iomap->type = IOMAP_MAPPED;
545*b041ca56SNamjae Jeon 		iomap->addr = ntfs_cluster_to_bytes(vol, lcn) + vcn_ofs;
546*b041ca56SNamjae Jeon 
547*b041ca56SNamjae Jeon 		rl_length = ntfs_cluster_to_bytes(vol, max_clu_count);
548*b041ca56SNamjae Jeon 		if (length > rl_length - vcn_ofs)
549*b041ca56SNamjae Jeon 			iomap->length = rl_length - vcn_ofs;
550*b041ca56SNamjae Jeon 		else
551*b041ca56SNamjae Jeon 			iomap->length = length;
552*b041ca56SNamjae Jeon 	}
553*b041ca56SNamjae Jeon 
554*b041ca56SNamjae Jeon 	return 0;
555*b041ca56SNamjae Jeon }
556*b041ca56SNamjae Jeon 
557*b041ca56SNamjae Jeon #define NTFS_IOMAP_FLAGS_BEGIN		BIT(1)
558*b041ca56SNamjae Jeon #define NTFS_IOMAP_FLAGS_DIO		BIT(2)
559*b041ca56SNamjae Jeon #define	NTFS_IOMAP_FLAGS_MKWRITE	BIT(3)
560*b041ca56SNamjae Jeon #define	NTFS_IOMAP_FLAGS_WRITEBACK	BIT(4)
561*b041ca56SNamjae Jeon 
/*
 * Map a range for direct I/O, page-fault writes (mkwrite) or writeback of
 * a non-resident attribute, allocating real clusters where needed.
 *
 * Called with ni->mrec_lock held; the lock is dropped once the runlist
 * has been updated.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int ntfs_write_da_iomap_begin_non_resident(struct inode *inode,
		loff_t offset, loff_t length, unsigned int flags,
		struct iomap *iomap, int ntfs_iomap_flags)
{
	struct ntfs_inode *ni = NTFS_I(inode);
	struct ntfs_volume *vol = ni->vol;
	loff_t vcn_ofs, rl_length;
	s64 vcn, start_lcn, lcn_count;
	bool balloc = false, update_mp;
	int err;
	s64 max_clu_count =
		ntfs_bytes_to_cluster(vol, round_up(length, vol->cluster_size));

	vcn = ntfs_bytes_to_cluster(vol, offset);
	vcn_ofs = ntfs_bytes_to_cluster_off(vol, offset);

	/*
	 * Update the on-disk mapping pairs immediately for DIO/mkwrite,
	 * attribute inodes and system files (mft_no < FILE_first_user).
	 */
	update_mp = ntfs_iomap_flags & (NTFS_IOMAP_FLAGS_DIO | NTFS_IOMAP_FLAGS_MKWRITE) ||
			NInoAttr(ni) || ni->mft_no < FILE_first_user;
	down_write(&ni->runlist.lock);
	err = ntfs_attr_map_cluster(ni, vcn, &start_lcn, &lcn_count,
			max_clu_count, &balloc, update_mp,
			ntfs_iomap_flags & NTFS_IOMAP_FLAGS_WRITEBACK);
	up_write(&ni->runlist.lock);
	mutex_unlock(&ni->mrec_lock);
	if (err) {
		ni->i_dealloc_clusters = 0;
		return err;
	}

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = offset;

	/* Clamp the extent to the clusters actually mapped. */
	rl_length = ntfs_cluster_to_bytes(vol, lcn_count);
	if (length > rl_length - vcn_ofs)
		iomap->length = rl_length - vcn_ofs;
	else
		iomap->length = length;

	if (start_lcn == LCN_HOLE)
		iomap->type = IOMAP_HOLE;
	else
		iomap->type = IOMAP_MAPPED;
	if (balloc == true)
		iomap->flags = IOMAP_F_NEW;

	/*
	 * NOTE(review): addr is computed even when start_lcn == LCN_HOLE
	 * (negative sentinel) — presumably harmless because iomap ignores
	 * addr for IOMAP_HOLE; confirm.
	 */
	iomap->addr = ntfs_cluster_to_bytes(vol, start_lcn) + vcn_ofs;

	if (balloc == true) {
		if (flags & IOMAP_DIRECT ||
		    ntfs_iomap_flags & NTFS_IOMAP_FLAGS_MKWRITE) {
			loff_t end = offset + length;

			/*
			 * Freshly allocated clusters contain stale data:
			 * zero the partial first and last clusters on disk
			 * before they become reachable.
			 */
			if (vcn_ofs || ((vol->cluster_size > iomap->length) &&
					end < ni->initialized_size))
				err = ntfs_dio_zero_range(inode,
							  start_lcn <<
							  vol->cluster_size_bits,
							  vol->cluster_size);
			if (!err && lcn_count > 1 &&
			    (iomap->length & vol->cluster_size_mask &&
			     end < ni->initialized_size))
				err = ntfs_dio_zero_range(inode,
							  (start_lcn + lcn_count - 1) <<
							  vol->cluster_size_bits,
							  vol->cluster_size);
		} else {
			/* Writeback consumed previously delayed clusters. */
			if (lcn_count > ni->i_dealloc_clusters)
				ni->i_dealloc_clusters = 0;
			else
				ni->i_dealloc_clusters -= lcn_count;
		}
		if (err < 0)
			return err;
	}

	/* mkwrite may extend the initialized region of the attribute. */
	if (ntfs_iomap_flags & NTFS_IOMAP_FLAGS_MKWRITE &&
	    iomap->offset + iomap->length > ni->initialized_size) {
		err = ntfs_attr_set_initialized_size(ni, iomap->offset +
				iomap->length);
	}

	return err;
}
645*b041ca56SNamjae Jeon 
646*b041ca56SNamjae Jeon static int ntfs_write_iomap_begin_resident(struct inode *inode, loff_t offset,
647*b041ca56SNamjae Jeon 		struct iomap *iomap)
648*b041ca56SNamjae Jeon {
649*b041ca56SNamjae Jeon 	struct ntfs_inode *ni = NTFS_I(inode);
650*b041ca56SNamjae Jeon 	struct attr_record *a;
651*b041ca56SNamjae Jeon 	struct ntfs_attr_search_ctx *ctx;
652*b041ca56SNamjae Jeon 	u32 attr_len;
653*b041ca56SNamjae Jeon 	int err = 0;
654*b041ca56SNamjae Jeon 	char *kattr;
655*b041ca56SNamjae Jeon 
656*b041ca56SNamjae Jeon 	ctx = ntfs_attr_get_search_ctx(ni, NULL);
657*b041ca56SNamjae Jeon 	if (!ctx) {
658*b041ca56SNamjae Jeon 		err = -ENOMEM;
659*b041ca56SNamjae Jeon 		goto out;
660*b041ca56SNamjae Jeon 	}
661*b041ca56SNamjae Jeon 
662*b041ca56SNamjae Jeon 	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
663*b041ca56SNamjae Jeon 			CASE_SENSITIVE, 0, NULL, 0, ctx);
664*b041ca56SNamjae Jeon 	if (err) {
665*b041ca56SNamjae Jeon 		if (err == -ENOENT)
666*b041ca56SNamjae Jeon 			err = -EIO;
667*b041ca56SNamjae Jeon 		goto out;
668*b041ca56SNamjae Jeon 	}
669*b041ca56SNamjae Jeon 
670*b041ca56SNamjae Jeon 	a = ctx->attr;
671*b041ca56SNamjae Jeon 	/* The total length of the attribute value. */
672*b041ca56SNamjae Jeon 	attr_len = le32_to_cpu(a->data.resident.value_length);
673*b041ca56SNamjae Jeon 	kattr = (u8 *)a + le16_to_cpu(a->data.resident.value_offset);
674*b041ca56SNamjae Jeon 
675*b041ca56SNamjae Jeon 	iomap->inline_data = kmemdup(kattr, attr_len, GFP_KERNEL);
676*b041ca56SNamjae Jeon 	if (!iomap->inline_data) {
677*b041ca56SNamjae Jeon 		err = -ENOMEM;
678*b041ca56SNamjae Jeon 		goto out;
679*b041ca56SNamjae Jeon 	}
680*b041ca56SNamjae Jeon 
681*b041ca56SNamjae Jeon 	iomap->type = IOMAP_INLINE;
682*b041ca56SNamjae Jeon 	iomap->offset = 0;
683*b041ca56SNamjae Jeon 	/* iomap requires there is only one INLINE_DATA extent */
684*b041ca56SNamjae Jeon 	iomap->length = attr_len;
685*b041ca56SNamjae Jeon 
686*b041ca56SNamjae Jeon out:
687*b041ca56SNamjae Jeon 	if (ctx)
688*b041ca56SNamjae Jeon 		ntfs_attr_put_search_ctx(ctx);
689*b041ca56SNamjae Jeon 	mutex_unlock(&ni->mrec_lock);
690*b041ca56SNamjae Jeon 	return err;
691*b041ca56SNamjae Jeon }
692*b041ca56SNamjae Jeon 
693*b041ca56SNamjae Jeon static int ntfs_write_iomap_begin_non_resident(struct inode *inode, loff_t offset,
694*b041ca56SNamjae Jeon 					       loff_t length, unsigned int flags,
695*b041ca56SNamjae Jeon 					       struct iomap *iomap, int ntfs_iomap_flags)
696*b041ca56SNamjae Jeon {
697*b041ca56SNamjae Jeon 	struct ntfs_inode *ni = NTFS_I(inode);
698*b041ca56SNamjae Jeon 
699*b041ca56SNamjae Jeon 	if (ntfs_iomap_flags & (NTFS_IOMAP_FLAGS_BEGIN | NTFS_IOMAP_FLAGS_DIO) &&
700*b041ca56SNamjae Jeon 	    offset + length > ni->initialized_size) {
701*b041ca56SNamjae Jeon 		int ret;
702*b041ca56SNamjae Jeon 
703*b041ca56SNamjae Jeon 		ret = ntfs_extend_initialized_size(inode, offset,
704*b041ca56SNamjae Jeon 						   offset + length,
705*b041ca56SNamjae Jeon 						   ntfs_iomap_flags &
706*b041ca56SNamjae Jeon 						   NTFS_IOMAP_FLAGS_DIO);
707*b041ca56SNamjae Jeon 		if (ret < 0)
708*b041ca56SNamjae Jeon 			return ret;
709*b041ca56SNamjae Jeon 	}
710*b041ca56SNamjae Jeon 
711*b041ca56SNamjae Jeon 	mutex_lock(&ni->mrec_lock);
712*b041ca56SNamjae Jeon 	if (ntfs_iomap_flags & NTFS_IOMAP_FLAGS_BEGIN)
713*b041ca56SNamjae Jeon 		return  ntfs_write_simple_iomap_begin_non_resident(inode, offset,
714*b041ca56SNamjae Jeon 								   length, iomap);
715*b041ca56SNamjae Jeon 	else
716*b041ca56SNamjae Jeon 		return ntfs_write_da_iomap_begin_non_resident(inode,
717*b041ca56SNamjae Jeon 							      offset, length,
718*b041ca56SNamjae Jeon 							      flags, iomap,
719*b041ca56SNamjae Jeon 							      ntfs_iomap_flags);
720*b041ca56SNamjae Jeon }
721*b041ca56SNamjae Jeon 
722*b041ca56SNamjae Jeon static int __ntfs_write_iomap_begin(struct inode *inode, loff_t offset,
723*b041ca56SNamjae Jeon 				    loff_t length, unsigned int flags,
724*b041ca56SNamjae Jeon 				    struct iomap *iomap, int ntfs_iomap_flags)
725*b041ca56SNamjae Jeon {
726*b041ca56SNamjae Jeon 	struct ntfs_inode *ni = NTFS_I(inode);
727*b041ca56SNamjae Jeon 	loff_t end = offset + length;
728*b041ca56SNamjae Jeon 
729*b041ca56SNamjae Jeon 	if (NVolShutdown(ni->vol))
730*b041ca56SNamjae Jeon 		return -EIO;
731*b041ca56SNamjae Jeon 
732*b041ca56SNamjae Jeon 	if (ntfs_iomap_flags & (NTFS_IOMAP_FLAGS_BEGIN | NTFS_IOMAP_FLAGS_DIO) &&
733*b041ca56SNamjae Jeon 	    end > ni->data_size) {
734*b041ca56SNamjae Jeon 		struct ntfs_volume *vol = ni->vol;
735*b041ca56SNamjae Jeon 		int ret;
736*b041ca56SNamjae Jeon 
737*b041ca56SNamjae Jeon 		mutex_lock(&ni->mrec_lock);
738*b041ca56SNamjae Jeon 		if (end > ni->allocated_size &&
739*b041ca56SNamjae Jeon 		    end < ni->allocated_size + vol->preallocated_size)
740*b041ca56SNamjae Jeon 			ret = ntfs_attr_expand(ni, end,
741*b041ca56SNamjae Jeon 					ni->allocated_size + vol->preallocated_size);
742*b041ca56SNamjae Jeon 		else
743*b041ca56SNamjae Jeon 			ret = ntfs_attr_expand(ni, end, 0);
744*b041ca56SNamjae Jeon 		mutex_unlock(&ni->mrec_lock);
745*b041ca56SNamjae Jeon 		if (ret)
746*b041ca56SNamjae Jeon 			return ret;
747*b041ca56SNamjae Jeon 	}
748*b041ca56SNamjae Jeon 
749*b041ca56SNamjae Jeon 	if (!NInoNonResident(ni)) {
750*b041ca56SNamjae Jeon 		mutex_lock(&ni->mrec_lock);
751*b041ca56SNamjae Jeon 		return ntfs_write_iomap_begin_resident(inode, offset, iomap);
752*b041ca56SNamjae Jeon 	}
753*b041ca56SNamjae Jeon 	return  ntfs_write_iomap_begin_non_resident(inode, offset, length, flags,
754*b041ca56SNamjae Jeon 						    iomap, ntfs_iomap_flags);
755*b041ca56SNamjae Jeon }
756*b041ca56SNamjae Jeon 
/*
 * ->iomap_begin for buffered writes: delegate to the common worker in
 * buffered-write mode (NTFS_IOMAP_FLAGS_BEGIN).
 */
static int ntfs_write_iomap_begin(struct inode *inode, loff_t offset,
				  loff_t length, unsigned int flags,
				  struct iomap *iomap, struct iomap *srcmap)
{
	return __ntfs_write_iomap_begin(inode, offset, length, flags, iomap,
			NTFS_IOMAP_FLAGS_BEGIN);
}
764*b041ca56SNamjae Jeon 
765*b041ca56SNamjae Jeon static int ntfs_write_iomap_end_resident(struct inode *inode, loff_t pos,
766*b041ca56SNamjae Jeon 					 loff_t length, ssize_t written,
767*b041ca56SNamjae Jeon 					 unsigned int flags, struct iomap *iomap)
768*b041ca56SNamjae Jeon {
769*b041ca56SNamjae Jeon 	struct ntfs_inode *ni = NTFS_I(inode);
770*b041ca56SNamjae Jeon 	struct ntfs_attr_search_ctx *ctx;
771*b041ca56SNamjae Jeon 	u32 attr_len;
772*b041ca56SNamjae Jeon 	int err;
773*b041ca56SNamjae Jeon 	char *kattr;
774*b041ca56SNamjae Jeon 
775*b041ca56SNamjae Jeon 	mutex_lock(&ni->mrec_lock);
776*b041ca56SNamjae Jeon 	ctx = ntfs_attr_get_search_ctx(ni, NULL);
777*b041ca56SNamjae Jeon 	if (!ctx) {
778*b041ca56SNamjae Jeon 		written = -ENOMEM;
779*b041ca56SNamjae Jeon 		mutex_unlock(&ni->mrec_lock);
780*b041ca56SNamjae Jeon 		return written;
781*b041ca56SNamjae Jeon 	}
782*b041ca56SNamjae Jeon 
783*b041ca56SNamjae Jeon 	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
784*b041ca56SNamjae Jeon 			       CASE_SENSITIVE, 0, NULL, 0, ctx);
785*b041ca56SNamjae Jeon 	if (err) {
786*b041ca56SNamjae Jeon 		if (err == -ENOENT)
787*b041ca56SNamjae Jeon 			err = -EIO;
788*b041ca56SNamjae Jeon 		written = err;
789*b041ca56SNamjae Jeon 		goto err_out;
790*b041ca56SNamjae Jeon 	}
791*b041ca56SNamjae Jeon 
792*b041ca56SNamjae Jeon 	/* The total length of the attribute value. */
793*b041ca56SNamjae Jeon 	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
794*b041ca56SNamjae Jeon 	if (pos >= attr_len || pos + written > attr_len)
795*b041ca56SNamjae Jeon 		goto err_out;
796*b041ca56SNamjae Jeon 
797*b041ca56SNamjae Jeon 	kattr = (u8 *)ctx->attr + le16_to_cpu(ctx->attr->data.resident.value_offset);
798*b041ca56SNamjae Jeon 	memcpy(kattr + pos, iomap_inline_data(iomap, pos), written);
799*b041ca56SNamjae Jeon 	mark_mft_record_dirty(ctx->ntfs_ino);
800*b041ca56SNamjae Jeon err_out:
801*b041ca56SNamjae Jeon 	ntfs_attr_put_search_ctx(ctx);
802*b041ca56SNamjae Jeon 	kfree(iomap->inline_data);
803*b041ca56SNamjae Jeon 	mutex_unlock(&ni->mrec_lock);
804*b041ca56SNamjae Jeon 	return written;
805*b041ca56SNamjae Jeon 
806*b041ca56SNamjae Jeon }
807*b041ca56SNamjae Jeon 
/*
 * Common ->iomap_end: resident (IOMAP_INLINE) extents need their modified
 * data copied back into the MFT record; all other mappings need no
 * teardown, so just report @written.
 */
static int ntfs_write_iomap_end(struct inode *inode, loff_t pos, loff_t length,
				ssize_t written, unsigned int flags,
				struct iomap *iomap)
{
	if (iomap->type == IOMAP_INLINE)
		return ntfs_write_iomap_end_resident(inode, pos, length,
						     written, flags, iomap);
	return written;
}
817*b041ca56SNamjae Jeon 
/* iomap operations for the buffered-write path. */
const struct iomap_ops ntfs_write_iomap_ops = {
	.iomap_begin		= ntfs_write_iomap_begin,
	.iomap_end		= ntfs_write_iomap_end,
};
822*b041ca56SNamjae Jeon 
/*
 * ->iomap_begin for page_mkwrite (write faults): NTFS_IOMAP_FLAGS_MKWRITE
 * makes the worker zero freshly allocated clusters and advance the
 * initialized size past the faulted range.
 */
static int ntfs_page_mkwrite_iomap_begin(struct inode *inode, loff_t offset,
				  loff_t length, unsigned int flags,
				  struct iomap *iomap, struct iomap *srcmap)
{
	return __ntfs_write_iomap_begin(inode, offset, length, flags, iomap,
			NTFS_IOMAP_FLAGS_MKWRITE);
}
830*b041ca56SNamjae Jeon 
/* iomap operations for the page_mkwrite (write-fault) path. */
const struct iomap_ops ntfs_page_mkwrite_iomap_ops = {
	.iomap_begin		= ntfs_page_mkwrite_iomap_begin,
	.iomap_end		= ntfs_write_iomap_end,
};
835*b041ca56SNamjae Jeon 
/*
 * ->iomap_begin for direct I/O: delegate to the common worker in DIO mode
 * (NTFS_IOMAP_FLAGS_DIO).
 */
static int ntfs_dio_iomap_begin(struct inode *inode, loff_t offset,
				  loff_t length, unsigned int flags,
				  struct iomap *iomap, struct iomap *srcmap)
{
	return __ntfs_write_iomap_begin(inode, offset, length, flags, iomap,
			NTFS_IOMAP_FLAGS_DIO);
}
843*b041ca56SNamjae Jeon 
/* iomap operations for the direct-I/O path. */
const struct iomap_ops ntfs_dio_iomap_ops = {
	.iomap_begin		= ntfs_dio_iomap_begin,
	.iomap_end		= ntfs_write_iomap_end,
};
848*b041ca56SNamjae Jeon 
849*b041ca56SNamjae Jeon static ssize_t ntfs_writeback_range(struct iomap_writepage_ctx *wpc,
850*b041ca56SNamjae Jeon 		struct folio *folio, u64 offset, unsigned int len, u64 end_pos)
851*b041ca56SNamjae Jeon {
852*b041ca56SNamjae Jeon 	if (offset < wpc->iomap.offset ||
853*b041ca56SNamjae Jeon 	    offset >= wpc->iomap.offset + wpc->iomap.length) {
854*b041ca56SNamjae Jeon 		int error;
855*b041ca56SNamjae Jeon 
856*b041ca56SNamjae Jeon 		error = __ntfs_write_iomap_begin(wpc->inode, offset,
857*b041ca56SNamjae Jeon 				NTFS_I(wpc->inode)->allocated_size - offset,
858*b041ca56SNamjae Jeon 				IOMAP_WRITE, &wpc->iomap,
859*b041ca56SNamjae Jeon 				NTFS_IOMAP_FLAGS_WRITEBACK);
860*b041ca56SNamjae Jeon 		if (error)
861*b041ca56SNamjae Jeon 			return error;
862*b041ca56SNamjae Jeon 	}
863*b041ca56SNamjae Jeon 
864*b041ca56SNamjae Jeon 	return iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
865*b041ca56SNamjae Jeon }
866*b041ca56SNamjae Jeon 
/* iomap writeback operations (used by ->writepages). */
const struct iomap_writeback_ops ntfs_writeback_ops = {
	.writeback_range	= ntfs_writeback_range,
	.writeback_submit	= iomap_ioend_writeback_submit,
};
871