xref: /linux/fs/ntfs/iomap.c (revision ff9726d7a0068e6c2ae1969415285d12ef4d5c6f)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
 * iomap callback functions
4  *
5  * Copyright (c) 2025 LG Electronics Co., Ltd.
6  */
7 
8 #include <linux/writeback.h>
9 
10 #include "attrib.h"
11 #include "mft.h"
12 #include "ntfs.h"
13 #include "iomap.h"
14 
/*
 * ntfs_iomap_put_folio_non_resident - release a folio after a buffered write
 * @inode:	inode the folio belongs to
 * @pos:	file offset the write started at
 * @len:	number of bytes written
 * @folio:	folio to unlock and release
 *
 * Zero the parts of @folio lying beyond the inode's initialized_size so
 * that stale data is never exposed to readers, then unlock and drop the
 * folio.
 */
static void ntfs_iomap_put_folio_non_resident(struct inode *inode, loff_t pos,
					      unsigned int len, struct folio *folio)
{
	struct ntfs_inode *ni = NTFS_I(inode);
	unsigned long sector_size = 1UL << inode->i_blkbits;
	loff_t start_down, end_up, init;

	/* The written range, rounded out to sector boundaries. */
	start_down = round_down(pos, sector_size);
	end_up = (pos + len - 1) | (sector_size - 1);
	init = ni->initialized_size;

	if (init >= start_down && init <= end_up) {
		/* initialized_size falls inside the sector-aligned range. */
		if (init < pos) {
			loff_t offset = offset_in_folio(folio, pos + len);

			/*
			 * offset_in_folio() wraps to 0 exactly at the folio
			 * end; treat that as "up to the end of the folio".
			 */
			if (offset == 0)
				offset = folio_size(folio);
			/* Zero [init, pos) and [pos + len, folio end). */
			folio_zero_segments(folio,
					    offset_in_folio(folio, init),
					    offset_in_folio(folio, pos),
					    offset,
					    folio_size(folio));

		} else  {
			/*
			 * Zero from whichever of write end / initialized
			 * size is larger, through the end of the folio.
			 */
			loff_t offset = max_t(loff_t, pos + len, init);

			offset = offset_in_folio(folio, offset);
			if (offset == 0)
				offset = folio_size(folio);
			folio_zero_segment(folio,
					   offset,
					   folio_size(folio));
		}
	} else if (init <= pos) {
		/* The whole written range lies beyond initialized_size. */
		loff_t offset = 0, offset2 = offset_in_folio(folio, pos + len);

		/* Start zeroing at init only if it is in this same folio. */
		if ((init >> folio_shift(folio)) == (pos >> folio_shift(folio)))
			offset = offset_in_folio(folio, init);
		if (offset2 == 0)
			offset2 = folio_size(folio);
		/* Zero [offset, pos) and [pos + len, folio end). */
		folio_zero_segments(folio,
				    offset,
				    offset_in_folio(folio, pos),
				    offset2,
				    folio_size(folio));
	}
	folio_unlock(folio);
	folio_put(folio);
}
64 
65 /*
66  * iomap_zero_range is called for an area beyond the initialized size,
67  * garbage values can be read, so zeroing out is needed.
68  */
ntfs_iomap_put_folio(struct inode * inode,loff_t pos,unsigned int len,struct folio * folio)69 static void ntfs_iomap_put_folio(struct inode *inode, loff_t pos,
70 		unsigned int len, struct folio *folio)
71 {
72 	if (NInoNonResident(NTFS_I(inode)))
73 		return ntfs_iomap_put_folio_non_resident(inode, pos,
74 							 len, folio);
75 	folio_unlock(folio);
76 	folio_put(folio);
77 }
78 
/* Write folio ops for the regular buffered write path. */
const struct iomap_write_ops ntfs_iomap_folio_ops = {
	.put_folio = ntfs_iomap_put_folio,
};
82 
/*
 * ntfs_read_iomap_begin_resident - map a resident attribute for reading
 * @inode:	inode to map
 * @offset:	file offset to map
 * @length:	length of mapping
 * @flags:	IOMAP flags
 * @iomap:	iomap structure to fill
 *
 * Copy the resident attribute value into a freshly allocated page and
 * expose it as a single IOMAP_INLINE extent starting at offset 0.  The
 * page is stashed in iomap->private and released by ntfs_read_iomap_end().
 *
 * Return: 0 on success, negative error code on failure.
 */
static int ntfs_read_iomap_begin_resident(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap)
{
	struct ntfs_inode *base_ni, *ni = NTFS_I(inode);
	struct ntfs_attr_search_ctx *ctx;
	loff_t i_size;
	u32 attr_len;
	int err = 0;
	char *kattr;
	struct page *ipage;

	/* Attribute inodes are looked up in their base MFT record. */
	if (NInoAttr(ni))
		base_ni = ni->ext.base_ntfs_ino;
	else
		base_ni = ni;

	ctx = ntfs_attr_get_search_ctx(base_ni, NULL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err))
		goto out;

	/* Clamp the value length to both initialized_size and i_size. */
	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
	if (unlikely(attr_len > ni->initialized_size))
		attr_len = ni->initialized_size;
	i_size = i_size_read(inode);

	if (unlikely(attr_len > i_size)) {
		/* Race with shrinking truncate. */
		attr_len = i_size;
	}

	if (offset >= attr_len) {
		/* Reading entirely beyond the attribute value. */
		if (flags & IOMAP_REPORT)
			err = -ENOENT;
		else {
			iomap->type = IOMAP_HOLE;
			iomap->offset = offset;
			iomap->length = length;
		}
		goto out;
	}

	kattr = (u8 *)ctx->attr + le16_to_cpu(ctx->attr->data.resident.value_offset);

	/*
	 * Bounce the value through a zeroed page so the MFT record need not
	 * stay pinned while iomap consumes the inline data.
	 * NOTE(review): assumes attr_len <= PAGE_SIZE (resident values are
	 * bounded by the MFT record size) — confirm for large-record setups.
	 */
	ipage = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!ipage) {
		err = -ENOMEM;
		goto out;
	}

	memcpy(page_address(ipage), kattr, attr_len);
	iomap->type = IOMAP_INLINE;
	iomap->inline_data = page_address(ipage);
	iomap->offset = 0;
	/* iomap requires a single INLINE extent covering offset 0. */
	iomap->length = attr_len;
	iomap->private = ipage;

out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);

	return err;
}
152 
153 /*
154  * ntfs_read_iomap_begin_non_resident - map non-resident NTFS file data
155  * @inode:		inode to map
156  * @offset:		file offset to map
157  * @length:		length of mapping
158  * @flags:		IOMAP flags
159  * @iomap:		iomap structure to fill
160  * @need_unwritten:	true if UNWRITTEN extent type is needed
161  *
162  * Map a range of a non-resident NTFS file to an iomap extent.
163  *
164  * NTFS UNWRITTEN extent handling:
165  * ================================
166  * The concept of an unwritten extent in NTFS is slightly different from
167  * that of other filesystems. NTFS conceptually manages only a single
168  * continuous unwritten region, which is strictly defined based on
169  * initialized_size.
170  *
171  * File offset layout:
172  *   0                        initialized_size                   i_size(EOF)
173  *   |----------#0----------|----------#1----------|----------#2----------|
174  *   | Actual data          | Pre-allocated        | Pre-allocated        |
175  *   | (user written)       | (within initialized) | (initialized ~ EOF)  |
176  *   |----------------------|----------------------|----------------------|
177  *   MAPPED                 MAPPED                 UNWRITTEN (conditionally)
178  *
179  * Region #0: User-written data, initialized and valid.
180  * Region #1: Pre-allocated within initialized_size, must be zero-initialized
181  *            by the filesystem before exposure to userspace.
182  * Region #2: Pre-allocated beyond initialized_size, does not need initialization.
183  *
184  * The @need_unwritten parameter controls whether region #2 is mapped as
185  * IOMAP_UNWRITTEN or IOMAP_MAPPED:
186  * - For seek operations (SEEK_DATA/SEEK_HOLE): IOMAP_MAPPED is needed to
187  *   prevent iomap_seek_data from incorrectly interpreting pre-allocated
188  *   space as a hole. Since NTFS does not support multiple unwritten extents,
189  *   all pre-allocated regions should be treated as data, not holes.
190  * - For zero_range operations: IOMAP_MAPPED is needed to be zeroed out.
191  *
192  * Return: 0 on success, negative error code on failure.
193  */
static int ntfs_read_iomap_begin_non_resident(struct inode *inode, loff_t offset,
		loff_t length, unsigned int flags, struct iomap *iomap,
		bool need_unwritten)
{
	struct ntfs_inode *ni = NTFS_I(inode);
	s64 vcn;
	s64 lcn;
	struct runlist_element *rl;
	struct ntfs_volume *vol = ni->vol;
	loff_t vcn_ofs;
	loff_t rl_length;

	vcn = ntfs_bytes_to_cluster(vol, offset);
	vcn_ofs = ntfs_bytes_to_cluster_off(vol, offset);

	/*
	 * Write lock: ntfs_attr_vcn_to_rl() may map further runlist
	 * fragments, which modifies the cached runlist.
	 */
	down_write(&ni->runlist.lock);
	rl = ntfs_attr_vcn_to_rl(ni, vcn, &lcn);
	if (IS_ERR(rl)) {
		up_write(&ni->runlist.lock);
		return PTR_ERR(rl);
	}

	if (flags & IOMAP_REPORT) {
		/* Reporting (fiemap-style) treats anything below HOLE as absent. */
		if (lcn < LCN_HOLE) {
			up_write(&ni->runlist.lock);
			return -ENOENT;
		}
	} else if (lcn < LCN_ENOENT) {
		up_write(&ni->runlist.lock);
		return -EINVAL;
	}

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = offset;

	if (lcn <= LCN_DELALLOC) {
		if (lcn == LCN_DELALLOC)
			iomap->type = IOMAP_DELALLOC;
		else
			iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
	} else {
		/* See the UNWRITTEN extent discussion in the comment above. */
		if (need_unwritten && offset >= ni->initialized_size)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
		iomap->addr = ntfs_cluster_to_bytes(vol, lcn) + vcn_ofs;
	}

	/* Bytes remaining in this runlist element starting at @vcn. */
	rl_length = ntfs_cluster_to_bytes(vol, rl->length - (vcn - rl->vcn));

	if (rl_length == 0 && rl->lcn > LCN_DELALLOC) {
		ntfs_error(inode->i_sb,
				"runlist(vcn : %lld, length : %lld, lcn : %lld) is corrupted\n",
				rl->vcn, rl->length, rl->lcn);
		up_write(&ni->runlist.lock);
		return -EIO;
	}

	/* Trim the extent to the end of this runlist element. */
	if (rl_length && length > rl_length - vcn_ofs)
		iomap->length = rl_length - vcn_ofs;
	else
		iomap->length = length;
	up_write(&ni->runlist.lock);

	/*
	 * Split a MAPPED extent at initialized_size (rounded up to a block)
	 * so the tail past it is mapped separately on the next iteration.
	 * Skipped for IOMAP_ZERO so zeroing sees one contiguous extent.
	 */
	if (!(flags & IOMAP_ZERO) &&
			iomap->type == IOMAP_MAPPED &&
			iomap->offset < ni->initialized_size &&
			iomap->offset + iomap->length > ni->initialized_size) {
		iomap->length = round_up(ni->initialized_size, 1 << inode->i_blkbits) -
			iomap->offset;
	}
	iomap->flags |= IOMAP_F_MERGED;

	return 0;
}
270 
/* Dispatch a read mapping request by attribute residency. */
static int __ntfs_read_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap,
		bool need_unwritten)
{
	struct ntfs_inode *ni = NTFS_I(inode);

	if (!NInoNonResident(ni))
		return ntfs_read_iomap_begin_resident(inode, offset, length,
						      flags, iomap);

	return ntfs_read_iomap_begin_non_resident(inode, offset, length, flags,
						  iomap, need_unwritten);
}
281 
/* ->iomap_begin for buffered reads: pre-allocated space maps as UNWRITTEN. */
static int ntfs_read_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	return __ntfs_read_iomap_begin(inode, offset, length, flags,
				       iomap, srcmap, true);
}
288 
/* ->iomap_end for reads: drop the bounce page backing an INLINE mapping. */
static int ntfs_read_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	if (iomap->type != IOMAP_INLINE)
		return written;

	put_page((struct page *)iomap->private);
	return written;
}
300 
/* iomap ops for buffered reads. */
const struct iomap_ops ntfs_read_iomap_ops = {
	.iomap_begin = ntfs_read_iomap_begin,
	.iomap_end = ntfs_read_iomap_end,
};
305 
306 /*
307  * Check that the cached iomap still matches the NTFS runlist before
 * iomap_zero_range() is called. If the runlist changes while iomap is
309  * iterating a cached iomap, iomap_zero_range() may overwrite folios
310  * that have been already written with valid data.
311  */
static bool ntfs_iomap_valid(struct inode *inode, const struct iomap *iomap)
{
	struct ntfs_inode *ni = NTFS_I(inode);
	struct runlist_element *rl;
	s64 vcn, lcn;

	/* Resident attributes have no runlist to validate against. */
	if (!NInoNonResident(ni))
		return false;

	vcn = iomap->offset >> ni->vol->cluster_size_bits;

	down_read(&ni->runlist.lock);
	rl = __ntfs_attr_find_vcn_nolock(&ni->runlist, vcn);
	if (IS_ERR(rl)) {
		up_read(&ni->runlist.lock);
		return false;
	}
	lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
	up_read(&ni->runlist.lock);
	/* The cached map stays valid only while the cluster is delalloc. */
	return lcn == LCN_DELALLOC;
}
333 
/* Folio ops for iomap_zero_range(): also revalidates cached mappings. */
static const struct iomap_write_ops ntfs_zero_iomap_folio_ops = {
	.put_folio = ntfs_iomap_put_folio,
	.iomap_valid = ntfs_iomap_valid,
};
338 
/* ->iomap_begin for llseek: pre-allocated space reports as MAPPED. */
static int ntfs_seek_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	return __ntfs_read_iomap_begin(inode, offset, length, flags,
				       iomap, srcmap, false);
}
345 
/*
 * ->iomap_end for the zeroing read path: a stale mapping while zeroing
 * means the runlist changed underneath us; report -EPERM to the caller.
 */
static int ntfs_zero_read_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	bool stale_zero = (flags & IOMAP_ZERO) &&
			  (iomap->flags & IOMAP_F_STALE);

	return stale_zero ? -EPERM : written;
}
353 
/* iomap ops for the internal zeroing reads issued by ntfs_zero_range(). */
static const struct iomap_ops ntfs_zero_read_iomap_ops = {
	.iomap_begin = ntfs_seek_iomap_begin,
	.iomap_end = ntfs_zero_read_iomap_end,
};
358 
/* iomap ops for SEEK_DATA/SEEK_HOLE. */
const struct iomap_ops ntfs_seek_iomap_ops = {
	.iomap_begin = ntfs_seek_iomap_begin,
	.iomap_end = ntfs_read_iomap_end,
};
363 
/*
 * ntfs_dio_zero_range - zero a sector-aligned byte range directly on disk
 * @inode:	inode whose backing block device is zeroed
 * @offset:	byte offset; must be a multiple of SECTOR_SIZE
 * @length:	byte count; must be a multiple of SECTOR_SIZE
 *
 * Return: 0 on success, -EINVAL on misaligned input, or the error from
 * blkdev_issue_zeroout().
 */
int ntfs_dio_zero_range(struct inode *inode, loff_t offset, loff_t length)
{
	sector_t start_sect, nr_sects;

	if ((offset | length) & (SECTOR_SIZE - 1))
		return -EINVAL;

	start_sect = offset >> SECTOR_SHIFT;
	nr_sects = length >> SECTOR_SHIFT;

	return blkdev_issue_zeroout(inode->i_sb->s_bdev, start_sect, nr_sects,
				    GFP_NOFS, BLKDEV_ZERO_NOUNMAP);
}
375 
/* Zero a byte range of the file via the iomap zeroing machinery. */
static int ntfs_zero_range(struct inode *inode, loff_t offset, loff_t length)
{
	return iomap_zero_range(inode, offset, length, NULL,
				&ntfs_zero_read_iomap_ops,
				&ntfs_zero_iomap_folio_ops, NULL);
}
385 
/*
 * ntfs_write_simple_iomap_begin_non_resident - map a buffered write range
 * @inode:	inode to map
 * @offset:	file offset to map
 * @length:	length of mapping
 * @iomap:	iomap structure to fill
 *
 * Called with ni->mrec_lock held (taken in
 * ntfs_write_iomap_begin_non_resident()); it is released on every path
 * once runlist manipulation is finished.  Holes are converted to delayed
 * allocation (LCN_DELALLOC) runlist entries; partial edge clusters of the
 * converted range that are inside initialized_size are zeroed via
 * ntfs_zero_range().
 *
 * Return: 0 on success, negative error code on failure.
 */
static int ntfs_write_simple_iomap_begin_non_resident(struct inode *inode, loff_t offset,
						      loff_t length, struct iomap *iomap)
{
	struct ntfs_inode *ni = NTFS_I(inode);
	struct ntfs_volume *vol = ni->vol;
	loff_t vcn_ofs, rl_length;
	struct runlist_element *rl, *rlc;
	bool is_retry = false;
	int err = 0;
	s64 vcn, lcn;
	s64 max_clu_count =
		ntfs_bytes_to_cluster(vol, round_up(length, vol->cluster_size));

	vcn = ntfs_bytes_to_cluster(vol, offset);
	vcn_ofs = ntfs_bytes_to_cluster_off(vol, offset);

	/* Make sure the runlist exists at all before taking the write lock. */
	down_read(&ni->runlist.lock);
	rl = ni->runlist.rl;
	if (!rl) {
		up_read(&ni->runlist.lock);
		err = ntfs_map_runlist(ni, vcn);
		if (err) {
			mutex_unlock(&ni->mrec_lock);
			return -ENOENT;
		}
		down_read(&ni->runlist.lock);
		rl = ni->runlist.rl;
	}
	up_read(&ni->runlist.lock);

	down_write(&ni->runlist.lock);
remap_rl:
	/* Seek to element containing target vcn. */
	rl = __ntfs_attr_find_vcn_nolock(&ni->runlist, vcn);
	if (IS_ERR(rl)) {
		up_write(&ni->runlist.lock);
		mutex_unlock(&ni->mrec_lock);
		return -EIO;
	}
	lcn = ntfs_rl_vcn_to_lcn(rl, vcn);

	/* Unmapped fragment: map it in place and retry exactly once. */
	if (lcn <= LCN_RL_NOT_MAPPED && is_retry == false) {
		is_retry = true;
		if (!ntfs_map_runlist_nolock(ni, vcn, NULL)) {
			rl = ni->runlist.rl;
			goto remap_rl;
		}
	}

	/* Clamp the request to the end of this runlist element. */
	max_clu_count = min(max_clu_count, rl->length - (vcn - rl->vcn));
	if (max_clu_count == 0) {
		ntfs_error(inode->i_sb,
				"runlist(vcn : %lld, length : %lld) is corrupted\n",
				rl->vcn, rl->length);
		up_write(&ni->runlist.lock);
		mutex_unlock(&ni->mrec_lock);
		return -EIO;
	}

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = offset;

	if (lcn <= LCN_DELALLOC) {
		/* A genuine hole: check there is space to promise. */
		if (lcn < LCN_DELALLOC) {
			max_clu_count =
				ntfs_available_clusters_count(vol, max_clu_count);
			if (max_clu_count < 0) {
				err = max_clu_count;
				up_write(&ni->runlist.lock);
				mutex_unlock(&ni->mrec_lock);
				return err;
			}
		}

		iomap->type = IOMAP_DELALLOC;
		iomap->addr = IOMAP_NULL_ADDR;

		if (lcn <= LCN_HOLE) {
			size_t new_rl_count;

			/*
			 * Build a two-element runlist (DELALLOC run plus
			 * NOT_MAPPED terminator) and merge it in.
			 */
			rlc = kmalloc(sizeof(struct runlist_element) * 2,
					GFP_NOFS);
			if (!rlc) {
				up_write(&ni->runlist.lock);
				mutex_unlock(&ni->mrec_lock);
				return -ENOMEM;
			}

			rlc->vcn = vcn;
			rlc->lcn = LCN_DELALLOC;
			rlc->length = max_clu_count;

			rlc[1].vcn = vcn + max_clu_count;
			rlc[1].lcn = LCN_RL_NOT_MAPPED;
			rlc[1].length = 0;

			rl = ntfs_runlists_merge(&ni->runlist, rlc, 0,
					&new_rl_count);
			if (IS_ERR(rl)) {
				ntfs_error(vol->sb, "Failed to merge runlists");
				up_write(&ni->runlist.lock);
				mutex_unlock(&ni->mrec_lock);
				kvfree(rlc);
				return PTR_ERR(rl);
			}

			ni->runlist.rl = rl;
			ni->runlist.count = new_rl_count;
			ni->i_dealloc_clusters += max_clu_count;
		}
		up_write(&ni->runlist.lock);
		mutex_unlock(&ni->mrec_lock);

		/* Reserve the promised clusters against the free count. */
		if (lcn < LCN_DELALLOC)
			ntfs_hold_dirty_clusters(vol, max_clu_count);

		rl_length = ntfs_cluster_to_bytes(vol, max_clu_count);
		if (length > rl_length - vcn_ofs)
			iomap->length = rl_length - vcn_ofs;
		else
			iomap->length = length;

		iomap->flags = IOMAP_F_NEW;
		if (lcn <= LCN_HOLE) {
			loff_t end = offset + length;

			/*
			 * Zero the head cluster when the write starts
			 * mid-cluster within the initialized region.
			 */
			if (vcn_ofs || ((vol->cluster_size > iomap->length) &&
					end < ni->initialized_size)) {
				loff_t z_start, z_end;

				z_start = vcn << vol->cluster_size_bits;
				z_end = min_t(loff_t, z_start + vol->cluster_size,
					      i_size_read(inode));
				if (z_end > z_start)
					err = ntfs_zero_range(inode,
							      z_start,
							      z_end - z_start);
			}
			/* Likewise zero a partial tail cluster. */
			if ((!err || err == -EPERM) &&
			    max_clu_count > 1 &&
			    (iomap->length & vol->cluster_size_mask &&
			     end < ni->initialized_size)) {
				loff_t z_start, z_end;

				z_start = (vcn + max_clu_count - 1) <<
					vol->cluster_size_bits;
				z_end = min_t(loff_t, z_start + vol->cluster_size,
					      i_size_read(inode));
				if (z_end > z_start)
					err = ntfs_zero_range(inode,
							      z_start,
							      z_end - z_start);
			}

			/* -EPERM means a stale zeroing map; treat as benign. */
			if (err == -EPERM)
				err = 0;
			if (err) {
				ntfs_release_dirty_clusters(vol, max_clu_count);
				return err;
			}
		}
	} else {
		/* Already allocated: hand back the physical mapping. */
		up_write(&ni->runlist.lock);
		mutex_unlock(&ni->mrec_lock);

		iomap->type = IOMAP_MAPPED;
		iomap->addr = ntfs_cluster_to_bytes(vol, lcn) + vcn_ofs;

		rl_length = ntfs_cluster_to_bytes(vol, max_clu_count);
		if (length > rl_length - vcn_ofs)
			iomap->length = rl_length - vcn_ofs;
		else
			iomap->length = length;
	}

	return 0;
}
563 
/*
 * Internal flags telling __ntfs_write_iomap_begin() which write path
 * invoked it.  (NOTE(review): BIT(0) is unused — confirm intentional.)
 */
#define NTFS_IOMAP_FLAGS_BEGIN		BIT(1)	/* buffered write */
#define NTFS_IOMAP_FLAGS_DIO		BIT(2)	/* direct I/O */
#define	NTFS_IOMAP_FLAGS_MKWRITE	BIT(3)	/* page_mkwrite fault */
#define	NTFS_IOMAP_FLAGS_WRITEBACK	BIT(4)	/* writeback mapping */
568 
/*
 * ntfs_write_da_iomap_begin_non_resident - allocate and map clusters
 * @inode:	inode to map
 * @offset:	file offset to map
 * @length:	length of mapping
 * @flags:	IOMAP flags
 * @iomap:	iomap structure to fill
 * @ntfs_iomap_flags:	NTFS_IOMAP_FLAGS_* of the invoking write path
 *
 * Used by the allocation paths (DIO, page_mkwrite, writeback).  Called
 * with ni->mrec_lock held; the lock is dropped right after the cluster
 * allocation.  Newly allocated partial edge clusters are zeroed directly
 * on disk for DIO/mkwrite.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int ntfs_write_da_iomap_begin_non_resident(struct inode *inode,
		loff_t offset, loff_t length, unsigned int flags,
		struct iomap *iomap, int ntfs_iomap_flags)
{
	struct ntfs_inode *ni = NTFS_I(inode);
	struct ntfs_volume *vol = ni->vol;
	loff_t vcn_ofs, rl_length;
	s64 vcn, start_lcn, lcn_count;
	bool balloc = false, update_mp;
	int err;
	s64 max_clu_count =
		ntfs_bytes_to_cluster(vol, round_up(length, vol->cluster_size));

	vcn = ntfs_bytes_to_cluster(vol, offset);
	vcn_ofs = ntfs_bytes_to_cluster_off(vol, offset);

	/* These cases need the mapping pairs updated immediately. */
	update_mp = ntfs_iomap_flags & (NTFS_IOMAP_FLAGS_DIO | NTFS_IOMAP_FLAGS_MKWRITE) ||
			NInoAttr(ni) || ni->mft_no < FILE_first_user;
	down_write(&ni->runlist.lock);
	err = ntfs_attr_map_cluster(ni, vcn, &start_lcn, &lcn_count,
			max_clu_count, &balloc, update_mp,
			ntfs_iomap_flags & NTFS_IOMAP_FLAGS_WRITEBACK);
	up_write(&ni->runlist.lock);
	mutex_unlock(&ni->mrec_lock);
	if (err) {
		/* Allocation failed: drop pending delalloc accounting. */
		ni->i_dealloc_clusters = 0;
		return err;
	}

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = offset;

	/* Clamp the extent to the clusters actually mapped. */
	rl_length = ntfs_cluster_to_bytes(vol, lcn_count);
	if (length > rl_length - vcn_ofs)
		iomap->length = rl_length - vcn_ofs;
	else
		iomap->length = length;

	if (start_lcn == LCN_HOLE)
		iomap->type = IOMAP_HOLE;
	else
		iomap->type = IOMAP_MAPPED;
	if (balloc == true)
		iomap->flags = IOMAP_F_NEW;

	/*
	 * NOTE(review): addr is computed even for LCN_HOLE (negative lcn);
	 * iomap should ignore addr for IOMAP_HOLE but IOMAP_NULL_ADDR would
	 * be cleaner — confirm.
	 */
	iomap->addr = ntfs_cluster_to_bytes(vol, start_lcn) + vcn_ofs;

	if (balloc == true) {
		if (flags & IOMAP_DIRECT ||
		    ntfs_iomap_flags & NTFS_IOMAP_FLAGS_MKWRITE) {
			loff_t end = offset + length;

			/* Zero partial head/tail clusters on disk. */
			if (vcn_ofs || ((vol->cluster_size > iomap->length) &&
					end < ni->initialized_size))
				err = ntfs_dio_zero_range(inode,
							  start_lcn <<
							  vol->cluster_size_bits,
							  vol->cluster_size);
			if (!err && lcn_count > 1 &&
			    (iomap->length & vol->cluster_size_mask &&
			     end < ni->initialized_size))
				err = ntfs_dio_zero_range(inode,
							  (start_lcn + lcn_count - 1) <<
							  vol->cluster_size_bits,
							  vol->cluster_size);
		} else {
			/* Converted clusters are no longer delalloc. */
			if (lcn_count > ni->i_dealloc_clusters)
				ni->i_dealloc_clusters = 0;
			else
				ni->i_dealloc_clusters -= lcn_count;
		}
		if (err < 0)
			return err;
	}

	/* mkwrite may expose the mapped range immediately: mark it initialized. */
	if (ntfs_iomap_flags & NTFS_IOMAP_FLAGS_MKWRITE &&
	    iomap->offset + iomap->length > ni->initialized_size) {
		err = ntfs_attr_set_initialized_size(ni, iomap->offset +
				iomap->length);
	}

	return err;
}
652 
/*
 * ntfs_write_iomap_begin_resident - map a resident attribute for writing
 * @inode:	inode to map
 * @offset:	file offset of the write (unused; inline extent is at 0)
 * @iomap:	iomap structure to fill
 *
 * Called with ni->mrec_lock held; it is released before returning so that
 * ntfs_write_iomap_end_resident() can re-take it to copy the data back.
 * The attribute value is bounced through a private page exposed as an
 * IOMAP_INLINE extent starting at offset 0.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int ntfs_write_iomap_begin_resident(struct inode *inode, loff_t offset,
		struct iomap *iomap)
{
	struct ntfs_inode *ni = NTFS_I(inode);
	struct attr_record *a;
	struct ntfs_attr_search_ctx *ctx;
	u32 attr_len;
	int err = 0;
	char *kattr;
	struct page *ipage;

	ctx = ntfs_attr_get_search_ctx(ni, NULL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (err) {
		/* A missing attribute at this point means corruption. */
		if (err == -ENOENT)
			err = -EIO;
		goto out;
	}

	a = ctx->attr;
	/* The total length of the attribute value. */
	attr_len = le32_to_cpu(a->data.resident.value_length);
	kattr = (u8 *)a + le16_to_cpu(a->data.resident.value_offset);

	/*
	 * NOTE(review): assumes attr_len <= PAGE_SIZE (resident values are
	 * bounded by the MFT record size) — confirm for large-record setups.
	 */
	ipage = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!ipage) {
		err = -ENOMEM;
		goto out;
	}

	memcpy(page_address(ipage), kattr, attr_len);
	iomap->type = IOMAP_INLINE;
	iomap->inline_data = page_address(ipage);
	iomap->offset = 0;
	/* iomap requires there is only one INLINE_DATA extent */
	iomap->length = attr_len;
	iomap->private = ipage;

out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	mutex_unlock(&ni->mrec_lock);
	return err;
}
703 
/*
 * Begin a write mapping for a non-resident attribute.  Buffered and
 * direct writes that reach past initialized_size first extend it, then
 * the MFT record lock is taken and the request is handed to the simple
 * (buffered) or allocating helper; both release the lock themselves.
 */
static int ntfs_write_iomap_begin_non_resident(struct inode *inode, loff_t offset,
					       loff_t length, unsigned int flags,
					       struct iomap *iomap, int ntfs_iomap_flags)
{
	struct ntfs_inode *ni = NTFS_I(inode);
	loff_t end = offset + length;

	if (ntfs_iomap_flags & (NTFS_IOMAP_FLAGS_BEGIN | NTFS_IOMAP_FLAGS_DIO) &&
	    end > ni->initialized_size) {
		int err = ntfs_extend_initialized_size(inode, offset, end,
						       ntfs_iomap_flags &
						       NTFS_IOMAP_FLAGS_DIO);

		if (err < 0)
			return err;
	}

	mutex_lock(&ni->mrec_lock);

	if (!(ntfs_iomap_flags & NTFS_IOMAP_FLAGS_BEGIN))
		return ntfs_write_da_iomap_begin_non_resident(inode, offset,
							      length, flags,
							      iomap,
							      ntfs_iomap_flags);

	return ntfs_write_simple_iomap_begin_non_resident(inode, offset,
							  length, iomap);
}
732 
/*
 * __ntfs_write_iomap_begin - common entry point for all write mappings
 *
 * Expands the attribute allocation when a buffered/direct write extends
 * past data_size (with modest preallocation), then dispatches by
 * residency.  The resident helper is entered with ni->mrec_lock held and
 * releases it itself; the non-resident helper takes the lock internally.
 */
static int __ntfs_write_iomap_begin(struct inode *inode, loff_t offset,
				    loff_t length, unsigned int flags,
				    struct iomap *iomap, int ntfs_iomap_flags)
{
	struct ntfs_inode *ni = NTFS_I(inode);
	loff_t end = offset + length;

	if (NVolShutdown(ni->vol))
		return -EIO;

	if (ntfs_iomap_flags & (NTFS_IOMAP_FLAGS_BEGIN | NTFS_IOMAP_FLAGS_DIO) &&
	    end > ni->data_size) {
		struct ntfs_volume *vol = ni->vol;
		int ret;

		mutex_lock(&ni->mrec_lock);
		/* Preallocate a bit past @end when the growth is modest. */
		if (end > ni->allocated_size &&
		    end < ni->allocated_size + vol->preallocated_size)
			ret = ntfs_attr_expand(ni, end,
					ni->allocated_size + vol->preallocated_size);
		else
			ret = ntfs_attr_expand(ni, end, 0);
		mutex_unlock(&ni->mrec_lock);
		if (ret)
			return ret;
	}

	if (!NInoNonResident(ni)) {
		/* The resident helper unlocks mrec_lock before returning. */
		mutex_lock(&ni->mrec_lock);
		return ntfs_write_iomap_begin_resident(inode, offset, iomap);
	}
	return  ntfs_write_iomap_begin_non_resident(inode, offset, length, flags,
						    iomap, ntfs_iomap_flags);
}
767 
/* ->iomap_begin for buffered writes. */
static int ntfs_write_iomap_begin(struct inode *inode, loff_t offset,
				  loff_t length, unsigned int flags,
				  struct iomap *iomap, struct iomap *srcmap)
{
	return __ntfs_write_iomap_begin(inode, offset, length, flags,
					iomap, NTFS_IOMAP_FLAGS_BEGIN);
}
775 
/*
 * ntfs_write_iomap_end_resident - copy inline write data back into the MFT
 * @inode:	inode being written
 * @pos:	file position the write started at
 * @length:	length of the mapping
 * @written:	number of bytes actually written
 * @flags:	IOMAP flags
 * @iomap:	the INLINE iomap; ->private holds the bounce page
 *
 * Copy @written bytes from the bounce page back into the resident
 * attribute value and mark the MFT record dirty.  The bounce page that
 * ntfs_write_iomap_begin_resident() allocated is released here on every
 * path, success or failure.
 *
 * Return: @written on success, negative error code on failure.
 */
static int ntfs_write_iomap_end_resident(struct inode *inode, loff_t pos,
					 loff_t length, ssize_t written,
					 unsigned int flags, struct iomap *iomap)
{
	struct ntfs_inode *ni = NTFS_I(inode);
	struct ntfs_attr_search_ctx *ctx;
	u32 attr_len;
	int err;
	char *kattr;
	struct page *ipage = iomap->private;

	mutex_lock(&ni->mrec_lock);
	ctx = ntfs_attr_get_search_ctx(ni, NULL);
	if (!ctx) {
		/*
		 * Fix: this path previously returned without dropping the
		 * bounce page, leaking it on every failed context alloc.
		 */
		put_page(ipage);
		mutex_unlock(&ni->mrec_lock);
		return -ENOMEM;
	}

	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			       CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (err) {
		/* A missing attribute at this point means corruption. */
		if (err == -ENOENT)
			err = -EIO;
		written = err;
		goto err_out;
	}

	/* The total length of the attribute value. */
	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
	/* Skip the copy-back if the attribute shrank under us (truncate race). */
	if (pos >= attr_len || pos + written > attr_len)
		goto err_out;

	kattr = (u8 *)ctx->attr + le16_to_cpu(ctx->attr->data.resident.value_offset);
	memcpy(kattr + pos, iomap_inline_data(iomap, pos), written);
	mark_mft_record_dirty(ctx->ntfs_ino);
err_out:
	ntfs_attr_put_search_ctx(ctx);
	put_page(ipage);
	mutex_unlock(&ni->mrec_lock);
	return written;
}
819 
/* ->iomap_end for writes: INLINE (resident) mappings need a copy-back. */
static int ntfs_write_iomap_end(struct inode *inode, loff_t pos, loff_t length,
				ssize_t written, unsigned int flags,
				struct iomap *iomap)
{
	if (iomap->type != IOMAP_INLINE)
		return written;

	return ntfs_write_iomap_end_resident(inode, pos, length, written,
					     flags, iomap);
}
829 
/* iomap ops for buffered writes. */
const struct iomap_ops ntfs_write_iomap_ops = {
	.iomap_begin		= ntfs_write_iomap_begin,
	.iomap_end		= ntfs_write_iomap_end,
};
834 
/* ->iomap_begin for page_mkwrite faults. */
static int ntfs_page_mkwrite_iomap_begin(struct inode *inode, loff_t offset,
				  loff_t length, unsigned int flags,
				  struct iomap *iomap, struct iomap *srcmap)
{
	return __ntfs_write_iomap_begin(inode, offset, length, flags,
					iomap, NTFS_IOMAP_FLAGS_MKWRITE);
}
842 
/* iomap ops for page_mkwrite faults. */
const struct iomap_ops ntfs_page_mkwrite_iomap_ops = {
	.iomap_begin		= ntfs_page_mkwrite_iomap_begin,
	.iomap_end		= ntfs_write_iomap_end,
};
847 
/* ->iomap_begin for direct I/O writes. */
static int ntfs_dio_iomap_begin(struct inode *inode, loff_t offset,
				  loff_t length, unsigned int flags,
				  struct iomap *iomap, struct iomap *srcmap)
{
	return __ntfs_write_iomap_begin(inode, offset, length, flags,
					iomap, NTFS_IOMAP_FLAGS_DIO);
}
855 
/* iomap ops for direct I/O. */
const struct iomap_ops ntfs_dio_iomap_ops = {
	.iomap_begin		= ntfs_dio_iomap_begin,
	.iomap_end		= ntfs_write_iomap_end,
};
860 
/*
 * ntfs_writeback_range - map a folio range and queue it for writeback
 *
 * If the mapping cached in @wpc does not cover @offset, build a fresh
 * writeback mapping reaching up to the end of the allocated size, then
 * add the range to the current ioend.
 */
static ssize_t ntfs_writeback_range(struct iomap_writepage_ctx *wpc,
		struct folio *folio, u64 offset, unsigned int len, u64 end_pos)
{
	if (offset < wpc->iomap.offset ||
	    offset >= wpc->iomap.offset + wpc->iomap.length) {
		int error;

		error = __ntfs_write_iomap_begin(wpc->inode, offset,
				NTFS_I(wpc->inode)->allocated_size - offset,
				IOMAP_WRITE, &wpc->iomap,
				NTFS_IOMAP_FLAGS_WRITEBACK);
		if (error)
			return error;
	}

	return iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
}
878 
/* Writeback ops wiring NTFS mapping into the generic iomap ioend path. */
const struct iomap_writeback_ops ntfs_writeback_ops = {
	.writeback_range	= ntfs_writeback_range,
	.writeback_submit	= iomap_ioend_writeback_submit,
};
883