1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * NTFS kernel file operations.
4 *
5 * Copyright (c) 2001-2015 Anton Altaparmakov and Tuxera Inc.
6 * Copyright (c) 2025 LG Electronics Co., Ltd.
7 */
8
9 #include <linux/writeback.h>
10 #include <linux/blkdev.h>
11 #include <linux/fs.h>
12 #include <linux/iomap.h>
13 #include <linux/uio.h>
14 #include <linux/posix_acl.h>
15 #include <linux/posix_acl_xattr.h>
16 #include <linux/compat.h>
17 #include <linux/falloc.h>
18
19 #include "lcnalloc.h"
20 #include "ntfs.h"
21 #include "reparse.h"
22 #include "ea.h"
23 #include "iomap.h"
24 #include "bitmap.h"
25
26 #include <linux/filelock.h>
27
28 /*
29 * ntfs_file_open - called when an inode is about to be opened
30 * @vi: inode to be opened
31 * @filp: file structure describing the inode
32 *
33 * Limit file size to the page cache limit on architectures where unsigned long
34 * is 32-bits. This is the most we can do for now without overflowing the page
35 * cache page index. Doing it this way means we don't run into problems because
36 * of existing too large files. It would be better to allow the user to read
37 * the beginning of the file but I doubt very much anyone is going to hit this
38 * check on a 32-bit architecture, so there is no point in adding the extra
39 * complexity required to support this.
40 *
41 * On 64-bit architectures, the check is hopefully optimized away by the
42 * compiler.
43 *
44 * After the check passes, just call generic_file_open() to do its work.
45 */
ntfs_file_open(struct inode * vi,struct file * filp)46 static int ntfs_file_open(struct inode *vi, struct file *filp)
47 {
48 struct ntfs_inode *ni = NTFS_I(vi);
49
50 if (NVolShutdown(ni->vol))
51 return -EIO;
52
53 if (sizeof(unsigned long) < 8) {
54 if (i_size_read(vi) > MAX_LFS_FILESIZE)
55 return -EOVERFLOW;
56 }
57
58 filp->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT;
59
60 return generic_file_open(vi, filp);
61 }
62
63 /*
64 * Trim preallocated space on file release.
65 *
66 * When the preallo_size mount option is set (default 64KB), writes extend
67 * allocated_size and runlist in units of preallocated size to reduce
68 * runlist merge overhead for small writes. This can leave
69 * allocated_size > data_size if not all preallocated space is used.
70 *
71 * We perform the trim here because ->release() is called only when
72 * the file is no longer open. At this point, no further writes can occur,
73 * so it is safe to reclaim the unused preallocated space.
74 *
75 * Returns 0 on success, or negative error on failure.
76 */
/*
 * ntfs_trim_prealloc - release unused preallocated space past EOF
 * @vi: VFS inode whose attribute is to be trimmed
 *
 * Walks the cached runlist backwards and drops trailing LCN_HOLE runs
 * that lie entirely at or beyond the cluster-aligned data_size, then
 * shrinks allocated_size accordingly and rewrites the on-disk mapping
 * pairs.
 *
 * Returns 0 on success (including when nothing needed trimming),
 * negative errno on failure.
 */
static int ntfs_trim_prealloc(struct inode *vi)
{
	struct ntfs_inode *ni = NTFS_I(vi);
	struct ntfs_volume *vol = ni->vol;
	struct runlist_element *rl;
	s64 aligned_data_size;
	s64 vcn_ds, vcn_tr;
	ssize_t rc;
	int err = 0;

	inode_lock(vi);
	mutex_lock(&ni->mrec_lock);
	down_write(&ni->runlist.lock);

	/* Nothing allocated beyond the cluster-aligned data size? */
	aligned_data_size = round_up(ni->data_size, vol->cluster_size);
	if (aligned_data_size >= ni->allocated_size)
		goto out_unlock;

	vcn_ds = ntfs_bytes_to_cluster(vol, aligned_data_size);
	vcn_tr = -1;
	/* Skip the final runlist element (presumably a terminator). */
	rc = ni->runlist.count - 2;
	rl = ni->runlist.rl;

	/*
	 * Scan trailing hole runs whose start VCN is at or past the end
	 * of data; vcn_tr ends at the lowest VCN that can be cut off.
	 */
	while (rc >= 0 && rl[rc].lcn == LCN_HOLE && vcn_ds <= rl[rc].vcn) {
		vcn_tr = rl[rc].vcn;
		rc--;
	}

	if (vcn_tr >= 0) {
		err = ntfs_rl_truncate_nolock(vol, &ni->runlist, vcn_tr);
		if (err) {
			/* Runlist is now untrustworthy; drop the cache. */
			kvfree(ni->runlist.rl);
			ni->runlist.rl = NULL;
			ntfs_error(vol->sb, "Preallocated block rollback failed");
		} else {
			ni->allocated_size = ntfs_cluster_to_bytes(vol, vcn_tr);
			/* Persist the shortened runlist in the MFT record. */
			err = ntfs_attr_update_mapping_pairs(ni, 0);
			if (err)
				ntfs_error(vol->sb,
					"Failed to rollback mapping pairs for prealloc");
		}
	}

out_unlock:
	up_write(&ni->runlist.lock);
	mutex_unlock(&ni->mrec_lock);
	inode_unlock(vi);

	return err;
}
127
/*
 * ->release(): invoked when the last reference to the open file is
 * dropped.  Reclaim unused preallocated clusters; compressed files are
 * left untouched.
 */
static int ntfs_file_release(struct inode *vi, struct file *filp)
{
	if (NInoCompressed(NTFS_I(vi)))
		return 0;

	return ntfs_trim_prealloc(vi);
}
135
136 /*
137 * ntfs_file_fsync - sync a file to disk
138 * @filp: file to be synced
139 * @start: start offset to be synced
140 * @end: end offset to be synced
141 * @datasync: if non-zero only flush user data and not metadata
142 *
143 * Data integrity sync of a file to disk. Used for fsync, fdatasync, and msync
144 * system calls. This function is inspired by fs/buffer.c::file_fsync().
145 *
146 * If @datasync is false, write the mft record and all associated extent mft
147 * records as well as the $DATA attribute and then sync the block device.
148 *
149 * If @datasync is true and the attribute is non-resident, we skip the writing
150 * of the mft record and all associated extent mft records (this might still
151 * happen due to the write_inode_now() call).
152 *
153 * Also, if @datasync is true, we do not wait on the inode to be written out
154 * but we always wait on the page cache pages to be written out.
155 */
static int ntfs_file_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct inode *vi = filp->f_mapping->host;
	struct ntfs_inode *ni = NTFS_I(vi);
	struct ntfs_volume *vol = ni->vol;
	int err, ret = 0;
	struct inode *parent_vi, *ia_vi;
	struct ntfs_attr_search_ctx *ctx;

	ntfs_debug("Entering for inode 0x%llx.", ni->mft_no);

	if (NVolShutdown(vol))
		return -EIO;

	/* Write back and wait on the data pages in the requested range. */
	err = file_write_and_wait_range(filp, start, end);
	if (err)
		return err;

	/*
	 * For a full fsync, or when the attribute is resident (data lives
	 * inside the mft record itself), write the mft record too.
	 */
	if (!datasync || !NInoNonResident(NTFS_I(vi)))
		ret = __ntfs_write_inode(vi, 1);
	write_inode_now(vi, !datasync);

	ctx = ntfs_attr_get_search_ctx(ni, NULL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock_nested(&ni->mrec_lock, NTFS_INODE_MUTEX_NORMAL_CHILD);
	/* Walk every attribute in the mft record. */
	while (!(err = ntfs_attr_lookup(AT_UNUSED, NULL, 0, 0, 0, NULL, 0, ctx))) {
		if (ctx->attr->type == AT_FILE_NAME) {
			/*
			 * Sync the parent directory ($I30 index inode and
			 * the directory inode itself) so the directory
			 * entry for this file reaches disk as well.
			 */
			struct file_name_attr *fn = (struct file_name_attr *)((u8 *)ctx->attr +
					le16_to_cpu(ctx->attr->data.resident.value_offset));

			parent_vi = ntfs_iget(vi->i_sb, MREF_LE(fn->parent_directory));
			if (IS_ERR(parent_vi))
				continue;
			mutex_lock_nested(&NTFS_I(parent_vi)->mrec_lock, NTFS_INODE_MUTEX_NORMAL);
			ia_vi = ntfs_index_iget(parent_vi, I30, 4);
			mutex_unlock(&NTFS_I(parent_vi)->mrec_lock);
			if (IS_ERR(ia_vi)) {
				iput(parent_vi);
				continue;
			}
			write_inode_now(ia_vi, 1);
			iput(ia_vi);
			write_inode_now(parent_vi, 1);
			iput(parent_vi);
		} else if (ctx->attr->non_resident) {
			struct inode *attr_vi;
			__le16 *name;

			name = (__le16 *)((u8 *)ctx->attr + le16_to_cpu(ctx->attr->name_offset));
			/* The unnamed $DATA attribute was already flushed above. */
			if (ctx->attr->type == AT_DATA && ctx->attr->name_length == 0)
				continue;

			attr_vi = ntfs_attr_iget(vi, ctx->attr->type,
					name, ctx->attr->name_length);
			if (IS_ERR(attr_vi))
				continue;
			/* Only flush attribute inodes that have dirty pages. */
			spin_lock(&attr_vi->i_lock);
			if (inode_state_read_once(attr_vi) & I_DIRTY_PAGES) {
				spin_unlock(&attr_vi->i_lock);
				filemap_write_and_wait(attr_vi->i_mapping);
			} else
				spin_unlock(&attr_vi->i_lock);
			iput(attr_vi);
		}
	}
	mutex_unlock(&ni->mrec_lock);
	ntfs_attr_put_search_ctx(ctx);

	/* Push out the core metadata inodes: $MFT bitmap, $Bitmap, $MFT. */
	write_inode_now(vol->mftbmp_ino, 1);
	down_write(&vol->lcnbmp_lock);
	write_inode_now(vol->lcnbmp_ino, 1);
	up_write(&vol->lcnbmp_lock);
	write_inode_now(vol->mft_ino, 1);

	/*
	 * NOTE: If we were to use mapping->private_list (see ext2 and
	 * fs/buffer.c) for dirty blocks then we could optimize the below to be
	 * sync_mapping_buffers(vi->i_mapping).
	 */
	err = sync_blockdev(vi->i_sb->s_bdev);
	if (unlikely(err && !ret))
		ret = err;
	if (likely(!ret))
		ntfs_debug("Done.");
	else
		ntfs_warning(vi->i_sb,
			"Failed to f%ssync inode 0x%llx. Error %u.",
			datasync ? "data" : "", ni->mft_no, -ret);
	/* On success, flush the device write cache for durability. */
	if (!ret)
		blkdev_issue_flush(vi->i_sb->s_bdev);
	return ret;
}
251
/*
 * ntfs_setattr_size - change an inode's size (truncate or extend)
 * @vi:   inode whose size is being changed
 * @attr: iattr holding the new size in ia_size
 *
 * Rejects compressed and encrypted files.  If the NTFS-level truncate
 * fails, the VFS-visible size is restored to its previous value.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int ntfs_setattr_size(struct inode *vi, struct iattr *attr)
{
	struct ntfs_inode *ni = NTFS_I(vi);
	int err;
	loff_t old_size = vi->i_size;

	if (NInoCompressed(ni) || NInoEncrypted(ni)) {
		ntfs_warning(vi->i_sb,
			"Changes in inode size are not supported yet for %s files, ignoring.",
			NInoCompressed(ni) ? "compressed" : "encrypted");
		return -EOPNOTSUPP;
	}

	err = inode_newsize_ok(vi, attr->ia_size);
	if (err)
		return err;

	/* Wait for in-flight direct I/O before changing the size. */
	inode_dio_wait(vi);
	truncate_setsize(vi, attr->ia_size);
	err = ntfs_truncate_vfs(vi, attr->ia_size, old_size);
	if (err) {
		/* NTFS truncate failed: roll the VFS size back. */
		i_size_write(vi, old_size);
		return err;
	}

	/*
	 * When extending past a non-page-aligned old EOF of a non-resident
	 * attribute, zero the tail of the old last page so stale page-cache
	 * contents are not exposed in the newly valid range.
	 */
	if (NInoNonResident(ni) && attr->ia_size > old_size &&
	    old_size % PAGE_SIZE != 0) {
		loff_t len = min_t(loff_t,
				round_up(old_size, PAGE_SIZE) - old_size,
				attr->ia_size - old_size);
		err = iomap_zero_range(vi, old_size, len,
				NULL, &ntfs_seek_iomap_ops,
				&ntfs_iomap_folio_ops, NULL);
	}

	return err;
}
289
290 /*
291 * ntfs_setattr
292 *
293 * Called from notify_change() when an attribute is being changed.
294 *
295 * NOTE: Changes in inode size are not supported yet for compressed or
296 * encrypted files.
297 */
int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		struct iattr *attr)
{
	struct inode *vi = d_inode(dentry);
	int err;
	unsigned int ia_valid = attr->ia_valid;
	struct ntfs_inode *ni = NTFS_I(vi);
	struct ntfs_volume *vol = ni->vol;

	if (NVolShutdown(vol))
		return -EIO;

	/* Generic permission/limit checks first. */
	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		goto out;

	/* Mark the volume dirty before the first metadata modification. */
	if (!(vol->vol_flags & VOLUME_IS_DIRTY))
		ntfs_set_volume_flags(vol, VOLUME_IS_DIRTY);

	if (ia_valid & ATTR_SIZE) {
		err = ntfs_setattr_size(vi, attr);
		if (err)
			goto out;

		/* A size change implies mtime/ctime updates below. */
		ia_valid |= ATTR_MTIME | ATTR_CTIME;
	}

	setattr_copy(idmap, vi, attr);

	if (vol->sb->s_flags & SB_POSIXACL && !S_ISLNK(vi->i_mode)) {
		err = posix_acl_chmod(idmap, dentry, vi->i_mode);
		if (err)
			goto out;
	}

	/* Mirror the Unix write bits into the NTFS read-only flag. */
	if (0222 & vi->i_mode)
		ni->flags &= ~FILE_ATTR_READONLY;
	else
		ni->flags |= FILE_ATTR_READONLY;

	if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE)) {
		unsigned int flags = 0;

		if (ia_valid & ATTR_UID)
			flags |= NTFS_EA_UID;
		if (ia_valid & ATTR_GID)
			flags |= NTFS_EA_GID;
		if (ia_valid & ATTR_MODE)
			flags |= NTFS_EA_MODE;

		/* Apply the configured umask before persisting the mode. */
		if (S_ISDIR(vi->i_mode))
			vi->i_mode &= ~vol->dmask;
		else
			vi->i_mode &= ~vol->fmask;

		/* Persist uid/gid/mode in the WSL-style extended attributes. */
		mutex_lock(&ni->mrec_lock);
		ntfs_ea_set_wsl_inode(vi, 0, NULL, flags);
		mutex_unlock(&ni->mrec_lock);
	}

	mark_inode_dirty(vi);
out:
	return err;
}
362
ntfs_getattr(struct mnt_idmap * idmap,const struct path * path,struct kstat * stat,unsigned int request_mask,unsigned int query_flags)363 int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
364 struct kstat *stat, unsigned int request_mask,
365 unsigned int query_flags)
366 {
367 struct inode *inode = d_backing_inode(path->dentry);
368 struct ntfs_inode *ni = NTFS_I(inode);
369
370 generic_fillattr(idmap, request_mask, inode, stat);
371
372 stat->blksize = NTFS_SB(inode->i_sb)->cluster_size;
373 stat->blocks = (((u64)NTFS_I(inode)->i_dealloc_clusters <<
374 NTFS_SB(inode->i_sb)->cluster_size_bits) >> 9) + inode->i_blocks;
375 stat->result_mask |= STATX_BTIME;
376 stat->btime = NTFS_I(inode)->i_crtime;
377
378 if (NInoCompressed(ni))
379 stat->attributes |= STATX_ATTR_COMPRESSED;
380
381 if (NInoEncrypted(ni))
382 stat->attributes |= STATX_ATTR_ENCRYPTED;
383
384 if (inode->i_flags & S_IMMUTABLE)
385 stat->attributes |= STATX_ATTR_IMMUTABLE;
386
387 if (inode->i_flags & S_APPEND)
388 stat->attributes |= STATX_ATTR_APPEND;
389
390 stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED |
391 STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND;
392
393 /*
394 * If it's a compressed or encrypted file, NTFS currently
395 * does not support DIO. For normal files, we report the bdev
396 * logical block size.
397 */
398 if (request_mask & STATX_DIOALIGN && S_ISREG(inode->i_mode)) {
399 unsigned int align =
400 bdev_logical_block_size(inode->i_sb->s_bdev);
401
402 stat->result_mask |= STATX_DIOALIGN;
403 if (!NInoCompressed(ni) && !NInoEncrypted(ni)) {
404 stat->dio_mem_align = align;
405 stat->dio_offset_align = align;
406 }
407 }
408
409 return 0;
410 }
411
/*
 * ->llseek(): SEEK_HOLE/SEEK_DATA go through iomap under the shared
 * inode lock; every other whence is handled generically.
 */
static loff_t ntfs_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;

	if (whence != SEEK_HOLE && whence != SEEK_DATA)
		return generic_file_llseek_size(file, offset, whence,
						inode->i_sb->s_maxbytes,
						i_size_read(inode));

	inode_lock_shared(inode);
	if (whence == SEEK_HOLE)
		offset = iomap_seek_hole(inode, offset, &ntfs_seek_iomap_ops);
	else
		offset = iomap_seek_data(inode, offset, &ntfs_seek_iomap_ops);
	inode_unlock_shared(inode);

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
436
ntfs_file_read_iter(struct kiocb * iocb,struct iov_iter * to)437 static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
438 {
439 struct inode *vi = file_inode(iocb->ki_filp);
440 struct super_block *sb = vi->i_sb;
441 ssize_t ret;
442
443 if (NVolShutdown(NTFS_SB(sb)))
444 return -EIO;
445
446 if (NInoCompressed(NTFS_I(vi)) && iocb->ki_flags & IOCB_DIRECT)
447 return -EOPNOTSUPP;
448
449 inode_lock_shared(vi);
450
451 if (iocb->ki_flags & IOCB_DIRECT) {
452 size_t count = iov_iter_count(to);
453
454 if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
455 ret = -EINVAL;
456 goto inode_unlock;
457 }
458
459 file_accessed(iocb->ki_filp);
460 ret = iomap_dio_rw(iocb, to, &ntfs_read_iomap_ops, NULL, 0,
461 NULL, 0);
462 } else {
463 ret = generic_file_read_iter(iocb, to);
464 }
465
466 inode_unlock:
467 inode_unlock_shared(vi);
468
469 return ret;
470 }
471
/*
 * DIO write completion: extend the VFS-visible file size when the
 * write went past the current EOF.
 */
static int ntfs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size,
		int error, unsigned int flags)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t new_end;

	if (error)
		return error;
	if (!size)
		return 0;

	new_end = iocb->ki_pos + size;
	if (i_size_read(inode) < new_end) {
		i_size_write(inode, new_end);
		mark_inode_dirty(inode);
	}

	return 0;
}
489
/* iomap DIO hooks for writes; only completion handling is needed. */
static const struct iomap_dio_ops ntfs_write_dio_ops = {
	.end_io = ntfs_file_write_dio_end_io,
};
493
/*
 * ntfs_dio_write_iter - O_DIRECT write with buffered fallback
 * @iocb: kiocb describing the write
 * @from: source data
 *
 * Issues the write via iomap direct I/O.  Whatever iomap could not
 * write directly is completed through the page cache, after which the
 * dirtied range is flushed and dropped from the cache to approximate
 * O_DIRECT semantics for the whole request.
 *
 * Returns total bytes written or negative errno.
 */
static ssize_t ntfs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;

	ret = iomap_dio_rw(iocb, from, &ntfs_dio_iomap_ops,
			&ntfs_write_dio_ops, 0, NULL, 0);
	/* -ENOTBLK means "retry buffered"; treat as zero bytes done. */
	if (ret == -ENOTBLK)
		ret = 0;
	else if (ret < 0)
		goto out;

	if (iov_iter_count(from)) {
		loff_t offset, end;
		ssize_t written;
		int ret2;

		/* Finish the remainder through the page cache. */
		offset = iocb->ki_pos;
		iocb->ki_flags &= ~IOCB_DIRECT;
		written = iomap_file_buffered_write(iocb, from,
				&ntfs_write_iomap_ops, &ntfs_iomap_folio_ops,
				NULL);
		if (written < 0) {
			ret = written;
			goto out;
		}

		ret += written;
		/*
		 * Flush and invalidate the pages we just dirtied so the
		 * page cache does not keep stale copies of DIO data.
		 */
		end = iocb->ki_pos + written - 1;
		ret2 = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
				offset, end);
		if (ret2) {
			ret = -EIO;
			goto out;
		}
		invalidate_mapping_pages(iocb->ki_filp->f_mapping,
				offset >> PAGE_SHIFT,
				end >> PAGE_SHIFT);
	}

out:
	return ret;
}
536
/*
 * ->write_iter(): main write entry point.
 *
 * Dispatches to the compressed-write, direct-I/O or buffered path.
 * On failure, the inode's data_size and initialized_size are rolled
 * back to their values from before the write started.
 */
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *vi = file->f_mapping->host;
	struct ntfs_inode *ni = NTFS_I(vi);
	struct ntfs_volume *vol = ni->vol;
	ssize_t ret;
	ssize_t count;
	loff_t pos;
	int err;
	loff_t old_data_size, old_init_size;

	if (NVolShutdown(vol))
		return -EIO;

	if (NInoEncrypted(ni)) {
		ntfs_error(vi->i_sb, "Writing for %s files is not supported yet",
			NInoCompressed(ni) ? "Compressed" : "Encrypted");
		return -EOPNOTSUPP;
	}

	/* O_DIRECT writes to compressed files are not supported. */
	if (NInoCompressed(ni) && iocb->ki_flags & IOCB_DIRECT)
		return -EOPNOTSUPP;

	/* Honour RWF_NOWAIT: never block on the inode lock. */
	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(vi))
			return -EAGAIN;
	} else
		inode_lock(vi);

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out_lock;

	err = file_modified(iocb->ki_filp);
	if (err) {
		ret = err;
		goto out_lock;
	}

	/* Mark the volume dirty before modifying anything. */
	if (!(vol->vol_flags & VOLUME_IS_DIRTY))
		ntfs_set_volume_flags(vol, VOLUME_IS_DIRTY);

	pos = iocb->ki_pos;
	count = ret;

	/* Remember sizes so a failed write can be rolled back. */
	old_data_size = ni->data_size;
	old_init_size = ni->initialized_size;

	if (NInoNonResident(ni) && NInoCompressed(ni)) {
		ret = ntfs_compress_write(ni, pos, count, from);
		if (ret > 0)
			iocb->ki_pos += ret;
		goto out;
	}

	if (NInoNonResident(ni) && iocb->ki_flags & IOCB_DIRECT)
		ret = ntfs_dio_write_iter(iocb, from);
	else
		ret = iomap_file_buffered_write(iocb, from, &ntfs_write_iomap_ops,
				&ntfs_iomap_folio_ops, NULL);
out:
	/* On failure (other than queued AIO), undo any size extension. */
	if (ret < 0 && ret != -EIOCBQUEUED) {
		if (ni->initialized_size != old_init_size) {
			mutex_lock(&ni->mrec_lock);
			ntfs_attr_set_initialized_size(ni, old_init_size);
			mutex_unlock(&ni->mrec_lock);
		}
		if (ni->data_size != old_data_size) {
			truncate_setsize(vi, old_data_size);
			ntfs_attr_truncate(ni, old_data_size);
		}
	}
out_lock:
	inode_unlock(vi);
	/* Handle O_SYNC/O_DSYNC after dropping the inode lock. */
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
616
ntfs_filemap_page_mkwrite(struct vm_fault * vmf)617 static vm_fault_t ntfs_filemap_page_mkwrite(struct vm_fault *vmf)
618 {
619 struct inode *inode = file_inode(vmf->vma->vm_file);
620 vm_fault_t ret;
621
622 sb_start_pagefault(inode->i_sb);
623 file_update_time(vmf->vma->vm_file);
624
625 ret = iomap_page_mkwrite(vmf, &ntfs_page_mkwrite_iomap_ops, NULL);
626 sb_end_pagefault(inode->i_sb);
627 return ret;
628 }
629
/* VMA operations for mmap'ed NTFS files. */
static const struct vm_operations_struct ntfs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = ntfs_filemap_page_mkwrite,
};
635
ntfs_file_mmap_prepare(struct vm_area_desc * desc)636 static int ntfs_file_mmap_prepare(struct vm_area_desc *desc)
637 {
638 struct file *file = desc->file;
639 struct inode *inode = file_inode(file);
640
641 if (NVolShutdown(NTFS_SB(file->f_mapping->host->i_sb)))
642 return -EIO;
643
644 if (NInoCompressed(NTFS_I(inode)))
645 return -EOPNOTSUPP;
646
647 if (vma_desc_test_all(desc, VMA_SHARED_BIT, VMA_MAYWRITE_BIT)) {
648 struct inode *inode = file_inode(file);
649 loff_t from, to;
650 int err;
651
652 from = ((loff_t)desc->pgoff << PAGE_SHIFT);
653 to = min_t(loff_t, i_size_read(inode),
654 from + desc->end - desc->start);
655
656 if (NTFS_I(inode)->initialized_size < to) {
657 err = ntfs_extend_initialized_size(inode, to, to, false);
658 if (err)
659 return err;
660 }
661 }
662
663
664 file_accessed(file);
665 desc->vm_ops = &ntfs_file_vm_ops;
666 return 0;
667 }
668
/* ->fiemap(): report extents straight from the read iomap mapping. */
static int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return iomap_fiemap(inode, fieinfo, start, len, &ntfs_read_iomap_ops);
}
674
ntfs_get_link(struct dentry * dentry,struct inode * inode,struct delayed_call * done)675 static const char *ntfs_get_link(struct dentry *dentry, struct inode *inode,
676 struct delayed_call *done)
677 {
678 if (!NTFS_I(inode)->target)
679 return ERR_PTR(-EINVAL);
680
681 return NTFS_I(inode)->target;
682 }
683
ntfs_file_splice_read(struct file * in,loff_t * ppos,struct pipe_inode_info * pipe,size_t len,unsigned int flags)684 static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
685 struct pipe_inode_info *pipe, size_t len, unsigned int flags)
686 {
687 if (NVolShutdown(NTFS_SB(in->f_mapping->host->i_sb)))
688 return -EIO;
689
690 return filemap_splice_read(in, ppos, pipe, len, flags);
691 }
692
ntfs_ioctl_shutdown(struct super_block * sb,unsigned long arg)693 static int ntfs_ioctl_shutdown(struct super_block *sb, unsigned long arg)
694 {
695 u32 flags;
696
697 if (!capable(CAP_SYS_ADMIN))
698 return -EPERM;
699
700 if (get_user(flags, (__u32 __user *)arg))
701 return -EFAULT;
702
703 return ntfs_force_shutdown(sb, flags);
704 }
705
ntfs_ioctl_get_volume_label(struct file * filp,unsigned long arg)706 static int ntfs_ioctl_get_volume_label(struct file *filp, unsigned long arg)
707 {
708 struct ntfs_volume *vol = NTFS_SB(file_inode(filp)->i_sb);
709 char __user *buf = (char __user *)arg;
710
711 if (!vol->volume_label) {
712 if (copy_to_user(buf, "", 1))
713 return -EFAULT;
714 } else if (copy_to_user(buf, vol->volume_label,
715 MIN(FSLABEL_MAX, strlen(vol->volume_label) + 1)))
716 return -EFAULT;
717 return 0;
718 }
719
ntfs_ioctl_set_volume_label(struct file * filp,unsigned long arg)720 static int ntfs_ioctl_set_volume_label(struct file *filp, unsigned long arg)
721 {
722 struct ntfs_volume *vol = NTFS_SB(file_inode(filp)->i_sb);
723 char *label;
724 int ret;
725
726 if (!capable(CAP_SYS_ADMIN))
727 return -EPERM;
728
729 label = strndup_user((const char __user *)arg, FSLABEL_MAX);
730 if (IS_ERR(label))
731 return PTR_ERR(label);
732
733 ret = mnt_want_write_file(filp);
734 if (ret)
735 goto out;
736
737 ret = ntfs_write_volume_label(vol, label);
738 mnt_drop_write_file(filp);
739 out:
740 kfree(label);
741 return ret;
742 }
743
ntfs_ioctl_fitrim(struct ntfs_volume * vol,unsigned long arg)744 static int ntfs_ioctl_fitrim(struct ntfs_volume *vol, unsigned long arg)
745 {
746 struct fstrim_range __user *user_range;
747 struct fstrim_range range;
748 struct block_device *dev;
749 int err;
750
751 if (!capable(CAP_SYS_ADMIN))
752 return -EPERM;
753
754 dev = vol->sb->s_bdev;
755 if (!bdev_max_discard_sectors(dev))
756 return -EOPNOTSUPP;
757
758 user_range = (struct fstrim_range __user *)arg;
759 if (copy_from_user(&range, user_range, sizeof(range)))
760 return -EFAULT;
761
762 if (range.len == 0)
763 return -EINVAL;
764
765 if (range.len < vol->cluster_size)
766 return -EINVAL;
767
768 range.minlen = max_t(u32, range.minlen, bdev_discard_granularity(dev));
769
770 err = ntfs_trim_fs(vol, &range);
771 if (err < 0)
772 return err;
773
774 if (copy_to_user(user_range, &range, sizeof(range)))
775 return -EFAULT;
776
777 return 0;
778 }
779
ntfs_ioctl(struct file * filp,unsigned int cmd,unsigned long arg)780 long ntfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
781 {
782 switch (cmd) {
783 case FS_IOC_SHUTDOWN:
784 return ntfs_ioctl_shutdown(file_inode(filp)->i_sb, arg);
785 case FS_IOC_GETFSLABEL:
786 return ntfs_ioctl_get_volume_label(filp, arg);
787 case FS_IOC_SETFSLABEL:
788 return ntfs_ioctl_set_volume_label(filp, arg);
789 case FITRIM:
790 return ntfs_ioctl_fitrim(NTFS_SB(file_inode(filp)->i_sb), arg);
791 default:
792 return -ENOTTY;
793 }
794 }
795
796 #ifdef CONFIG_COMPAT
/* Compat (32-bit userspace) ioctl: translate the pointer argument. */
long ntfs_compat_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
802 #endif
803
/*
 * ntfs_allocate_range - FALLOC_FL_ALLOCATE_RANGE/KEEP_SIZE backend
 * @ni:     ntfs inode to allocate clusters for
 * @mode:   fallocate mode flags (only KEEP_SIZE is consulted here)
 * @offset: byte offset of the range
 * @len:    byte length of the range
 *
 * Preallocates clusters for [offset, offset+len); without KEEP_SIZE
 * the VFS-visible file size is extended afterwards.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int ntfs_allocate_range(struct ntfs_inode *ni, int mode, loff_t offset,
		loff_t len)
{
	struct inode *vi = VFS_I(ni);
	struct ntfs_volume *vol = ni->vol;
	s64 need_space;
	loff_t old_size, new_size;
	s64 start_vcn, end_vcn;
	int err;

	old_size = i_size_read(vi);
	new_size = max_t(loff_t, old_size, offset + len);
	start_vcn = ntfs_bytes_to_cluster(vol, offset);
	end_vcn = ntfs_bytes_to_cluster(vol, offset + len - 1) + 1;

	err = inode_newsize_ok(vi, new_size);
	if (err)
		goto out;

	/*
	 * Estimate the clusters that actually have to be allocated: only
	 * the portion of [start_vcn, end_vcn) lying beyond the currently
	 * allocated size counts.
	 */
	need_space = ntfs_bytes_to_cluster(vol, ni->allocated_size);
	if (need_space > start_vcn)
		need_space = end_vcn - need_space;
	else
		need_space = end_vcn - start_vcn;
	/* Fail fast if the volume clearly lacks the free clusters. */
	if (need_space > 0 &&
	    need_space > (atomic64_read(&vol->free_clusters) -
			  atomic64_read(&vol->dirty_clusters))) {
		err = -ENOSPC;
		goto out;
	}

	err = ntfs_attr_fallocate(ni, offset, len,
			mode & FALLOC_FL_KEEP_SIZE ? true : false);

	/* Without KEEP_SIZE, expose the new data size to the VFS. */
	if (!(mode & FALLOC_FL_KEEP_SIZE) && new_size != old_size)
		i_size_write(vi, ni->data_size);
out:
	return err;
}
843
/*
 * ntfs_punch_hole - FALLOC_FL_PUNCH_HOLE backend
 * @ni:     ntfs inode to punch a hole into
 * @mode:   fallocate mode flags (unused beyond selection)
 * @offset: byte offset of the hole
 * @len:    byte length of the hole
 *
 * Partial clusters at either end of the range are zeroed through the
 * page cache (iomap); only the whole clusters in between are actually
 * deallocated.  Requires sparse support on the volume.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int ntfs_punch_hole(struct ntfs_inode *ni, int mode, loff_t offset,
		loff_t len)
{
	struct ntfs_volume *vol = ni->vol;
	struct inode *vi = VFS_I(ni);
	loff_t end_offset;
	s64 start_vcn, end_vcn;
	int err = 0;

	/* Round down to both cluster and page granularity for writeback. */
	loff_t offset_down = round_down(offset, max_t(unsigned int,
				vol->cluster_size, PAGE_SIZE));

	if (NVolDisableSparse(vol)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* Hole entirely past EOF: nothing to do. */
	if (offset >= ni->data_size)
		goto out;

	/* Clamp the hole to the end of data. */
	if (offset + len > ni->data_size)
		end_offset = ni->data_size;
	else
		end_offset = offset + len;

	/* Flush and drop the affected page-cache range first. */
	err = filemap_write_and_wait_range(vi->i_mapping, offset_down, LLONG_MAX);
	if (err)
		goto out;
	truncate_pagecache(vi, offset_down);

	start_vcn = ntfs_bytes_to_cluster(vol, offset);
	end_vcn = ntfs_bytes_to_cluster(vol, end_offset - 1) + 1;

	/* Zero the partial cluster at the start of the range, if any. */
	if (offset & vol->cluster_size_mask) {
		if (offset < ni->initialized_size) {
			loff_t to;

			to = min_t(loff_t,
				ntfs_cluster_to_bytes(vol, start_vcn + 1),
				end_offset);
			err = iomap_zero_range(vi, offset, to - offset,
					NULL, &ntfs_seek_iomap_ops,
					&ntfs_iomap_folio_ops, NULL);
			if (err < 0)
				goto out;
		}
		/* Range was confined to that one cluster: done. */
		if (end_vcn - start_vcn == 1)
			goto out;
		start_vcn++;
	}

	/* Zero the partial cluster at the end of the range, if any. */
	if (end_offset & vol->cluster_size_mask) {
		loff_t from;

		from = ntfs_cluster_to_bytes(vol, end_vcn - 1);
		if (from < ni->initialized_size) {
			err = iomap_zero_range(vi, from, end_offset - from,
					NULL, &ntfs_seek_iomap_ops,
					&ntfs_iomap_folio_ops, NULL);
			if (err < 0)
				goto out;
		}
		if (end_vcn - start_vcn == 1)
			goto out;
		end_vcn--;
	}

	/* Deallocate the whole clusters in between. */
	mutex_lock_nested(&ni->mrec_lock, NTFS_INODE_MUTEX_NORMAL);
	err = ntfs_non_resident_attr_punch_hole(ni, start_vcn,
			end_vcn - start_vcn);
	mutex_unlock(&ni->mrec_lock);
out:
	return err;
}
918
/*
 * ntfs_collapse_range - FALLOC_FL_COLLAPSE_RANGE backend
 * @ni:     ntfs inode to operate on
 * @offset: byte offset of the range to remove (cluster aligned)
 * @len:    byte length of the range to remove (cluster aligned)
 *
 * Removes [offset, offset+len) and shifts the following data down,
 * shrinking the file.  Both offset and len must be cluster aligned and
 * offset must lie within the allocated size.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int ntfs_collapse_range(struct ntfs_inode *ni, loff_t offset, loff_t len)
{
	struct ntfs_volume *vol = ni->vol;
	struct inode *vi = VFS_I(ni);
	loff_t old_size, new_size;
	s64 start_vcn, end_vcn;
	int err;

	/* Round down to both cluster and page granularity for writeback. */
	loff_t offset_down = round_down(offset,
			max_t(unsigned long, vol->cluster_size, PAGE_SIZE));

	if ((offset & vol->cluster_size_mask) ||
	    (len & vol->cluster_size_mask) ||
	    offset >= ni->allocated_size) {
		err = -EINVAL;
		goto out;
	}

	old_size = i_size_read(vi);
	start_vcn = ntfs_bytes_to_cluster(vol, offset);
	end_vcn = ntfs_bytes_to_cluster(vol, offset + len - 1) + 1;

	/* Clamp the collapsed range to the allocated size. */
	if (ntfs_cluster_to_bytes(vol, end_vcn) > ni->allocated_size)
		end_vcn = (round_up(ni->allocated_size - 1,
				vol->cluster_size) >> vol->cluster_size_bits) + 1;
	new_size = old_size - ntfs_cluster_to_bytes(vol, end_vcn - start_vcn);
	if (new_size < 0)
		new_size = 0;
	/* Flush and drop the page cache from the affected offset onward. */
	err = filemap_write_and_wait_range(vi->i_mapping,
			offset_down, LLONG_MAX);
	if (err)
		goto out;

	truncate_pagecache(vi, offset_down);

	mutex_lock_nested(&ni->mrec_lock, NTFS_INODE_MUTEX_NORMAL);
	err = ntfs_non_resident_attr_collapse_range(ni, start_vcn,
			end_vcn - start_vcn);
	mutex_unlock(&ni->mrec_lock);

	if (new_size != old_size)
		i_size_write(vi, ni->data_size);
out:
	return err;
}
964
/*
 * ntfs_insert_range - FALLOC_FL_INSERT_RANGE backend
 * @ni:     ntfs inode to operate on
 * @offset: byte offset at which to insert the hole (cluster aligned)
 * @len:    byte length of the inserted hole (cluster aligned)
 *
 * Inserts a hole at @offset, shifting existing data up and growing the
 * file.  Both offset and len must be cluster aligned, offset must lie
 * within the allocated size, and the volume must support sparse files.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int ntfs_insert_range(struct ntfs_inode *ni, loff_t offset, loff_t len)
{
	struct ntfs_volume *vol = ni->vol;
	struct inode *vi = VFS_I(ni);
	/* Round down to both cluster and page granularity for writeback. */
	loff_t offset_down = round_down(offset,
			max_t(unsigned long, vol->cluster_size, PAGE_SIZE));
	loff_t alloc_size, end_offset = offset + len;
	loff_t old_size, new_size;
	s64 start_vcn, end_vcn;
	int err;

	if (NVolDisableSparse(vol)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if ((offset & vol->cluster_size_mask) ||
	    (len & vol->cluster_size_mask) ||
	    offset >= ni->allocated_size) {
		err = -EINVAL;
		goto out;
	}

	old_size = i_size_read(vi);
	start_vcn = ntfs_bytes_to_cluster(vol, offset);
	end_vcn = ntfs_bytes_to_cluster(vol, end_offset - 1) + 1;

	new_size = old_size + ntfs_cluster_to_bytes(vol, end_vcn - start_vcn);
	alloc_size = ni->allocated_size +
		ntfs_cluster_to_bytes(vol, end_vcn - start_vcn);
	/* Signed overflow of the new allocated size means the file is too big. */
	if (alloc_size < 0) {
		err = -EFBIG;
		goto out;
	}
	err = inode_newsize_ok(vi, alloc_size);
	if (err)
		goto out;

	/* Flush and drop the page cache from the affected offset onward. */
	err = filemap_write_and_wait_range(vi->i_mapping,
			offset_down, LLONG_MAX);
	if (err)
		goto out;

	truncate_pagecache(vi, offset_down);

	mutex_lock_nested(&ni->mrec_lock, NTFS_INODE_MUTEX_NORMAL);
	err = ntfs_non_resident_attr_insert_range(ni, start_vcn,
			end_vcn - start_vcn);
	mutex_unlock(&ni->mrec_lock);

	if (new_size != old_size)
		i_size_write(vi, ni->data_size);
out:
	return err;
}
1020
1021 #define NTFS_FALLOC_FL_SUPPORTED \
1022 (FALLOC_FL_ALLOCATE_RANGE | FALLOC_FL_KEEP_SIZE | \
1023 FALLOC_FL_INSERT_RANGE | FALLOC_FL_PUNCH_HOLE | \
1024 FALLOC_FL_COLLAPSE_RANGE)
1025
/*
 * ->fallocate(): dispatch the supported FALLOC_FL_* operations.
 *
 * Ensures the whole runlist is mapped and the volume is marked dirty
 * up front, takes the inode lock (plus the invalidate lock for the
 * destructive modes), then hands off to the per-mode helper.  On
 * success the file-name attribute is marked dirty and mtime/ctime are
 * refreshed.
 */
static long ntfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *vi = file_inode(file);
	struct ntfs_inode *ni = NTFS_I(vi);
	struct ntfs_volume *vol = ni->vol;
	int err = 0;
	loff_t old_size;
	bool map_locked = false;

	if (mode & ~(NTFS_FALLOC_FL_SUPPORTED))
		return -EOPNOTSUPP;

	/* Free-cluster accounting must be settled before reserving space. */
	if (!NVolFreeClusterKnown(vol))
		wait_event(vol->free_waitq, NVolFreeClusterKnown(vol));

	if ((ni->vol->mft_zone_end - ni->vol->mft_zone_start) == 0)
		return -ENOSPC;

	/* The helpers below require a fully mapped runlist. */
	if (NInoNonResident(ni) && !NInoFullyMapped(ni)) {
		down_write(&ni->runlist.lock);
		err = ntfs_attr_map_whole_runlist(ni);
		up_write(&ni->runlist.lock);
		if (err)
			return err;
	}

	if (!(vol->vol_flags & VOLUME_IS_DIRTY)) {
		err = ntfs_set_volume_flags(vol, VOLUME_IS_DIRTY);
		if (err)
			return err;
	}

	/*
	 * NOTE(review): old_size is sampled before inode_lock is taken;
	 * confirm a concurrent size change cannot matter for the
	 * tail-zeroing decision below.
	 */
	old_size = i_size_read(vi);

	inode_lock(vi);
	if (NInoCompressed(ni) || NInoEncrypted(ni)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	inode_dio_wait(vi);
	/* Destructive modes must also exclude page faults. */
	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
		    FALLOC_FL_INSERT_RANGE)) {
		filemap_invalidate_lock(vi->i_mapping);
		map_locked = true;
	}

	switch (mode & FALLOC_FL_MODE_MASK) {
	case FALLOC_FL_ALLOCATE_RANGE:
	case FALLOC_FL_KEEP_SIZE:
		err = ntfs_allocate_range(ni, mode, offset, len);
		break;
	case FALLOC_FL_PUNCH_HOLE:
		err = ntfs_punch_hole(ni, mode, offset, len);
		break;
	case FALLOC_FL_COLLAPSE_RANGE:
		err = ntfs_collapse_range(ni, offset, len);
		break;
	case FALLOC_FL_INSERT_RANGE:
		err = ntfs_insert_range(ni, offset, len);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	if (err)
		goto out;

	err = file_modified(file);
out:
	if (map_locked)
		filemap_invalidate_unlock(vi->i_mapping);
	if (!err) {
		/*
		 * Plain extending allocation from a non-page-aligned old
		 * EOF: zero the tail of the old last page so stale data is
		 * not exposed.
		 */
		if (mode == 0 && NInoNonResident(ni) &&
		    offset > old_size && old_size % PAGE_SIZE != 0) {
			loff_t len = min_t(loff_t,
					round_up(old_size, PAGE_SIZE) - old_size,
					offset - old_size);
			err = iomap_zero_range(vi, old_size, len, NULL,
					&ntfs_seek_iomap_ops,
					&ntfs_iomap_folio_ops, NULL);
		}
		NInoSetFileNameDirty(ni);
		inode_set_mtime_to_ts(vi, inode_set_ctime_current(vi));
		mark_inode_dirty(vi);
	}

	inode_unlock(vi);
	return err;
}
1116
/* File operations for regular NTFS files. */
const struct file_operations ntfs_file_ops = {
	.llseek = ntfs_file_llseek,
	.read_iter = ntfs_file_read_iter,
	.write_iter = ntfs_file_write_iter,
	.fsync = ntfs_file_fsync,
	.mmap_prepare = ntfs_file_mmap_prepare,
	.open = ntfs_file_open,
	.release = ntfs_file_release,
	.splice_read = ntfs_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ntfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = ntfs_compat_ioctl,
#endif
	.fallocate = ntfs_fallocate,
	.setlease = generic_setlease,
};
1134
/* Inode operations for regular NTFS files. */
const struct inode_operations ntfs_file_inode_ops = {
	.setattr = ntfs_setattr,
	.getattr = ntfs_getattr,
	.listxattr = ntfs_listxattr,
	.get_acl = ntfs_get_acl,
	.set_acl = ntfs_set_acl,
	.fiemap = ntfs_fiemap,
};

/* Inode operations for NTFS symlinks. */
const struct inode_operations ntfs_symlink_inode_operations = {
	.get_link = ntfs_get_link,
	.setattr = ntfs_setattr,
	.listxattr = ntfs_listxattr,
};

/* Inode operations for special files (device nodes, fifos, sockets). */
const struct inode_operations ntfs_special_inode_operations = {
	.setattr = ntfs_setattr,
	.getattr = ntfs_getattr,
	.listxattr = ntfs_listxattr,
	.get_acl = ntfs_get_acl,
	.set_acl = ntfs_set_acl,
};

/*
 * Empty operation tables — presumably installed on inodes that must
 * not be operated on (TODO confirm against the callers that use them).
 */
const struct file_operations ntfs_empty_file_ops = {};

const struct inode_operations ntfs_empty_inode_ops = {};
1161