// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hpfs/file.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  file VFS functions
 */

#include "hpfs_fn.h"
#include <linux/mpage.h>
#include <linux/iomap.h>
#include <linux/fiemap.h>

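/* Number of 512-byte sectors needed to hold @size bytes, rounded up. */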
#define BLOCKS(size) (((size) + 511) >> 9)

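/*
 * Called when the last reference to a struct file is dropped: write back
 * any delayed inode changes under the HPFS superblock lock.
 */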
static int hpfs_file_release(struct inode *inode, struct file *file)
{
	hpfs_lock(inode->i_sb);
	hpfs_write_if_changed(inode);
	hpfs_unlock(inode->i_sb);
	return 0;
}

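/*
 * fsync: flush dirty data in the given range, then sync the whole block
 * device so metadata written through buffer heads reaches the disk too.
 */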
int hpfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = file_write_and_wait_range(file, start, end);
	if (ret)
		return ret;
	return sync_blockdev(inode->i_sb->s_bdev);
}

/*
 * generic_file_read often calls bmap with non-existing sector,
 * so we must ignore such errors.
 */

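/*
 * Map the file-relative sector @file_secno to a device sector, preferring
 * the cached extent in hpfs_inode_info and falling back to a b+tree lookup.
 * The length of the contiguous run found is returned in *n_secs; 0 is
 * returned for sectors past the allocated size or on lookup failure.
 */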
static secno hpfs_bmap(struct inode *inode, unsigned file_secno, unsigned *n_secs)
{
	struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
	unsigned n, disk_secno;
	struct fnode *fnode;
	struct buffer_head *bh;
	if (BLOCKS(hpfs_i(inode)->mmu_private) <= file_secno) return 0;
	n = file_secno - hpfs_inode->i_file_sec;
	if (n < hpfs_inode->i_n_secs) {
		*n_secs = hpfs_inode->i_n_secs - n;
		return hpfs_inode->i_disk_sec + n;
	}
	if (!(fnode = hpfs_map_fnode(inode->i_sb, inode->i_ino, &bh))) return 0;
	disk_secno = hpfs_bplus_lookup(inode->i_sb, inode,
				       GET_BTREE_PTR(&fnode->btree),
				       file_secno, bh);
	if (disk_secno == -1) return 0;
	if (hpfs_chk_sectors(inode->i_sb, disk_secno, 1, "bmap")) return 0;
	n = file_secno - hpfs_inode->i_file_sec;
	if (n < hpfs_inode->i_n_secs) {
		*n_secs = hpfs_inode->i_n_secs - n;
		return hpfs_inode->i_disk_sec + n;
	}
	*n_secs = 1;
	return disk_secno;
}

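/*
 * Cut the on-disk allocation back to the current i_size and invalidate the
 * cached extent.  The caller must already hold the HPFS superblock lock.
 */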
void hpfs_truncate(struct inode *i)
{
	if (IS_IMMUTABLE(i)) return /*-EPERM*/;
	hpfs_lock_assert(i->i_sb);

	hpfs_i(i)->i_n_secs = 0;
	i->i_blocks = 1 + ((i->i_size + 511) >> 9);
	hpfs_i(i)->mmu_private = i->i_size;
	hpfs_truncate_btree(i->i_sb, i->i_ino, 1, ((i->i_size + 511) >> 9));
	hpfs_write_inode(i);
	hpfs_i(i)->i_n_secs = 0;
}

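/*
 * get_block for the buffer-head based paths.  Existing sectors are mapped,
 * possibly as a multi-sector run and redirected through the hotfix map;
 * with @create set, exactly one new sector is appended at the current end
 * of the allocation, as HPFS files cannot contain holes.
 */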
static int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
{
	int r;
	secno s;
	unsigned n_secs;
	hpfs_lock(inode->i_sb);
	s = hpfs_bmap(inode, iblock, &n_secs);
	if (s) {
		if (bh_result->b_size >> 9 < n_secs)
			n_secs = bh_result->b_size >> 9;
		n_secs = hpfs_search_hotfix_map_for_range(inode->i_sb, s, n_secs);
		if (unlikely(!n_secs)) {
			s = hpfs_search_hotfix_map(inode->i_sb, s);
			n_secs = 1;
		}
		map_bh(bh_result, inode->i_sb, s);
		bh_result->b_size = n_secs << 9;
		goto ret_0;
	}
	if (!create) goto ret_0;
	if (iblock<<9 != hpfs_i(inode)->mmu_private) {
		BUG();
		r = -EIO;
		goto ret_r;
	}
	if ((s = hpfs_add_sector_to_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1)) == -1) {
		hpfs_truncate_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1);
		r = -ENOSPC;
		goto ret_r;
	}
	inode->i_blocks++;
	hpfs_i(inode)->mmu_private += 512;
	set_buffer_new(bh_result);
	map_bh(bh_result, inode->i_sb, hpfs_search_hotfix_map(inode->i_sb, s));
	ret_0:
	r = 0;
	ret_r:
	hpfs_unlock(inode->i_sb);
	return r;
}

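/*
 * Read-only iomap_begin, used by FIEMAP below: report either a mapped,
 * hotfix-adjusted run of sectors or a single-block hole.
 */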
static int hpfs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned flags, struct iomap *iomap, struct iomap *srcmap)
{
	struct super_block *sb = inode->i_sb;
	unsigned int blkbits = inode->i_blkbits;
	unsigned int n_secs;
	secno s;

	if (WARN_ON_ONCE(flags & (IOMAP_WRITE | IOMAP_ZERO)))
		return -EINVAL;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = offset;

	hpfs_lock(sb);
	s = hpfs_bmap(inode, offset >> blkbits, &n_secs);
	if (s) {
		n_secs = hpfs_search_hotfix_map_for_range(sb, s,
				min_t(loff_t, n_secs, length));
		if (unlikely(!n_secs)) {
			s = hpfs_search_hotfix_map(sb, s);
			n_secs = 1;
		}
		iomap->type = IOMAP_MAPPED;
		iomap->flags = IOMAP_F_MERGED;
		iomap->addr = (u64)s << blkbits;
		iomap->length = (u64)n_secs << blkbits;
	} else {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->length = 1 << blkbits;
	}

	hpfs_unlock(sb);
	return 0;
}

static const struct iomap_ops hpfs_iomap_ops = {
	.iomap_begin = hpfs_iomap_begin,
};

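/* Folio read and writeback paths: plain mpage wrappers around hpfs_get_block. */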
static int hpfs_read_folio(struct file *file, struct folio *folio)
{
	return mpage_read_folio(folio, hpfs_get_block);
}

static void hpfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, hpfs_get_block);
}

static int hpfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, hpfs_get_block);
}

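/*
 * A failed or short write may have extended the allocation past i_size;
 * trim both the page cache and the on-disk allocation back to i_size.
 */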
static void hpfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	hpfs_lock(inode->i_sb);

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		hpfs_truncate(inode);
	}

	hpfs_unlock(inode->i_sb);
}

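/*
 * cont_write_begin zero-fills the range between the current allocation end
 * (mmu_private) and @pos, so hpfs_get_block is only ever asked to extend
 * the file sequentially.
 */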
static int hpfs_write_begin(const struct kiocb *iocb,
			    struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct folio **foliop, void **fsdata)
{
	int ret;

	ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata,
				hpfs_get_block,
				&hpfs_i(mapping->host)->mmu_private);
	if (unlikely(ret))
		hpfs_write_failed(mapping, pos + len);

	return ret;
}

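/*
 * Complete the copy, roll the allocation back on a short write, and mark
 * the inode dirty so the fnode is written out on fsync or final close.
 */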
static int hpfs_write_end(const struct kiocb *iocb,
			  struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct folio *folio, void *fsdata)
{
	struct inode *inode = mapping->host;
	int err;
	err = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
	if (err < len)
		hpfs_write_failed(mapping, pos + len);
	if (!(err < 0)) {
		/* make sure we write it on close, if not earlier */
		hpfs_lock(inode->i_sb);
		hpfs_i(inode)->i_dirty = 1;
		hpfs_unlock(inode->i_sb);
	}
	return err;
}

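/* Legacy FIBMAP interface. */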
static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, hpfs_get_block);
}

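/* FIEMAP through the read-only iomap ops; the request is clamped to i_size. */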
static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len)
{
	int ret;

	inode_lock(inode);
	len = min_t(u64, len, i_size_read(inode));
	ret = iomap_fiemap(inode, fieinfo, start, len, &hpfs_iomap_ops);
	inode_unlock(inode);

	return ret;
}

const struct address_space_operations hpfs_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = hpfs_read_folio,
	.readahead = hpfs_readahead,
	.writepages = hpfs_writepages,
	.write_begin = hpfs_write_begin,
	.write_end = hpfs_write_end,
	.bmap = _hpfs_bmap,
	.migrate_folio = buffer_migrate_folio,
};

const struct file_operations hpfs_file_ops =
{
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.mmap_prepare = generic_file_mmap_prepare,
	.release = hpfs_file_release,
	.fsync = hpfs_file_fsync,
	.splice_read = filemap_splice_read,
	.unlocked_ioctl = hpfs_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};

const struct inode_operations hpfs_file_iops =
{
	.setattr = hpfs_setattr,
	.fiemap = hpfs_fiemap,
};