// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hpfs/file.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  file VFS functions
 */

#include "hpfs_fn.h"
#include <linux/mpage.h>
#include <linux/iomap.h>
#include <linux/fiemap.h>

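/* Convert a byte count to a number of 512-byte sectors, rounding up. */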
#define BLOCKS(size) (((size) + 511) >> 9)

static int hpfs_file_release(struct inode *inode, struct file *file)
{
	hpfs_lock(inode->i_sb);
	hpfs_write_if_changed(inode);
	hpfs_unlock(inode->i_sb);
	return 0;
}

int hpfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = file_write_and_wait_range(file, start, end);
	if (ret)
		return ret;
	return sync_blockdev(inode->i_sb->s_bdev);
}

/*
 * generic_file_read often calls bmap with non-existing sector,
 * so we must ignore such errors.
 */

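/*
 * Map file sector @file_secno to a disk sector and report in *n_secs how
 * many consecutive sectors follow it.  The per-inode run cache
 * (i_file_sec/i_disk_sec/i_n_secs) is tried first; on a miss the fnode's
 * B+ tree is searched, which may also refresh the cached run, so the
 * cache is checked again before falling back to a single sector.
 * Returns 0 for holes and out-of-range sectors.
 */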
static secno hpfs_bmap(struct inode *inode, unsigned file_secno, unsigned *n_secs)
{
	struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
	unsigned n, disk_secno;
	struct fnode *fnode;
	struct buffer_head *bh;
	if (BLOCKS(hpfs_i(inode)->mmu_private) <= file_secno) return 0;
	n = file_secno - hpfs_inode->i_file_sec;
	if (n < hpfs_inode->i_n_secs) {
		*n_secs = hpfs_inode->i_n_secs - n;
		return hpfs_inode->i_disk_sec + n;
	}
	if (!(fnode = hpfs_map_fnode(inode->i_sb, inode->i_ino, &bh))) return 0;
	disk_secno = hpfs_bplus_lookup(inode->i_sb, inode, &fnode->btree, file_secno, bh);
	if (disk_secno == -1) return 0;
	if (hpfs_chk_sectors(inode->i_sb, disk_secno, 1, "bmap")) return 0;
	n = file_secno - hpfs_inode->i_file_sec;
	if (n < hpfs_inode->i_n_secs) {
		*n_secs = hpfs_inode->i_n_secs - n;
		return hpfs_inode->i_disk_sec + n;
	}
	*n_secs = 1;
	return disk_secno;
}

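/*
 * Trim the allocation to match i_size: invalidate the cached run, cut the
 * B+ tree back to the sectors that i_size still covers and write the
 * fnode out.
 */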
void hpfs_truncate(struct inode *i)
{
	if (IS_IMMUTABLE(i)) return /*-EPERM*/;
	hpfs_lock_assert(i->i_sb);

	hpfs_i(i)->i_n_secs = 0;
	i->i_blocks = 1 + ((i->i_size + 511) >> 9);
	hpfs_i(i)->mmu_private = i->i_size;
	hpfs_truncate_btree(i->i_sb, i->i_ino, 1, ((i->i_size + 511) >> 9));
	hpfs_write_inode(i);
	hpfs_i(i)->i_n_secs = 0;
}

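/*
 * Map (and with @create, allocate) the 512-byte block @iblock.  Mapped
 * runs are passed through the hotfix list so remapped bad sectors are
 * redirected.  Allocation only ever appends the single sector right at
 * mmu_private; cont_write_begin() fills the gap up to the write position
 * first, so anything else indicates a bug.
 */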
static int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
{
	int r;
	secno s;
	unsigned n_secs;
	hpfs_lock(inode->i_sb);
	s = hpfs_bmap(inode, iblock, &n_secs);
	if (s) {
		if (bh_result->b_size >> 9 < n_secs)
			n_secs = bh_result->b_size >> 9;
		n_secs = hpfs_search_hotfix_map_for_range(inode->i_sb, s, n_secs);
		if (unlikely(!n_secs)) {
			s = hpfs_search_hotfix_map(inode->i_sb, s);
			n_secs = 1;
		}
		map_bh(bh_result, inode->i_sb, s);
		bh_result->b_size = n_secs << 9;
		goto ret_0;
	}
	if (!create) goto ret_0;
	if (iblock<<9 != hpfs_i(inode)->mmu_private) {
		BUG();
		r = -EIO;
		goto ret_r;
	}
	if ((s = hpfs_add_sector_to_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1)) == -1) {
		hpfs_truncate_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1);
		r = -ENOSPC;
		goto ret_r;
	}
	inode->i_blocks++;
	hpfs_i(inode)->mmu_private += 512;
	set_buffer_new(bh_result);
	map_bh(bh_result, inode->i_sb, hpfs_search_hotfix_map(inode->i_sb, s));
	ret_0:
	r = 0;
	ret_r:
	hpfs_unlock(inode->i_sb);
	return r;
}

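/*
 * Read-only iomap provider used by hpfs_fiemap() below; writes go through
 * hpfs_get_block(), so IOMAP_WRITE/IOMAP_ZERO are rejected.
 */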
static int hpfs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned flags, struct iomap *iomap, struct iomap *srcmap)
{
	struct super_block *sb = inode->i_sb;
	unsigned int blkbits = inode->i_blkbits;
	unsigned int n_secs;
	secno s;

	if (WARN_ON_ONCE(flags & (IOMAP_WRITE | IOMAP_ZERO)))
		return -EINVAL;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = offset;

	hpfs_lock(sb);
	s = hpfs_bmap(inode, offset >> blkbits, &n_secs);
	if (s) {
		n_secs = hpfs_search_hotfix_map_for_range(sb, s,
				min_t(loff_t, n_secs, length));
		if (unlikely(!n_secs)) {
			s = hpfs_search_hotfix_map(sb, s);
			n_secs = 1;
		}
		iomap->type = IOMAP_MAPPED;
		iomap->flags = IOMAP_F_MERGED;
		iomap->addr = (u64)s << blkbits;
		iomap->length = (u64)n_secs << blkbits;
	} else {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->length = 1 << blkbits;
	}

	hpfs_unlock(sb);
	return 0;
}

static const struct iomap_ops hpfs_iomap_ops = {
	.iomap_begin		= hpfs_iomap_begin,
};

static int hpfs_read_folio(struct file *file, struct folio *folio)
{
	return mpage_read_folio(folio, hpfs_get_block);
}

static void hpfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, hpfs_get_block);
}

static int hpfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, hpfs_get_block);
}

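/*
 * Undo the effects of a failed or short write: drop page cache beyond
 * i_size and free any blocks that were allocated past it.
 */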
static void hpfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	hpfs_lock(inode->i_sb);

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		hpfs_truncate(inode);
	}

	hpfs_unlock(inode->i_sb);
}

static int hpfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len,
			struct folio **foliop, void **fsdata)
{
	int ret;

	ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
				hpfs_get_block,
				&hpfs_i(mapping->host)->mmu_private);
	if (unlikely(ret))
		hpfs_write_failed(mapping, pos + len);

	return ret;
}

static int hpfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct folio *folio, void *fsdata)
{
	struct inode *inode = mapping->host;
	int err;
	err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
	if (err < len)
		hpfs_write_failed(mapping, pos + len);
	if (!(err < 0)) {
		/* make sure we write it on close, if not earlier */
		hpfs_lock(inode->i_sb);
		hpfs_i(inode)->i_dirty = 1;
		hpfs_unlock(inode->i_sb);
	}
	return err;
}

static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, hpfs_get_block);
}

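/* Clamp the request to i_size and let iomap_fiemap() walk the extents. */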
static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len)
{
	int ret;

	inode_lock(inode);
	len = min_t(u64, len, i_size_read(inode));
	ret = iomap_fiemap(inode, fieinfo, start, len, &hpfs_iomap_ops);
	inode_unlock(inode);

	return ret;
}

const struct address_space_operations hpfs_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = hpfs_read_folio,
	.readahead = hpfs_readahead,
	.writepages = hpfs_writepages,
	.write_begin = hpfs_write_begin,
	.write_end = hpfs_write_end,
	.bmap = _hpfs_bmap,
	.migrate_folio = buffer_migrate_folio,
};

const struct file_operations hpfs_file_ops =
{
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.release	= hpfs_file_release,
	.fsync		= hpfs_file_fsync,
	.splice_read	= filemap_splice_read,
	.unlocked_ioctl	= hpfs_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};

const struct inode_operations hpfs_file_iops =
{
	.setattr	= hpfs_setattr,
	.fiemap		= hpfs_fiemap,
};