xref: /linux/fs/jfs/inode.c (revision 6093a688a07da07808f0122f9aa2a3eed250d853)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) International Business Machines Corp., 2000-2004
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 */

#include <linux/fs.h>
#include <linux/mpage.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/uio.h>
#include <linux/writeback.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_filsys.h"
#include "jfs_imap.h"
#include "jfs_extent.h"
#include "jfs_unicode.h"
#include "jfs_debug.h"
#include "jfs_dmap.h"

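/*
 * jfs_iget - look up or read in the inode with the given number
 *
 * Reads the on-disk inode via diRead() and, based on the file type,
 * wires up the matching inode, file and address_space operations.
 * Returns the inode on success or an ERR_PTR on failure.
 */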
struct inode *jfs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	int ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ret = diRead(inode);
	if (ret < 0) {
		iget_failed(inode);
		return ERR_PTR(ret);
	}

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &jfs_file_inode_operations;
		inode->i_fop = &jfs_file_operations;
		inode->i_mapping->a_ops = &jfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &jfs_dir_inode_operations;
		inode->i_fop = &jfs_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (inode->i_size >= IDATASIZE) {
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			inode->i_mapping->a_ops = &jfs_aops;
		} else {
			inode->i_op = &jfs_fast_symlink_inode_operations;
			inode->i_link = JFS_IP(inode)->i_inline;
			/*
			 * The inline data should be null-terminated, but
			 * don't let on-disk corruption crash the kernel
			 */
			inode->i_link[inode->i_size] = '\0';
		}
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
		   S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &jfs_file_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		printk(KERN_DEBUG "JFS: Invalid file type 0%04o for inode %lu.\n",
		       inode->i_mode, inode->i_ino);
		iget_failed(inode);
		return ERR_PTR(-EIO);
	}
	unlock_new_inode(inode);
	return inode;
}

/*
 * Workhorse of both fsync & write_inode
 */
int jfs_commit_inode(struct inode *inode, int wait)
{
	int rc = 0;
	tid_t tid;
	static int noisy = 5;

	jfs_info("In jfs_commit_inode, inode = 0x%p", inode);

	/*
	 * Don't commit if inode has been committed since last being
	 * marked dirty, or if it has been deleted.
	 */
	if (inode->i_nlink == 0 || !test_cflag(COMMIT_Dirty, inode))
		return 0;

	if (isReadOnly(inode)) {
		/* kernel allows writes to devices on read-only
		 * partitions and may think inode is dirty
		 */
		if (!special_file(inode->i_mode) && noisy) {
			jfs_err("jfs_commit_inode(0x%p) called on read-only volume",
				inode);
			jfs_err("Is remount racy?");
			noisy--;
		}
		return 0;
	}

	tid = txBegin(inode->i_sb, COMMIT_INODE);
	mutex_lock(&JFS_IP(inode)->commit_mutex);

	/*
	 * Retest inode state after taking commit_mutex
	 */
	if (inode->i_nlink && test_cflag(COMMIT_Dirty, inode))
		rc = txCommit(tid, 1, &inode, wait ? COMMIT_SYNC : 0);

	txEnd(tid);
	mutex_unlock(&JFS_IP(inode)->commit_mutex);
	return rc;
}

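/*
 * ->write_inode callback: commit a dirty inode through the journal, or
 * just flush the log if the inode has already been committed.
 */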
int jfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int wait = wbc->sync_mode == WB_SYNC_ALL;

	if (inode->i_nlink == 0)
		return 0;
	/*
	 * If COMMIT_Dirty is not set, the inode isn't really dirty.
	 * It has been committed since the last change, but was still
	 * on the dirty inode list.
	 */
	if (!test_cflag(COMMIT_Dirty, inode)) {
		/* Make sure committed changes hit the disk */
		jfs_flush_journal(JFS_SBI(inode->i_sb)->log, wait);
		return 0;
	}

	if (jfs_commit_inode(inode, wait)) {
		jfs_err("jfs_write_inode: jfs_commit_inode failed!");
		return -EIO;
	} else
		return 0;
}

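/*
 * ->evict_inode callback: for an unlinked inode, truncate the page cache
 * and release its disk inode and quota; in every case drop the in-core
 * state and any active allocation-group reference.
 */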
void jfs_evict_inode(struct inode *inode)
{
	struct jfs_inode_info *ji = JFS_IP(inode);

	jfs_info("In jfs_evict_inode, inode = 0x%p", inode);

	if (!inode->i_nlink && !is_bad_inode(inode)) {
		dquot_initialize(inode);

		truncate_inode_pages_final(&inode->i_data);
		if (JFS_IP(inode)->fileset == FILESYSTEM_I) {
			struct inode *ipimap = JFS_SBI(inode->i_sb)->ipimap;

			if (test_cflag(COMMIT_Freewmap, inode))
				jfs_free_zero_link(inode);

			if (ipimap && JFS_IP(ipimap)->i_imap)
				diFree(inode);

			/*
			 * Free the inode from the quota allocation.
			 */
			dquot_free_inode(inode);
		}
	} else {
		truncate_inode_pages_final(&inode->i_data);
	}
	clear_inode(inode);
	dquot_drop(inode);

	BUG_ON(!list_empty(&ji->anon_inode_list));

	spin_lock_irq(&ji->ag_lock);
	if (ji->active_ag != -1) {
		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
		atomic_dec(&bmap->db_active[ji->active_ag]);
		ji->active_ag = -1;
	}
	spin_unlock_irq(&ji->ag_lock);
}

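/*
 * ->dirty_inode callback: flag the inode for a later transaction commit;
 * the actual journaling happens in jfs_commit_inode().
 */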
void jfs_dirty_inode(struct inode *inode, int flags)
{
	static int noisy = 5;

	if (isReadOnly(inode)) {
		if (!special_file(inode->i_mode) && noisy) {
			/* kernel allows writes to devices on read-only
			 * partitions and may try to mark inode dirty
			 */
			jfs_err("jfs_dirty_inode called on read-only volume");
			jfs_err("Is remount racy?");
			noisy--;
		}
		return;
	}

	set_cflag(COMMIT_Dirty, inode);
}

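/*
 * get_block_t used by all jfs buffered and direct I/O paths: map the
 * logical block @lblock to an on-disk extent, allocating (and recording)
 * new blocks when @create is set.
 */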
int jfs_get_block(struct inode *ip, sector_t lblock,
		  struct buffer_head *bh_result, int create)
{
	s64 lblock64 = lblock;
	int rc = 0;
	xad_t xad;
	s64 xaddr;
	int xflag;
	s32 xlen = bh_result->b_size >> ip->i_blkbits;

	/*
	 * Take appropriate lock on inode
	 */
	if (create)
		IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
	else
		IREAD_LOCK(ip, RDWRLOCK_NORMAL);

	if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) &&
	    (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) &&
	    xaddr) {
		if (xflag & XAD_NOTRECORDED) {
			if (!create)
				/*
				 * Allocated but not recorded, read treats
				 * this as a hole
				 */
				goto unlock;
			XADoffset(&xad, lblock64);
			XADlength(&xad, xlen);
			XADaddress(&xad, xaddr);
			rc = extRecord(ip, &xad);
			if (rc)
				goto unlock;
			set_buffer_new(bh_result);
		}

		map_bh(bh_result, ip->i_sb, xaddr);
		bh_result->b_size = xlen << ip->i_blkbits;
		goto unlock;
	}
	if (!create)
		goto unlock;

	/*
	 * Allocate a new block
	 */
	if ((rc = extHint(ip, lblock64 << ip->i_sb->s_blocksize_bits, &xad)))
		goto unlock;
	rc = extAlloc(ip, xlen, lblock64, &xad, false);
	if (rc)
		goto unlock;

	set_buffer_new(bh_result);
	map_bh(bh_result, ip->i_sb, addressXAD(&xad));
	bh_result->b_size = lengthXAD(&xad) << ip->i_blkbits;

      unlock:
	/*
	 * Release lock on inode
	 */
	if (create)
		IWRITE_UNLOCK(ip);
	else
		IREAD_UNLOCK(ip);
	return rc;
}

static int jfs_writepages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, jfs_get_block);
}

static int jfs_read_folio(struct file *file, struct folio *folio)
{
	return mpage_read_folio(folio, jfs_get_block);
}

static void jfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, jfs_get_block);
}

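/*
 * A write that may have instantiated blocks beyond i_size failed: drop
 * the page cache past i_size and truncate the excess allocation on disk.
 */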
static void jfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		jfs_truncate(inode);
	}
}

static int jfs_write_begin(const struct kiocb *iocb,
			   struct address_space *mapping,
			   loff_t pos, unsigned len,
			   struct folio **foliop, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, foliop, jfs_get_block);
	if (unlikely(ret))
		jfs_write_failed(mapping, pos + len);

	return ret;
}

static int jfs_write_end(const struct kiocb *iocb,
			 struct address_space *mapping,
			 loff_t pos, unsigned len, unsigned copied,
			 struct folio *folio, void *fsdata)
{
	int ret;

	ret = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
	if (ret < len)
		jfs_write_failed(mapping, pos + len);
	return ret;
}

static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, jfs_get_block);
}

static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file->f_mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	ret = blockdev_direct_IO(iocb, inode, iter, jfs_get_block);

	/*
	 * In case of error, an extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = iocb->ki_pos + count;

		if (end > isize)
			jfs_write_failed(mapping, end);
	}

	return ret;
}

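/*
 * All buffered and direct I/O goes through the generic buffer_head/mpage
 * helpers above, each of them driven by jfs_get_block().
 */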
const struct address_space_operations jfs_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= jfs_read_folio,
	.readahead	= jfs_readahead,
	.writepages	= jfs_writepages,
	.write_begin	= jfs_write_begin,
	.write_end	= jfs_write_end,
	.bmap		= jfs_bmap,
	.direct_IO	= jfs_direct_IO,
	.migrate_folio	= buffer_migrate_folio,
};

/*
 * Guts of jfs_truncate.  Called with locks already held.  Can be called
 * on a directory to truncate the directory index table.
 */
void jfs_truncate_nolock(struct inode *ip, loff_t length)
{
	loff_t newsize;
	tid_t tid;

	ASSERT(length >= 0);

	if (test_cflag(COMMIT_Nolink, ip) || isReadOnly(ip)) {
		xtTruncate(0, ip, length, COMMIT_WMAP);
		return;
	}

	do {
		tid = txBegin(ip->i_sb, 0);

		/*
		 * The commit_mutex cannot be taken before txBegin.
		 * txBegin may block and there is a chance the inode
		 * could be marked dirty and need to be committed
		 * before txBegin unblocks
		 */
		mutex_lock(&JFS_IP(ip)->commit_mutex);

		newsize = xtTruncate(tid, ip, length,
				     COMMIT_TRUNCATE | COMMIT_PWMAP);
		if (newsize < 0) {
			txEnd(tid);
			mutex_unlock(&JFS_IP(ip)->commit_mutex);
			break;
		}

		inode_set_mtime_to_ts(ip, inode_set_ctime_current(ip));
		mark_inode_dirty(ip);

		txCommit(tid, 1, &ip, 0);
		txEnd(tid);
		mutex_unlock(&JFS_IP(ip)->commit_mutex);
	} while (newsize > length);	/* Truncate isn't always atomic */
}

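/*
 * Truncate the file to its current i_size: zero out the partial tail
 * block, then drop the extents beyond it under the inode rwlock.
 */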
void jfs_truncate(struct inode *ip)
{
	jfs_info("jfs_truncate: size = 0x%lx", (ulong) ip->i_size);

	block_truncate_page(ip->i_mapping, ip->i_size, jfs_get_block);

	IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
	jfs_truncate_nolock(ip, ip->i_size);
	IWRITE_UNLOCK(ip);
}
423