// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Copyright (C) 1996  Gertjan van Wingerde
 *	Minix V2 fs support.
 *
 *  Modified for 680x0 by Andreas Schwab
 *  Updated to filesystem version 3 by Daniel Aragones
 */

#include <linux/module.h>
#include "minix.h"
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/vfs.h>
#include <linux/writeback.h>

static int minix_write_inode(struct inode *inode,
		struct writeback_control *wbc);
static int minix_statfs(struct dentry *dentry, struct kstatfs *buf);
static int minix_remount (struct super_block * sb, int * flags, char * data);

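/*
 * Called when the last reference to an in-core inode is dropped: tear
 * down the page cache, and for unlinked inodes also release the data
 * blocks and return the on-disk inode to the bitmap allocator.
 */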
static void minix_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	if (!inode->i_nlink) {
		inode->i_size = 0;
		minix_truncate(inode);
	}
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	if (!inode->i_nlink)
		minix_free_inode(inode);
}

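/*
 * Write the saved mount state back into a V1/V2 superblock so a clean
 * unmount is recorded, then release the bitmap and superblock buffers.
 */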
static void minix_put_super(struct super_block *sb)
{
	int i;
	struct minix_sb_info *sbi = minix_sb(sb);

	if (!sb_rdonly(sb)) {
		if (sbi->s_version != MINIX_V3)	 /* the V3 superblock has no s_state field */
			sbi->s_ms->s_state = sbi->s_mount_state;
		mark_buffer_dirty(sbi->s_sbh);
	}
	for (i = 0; i < sbi->s_imap_blocks; i++)
		brelse(sbi->s_imap[i]);
	for (i = 0; i < sbi->s_zmap_blocks; i++)
		brelse(sbi->s_zmap[i]);
	brelse (sbi->s_sbh);
	kfree(sbi->s_imap);
	sb->s_fs_info = NULL;
	kfree(sbi);
}

static struct kmem_cache * minix_inode_cachep;

static struct inode *minix_alloc_inode(struct super_block *sb)
{
	struct minix_inode_info *ei;
	ei = alloc_inode_sb(sb, minix_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void minix_free_in_core_inode(struct inode *inode)
{
	kmem_cache_free(minix_inode_cachep, minix_i(inode));
}

static void init_once(void *foo)
{
	struct minix_inode_info *ei = (struct minix_inode_info *) foo;

	inode_init_once(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
	minix_inode_cachep = kmem_cache_create("minix_inode_cache",
					     sizeof(struct minix_inode_info),
					     0, (SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD|SLAB_ACCOUNT),
					     init_once);
	if (minix_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(minix_inode_cachep);
}

static const struct super_operations minix_sops = {
	.alloc_inode	= minix_alloc_inode,
	.free_inode	= minix_free_in_core_inode,
	.write_inode	= minix_write_inode,
	.evict_inode	= minix_evict_inode,
	.put_super	= minix_put_super,
	.statfs		= minix_statfs,
	.remount_fs	= minix_remount,
};

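/*
 * Handle ro<->rw remounts: when going read-only the saved mount state is
 * written back; when going read-write the valid flag is cleared on disk
 * (V1/V2) so an unclean shutdown can later be detected by fsck.
 */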
static int minix_remount (struct super_block * sb, int * flags, char * data)
{
	struct minix_sb_info * sbi = minix_sb(sb);
	struct minix_super_block * ms;

	sync_filesystem(sb);
	ms = sbi->s_ms;
	if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
		return 0;
	if (*flags & SB_RDONLY) {
		if (ms->s_state & MINIX_VALID_FS ||
		    !(sbi->s_mount_state & MINIX_VALID_FS))
			return 0;
		/* Mounting a rw partition read-only. */
		if (sbi->s_version != MINIX_V3)
			ms->s_state = sbi->s_mount_state;
		mark_buffer_dirty(sbi->s_sbh);
	} else {
		/* Mounting a read-only partition read-write. */
		if (sbi->s_version != MINIX_V3) {
			sbi->s_mount_state = ms->s_state;
			ms->s_state &= ~MINIX_VALID_FS;
		} else {
			sbi->s_mount_state = MINIX_VALID_FS;
		}
		mark_buffer_dirty(sbi->s_sbh);

		if (!(sbi->s_mount_state & MINIX_VALID_FS))
			printk("MINIX-fs warning: remounting unchecked fs, "
				"running fsck is recommended\n");
		else if ((sbi->s_mount_state & MINIX_ERROR_FS))
			printk("MINIX-fs warning: remounting fs with errors, "
				"running fsck is recommended\n");
	}
	return 0;
}

static bool minix_check_superblock(struct super_block *sb)
{
	struct minix_sb_info *sbi = minix_sb(sb);

	if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
		return false;

	/*
	 * s_max_size must not exceed the block mapping limitation.  This check
	 * is only needed for V1 filesystems, since V2/V3 support an extra level
	 * of indirect blocks which places the limit well above U32_MAX.
	 */
	if (sbi->s_version == MINIX_V1 &&
	    sb->s_maxbytes > (7 + 512 + 512*512) * BLOCK_SIZE)
		return false;

	return true;
}

static int minix_fill_super(struct super_block *s, void *data, int silent)
{
	struct buffer_head *bh;
	struct buffer_head **map;
	struct minix_super_block *ms;
	struct minix3_super_block *m3s = NULL;
	unsigned long i, block;
	struct inode *root_inode;
	struct minix_sb_info *sbi;
	int ret = -EINVAL;

	sbi = kzalloc(sizeof(struct minix_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	s->s_fs_info = sbi;

	BUILD_BUG_ON(32 != sizeof (struct minix_inode));
	BUILD_BUG_ON(64 != sizeof(struct minix2_inode));

	if (!sb_set_blocksize(s, BLOCK_SIZE))
		goto out_bad_hblock;

	if (!(bh = sb_bread(s, 1)))
		goto out_bad_sb;

	ms = (struct minix_super_block *) bh->b_data;
	sbi->s_ms = ms;
	sbi->s_sbh = bh;
	sbi->s_mount_state = ms->s_state;
	sbi->s_ninodes = ms->s_ninodes;
	sbi->s_nzones = ms->s_nzones;
	sbi->s_imap_blocks = ms->s_imap_blocks;
	sbi->s_zmap_blocks = ms->s_zmap_blocks;
	sbi->s_firstdatazone = ms->s_firstdatazone;
	sbi->s_log_zone_size = ms->s_log_zone_size;
	s->s_maxbytes = ms->s_max_size;
	s->s_magic = ms->s_magic;
	if (s->s_magic == MINIX_SUPER_MAGIC) {
		sbi->s_version = MINIX_V1;
		sbi->s_dirsize = 16;
		sbi->s_namelen = 14;
		s->s_max_links = MINIX_LINK_MAX;
	} else if (s->s_magic == MINIX_SUPER_MAGIC2) {
		sbi->s_version = MINIX_V1;
		sbi->s_dirsize = 32;
		sbi->s_namelen = 30;
		s->s_max_links = MINIX_LINK_MAX;
	} else if (s->s_magic == MINIX2_SUPER_MAGIC) {
		sbi->s_version = MINIX_V2;
		sbi->s_nzones = ms->s_zones;
		sbi->s_dirsize = 16;
		sbi->s_namelen = 14;
		s->s_max_links = MINIX2_LINK_MAX;
	} else if (s->s_magic == MINIX2_SUPER_MAGIC2) {
		sbi->s_version = MINIX_V2;
		sbi->s_nzones = ms->s_zones;
		sbi->s_dirsize = 32;
		sbi->s_namelen = 30;
		s->s_max_links = MINIX2_LINK_MAX;
	} else if ( *(__u16 *)(bh->b_data + 24) == MINIX3_SUPER_MAGIC) {
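		/* The V3 superblock stores its magic at offset 24 of the on-disk layout. */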
		m3s = (struct minix3_super_block *) bh->b_data;
		s->s_magic = m3s->s_magic;
		sbi->s_imap_blocks = m3s->s_imap_blocks;
		sbi->s_zmap_blocks = m3s->s_zmap_blocks;
		sbi->s_firstdatazone = m3s->s_firstdatazone;
		sbi->s_log_zone_size = m3s->s_log_zone_size;
		s->s_maxbytes = m3s->s_max_size;
		sbi->s_ninodes = m3s->s_ninodes;
		sbi->s_nzones = m3s->s_zones;
		sbi->s_dirsize = 64;
		sbi->s_namelen = 60;
		sbi->s_version = MINIX_V3;
		sbi->s_mount_state = MINIX_VALID_FS;
		sb_set_blocksize(s, m3s->s_blocksize);
		s->s_max_links = MINIX2_LINK_MAX;
	} else
		goto out_no_fs;

	if (!minix_check_superblock(s))
		goto out_illegal_sb;

	/*
	 * Allocate the buffer map to keep the superblock small.
	 */
	i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh);
	map = kzalloc(i, GFP_KERNEL);
	if (!map)
		goto out_no_map;
	sbi->s_imap = &map[0];
	sbi->s_zmap = &map[sbi->s_imap_blocks];

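	/* The inode and zone bitmaps follow the superblock, starting at block 2. */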
	block=2;
	for (i=0 ; i < sbi->s_imap_blocks ; i++) {
		if (!(sbi->s_imap[i]=sb_bread(s, block)))
			goto out_no_bitmap;
		block++;
	}
	for (i=0 ; i < sbi->s_zmap_blocks ; i++) {
		if (!(sbi->s_zmap[i]=sb_bread(s, block)))
			goto out_no_bitmap;
		block++;
	}

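	/*
	 * Bit 0 of each bitmap does not correspond to a real inode or zone;
	 * mark it in use so the allocators never hand it out.
	 */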
	minix_set_bit(0,sbi->s_imap[0]->b_data);
	minix_set_bit(0,sbi->s_zmap[0]->b_data);

	/* Apparently minix can create filesystems that allocate more blocks for
	 * the bitmaps than needed.  We simply ignore the extra blocks, but make
	 * sure enough blocks were allocated and refuse to mount otherwise.
	 */
	block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize);
	if (sbi->s_imap_blocks < block) {
		printk("MINIX-fs: file system does not have enough "
				"imap blocks allocated.  Refusing to mount.\n");
		goto out_no_bitmap;
	}

	block = minix_blocks_needed(
			(sbi->s_nzones - sbi->s_firstdatazone + 1),
			s->s_blocksize);
	if (sbi->s_zmap_blocks < block) {
		printk("MINIX-fs: file system does not have enough "
				"zmap blocks allocated.  Refusing to mount.\n");
		goto out_no_bitmap;
	}

	/* set up enough so that it can read an inode */
	s->s_op = &minix_sops;
	s->s_time_min = 0;
	s->s_time_max = U32_MAX;
	root_inode = minix_iget(s, MINIX_ROOT_INO);
	if (IS_ERR(root_inode)) {
		ret = PTR_ERR(root_inode);
		goto out_no_root;
	}

	ret = -ENOMEM;
	s->s_root = d_make_root(root_inode);
	if (!s->s_root)
		goto out_no_root;

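	/*
	 * Clear the valid flag on disk while mounted read-write; it is put
	 * back by minix_put_super() on a clean unmount.
	 */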
	if (!sb_rdonly(s)) {
		if (sbi->s_version != MINIX_V3) /* the V3 superblock has no s_state field */
			ms->s_state &= ~MINIX_VALID_FS;
		mark_buffer_dirty(bh);
	}
	if (!(sbi->s_mount_state & MINIX_VALID_FS))
		printk("MINIX-fs: mounting unchecked file system, "
			"running fsck is recommended\n");
	else if (sbi->s_mount_state & MINIX_ERROR_FS)
		printk("MINIX-fs: mounting file system with errors, "
			"running fsck is recommended\n");

	return 0;

out_no_root:
	if (!silent)
		printk("MINIX-fs: get root inode failed\n");
	goto out_freemap;

out_no_bitmap:
	printk("MINIX-fs: bad superblock or unable to read bitmaps\n");
out_freemap:
	for (i = 0; i < sbi->s_imap_blocks; i++)
		brelse(sbi->s_imap[i]);
	for (i = 0; i < sbi->s_zmap_blocks; i++)
		brelse(sbi->s_zmap[i]);
	kfree(sbi->s_imap);
	goto out_release;

out_no_map:
	ret = -ENOMEM;
	if (!silent)
		printk("MINIX-fs: can't allocate map\n");
	goto out_release;

out_illegal_sb:
	if (!silent)
		printk("MINIX-fs: bad superblock\n");
	goto out_release;

out_no_fs:
	if (!silent)
		printk("VFS: Can't find a Minix filesystem V1 | V2 | V3 "
		       "on device %s.\n", s->s_id);
out_release:
	brelse(bh);
	goto out;

out_bad_hblock:
	printk("MINIX-fs: blocksize too small for device\n");
	goto out;

out_bad_sb:
	printk("MINIX-fs: unable to read superblock\n");
out:
	s->s_fs_info = NULL;
	kfree(sbi);
	return ret;
}

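/*
 * Sizes are reported in units of the filesystem block size; zone counts
 * are converted to blocks by shifting with s_log_zone_size.
 */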
static int minix_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct minix_sb_info *sbi = minix_sb(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	buf->f_type = sb->s_magic;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size;
	buf->f_bfree = minix_count_free_blocks(sb);
	buf->f_bavail = buf->f_bfree;
	buf->f_files = sbi->s_ninodes;
	buf->f_ffree = minix_count_free_inodes(sb);
	buf->f_namelen = sbi->s_namelen;
	buf->f_fsid = u64_to_fsid(id);

	return 0;
}

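/*
 * Map a file block to a device block, dispatching on the inode version:
 * V1 inodes use 16-bit zone pointers, V2/V3 inodes use 32-bit ones.
 */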
static int minix_get_block(struct inode *inode, sector_t block,
		    struct buffer_head *bh_result, int create)
{
	if (INODE_VERSION(inode) == MINIX_V1)
		return V1_minix_get_block(inode, block, bh_result, create);
	else
		return V2_minix_get_block(inode, block, bh_result, create);
}

static int minix_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, minix_get_block, wbc);
}

static int minix_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, minix_get_block);
}

int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, minix_get_block);
}

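/*
 * A failed write_begin may have instantiated blocks beyond i_size;
 * trim the page cache and the block mapping back to the old size.
 */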
static void minix_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		minix_truncate(inode);
	}
}

static int minix_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, pagep, minix_get_block);
	if (unlikely(ret))
		minix_write_failed(mapping, pos + len);

	return ret;
}

static sector_t minix_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,minix_get_block);
}

static const struct address_space_operations minix_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = minix_read_folio,
	.writepage = minix_writepage,
	.write_begin = minix_write_begin,
	.write_end = generic_write_end,
	.bmap = minix_bmap,
	.direct_IO = noop_direct_IO
};

static const struct inode_operations minix_symlink_inode_operations = {
	.get_link	= page_get_link,
	.getattr	= minix_getattr,
};

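/*
 * Set the inode, file and address-space operations according to the
 * file type; special files are handed off to init_special_inode().
 */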
void minix_set_inode(struct inode *inode, dev_t rdev)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &minix_file_inode_operations;
		inode->i_fop = &minix_file_operations;
		inode->i_mapping->a_ops = &minix_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &minix_dir_inode_operations;
		inode->i_fop = &minix_dir_operations;
		inode->i_mapping->a_ops = &minix_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &minix_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &minix_aops;
	} else
		init_special_inode(inode, inode->i_mode, rdev);
}

/*
 * The minix V1 function to read an inode.
 */
static struct inode *V1_minix_iget(struct inode *inode)
{
	struct buffer_head * bh;
	struct minix_inode * raw_inode;
	struct minix_inode_info *minix_inode = minix_i(inode);
	int i;

	raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh);
	if (!raw_inode) {
		iget_failed(inode);
		return ERR_PTR(-EIO);
	}
	if (raw_inode->i_nlinks == 0) {
		printk("MINIX-fs: deleted inode referenced: %lu\n",
		       inode->i_ino);
		brelse(bh);
		iget_failed(inode);
		return ERR_PTR(-ESTALE);
	}
	inode->i_mode = raw_inode->i_mode;
	i_uid_write(inode, raw_inode->i_uid);
	i_gid_write(inode, raw_inode->i_gid);
	set_nlink(inode, raw_inode->i_nlinks);
	inode->i_size = raw_inode->i_size;
	inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec = raw_inode->i_time;
	inode->i_mtime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_ctime.tv_nsec = 0;
	inode->i_blocks = 0;
	for (i = 0; i < 9; i++)
		minix_inode->u.i1_data[i] = raw_inode->i_zone[i];
	minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0]));
	brelse(bh);
	unlock_new_inode(inode);
	return inode;
}

/*
 * The minix V2 function to read an inode.
 */
static struct inode *V2_minix_iget(struct inode *inode)
{
	struct buffer_head * bh;
	struct minix2_inode * raw_inode;
	struct minix_inode_info *minix_inode = minix_i(inode);
	int i;

	raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh);
	if (!raw_inode) {
		iget_failed(inode);
		return ERR_PTR(-EIO);
	}
	if (raw_inode->i_nlinks == 0) {
		printk("MINIX-fs: deleted inode referenced: %lu\n",
		       inode->i_ino);
		brelse(bh);
		iget_failed(inode);
		return ERR_PTR(-ESTALE);
	}
	inode->i_mode = raw_inode->i_mode;
	i_uid_write(inode, raw_inode->i_uid);
	i_gid_write(inode, raw_inode->i_gid);
	set_nlink(inode, raw_inode->i_nlinks);
	inode->i_size = raw_inode->i_size;
	inode->i_mtime.tv_sec = raw_inode->i_mtime;
	inode->i_atime.tv_sec = raw_inode->i_atime;
	inode->i_ctime.tv_sec = raw_inode->i_ctime;
	inode->i_mtime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_ctime.tv_nsec = 0;
	inode->i_blocks = 0;
	for (i = 0; i < 10; i++)
		minix_inode->u.i2_data[i] = raw_inode->i_zone[i];
	minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0]));
	brelse(bh);
	unlock_new_inode(inode);
	return inode;
}

/*
 * The global function to read an inode.
 */
struct inode *minix_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	if (INODE_VERSION(inode) == MINIX_V1)
		return V1_minix_iget(inode);
	else
		return V2_minix_iget(inode);
}

/*
 * The minix V1 function to synchronize an inode.
 */
static struct buffer_head * V1_minix_update_inode(struct inode * inode)
{
	struct buffer_head * bh;
	struct minix_inode * raw_inode;
	struct minix_inode_info *minix_inode = minix_i(inode);
	int i;

	raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh);
	if (!raw_inode)
		return NULL;
	raw_inode->i_mode = inode->i_mode;
	raw_inode->i_uid = fs_high2lowuid(i_uid_read(inode));
	raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode));
	raw_inode->i_nlinks = inode->i_nlink;
	raw_inode->i_size = inode->i_size;
	raw_inode->i_time = inode->i_mtime.tv_sec;
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev);
	else for (i = 0; i < 9; i++)
		raw_inode->i_zone[i] = minix_inode->u.i1_data[i];
	mark_buffer_dirty(bh);
	return bh;
}

/*
 * The minix V2 function to synchronize an inode.
 */
static struct buffer_head * V2_minix_update_inode(struct inode * inode)
{
	struct buffer_head * bh;
	struct minix2_inode * raw_inode;
	struct minix_inode_info *minix_inode = minix_i(inode);
	int i;

	raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh);
	if (!raw_inode)
		return NULL;
	raw_inode->i_mode = inode->i_mode;
	raw_inode->i_uid = fs_high2lowuid(i_uid_read(inode));
	raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode));
	raw_inode->i_nlinks = inode->i_nlink;
	raw_inode->i_size = inode->i_size;
	raw_inode->i_mtime = inode->i_mtime.tv_sec;
	raw_inode->i_atime = inode->i_atime.tv_sec;
	raw_inode->i_ctime = inode->i_ctime.tv_sec;
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev);
	else for (i = 0; i < 10; i++)
		raw_inode->i_zone[i] = minix_inode->u.i2_data[i];
	mark_buffer_dirty(bh);
	return bh;
}

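/*
 * Copy the in-core inode back to its on-disk slot and, for data
 * integrity writeback (WB_SYNC_ALL), synchronously flush the buffer.
 */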
static int minix_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int err = 0;
	struct buffer_head *bh;

	if (INODE_VERSION(inode) == MINIX_V1)
		bh = V1_minix_update_inode(inode);
	else
		bh = V2_minix_update_inode(inode);
	if (!bh)
		return -EIO;
	if (wbc->sync_mode == WB_SYNC_ALL && buffer_dirty(bh)) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk("IO error syncing minix inode [%s:%08lx]\n",
				inode->i_sb->s_id, inode->i_ino);
			err = -EIO;
		}
	}
	brelse (bh);
	return err;
}

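/*
 * Minix does not maintain i_blocks, so compute the block count from the
 * file size and the version-specific block mapping geometry.
 */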
int minix_getattr(struct user_namespace *mnt_userns, const struct path *path,
		  struct kstat *stat, u32 request_mask, unsigned int flags)
{
	struct super_block *sb = path->dentry->d_sb;
	struct inode *inode = d_inode(path->dentry);

	generic_fillattr(&init_user_ns, inode, stat);
	if (INODE_VERSION(inode) == MINIX_V1)
		stat->blocks = (BLOCK_SIZE / 512) * V1_minix_blocks(stat->size, sb);
	else
		stat->blocks = (sb->s_blocksize / 512) * V2_minix_blocks(stat->size, sb);
	stat->blksize = sb->s_blocksize;
	return 0;
}

/*
 * The function that is called for file truncation.
 */
void minix_truncate(struct inode * inode)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
		return;
	if (INODE_VERSION(inode) == MINIX_V1)
		V1_minix_truncate(inode);
	else
		V2_minix_truncate(inode);
}

static struct dentry *minix_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, minix_fill_super);
}

static struct file_system_type minix_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "minix",
	.mount		= minix_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("minix");

static int __init init_minix_fs(void)
{
	int err = init_inodecache();
	if (err)
		goto out1;
	err = register_filesystem(&minix_fs_type);
	if (err)
		goto out;
	return 0;
out:
	destroy_inodecache();
out1:
	return err;
}

static void __exit exit_minix_fs(void)
{
	unregister_filesystem(&minix_fs_type);
	destroy_inodecache();
}

module_init(init_minix_fs)
module_exit(exit_minix_fs)
MODULE_LICENSE("GPL");