// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Copyright (C) 1996  Gertjan van Wingerde
 *	Minix V2 fs support.
 *
 *  Modified for 680x0 by Andreas Schwab
 *  Updated to filesystem version 3 by Daniel Aragones
 */

#include <linux/module.h>
#include "minix.h"
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/mpage.h>
#include <linux/vfs.h>
#include <linux/writeback.h>

static int minix_write_inode(struct inode *inode,
		struct writeback_control *wbc);
static int minix_statfs(struct dentry *dentry, struct kstatfs *buf);
static int minix_remount (struct super_block * sb, int * flags, char * data);

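/*
 * Last-iput teardown of an in-core inode.  Once the link count has
 * dropped to zero the file contents are truncated away and, after the
 * VFS state has been cleared, the on-disk inode itself is returned to
 * the inode bitmap via minix_free_inode().
 */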
static void minix_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	if (!inode->i_nlink) {
		inode->i_size = 0;
		minix_truncate(inode);
	}
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	if (!inode->i_nlink)
		minix_free_inode(inode);
}

static void minix_put_super(struct super_block *sb)
{
	int i;
	struct minix_sb_info *sbi = minix_sb(sb);

	if (!sb_rdonly(sb)) {
		if (sbi->s_version != MINIX_V3)	 /* V3 superblock has no s_state field */
			sbi->s_ms->s_state = sbi->s_mount_state;
		mark_buffer_dirty(sbi->s_sbh);
	}
	for (i = 0; i < sbi->s_imap_blocks; i++)
		brelse(sbi->s_imap[i]);
	for (i = 0; i < sbi->s_zmap_blocks; i++)
		brelse(sbi->s_zmap[i]);
	brelse (sbi->s_sbh);
	kfree(sbi->s_imap);
	sb->s_fs_info = NULL;
	kfree(sbi);
}

static struct kmem_cache * minix_inode_cachep;

static struct inode *minix_alloc_inode(struct super_block *sb)
{
	struct minix_inode_info *ei;
	ei = alloc_inode_sb(sb, minix_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void minix_free_in_core_inode(struct inode *inode)
{
	kmem_cache_free(minix_inode_cachep, minix_i(inode));
}

static void init_once(void *foo)
{
	struct minix_inode_info *ei = (struct minix_inode_info *) foo;

	inode_init_once(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
	minix_inode_cachep = kmem_cache_create("minix_inode_cache",
					     sizeof(struct minix_inode_info),
					     0, (SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD|SLAB_ACCOUNT),
					     init_once);
	if (minix_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(minix_inode_cachep);
}

static const struct super_operations minix_sops = {
	.alloc_inode	= minix_alloc_inode,
	.free_inode	= minix_free_in_core_inode,
	.write_inode	= minix_write_inode,
	.evict_inode	= minix_evict_inode,
	.put_super	= minix_put_super,
	.statfs		= minix_statfs,
	.remount_fs	= minix_remount,
};

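/*
 * A read-write mount keeps the on-disk MINIX_VALID_FS flag cleared and
 * restores it only when the filesystem goes away cleanly (umount or
 * remount read-only), so an unclean shutdown can be noticed by fsck.
 * V3 superblocks have no s_state field, so only the in-core
 * s_mount_state is tracked for them.
 */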
static int minix_remount (struct super_block * sb, int * flags, char * data)
{
	struct minix_sb_info * sbi = minix_sb(sb);
	struct minix_super_block * ms;

	sync_filesystem(sb);
	ms = sbi->s_ms;
	if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
		return 0;
	if (*flags & SB_RDONLY) {
		if (ms->s_state & MINIX_VALID_FS ||
		    !(sbi->s_mount_state & MINIX_VALID_FS))
			return 0;
		/* Mounting a rw partition read-only. */
		if (sbi->s_version != MINIX_V3)
			ms->s_state = sbi->s_mount_state;
		mark_buffer_dirty(sbi->s_sbh);
	} else {
		/* Mounting a read-only partition read-write. */
		if (sbi->s_version != MINIX_V3) {
			sbi->s_mount_state = ms->s_state;
			ms->s_state &= ~MINIX_VALID_FS;
		} else {
			sbi->s_mount_state = MINIX_VALID_FS;
		}
		mark_buffer_dirty(sbi->s_sbh);

		if (!(sbi->s_mount_state & MINIX_VALID_FS))
			printk("MINIX-fs warning: remounting unchecked fs, "
				"running fsck is recommended\n");
		else if ((sbi->s_mount_state & MINIX_ERROR_FS))
			printk("MINIX-fs warning: remounting fs with errors, "
				"running fsck is recommended\n");
	}
	return 0;
}

static bool minix_check_superblock(struct super_block *sb)
{
	struct minix_sb_info *sbi = minix_sb(sb);

	if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
		return false;

	/*
	 * s_max_size must not exceed the block mapping limitation.  This check
	 * is only needed for V1 filesystems, since V2/V3 support an extra level
	 * of indirect blocks which places the limit well above U32_MAX.
	 */
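	/*
	 * For V1 that limit is 7 direct + 512 indirect + 512*512
	 * double-indirect zones of BLOCK_SIZE (1024) bytes, i.e.
	 * (7 + 512 + 262144) * 1024 = 268,966,912 bytes, just over 256 MiB.
	 */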
	if (sbi->s_version == MINIX_V1 &&
	    sb->s_maxbytes > (7 + 512 + 512*512) * BLOCK_SIZE)
		return false;

	return true;
}

static int minix_fill_super(struct super_block *s, void *data, int silent)
{
	struct buffer_head *bh;
	struct buffer_head **map;
	struct minix_super_block *ms;
	struct minix3_super_block *m3s = NULL;
	unsigned long i, block;
	struct inode *root_inode;
	struct minix_sb_info *sbi;
	int ret = -EINVAL;

	sbi = kzalloc(sizeof(struct minix_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	s->s_fs_info = sbi;

	BUILD_BUG_ON(32 != sizeof (struct minix_inode));
	BUILD_BUG_ON(64 != sizeof(struct minix2_inode));

	if (!sb_set_blocksize(s, BLOCK_SIZE))
		goto out_bad_hblock;

	if (!(bh = sb_bread(s, 1)))
		goto out_bad_sb;

	ms = (struct minix_super_block *) bh->b_data;
	sbi->s_ms = ms;
	sbi->s_sbh = bh;
	sbi->s_mount_state = ms->s_state;
	sbi->s_ninodes = ms->s_ninodes;
	sbi->s_nzones = ms->s_nzones;
	sbi->s_imap_blocks = ms->s_imap_blocks;
	sbi->s_zmap_blocks = ms->s_zmap_blocks;
	sbi->s_firstdatazone = ms->s_firstdatazone;
	sbi->s_log_zone_size = ms->s_log_zone_size;
	s->s_maxbytes = ms->s_max_size;
	s->s_magic = ms->s_magic;
	if (s->s_magic == MINIX_SUPER_MAGIC) {
		sbi->s_version = MINIX_V1;
		sbi->s_dirsize = 16;
		sbi->s_namelen = 14;
		s->s_max_links = MINIX_LINK_MAX;
	} else if (s->s_magic == MINIX_SUPER_MAGIC2) {
		sbi->s_version = MINIX_V1;
		sbi->s_dirsize = 32;
		sbi->s_namelen = 30;
		s->s_max_links = MINIX_LINK_MAX;
	} else if (s->s_magic == MINIX2_SUPER_MAGIC) {
		sbi->s_version = MINIX_V2;
		sbi->s_nzones = ms->s_zones;
		sbi->s_dirsize = 16;
		sbi->s_namelen = 14;
		s->s_max_links = MINIX2_LINK_MAX;
	} else if (s->s_magic == MINIX2_SUPER_MAGIC2) {
		sbi->s_version = MINIX_V2;
		sbi->s_nzones = ms->s_zones;
		sbi->s_dirsize = 32;
		sbi->s_namelen = 30;
		s->s_max_links = MINIX2_LINK_MAX;
	} else if ( *(__u16 *)(bh->b_data + 24) == MINIX3_SUPER_MAGIC) {
		m3s = (struct minix3_super_block *) bh->b_data;
		s->s_magic = m3s->s_magic;
		sbi->s_imap_blocks = m3s->s_imap_blocks;
		sbi->s_zmap_blocks = m3s->s_zmap_blocks;
		sbi->s_firstdatazone = m3s->s_firstdatazone;
		sbi->s_log_zone_size = m3s->s_log_zone_size;
		s->s_maxbytes = m3s->s_max_size;
		sbi->s_ninodes = m3s->s_ninodes;
		sbi->s_nzones = m3s->s_zones;
		sbi->s_dirsize = 64;
		sbi->s_namelen = 60;
		sbi->s_version = MINIX_V3;
		sbi->s_mount_state = MINIX_VALID_FS;
		sb_set_blocksize(s, m3s->s_blocksize);
		s->s_max_links = MINIX2_LINK_MAX;
	} else
		goto out_no_fs;

	if (!minix_check_superblock(s))
		goto out_illegal_sb;

	/*
	 * Allocate the buffer map to keep the superblock small.
	 */
	i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh);
	map = kzalloc(i, GFP_KERNEL);
	if (!map)
		goto out_no_map;
	sbi->s_imap = &map[0];
	sbi->s_zmap = &map[sbi->s_imap_blocks];

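	/*
	 * The bitmaps sit right after the boot block (0) and the superblock
	 * (1): first s_imap_blocks of inode bitmap, then s_zmap_blocks of
	 * zone bitmap.  They are read once here and kept pinned until
	 * minix_put_super() releases them.
	 */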
	block=2;
	for (i=0 ; i < sbi->s_imap_blocks ; i++) {
		if (!(sbi->s_imap[i]=sb_bread(s, block)))
			goto out_no_bitmap;
		block++;
	}
	for (i=0 ; i < sbi->s_zmap_blocks ; i++) {
		if (!(sbi->s_zmap[i]=sb_bread(s, block)))
			goto out_no_bitmap;
		block++;
	}

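	/*
	 * Inode 0 and zone 0 are reserved as "none" markers, so flag bit 0
	 * of both in-core bitmaps as used and never hand them out.
	 */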
	minix_set_bit(0,sbi->s_imap[0]->b_data);
	minix_set_bit(0,sbi->s_zmap[0]->b_data);

	/* Apparently minix can create filesystems that allocate more blocks for
	 * the bitmaps than needed.  We simply ignore that, but verify it didn't
	 * create one with not enough blocks and bail out if so.
	 */
	block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize);
	if (sbi->s_imap_blocks < block) {
		printk("MINIX-fs: file system does not have enough "
				"imap blocks allocated.  Refusing to mount.\n");
		goto out_no_bitmap;
	}

	block = minix_blocks_needed(
			(sbi->s_nzones - sbi->s_firstdatazone + 1),
			s->s_blocksize);
	if (sbi->s_zmap_blocks < block) {
		printk("MINIX-fs: file system does not have enough "
				"zmap blocks allocated.  Refusing to mount.\n");
		goto out_no_bitmap;
	}

	/* set up enough so that it can read an inode */
	s->s_op = &minix_sops;
	s->s_time_min = 0;
	s->s_time_max = U32_MAX;
	root_inode = minix_iget(s, MINIX_ROOT_INO);
	if (IS_ERR(root_inode)) {
		ret = PTR_ERR(root_inode);
		goto out_no_root;
	}

	ret = -ENOMEM;
	s->s_root = d_make_root(root_inode);
	if (!s->s_root)
		goto out_no_root;

	if (!sb_rdonly(s)) {
		if (sbi->s_version != MINIX_V3) /* V3 superblock has no s_state field */
			ms->s_state &= ~MINIX_VALID_FS;
		mark_buffer_dirty(bh);
	}
	if (!(sbi->s_mount_state & MINIX_VALID_FS))
		printk("MINIX-fs: mounting unchecked file system, "
			"running fsck is recommended\n");
	else if (sbi->s_mount_state & MINIX_ERROR_FS)
		printk("MINIX-fs: mounting file system with errors, "
			"running fsck is recommended\n");

	return 0;

out_no_root:
	if (!silent)
		printk("MINIX-fs: get root inode failed\n");
	goto out_freemap;

out_no_bitmap:
	printk("MINIX-fs: bad superblock or unable to read bitmaps\n");
out_freemap:
	for (i = 0; i < sbi->s_imap_blocks; i++)
		brelse(sbi->s_imap[i]);
	for (i = 0; i < sbi->s_zmap_blocks; i++)
		brelse(sbi->s_zmap[i]);
	kfree(sbi->s_imap);
	goto out_release;

out_no_map:
	ret = -ENOMEM;
	if (!silent)
		printk("MINIX-fs: can't allocate map\n");
	goto out_release;

out_illegal_sb:
	if (!silent)
		printk("MINIX-fs: bad superblock\n");
	goto out_release;

out_no_fs:
	if (!silent)
		printk("VFS: Can't find a Minix filesystem V1 | V2 | V3 "
		       "on device %s.\n", s->s_id);
out_release:
	brelse(bh);
	goto out;

out_bad_hblock:
	printk("MINIX-fs: blocksize too small for device\n");
	goto out;

out_bad_sb:
	printk("MINIX-fs: unable to read superblock\n");
out:
	s->s_fs_info = NULL;
	kfree(sbi);
	return ret;
}

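/*
 * statfs reports only the data area: the zones from s_firstdatazone
 * onwards, scaled to filesystem blocks via s_log_zone_size.  The boot
 * block, superblock, bitmaps and inode table are not counted.
 */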
static int minix_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct minix_sb_info *sbi = minix_sb(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	buf->f_type = sb->s_magic;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size;
	buf->f_bfree = minix_count_free_blocks(sb);
	buf->f_bavail = buf->f_bfree;
	buf->f_files = sbi->s_ninodes;
	buf->f_ffree = minix_count_free_inodes(sb);
	buf->f_namelen = sbi->s_namelen;
	buf->f_fsid = u64_to_fsid(id);

	return 0;
}

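/*
 * Block-mapping dispatch: V1 inodes use 16-bit zone pointers, while V2
 * and V3 share the 32-bit zone layout, so everything that is not V1
 * goes through the V2 helpers.
 */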
static int minix_get_block(struct inode *inode, sector_t block,
		    struct buffer_head *bh_result, int create)
{
	if (INODE_VERSION(inode) == MINIX_V1)
		return V1_minix_get_block(inode, block, bh_result, create);
	else
		return V2_minix_get_block(inode, block, bh_result, create);
}

static int minix_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, minix_get_block);
}

static int minix_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, minix_get_block);
}

int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, minix_get_block);
}

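/*
 * A failed write may already have instantiated blocks beyond the old
 * end of file; drop the straggling page cache and let minix_truncate()
 * free those blocks again.
 */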
static void minix_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		minix_truncate(inode);
	}
}

static int minix_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, pagep, minix_get_block);
	if (unlikely(ret))
		minix_write_failed(mapping, pos + len);

	return ret;
}

static sector_t minix_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,minix_get_block);
}

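/*
 * All data paths go through the generic buffer_head helpers: reads via
 * block_read_full_folio(), batched writeback via mpage_writepages(),
 * and ->bmap to keep the legacy FIBMAP ioctl working.
 */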
static const struct address_space_operations minix_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = minix_read_folio,
	.writepages = minix_writepages,
	.write_begin = minix_write_begin,
	.write_end = generic_write_end,
	.migrate_folio = buffer_migrate_folio,
	.bmap = minix_bmap,
	.direct_IO = noop_direct_IO
};

static const struct inode_operations minix_symlink_inode_operations = {
	.get_link	= page_get_link,
	.getattr	= minix_getattr,
};

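/*
 * Wire up the inode, file and address-space operations according to the
 * file type in i_mode.  Anything that is not a regular file, directory
 * or symlink is a special inode (device node, FIFO or socket) and is
 * handed to init_special_inode() together with the device number.
 */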
void minix_set_inode(struct inode *inode, dev_t rdev)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &minix_file_inode_operations;
		inode->i_fop = &minix_file_operations;
		inode->i_mapping->a_ops = &minix_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &minix_dir_inode_operations;
		inode->i_fop = &minix_dir_operations;
		inode->i_mapping->a_ops = &minix_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &minix_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &minix_aops;
	} else
		init_special_inode(inode, inode->i_mode, rdev);
}

/*
 * The minix V1 function to read an inode.
 */
static struct inode *V1_minix_iget(struct inode *inode)
{
	struct buffer_head * bh;
	struct minix_inode * raw_inode;
	struct minix_inode_info *minix_inode = minix_i(inode);
	int i;

	raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh);
	if (!raw_inode) {
		iget_failed(inode);
		return ERR_PTR(-EIO);
	}
	if (raw_inode->i_nlinks == 0) {
		printk("MINIX-fs: deleted inode referenced: %lu\n",
		       inode->i_ino);
		brelse(bh);
		iget_failed(inode);
		return ERR_PTR(-ESTALE);
	}
	inode->i_mode = raw_inode->i_mode;
	i_uid_write(inode, raw_inode->i_uid);
	i_gid_write(inode, raw_inode->i_gid);
	set_nlink(inode, raw_inode->i_nlinks);
	inode->i_size = raw_inode->i_size;
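	/* A V1 on-disk inode has a single timestamp; use it for mtime, atime and ctime alike. */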
	inode_set_mtime_to_ts(inode,
			      inode_set_atime_to_ts(inode, inode_set_ctime(inode, raw_inode->i_time, 0)));
	inode->i_blocks = 0;
	for (i = 0; i < 9; i++)
		minix_inode->u.i1_data[i] = raw_inode->i_zone[i];
	minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0]));
	brelse(bh);
	unlock_new_inode(inode);
	return inode;
}

/*
 * The minix V2 function to read an inode.
 */
static struct inode *V2_minix_iget(struct inode *inode)
{
	struct buffer_head * bh;
	struct minix2_inode * raw_inode;
	struct minix_inode_info *minix_inode = minix_i(inode);
	int i;

	raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh);
	if (!raw_inode) {
		iget_failed(inode);
		return ERR_PTR(-EIO);
	}
	if (raw_inode->i_nlinks == 0) {
		printk("MINIX-fs: deleted inode referenced: %lu\n",
		       inode->i_ino);
		brelse(bh);
		iget_failed(inode);
		return ERR_PTR(-ESTALE);
	}
	inode->i_mode = raw_inode->i_mode;
	i_uid_write(inode, raw_inode->i_uid);
	i_gid_write(inode, raw_inode->i_gid);
	set_nlink(inode, raw_inode->i_nlinks);
	inode->i_size = raw_inode->i_size;
	inode_set_mtime(inode, raw_inode->i_mtime, 0);
	inode_set_atime(inode, raw_inode->i_atime, 0);
	inode_set_ctime(inode, raw_inode->i_ctime, 0);
	inode->i_blocks = 0;
	for (i = 0; i < 10; i++)
		minix_inode->u.i2_data[i] = raw_inode->i_zone[i];
	minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0]));
	brelse(bh);
	unlock_new_inode(inode);
	return inode;
}

/*
 * The global function to read an inode.
 */
struct inode *minix_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	if (INODE_VERSION(inode) == MINIX_V1)
		return V1_minix_iget(inode);
	else
		return V2_minix_iget(inode);
}

/*
 * The minix V1 function to synchronize an inode.
 */
static struct buffer_head * V1_minix_update_inode(struct inode * inode)
{
	struct buffer_head * bh;
	struct minix_inode * raw_inode;
	struct minix_inode_info *minix_inode = minix_i(inode);
	int i;

	raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh);
	if (!raw_inode)
		return NULL;
	raw_inode->i_mode = inode->i_mode;
	raw_inode->i_uid = fs_high2lowuid(i_uid_read(inode));
	raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode));
	raw_inode->i_nlinks = inode->i_nlink;
	raw_inode->i_size = inode->i_size;
	raw_inode->i_time = inode_get_mtime_sec(inode);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev);
	else for (i = 0; i < 9; i++)
		raw_inode->i_zone[i] = minix_inode->u.i1_data[i];
	mark_buffer_dirty(bh);
	return bh;
}

/*
 * The minix V2 function to synchronize an inode.
 */
static struct buffer_head * V2_minix_update_inode(struct inode * inode)
{
	struct buffer_head * bh;
	struct minix2_inode * raw_inode;
	struct minix_inode_info *minix_inode = minix_i(inode);
	int i;

	raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh);
	if (!raw_inode)
		return NULL;
	raw_inode->i_mode = inode->i_mode;
	raw_inode->i_uid = fs_high2lowuid(i_uid_read(inode));
	raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode));
	raw_inode->i_nlinks = inode->i_nlink;
	raw_inode->i_size = inode->i_size;
	raw_inode->i_mtime = inode_get_mtime_sec(inode);
	raw_inode->i_atime = inode_get_atime_sec(inode);
	raw_inode->i_ctime = inode_get_ctime_sec(inode);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev);
	else for (i = 0; i < 10; i++)
		raw_inode->i_zone[i] = minix_inode->u.i2_data[i];
	mark_buffer_dirty(bh);
	return bh;
}

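/*
 * Copy the in-core inode back into its slot in the on-disk inode table.
 * For data-integrity writeback (WB_SYNC_ALL) the buffer is written out
 * synchronously and an I/O error is propagated to the caller.
 */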
static int minix_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int err = 0;
	struct buffer_head *bh;

	if (INODE_VERSION(inode) == MINIX_V1)
		bh = V1_minix_update_inode(inode);
	else
		bh = V2_minix_update_inode(inode);
	if (!bh)
		return -EIO;
	if (wbc->sync_mode == WB_SYNC_ALL && buffer_dirty(bh)) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk("IO error syncing minix inode [%s:%08lx]\n",
				inode->i_sb->s_id, inode->i_ino);
			err = -EIO;
		}
	}
	brelse (bh);
	return err;
}

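/*
 * stat->blocks is expressed in 512-byte units: V1 filesystems always
 * use 1K blocks, while V2/V3 use the block size chosen at mount time,
 * hence the two different scaling factors below.
 */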
int minix_getattr(struct mnt_idmap *idmap, const struct path *path,
		  struct kstat *stat, u32 request_mask, unsigned int flags)
{
	struct super_block *sb = path->dentry->d_sb;
	struct inode *inode = d_inode(path->dentry);

	generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
	if (INODE_VERSION(inode) == MINIX_V1)
		stat->blocks = (BLOCK_SIZE / 512) * V1_minix_blocks(stat->size, sb);
	else
		stat->blocks = (sb->s_blocksize / 512) * V2_minix_blocks(stat->size, sb);
	stat->blksize = sb->s_blocksize;
	return 0;
}

/*
 * The function that is called for file truncation.
 */
void minix_truncate(struct inode * inode)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
		return;
	if (INODE_VERSION(inode) == MINIX_V1)
		V1_minix_truncate(inode);
	else
		V2_minix_truncate(inode);
}

static struct dentry *minix_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, minix_fill_super);
}

static struct file_system_type minix_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "minix",
	.mount		= minix_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("minix");

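/*
 * Module init: create the inode cache before registering the
 * filesystem, and unwind in the reverse order on failure or unload.
 */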
static int __init init_minix_fs(void)
{
	int err = init_inodecache();
	if (err)
		goto out1;
	err = register_filesystem(&minix_fs_type);
	if (err)
		goto out;
	return 0;
out:
	destroy_inodecache();
out1:
	return err;
}

static void __exit exit_minix_fs(void)
{
	unregister_filesystem(&minix_fs_type);
	destroy_inodecache();
}

module_init(init_minix_fs)
module_exit(exit_minix_fs)
MODULE_LICENSE("GPL");