// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Copyright (C) 1996  Gertjan van Wingerde
 *	Minix V2 fs support.
 *
 *  Modified for 680x0 by Andreas Schwab
 *  Updated to filesystem version 3 by Daniel Aragones
 */

#include <linux/module.h>
#include "minix.h"
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/mpage.h>
#include <linux/vfs.h>
#include <linux/writeback.h>
#include <linux/fs_context.h>

static int minix_write_inode(struct inode *inode,
		struct writeback_control *wbc);
static int minix_statfs(struct dentry *dentry, struct kstatfs *buf);

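/*
 * Report a filesystem error against an inode: log the device, the
 * calling function and line, the inode number and the current task,
 * followed by the printf-style message.
 */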
void __minix_error_inode(struct inode *inode, const char *function,
			 unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_CRIT "minix-fs error (device %s): %s:%d: "
	       "inode #%lu: comm %s: %pV\n",
	       inode->i_sb->s_id, function, line, inode->i_ino,
	       current->comm, &vaf);
	va_end(args);
}

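/*
 * Final teardown of an in-core inode: drop its page cache, and if the
 * link count is zero also release the on-disk data (via minix_truncate()
 * after zeroing i_size) and free the on-disk inode itself.
 */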
static void minix_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	if (!inode->i_nlink) {
		inode->i_size = 0;
		minix_truncate(inode);
	}
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	if (!inode->i_nlink)
		minix_free_inode(inode);
}

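/*
 * Unmount: on a read-write mount restore the saved mount state in the
 * on-disk superblock (V1/V2 only, V3 has no s_state field) and mark it
 * dirty, then release the bitmap buffers and the superblock buffer.
 */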
static void minix_put_super(struct super_block *sb)
{
	int i;
	struct minix_sb_info *sbi = minix_sb(sb);

	if (!sb_rdonly(sb)) {
		if (sbi->s_version != MINIX_V3)	 /* s_state is now out from V3 sb */
			sbi->s_ms->s_state = sbi->s_mount_state;
		mark_buffer_dirty(sbi->s_sbh);
	}
	for (i = 0; i < sbi->s_imap_blocks; i++)
		brelse(sbi->s_imap[i]);
	for (i = 0; i < sbi->s_zmap_blocks; i++)
		brelse(sbi->s_zmap[i]);
	brelse (sbi->s_sbh);
	kfree(sbi->s_imap);
	sb->s_fs_info = NULL;
	kfree(sbi);
}

static struct kmem_cache * minix_inode_cachep;

static struct inode *minix_alloc_inode(struct super_block *sb)
{
	struct minix_inode_info *ei;
	ei = alloc_inode_sb(sb, minix_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void minix_free_in_core_inode(struct inode *inode)
{
	kmem_cache_free(minix_inode_cachep, minix_i(inode));
}

static void init_once(void *foo)
{
	struct minix_inode_info *ei = (struct minix_inode_info *) foo;

	inode_init_once(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
	minix_inode_cachep = kmem_cache_create("minix_inode_cache",
					       sizeof(struct minix_inode_info),
					       0, (SLAB_RECLAIM_ACCOUNT|
						   SLAB_ACCOUNT),
					       init_once);
	if (minix_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(minix_inode_cachep);
}

static const struct super_operations minix_sops = {
	.alloc_inode	= minix_alloc_inode,
	.free_inode	= minix_free_in_core_inode,
	.write_inode	= minix_write_inode,
	.evict_inode	= minix_evict_inode,
	.put_super	= minix_put_super,
	.statfs		= minix_statfs,
};

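/*
 * Handle remount (ro <-> rw).  Going read-only writes the saved mount
 * state back to the on-disk superblock; going read-write re-reads it and
 * clears MINIX_VALID_FS on disk so an unclean shutdown can be detected,
 * warning if the filesystem was unchecked or has recorded errors.
 */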
static int minix_reconfigure(struct fs_context *fc)
{
	struct minix_super_block * ms;
	struct super_block *sb = fc->root->d_sb;
	struct minix_sb_info * sbi = sb->s_fs_info;

	sync_filesystem(sb);
	ms = sbi->s_ms;
	if ((bool)(fc->sb_flags & SB_RDONLY) == sb_rdonly(sb))
		return 0;
	if (fc->sb_flags & SB_RDONLY) {
		if (ms->s_state & MINIX_VALID_FS ||
		    !(sbi->s_mount_state & MINIX_VALID_FS))
			return 0;
		/* Mounting a rw partition read-only. */
		if (sbi->s_version != MINIX_V3)
			ms->s_state = sbi->s_mount_state;
		mark_buffer_dirty(sbi->s_sbh);
	} else {
		/* Mount a partition which is read-only, read-write. */
		if (sbi->s_version != MINIX_V3) {
			sbi->s_mount_state = ms->s_state;
			ms->s_state &= ~MINIX_VALID_FS;
		} else {
			sbi->s_mount_state = MINIX_VALID_FS;
		}
		mark_buffer_dirty(sbi->s_sbh);

		if (!(sbi->s_mount_state & MINIX_VALID_FS))
			printk("MINIX-fs warning: remounting unchecked fs, "
				"running fsck is recommended\n");
		else if ((sbi->s_mount_state & MINIX_ERROR_FS))
			printk("MINIX-fs warning: remounting fs with errors, "
				"running fsck is recommended\n");
	}
	return 0;
}

static bool minix_check_superblock(struct super_block *sb)
{
	struct minix_sb_info *sbi = minix_sb(sb);

	if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
		return false;

	/*
	 * s_max_size must not exceed the block mapping limitation.  This check
	 * is only needed for V1 filesystems, since V2/V3 support an extra level
	 * of indirect blocks which places the limit well above U32_MAX.
	 */
	if (sbi->s_version == MINIX_V1 &&
	    sb->s_maxbytes > (7 + 512 + 512*512) * BLOCK_SIZE)
		return false;

	return true;
}

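/*
 * Mount-time setup: read block 1, identify the filesystem version from
 * the magic number, copy the superblock fields into minix_sb_info, load
 * the inode and zone bitmaps, and look up the root inode.
 */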
static int minix_fill_super(struct super_block *s, struct fs_context *fc)
{
	struct buffer_head *bh;
	struct buffer_head **map;
	struct minix_super_block *ms;
	struct minix3_super_block *m3s = NULL;
	unsigned long i, block;
	struct inode *root_inode;
	struct minix_sb_info *sbi;
	int ret = -EINVAL;
	int silent = fc->sb_flags & SB_SILENT;

	sbi = kzalloc(sizeof(struct minix_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	s->s_fs_info = sbi;

	BUILD_BUG_ON(32 != sizeof (struct minix_inode));
	BUILD_BUG_ON(64 != sizeof(struct minix2_inode));

	if (!sb_set_blocksize(s, BLOCK_SIZE))
		goto out_bad_hblock;

	if (!(bh = sb_bread(s, 1)))
		goto out_bad_sb;

	ms = (struct minix_super_block *) bh->b_data;
	sbi->s_ms = ms;
	sbi->s_sbh = bh;
	sbi->s_mount_state = ms->s_state;
	sbi->s_ninodes = ms->s_ninodes;
	sbi->s_nzones = ms->s_nzones;
	sbi->s_imap_blocks = ms->s_imap_blocks;
	sbi->s_zmap_blocks = ms->s_zmap_blocks;
	sbi->s_firstdatazone = ms->s_firstdatazone;
	sbi->s_log_zone_size = ms->s_log_zone_size;
	s->s_maxbytes = ms->s_max_size;
	s->s_magic = ms->s_magic;
	if (s->s_magic == MINIX_SUPER_MAGIC) {
		sbi->s_version = MINIX_V1;
		sbi->s_dirsize = 16;
		sbi->s_namelen = 14;
		s->s_max_links = MINIX_LINK_MAX;
	} else if (s->s_magic == MINIX_SUPER_MAGIC2) {
		sbi->s_version = MINIX_V1;
		sbi->s_dirsize = 32;
		sbi->s_namelen = 30;
		s->s_max_links = MINIX_LINK_MAX;
	} else if (s->s_magic == MINIX2_SUPER_MAGIC) {
		sbi->s_version = MINIX_V2;
		sbi->s_nzones = ms->s_zones;
		sbi->s_dirsize = 16;
		sbi->s_namelen = 14;
		s->s_max_links = MINIX2_LINK_MAX;
	} else if (s->s_magic == MINIX2_SUPER_MAGIC2) {
		sbi->s_version = MINIX_V2;
		sbi->s_nzones = ms->s_zones;
		sbi->s_dirsize = 32;
		sbi->s_namelen = 30;
		s->s_max_links = MINIX2_LINK_MAX;
	} else if ( *(__u16 *)(bh->b_data + 24) == MINIX3_SUPER_MAGIC) {
		m3s = (struct minix3_super_block *) bh->b_data;
		s->s_magic = m3s->s_magic;
		sbi->s_imap_blocks = m3s->s_imap_blocks;
		sbi->s_zmap_blocks = m3s->s_zmap_blocks;
		sbi->s_firstdatazone = m3s->s_firstdatazone;
		sbi->s_log_zone_size = m3s->s_log_zone_size;
		s->s_maxbytes = m3s->s_max_size;
		sbi->s_ninodes = m3s->s_ninodes;
		sbi->s_nzones = m3s->s_zones;
		sbi->s_dirsize = 64;
		sbi->s_namelen = 60;
		sbi->s_version = MINIX_V3;
		sbi->s_mount_state = MINIX_VALID_FS;
		sb_set_blocksize(s, m3s->s_blocksize);
		s->s_max_links = MINIX2_LINK_MAX;
	} else
		goto out_no_fs;

	if (!minix_check_superblock(s))
		goto out_illegal_sb;

	/*
	 * Allocate the buffer map to keep the superblock small.
	 */
	i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh);
	map = kzalloc(i, GFP_KERNEL);
	if (!map)
		goto out_no_map;
	sbi->s_imap = &map[0];
	sbi->s_zmap = &map[sbi->s_imap_blocks];

	block=2;
	for (i=0 ; i < sbi->s_imap_blocks ; i++) {
		if (!(sbi->s_imap[i]=sb_bread(s, block)))
			goto out_no_bitmap;
		block++;
	}
	for (i=0 ; i < sbi->s_zmap_blocks ; i++) {
		if (!(sbi->s_zmap[i]=sb_bread(s, block)))
			goto out_no_bitmap;
		block++;
	}

	minix_set_bit(0,sbi->s_imap[0]->b_data);
	minix_set_bit(0,sbi->s_zmap[0]->b_data);

	/* Apparently minix can create filesystems that allocate more blocks for
	 * the bitmaps than needed.  We simply ignore that, but verify it didn't
	 * create one with not enough blocks and bail out if so.
	 */
	block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize);
	if (sbi->s_imap_blocks < block) {
		printk("MINIX-fs: file system does not have enough "
				"imap blocks allocated.  Refusing to mount.\n");
		goto out_no_bitmap;
	}

	block = minix_blocks_needed(
			(sbi->s_nzones - sbi->s_firstdatazone + 1),
			s->s_blocksize);
	if (sbi->s_zmap_blocks < block) {
		printk("MINIX-fs: file system does not have enough "
				"zmap blocks allocated.  Refusing to mount.\n");
		goto out_no_bitmap;
	}

	/* set up enough so that it can read an inode */
	s->s_op = &minix_sops;
	s->s_time_min = 0;
	s->s_time_max = U32_MAX;
	root_inode = minix_iget(s, MINIX_ROOT_INO);
	if (IS_ERR(root_inode)) {
		ret = PTR_ERR(root_inode);
		goto out_no_root;
	}

	ret = -ENOMEM;
	s->s_root = d_make_root(root_inode);
	if (!s->s_root)
		goto out_no_root;

	if (!sb_rdonly(s)) {
		if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */
			ms->s_state &= ~MINIX_VALID_FS;
		mark_buffer_dirty(bh);
	}
	if (!(sbi->s_mount_state & MINIX_VALID_FS))
		printk("MINIX-fs: mounting unchecked file system, "
			"running fsck is recommended\n");
	else if (sbi->s_mount_state & MINIX_ERROR_FS)
		printk("MINIX-fs: mounting file system with errors, "
			"running fsck is recommended\n");

	return 0;

out_no_root:
	if (!silent)
		printk("MINIX-fs: get root inode failed\n");
	goto out_freemap;

out_no_bitmap:
	printk("MINIX-fs: bad superblock or unable to read bitmaps\n");
out_freemap:
	for (i = 0; i < sbi->s_imap_blocks; i++)
		brelse(sbi->s_imap[i]);
	for (i = 0; i < sbi->s_zmap_blocks; i++)
		brelse(sbi->s_zmap[i]);
	kfree(sbi->s_imap);
	goto out_release;

out_no_map:
	ret = -ENOMEM;
	if (!silent)
		printk("MINIX-fs: can't allocate map\n");
	goto out_release;

out_illegal_sb:
	if (!silent)
		printk("MINIX-fs: bad superblock\n");
	goto out_release;

out_no_fs:
	if (!silent)
		printk("VFS: Can't find a Minix filesystem V1 | V2 | V3 "
		       "on device %s.\n", s->s_id);
out_release:
	brelse(bh);
	goto out;

out_bad_hblock:
	printk("MINIX-fs: blocksize too small for device\n");
	goto out;

out_bad_sb:
	printk("MINIX-fs: unable to read superblock\n");
out:
	s->s_fs_info = NULL;
	kfree(sbi);
	return ret;
}

static int minix_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, minix_fill_super);
}

static const struct fs_context_operations minix_context_ops = {
	.get_tree	= minix_get_tree,
	.reconfigure	= minix_reconfigure,
};

static int minix_init_fs_context(struct fs_context *fc)
{
	fc->ops = &minix_context_ops;

	return 0;
}

static int minix_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct minix_sb_info *sbi = minix_sb(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	buf->f_type = sb->s_magic;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size;
	buf->f_bfree = minix_count_free_blocks(sb);
	buf->f_bavail = buf->f_bfree;
	buf->f_files = sbi->s_ninodes;
	buf->f_ffree = minix_count_free_inodes(sb);
	buf->f_namelen = sbi->s_namelen;
	buf->f_fsid = u64_to_fsid(id);

	return 0;
}

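/*
 * Map a logical file block to a device block.  V1 inodes use the 16-bit
 * zone layout; V2 and V3 share the 32-bit layout, so everything that is
 * not V1 goes through the V2 routines.
 */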
static int minix_get_block(struct inode *inode, sector_t block,
		struct buffer_head *bh_result, int create)
{
	if (INODE_VERSION(inode) == MINIX_V1)
		return V1_minix_get_block(inode, block, bh_result, create);
	else
		return V2_minix_get_block(inode, block, bh_result, create);
}

static int minix_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, minix_get_block);
}

static int minix_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, minix_get_block);
}

int minix_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
{
	return __block_write_begin(folio, pos, len, minix_get_block);
}

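/*
 * A failed or short write may have instantiated blocks beyond the old
 * i_size; trim the page cache and the block mappings back to i_size.
 */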
static void minix_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		minix_truncate(inode);
	}
}

static int minix_write_begin(const struct kiocb *iocb,
			     struct address_space *mapping,
			     loff_t pos, unsigned len,
			     struct folio **foliop, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, foliop, minix_get_block);
	if (unlikely(ret))
		minix_write_failed(mapping, pos + len);

	return ret;
}

static sector_t minix_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,minix_get_block);
}

static const struct address_space_operations minix_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= minix_read_folio,
	.writepages	= minix_writepages,
	.write_begin	= minix_write_begin,
	.write_end	= generic_write_end,
	.migrate_folio	= buffer_migrate_folio,
	.bmap		= minix_bmap,
	.direct_IO	= noop_direct_IO
};

static const struct inode_operations minix_symlink_inode_operations = {
	.get_link	= page_get_link,
	.getattr	= minix_getattr,
};

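/*
 * Install the inode, file and address-space operations that match the
 * file type; device nodes, FIFOs and sockets are handed to
 * init_special_inode(), anything else is marked bad.
 */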
void minix_set_inode(struct inode *inode, dev_t rdev)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &minix_file_inode_operations;
		inode->i_fop = &minix_file_operations;
		inode->i_mapping->a_ops = &minix_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &minix_dir_inode_operations;
		inode->i_fop = &minix_dir_operations;
		inode->i_mapping->a_ops = &minix_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &minix_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &minix_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
		   S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		init_special_inode(inode, inode->i_mode, rdev);
	} else {
		printk(KERN_DEBUG "MINIX-fs: Invalid file type 0%04o for inode %lu.\n",
		       inode->i_mode, inode->i_ino);
		make_bad_inode(inode);
	}
}

/*
 * The minix V1 function to read an inode.
 */
static struct inode *V1_minix_iget(struct inode *inode)
{
	struct buffer_head * bh;
	struct minix_inode * raw_inode;
	struct minix_inode_info *minix_inode = minix_i(inode);
	int i;

	raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh);
	if (!raw_inode) {
		iget_failed(inode);
		return ERR_PTR(-EIO);
	}
	if (raw_inode->i_nlinks == 0) {
		printk("MINIX-fs: deleted inode referenced: %lu\n",
		       inode->i_ino);
		brelse(bh);
		iget_failed(inode);
		return ERR_PTR(-ESTALE);
	}
	inode->i_mode = raw_inode->i_mode;
	i_uid_write(inode, raw_inode->i_uid);
	i_gid_write(inode, raw_inode->i_gid);
	set_nlink(inode, raw_inode->i_nlinks);
	inode->i_size = raw_inode->i_size;
	inode_set_mtime_to_ts(inode,
			inode_set_atime_to_ts(inode, inode_set_ctime(inode, raw_inode->i_time, 0)));
	inode->i_blocks = 0;
	for (i = 0; i < 9; i++)
		minix_inode->u.i1_data[i] = raw_inode->i_zone[i];
	minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0]));
	brelse(bh);
	unlock_new_inode(inode);
	return inode;
}

/*
 * The minix V2 function to read an inode.
 */
static struct inode *V2_minix_iget(struct inode *inode)
{
	struct buffer_head * bh;
	struct minix2_inode * raw_inode;
	struct minix_inode_info *minix_inode = minix_i(inode);
	int i;

	raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh);
	if (!raw_inode) {
		iget_failed(inode);
		return ERR_PTR(-EIO);
	}
	if (raw_inode->i_nlinks == 0) {
		printk("MINIX-fs: deleted inode referenced: %lu\n",
		       inode->i_ino);
		brelse(bh);
		iget_failed(inode);
		return ERR_PTR(-ESTALE);
	}
	inode->i_mode = raw_inode->i_mode;
	i_uid_write(inode, raw_inode->i_uid);
	i_gid_write(inode, raw_inode->i_gid);
	set_nlink(inode, raw_inode->i_nlinks);
	inode->i_size = raw_inode->i_size;
	inode_set_mtime(inode, raw_inode->i_mtime, 0);
	inode_set_atime(inode, raw_inode->i_atime, 0);
	inode_set_ctime(inode, raw_inode->i_ctime, 0);
	inode->i_blocks = 0;
	for (i = 0; i < 10; i++)
		minix_inode->u.i2_data[i] = raw_inode->i_zone[i];
	minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0]));
	brelse(bh);
	unlock_new_inode(inode);
	return inode;
}

/*
 * The global function to read an inode.
 */
struct inode *minix_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode_state_read_once(inode) & I_NEW))
		return inode;

	if (INODE_VERSION(inode) == MINIX_V1)
		return V1_minix_iget(inode);
	else
		return V2_minix_iget(inode);
}

/*
 * The minix V1 function to synchronize an inode.
 */
static struct buffer_head * V1_minix_update_inode(struct inode * inode)
{
	struct buffer_head * bh;
	struct minix_inode * raw_inode;
	struct minix_inode_info *minix_inode = minix_i(inode);
	int i;

	raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh);
	if (!raw_inode)
		return NULL;
	raw_inode->i_mode = inode->i_mode;
	raw_inode->i_uid = fs_high2lowuid(i_uid_read(inode));
	raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode));
	raw_inode->i_nlinks = inode->i_nlink;
	raw_inode->i_size = inode->i_size;
	raw_inode->i_time = inode_get_mtime_sec(inode);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev);
	else for (i = 0; i < 9; i++)
		raw_inode->i_zone[i] = minix_inode->u.i1_data[i];
	mark_buffer_dirty(bh);
	return bh;
}

/*
 * The minix V2 function to synchronize an inode.
 */
static struct buffer_head * V2_minix_update_inode(struct inode * inode)
{
	struct buffer_head * bh;
	struct minix2_inode * raw_inode;
	struct minix_inode_info *minix_inode = minix_i(inode);
	int i;

	raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh);
	if (!raw_inode)
		return NULL;
	raw_inode->i_mode = inode->i_mode;
	raw_inode->i_uid = fs_high2lowuid(i_uid_read(inode));
	raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode));
	raw_inode->i_nlinks = inode->i_nlink;
	raw_inode->i_size = inode->i_size;
	raw_inode->i_mtime = inode_get_mtime_sec(inode);
	raw_inode->i_atime = inode_get_atime_sec(inode);
	raw_inode->i_ctime = inode_get_ctime_sec(inode);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev);
	else for (i = 0; i < 10; i++)
		raw_inode->i_zone[i] = minix_inode->u.i2_data[i];
	mark_buffer_dirty(bh);
	return bh;
}

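/*
 * Write an in-core inode back to its on-disk slot.  For data-integrity
 * writeback (WB_SYNC_ALL) the buffer is synced immediately and an I/O
 * error is reported if it did not reach the disk.
 */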
static int minix_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int err = 0;
	struct buffer_head *bh;

	if (INODE_VERSION(inode) == MINIX_V1)
		bh = V1_minix_update_inode(inode);
	else
		bh = V2_minix_update_inode(inode);
	if (!bh)
		return -EIO;
	if (wbc->sync_mode == WB_SYNC_ALL && buffer_dirty(bh)) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk("IO error syncing minix inode [%s:%08lx]\n",
				inode->i_sb->s_id, inode->i_ino);
			err = -EIO;
		}
	}
	brelse (bh);
	return err;
}

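/*
 * stat(): compute the block count from the file size via the V1/V2
 * block-mapping helpers rather than inode->i_blocks, which this
 * filesystem does not maintain.
 */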
int minix_getattr(struct mnt_idmap *idmap, const struct path *path,
		  struct kstat *stat, u32 request_mask, unsigned int flags)
{
	struct super_block *sb = path->dentry->d_sb;
	struct inode *inode = d_inode(path->dentry);

	generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
	if (INODE_VERSION(inode) == MINIX_V1)
		stat->blocks = (BLOCK_SIZE / 512) * V1_minix_blocks(stat->size, sb);
	else
		stat->blocks = (sb->s_blocksize / 512) * V2_minix_blocks(stat->size, sb);
	stat->blksize = sb->s_blocksize;
	return 0;
}

/*
 * The function that is called for file truncation.
 */
void minix_truncate(struct inode * inode)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
		return;
	if (INODE_VERSION(inode) == MINIX_V1)
		V1_minix_truncate(inode);
	else
		V2_minix_truncate(inode);
}

static struct file_system_type minix_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "minix",
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
	.init_fs_context = minix_init_fs_context,
};
MODULE_ALIAS_FS("minix");

static int __init init_minix_fs(void)
{
	int err = init_inodecache();
	if (err)
		goto out1;
	err = register_filesystem(&minix_fs_type);
	if (err)
		goto out;
	return 0;
out:
	destroy_inodecache();
out1:
	return err;
}

static void __exit exit_minix_fs(void)
{
	unregister_filesystem(&minix_fs_type);
	destroy_inodecache();
}

module_init(init_minix_fs)
module_exit(exit_minix_fs)
MODULE_DESCRIPTION("Minix file system");
MODULE_LICENSE("GPL");