/linux/fs/f2fs/

super.c
  in f2fs_build_fault_attr():
     68  int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
     71  struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
     89  f2fs_info(sbi,
  in f2fs_printk():
    271  void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate,
    285  KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
    288  KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
  in limit_reserve_root():
    332  static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
    334  block_t limit = min((sbi->user_block_count >> 3),
    335  sbi->user_block_count - sbi->reserved_blocks);
    338  if (test_opt(sbi, RESERVE_ROOT) &&
  [all …]

checkpoint.c
  in f2fs_stop_checkpoint():
     29  void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io,
     32  f2fs_build_fault_attr(sbi, 0, 0);
     34  f2fs_flush_merged_writes(sbi);
     35  f2fs_handle_critical_error(sbi, reason, end_io);
  in f2fs_grab_meta_page():
     41  struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
     43  struct address_space *mapping = META_MAPPING(sbi);
  in __get_meta_page():
     57  static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
     60  struct address_space *mapping = META_MAPPING(sbi);
     63  .sbi = sbi,
     93  f2fs_update_iostat(sbi, NULL, FS_META_READ_IO, F2FS_BLKSIZE);
  [all …]

segment.c
  in f2fs_need_SSR():
    171  bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
    173  int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
    174  int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
    175  int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
    177  if (f2fs_lfs_mode(sbi))
    179  if (sbi->gc_mode == GC_URGENT_HIGH)
    181  if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
    184  return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
    185  SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
  in __replace_atomic_write_block():
    221  struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  [all …]
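
The f2fs_need_SSR() hit above decides whether to fall back to slack-space recycling by comparing the free sections against the sections needed for dirty node, dentry and inode-metadata pages plus a tunable floor and a reserve. Below is a rough user-space sketch of that comparison; the struct, field names and the numbers are illustrative stand-ins, not kernel helpers.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the counters; in the kernel these come
     * from the f2fs superblock info (sbi) at run time. */
    struct ssr_inputs {
            int free_sections;      /* sections still free in the main area */
            int dirty_node_secs;    /* sections worth of dirty node pages */
            int dirty_dent_secs;    /* sections worth of dirty dentry pages */
            int dirty_imeta_secs;   /* sections worth of dirty inode metadata */
            int min_ssr_sections;   /* tunable floor before SSR kicks in */
            int reserved_sections;  /* sections kept back as reserve */
    };

    /* Mirrors the shape of the check in f2fs_need_SSR(): switch to SSR once
     * free space no longer covers the pending dirty data plus the reserves. */
    static bool need_ssr(const struct ssr_inputs *in)
    {
            int needed = in->dirty_node_secs + 2 * in->dirty_dent_secs +
                         in->dirty_imeta_secs + in->min_ssr_sections +
                         in->reserved_sections;
            return in->free_sections <= needed;
    }

    int main(void)
    {
            struct ssr_inputs in = {
                    .free_sections = 12, .dirty_node_secs = 3,
                    .dirty_dent_secs = 2, .dirty_imeta_secs = 1,
                    .min_ssr_sections = 2, .reserved_sections = 4,
            };
            /* 12 <= 3 + 2*2 + 1 + 2 + 4 = 14, so SSR would be chosen here. */
            printf("need SSR: %s\n", need_ssr(&in) ? "yes" : "no");
            return 0;
    }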

segment.h
  in sanity_check_seg_type():
     29  static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
     32  f2fs_bug_on(sbi, seg_type >= NR_PERSISTENT_LOG);
     39  #define IS_CURSEG(sbi, seg) \
     40  (((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \
     41  ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \
     42  ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \
     43  ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \
     44  ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \
     45  ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno) || \
     46  ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno) || \
  [all …]

gc.c
  in gc_thread_func():
     33  struct f2fs_sb_info *sbi = data;
     34  struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
     35  wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
     36  wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
     55  if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
     62  if (f2fs_readonly(sbi->sb)) {
     63  stat_other_skip_bggc_count(sbi);
     69  if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
     71  stat_other_skip_bggc_count(sbi);
     75  if (time_to_inject(sbi, FAULT_CHECKPOINT))
  [all …]

iostat.c
  in iostat_get_avg_bytes():
     20  static inline unsigned long long iostat_get_avg_bytes(struct f2fs_sb_info *sbi,
     23  return sbi->iostat_count[type] ? div64_u64(sbi->iostat_bytes[type],
     24  sbi->iostat_count[type]) : 0;
     29  name":", sbi->iostat_bytes[type], \
     30  sbi->iostat_count[type], \
     31  iostat_get_avg_bytes(sbi, type))
  in iostat_info_seq_show():
     36  struct f2fs_sb_info *sbi = F2FS_SB(sb);
     38  if (!sbi->iostat_enable)
  in __record_iostat_latency():
     88  static inline void __record_iostat_latency(struct f2fs_sb_info *sbi)
     92  struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
  [all …]
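
iostat_get_avg_bytes() above is just a division guarded against a zero count. A tiny illustration of the same guard in plain C, using ordinary 64-bit division rather than the kernel's div64_u64() helper:

    #include <stdio.h>

    /* Average bytes per request, returning 0 instead of dividing by zero,
     * mirroring the shape of iostat_get_avg_bytes(). */
    static unsigned long long avg_bytes(unsigned long long bytes,
                                        unsigned long long count)
    {
            return count ? bytes / count : 0;
    }

    int main(void)
    {
            printf("%llu\n", avg_bytes(4096ULL * 37, 37));  /* 4096 */
            printf("%llu\n", avg_bytes(0, 0));              /* 0, no divide-by-zero */
            return 0;
    }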

debug.c
  in f2fs_update_sit_info():
     32  void f2fs_update_sit_info(struct f2fs_sb_info *sbi)
     34  struct f2fs_stat_info *si = F2FS_STAT(sbi);
     42  blks_per_sec = CAP_BLKS_PER_SEC(sbi);
     44  for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
     45  vblocks = get_valid_blocks(sbi, segno, true);
     54  dist = div_u64(MAIN_SECS(sbi) * hblks_per_sec * hblks_per_sec, 100);
  in update_general_status():
     63  static void update_general_status(struct f2fs_sb_info *sbi)
     65  struct f2fs_stat_info *si = F2FS_STAT(sbi);
     66  struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
     77  struct extent_tree_info *eti = &sbi->extent_tree[i];
  [all …]

sysfs.c
     56  ssize_t (*show)(struct f2fs_attr *a, struct f2fs_sb_info *sbi, char *buf);
     57  ssize_t (*store)(struct f2fs_attr *a, struct f2fs_sb_info *sbi,
     65  struct f2fs_sb_info *sbi, char *buf);
  in __struct_ptr():
     67  static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
     70  return (unsigned char *)sbi->gc_thread;
     72  return (unsigned char *)SM_I(sbi);
     74  return (unsigned char *)SM_I(sbi)->dcc_info;
     76  return (unsigned char *)NM_I(sbi);
     78  return (unsigned char *)sbi;
     82  return (unsigned char *)&F2FS_OPTION(sbi).fault_info;
  [all …]

node.c
  in f2fs_check_nid_range():
     33  int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
     35  if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
     36  set_sbi_flag(sbi, SBI_NEED_FSCK);
     37  f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
     39  f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
  in f2fs_available_free_memory():
     45  bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
     47  struct f2fs_nm_info *nm_i = NM_I(sbi);
     48  struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
     73  if (excess_cached_nats(sbi))
     76  if (sbi->sb->s_bdi->wb.dirty_exceeded)
  [all …]

f2fs.h
     35  #define f2fs_bug_on(sbi, condition) BUG_ON(condition)
     37  #define f2fs_bug_on(sbi, condition) \
     40  set_sbi_flag(sbi, SBI_NEED_FSCK); \
    119  #define F2FS_OPTION(sbi) ((sbi)->mount_opt)
    120  #define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
    121  #define set_opt(sbi, option) (F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
    122  #define test_opt(sbi, option) (F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
    219  #define F2FS_HAS_FEATURE(sbi, mask) __F2FS_HAS_FEATURE(sbi->raw_super, mask)
    670  struct list_head list; /* node in global extent list of sbi */
    679  struct list_head list; /* to be used by sbi->zombie_list */
  [all …]
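
The F2FS_OPTION()/set_opt()/clear_opt()/test_opt() hits above are a plain bit-flag scheme over one mount-options word, with token pasting turning the option name into its flag constant. A self-contained sketch of the same pattern follows; the flag value and all names here are generic illustrations, not f2fs's actual F2FS_MOUNT_* definitions.

    #include <stdio.h>

    /* Illustrative option bit; the kernel defines F2FS_MOUNT_* values like this. */
    #define MOUNT_DISCARD  0x00000004u

    struct mount_options { unsigned int opt; };
    struct sb_info { struct mount_options mount_opt; };

    /* Same shape as F2FS_OPTION()/set_opt()/clear_opt()/test_opt():
     * "set_opt(sbi, DISCARD)" pastes into an OR with MOUNT_DISCARD. */
    #define OPTION(sbi)        ((sbi)->mount_opt)
    #define set_opt(sbi, o)    (OPTION(sbi).opt |= MOUNT_##o)
    #define clear_opt(sbi, o)  (OPTION(sbi).opt &= ~MOUNT_##o)
    #define test_opt(sbi, o)   (OPTION(sbi).opt & MOUNT_##o)

    int main(void)
    {
            struct sb_info sbi = { { 0 } };

            set_opt(&sbi, DISCARD);
            printf("discard: %s\n", test_opt(&sbi, DISCARD) ? "on" : "off");  /* on */
            clear_opt(&sbi, DISCARD);
            printf("discard: %s\n", test_opt(&sbi, DISCARD) ? "on" : "off");  /* off */
            return 0;
    }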

shrinker.c
  in __count_nat_entries():
     19  static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
     21  return NM_I(sbi)->nat_cnt[RECLAIMABLE_NAT];
  in __count_free_nids():
     24  static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
     26  long count = NM_I(sbi)->nid_cnt[FREE_NID] - MAX_FREE_NIDS;
  in __count_extent_cache():
     31  static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi,
     34  struct extent_tree_info *eti = &sbi->extent_tree[type];
  in f2fs_shrink_count():
     43  struct f2fs_sb_info *sbi;
     50  sbi = list_entry(p, struct f2fs_sb_info, s_list);
     53  if (!mutex_trylock(&sbi->umount_mutex)) {
     60  count += __count_extent_cache(sbi, EX_READ);
  [all …]
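
The f2fs_shrink_count() hits walk a global list of mounted instances, skip any instance whose umount_mutex cannot be taken, and sum up the reclaimable objects of the rest. A user-space sketch of that "count under trylock" shape, with pthreads and a hand-rolled list standing in for the kernel primitives (all names invented for illustration):

    #include <pthread.h>
    #include <stdio.h>

    /* Stand-in for one mounted instance on the global list; only the fields
     * the counting pass needs are modelled. */
    struct instance {
            struct instance *next;
            pthread_mutex_t umount_mutex;   /* held while the instance unmounts */
            unsigned long reclaimable;      /* cached objects that could be freed */
    };

    /* Mirrors the shape of f2fs_shrink_count(): instances that are busy
     * unmounting are skipped rather than waited for. */
    static unsigned long shrink_count(struct instance *head)
    {
            unsigned long count = 0;

            for (struct instance *p = head; p; p = p->next) {
                    if (pthread_mutex_trylock(&p->umount_mutex) != 0)
                            continue;       /* someone is unmounting it */
                    count += p->reclaimable;
                    pthread_mutex_unlock(&p->umount_mutex);
            }
            return count;
    }

    int main(void)
    {
            struct instance b = { .next = NULL, .umount_mutex = PTHREAD_MUTEX_INITIALIZER,
                                  .reclaimable = 7 };
            struct instance a = { .next = &b, .umount_mutex = PTHREAD_MUTEX_INITIALIZER,
                                  .reclaimable = 3 };

            printf("reclaimable: %lu\n", shrink_count(&a));  /* 10 */
            return 0;
    }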

/linux/fs/sysv/

super.c
  in detected_xenix():
     48  static void detected_xenix(struct sysv_sb_info *sbi, unsigned *max_links)
     50  struct buffer_head *bh1 = sbi->s_bh1;
     51  struct buffer_head *bh2 = sbi->s_bh2;
     64  sbi->s_fic_size = XENIX_NICINOD;
     65  sbi->s_flc_size = XENIX_NICFREE;
     66  sbi->s_sbd1 = (char *)sbd1;
     67  sbi->s_sbd2 = (char *)sbd2;
     68  sbi->s_sb_fic_count = &sbd1->s_ninode;
     69  sbi->s_sb_fic_inodes = &sbd1->s_inode[0];
     70  sbi->s_sb_total_free_inodes = &sbd2->s_tinode;
  [all …]

balloc.c
  in sysv_free_block():
     45  struct sysv_sb_info * sbi = SYSV_SB(sb);
     47  sysv_zone_t *blocks = sbi->s_bcache;
     49  unsigned block = fs32_to_cpu(sbi, nr);
     56  if (sbi->s_type == FSTYPE_AFS)
     59  if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) {
     64  mutex_lock(&sbi->s_lock);
     65  count = fs16_to_cpu(sbi, *sbi->s_bcache_count);
     67  if (count > sbi->s_flc_size) {
     69  mutex_unlock(&sbi->s_lock);
     76  if (count == sbi->s_flc_size || count == 0) {
  [all …]

ialloc.c
  in sv_sb_fic_inode():
     42  struct sysv_sb_info *sbi = SYSV_SB(sb);
     44  if (sbi->s_bh1 == sbi->s_bh2)
     45  return &sbi->s_sb_fic_inodes[i];
     50  return (sysv_ino_t*)(sbi->s_sbd1 + offset);
     52  return (sysv_ino_t*)(sbi->s_sbd2 + offset);
  in sysv_raw_inode():
     59  struct sysv_sb_info *sbi = SYSV_SB(sb);
     61  int block = sbi->s_firstinodezone + sbi->s_block_base;
     63  block += (ino-1) >> sbi->s_inodes_per_block_bits;
     68  return res + ((ino-1) & sbi->s_inodes_per_block_1);
  in refill_free_cache():
     73  struct sysv_sb_info *sbi = SYSV_SB(sb);
  [all …]

inode.c
  in sysv_sync_fs():
     37  struct sysv_sb_info *sbi = SYSV_SB(sb);
     40  mutex_lock(&sbi->s_lock);
     47  old_time = fs32_to_cpu(sbi, *sbi->s_sb_time);
     48  if (sbi->s_type == FSTYPE_SYSV4) {
     49  if (*sbi->s_sb_state == cpu_to_fs32(sbi, 0x7c269d38u - old_time))
     50  *sbi->s_sb_state = cpu_to_fs32(sbi, 0x7c269d38u - time);
     51  *sbi->s_sb_time = cpu_to_fs32(sbi, time);
     52  mark_buffer_dirty(sbi->s_bh2);
     55  mutex_unlock(&sbi->s_lock);
  in sysv_remount():
     62  struct sysv_sb_info *sbi = SYSV_SB(sb);
  [all …]
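
SYSV_SB(sb), F2FS_SB(sb), EXFAT_SB(sb), OMFS_SB(sb) and the other per-filesystem helpers appearing throughout these hits all follow the same VFS convention: the filesystem hangs its private *_sb_info off the generic superblock's s_fs_info pointer and fetches it back with a trivial inline. The sketch below shows that idiom for a made-up "foo" filesystem; it assumes a kernel build context, and every "foo" name and field is invented for illustration rather than taken from any of the filesystems above.

    /* Sketch of the usual s_fs_info idiom; only the generic VFS super_block
     * and common kernel helpers are assumed. */
    #include <linux/fs.h>
    #include <linux/slab.h>
    #include <linux/mutex.h>
    #include <linux/errno.h>

    struct foo_sb_info {
            unsigned long s_nzones;         /* example private state */
            struct mutex s_lock;
    };

    /* The per-fs accessor: each filesystem above has an equivalent helper. */
    static inline struct foo_sb_info *FOO_SB(struct super_block *sb)
    {
            return sb->s_fs_info;
    }

    /* Typically done in fill_super: allocate the private info and attach it,
     * so later code can recover it from any inode via inode->i_sb. */
    int foo_attach_sb_info(struct super_block *sb)
    {
            struct foo_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);

            if (!sbi)
                    return -ENOMEM;
            mutex_init(&sbi->s_lock);
            sb->s_fs_info = sbi;
            return 0;
    }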

/linux/fs/autofs/

inode.c
  in autofs_new_ino():
     12  struct autofs_info *autofs_new_ino(struct autofs_sb_info *sbi)
     21  ino->sbi = sbi;
  in autofs_kill_sb():
     43  struct autofs_sb_info *sbi = autofs_sbi(sb);
     51  if (sbi) {
     53  autofs_catatonic_mode(sbi);
     54  put_pid(sbi->oz_pgrp);
     59  if (sbi)
     60  kfree_rcu(sbi, rcu);
  in autofs_show_options():
     65  struct autofs_sb_info *sbi = autofs_sbi(root->d_sb);
     68  if (!sbi)
  [all …]

/linux/fs/hfsplus/

super.c
  in hfsplus_system_write_inode():
    102  struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
    103  struct hfsplus_vh *vhdr = sbi->s_vhdr;
    110  tree = sbi->ext_tree;
    114  tree = sbi->cat_tree;
    124  tree = sbi->attr_tree;
    131  set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags);
  in hfsplus_sync_fs():
    178  struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
    179  struct hfsplus_vh *vhdr = sbi->s_vhdr;
    196  error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);
    197  error2 = filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
  [all …]

/linux/fs/erofs/

super.c
  in erofs_init_device():
    162  struct erofs_sb_info *sbi = EROFS_SB(sb);
    171  if (!sbi->devs->flatdev && !dif->path) {
    186  } else if (!sbi->devs->flatdev) {
    187  file = erofs_is_fileio_mode(sbi) ?
    194  if (!erofs_is_fileio_mode(sbi)) {
    206  sbi->total_blocks += dif->blocks;
  in erofs_scan_devices():
    214  struct erofs_sb_info *sbi = EROFS_SB(sb);
    221  sbi->total_blocks = sbi->primarydevice_blocks;
    222  if (!erofs_sb_has_device_table(sbi))
    227  if (sbi->devs->extra_devices &&
  [all …]

/linux/fs/exfat/

super.c
  in exfat_free_iocharset():
     28  static void exfat_free_iocharset(struct exfat_sb_info *sbi)
     30  if (sbi->options.iocharset != exfat_default_iocharset)
     31  kfree(sbi->options.iocharset);
  in exfat_put_super():
     36  struct exfat_sb_info *sbi = EXFAT_SB(sb);
     38  mutex_lock(&sbi->s_lock);
     39  exfat_free_bitmap(sbi);
     40  brelse(sbi->boot_bh);
     41  mutex_unlock(&sbi->s_lock);
  in exfat_sync_fs():
     46  struct exfat_sb_info *sbi = EXFAT_SB(sb);
     56  mutex_lock(&sbi->s_lock);
  [all …]

balloc.c
  in exfat_allocate_bitmap():
     32  struct exfat_sb_info *sbi = EXFAT_SB(sb);
     37  sbi->map_clu = le32_to_cpu(ep->dentry.bitmap.start_clu);
     39  need_map_size = ((EXFAT_DATA_CLUSTER_COUNT(sbi) - 1) / BITS_PER_BYTE)
     51  sbi->map_sectors = ((need_map_size - 1) >>
     53  sbi->vol_amap = kvmalloc_array(sbi->map_sectors,
     55  if (!sbi->vol_amap)
     58  sector = exfat_cluster_to_sector(sbi, sbi->map_clu);
     59  for (i = 0; i < sbi->map_sectors; i++) {
     60  sbi->vol_amap[i] = sb_bread(sb, sector + i);
     61  if (!sbi->vol_amap[i]) {
  [all …]

/linux/fs/omfs/

inode.c
  in omfs_bread():
     26  struct omfs_sb_info *sbi = OMFS_SB(sb);
     27  if (block >= sbi->s_num_blocks)
     30  return sb_bread(sb, clus_to_blk(sbi, block));
  in omfs_new_inode():
     39  struct omfs_sb_info *sbi = OMFS_SB(dir->i_sb);
     45  err = omfs_allocate_range(dir->i_sb, sbi->s_mirrors, sbi->s_mirrors,
     59  inode->i_size = sbi->s_sys_blocksize;
  in __omfs_write_inode():
    105  struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
    130  oi->i_head.h_body_size = cpu_to_be32(sbi->s_sys_blocksize -
    151  for (i = 1; i < sbi->s_mirrors; i++) {
  in omfs_iget():
    204  struct omfs_sb_info *sbi = OMFS_SB(sb);
  [all …]

/linux/fs/affs/

bitmap.c
  in affs_free_block():
     41  struct affs_sb_info *sbi = AFFS_SB(sb);
     49  if (block > sbi->s_partition_size)
     52  blk = block - sbi->s_reserved;
     53  bmap = blk / sbi->s_bmap_bits;
     54  bit = blk % sbi->s_bmap_bits;
     55  bm = &sbi->s_bitmap[bmap];
     57  mutex_lock(&sbi->s_bmlock);
     59  bh = sbi->s_bmap_bh;
     60  if (sbi->s_last_bmap != bmap) {
     65  sbi->s_bmap_bh = bh;
  [all …]
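
The affs_free_block() hits above map an absolute block number onto a (bitmap block, bit) pair with one subtraction, a division and a modulo. A small worked example of that mapping follows; the values chosen for s_reserved, s_bmap_bits and the block number are invented for illustration and do not describe a real AFFS volume.

    #include <stdio.h>

    int main(void)
    {
            /* Invented example geometry: 2 reserved blocks at the start of the
             * partition, 4064 bits tracked per bitmap block. */
            unsigned long s_reserved = 2;
            unsigned long s_bmap_bits = 4064;
            unsigned long block = 10000;             /* block being freed */

            /* Same arithmetic as the snippet: strip the reserved area, then
             * split into bitmap index and bit offset within that bitmap. */
            unsigned long blk = block - s_reserved;  /* 9998 */
            unsigned long bmap = blk / s_bmap_bits;  /* 9998 / 4064 = 2    */
            unsigned long bit = blk % s_bmap_bits;   /* 9998 % 4064 = 1870 */

            printf("block %lu -> bitmap %lu, bit %lu\n", block, bmap, bit);
            return 0;
    }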

super.c
  in affs_commit_super():
     35  struct affs_sb_info *sbi = AFFS_SB(sb);
     36  struct buffer_head *bh = sbi->s_root_bh;
  in affs_put_super():
     52  struct affs_sb_info *sbi = AFFS_SB(sb);
     55  cancel_delayed_work_sync(&sbi->sb_work);
  in flush_superblock():
     67  struct affs_sb_info *sbi;
     70  sbi = container_of(work, struct affs_sb_info, sb_work.work);
     71  sb = sbi->sb;
     73  spin_lock(&sbi->work_lock);
     74  sbi->work_queued = 0;
     75  spin_unlock(&sbi->work_lock);
  [all …]

/linux/fs/ufs/

cylinder.c
  in ufs_read_cylinder():
     32  struct ufs_sb_info * sbi = UFS_SB(sb);
     39  uspi = sbi->s_uspi;
     40  ucpi = sbi->s_ucpi[bitmap_nr];
     41  ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data;
     48  UCPI_UBH(ucpi)->bh[0] = sbi->s_ucg[cgno];
     52  sbi->s_cgno[bitmap_nr] = cgno;
     74  brelse (sbi->s_ucg[j]);
     75  sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
  in ufs_put_cylinder():
     85  struct ufs_sb_info * sbi = UFS_SB(sb);
     93  uspi = sbi->s_uspi;
  [all …]

/linux/fs/minix/

inode.c
  in minix_put_super():
     45  struct minix_sb_info *sbi = minix_sb(sb);
     48  if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */
     49  sbi->s_ms->s_state = sbi->s_mount_state;
     50  mark_buffer_dirty(sbi->s_sbh);
     52  for (i = 0; i < sbi->s_imap_blocks; i++)
     53  brelse(sbi->s_imap[i]);
     54  for (i = 0; i < sbi->s_zmap_blocks; i++)
     55  brelse(sbi->s_zmap[i]);
     56  brelse (sbi->s_sbh);
     57  kfree(sbi->s_imap);
  [all …]