/*
 * OMFS (as used by RIO Karma) file operations.
 * Copyright (C) 2005 Bob Copeland <me@bobcopeland.com>
 * Released under GPL v2.
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include "omfs.h"

static int omfs_sync_file(struct file *file, struct dentry *dentry,
		int datasync)
{
	struct inode *inode = dentry->d_inode;
	int err;

	err = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY))
		return err;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		return err;
	err |= omfs_sync_inode(inode);
	return err ? -EIO : 0;
}

static u32 omfs_max_extents(struct omfs_sb_info *sbi, int offset)
{
	return (sbi->s_sys_blocksize - offset -
		sizeof(struct omfs_extent)) /
		sizeof(struct omfs_extent_entry) + 1;
}

void omfs_make_empty_table(struct buffer_head *bh, int offset)
{
	struct omfs_extent *oe = (struct omfs_extent *) &bh->b_data[offset];

	oe->e_next = ~cpu_to_be64(0ULL);
	oe->e_extent_count = cpu_to_be32(1);
	oe->e_fill = cpu_to_be32(0x22);
	oe->e_entry.e_cluster = ~cpu_to_be64(0ULL);
	oe->e_entry.e_blocks = ~cpu_to_be64(0ULL);
}

int omfs_shrink_inode(struct inode *inode)
{
	struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
	struct omfs_extent *oe;
	struct omfs_extent_entry *entry;
	struct buffer_head *bh;
	u64 next, last;
	u32 extent_count;
	u32 max_extents;
	int ret;

	/* traverse the extent table, freeing each extent that lies
	 * beyond inode->i_size
	 */
	next = inode->i_ino;

	/* only support truncate -> 0 for now */
	ret = -EIO;
	if (inode->i_size != 0)
		goto out;

	bh = sb_bread(inode->i_sb, clus_to_blk(sbi, next));
	if (!bh)
		goto out;

	oe = (struct omfs_extent *)(&bh->b_data[OMFS_EXTENT_START]);
	max_extents = omfs_max_extents(sbi, OMFS_EXTENT_START);

	for (;;) {

		if (omfs_is_bad(sbi, (struct omfs_header *) bh->b_data, next))
			goto out_brelse;

		extent_count = be32_to_cpu(oe->e_extent_count);

		if (extent_count > max_extents)
			goto out_brelse;

		last = next;
		next = be64_to_cpu(oe->e_next);
		entry = &oe->e_entry;

		/* ignore last entry as it is the terminator */
		for (; extent_count > 1; extent_count--) {
			u64 start, count;
			start = be64_to_cpu(entry->e_cluster);
			count = be64_to_cpu(entry->e_blocks);

			omfs_clear_range(inode->i_sb, start, (int) count);
			entry++;
		}
		omfs_make_empty_table(bh, (char *) oe - bh->b_data);
		mark_buffer_dirty(bh);
		brelse(bh);

		if (last != inode->i_ino)
			omfs_clear_range(inode->i_sb, last, sbi->s_mirrors);

		if (next == ~0)
			break;

		bh = sb_bread(inode->i_sb, clus_to_blk(sbi, next));
		if (!bh)
			goto out;
		oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]);
		max_extents = omfs_max_extents(sbi, OMFS_EXTENT_CONT);
	}
	ret = 0;
out:
	return ret;
out_brelse:
	brelse(bh);
	return ret;
}

static void omfs_truncate(struct inode *inode)
{
	omfs_shrink_inode(inode);
	mark_inode_dirty(inode);
}
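
/*
 * Illustrative sketch (kept under #if 0, not compiled): how
 * omfs_max_extents() sizes an extent table.  The +1 accounts for the
 * entry embedded in struct omfs_extent itself.  The concrete numbers
 * below are assumptions for illustration -- a 2048-byte system block
 * and a first-block table offset of 0x1d0; real values come from the
 * superblock and omfs.h.
 */
#if 0
#include <stdio.h>

struct omfs_extent_entry { unsigned long long e_cluster, e_blocks; };
struct omfs_extent {
	unsigned long long e_next;
	unsigned int e_extent_count;
	unsigned int e_fill;
	struct omfs_extent_entry e_entry;	/* first entry, inline */
};

static unsigned max_extents(unsigned sys_blocksize, unsigned offset)
{
	/* same arithmetic as omfs_max_extents() above */
	return (sys_blocksize - offset - sizeof(struct omfs_extent)) /
		sizeof(struct omfs_extent_entry) + 1;
}

int main(void)
{
	/* (2048 - 0x1d0 - 32) / 16 + 1 == 98 entries, incl. terminator */
	printf("%u\n", max_extents(2048, 0x1d0));
	return 0;
}
#endif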

/*
 * Add new blocks to the current extent, or create new entries/continuations
 * as necessary.
 */
static int omfs_grow_extent(struct inode *inode, struct omfs_extent *oe,
			u64 *ret_block)
{
	struct omfs_extent_entry *terminator;
	struct omfs_extent_entry *entry = &oe->e_entry;
	struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
	u32 extent_count = be32_to_cpu(oe->e_extent_count);
	u64 new_block = 0;
	u32 max_count;
	int new_count;
	int ret = 0;

	/* reached the end of the extent table with no blocks mapped.
	 * there are three possibilities for adding: grow the last extent,
	 * add a new extent to the current extent table, or add a
	 * continuation inode.  the last two cases need an allocator for
	 * sbi->s_clustersize
	 */

	/* TODO: handle holes */

	/* should always have a terminator */
	if (extent_count < 1)
		return -EIO;

	/* trivially grow current extent, if next block is not taken */
	terminator = entry + extent_count - 1;
	if (extent_count > 1) {
		entry = terminator - 1;
		new_block = be64_to_cpu(entry->e_cluster) +
			be64_to_cpu(entry->e_blocks);

		if (omfs_allocate_block(inode->i_sb, new_block)) {
			entry->e_blocks =
				cpu_to_be64(be64_to_cpu(entry->e_blocks) + 1);
			terminator->e_blocks = ~(cpu_to_be64(
				be64_to_cpu(~terminator->e_blocks) + 1));
			goto out;
		}
	}
	max_count = omfs_max_extents(sbi, OMFS_EXTENT_START);

	/* TODO: add a continuation block here */
	if (be32_to_cpu(oe->e_extent_count) > max_count - 1)
		return -EIO;

	/* try to allocate a new cluster */
	ret = omfs_allocate_range(inode->i_sb, 1, sbi->s_clustersize,
		&new_block, &new_count);
	if (ret)
		goto out_fail;

	/* copy terminator down an entry */
	entry = terminator;
	terminator++;
	memcpy(terminator, entry, sizeof(struct omfs_extent_entry));

	entry->e_cluster = cpu_to_be64(new_block);
	entry->e_blocks = cpu_to_be64((u64) new_count);

	terminator->e_blocks = ~(cpu_to_be64(
		be64_to_cpu(~terminator->e_blocks) + (u64) new_count));

	/* write in new entry */
	oe->e_extent_count = cpu_to_be32(1 + be32_to_cpu(oe->e_extent_count));

out:
	*ret_block = new_block;
out_fail:
	return ret;
}
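
/*
 * Illustrative sketch (kept under #if 0, not compiled): the terminator
 * entry stores the bitwise complement of the file's total block count,
 * so growing by n blocks means complement, add n, complement again --
 * the ~(cpu_to_be64(be64_to_cpu(~...) + n)) dance above, minus the
 * endian conversions.
 */
#if 0
#include <assert.h>

/* return the new complemented total after adding n blocks */
static unsigned long long grow_total(unsigned long long term_blocks,
				unsigned long long n)
{
	return ~(~term_blocks + n);
}

int main(void)
{
	unsigned long long term = ~10ULL;	/* file currently 10 blocks */

	term = grow_total(term, 4);		/* allocate 4 more */
	assert(~term == 14);			/* total is now 14 */
	return 0;
}
#endif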

/*
 * Scans across the extent table for a given file block number.
 * If the block is not found, return 0.
 */
static sector_t find_block(struct inode *inode, struct omfs_extent_entry *ent,
			sector_t block, int count, int *left)
{
	/* count > 1 because of terminator */
	sector_t searched = 0;
	for (; count > 1; count--) {
		int numblocks = clus_to_blk(OMFS_SB(inode->i_sb),
			be64_to_cpu(ent->e_blocks));

		if (block >= searched &&
		    block < searched + numblocks) {
			/*
			 * found it at cluster + (block - searched)
			 * numblocks - (block - searched) is remainder
			 */
			*left = numblocks - (block - searched);
			return clus_to_blk(OMFS_SB(inode->i_sb),
				be64_to_cpu(ent->e_cluster)) +
				block - searched;
		}
		searched += numblocks;
		ent++;
	}
	return 0;
}

static int omfs_get_block(struct inode *inode, sector_t block,
			struct buffer_head *bh_result, int create)
{
	struct buffer_head *bh;
	sector_t next, offset;
	int ret;
	u64 new_block;
	u32 max_extents;
	int extent_count;
	struct omfs_extent *oe;
	struct omfs_extent_entry *entry;
	struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
	int max_blocks = bh_result->b_size >> inode->i_blkbits;
	int remain;

	ret = -EIO;
	bh = sb_bread(inode->i_sb, clus_to_blk(sbi, inode->i_ino));
	if (!bh)
		goto out;

	oe = (struct omfs_extent *)(&bh->b_data[OMFS_EXTENT_START]);
	max_extents = omfs_max_extents(sbi, OMFS_EXTENT_START);
	next = inode->i_ino;

	for (;;) {

		if (omfs_is_bad(sbi, (struct omfs_header *) bh->b_data, next))
			goto out_brelse;

		extent_count = be32_to_cpu(oe->e_extent_count);
		next = be64_to_cpu(oe->e_next);
		entry = &oe->e_entry;

		if (extent_count > max_extents)
			goto out_brelse;

		offset = find_block(inode, entry, block, extent_count, &remain);
		if (offset > 0) {
			ret = 0;
			map_bh(bh_result, inode->i_sb, offset);
			if (remain > max_blocks)
				remain = max_blocks;
			bh_result->b_size = (remain << inode->i_blkbits);
			goto out_brelse;
		}
		if (next == ~0)
			break;

		brelse(bh);
		bh = sb_bread(inode->i_sb, clus_to_blk(sbi, next));
		if (!bh)
			goto out;
		oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]);
		max_extents = omfs_max_extents(sbi, OMFS_EXTENT_CONT);
	}
	if (create) {
		ret = omfs_grow_extent(inode, oe, &new_block);
		if (ret == 0) {
			mark_buffer_dirty(bh);
			mark_inode_dirty(inode);
			map_bh(bh_result, inode->i_sb,
					clus_to_blk(sbi, new_block));
		}
	}
out_brelse:
	brelse(bh);
out:
	return ret;
}

static int omfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, omfs_get_block);
}

static int omfs_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, omfs_get_block);
}

static int omfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, omfs_get_block, wbc);
}

static int
omfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, omfs_get_block);
}

static int omfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags,
				pagep, fsdata, omfs_get_block);
}
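
/*
 * Illustrative sketch (kept under #if 0, not compiled): the linear
 * scan in find_block() above.  Each extent covers
 * [searched, searched + len) in file-block space; a hit maps to the
 * extent's start plus the offset into it, and *left reports how many
 * blocks remain in the extent, which omfs_get_block() uses to size
 * multi-block mappings.  Cluster-to-block scaling is omitted here.
 */
#if 0
struct ext { unsigned long long start, len; };

static unsigned long long lookup(const struct ext *e, int count,
			unsigned long long block, unsigned long long *left)
{
	unsigned long long searched = 0;

	for (; count > 1; count--, e++) {	/* last entry = terminator */
		if (block >= searched && block < searched + e->len) {
			*left = e->len - (block - searched);
			return e->start + (block - searched);
		}
		searched += e->len;
	}
	return 0;	/* not mapped */
}
#endif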

static sector_t omfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, omfs_get_block);
}

struct file_operations omfs_file_operations = {
	.llseek = generic_file_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = generic_file_aio_write,
	.mmap = generic_file_mmap,
	.fsync = omfs_sync_file,
	.splice_read = generic_file_splice_read,
};

struct inode_operations omfs_file_inops = {
	.truncate = omfs_truncate,
};

struct address_space_operations omfs_aops = {
	.readpage = omfs_readpage,
	.readpages = omfs_readpages,
	.writepage = omfs_writepage,
	.writepages = omfs_writepages,
	.sync_page = block_sync_page,
	.write_begin = omfs_write_begin,
	.write_end = generic_write_end,
	.bmap = omfs_bmap,
};
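
/*
 * A minimal sketch (kept under #if 0, not compiled) of how these three
 * tables are expected to be wired up when a regular-file inode is
 * instantiated; the actual logic lives in the omfs inode code, and the
 * helper name below is hypothetical.
 */
#if 0
static void omfs_setup_file_inode(struct inode *inode)
{
	inode->i_op = &omfs_file_inops;		/* truncate */
	inode->i_fop = &omfs_file_operations;	/* read/write/fsync */
	inode->i_mapping->a_ops = &omfs_aops;	/* page cache callbacks */
}
#endif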