// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMFS (as used by RIO Karma) file operations.
 * Copyright (C) 2005 Bob Copeland <me@bobcopeland.com>
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include "omfs.h"

static u32 omfs_max_extents(struct omfs_sb_info *sbi, int offset)
{
        return (sbi->s_sys_blocksize - offset -
                sizeof(struct omfs_extent)) /
                sizeof(struct omfs_extent_entry);
}

void omfs_make_empty_table(struct buffer_head *bh, int offset)
{
        struct omfs_extent *oe = (struct omfs_extent *) &bh->b_data[offset];

        oe->e_next = ~cpu_to_be64(0ULL);
        oe->e_extent_count = cpu_to_be32(1);
        oe->e_fill = cpu_to_be32(0x22);
        oe->e_entry[0].e_cluster = ~cpu_to_be64(0ULL);
        oe->e_entry[0].e_blocks = ~cpu_to_be64(0ULL);
}

int omfs_shrink_inode(struct inode *inode)
{
        struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
        struct omfs_extent *oe;
        struct omfs_extent_entry *entry;
        struct buffer_head *bh;
        u64 next, last;
        u32 extent_count;
        u32 max_extents;
        int ret;

        /* traverse extent table, freeing each entry that is greater
         * than inode->i_size
         */
        next = inode->i_ino;

        /* only support truncate -> 0 for now */
        ret = -EIO;
        if (inode->i_size != 0)
                goto out;

        bh = omfs_bread(inode->i_sb, next);
        if (!bh)
                goto out;

        oe = (struct omfs_extent *)(&bh->b_data[OMFS_EXTENT_START]);
        max_extents = omfs_max_extents(sbi, OMFS_EXTENT_START);

        for (;;) {

                if (omfs_is_bad(sbi, (struct omfs_header *) bh->b_data, next))
                        goto out_brelse;

                extent_count = be32_to_cpu(oe->e_extent_count);

                if (extent_count > max_extents)
                        goto out_brelse;

                last = next;
                next = be64_to_cpu(oe->e_next);
                entry = oe->e_entry;

                /* ignore last entry as it is the terminator */
                for (; extent_count > 1; extent_count--) {
                        u64 start, count;
                        start = be64_to_cpu(entry->e_cluster);
                        count = be64_to_cpu(entry->e_blocks);

                        omfs_clear_range(inode->i_sb, start, (int) count);
                        entry++;
                }
                omfs_make_empty_table(bh, (char *) oe - bh->b_data);
                mark_buffer_dirty(bh);
                brelse(bh);

                if (last != inode->i_ino)
                        omfs_clear_range(inode->i_sb, last, sbi->s_mirrors);

                if (next == ~0)
                        break;

                bh = omfs_bread(inode->i_sb, next);
                if (!bh)
                        goto out;
                oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]);
                max_extents = omfs_max_extents(sbi, OMFS_EXTENT_CONT);
        }
        ret = 0;
out:
        return ret;
out_brelse:
        brelse(bh);
        return ret;
}

static void omfs_truncate(struct inode *inode)
{
        omfs_shrink_inode(inode);
        mark_inode_dirty(inode);
}
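/*
 * Rough sketch of the on-disk extent table that the routines above and
 * below walk, as this file maintains it (see struct omfs_extent and
 * struct omfs_extent_entry in omfs.h):
 *
 *      e_next            block of the next (continuation) table, ~0 if none
 *      e_extent_count    number of entries, including the terminator
 *      e_entry[0..N-2]   { e_cluster, e_blocks } data extents
 *      e_entry[N-1]      terminator: e_cluster = ~0, e_blocks = bitwise NOT
 *                        of the file's running total block count
 *
 * Keeping the terminator's e_blocks as the complement of the running total
 * is why omfs_grow_extent() below updates it with ~(~e_blocks + count).
 */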
/*
 * Add new blocks to the current extent, or create new entries/continuations
 * as necessary.
 */
static int omfs_grow_extent(struct inode *inode, struct omfs_extent *oe,
                        u64 *ret_block)
{
        struct omfs_extent_entry *terminator;
        struct omfs_extent_entry *entry = oe->e_entry;
        struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
        u32 extent_count = be32_to_cpu(oe->e_extent_count);
        u64 new_block = 0;
        u32 max_count;
        int new_count;
        int ret = 0;

        /* reached the end of the extent table with no blocks mapped.
         * there are three possibilities for adding: grow last extent,
         * add a new extent to the current extent table, and add a
         * continuation inode.  in the last two cases we need an
         * allocator for sbi->s_clustersize
         */

        /* TODO: handle holes */

        /* should always have a terminator */
        if (extent_count < 1)
                return -EIO;

        /* trivially grow current extent, if next block is not taken */
        terminator = entry + extent_count - 1;
        if (extent_count > 1) {
                entry = terminator - 1;
                new_block = be64_to_cpu(entry->e_cluster) +
                        be64_to_cpu(entry->e_blocks);

                if (omfs_allocate_block(inode->i_sb, new_block)) {
                        be64_add_cpu(&entry->e_blocks, 1);
                        terminator->e_blocks = ~(cpu_to_be64(
                                be64_to_cpu(~terminator->e_blocks) + 1));
                        goto out;
                }
        }
        max_count = omfs_max_extents(sbi, OMFS_EXTENT_START);

        /* TODO: add a continuation block here */
        if (be32_to_cpu(oe->e_extent_count) > max_count - 1)
                return -EIO;

        /* try to allocate a new cluster */
        ret = omfs_allocate_range(inode->i_sb, 1, sbi->s_clustersize,
                &new_block, &new_count);
        if (ret)
                goto out_fail;

        /* copy terminator down an entry */
        entry = terminator;
        terminator++;
        memcpy(terminator, entry, sizeof(struct omfs_extent_entry));

        entry->e_cluster = cpu_to_be64(new_block);
        entry->e_blocks = cpu_to_be64((u64) new_count);

        terminator->e_blocks = ~(cpu_to_be64(
                be64_to_cpu(~terminator->e_blocks) + (u64) new_count));

        /* write in new entry */
        be32_add_cpu(&oe->e_extent_count, 1);

out:
        *ret_block = new_block;
out_fail:
        return ret;
}

/*
 * Scans across the extent table for a given file block number.
 * If the block is not found, return 0.
 */
static sector_t find_block(struct inode *inode, struct omfs_extent_entry *ent,
                        sector_t block, int count, int *left)
{
        /* count > 1 because of terminator */
        sector_t searched = 0;
        for (; count > 1; count--) {
                int numblocks = clus_to_blk(OMFS_SB(inode->i_sb),
                        be64_to_cpu(ent->e_blocks));

                if (block >= searched &&
                    block < searched + numblocks) {
                        /*
                         * found it at cluster + (block - searched)
                         * numblocks - (block - searched) is remainder
                         */
                        *left = numblocks - (block - searched);
                        return clus_to_blk(OMFS_SB(inode->i_sb),
                                be64_to_cpu(ent->e_cluster)) +
                                block - searched;
                }
                searched += numblocks;
                ent++;
        }
        return 0;
}
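/*
 * Illustrative numbers only: with two extents spanning 8 and 16 blocks
 * (after clus_to_blk() scaling), find_block() above looks up file block 10
 * by skipping the first extent (searched becomes 8), matching the second,
 * and returning that extent's starting block + (10 - 8), with
 * *left = 16 - 2 = 14 blocks remaining in the extent for omfs_get_block()
 * below to map in one go.
 */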
static int omfs_get_block(struct inode *inode, sector_t block,
                        struct buffer_head *bh_result, int create)
{
        struct buffer_head *bh;
        sector_t next, offset;
        int ret;
        u64 new_block;
        u32 max_extents;
        int extent_count;
        struct omfs_extent *oe;
        struct omfs_extent_entry *entry;
        struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
        int max_blocks = bh_result->b_size >> inode->i_blkbits;
        int remain;

        ret = -EIO;
        bh = omfs_bread(inode->i_sb, inode->i_ino);
        if (!bh)
                goto out;

        oe = (struct omfs_extent *)(&bh->b_data[OMFS_EXTENT_START]);
        max_extents = omfs_max_extents(sbi, OMFS_EXTENT_START);
        next = inode->i_ino;

        for (;;) {

                if (omfs_is_bad(sbi, (struct omfs_header *) bh->b_data, next))
                        goto out_brelse;

                extent_count = be32_to_cpu(oe->e_extent_count);
                next = be64_to_cpu(oe->e_next);
                entry = oe->e_entry;

                if (extent_count > max_extents)
                        goto out_brelse;

                offset = find_block(inode, entry, block, extent_count, &remain);
                if (offset > 0) {
                        ret = 0;
                        map_bh(bh_result, inode->i_sb, offset);
                        if (remain > max_blocks)
                                remain = max_blocks;
                        bh_result->b_size = (remain << inode->i_blkbits);
                        goto out_brelse;
                }
                if (next == ~0)
                        break;

                brelse(bh);
                bh = omfs_bread(inode->i_sb, next);
                if (!bh)
                        goto out;
                oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]);
                max_extents = omfs_max_extents(sbi, OMFS_EXTENT_CONT);
        }
        if (create) {
                ret = omfs_grow_extent(inode, oe, &new_block);
                if (ret == 0) {
                        mark_buffer_dirty(bh);
                        mark_inode_dirty(inode);
                        map_bh(bh_result, inode->i_sb,
                                        clus_to_blk(sbi, new_block));
                }
        }
out_brelse:
        brelse(bh);
out:
        return ret;
}

static int omfs_read_folio(struct file *file, struct folio *folio)
{
        return block_read_full_folio(folio, omfs_get_block);
}

static void omfs_readahead(struct readahead_control *rac)
{
        mpage_readahead(rac, omfs_get_block);
}

static int
omfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        return mpage_writepages(mapping, wbc, omfs_get_block);
}

static void omfs_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
                omfs_truncate(inode);
        }
}

static int omfs_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len,
                        struct page **pagep, void **fsdata)
{
        int ret;

        ret = block_write_begin(mapping, pos, len, pagep, omfs_get_block);
        if (unlikely(ret))
                omfs_write_failed(mapping, pos + len);

        return ret;
}

static sector_t omfs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, omfs_get_block);
}

const struct file_operations omfs_file_operations = {
        .llseek = generic_file_llseek,
        .read_iter = generic_file_read_iter,
        .write_iter = generic_file_write_iter,
        .mmap = generic_file_mmap,
        .fsync = generic_file_fsync,
        .splice_read = filemap_splice_read,
};

static int omfs_setattr(struct mnt_idmap *idmap,
                        struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        int error;

        error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
        if (error)
                return error;

        if ((attr->ia_valid & ATTR_SIZE) &&
            attr->ia_size != i_size_read(inode)) {
                error = inode_newsize_ok(inode, attr->ia_size);
                if (error)
                        return error;
                truncate_setsize(inode, attr->ia_size);
                omfs_truncate(inode);
        }

        setattr_copy(&nop_mnt_idmap, inode, attr);
        mark_inode_dirty(inode);
        return 0;
}

const struct inode_operations omfs_file_inops = {
        .setattr = omfs_setattr,
};

const struct address_space_operations omfs_aops = {
        .dirty_folio = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .read_folio = omfs_read_folio,
        .readahead = omfs_readahead,
        .writepages = omfs_writepages,
        .write_begin = omfs_write_begin,
        .write_end = generic_write_end,
        .bmap = omfs_bmap,
        .migrate_folio = buffer_migrate_folio,
};