/*
 * linux/fs/ufs/util.h
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 */

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include "swab.h"


/*
 * some useful macros
 */
#define in_range(b,first,len)	((b)>=(first)&&(b)<(first)+(len))

/*
 * macros used for retyping
 */
#define UCPI_UBH ((struct ufs_buffer_head *)ucpi)
#define USPI_UBH ((struct ufs_buffer_head *)uspi)


/*
 * macros used for accessing structures
 */
static inline s32
ufs_get_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
		 struct ufs_super_block_third *usb3)
{
	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUN:
		return fs32_to_cpu(sb, usb3->fs_u2.fs_sun.fs_state);
	case UFS_ST_SUNx86:
		return fs32_to_cpu(sb, usb1->fs_u1.fs_sunx86.fs_state);
	case UFS_ST_44BSD:
	default:
		return fs32_to_cpu(sb, usb3->fs_u2.fs_44.fs_state);
	}
}

static inline void
ufs_set_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
		 struct ufs_super_block_third *usb3, s32 value)
{
	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUN:
		usb3->fs_u2.fs_sun.fs_state = cpu_to_fs32(sb, value);
		break;
	case UFS_ST_SUNx86:
		usb1->fs_u1.fs_sunx86.fs_state = cpu_to_fs32(sb, value);
		break;
	case UFS_ST_44BSD:
		usb3->fs_u2.fs_44.fs_state = cpu_to_fs32(sb, value);
		break;
	}
}

static inline u32
ufs_get_fs_npsect(struct super_block *sb, struct ufs_super_block_first *usb1,
		  struct ufs_super_block_third *usb3)
{
	if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
		return fs32_to_cpu(sb, usb3->fs_u2.fs_sunx86.fs_npsect);
	else
		return fs32_to_cpu(sb, usb1->fs_u1.fs_sun.fs_npsect);
}

static inline u64
ufs_get_fs_qbmask(struct super_block *sb, struct ufs_super_block_third *usb3)
{
	__fs64 tmp;

	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUN:
		((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_sun.fs_qbmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_sun.fs_qbmask[1];
		break;
	case UFS_ST_SUNx86:
		((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_sunx86.fs_qbmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_sunx86.fs_qbmask[1];
		break;
	case UFS_ST_44BSD:
		((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_44.fs_qbmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_44.fs_qbmask[1];
		break;
	}

	return fs64_to_cpu(sb, tmp);
}

static inline u64
ufs_get_fs_qfmask(struct super_block *sb, struct ufs_super_block_third *usb3)
{
	__fs64 tmp;

	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUN:
		((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_sun.fs_qfmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_sun.fs_qfmask[1];
		break;
	case UFS_ST_SUNx86:
		((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_sunx86.fs_qfmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_sunx86.fs_qfmask[1];
		break;
	case UFS_ST_44BSD:
		((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_44.fs_qfmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_44.fs_qfmask[1];
		break;
	}

	return fs64_to_cpu(sb, tmp);
}
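
/*
 * Directory entry accessors.  The old-style dirent keeps d_namlen as an
 * on-disk 16-bit field, while the 4.4BSD layout stores d_type and d_namlen
 * as single bytes, which is why the 44BSD branches below do not go through
 * the byte-swap helpers (see the XXX notes).
 */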

static inline u16
ufs_get_de_namlen(struct super_block *sb, struct ufs_dir_entry *de)
{
	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD)
		return fs16_to_cpu(sb, de->d_u.d_namlen);
	else
		return de->d_u.d_44.d_namlen; /* XXX this seems wrong */
}

static inline void
ufs_set_de_namlen(struct super_block *sb, struct ufs_dir_entry *de, u16 value)
{
	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD)
		de->d_u.d_namlen = cpu_to_fs16(sb, value);
	else
		de->d_u.d_44.d_namlen = value; /* XXX this seems wrong */
}

static inline void
ufs_set_de_type(struct super_block *sb, struct ufs_dir_entry *de, int mode)
{
	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) != UFS_DE_44BSD)
		return;

	/*
	 * TODO turn this into a table lookup
	 */
	switch (mode & S_IFMT) {
	case S_IFSOCK:
		de->d_u.d_44.d_type = DT_SOCK;
		break;
	case S_IFLNK:
		de->d_u.d_44.d_type = DT_LNK;
		break;
	case S_IFREG:
		de->d_u.d_44.d_type = DT_REG;
		break;
	case S_IFBLK:
		de->d_u.d_44.d_type = DT_BLK;
		break;
	case S_IFDIR:
		de->d_u.d_44.d_type = DT_DIR;
		break;
	case S_IFCHR:
		de->d_u.d_44.d_type = DT_CHR;
		break;
	case S_IFIFO:
		de->d_u.d_44.d_type = DT_FIFO;
		break;
	default:
		de->d_u.d_44.d_type = DT_UNKNOWN;
	}
}

static inline u32
ufs_get_inode_uid(struct super_block *sb, struct ufs_inode *inode)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_EFT:
		return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_uid);
	case UFS_UID_44BSD:
		return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_uid);
	default:
		return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_suid);
	}
}

static inline void
ufs_set_inode_uid(struct super_block *sb, struct ufs_inode *inode, u32 value)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_EFT:
		inode->ui_u3.ui_sun.ui_uid = cpu_to_fs32(sb, value);
		break;
	case UFS_UID_44BSD:
		inode->ui_u3.ui_44.ui_uid = cpu_to_fs32(sb, value);
		break;
	}
	inode->ui_u1.oldids.ui_suid = cpu_to_fs16(sb, value);
}

static inline u32
ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_EFT:
		return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid);
	case UFS_UID_44BSD:
		return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_gid);
	default:
		return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_sgid);
	}
}

static inline void
ufs_set_inode_gid(struct super_block *sb, struct ufs_inode *inode, u32 value)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_EFT:
		inode->ui_u3.ui_sun.ui_gid = cpu_to_fs32(sb, value);
		break;
	case UFS_UID_44BSD:
		inode->ui_u3.ui_44.ui_gid = cpu_to_fs32(sb, value);
		break;
	}
	inode->ui_u1.oldids.ui_sgid = cpu_to_fs16(sb, value);
}

extern dev_t ufs_get_inode_dev(struct super_block *, struct ufs_inode_info *);
extern void ufs_set_inode_dev(struct super_block *, struct ufs_inode_info *, dev_t);
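
/*
 * A struct ufs_buffer_head gathers the buffer_heads that back one
 * fragment-aligned on-disk region (a UFS block or the superblock),
 * one fragment-sized buffer per bh[] slot; the s_fshift/s_fmask
 * arithmetic in the helpers below relies on that layout.  The ubh_*
 * routines let callers treat the region as one contiguous buffer.
 */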

/*
 * These functions manipulate ufs buffers
 */
#define ubh_bread(sb,fragment,size) _ubh_bread_(uspi,sb,fragment,size)
extern struct ufs_buffer_head * _ubh_bread_(struct ufs_sb_private_info *, struct super_block *, u64, u64);
extern struct ufs_buffer_head * ubh_bread_uspi(struct ufs_sb_private_info *, struct super_block *, u64, u64);
extern void ubh_brelse (struct ufs_buffer_head *);
extern void ubh_brelse_uspi (struct ufs_sb_private_info *);
extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *);
extern void ubh_mark_buffer_uptodate (struct ufs_buffer_head *, int);
extern void ubh_ll_rw_block (int, unsigned, struct ufs_buffer_head **);
extern void ubh_wait_on_buffer (struct ufs_buffer_head *);
extern unsigned ubh_max_bcount (struct ufs_buffer_head *);
extern void ubh_bforget (struct ufs_buffer_head *);
extern int ubh_buffer_dirty (struct ufs_buffer_head *);
#define ubh_ubhcpymem(mem,ubh,size) _ubh_ubhcpymem_(uspi,mem,ubh,size)
extern void _ubh_ubhcpymem_(struct ufs_sb_private_info *, unsigned char *, struct ufs_buffer_head *, unsigned);
#define ubh_memcpyubh(ubh,mem,size) _ubh_memcpyubh_(uspi,ubh,mem,size)
extern void _ubh_memcpyubh_(struct ufs_sb_private_info *, struct ufs_buffer_head *, unsigned char *, unsigned);


/*
 * macros and inline function to get important structures from ufs_sb_private_info
 */

static inline void *get_usb_offset(struct ufs_sb_private_info *uspi,
				   unsigned int offset)
{
	unsigned int index;

	index = offset >> uspi->s_fshift;
	offset &= ~uspi->s_fmask;
	return uspi->s_ubh.bh[index]->b_data + offset;
}

#define ubh_get_usb_first(uspi) \
	((struct ufs_super_block_first *)get_usb_offset((uspi), 0))

#define ubh_get_usb_second(uspi) \
	((struct ufs_super_block_second *)get_usb_offset((uspi), UFS_SECTOR_SIZE))

#define ubh_get_usb_third(uspi) \
	((struct ufs_super_block_third *)get_usb_offset((uspi), 2*UFS_SECTOR_SIZE))


#define ubh_get_ucg(ubh) \
	((struct ufs_cylinder_group *)((ubh)->bh[0]->b_data))


/*
 * Extract byte from ufs_buffer_head
 * Extract the bits for a block from a map inside ufs_buffer_head
 */
#define ubh_get_addr8(ubh,begin) \
	((u8*)(ubh)->bh[(begin) >> uspi->s_fshift]->b_data + \
	((begin) & ~uspi->s_fmask))

#define ubh_get_addr16(ubh,begin) \
	(((__fs16*)((ubh)->bh[(begin) >> (uspi->s_fshift-1)]->b_data)) + \
	((begin) & ((uspi->s_fsize>>1) - 1)))

#define ubh_get_addr32(ubh,begin) \
	(((__fs32*)((ubh)->bh[(begin) >> (uspi->s_fshift-2)]->b_data)) + \
	((begin) & ((uspi->s_fsize>>2) - 1)))

#define ubh_get_addr ubh_get_addr8

#define ubh_blkmap(ubh,begin,bit) \
	((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb)))


/*
 * Macros for access to superblock array structures
 */
#define ubh_postbl(ubh,cylno,i) \
	((uspi->s_postblformat != UFS_DYNAMICPOSTBLFMT) \
	? (*(__s16*)(ubh_get_addr(ubh, \
	(unsigned)(&((struct ufs_super_block *)0)->fs_opostbl) \
	+ (((cylno) * 16 + (i)) << 1) ) )) \
	: (*(__s16*)(ubh_get_addr(ubh, \
	uspi->s_postbloff + (((cylno) * uspi->s_nrpos + (i)) << 1) ))))

#define ubh_rotbl(ubh,i) \
	((uspi->s_postblformat != UFS_DYNAMICPOSTBLFMT) \
	? (*(__u8*)(ubh_get_addr(ubh, \
	(unsigned)(&((struct ufs_super_block *)0)->fs_space) + (i)))) \
	: (*(__u8*)(ubh_get_addr(ubh, uspi->s_rotbloff + (i)))))
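
/*
 * Worked example for ubh_get_addr8() above, with illustrative values only:
 * assuming 1024-byte fragments (s_fshift == 10, ~s_fmask == 1023),
 * ubh_get_addr8(ubh, 2500) resolves to byte 452 of ubh->bh[2]->b_data,
 * i.e. offset 2500 counted across the concatenated fragment buffers.
 */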

/*
 * Determine the number of available frags given a
 * percentage to hold in reserve.
 */
#define ufs_freespace(usb, percentreserved) \
	(ufs_blkstofrags(fs32_to_cpu(sb, (usb)->fs_cstotal.cs_nbfree)) + \
	fs32_to_cpu(sb, (usb)->fs_cstotal.cs_nffree) - (uspi->s_dsize * (percentreserved) / 100))

/*
 * Macros to access cylinder group array structures
 */
#define ubh_cg_blktot(ucpi,cylno) \
	(*((__fs32*)ubh_get_addr(UCPI_UBH, (ucpi)->c_btotoff + ((cylno) << 2))))

#define ubh_cg_blks(ucpi,cylno,rpos) \
	(*((__fs16*)ubh_get_addr(UCPI_UBH, \
	(ucpi)->c_boff + (((cylno) * uspi->s_nrpos + (rpos)) << 1 ))))

/*
 * Bitmap operations
 * These functions work like classical bitmap operations.
 * The difference is that we don't have the whole bitmap
 * in one contiguous chunk of memory, but in several buffers.
 * The parameters of each function are super_block, ufs_buffer_head and
 * position of the beginning of the bitmap.
 */
#define ubh_setbit(ubh,begin,bit) \
	(*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) |= (1 << ((bit) & 7)))

#define ubh_clrbit(ubh,begin,bit) \
	(*ubh_get_addr (ubh, (begin) + ((bit) >> 3)) &= ~(1 << ((bit) & 7)))

#define ubh_isset(ubh,begin,bit) \
	(*ubh_get_addr (ubh, (begin) + ((bit) >> 3)) & (1 << ((bit) & 7)))

#define ubh_isclr(ubh,begin,bit) (!ubh_isset(ubh,begin,bit))

#define ubh_find_first_zero_bit(ubh,begin,size) _ubh_find_next_zero_bit_(uspi,ubh,begin,size,0)

#define ubh_find_next_zero_bit(ubh,begin,size,offset) _ubh_find_next_zero_bit_(uspi,ubh,begin,size,offset)
static inline unsigned _ubh_find_next_zero_bit_(
	struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh,
	unsigned begin, unsigned size, unsigned offset)
{
	unsigned base, count, pos;

	size -= offset;
	begin <<= 3;
	offset += begin;
	base = offset >> uspi->s_bpfshift;
	offset &= uspi->s_bpfmask;
	for (;;) {
		count = min_t(unsigned int, size + offset, uspi->s_bpf);
		size -= count - offset;
		pos = ext2_find_next_zero_bit (ubh->bh[base]->b_data, count, offset);
		if (pos < count || !size)
			break;
		base++;
		offset = 0;
	}
	return (base << uspi->s_bpfshift) + pos - begin;
}

static inline unsigned find_last_zero_bit (unsigned char * bitmap,
	unsigned size, unsigned offset)
{
	unsigned bit, i;
	unsigned char * mapp;
	unsigned char map;

	mapp = bitmap + (size >> 3);
	map = *mapp--;
	bit = 1 << (size & 7);
	for (i = size; i > offset; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & 7) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << 7;
		}
	}
	return i;
}

#define ubh_find_last_zero_bit(ubh,begin,size,offset) _ubh_find_last_zero_bit_(uspi,ubh,begin,size,offset)
static inline unsigned _ubh_find_last_zero_bit_(
	struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh,
	unsigned begin, unsigned start, unsigned end)
{
	unsigned base, count, pos, size;

	size = start - end;
	begin <<= 3;
	start += begin;
	base = start >> uspi->s_bpfshift;
	start &= uspi->s_bpfmask;
	for (;;) {
		count = min_t(unsigned int,
			      size + (uspi->s_bpf - start), uspi->s_bpf)
			- (uspi->s_bpf - start);
		size -= count;
		pos = find_last_zero_bit (ubh->bh[base]->b_data,
					  start, start - count);
		if (pos > start - count || !size)
			break;
		base--;
		start = uspi->s_bpf;
	}
	return (base << uspi->s_bpfshift) + pos - begin;
}
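
/*
 * Whole-block bitmap helpers.  A full block occupies a run of s_fpb
 * consecutive bits in the fragment bitmap, so the helpers below test,
 * clear and set that run as one group; the switch on s_fpb (8, 4, 2 or 1
 * fragments per block) selects the matching mask width.
 */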

#define ubh_isblockclear(ubh,begin,block) (!_ubh_isblockset_(uspi,ubh,begin,block))

#define ubh_isblockset(ubh,begin,block) _ubh_isblockset_(uspi,ubh,begin,block)
static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi,
	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
{
	switch (uspi->s_fpb) {
	case 8:
		return (*ubh_get_addr (ubh, begin + block) == 0xff);
	case 4:
		return (*ubh_get_addr (ubh, begin + (block >> 1)) == (0x0f << ((block & 0x01) << 2)));
	case 2:
		return (*ubh_get_addr (ubh, begin + (block >> 2)) == (0x03 << ((block & 0x03) << 1)));
	case 1:
		return (*ubh_get_addr (ubh, begin + (block >> 3)) == (0x01 << (block & 0x07)));
	}
	return 0;
}

#define ubh_clrblock(ubh,begin,block) _ubh_clrblock_(uspi,ubh,begin,block)
static inline void _ubh_clrblock_(struct ufs_sb_private_info * uspi,
	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
{
	switch (uspi->s_fpb) {
	case 8:
		*ubh_get_addr (ubh, begin + block) = 0x00;
		return;
	case 4:
		*ubh_get_addr (ubh, begin + (block >> 1)) &= ~(0x0f << ((block & 0x01) << 2));
		return;
	case 2:
		*ubh_get_addr (ubh, begin + (block >> 2)) &= ~(0x03 << ((block & 0x03) << 1));
		return;
	case 1:
		*ubh_get_addr (ubh, begin + (block >> 3)) &= ~(0x01 << ((block & 0x07)));
		return;
	}
}

#define ubh_setblock(ubh,begin,block) _ubh_setblock_(uspi,ubh,begin,block)
static inline void _ubh_setblock_(struct ufs_sb_private_info * uspi,
	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
{
	switch (uspi->s_fpb) {
	case 8:
		*ubh_get_addr(ubh, begin + block) = 0xff;
		return;
	case 4:
		*ubh_get_addr(ubh, begin + (block >> 1)) |= (0x0f << ((block & 0x01) << 2));
		return;
	case 2:
		*ubh_get_addr(ubh, begin + (block >> 2)) |= (0x03 << ((block & 0x03) << 1));
		return;
	case 1:
		*ubh_get_addr(ubh, begin + (block >> 3)) |= (0x01 << ((block & 0x07)));
		return;
	}
}

static inline void ufs_fragacct (struct super_block * sb, unsigned blockmap,
	__fs32 * fraglist, int cnt)
{
	struct ufs_sb_private_info * uspi;
	unsigned fragsize, pos;

	uspi = UFS_SB(sb)->s_uspi;

	fragsize = 0;
	for (pos = 0; pos < uspi->s_fpb; pos++) {
		if (blockmap & (1 << pos)) {
			fragsize++;
		}
		else if (fragsize > 0) {
			fs32_add(sb, &fraglist[fragsize], cnt);
			fragsize = 0;
		}
	}
	if (fragsize > 0 && fragsize < uspi->s_fpb)
		fs32_add(sb, &fraglist[fragsize], cnt);
}

#define ubh_scanc(ubh,begin,size,table,mask) _ubh_scanc_(uspi,ubh,begin,size,table,mask)
static inline unsigned _ubh_scanc_(struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh,
	unsigned begin, unsigned size, unsigned char * table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char * cp;


	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest);
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}