/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/fs/ufs/util.h
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 */

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include "swab.h"

/*
 * functions used for retyping
 */
static inline struct ufs_buffer_head *UCPI_UBH(struct ufs_cg_private_info *cpi)
{
	return &cpi->c_ubh;
}
static inline struct ufs_buffer_head *USPI_UBH(struct ufs_sb_private_info *spi)
{
	return &spi->s_ubh;
}



/*
 * macros used for accessing structures
 */
static inline s32
ufs_get_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
		 struct ufs_super_block_third *usb3)
{
	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNOS:
		if (fs32_to_cpu(sb, usb3->fs_postblformat) == UFS_42POSTBLFMT)
			return fs32_to_cpu(sb, usb1->fs_u0.fs_sun.fs_state);
		fallthrough;	/* to UFS_ST_SUN */
	case UFS_ST_SUN:
		return fs32_to_cpu(sb, usb3->fs_un2.fs_sun.fs_state);
	case UFS_ST_SUNx86:
		return fs32_to_cpu(sb, usb1->fs_u1.fs_sunx86.fs_state);
	case UFS_ST_44BSD:
	default:
		return fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_state);
	}
}

static inline void
ufs_set_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
		 struct ufs_super_block_third *usb3, s32 value)
{
	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNOS:
		if (fs32_to_cpu(sb, usb3->fs_postblformat) == UFS_42POSTBLFMT) {
			usb1->fs_u0.fs_sun.fs_state = cpu_to_fs32(sb, value);
			break;
		}
		fallthrough;	/* to UFS_ST_SUN */
	case UFS_ST_SUN:
		usb3->fs_un2.fs_sun.fs_state = cpu_to_fs32(sb, value);
		break;
	case UFS_ST_SUNx86:
		usb1->fs_u1.fs_sunx86.fs_state = cpu_to_fs32(sb, value);
		break;
	case UFS_ST_44BSD:
		usb3->fs_un2.fs_44.fs_state = cpu_to_fs32(sb, value);
		break;
	}
}
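
/*
 * The fs_state accessors above dispatch on the UFS flavour recorded in
 * UFS_SB(sb)->s_flags, since SunOS, Sun, Solaris x86 and 4.4BSD keep the
 * fs_state word in different superblock unions.  A minimal usage sketch,
 * assuming usb1 and usb3 point into an already loaded superblock:
 *
 *	s32 state = ufs_get_fs_state(sb, usb1, usb3);
 *	ufs_set_fs_state(sb, usb1, usb3, state);
 */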

static inline u32
ufs_get_fs_npsect(struct super_block *sb, struct ufs_super_block_first *usb1,
		  struct ufs_super_block_third *usb3)
{
	if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
		return fs32_to_cpu(sb, usb3->fs_un2.fs_sunx86.fs_npsect);
	else
		return fs32_to_cpu(sb, usb1->fs_u1.fs_sun.fs_npsect);
}

static inline u64
ufs_get_fs_qbmask(struct super_block *sb, struct ufs_super_block_third *usb3)
{
	__fs64 tmp;

	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNOS:
	case UFS_ST_SUN:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sun.fs_qbmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sun.fs_qbmask[1];
		break;
	case UFS_ST_SUNx86:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sunx86.fs_qbmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sunx86.fs_qbmask[1];
		break;
	case UFS_ST_44BSD:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_44.fs_qbmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_44.fs_qbmask[1];
		break;
	}

	return fs64_to_cpu(sb, tmp);
}

static inline u64
ufs_get_fs_qfmask(struct super_block *sb, struct ufs_super_block_third *usb3)
{
	__fs64 tmp;

	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNOS:
	case UFS_ST_SUN:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sun.fs_qfmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sun.fs_qfmask[1];
		break;
	case UFS_ST_SUNx86:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sunx86.fs_qfmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sunx86.fs_qfmask[1];
		break;
	case UFS_ST_44BSD:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_44.fs_qfmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_44.fs_qfmask[1];
		break;
	}

	return fs64_to_cpu(sb, tmp);
}
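
/*
 * On disk, qbmask and qfmask are stored as two 32-bit words rather than
 * one 64-bit field, so the helpers above rebuild a __fs64 from the two
 * halves and only then byte-swap it with fs64_to_cpu().  A usage sketch,
 * assuming uspi describes a mounted UFS volume:
 *
 *	u64 qbmask = ufs_get_fs_qbmask(sb, ubh_get_usb_third(uspi));
 */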

static inline u16
ufs_get_de_namlen(struct super_block *sb, struct ufs_dir_entry *de)
{
	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD)
		return fs16_to_cpu(sb, de->d_u.d_namlen);
	else
		return de->d_u.d_44.d_namlen; /* XXX this seems wrong */
}

static inline void
ufs_set_de_namlen(struct super_block *sb, struct ufs_dir_entry *de, u16 value)
{
	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD)
		de->d_u.d_namlen = cpu_to_fs16(sb, value);
	else
		de->d_u.d_44.d_namlen = value; /* XXX this seems wrong */
}

static inline void
ufs_set_de_type(struct super_block *sb, struct ufs_dir_entry *de, int mode)
{
	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) != UFS_DE_44BSD)
		return;

	/*
	 * TODO turn this into a table lookup
	 */
	switch (mode & S_IFMT) {
	case S_IFSOCK:
		de->d_u.d_44.d_type = DT_SOCK;
		break;
	case S_IFLNK:
		de->d_u.d_44.d_type = DT_LNK;
		break;
	case S_IFREG:
		de->d_u.d_44.d_type = DT_REG;
		break;
	case S_IFBLK:
		de->d_u.d_44.d_type = DT_BLK;
		break;
	case S_IFDIR:
		de->d_u.d_44.d_type = DT_DIR;
		break;
	case S_IFCHR:
		de->d_u.d_44.d_type = DT_CHR;
		break;
	case S_IFIFO:
		de->d_u.d_44.d_type = DT_FIFO;
		break;
	default:
		de->d_u.d_44.d_type = DT_UNKNOWN;
	}
}

static inline u32
ufs_get_inode_uid(struct super_block *sb, struct ufs_inode *inode)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_44BSD:
		return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_uid);
	case UFS_UID_EFT:
		if (inode->ui_u1.oldids.ui_suid == 0xFFFF)
			return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_uid);
		fallthrough;
	default:
		return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_suid);
	}
}

static inline void
ufs_set_inode_uid(struct super_block *sb, struct ufs_inode *inode, u32 value)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_44BSD:
		inode->ui_u3.ui_44.ui_uid = cpu_to_fs32(sb, value);
		inode->ui_u1.oldids.ui_suid = cpu_to_fs16(sb, value);
		break;
	case UFS_UID_EFT:
		inode->ui_u3.ui_sun.ui_uid = cpu_to_fs32(sb, value);
		if (value > 0xFFFF)
			value = 0xFFFF;
		fallthrough;
	default:
		inode->ui_u1.oldids.ui_suid = cpu_to_fs16(sb, value);
		break;
	}
}

static inline u32
ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_44BSD:
		return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_gid);
	case UFS_UID_EFT:
		if (inode->ui_u1.oldids.ui_sgid == 0xFFFF)
			return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid);
		fallthrough;
	default:
		return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_sgid);
	}
}

static inline void
ufs_set_inode_gid(struct super_block *sb, struct ufs_inode *inode, u32 value)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_44BSD:
		inode->ui_u3.ui_44.ui_gid = cpu_to_fs32(sb, value);
		inode->ui_u1.oldids.ui_sgid = cpu_to_fs16(sb, value);
		break;
	case UFS_UID_EFT:
		inode->ui_u3.ui_sun.ui_gid = cpu_to_fs32(sb, value);
		if (value > 0xFFFF)
			value = 0xFFFF;
		fallthrough;
	default:
		inode->ui_u1.oldids.ui_sgid = cpu_to_fs16(sb, value);
		break;
	}
}
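
/*
 * Note on the EFT cases above: the old 16-bit ui_suid/ui_sgid fields
 * cannot hold ids above 0xFFFF, so the EFT layout keeps a 32-bit id
 * elsewhere in the inode and stores 0xFFFF in the 16-bit field as an
 * overflow marker.  That is why the getters treat 0xFFFF as "use the
 * 32-bit field" and the setters clamp the 16-bit copy to 0xFFFF.
 */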

dev_t ufs_get_inode_dev(struct super_block *, struct ufs_inode_info *);
void ufs_set_inode_dev(struct super_block *, struct ufs_inode_info *, dev_t);
int ufs_prepare_chunk(struct folio *folio, loff_t pos, unsigned len);

/*
 * These functions manipulate ufs buffers
 */
#define ubh_bread(sb,fragment,size) _ubh_bread_(uspi,sb,fragment,size)
extern struct ufs_buffer_head * _ubh_bread_(struct ufs_sb_private_info *, struct super_block *, u64, u64);
extern struct ufs_buffer_head * ubh_bread_uspi(struct ufs_sb_private_info *, struct super_block *, u64, u64);
extern void ubh_brelse (struct ufs_buffer_head *);
extern void ubh_brelse_uspi (struct ufs_sb_private_info *);
extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *);
extern void ubh_sync_block(struct ufs_buffer_head *);
extern void ubh_bforget (struct ufs_buffer_head *);
extern int  ubh_buffer_dirty (struct ufs_buffer_head *);

/* These functions work with folios in the page cache */
struct folio *ufs_get_locked_folio(struct address_space *mapping, pgoff_t index);
static inline void ufs_put_locked_folio(struct folio *folio)
{
	folio_unlock(folio);
	folio_put(folio);
}

/*
 * macros and inline function to get important structures from ufs_sb_private_info
 */

static inline void *get_usb_offset(struct ufs_sb_private_info *uspi,
				   unsigned int offset)
{
	unsigned int index;

	index = offset >> uspi->s_fshift;
	offset &= ~uspi->s_fmask;
	return uspi->s_ubh.bh[index]->b_data + offset;
}

#define ubh_get_usb_first(uspi) \
	((struct ufs_super_block_first *)get_usb_offset((uspi), 0))

#define ubh_get_usb_second(uspi) \
	((struct ufs_super_block_second *)get_usb_offset((uspi), UFS_SECTOR_SIZE))

#define ubh_get_usb_third(uspi)	\
	((struct ufs_super_block_third *)get_usb_offset((uspi), 2*UFS_SECTOR_SIZE))


#define ubh_get_ucg(ubh) \
	((struct ufs_cylinder_group *)((ubh)->bh[0]->b_data))
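
/*
 * The on-disk superblock spans more than one buffer, so it is read into
 * the buffer_heads behind uspi->s_ubh, and get_usb_offset() maps a byte
 * offset to (buffer index, offset within buffer) via the fragment shift
 * and mask.  A minimal sketch, assuming uspi belongs to a mounted
 * super_block sb:
 *
 *	struct ufs_super_block_third *usb3 = ubh_get_usb_third(uspi);
 *	u32 fmt = fs32_to_cpu(sb, usb3->fs_postblformat);
 */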


/*
 * Extract byte from ufs_buffer_head
 * Extract the bits for a block from a map inside ufs_buffer_head
 */
#define ubh_get_addr8(ubh,begin) \
	((u8*)(ubh)->bh[(begin) >> uspi->s_fshift]->b_data + \
	((begin) & ~uspi->s_fmask))

#define ubh_get_addr16(ubh,begin) \
	(((__fs16*)((ubh)->bh[(begin) >> (uspi->s_fshift-1)]->b_data)) + \
	((begin) & ((uspi->s_fsize>>1) - 1)))

#define ubh_get_addr32(ubh,begin) \
	(((__fs32*)((ubh)->bh[(begin) >> (uspi->s_fshift-2)]->b_data)) + \
	((begin) & ((uspi->s_fsize>>2) - 1)))

#define ubh_get_addr64(ubh,begin) \
	(((__fs64*)((ubh)->bh[(begin) >> (uspi->s_fshift-3)]->b_data)) + \
	((begin) & ((uspi->s_fsize>>3) - 1)))

#define ubh_get_addr ubh_get_addr8

static inline void *ubh_get_data_ptr(struct ufs_sb_private_info *uspi,
				     struct ufs_buffer_head *ubh,
				     u64 blk)
{
	if (uspi->fs_magic == UFS2_MAGIC)
		return ubh_get_addr64(ubh, blk);
	else
		return ubh_get_addr32(ubh, blk);
}

#define ubh_blkmap(ubh,begin,bit) \
	((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb)))

static inline u64
ufs_freefrags(struct ufs_sb_private_info *uspi)
{
	return ufs_blkstofrags(uspi->cs_total.cs_nbfree) +
		uspi->cs_total.cs_nffree;
}

/*
 * Macros to access cylinder group array structures
 */
#define ubh_cg_blktot(ucpi,cylno) \
	(*((__fs32*)ubh_get_addr(UCPI_UBH(ucpi), (ucpi)->c_btotoff + ((cylno) << 2))))

#define ubh_cg_blks(ucpi,cylno,rpos) \
	(*((__fs16*)ubh_get_addr(UCPI_UBH(ucpi), \
	(ucpi)->c_boff + (((cylno) * uspi->s_nrpos + (rpos)) << 1 ))))

/*
 * Bitmap operations
 * These functions work like classical bitmap operations.
 * The difference is that we don't have the whole bitmap
 * in one contiguous chunk of memory, but in several buffers.
 * The parameters of each function are super_block, ufs_buffer_head and
 * position of the beginning of the bitmap.
 */
#define ubh_setbit(ubh,begin,bit) \
	(*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) |= (1 << ((bit) & 7)))

#define ubh_clrbit(ubh,begin,bit) \
	(*ubh_get_addr (ubh, (begin) + ((bit) >> 3)) &= ~(1 << ((bit) & 7)))

#define ubh_isset(ubh,begin,bit) \
	(*ubh_get_addr (ubh, (begin) + ((bit) >> 3)) & (1 << ((bit) & 7)))

#define ubh_isclr(ubh,begin,bit) (!ubh_isset(ubh,begin,bit))

#define ubh_find_first_zero_bit(ubh,begin,size) _ubh_find_next_zero_bit_(uspi,ubh,begin,size,0)

#define ubh_find_next_zero_bit(ubh,begin,size,offset) _ubh_find_next_zero_bit_(uspi,ubh,begin,size,offset)
static inline unsigned _ubh_find_next_zero_bit_(
	struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh,
	unsigned begin, unsigned size, unsigned offset)
{
	unsigned base, count, pos;

	size -= offset;
	begin <<= 3;
	offset += begin;
	base = offset >> uspi->s_bpfshift;
	offset &= uspi->s_bpfmask;
	for (;;) {
		count = min_t(unsigned int, size + offset, uspi->s_bpf);
		size -= count - offset;
		pos = find_next_zero_bit_le(ubh->bh[base]->b_data, count, offset);
		if (pos < count || !size)
			break;
		base++;
		offset = 0;
	}
	return (base << uspi->s_bpfshift) + pos - begin;
}
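
/*
 * _ubh_find_next_zero_bit_() is find_next_zero_bit_le() generalised to a
 * bitmap that is split across the buffers of a ufs_buffer_head: it scans
 * one buffer (uspi->s_bpf bits) at a time and converts the hit back into
 * a bit offset relative to 'begin'.  Callers normally go through the
 * ubh_find_first_zero_bit()/ubh_find_next_zero_bit() wrappers above,
 * which pick up 'uspi' from the enclosing scope.
 */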

static inline unsigned find_last_zero_bit (unsigned char * bitmap,
	unsigned size, unsigned offset)
{
	unsigned bit, i;
	unsigned char * mapp;
	unsigned char map;

	mapp = bitmap + (size >> 3);
	map = *mapp--;
	bit = 1 << (size & 7);
	for (i = size; i > offset; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & 7) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << 7;
		}
	}
	return i;
}

#define ubh_find_last_zero_bit(ubh,begin,size,offset) _ubh_find_last_zero_bit_(uspi,ubh,begin,size,offset)
static inline unsigned _ubh_find_last_zero_bit_(
	struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh,
	unsigned begin, unsigned start, unsigned end)
{
	unsigned base, count, pos, size;

	size = start - end;
	begin <<= 3;
	start += begin;
	base = start >> uspi->s_bpfshift;
	start &= uspi->s_bpfmask;
	for (;;) {
		count = min_t(unsigned int,
			    size + (uspi->s_bpf - start), uspi->s_bpf)
			- (uspi->s_bpf - start);
		size -= count;
		pos = find_last_zero_bit (ubh->bh[base]->b_data,
			start, start - count);
		if (pos > start - count || !size)
			break;
		base--;
		start = uspi->s_bpf;
	}
	return (base << uspi->s_bpfshift) + pos - begin;
}

static inline int ubh_isblockset(struct ufs_sb_private_info *uspi,
	struct ufs_cg_private_info *ucpi, unsigned int frag)
{
	struct ufs_buffer_head *ubh = UCPI_UBH(ucpi);
	u8 *p = ubh_get_addr(ubh, ucpi->c_freeoff + (frag >> 3));
	u8 mask;

	switch (uspi->s_fpb) {
	case 8:
		return *p == 0xff;
	case 4:
		mask = 0x0f << (frag & 4);
		return (*p & mask) == mask;
	case 2:
		mask = 0x03 << (frag & 6);
		return (*p & mask) == mask;
	case 1:
		mask = 0x01 << (frag & 7);
		return (*p & mask) == mask;
	}
	return 0;
}
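
/*
 * ubh_isblockset(), ubh_clrblock() and ubh_setblock() operate on a whole
 * block's worth of bits in the cylinder-group free-fragment map: with
 * s_fpb fragments per block, a block owns s_fpb consecutive bits, so the
 * mask is 0xff, 0x0f, 0x03 or 0x01 shifted to the block's position in the
 * byte.  Worked example: for s_fpb == 4 and frag == 6, (frag & 4) == 4 and
 * the mask is 0x0f << 4 == 0xf0, i.e. the upper nibble of *p.
 */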

static inline void ubh_clrblock(struct ufs_sb_private_info *uspi,
	struct ufs_cg_private_info *ucpi, unsigned int frag)
{
	struct ufs_buffer_head *ubh = UCPI_UBH(ucpi);
	u8 *p = ubh_get_addr(ubh, ucpi->c_freeoff + (frag >> 3));

	switch (uspi->s_fpb) {
	case 8:
		*p = 0x00;
		return;
	case 4:
		*p &= ~(0x0f << (frag & 4));
		return;
	case 2:
		*p &= ~(0x03 << (frag & 6));
		return;
	case 1:
		*p &= ~(0x01 << (frag & 7));
		return;
	}
}

static inline void ubh_setblock(struct ufs_sb_private_info *uspi,
	struct ufs_cg_private_info *ucpi, unsigned int frag)
{
	struct ufs_buffer_head *ubh = UCPI_UBH(ucpi);
	u8 *p = ubh_get_addr(ubh, ucpi->c_freeoff + (frag >> 3));

	switch (uspi->s_fpb) {
	case 8:
		*p = 0xff;
		return;
	case 4:
		*p |= 0x0f << (frag & 4);
		return;
	case 2:
		*p |= 0x03 << (frag & 6);
		return;
	case 1:
		*p |= 0x01 << (frag & 7);
		return;
	}
}

static inline void ufs_fragacct (struct super_block * sb, unsigned blockmap,
	__fs32 * fraglist, int cnt)
{
	struct ufs_sb_private_info * uspi;
	unsigned fragsize, pos;

	uspi = UFS_SB(sb)->s_uspi;

	fragsize = 0;
	for (pos = 0; pos < uspi->s_fpb; pos++) {
		if (blockmap & (1 << pos)) {
			fragsize++;
		}
		else if (fragsize > 0) {
			fs32_add(sb, &fraglist[fragsize], cnt);
			fragsize = 0;
		}
	}
	if (fragsize > 0 && fragsize < uspi->s_fpb)
		fs32_add(sb, &fraglist[fragsize], cnt);
}
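
/*
 * ufs_fragacct() updates the fragment-size histogram 'fraglist' from the
 * per-block map 'blockmap': every maximal run of set (free) bits of length
 * n adds cnt to fraglist[n], except a run covering the whole block, which
 * is accounted as a free block elsewhere.  Illustrative example: with
 * s_fpb == 8 and blockmap == 0b01100110, the two runs of length 2 each add
 * cnt to fraglist[2]; cnt may be negative to back an earlier change out.
 */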

static inline void *ufs_get_direct_data_ptr(struct ufs_sb_private_info *uspi,
					    struct ufs_inode_info *ufsi,
					    unsigned blk)
{
	BUG_ON(blk > UFS_TIND_BLOCK);
	return uspi->fs_magic == UFS2_MAGIC ?
		(void *)&ufsi->i_u1.u2_i_data[blk] :
		(void *)&ufsi->i_u1.i_data[blk];
}

static inline u64 ufs_data_ptr_to_cpu(struct super_block *sb, void *p)
{
	return UFS_SB(sb)->s_uspi->fs_magic == UFS2_MAGIC ?
		fs64_to_cpu(sb, *(__fs64 *)p) :
		fs32_to_cpu(sb, *(__fs32 *)p);
}
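
/*
 * UFS1 keeps 32-bit block pointers and UFS2 keeps 64-bit ones, so block
 * pointers are passed around as void * and only widened where they are
 * used.  A minimal sketch, assuming ufsi is the in-core inode info for an
 * inode on sb, reading its first direct block pointer:
 *
 *	void *p = ufs_get_direct_data_ptr(uspi, ufsi, 0);
 *	u64 blkno = ufs_data_ptr_to_cpu(sb, p);
 */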

static inline void ufs_cpu_to_data_ptr(struct super_block *sb, void *p, u64 val)
{
	if (UFS_SB(sb)->s_uspi->fs_magic == UFS2_MAGIC)
		*(__fs64 *)p = cpu_to_fs64(sb, val);
	else
		*(__fs32 *)p = cpu_to_fs32(sb, val);
}

static inline void ufs_data_ptr_clear(struct ufs_sb_private_info *uspi,
				      void *p)
{
	if (uspi->fs_magic == UFS2_MAGIC)
		*(__fs64 *)p = 0;
	else
		*(__fs32 *)p = 0;
}

static inline int ufs_is_data_ptr_zero(struct ufs_sb_private_info *uspi,
				       void *p)
{
	if (uspi->fs_magic == UFS2_MAGIC)
		return *(__fs64 *)p == 0;
	else
		return *(__fs32 *)p == 0;
}
static inline __fs32 ufs_get_seconds(struct super_block *sbp)
{
	time64_t now = ktime_get_real_seconds();

	/* Signed 32-bit interpretation wraps around in 2038, which
	 * happens in ufs1 inode stamps but not in ufs2, which uses
	 * 64-bit stamps. For superblock and blockgroup, let's assume
	 * unsigned 32-bit stamps, which are good until y2106.
	 * Wrap around rather than clamp here to make the dirty
	 * file system detection work in the superblock stamp.
	 */
	return cpu_to_fs32(sbp, lower_32_bits(now));
}
598