// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

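/* Sentinel returned by the allocators when no free block or run is found. */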
#define INVBLOCK ((u64)-1L)

static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);

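/*
 * Adjust the free-block counters after 'delta' whole blocks containing
 * 'fragment' were freed (delta > 0) or allocated (delta < 0).  Cluster
 * accounting exists only on 44BSD-style layouts, and the per-cylinder
 * rotational tables only on pre-UFS2 layouts.
 */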
static void adjust_free_blocks(struct super_block *sb,
			       struct ufs_cylinder_group *ucg,
			       struct ufs_cg_private_info *ucpi,
			       unsigned fragment, int delta)
{
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;

	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct(sb, ucpi, fragment, delta);

	fs32_add(sb, &ucg->cg_cs.cs_nbfree, delta);
	uspi->cs_total.cs_nbfree += delta;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, delta);

	if (uspi->fs_magic != UFS2_MAGIC) {
		unsigned cylno = ufs_cbtocylno(fragment);

		fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos(fragment)), delta);
		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), delta);
	}
}

/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if (ufs_fragnum(fragment) + count > uspi->s_fpb)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	mutex_lock(&UFS_SB(sb)->s_lock);

	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks outside the device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

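	/*
	 * cg_frsum[] counts free-fragment runs by length.  Drop the old
	 * run accounting for this block, set the freed bits, then re-add
	 * the accounting below so the summary matches the new bitmap.
	 */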
	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "fragment %u is already free", i);
	}

	inode_sub_bytes(inode, count << uspi->s_fshift);
	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Try to reassemble the free fragments into a whole block
	 */
	if (ubh_isblockset(uspi, ucpi, bbase)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		adjust_free_blocks(sb, ucg, ucpi, bbase, 1);
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & SB_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed:
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	mutex_lock(&UFS_SB(sb)->s_lock);

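	/*
	 * The range may straddle a cylinder-group boundary: free what
	 * fits in the current group, then loop back via 'overflow' for
	 * the remainder.
	 */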
do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks outside the device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		if (ubh_isblockset(uspi, ucpi, i)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(uspi, ucpi, i);
		inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
		adjust_free_blocks(sb, ucg, ucpi, i, 1);
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & SB_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	ufs_mark_sb_dirty(sb);
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed_unlock:
	mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Modify the inode page cache so that the blocks that had b_blocknr
 * oldb...oldb+count-1 get b_blocknr newb...newb+count-1 instead.
 * We assume that oldb...oldb+count-1 lie at the end of the file.
 *
 * We can get here from ufs_writepage or ufs_prepare_write; locked_folio
 * is an argument of those functions, so it is already locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct folio *locked_folio)
{
	struct folio *folio;
	const unsigned blks_per_page =
		1 << (PAGE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	     inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!folio_test_locked(locked_folio));

	cur_index = locked_folio->index;
	end = count + beg;
	last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			folio = ufs_get_locked_folio(mapping, index);
			if (!folio) /* it was truncated */
				continue;
			if (IS_ERR(folio)) { /* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			folio = locked_folio;

		head = folio_buffers(folio);
		bh = head;
		pos = i & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;

		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
				map_bh(bh, inode->i_sb, oldb + pos);
			if (bh_read(bh, 0) < 0) {
				ufs_error(inode->i_sb, __func__,
					  "read of block failed\n");
				break;
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			bh->b_blocknr = newb + pos;
			clean_bdev_bh_alias(bh);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_folio(folio);
	}
	UFSD("EXIT\n");
}

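/*
 * Zero the freshly allocated fragments [beg, beg+n) through the buffer
 * cache so stale on-disk contents can never become visible; write them
 * out immediately when the inode is sync or 'sync' is set.
 */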
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
			    int sync)
{
	struct buffer_head *bh;
	sector_t end = beg + n;

	for (; beg < end; ++beg) {
		bh = sb_getblk(inode->i_sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (IS_SYNC(inode) || sync)
			sync_dirty_buffer(bh);
		brelse(bh);
	}
}

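/*
 * ufs_new_fragments() - allocate 'count' fragments starting at
 * 'fragment', updating the on-disk pointer at 'p' on success.
 * 'goal' is the preferred physical location, 0 for no preference.
 *
 * Three cases are handled: a brand-new allocation, extending an
 * existing partial block in place, and reallocating to a larger run
 * elsewhere, in which case the cached pages are repointed with
 * ufs_change_blocknr() and the old fragments freed.
 *
 * Returns the first fragment of the allocation, 0 when nothing was
 * allocated (with *err set), or INVBLOCK on an internal inconsistency.
 */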
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
			   u64 goal, unsigned count, int *err,
			   struct folio *locked_folio)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount;
	u64 tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	mutex_lock(&UFS_SB(sb)->s_lock);
	tmp = ufs_data_ptr_to_cpu(sb, p);

	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning(sb, "ufs_new_fragments", "internal warning"
			    " fragment %llu, count %u",
			    (unsigned long long)fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else may have just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error(sb, "ufs_new_fragments", "internal error, "
				  "fragment %llu, tmp %llu\n",
				  (unsigned long long)fragment,
				  (unsigned long long)tmp);
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return INVBLOCK;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	} else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	}

	/*
	 * Once free space drops to the root reserve, only a
	 * CAP_SYS_RESOURCE holder may keep allocating.
	 */
	if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) {
		if (!capable(CAP_SYS_RESOURCE)) {
			mutex_unlock(&UFS_SB(sb)->s_lock);
			UFSD("EXIT (FAILED)\n");
			return 0;
		}
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog(uspi, goal);

	/*
	 * allocate a new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			ufs_clear_frags(inode, result + oldcount,
					newcount - oldcount, locked_folio != NULL);
			*err = 0;
			write_seqlock(&UFS_I(inode)->meta_lock);
			ufs_cpu_to_data_ptr(sb, p, result);
			UFS_I(inode)->i_lastfrag =
				max(UFS_I(inode)->i_lastfrag, fragment + count);
			write_sequnlock(&UFS_I(inode)->meta_lock);
		}
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * extend the block in place
	 */
	result = ufs_add_fragments(inode, tmp, oldcount, newcount);
	if (result) {
		*err = 0;
		read_seqlock_excl(&UFS_I(inode)->meta_lock);
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
						fragment + count);
		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_folio != NULL);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * allocate a new block and move the data
	 */
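	/*
	 * fs_optim picks the allocation policy: UFS_OPTSPACE requests
	 * exactly what is needed, UFS_OPTTIME a whole block.  Flip
	 * between them as free-fragment pressure crosses the
	 * s_space_to_time / s_time_to_space thresholds.
	 */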
	if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) {
		request = newcount;
		if (uspi->cs_total.cs_nffree < uspi->s_space_to_time)
			usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
	} else {
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree > uspi->s_time_to_space)
			usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_folio != NULL);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
				   uspi->s_sbbase + tmp,
				   uspi->s_sbbase + result, locked_folio);
		*err = 0;
		write_seqlock(&UFS_I(inode)->meta_lock);
		ufs_cpu_to_data_ptr(sb, p, result);
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
						fragment + count);
		write_sequnlock(&UFS_I(inode)->meta_lock);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return 0;
}

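/*
 * Charge 'frags' fragments worth of bytes to the inode, backing the
 * charge out again if the i_blocks count would overflow 32 bits.
 */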
static bool try_add_frags(struct inode *inode, unsigned frags)
{
	unsigned size = frags * i_blocksize(inode);

	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, size);
	if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
		__inode_sub_bytes(inode, size);
		spin_unlock(&inode->i_lock);
		return false;
	}
	spin_unlock(&inode->i_lock);
	return true;
}

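/*
 * Try to extend the fragment run at 'fragment' in place from 'oldcount'
 * to 'newcount' fragments.  Returns 'fragment' on success, 0 when the
 * run cannot be extended (the following fragments are in use, the free
 * counters are short, or i_blocks would overflow).
 */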
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	count = newcount - oldcount;

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;

	if (!try_add_frags(inode, count))
		return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = ufs_get_seconds(sb);
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & SB_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}

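/*
 * Does cylinder group 'cgno' have room: either a whole free block, or
 * a run of free fragments of at least 'count' (cg_frsum[k] counts free
 * runs of exactly k fragments)?
 */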
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;

static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	oldcg = cgno;

	/*
	 * 1. search the preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute-force search
	 * We start at j = 2: oldcg was checked in step 1 and
	 * oldcg + 1 by the first rehash probe in step 2.
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = ufs_get_seconds(sb);

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succeed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;

		inode_sub_bytes(inode, i << uspi->s_fshift);
		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succeed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	if (!try_add_frags(inode, count))
		return 0;
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succeed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & SB_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}

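/*
 * Allocate one whole block in cylinder group 'ucpi', at 'goal' when it
 * is free, otherwise near the group's rotor.  Returns the first
 * fragment of the block, INVBLOCK when the bitmap search fails, or 0
 * when i_blocks would overflow.
 */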
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cylinder_group * ucg;
	u64 result;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(uspi, ucpi, goal)) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	if (!try_add_frags(inode, uspi->s_fpb))
		return 0;
	ubh_clrblock(uspi, ucpi, result);
	adjust_free_blocks(sb, ucg, ucpi, result, -1);

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}

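/*
 * Scan 'size' bytes of the bitmap starting at byte offset 'begin',
 * crossing buffer-head boundaries as needed, for a byte with
 * (table[byte] & mask) != 0.  Returns the number of bytes still
 * unscanned when the match was found (including the matching byte),
 * or 0 if no byte matched.
 */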
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}

/*
 * Find a run of free fragments of the specified size in the specified
 * cylinder group.
 * @sb: pointer to the super block
 * @ucpi: pointer to cylinder group info
 * @goal: block near which to look for the new one
 * @count: requested size, in fragments
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
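	/*
	 * E.g. for count == 1, mask 0x7 and want 0x2 match the pattern
	 * used-free-used, an isolated free fragment; shifting mask and
	 * want left slides that window across the block's map.
	 */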
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return INVBLOCK;
		}
		start = 0;
	}
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * Found the byte in the map; now locate the matching
	 * fragment run within it.
	 */

	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)(result + pos));
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return INVBLOCK;
}

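/*
 * Update the cluster bitmap and the cluster-size summary counters when
 * the block at 'frag' is freed (cnt > 0) or allocated (cnt < 0): the
 * adjacent free clusters before and after it merge or split, moving
 * between summary buckets.
 */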
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned frag, int cnt)
{
	struct ufs_sb_private_info * uspi = UFS_SB(sb)->s_uspi;
	int i, start, end, forw, back;
	unsigned blkno = ufs_fragstoblks(frag);

	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if (end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0)
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if (i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}

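/*
 * Fragment tables: map each possible bitmap byte to a bitmask
 * describing the free-fragment runs it contains.  ufs_bitmap_search()
 * probes them through ubh_scanc() with mask 1 << (count - 1 + (fpb & 7))
 * to find a byte containing a run of the requested size; e.g.
 * ufs_fragtable_8fpb[0x1f] is 0x10 (bit 4), a run of five free
 * fragments.  The first table serves 8-fragments-per-block layouts,
 * the second all other fragment counts.
 */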
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};
937