xref: /linux/fs/ufs/balloc.c (revision b454cc6636d254fbf6049b73e9560aee76fb04a3)
1 /*
2  *  linux/fs/ufs/balloc.c
3  *
4  * Copyright (C) 1998
5  * Daniel Pirkl <daniel.pirkl@email.cz>
6  * Charles University, Faculty of Mathematics and Physics
7  */
8 
9 #include <linux/fs.h>
10 #include <linux/ufs_fs.h>
11 #include <linux/stat.h>
12 #include <linux/time.h>
13 #include <linux/string.h>
14 #include <linux/quotaops.h>
15 #include <linux/buffer_head.h>
16 #include <linux/capability.h>
17 #include <linux/sched.h>
18 #include <linux/bitops.h>
19 #include <asm/byteorder.h>
20 
21 #include "swab.h"
22 #include "util.h"
23 
24 static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
25 static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
26 static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
27 static unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
28 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
29 static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
30 
31 /*
32  * Free 'count' fragments from fragment number 'fragment'
33  */
34 void ufs_free_fragments(struct inode *inode, unsigned fragment, unsigned count)
35 {
36 	struct super_block * sb;
37 	struct ufs_sb_private_info * uspi;
38 	struct ufs_super_block_first * usb1;
39 	struct ufs_cg_private_info * ucpi;
40 	struct ufs_cylinder_group * ucg;
41 	unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
42 
43 	sb = inode->i_sb;
44 	uspi = UFS_SB(sb)->s_uspi;
45 	usb1 = ubh_get_usb_first(uspi);
46 
47 	UFSD("ENTER, fragment %u, count %u\n", fragment, count);
48 
49 	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
50 		ufs_error (sb, "ufs_free_fragments", "internal error");
51 
52 	lock_super(sb);
53 
54 	cgno = ufs_dtog(fragment);
55 	bit = ufs_dtogd(fragment);
56 	if (cgno >= uspi->s_ncg) {
57 		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
58 		goto failed;
59 	}
60 
61 	ucpi = ufs_load_cylinder (sb, cgno);
62 	if (!ucpi)
63 		goto failed;
64 	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
65 	if (!ufs_cg_chkmagic(sb, ucg)) {
66 		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
67 		goto failed;
68 	}
69 
70 	end_bit = bit + count;
71 	bbase = ufs_blknum (bit);
72 	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
73 	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
74 	for (i = bit; i < end_bit; i++) {
75 		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
76 			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
77 		else
78 			ufs_error (sb, "ufs_free_fragments",
79 				   "bit already cleared for fragment %u", i);
80 	}
81 
82 	DQUOT_FREE_BLOCK (inode, count);
83 
84 
85 	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
86 	uspi->cs_total.cs_nffree += count;
87 	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
88 	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
89 	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
90 
91 	/*
92 	 * Trying to reassemble free fragments into block
93 	 */
94 	blkno = ufs_fragstoblks (bbase);
95 	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
96 		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
97 		uspi->cs_total.cs_nffree -= uspi->s_fpb;
98 		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
99 		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
100 			ufs_clusteracct (sb, ucpi, blkno, 1);
101 		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
102 		uspi->cs_total.cs_nbfree++;
103 		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
104 		cylno = ufs_cbtocylno (bbase);
105 		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
106 		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
107 	}
108 
109 	ubh_mark_buffer_dirty (USPI_UBH(uspi));
110 	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
111 	if (sb->s_flags & MS_SYNCHRONOUS) {
112 		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
113 		ubh_wait_on_buffer (UCPI_UBH(ucpi));
114 	}
115 	sb->s_dirt = 1;
116 
117 	unlock_super (sb);
118 	UFSD("EXIT\n");
119 	return;
120 
121 failed:
122 	unlock_super (sb);
123 	UFSD("EXIT (FAILED)\n");
124 	return;
125 }
126 
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks).
 * Both 'fragment' and 'count' must be whole-block aligned; the range may
 * span cylinder-group boundaries (handled by the do_more loop below).
 */
void ufs_free_blocks(struct inode *inode, unsigned fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %u, count %u\n", fragment, count);

	/* start and length must both be block-aligned */
	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			"fragment %u, count %u\n", fragment, count);
		goto failed;
	}

	lock_super(sb);

do_more:
	/*
	 * Free what fits in the current cylinder group; any remainder
	 * past the group's end ("overflow") is handled by looping back.
	 */
	overflow = 0;
	cgno = ufs_dtog (fragment);
	bit = ufs_dtogd (fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	/* free one whole block per iteration, updating all counters */
	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		DQUOT_FREE_BLOCK(inode, uspi->s_fpb);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		cylno = ufs_cbtocylno(i);
		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	/* rest of the range lives in the next cylinder group */
	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed_unlock:
	unlock_super (sb);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
219 
/*
 * Modify inode page cache in such way:
 * have - blocks with b_blocknr equal to oldb...oldb+count-1
 * get - blocks with b_blocknr equal to newb...newb+count-1
 * also we suppose that oldb...oldb+count-1 blocks
 * situated at the end of file.
 *
 * We can come here from ufs_writepage or ufs_prepare_write,
 * locked_page is argument of these functions, so we already lock it.
 */
static void ufs_change_blocknr(struct inode *inode, unsigned int beg,
			       unsigned int count, unsigned int oldb,
			       unsigned int newb, struct page *locked_page)
{
	/* blocks per page minus one, used as an in-page block-offset mask */
	const unsigned mask = (1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index;
	unsigned end, pos, j;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
	      inode->i_ino, count, oldb, newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;

	/* walk the affected block range one page at a time */
	for (end = count + beg; beg < end; beg = (beg | mask) + 1) {
		index = beg >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page || IS_ERR(page)) /* it was truncated or EIO */
				continue;
		} else
			page = locked_page;	/* caller already holds this page lock */

		head = page_buffers(page);
		bh = head;
		/* skip buffers preceding the first block of interest */
		pos = beg & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;
		j = 0;	/* counts buffers actually retargeted */
		do {
			if (buffer_mapped(bh)) {
				pos = bh->b_blocknr - oldb;
				if (pos < count) {
					UFSD(" change from %llu to %llu\n",
					     (unsigned long long)pos + oldb,
					     (unsigned long long)pos + newb);
					/* repoint the buffer at its new on-disk block */
					bh->b_blocknr = newb + pos;
					unmap_underlying_metadata(bh->b_bdev,
								  bh->b_blocknr);
					mark_buffer_dirty(bh);
					++j;
				}
			}

			bh = bh->b_this_page;
		} while (bh != head);

		if (j)
			set_page_dirty(page);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}
291 
292 static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
293 			    int sync)
294 {
295 	struct buffer_head *bh;
296 	sector_t end = beg + n;
297 
298 	for (; beg < end; ++beg) {
299 		bh = sb_getblk(inode->i_sb, beg);
300 		lock_buffer(bh);
301 		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
302 		set_buffer_uptodate(bh);
303 		mark_buffer_dirty(bh);
304 		unlock_buffer(bh);
305 		if (IS_SYNC(inode) || sync)
306 			sync_dirty_buffer(bh);
307 		brelse(bh);
308 	}
309 }
310 
/*
 * ufs_new_fragments - allocate 'count' fragments for position 'fragment'
 * of an inode.
 *
 * @p points at the on-disk block pointer to update; *p == 0 means nothing
 * is allocated there yet.  Three cases:
 *   1) oldcount == 0: allocate a fresh run of fragments;
 *   2) the existing run can be extended in place (ufs_add_fragments);
 *   3) otherwise allocate a new area, retarget the cached pages
 *      (ufs_change_blocknr) and free the old fragments.
 * Returns the first allocated fragment number; 0 on failure (with *err
 * set) or (unsigned)-1 on internal inconsistency.
 */
unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
			   unsigned goal, unsigned count, int * err, struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount, tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	lock_super (sb);

	tmp = fs32_to_cpu(sb, *p);	/* currently mapped fragment, if any */
	/* clamp the request so it never crosses a block boundary */
	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning (sb, "ufs_new_fragments", "internal warning"
			" fragment %u, count %u", fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error (sb, "ufs_new_fragments", "internal error, "
				"fragment %u, tmp %u\n", fragment, tmp);
			unlock_super (sb);
			return (unsigned)-1;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_super (sb);
			return 0;
		}
	}
	else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_super(sb);
			return 0;
		}
	}

	/*
	 * There is not enough space for user on the device
	 */
	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
		unlock_super (sb);
		UFSD("EXIT (FAILED)\n");
		return 0;
	}

	/* pick a cylinder group: near the goal, or near the inode */
	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog (goal);

	/*
	 * allocate new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			*p = cpu_to_fs32(sb, result);
			*err = 0;
			UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
			ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
					locked_page != NULL);
		}
		unlock_super(sb);
		UFSD("EXIT, result %u\n", result);
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
	if (result) {
		*err = 0;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		unlock_super(sb);
		UFSD("EXIT, result %u\n", result);
		return result;
	}

	/*
	 * allocate new block and move data
	 */
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	    case UFS_OPTSPACE:
		request = newcount;	/* space policy: ask only for what we need */
		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
			break;
		/* free space getting tight: switch policy to optimize for time */
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	    default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		/* fall through */
	    case UFS_OPTTIME:
		request = uspi->s_fpb;	/* time policy: over-allocate a whole block */
		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		/*
		 * NOTE(review): BSD ffs switches to OPTSPACE at this point
		 * when fragmentation pressure is low; rewriting OPTTIME here
		 * is a no-op -- confirm intent.
		 */
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		/* retarget cached pages from the old area to the new one */
		ufs_change_blocknr(inode, fragment - oldcount, oldcount, tmp,
				   result, locked_page);

		*p = cpu_to_fs32(sb, result);
		*err = 0;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		unlock_super(sb);
		/* give back any over-allocation, then the old fragment run */
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %u\n", result);
		return result;
	}

	unlock_super(sb);
	UFSD("EXIT (FAILED)\n");
	return 0;
}
452 
453 static unsigned
454 ufs_add_fragments (struct inode * inode, unsigned fragment,
455 		   unsigned oldcount, unsigned newcount, int * err)
456 {
457 	struct super_block * sb;
458 	struct ufs_sb_private_info * uspi;
459 	struct ufs_super_block_first * usb1;
460 	struct ufs_cg_private_info * ucpi;
461 	struct ufs_cylinder_group * ucg;
462 	unsigned cgno, fragno, fragoff, count, fragsize, i;
463 
464 	UFSD("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount);
465 
466 	sb = inode->i_sb;
467 	uspi = UFS_SB(sb)->s_uspi;
468 	usb1 = ubh_get_usb_first (uspi);
469 	count = newcount - oldcount;
470 
471 	cgno = ufs_dtog(fragment);
472 	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
473 		return 0;
474 	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
475 		return 0;
476 	ucpi = ufs_load_cylinder (sb, cgno);
477 	if (!ucpi)
478 		return 0;
479 	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
480 	if (!ufs_cg_chkmagic(sb, ucg)) {
481 		ufs_panic (sb, "ufs_add_fragments",
482 			"internal error, bad magic number on cg %u", cgno);
483 		return 0;
484 	}
485 
486 	fragno = ufs_dtogd (fragment);
487 	fragoff = ufs_fragnum (fragno);
488 	for (i = oldcount; i < newcount; i++)
489 		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
490 			return 0;
491 	/*
492 	 * Block can be extended
493 	 */
494 	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
495 	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
496 		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
497 			break;
498 	fragsize = i - oldcount;
499 	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
500 		ufs_panic (sb, "ufs_add_fragments",
501 			"internal error or corrupted bitmap on cg %u", cgno);
502 	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
503 	if (fragsize != count)
504 		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
505 	for (i = oldcount; i < newcount; i++)
506 		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
507 	if(DQUOT_ALLOC_BLOCK(inode, count)) {
508 		*err = -EDQUOT;
509 		return 0;
510 	}
511 
512 	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
513 	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
514 	uspi->cs_total.cs_nffree -= count;
515 
516 	ubh_mark_buffer_dirty (USPI_UBH(uspi));
517 	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
518 	if (sb->s_flags & MS_SYNCHRONOUS) {
519 		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
520 		ubh_wait_on_buffer (UCPI_UBH(ucpi));
521 	}
522 	sb->s_dirt = 1;
523 
524 	UFSD("EXIT, fragment %u\n", fragment);
525 
526 	return fragment;
527 }
528 
/*
 * Test whether cylinder group 'cgno' can satisfy the allocation: jump to
 * cg_found if the group has a whole free block, or a free fragment run of
 * at least 'count' fragments (cg_frsum[k] counts free runs of length k).
 * Relies on sb, uspi, cgno, count, k and ucg being in scope at expansion.
 */
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;
/*
 * ufs_alloc_fragments - allocate 'count' fragments near 'goal', preferring
 * cylinder group 'cgno'.
 *
 * Group selection follows classic FFS policy:
 *   1. the preferred group itself,
 *   2. quadratic rehash across the groups,
 *   3. linear brute-force sweep.
 * Within the chosen group either a whole block is taken (breaking it up
 * and re-freeing the unused tail) or a suitable fragment run is located
 * via the frsum[] run-length table.  Returns the absolute first fragment
 * number, or 0 on failure / quota error (*err set).
 */
static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
	unsigned goal, unsigned count, int * err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, result, allocsize;

	UFSD("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	oldcg = cgno;

	/*
	 * 1. searching on preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	/* exactly one block requested: allocate a whole block */
	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == (unsigned)-1)
			return 0;
		goto succed;
	}

	/* find the smallest available run length >= count via frsum[] */
	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		/*
		 * No suitable fragment run exists: break up a whole block
		 * and return the unused tail to the free-fragment pool.
		 */
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == (unsigned)-1)
			return 0;
		goal = ufs_dtogd (result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;
		DQUOT_FREE_BLOCK(inode, i);

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == (unsigned)-1)
		return 0;
	if(DQUOT_ALLOC_BLOCK(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}
	/* mark the fragments in use and fix up run-length accounting */
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	result += cgno * uspi->s_fpg;	/* convert to absolute fragment number */
	UFSD("EXIT3, result %u\n", result);
	return result;
}
653 
654 static unsigned ufs_alloccg_block (struct inode * inode,
655 	struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
656 {
657 	struct super_block * sb;
658 	struct ufs_sb_private_info * uspi;
659 	struct ufs_super_block_first * usb1;
660 	struct ufs_cylinder_group * ucg;
661 	unsigned result, cylno, blkno;
662 
663 	UFSD("ENTER, goal %u\n", goal);
664 
665 	sb = inode->i_sb;
666 	uspi = UFS_SB(sb)->s_uspi;
667 	usb1 = ubh_get_usb_first(uspi);
668 	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
669 
670 	if (goal == 0) {
671 		goal = ucpi->c_rotor;
672 		goto norot;
673 	}
674 	goal = ufs_blknum (goal);
675 	goal = ufs_dtogd (goal);
676 
677 	/*
678 	 * If the requested block is available, use it.
679 	 */
680 	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
681 		result = goal;
682 		goto gotit;
683 	}
684 
685 norot:
686 	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
687 	if (result == (unsigned)-1)
688 		return (unsigned)-1;
689 	ucpi->c_rotor = result;
690 gotit:
691 	blkno = ufs_fragstoblks(result);
692 	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
693 	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
694 		ufs_clusteracct (sb, ucpi, blkno, -1);
695 	if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
696 		*err = -EDQUOT;
697 		return (unsigned)-1;
698 	}
699 
700 	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
701 	uspi->cs_total.cs_nbfree--;
702 	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
703 	cylno = ufs_cbtocylno(result);
704 	fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
705 	fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
706 
707 	UFSD("EXIT, result %u\n", result);
708 
709 	return result;
710 }
711 
/*
 * Scan 'size' bytes of the bitmap at byte offset 'begin' within the
 * buffer chain 'ubh', looking for the first byte b with
 * (table[b] & mask) != 0.  Returns the number of bytes remaining
 * *including* the matching byte, or 0 if nothing matched (same contract
 * as the BSD scanc() primitive).
 */
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;


	/* split 'begin' into (buffer index, byte offset within buffer) */
	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		/* bytes to scan inside the current buffer */
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		/* on a hit the && short-circuits, leaving 'rest' counting the match */
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}
739 
740 /*
741  * Find a block of the specified size in the specified cylinder group.
742  * @sp: pointer to super block
743  * @ucpi: pointer to cylinder group info
744  * @goal: near which block we want find new one
745  * @count: specified size
746  */
static unsigned ufs_bitmap_search(struct super_block *sb,
				  struct ufs_cg_private_info *ucpi,
				  unsigned goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_super_block_first *usb1;
	struct ufs_cylinder_group *ucg;
	unsigned start, length, loc, result;
	unsigned pos, want, blockmap, mask, end;

	UFSD("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count);

	usb1 = ubh_get_usb_first (uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	/* start the byte scan at the goal, or at the group's rotor */
	if (goal)
		start = ufs_dtogd(goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	/* scan forward from 'start' to the end of the group... */
	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		/* ...then wrap around and scan from the beginning */
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			/* frsum[] promised a run but the bitmap disagrees */
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return (unsigned)-1;
		}
		start = 0;
	}
	/* ubh_scanc returns bytes left including the hit: convert to a fragment no. */
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */

	/* locate the run's exact bit position within the matched byte */
	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %u\n", result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return (unsigned)-1;
}
824 
/*
 * Maintain the 44BSD cluster summary: c_clusteroff is a bitmap with a set
 * bit per free block, and the cluster-sum table counts free clusters by
 * length (capped at s_contigsumsize).  @cnt is +1 when block @blkno is
 * being freed, -1 when it is being allocated.
 */
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	/* clustering disabled on this filesystem */
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if ( end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0 )
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if ( i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}
877 
878 
/*
 * Lookup table for filesystems with 8 fragments per block: maps each
 * possible byte of the free-fragment bitmap to a bitmask of the maximal
 * free-run lengths it contains (bit k-1 set => a run of exactly k free
 * fragments).  ufs_bitmap_search() probes entries with mask
 * 1 << (count - 1) (s_fpb & 7 is 0 here).
 */
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};
897 
/*
 * Run-length lookup table used when s_fpb != 8, i.e. a bitmap byte
 * covers more than one block.  ufs_bitmap_search() probes entries with
 * mask 1 << (count - 1 + (s_fpb & 7)); the bit layout packs run
 * information for the different possible fragments-per-block values
 * (encoding inherited from BSD ffs -- NOTE(review): see the BSD
 * fragtbl tables for the exact per-fpb bit assignment).
 */
static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E,	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};
916