xref: /linux/fs/ufs/balloc.c (revision d257f9bf06129613de539ea71ecea60848b662cd)
/*
 *  linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

#define INVBLOCK ((u64)-1L)

static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);

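/*
 * A note on the bitmaps used below: in the cylinder-group fragment map
 * (at ucpi->c_freeoff) a set bit means the fragment is free and a clear
 * bit means it is allocated, so freeing sets bits and allocation clears
 * them.
 */
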
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	mutex_lock(&UFS_SB(sb)->s_lock);

	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freed fragments are outside the device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
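	/*
	 * Retire this block's old free-run statistics from cg_frsum before
	 * toggling any bits; they are recomputed from the updated map below.
	 */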
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	inode_sub_bytes(inode, count << uspi->s_fshift);
	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Try to reassemble the freed fragments back into a whole free block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed:
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	mutex_lock(&UFS_SB(sb)->s_lock);

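	/*
	 * The run may cross a cylinder-group boundary: free what fits in the
	 * current group, then loop back via do_more with the overflow.
	 */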
do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freed blocks are outside the device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	ufs_mark_sb_dirty(sb);
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed_unlock:
	mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Modify the inode page cache so that buffers with b_blocknr in the
 * range oldb...oldb+count-1 become buffers with b_blocknr in the range
 * newb...newb+count-1.  We also assume that oldb...oldb+count-1 lie at
 * the end of the file.
 *
 * We can get here from ufs_writepage or ufs_prepare_write; locked_page
 * is an argument of those functions, so it is already locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	     inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
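	/*
	 * Walk the range one page at a time: (i | mask) + 1 advances i to
	 * the first block of the next page.
	 */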
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page)/* it was truncated */
				continue;
			if (IS_ERR(page)) {/* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;

		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
				map_bh(bh, inode->i_sb, oldb + pos);
			if (!buffer_uptodate(bh)) {
				ll_rw_block(REQ_OP_READ, 0, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			bh->b_blocknr = newb + pos;
			clean_bdev_bh_alias(bh);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}

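/*
 * Zero the given range of fragments through the buffer cache, marking the
 * buffers dirty and, for synchronous inodes or callers, flushing them to
 * disk immediately.
 */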
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
			    int sync)
{
	struct buffer_head *bh;
	sector_t end = beg + n;

	for (; beg < end; ++beg) {
		bh = sb_getblk(inode->i_sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (IS_SYNC(inode) || sync)
			sync_dirty_buffer(bh);
		brelse(bh);
	}
}

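/*
 * ufs_new_fragments - allocate 'count' fragments at 'fragment' for the
 * block pointer 'p'.  Under the superblock lock there are three cases:
 *   1) nothing is allocated at 'fragment' yet: allocate fresh fragments;
 *   2) the existing partial block behind 'p' can be extended in place
 *      (ufs_add_fragments);
 *   3) otherwise allocate a new run, move the old data over
 *      (ufs_change_blocknr) and free the old fragments.
 * Returns the first fragment of the allocation, 0 on failure, or INVBLOCK
 * on internal error.
 */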
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
			   u64 goal, unsigned count, int *err,
			   struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount;
	u64 tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	mutex_lock(&UFS_SB(sb)->s_lock);
	tmp = ufs_data_ptr_to_cpu(sb, p);

	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning(sb, "ufs_new_fragments", "internal warning"
			    " fragment %llu, count %u",
			    (unsigned long long)fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error(sb, "ufs_new_fragments", "internal error, "
				  "fragment %llu, tmp %llu\n",
				  (unsigned long long)fragment,
				  (unsigned long long)tmp);
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return INVBLOCK;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	} else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	}

	/*
	 * Only the root reserve is left on the device: fail unless the
	 * caller has CAP_SYS_RESOURCE.
	 */
	if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) {
		if (!capable(CAP_SYS_RESOURCE)) {
			mutex_unlock(&UFS_SB(sb)->s_lock);
			UFSD("EXIT (FAILED)\n");
			return 0;
		}
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog(uspi, goal);

	/*
	 * allocate new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			ufs_clear_frags(inode, result + oldcount,
					newcount - oldcount, locked_page != NULL);
			*err = 0;
			write_seqlock(&UFS_I(inode)->meta_lock);
			ufs_cpu_to_data_ptr(sb, p, result);
			UFS_I(inode)->i_lastfrag =
				max(UFS_I(inode)->i_lastfrag, fragment + count);
			write_sequnlock(&UFS_I(inode)->meta_lock);
		}
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments(inode, tmp, oldcount, newcount);
	if (result) {
		*err = 0;
		read_seqlock_excl(&UFS_I(inode)->meta_lock);
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
						fragment + count);
		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * allocate new block and move data
	 */
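	/*
	 * fs_optim selects the allocation policy: under UFS_OPTSPACE we
	 * request only the fragments actually needed, under UFS_OPTTIME a
	 * whole block.  The policy flips when the free-fragment count
	 * crosses the s_space_to_time / s_time_to_space thresholds.
	 */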
	if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) {
		request = newcount;
		if (uspi->cs_total.cs_nffree < uspi->s_space_to_time)
			usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
	} else {
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree > uspi->s_time_to_space)
			usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
				   uspi->s_sbbase + tmp,
				   uspi->s_sbbase + result, locked_page);
		*err = 0;
		write_seqlock(&UFS_I(inode)->meta_lock);
		ufs_cpu_to_data_ptr(sb, p, result);
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
						fragment + count);
		write_sequnlock(&UFS_I(inode)->meta_lock);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return 0;
}

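/*
 * Charge 'frags' fragments to i_blocks, backing the charge out again if
 * the resulting (512-byte) block count no longer fits in 32 bits,
 * presumably because that is all the on-disk inode can safely hold.
 */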
static bool try_add_frags(struct inode *inode, unsigned frags)
{
	unsigned size = frags * i_blocksize(inode);

	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, size);
	if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
		__inode_sub_bytes(inode, size);
		spin_unlock(&inode->i_lock);
		return false;
	}
	spin_unlock(&inode->i_lock);
	return true;
}

static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	count = newcount - oldcount;

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;

	if (!try_add_frags(inode, count))
		return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
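	/*
	 * cg_frsum[n] counts the free runs of exactly n fragments in this
	 * cylinder group.  The run we are extending into had length
	 * i - oldcount: consume it, and if we did not use all of it,
	 * account for the shorter leftover run.
	 */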
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}

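/*
 * Jump to cg_found when cylinder group 'cgno' has either a whole free
 * block or a run of at least 'count' free fragments.
 */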
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;

static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	oldcg = cgno;

	/*
	 * 1. searching on preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at j = 2: offset 0 from oldcg was checked in step 1,
	 * offset 1 in step 2
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succeed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;

		inode_sub_bytes(inode, i << uspi->s_fshift);
		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succeed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	if (!try_add_frags(inode, count))
		return 0;
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succeed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}

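/*
 * Allocate one whole block in cylinder group 'ucpi': take 'goal' if that
 * block is free, otherwise search the free-block bitmap, starting from
 * the per-group rotor when no goal was given.
 */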
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	if (!try_add_frags(inode, uspi->s_fpb))
		return 0;
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}

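/*
 * Scan 'size' bytes of the bitmap starting at offset 'begin' for a byte
 * whose fragment-table entry has one of the 'mask' bits set; returns the
 * number of bytes left unscanned (0 means no match was found).
 */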
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}

/*
 * Find a run of free fragments of the specified size in the specified
 * cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: block near which we want to find the new one
 * @count: size of the run, in fragments
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return INVBLOCK;
		}
		start = 0;
	}
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */

	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return INVBLOCK;
}

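/*
 * Keep the cluster map and the cluster summary array in sync when a block
 * is freed (cnt > 0) or allocated (cnt < 0): merge or split the
 * neighbouring free clusters and move their counts between size buckets.
 */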
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if (end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0)
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if (i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}

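/*
 * Per-byte fragment-run tables consulted by ubh_scanc().  With eight
 * fragments per block, ufs_fragtable_8fpb[b] has bit (n - 1) set when
 * bitmap byte 'b' contains a run of exactly n free fragments (0xff, all
 * free, maps to 0x80).  ufs_fragtable_other encodes the same information
 * shifted to match the '1 << (count - 1 + (s_fpb & 7))' mask that
 * ufs_bitmap_search() uses for the other fragments-per-block sizes.
 */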
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};