// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

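/*
 * Sentinel returned by the allocation helpers below when the search for a
 * free block or fragment run fails.
 */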
#define INVBLOCK ((u64)-1L)

static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);

/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	mutex_lock(&UFS_SB(sb)->s_lock);

	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks outside the device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
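	/*
	 * Forget the old free-fragment pattern of this block: ufs_fragacct()
	 * with -1 removes it from the cg_frsum[] histogram; it is re-added
	 * below after the bits have been flipped.
	 */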
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	inode_sub_bytes(inode, count << uspi->s_fshift);
	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Try to reassemble the free fragments into a whole block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
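		/*
		 * Every fragment of the block is free again: undo the
		 * per-fragment accounting and count one whole free block.
		 */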
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & SB_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed:
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	mutex_lock(&UFS_SB(sb)->s_lock);

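	/*
	 * The range may cross a cylinder group boundary: free what falls
	 * inside the current group, then loop back for the overflow.
	 */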
do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks outside the device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & SB_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	ufs_mark_sb_dirty(sb);
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed_unlock:
	mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Modify the inode page cache so that blocks whose b_blocknr is
 * oldb...oldb+count-1 become blocks with b_blocknr newb...newb+count-1.
 * We also assume that the blocks oldb...oldb+count-1 are situated at
 * the end of the file.
 *
 * We can get here from ufs_writepage or ufs_prepare_write; locked_page
 * is an argument of those functions, so it is already locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	      inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
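	/*
	 * Walk the range page by page: 'i | mask' rounds i up to the last
	 * block of its page, so each pass handles one page worth of buffers.
	 */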
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page)/* it was truncated */
				continue;
			if (IS_ERR(page)) {/* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;

		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
				map_bh(bh, inode->i_sb, oldb + pos);
			if (bh_read(bh, 0) < 0) {
				ufs_error(inode->i_sb, __func__,
					  "read of block failed\n");
				break;
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			bh->b_blocknr = newb + pos;
			clean_bdev_bh_alias(bh);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}

static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
			    int sync)
{
	struct buffer_head *bh;
	sector_t end = beg + n;

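	/*
	 * Zero the freshly allocated fragments through the buffer cache so
	 * stale on-disk contents never become visible in the file.
	 */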
	for (; beg < end; ++beg) {
		bh = sb_getblk(inode->i_sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (IS_SYNC(inode) || sync)
			sync_dirty_buffer(bh);
		brelse(bh);
	}
}

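/*
 * Allocate 'count' fragments, extending the partial block at 'fragment' when
 * possible.  Three cases are handled below: a fresh allocation, an in-place
 * extension of the last partial block, and relocation to a bigger run
 * followed by freeing the old fragments.  Returns the first allocated
 * fragment, 0 on failure, or INVBLOCK on internal inconsistency.
 */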
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
			   u64 goal, unsigned count, int *err,
			   struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount;
	u64 tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	mutex_lock(&UFS_SB(sb)->s_lock);
	tmp = ufs_data_ptr_to_cpu(sb, p);

	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning(sb, "ufs_new_fragments", "internal warning,"
			    " fragment %llu, count %u",
			    (unsigned long long)fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error(sb, "ufs_new_fragments", "internal error, "
				  "fragment %llu, tmp %llu\n",
				  (unsigned long long)fragment,
				  (unsigned long long)tmp);
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return INVBLOCK;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	}
	else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	}

	/*
	 * There is not enough space for the user on the device
	 */
	if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) {
		if (!capable(CAP_SYS_RESOURCE)) {
			mutex_unlock(&UFS_SB(sb)->s_lock);
			UFSD("EXIT (FAILED)\n");
			return 0;
		}
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog(uspi, goal);

	/*
	 * allocate new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			ufs_clear_frags(inode, result + oldcount,
					newcount - oldcount, locked_page != NULL);
			*err = 0;
			write_seqlock(&UFS_I(inode)->meta_lock);
			ufs_cpu_to_data_ptr(sb, p, result);
			UFS_I(inode)->i_lastfrag =
				max(UFS_I(inode)->i_lastfrag, fragment + count);
			write_sequnlock(&UFS_I(inode)->meta_lock);
		}
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments(inode, tmp, oldcount, newcount);
	if (result) {
		*err = 0;
		read_seqlock_excl(&UFS_I(inode)->meta_lock);
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
						fragment + count);
		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * allocate new block and move data
	 */
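	/*
	 * Under UFS_OPTSPACE request only the fragments actually needed;
	 * under UFS_OPTTIME request a whole block to limit fragmentation.
	 * fs_optim flips between the two once the number of free fragments
	 * crosses the s_space_to_time / s_time_to_space thresholds.
	 */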
	if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) {
		request = newcount;
		if (uspi->cs_total.cs_nffree < uspi->s_space_to_time)
			usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
	} else {
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree > uspi->s_time_to_space)
			usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
				   uspi->s_sbbase + tmp,
				   uspi->s_sbbase + result, locked_page);
		*err = 0;
		write_seqlock(&UFS_I(inode)->meta_lock);
		ufs_cpu_to_data_ptr(sb, p, result);
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
						fragment + count);
		write_sequnlock(&UFS_I(inode)->meta_lock);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return 0;
}

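/*
 * Charge 'frags' fragments worth of bytes to i_blocks, backing the change
 * out if the resulting count would no longer fit in 32 bits (the width of
 * the on-disk block counter).
 */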
static bool try_add_frags(struct inode *inode, unsigned frags)
{
	unsigned size = frags * i_blocksize(inode);
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, size);
	if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
		__inode_sub_bytes(inode, size);
		spin_unlock(&inode->i_lock);
		return false;
	}
	spin_unlock(&inode->i_lock);
	return true;
}

static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	count = newcount - oldcount;

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;

	if (!try_add_frags(inode, count))
		return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = ufs_get_seconds(sb);
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & SB_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}

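/*
 * Jump to cg_found when cylinder group 'cgno' has either a whole free block
 * or (according to cg_frsum[]) a free run of at least 'count' fragments.
 */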
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;

static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	oldcg = cgno;

	/*
	 * 1. search the preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at j = 2 (offset 0 was checked in step 1, offset 1 in step 2)
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = ufs_get_seconds(sb);

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succeed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;

		inode_sub_bytes(inode, i << uspi->s_fshift);
		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succeed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	if (!try_add_frags(inode, count))
		return 0;
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succeed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & SB_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}

static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	if (!try_add_frags(inode, uspi->s_fpb))
		return 0;
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}

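/*
 * Scan 'size' bytes of the block map, spread over the buffer heads of 'ubh',
 * starting at byte offset 'begin'.  A byte matches when (table[byte] & mask)
 * is non-zero.  Returns the number of bytes left to scan at the match (0 if
 * nothing matched); the caller converts that into a byte position.
 */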
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: near which block we want to find the new one
 * @count: specified size
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return INVBLOCK;
		}
		start = 0;
	}
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */

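	/*
	 * The matching byte may cover several blocks.  Slide the
	 * mask_arr/want_arr window over the shifted map to find a run of
	 * exactly 'count' free fragments; the shift supplies the boundary
	 * bit at the low end.
	 */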
	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return INVBLOCK;
}

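/*
 * Keep the cluster bitmap and the cluster summary counts in sync when one
 * block is freed (cnt == 1) or allocated (cnt == -1), merging it with any
 * neighbouring free clusters.
 */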
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if (end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0)
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if (i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}

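/*
 * ufs_fragtable_8fpb[map] has bit (n - 1) set for every length n of free
 * fragment run present in block map byte 'map' (8 fragments per block).
 * ufs_fragtable_other encodes the same information for 1, 2 and 4 fragments
 * per block at bit offsets shifted by the fragments-per-block value, which
 * is why ubh_scanc() is called with mask 1 << (count - 1 + (s_fpb & 7)).
 */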
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};
958