xref: /linux/fs/jfs/jfs_dmap.c (revision f79adee883586b94cf977e4d28384ea0288473ed)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *   Copyright (C) International Business Machines Corp., 2000-2004
4  *   Portions Copyright (C) Tino Reichardt, 2012
5  */
6 
7 #include <linux/fs.h>
8 #include <linux/slab.h>
9 #include "jfs_incore.h"
10 #include "jfs_superblock.h"
11 #include "jfs_dmap.h"
12 #include "jfs_imap.h"
13 #include "jfs_lock.h"
14 #include "jfs_metapage.h"
15 #include "jfs_debug.h"
16 #include "jfs_discard.h"
17 
18 /*
19  *	SERIALIZATION of the Block Allocation Map.
20  *
21  *	the working state of the block allocation map is accessed in
22  *	two directions:
23  *
24  *	1) allocation and free requests that start at the dmap
25  *	   level and move up through the dmap control pages (i.e.
26  *	   the vast majority of requests).
27  *
28  *	2) allocation requests that start at dmap control page
29  *	   level and work down towards the dmaps.
30  *
31  *	the serialization scheme used here is as follows.
32  *
33  *	requests that start at the bottom are serialized against each
34  *	other through buffers, and each request holds onto its buffers
35  *	as it works its way up from a single dmap to the required level
36  *	of dmap control page.
37  *	requests that start at the top are serialized against each other,
38  *	and against requests that start from the bottom, by the multiple
39  *	read/single write inode lock of the bmap inode. requests starting
40  *	at the top take this lock in write mode while requests starting
41  *	at the bottom take the lock in read mode.  a single top-down
42  *	request may proceed exclusively while multiple bottom-up requests
43  *	may proceed simultaneously (under the protection of busy buffers).
44  *
45  *	in addition to information found in dmaps and dmap control pages,
46  *	the working state of the block allocation map also includes read/
47  *	write information maintained in the bmap descriptor (i.e. total
48  *	free block count, allocation group level free block counts).
49  *	a single exclusive lock (BMAP_LOCK) is used to guard this information
50  *	in the face of multiple bottom-up requests.
51  *	(lock ordering: IREAD_LOCK, BMAP_LOCK);
52  *
53  *	accesses to the persistent state of the block allocation map (limited
54  *	to the persistent bitmaps in dmaps) are guarded by (busy) buffers.
55  */
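/*
 *	a rough sketch of the two paths described above (the concrete
 *	sequences live in dbFree() and dbAlloc() below; this is only an
 *	illustration, not additional locking rules):
 *
 *	bottom-up (starts at a dmap):
 *		IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
 *		mp = read_metapage(...);	dmap buffer held while busy
 *		dbFreeDmap()/dbAllocNext();	may propagate up via dbAdjCtl()
 *		write_metapage(mp);
 *		IREAD_UNLOCK(ipbmap);
 *
 *	top-down (starts at a dmap control page):
 *		IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
 *		dbFindCtl();			search L2 -> L1 -> L0
 *		dbAllocCtl();			allocate within the dmap(s)
 *		IWRITE_UNLOCK(ipbmap);
 */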
56 
57 #define BMAP_LOCK_INIT(bmp)	mutex_init(&bmp->db_bmaplock)
58 #define BMAP_LOCK(bmp)		mutex_lock(&bmp->db_bmaplock)
59 #define BMAP_UNLOCK(bmp)	mutex_unlock(&bmp->db_bmaplock)
60 
61 /*
62  * forward references
63  */
64 static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
65 			int nblocks);
66 static void dbSplit(dmtree_t *tp, int leafno, int splitsz, int newval, bool is_ctl);
67 static int dbBackSplit(dmtree_t *tp, int leafno, bool is_ctl);
68 static int dbJoin(dmtree_t *tp, int leafno, int newval, bool is_ctl);
69 static void dbAdjTree(dmtree_t *tp, int leafno, int newval, bool is_ctl);
70 static int dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc,
71 		    int level);
72 static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results);
73 static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
74 		       int nblocks);
75 static int dbAllocNear(struct bmap * bmp, struct dmap * dp, s64 blkno,
76 		       int nblocks,
77 		       int l2nb, s64 * results);
78 static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
79 		       int nblocks);
80 static int dbAllocDmapLev(struct bmap * bmp, struct dmap * dp, int nblocks,
81 			  int l2nb,
82 			  s64 * results);
83 static int dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb,
84 		     s64 * results);
85 static int dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno,
86 		      s64 * results);
87 static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
88 static int dbFindBits(u32 word, int l2nb);
89 static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno);
90 static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl);
91 static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
92 		      int nblocks);
93 static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
94 		      int nblocks);
95 static int dbMaxBud(u8 * cp);
96 static int blkstol2(s64 nb);
97 
98 static int cntlz(u32 value);
99 static int cnttz(u32 word);
100 
101 static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno,
102 			 int nblocks);
103 static int dbInitDmap(struct dmap * dp, s64 blkno, int nblocks);
104 static int dbInitDmapTree(struct dmap * dp);
105 static int dbInitTree(struct dmaptree * dtp);
106 static int dbInitDmapCtl(struct dmapctl * dcp, int level, int i);
107 static int dbGetL2AGSize(s64 nblocks);
108 
109 /*
110  *	buddy table
111  *
112  * table used for determining buddy sizes within characters of
113  * dmap bitmap words.  the characters themselves serve as indexes
114  * into the table, with the table elements yielding the maximum
115  * binary buddy of free bits within the character.
116  */
117 static const s8 budtab[256] = {
118 	3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
119 	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
120 	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
121 	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
122 	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
123 	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
124 	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
125 	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
126 	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
127 	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
128 	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
129 	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
130 	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
131 	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
132 	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
133 	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, -1
134 };
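/*
 * a few worked entries (a clear bit in a dmap word means the block is
 * free, so byte value 0x00 is fully free and 0xff is fully allocated):
 *
 *	budtab[0x00] ==  3	all 8 bits free, max buddy 2^3 = 8
 *	budtab[0x0f] ==  2	high nibble free, max buddy 2^2 = 4
 *	budtab[0x87] ==  1	four contiguous bits free, but the largest
 *				naturally aligned free run is 2 bits (2^1)
 *	budtab[0xff] == -1	nothing free (NOFREE)
 */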
135 
136 /*
137  * NAME:	dbMount()
138  *
139  * FUNCTION:	initialize the block allocation map.
140  *
141  *		memory is allocated for the in-core bmap descriptor and
142  *		the in-core descriptor is initialized from disk.
143  *
144  * PARAMETERS:
145  *	ipbmap	- pointer to in-core inode for the block map.
146  *
147  * RETURN VALUES:
148  *	0	- success
149  *	-ENOMEM	- insufficient memory
150  *	-EIO	- i/o error
151  *	-EINVAL - wrong bmap data
152  */
153 int dbMount(struct inode *ipbmap)
154 {
155 	struct bmap *bmp;
156 	struct dbmap_disk *dbmp_le;
157 	struct metapage *mp;
158 	int i, err;
159 
160 	/*
161 	 * allocate/initialize the in-memory bmap descriptor
162 	 */
163 	/* allocate memory for the in-memory bmap descriptor */
164 	bmp = kmalloc(sizeof(struct bmap), GFP_KERNEL);
165 	if (bmp == NULL)
166 		return -ENOMEM;
167 
168 	/* read the on-disk bmap descriptor. */
169 	mp = read_metapage(ipbmap,
170 			   BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
171 			   PSIZE, 0);
172 	if (mp == NULL) {
173 		err = -EIO;
174 		goto err_kfree_bmp;
175 	}
176 
177 	/* copy the on-disk bmap descriptor to its in-memory version. */
178 	dbmp_le = (struct dbmap_disk *) mp->data;
179 	bmp->db_mapsize = le64_to_cpu(dbmp_le->dn_mapsize);
180 	bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
181 	bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
182 	bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
183 	bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
184 	bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
185 	bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
186 	bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
187 	bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
188 	bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
189 	bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
190 	bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
191 
192 	if ((bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE) ||
193 	    (bmp->db_l2nbperpage < 0) ||
194 	    !bmp->db_numag || (bmp->db_numag > MAXAG) ||
195 	    (bmp->db_maxag >= MAXAG) || (bmp->db_maxag < 0) ||
196 	    (bmp->db_agpref >= MAXAG) || (bmp->db_agpref < 0) ||
197 	    !bmp->db_agwidth ||
198 	    (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG) ||
199 	    (bmp->db_agl2size < 0) ||
200 	    ((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) {
201 		err = -EINVAL;
202 		goto err_release_metapage;
203 	}
204 
205 	for (i = 0; i < MAXAG; i++)
206 		bmp->db_agfree[i] = le64_to_cpu(dbmp_le->dn_agfree[i]);
207 	bmp->db_agsize = le64_to_cpu(dbmp_le->dn_agsize);
208 	bmp->db_maxfreebud = dbmp_le->dn_maxfreebud;
209 
210 	/* release the buffer. */
211 	release_metapage(mp);
212 
213 	/* bind the bmap inode and the bmap descriptor to each other. */
214 	bmp->db_ipbmap = ipbmap;
215 	JFS_SBI(ipbmap->i_sb)->bmap = bmp;
216 
217 	memset(bmp->db_active, 0, sizeof(bmp->db_active));
218 
219 	/*
220 	 * allocate/initialize the bmap lock
221 	 */
222 	BMAP_LOCK_INIT(bmp);
223 
224 	return (0);
225 
226 err_release_metapage:
227 	release_metapage(mp);
228 err_kfree_bmp:
229 	kfree(bmp);
230 	return err;
231 }
232 
233 
234 /*
235  * NAME:	dbUnmount()
236  *
237  * FUNCTION:	terminate the block allocation map in preparation for
238  *		file system unmount.
239  *
240  *		the in-core bmap descriptor is written to disk and
241  *		the memory for this descriptor is freed.
242  *
243  * PARAMETERS:
244  *	ipbmap	- pointer to in-core inode for the block map.
245  *
246  * RETURN VALUES:
247  *	0	- success
248  *	-EIO	- i/o error
249  */
250 int dbUnmount(struct inode *ipbmap, int mounterror)
251 {
252 	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
253 
254 	if (!(mounterror || isReadOnly(ipbmap)))
255 		dbSync(ipbmap);
256 
257 	/*
258 	 * Invalidate the page cache buffers
259 	 */
260 	truncate_inode_pages(ipbmap->i_mapping, 0);
261 
262 	/* free the memory for the in-memory bmap. */
263 	kfree(bmp);
264 	JFS_SBI(ipbmap->i_sb)->bmap = NULL;
265 
266 	return (0);
267 }
268 
269 /*
270  *	dbSync()
271  */
272 int dbSync(struct inode *ipbmap)
273 {
274 	struct dbmap_disk *dbmp_le;
275 	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
276 	struct metapage *mp;
277 	int i;
278 
279 	/*
280 	 * write bmap global control page
281 	 */
282 	/* get the buffer for the on-disk bmap descriptor. */
283 	mp = read_metapage(ipbmap,
284 			   BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
285 			   PSIZE, 0);
286 	if (mp == NULL) {
287 		jfs_err("dbSync: read_metapage failed!");
288 		return -EIO;
289 	}
290 	/* copy the in-memory version of the bmap to the on-disk version */
291 	dbmp_le = (struct dbmap_disk *) mp->data;
292 	dbmp_le->dn_mapsize = cpu_to_le64(bmp->db_mapsize);
293 	dbmp_le->dn_nfree = cpu_to_le64(bmp->db_nfree);
294 	dbmp_le->dn_l2nbperpage = cpu_to_le32(bmp->db_l2nbperpage);
295 	dbmp_le->dn_numag = cpu_to_le32(bmp->db_numag);
296 	dbmp_le->dn_maxlevel = cpu_to_le32(bmp->db_maxlevel);
297 	dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag);
298 	dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref);
299 	dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel);
300 	dbmp_le->dn_agheight = cpu_to_le32(bmp->db_agheight);
301 	dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth);
302 	dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart);
303 	dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size);
304 	for (i = 0; i < MAXAG; i++)
305 		dbmp_le->dn_agfree[i] = cpu_to_le64(bmp->db_agfree[i]);
306 	dbmp_le->dn_agsize = cpu_to_le64(bmp->db_agsize);
307 	dbmp_le->dn_maxfreebud = bmp->db_maxfreebud;
308 
309 	/* write the buffer */
310 	write_metapage(mp);
311 
312 	/*
313 	 * write out dirty pages of bmap
314 	 */
315 	filemap_write_and_wait(ipbmap->i_mapping);
316 
317 	diWriteSpecial(ipbmap, 0);
318 
319 	return (0);
320 }
321 
322 /*
323  * NAME:	dbFree()
324  *
325  * FUNCTION:	free the specified block range from the working block
326  *		allocation map.
327  *
328  *		the blocks will be freed from the working map one dmap
329  *		at a time.
330  *
331  * PARAMETERS:
332  *	ip	- pointer to in-core inode;
333  *	blkno	- starting block number to be freed.
334  *	nblocks	- number of blocks to be freed.
335  *
336  * RETURN VALUES:
337  *	0	- success
338  *	-EIO	- i/o error
339  */
340 int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
341 {
342 	struct metapage *mp;
343 	struct dmap *dp;
344 	int nb, rc;
345 	s64 lblkno, rem;
346 	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
347 	struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
348 	struct super_block *sb = ipbmap->i_sb;
349 
350 	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
351 
352 	/* block to be freed better be within the mapsize. */
353 	if (unlikely((blkno == 0) || (blkno + nblocks > bmp->db_mapsize))) {
354 		IREAD_UNLOCK(ipbmap);
355 		printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
356 		       (unsigned long long) blkno,
357 		       (unsigned long long) nblocks);
358 		jfs_error(ip->i_sb, "block to be freed is outside the map\n");
359 		return -EIO;
360 	}
361 
362 	/**
363 	 * TRIM the blocks, when mounted with discard option
364 	 */
365 	if (JFS_SBI(sb)->flag & JFS_DISCARD)
366 		if (JFS_SBI(sb)->minblks_trim <= nblocks)
367 			jfs_issue_discard(ipbmap, blkno, nblocks);
368 
369 	/*
370 	 * free the blocks a dmap at a time.
371 	 */
372 	mp = NULL;
373 	for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) {
374 		/* release previous dmap if any */
375 		if (mp) {
376 			write_metapage(mp);
377 		}
378 
379 		/* get the buffer for the current dmap. */
380 		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
381 		mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
382 		if (mp == NULL) {
383 			IREAD_UNLOCK(ipbmap);
384 			return -EIO;
385 		}
386 		dp = (struct dmap *) mp->data;
387 
388 		/* determine the number of blocks to be freed from
389 		 * this dmap.
390 		 */
391 		nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));
392 
393 		/* free the blocks. */
394 		if ((rc = dbFreeDmap(bmp, dp, blkno, nb))) {
395 			jfs_error(ip->i_sb, "error in block map\n");
396 			release_metapage(mp);
397 			IREAD_UNLOCK(ipbmap);
398 			return (rc);
399 		}
400 	}
401 
402 	/* write the last buffer. */
403 	if (mp)
404 		write_metapage(mp);
405 
406 	IREAD_UNLOCK(ipbmap);
407 
408 	return (0);
409 }
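/*
 *	worked example of the dmap-at-a-time loop above, assuming
 *	BPERDMAP == 8192: freeing nblocks = 10000 starting at blkno = 8000
 *	touches three dmaps:
 *
 *	  pass 1: nb = min(10000, 8192 - (8000 & 8191))  =  192, blkno -> 8192
 *	  pass 2: nb = min( 9808, 8192 - (8192 & 8191))  = 8192, blkno -> 16384
 *	  pass 3: nb = min( 1616, 8192 - (16384 & 8191)) = 1616, done
 */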
410 
411 
412 /*
413  * NAME:	dbUpdatePMap()
414  *
415  * FUNCTION:	update the allocation state (free or allocate) of the
416  *		specified block range in the persistent block allocation map.
417  *
418  *		the blocks will be updated in the persistent map one
419  *		dmap at a time.
420  *
421  * PARAMETERS:
422  *	ipbmap	- pointer to in-core inode for the block map.
423  *	free	- 'true' if block range is to be freed from the persistent
424  *		  map; 'false' if it is to be allocated.
425  *	blkno	- starting block number of the range.
426  *	nblocks	- number of contiguous blocks in the range.
427  *	tblk	- transaction block;
428  *
429  * RETURN VALUES:
430  *	0	- success
431  *	-EIO	- i/o error
432  */
433 int
434 dbUpdatePMap(struct inode *ipbmap,
435 	     int free, s64 blkno, s64 nblocks, struct tblock * tblk)
436 {
437 	int nblks, dbitno, wbitno, rbits;
438 	int word, nbits, nwords;
439 	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
440 	s64 lblkno, rem, lastlblkno;
441 	u32 mask;
442 	struct dmap *dp;
443 	struct metapage *mp;
444 	struct jfs_log *log;
445 	int lsn, difft, diffp;
446 	unsigned long flags;
447 
448 	/* the blocks better be within the mapsize. */
449 	if (blkno + nblocks > bmp->db_mapsize) {
450 		printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
451 		       (unsigned long long) blkno,
452 		       (unsigned long long) nblocks);
453 		jfs_error(ipbmap->i_sb, "blocks are outside the map\n");
454 		return -EIO;
455 	}
456 
457 	/* compute delta of transaction lsn from log syncpt */
458 	lsn = tblk->lsn;
459 	log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
460 	logdiff(difft, lsn, log);
461 
462 	/*
463 	 * update the block state a dmap at a time.
464 	 */
465 	mp = NULL;
466 	lastlblkno = 0;
467 	for (rem = nblocks; rem > 0; rem -= nblks, blkno += nblks) {
468 		/* get the buffer for the current dmap. */
469 		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
470 		if (lblkno != lastlblkno) {
471 			if (mp) {
472 				write_metapage(mp);
473 			}
474 
475 			mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE,
476 					   0);
477 			if (mp == NULL)
478 				return -EIO;
479 			metapage_wait_for_io(mp);
480 		}
481 		dp = (struct dmap *) mp->data;
482 
483 		/* determine the bit number and word within the dmap of
484 		 * the starting block.  also determine how many blocks
485 		 * are to be updated within this dmap.
486 		 */
487 		dbitno = blkno & (BPERDMAP - 1);
488 		word = dbitno >> L2DBWORD;
489 		nblks = min(rem, (s64)BPERDMAP - dbitno);
490 
491 		/* update the bits of the dmap words. the first and last
492 		 * words may only have a subset of their bits updated. if
493 		 * this is the case, we'll work against that word (i.e.
494 		 * partial first and/or last) only in a single pass.  a
495 		 * single pass will also be used to update all words that
496 		 * are to have all their bits updated.
497 		 */
498 		for (rbits = nblks; rbits > 0;
499 		     rbits -= nbits, dbitno += nbits) {
500 			/* determine the bit number within the word and
501 			 * the number of bits within the word.
502 			 */
503 			wbitno = dbitno & (DBWORD - 1);
504 			nbits = min(rbits, DBWORD - wbitno);
505 
506 			/* check if only part of the word is to be updated. */
507 			if (nbits < DBWORD) {
508 				/* update (free or allocate) the bits
509 				 * in this word.
510 				 */
511 				mask =
512 				    (ONES << (DBWORD - nbits) >> wbitno);
513 				if (free)
514 					dp->pmap[word] &=
515 					    cpu_to_le32(~mask);
516 				else
517 					dp->pmap[word] |=
518 					    cpu_to_le32(mask);
519 
520 				word += 1;
521 			} else {
522 				/* one or more words are to have all
523 				 * their bits updated.  determine how
524 				 * many words and how many bits.
525 				 */
526 				nwords = rbits >> L2DBWORD;
527 				nbits = nwords << L2DBWORD;
528 
529 				/* update (free or allocate) the bits
530 				 * in these words.
531 				 */
532 				if (free)
533 					memset(&dp->pmap[word], 0,
534 					       nwords * 4);
535 				else
536 					memset(&dp->pmap[word], (int) ONES,
537 					       nwords * 4);
538 
539 				word += nwords;
540 			}
541 		}
542 
543 		/*
544 		 * update dmap lsn
545 		 */
546 		if (lblkno == lastlblkno)
547 			continue;
548 
549 		lastlblkno = lblkno;
550 
551 		LOGSYNC_LOCK(log, flags);
552 		if (mp->lsn != 0) {
553 			/* inherit older/smaller lsn */
554 			logdiff(diffp, mp->lsn, log);
555 			if (difft < diffp) {
556 				mp->lsn = lsn;
557 
558 				/* move bp after tblock in logsync list */
559 				list_move(&mp->synclist, &tblk->synclist);
560 			}
561 
562 			/* inherit younger/larger clsn */
563 			logdiff(difft, tblk->clsn, log);
564 			logdiff(diffp, mp->clsn, log);
565 			if (difft > diffp)
566 				mp->clsn = tblk->clsn;
567 		} else {
568 			mp->log = log;
569 			mp->lsn = lsn;
570 
571 			/* insert bp after tblock in logsync list */
572 			log->count++;
573 			list_add(&mp->synclist, &tblk->synclist);
574 
575 			mp->clsn = tblk->clsn;
576 		}
577 		LOGSYNC_UNLOCK(log, flags);
578 	}
579 
580 	/* write the last buffer. */
581 	if (mp) {
582 		write_metapage(mp);
583 	}
584 
585 	return (0);
586 }
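/*
 *	worked example of the partial-word mask used above (the same pattern
 *	appears in dbAllocNext() below), assuming ONES == 0xffffffff and
 *	DBWORD == 32: for wbitno = 3 and nbits = 5,
 *
 *		mask = (0xffffffff << (32 - 5)) >> 3
 *		     = 0xf8000000 >> 3
 *		     = 0x1f000000
 *
 *	i.e. five bits starting at bit 3, counted from the most significant
 *	bit of the word.
 */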
587 
588 
589 /*
590  * NAME:	dbNextAG()
591  *
592  * FUNCTION:	find the preferred allocation group for new allocations.
593  *
594  *		Within the allocation groups, we maintain a preferred
595  *		allocation group which consists of a group with at least
596  *		average free space.  It is the preferred group that we target
597  *		new inode allocation towards.  The tie-in between inode
598  *		allocation and block allocation occurs as we allocate the
599  *		first (data) block of an inode and specify the inode (block)
600  *		as the allocation hint for this block.
601  *
602  *		We try to avoid having more than one open file growing in
603  *		an allocation group, as this will lead to fragmentation.
604  *		This differs from the old OS/2 method of trying to keep
605  *		empty ags around for large allocations.
606  *
607  * PARAMETERS:
608  *	ipbmap	- pointer to in-core inode for the block map.
609  *
610  * RETURN VALUES:
611  *	the preferred allocation group number.
612  */
613 int dbNextAG(struct inode *ipbmap)
614 {
615 	s64 avgfree;
616 	int agpref;
617 	s64 hwm = 0;
618 	int i;
619 	int next_best = -1;
620 	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
621 
622 	BMAP_LOCK(bmp);
623 
624 	/* determine the average number of free blocks within the ags. */
625 	avgfree = (u32)bmp->db_nfree / bmp->db_numag;
626 
627 	/*
628 	 * if the current preferred ag does not have an active allocator
629 	 * and has at least average freespace, return it
630 	 */
631 	agpref = bmp->db_agpref;
632 	if ((atomic_read(&bmp->db_active[agpref]) == 0) &&
633 	    (bmp->db_agfree[agpref] >= avgfree))
634 		goto unlock;
635 
636 	/* From the last preferred ag, find the next one with at least
637 	 * average free space.
638 	 */
639 	for (i = 0 ; i < bmp->db_numag; i++, agpref++) {
640 		if (agpref >= bmp->db_numag)
641 			agpref = 0;
642 
643 		if (atomic_read(&bmp->db_active[agpref]))
644 			/* open file is currently growing in this ag */
645 			continue;
646 		if (bmp->db_agfree[agpref] >= avgfree) {
647 			/* Return this one */
648 			bmp->db_agpref = agpref;
649 			goto unlock;
650 		} else if (bmp->db_agfree[agpref] > hwm) {
651 			/* Less than avg. freespace, but best so far */
652 			hwm = bmp->db_agfree[agpref];
653 			next_best = agpref;
654 		}
655 	}
656 
657 	/*
658 	 * If no inactive ag was found with average freespace, use the
659 	 * next best
660 	 */
661 	if (next_best != -1)
662 		bmp->db_agpref = next_best;
663 	/* else leave db_agpref unchanged */
664 unlock:
665 	BMAP_UNLOCK(bmp);
666 
667 	/* return the preferred group.
668 	 */
669 	return (bmp->db_agpref);
670 }
671 
672 /*
673  * NAME:	dbAlloc()
674  *
675  * FUNCTION:	attempt to allocate a specified number of contiguous free
676  *		blocks from the working allocation block map.
677  *
678  *		the block allocation policy uses hints and a multi-step
679  *		approach.
680  *
681  *		for allocation requests smaller than the number of blocks
682  *		per dmap, we first try to allocate the new blocks
683  *		immediately following the hint.  if these blocks are not
684  *		available, we try to allocate blocks near the hint.  if
685  *		no blocks near the hint are available, we next try to
686  *		allocate within the same dmap as contains the hint.
687  *
688  *		if no blocks are available in the dmap or the allocation
689  *		request is larger than the dmap size, we try to allocate
690  *		within the same allocation group as contains the hint. if
691  *		this does not succeed, we finally try to allocate anywhere
692  *		within the aggregate.
693  *
694  *		we also try to allocate anywhere within the aggregate
695  *		for allocation requests larger than the allocation group
696  *		size or requests that specify no hint value.
697  *
698  * PARAMETERS:
699  *	ip	- pointer to in-core inode;
700  *	hint	- allocation hint.
701  *	nblocks	- number of contiguous blocks in the range.
702  *	results	- on successful return, set to the starting block number
703  *		  of the newly allocated contiguous range.
704  *
705  * RETURN VALUES:
706  *	0	- success
707  *	-ENOSPC	- insufficient disk resources
708  *	-EIO	- i/o error
709  */
710 int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
711 {
712 	int rc, agno;
713 	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
714 	struct bmap *bmp;
715 	struct metapage *mp;
716 	s64 lblkno, blkno;
717 	struct dmap *dp;
718 	int l2nb;
719 	s64 mapSize;
720 	int writers;
721 
722 	/* assert that nblocks is valid */
723 	assert(nblocks > 0);
724 
725 	/* get the log2 number of blocks to be allocated.
726 	 * if the number of blocks is not a power of 2,
727 	 * it will be rounded up to the next power of 2.
728 	 */
729 	l2nb = BLKSTOL2(nblocks);
730 
731 	bmp = JFS_SBI(ip->i_sb)->bmap;
732 
733 	mapSize = bmp->db_mapsize;
734 
735 	/* the hint should be within the map */
736 	if (hint >= mapSize) {
737 		jfs_error(ip->i_sb, "the hint is outside the map\n");
738 		return -EIO;
739 	}
740 
741 	/* if the number of blocks to be allocated is greater than the
742 	 * allocation group size, try to allocate anywhere.
743 	 */
744 	if (l2nb > bmp->db_agl2size) {
745 		IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
746 
747 		rc = dbAllocAny(bmp, nblocks, l2nb, results);
748 
749 		goto write_unlock;
750 	}
751 
752 	/*
753 	 * If no hint, let dbNextAG recommend an allocation group
754 	 */
755 	if (hint == 0)
756 		goto pref_ag;
757 
758 	/* we would like to allocate close to the hint.  adjust the
759 	 * hint to the block following the hint since the allocators
760 	 * will start looking for free space starting at this point.
761 	 */
762 	blkno = hint + 1;
763 
764 	if (blkno >= bmp->db_mapsize)
765 		goto pref_ag;
766 
767 	agno = blkno >> bmp->db_agl2size;
768 
769 	/* check if blkno crosses over into a new allocation group.
770 	 * if so, check if we should allow allocations within this
771 	 * allocation group.
772 	 */
773 	if ((blkno & (bmp->db_agsize - 1)) == 0)
774 		/* check if the AG is currently being written to.
775 		 * if so, call dbNextAG() to find a non-busy
776 		 * AG with sufficient free space.
777 		 */
778 		if (atomic_read(&bmp->db_active[agno]))
779 			goto pref_ag;
780 
781 	/* check if the allocation request size can be satisfied from a
782 	 * single dmap.  if so, try to allocate from the dmap containing
783 	 * the hint using a tiered strategy.
784 	 */
785 	if (nblocks <= BPERDMAP) {
786 		IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
787 
788 		/* get the buffer for the dmap containing the hint.
789 		 */
790 		rc = -EIO;
791 		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
792 		mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
793 		if (mp == NULL)
794 			goto read_unlock;
795 
796 		dp = (struct dmap *) mp->data;
797 
798 		/* first, try to satisfy the allocation request with the
799 		 * blocks beginning at the hint.
800 		 */
801 		if ((rc = dbAllocNext(bmp, dp, blkno, (int) nblocks))
802 		    != -ENOSPC) {
803 			if (rc == 0) {
804 				*results = blkno;
805 				mark_metapage_dirty(mp);
806 			}
807 
808 			release_metapage(mp);
809 			goto read_unlock;
810 		}
811 
812 		writers = atomic_read(&bmp->db_active[agno]);
813 		if ((writers > 1) ||
814 		    ((writers == 1) && (JFS_IP(ip)->active_ag != agno))) {
815 			/*
816 			 * Someone else is writing in this allocation
817 			 * group.  To avoid fragmenting, try another ag
818 			 */
819 			release_metapage(mp);
820 			IREAD_UNLOCK(ipbmap);
821 			goto pref_ag;
822 		}
823 
824 		/* next, try to satisfy the allocation request with blocks
825 		 * near the hint.
826 		 */
827 		if ((rc =
828 		     dbAllocNear(bmp, dp, blkno, (int) nblocks, l2nb, results))
829 		    != -ENOSPC) {
830 			if (rc == 0)
831 				mark_metapage_dirty(mp);
832 
833 			release_metapage(mp);
834 			goto read_unlock;
835 		}
836 
837 		/* try to satisfy the allocation request with blocks within
838 		 * the same dmap as the hint.
839 		 */
840 		if ((rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results))
841 		    != -ENOSPC) {
842 			if (rc == 0)
843 				mark_metapage_dirty(mp);
844 
845 			release_metapage(mp);
846 			goto read_unlock;
847 		}
848 
849 		release_metapage(mp);
850 		IREAD_UNLOCK(ipbmap);
851 	}
852 
853 	/* try to satisfy the allocation request with blocks within
854 	 * the same allocation group as the hint.
855 	 */
856 	IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
857 	if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) != -ENOSPC)
858 		goto write_unlock;
859 
860 	IWRITE_UNLOCK(ipbmap);
861 
862 
863       pref_ag:
864 	/*
865 	 * Let dbNextAG recommend a preferred allocation group
866 	 */
867 	agno = dbNextAG(ipbmap);
868 	IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
869 
870 	/* Try to allocate within this allocation group.  if that fails, try to
871 	 * allocate anywhere in the map.
872 	 */
873 	if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) == -ENOSPC)
874 		rc = dbAllocAny(bmp, nblocks, l2nb, results);
875 
876       write_unlock:
877 	IWRITE_UNLOCK(ipbmap);
878 
879 	return (rc);
880 
881       read_unlock:
882 	IREAD_UNLOCK(ipbmap);
883 
884 	return (rc);
885 }
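/*
 *	a condensed view of the fallback order implemented above, each step
 *	tried only when the previous one returns -ENOSPC:
 *
 *		dbAllocNext()	 - blocks immediately following the hint
 *		dbAllocNear()	 - blocks near the hint, same dmap
 *		dbAllocDmapLev() - anywhere within the hint's dmap
 *		dbAllocAG()	 - anywhere within the hint's allocation group
 *		dbAllocAG()	 - preferred group from dbNextAG(), then
 *		dbAllocAny()	 - anywhere in the aggregate
 */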
886 
887 /*
888  * NAME:	dbReAlloc()
889  *
890  * FUNCTION:	attempt to extend a current allocation by a specified
891  *		number of blocks.
892  *
893  *		this routine attempts to satisfy the allocation request
894  *		by first trying to extend the existing allocation in
895  *		place by allocating the additional blocks as the blocks
896  *		immediately following the current allocation.  if these
897  *		blocks are not available, this routine will attempt to
898  *		allocate a new set of contiguous blocks large enough
899  *		to cover the existing allocation plus the additional
900  *		number of blocks required.
901  *
902  * PARAMETERS:
903  *	ip	    -  pointer to in-core inode requiring allocation.
904  *	blkno	    -  starting block of the current allocation.
905  *	nblocks	    -  number of contiguous blocks within the current
906  *		       allocation.
907  *	addnblocks  -  number of blocks to add to the allocation.
908  *	results	-      on successful return, set to the starting block number
909  *		       of the existing allocation if the existing allocation
910  *		       was extended in place or to a newly allocated contiguous
911  *		       range if the existing allocation could not be extended
912  *		       in place.
913  *
914  * RETURN VALUES:
915  *	0	- success
916  *	-ENOSPC	- insufficient disk resources
917  *	-EIO	- i/o error
918  */
919 int
920 dbReAlloc(struct inode *ip,
921 	  s64 blkno, s64 nblocks, s64 addnblocks, s64 * results)
922 {
923 	int rc;
924 
925 	/* try to extend the allocation in place.
926 	 */
927 	if ((rc = dbExtend(ip, blkno, nblocks, addnblocks)) == 0) {
928 		*results = blkno;
929 		return (0);
930 	} else {
931 		if (rc != -ENOSPC)
932 			return (rc);
933 	}
934 
935 	/* could not extend the allocation in place, so allocate a
936 	 * new set of blocks for the entire request (i.e. try to get
937 	 * a range of contiguous blocks large enough to cover the
938 	 * existing allocation plus the additional blocks.)
939 	 */
940 	return (dbAlloc
941 		(ip, blkno + nblocks - 1, addnblocks + nblocks, results));
942 }
943 
944 
945 /*
946  * NAME:	dbExtend()
947  *
948  * FUNCTION:	attempt to extend a current allocation by a specified
949  *		number of blocks.
950  *
951  *		this routine attempts to satisfy the allocation request
952  *		by first trying to extend the existing allocation in
953  *		place by allocating the additional blocks as the blocks
954  *		immediately following the current allocation.
955  *
956  * PARAMETERS:
957  *	ip	    -  pointer to in-core inode requiring allocation.
958  *	blkno	    -  starting block of the current allocation.
959  *	nblocks	    -  number of contiguous blocks within the current
960  *		       allocation.
961  *	addnblocks  -  number of blocks to add to the allocation.
962  *
963  * RETURN VALUES:
964  *	0	- success
965  *	-ENOSPC	- insufficient disk resources
966  *	-EIO	- i/o error
967  */
968 static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
969 {
970 	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
971 	s64 lblkno, lastblkno, extblkno;
972 	uint rel_block;
973 	struct metapage *mp;
974 	struct dmap *dp;
975 	int rc;
976 	struct inode *ipbmap = sbi->ipbmap;
977 	struct bmap *bmp;
978 
979 	/*
980 	 * We don't want a non-aligned extent to cross a page boundary
981 	 */
982 	if (((rel_block = blkno & (sbi->nbperpage - 1))) &&
983 	    (rel_block + nblocks + addnblocks > sbi->nbperpage))
984 		return -ENOSPC;
985 
986 	/* get the last block of the current allocation */
987 	lastblkno = blkno + nblocks - 1;
988 
989 	/* determine the block number of the block following
990 	 * the existing allocation.
991 	 */
992 	extblkno = lastblkno + 1;
993 
994 	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
995 
996 	/* better be within the file system */
997 	bmp = sbi->bmap;
998 	if (lastblkno < 0 || lastblkno >= bmp->db_mapsize) {
999 		IREAD_UNLOCK(ipbmap);
1000 		jfs_error(ip->i_sb, "the block is outside the filesystem\n");
1001 		return -EIO;
1002 	}
1003 
1004 	/* we'll attempt to extend the current allocation in place by
1005 	 * allocating the additional blocks as the blocks immediately
1006 	 * following the current allocation.  we only try to extend the
1007 	 * current allocation in place if the number of additional blocks
1008 	 * can fit into a dmap, the last block of the current allocation
1009 	 * is not the last block of the file system, and the start of the
1010 	 * inplace extension is not on an allocation group boundary.
1011 	 */
1012 	if (addnblocks > BPERDMAP || extblkno >= bmp->db_mapsize ||
1013 	    (extblkno & (bmp->db_agsize - 1)) == 0) {
1014 		IREAD_UNLOCK(ipbmap);
1015 		return -ENOSPC;
1016 	}
1017 
1018 	/* get the buffer for the dmap containing the first block
1019 	 * of the extension.
1020 	 */
1021 	lblkno = BLKTODMAP(extblkno, bmp->db_l2nbperpage);
1022 	mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
1023 	if (mp == NULL) {
1024 		IREAD_UNLOCK(ipbmap);
1025 		return -EIO;
1026 	}
1027 
1028 	dp = (struct dmap *) mp->data;
1029 
1030 	/* try to allocate the blocks immediately following the
1031 	 * current allocation.
1032 	 */
1033 	rc = dbAllocNext(bmp, dp, extblkno, (int) addnblocks);
1034 
1035 	IREAD_UNLOCK(ipbmap);
1036 
1037 	/* were we successful ? */
1038 	if (rc == 0)
1039 		write_metapage(mp);
1040 	else
1041 		/* we were not successful */
1042 		release_metapage(mp);
1043 
1044 	return (rc);
1045 }
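/*
 *	worked example of the page-boundary check above, assuming
 *	sbi->nbperpage == 8 (4K pages, 512-byte blocks): extending an extent
 *	at blkno = 10, nblocks = 3 by addnblocks = 4 gives
 *	rel_block = 10 & 7 = 2 and 2 + 3 + 4 = 9 > 8, so the extension would
 *	cross a page boundary and dbExtend() returns -ENOSPC without looking
 *	at the dmap at all.
 */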
1046 
1047 
1048 /*
1049  * NAME:	dbAllocNext()
1050  *
1051  * FUNCTION:	attempt to allocate the blocks of the specified block
1052  *		range within a dmap.
1053  *
1054  * PARAMETERS:
1055  *	bmp	-  pointer to bmap descriptor
1056  *	dp	-  pointer to dmap.
1057  *	blkno	-  starting block number of the range.
1058  *	nblocks	-  number of contiguous free blocks of the range.
1059  *
1060  * RETURN VALUES:
1061  *	0	- success
1062  *	-ENOSPC	- insufficient disk resources
1063  *	-EIO	- i/o error
1064  *
1065  * serialization: IREAD_LOCK(ipbmap) held on entry/exit;
1066  */
1067 static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
1068 		       int nblocks)
1069 {
1070 	int dbitno, word, rembits, nb, nwords, wbitno, nw;
1071 	int l2size;
1072 	s8 *leaf;
1073 	u32 mask;
1074 
1075 	if (dp->tree.leafidx != cpu_to_le32(LEAFIND)) {
1076 		jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmap page\n");
1077 		return -EIO;
1078 	}
1079 
1080 	/* pick up a pointer to the leaves of the dmap tree.
1081 	 */
1082 	leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx);
1083 
1084 	/* determine the bit number and word within the dmap of the
1085 	 * starting block.
1086 	 */
1087 	dbitno = blkno & (BPERDMAP - 1);
1088 	word = dbitno >> L2DBWORD;
1089 
1090 	/* check if the specified block range is contained within
1091 	 * this dmap.
1092 	 */
1093 	if (dbitno + nblocks > BPERDMAP)
1094 		return -ENOSPC;
1095 
1096 	/* check if the starting leaf indicates that anything
1097 	 * is free.
1098 	 */
1099 	if (leaf[word] == NOFREE)
1100 		return -ENOSPC;
1101 
1102 	/* check the dmap words corresponding to the block range to see
1103 	 * if the block range is free.  not all bits of the first and
1104 	 * last words may be contained within the block range.  if this
1105 	 * is the case, we'll work against those words (i.e. partial first
1106 	 * and/or last) on an individual basis (a single pass) and examine
1107 	 * the actual bits to determine if they are free.  a single pass
1108 	 * will be used for all dmap words fully contained within the
1109 	 * specified range.  within this pass, the leaves of the dmap
1110 	 * tree will be examined to determine if the blocks are free. a
1111 	 * single leaf may describe the free space of multiple dmap
1112 	 * words, so we may visit only a subset of the actual leaves
1113 	 * corresponding to the dmap words of the block range.
1114 	 */
1115 	for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
1116 		/* determine the bit number within the word and
1117 		 * the number of bits within the word.
1118 		 */
1119 		wbitno = dbitno & (DBWORD - 1);
1120 		nb = min(rembits, DBWORD - wbitno);
1121 
1122 		/* check if only part of the word is to be examined.
1123 		 */
1124 		if (nb < DBWORD) {
1125 			/* check if the bits are free.
1126 			 */
1127 			mask = (ONES << (DBWORD - nb) >> wbitno);
1128 			if ((mask & ~le32_to_cpu(dp->wmap[word])) != mask)
1129 				return -ENOSPC;
1130 
1131 			word += 1;
1132 		} else {
1133 			/* one or more dmap words are fully contained
1134 			 * within the block range.  determine how many
1135 			 * words and how many bits.
1136 			 */
1137 			nwords = rembits >> L2DBWORD;
1138 			nb = nwords << L2DBWORD;
1139 
1140 			/* now examine the appropriate leaves to determine
1141 			 * if the blocks are free.
1142 			 */
1143 			while (nwords > 0) {
1144 				/* does the leaf describe any free space ?
1145 				 */
1146 				if (leaf[word] < BUDMIN)
1147 					return -ENOSPC;
1148 
1149 				/* determine the l2 number of bits provided
1150 				 * by this leaf.
1151 				 */
1152 				l2size =
1153 				    min_t(int, leaf[word], NLSTOL2BSZ(nwords));
1154 
1155 				/* determine how many words were handled.
1156 				 */
1157 				nw = BUDSIZE(l2size, BUDMIN);
1158 
1159 				nwords -= nw;
1160 				word += nw;
1161 			}
1162 		}
1163 	}
1164 
1165 	/* allocate the blocks.
1166 	 */
1167 	return (dbAllocDmap(bmp, dp, blkno, nblocks));
1168 }
1169 
1170 
1171 /*
1172  * NAME:	dbAllocNear()
1173  *
1174  * FUNCTION:	attempt to allocate a number of contiguous free blocks near
1175  *		a specified block (hint) within a dmap.
1176  *
1177  *		starting with the dmap leaf that covers the hint, we'll
1178  *		check the next four contiguous leaves for sufficient free
1179  *		space.  if sufficient free space is found, we'll allocate
1180  *		the desired free space.
1181  *
1182  * PARAMETERS:
1183  *	bmp	-  pointer to bmap descriptor
1184  *	dp	-  pointer to dmap.
1185  *	blkno	-  block number to allocate near.
1186  *	nblocks	-  actual number of contiguous free blocks desired.
1187  *	l2nb	-  log2 number of contiguous free blocks desired.
1188  *	results	-  on successful return, set to the starting block number
1189  *		   of the newly allocated range.
1190  *
1191  * RETURN VALUES:
1192  *	0	- success
1193  *	-ENOSPC	- insufficient disk resources
1194  *	-EIO	- i/o error
1195  *
1196  * serialization: IREAD_LOCK(ipbmap) held on entry/exit;
1197  */
1198 static int
1199 dbAllocNear(struct bmap * bmp,
1200 	    struct dmap * dp, s64 blkno, int nblocks, int l2nb, s64 * results)
1201 {
1202 	int word, lword, rc;
1203 	s8 *leaf;
1204 
1205 	if (dp->tree.leafidx != cpu_to_le32(LEAFIND)) {
1206 		jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmap page\n");
1207 		return -EIO;
1208 	}
1209 
1210 	leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx);
1211 
1212 	/* determine the word within the dmap that holds the hint
1213 	 * (i.e. blkno).  also, determine the last word in the dmap
1214 	 * that we'll include in our examination.
1215 	 */
1216 	word = (blkno & (BPERDMAP - 1)) >> L2DBWORD;
1217 	lword = min(word + 4, LPERDMAP);
1218 
1219 	/* examine the leaves for sufficient free space.
1220 	 */
1221 	for (; word < lword; word++) {
1222 		/* does the leaf describe sufficient free space ?
1223 		 */
1224 		if (leaf[word] < l2nb)
1225 			continue;
1226 
1227 		/* determine the block number within the file system
1228 		 * of the first block described by this dmap word.
1229 		 */
1230 		blkno = le64_to_cpu(dp->start) + (word << L2DBWORD);
1231 
1232 		/* if not all bits of the dmap word are free, get the
1233 		 * starting bit number within the dmap word of the required
1234 		 * string of free bits and adjust the block number with the
1235 		 * value.
1236 		 */
1237 		if (leaf[word] < BUDMIN)
1238 			blkno +=
1239 			    dbFindBits(le32_to_cpu(dp->wmap[word]), l2nb);
1240 
1241 		/* allocate the blocks.
1242 		 */
1243 		if ((rc = dbAllocDmap(bmp, dp, blkno, nblocks)) == 0)
1244 			*results = blkno;
1245 
1246 		return (rc);
1247 	}
1248 
1249 	return -ENOSPC;
1250 }
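/*
 *	worked example of the window examined above, assuming DBWORD == 32
 *	and LPERDMAP == 256: a hint at offset 1000 within its dmap gives
 *	word = 1000 >> 5 = 31 and lword = min(31 + 4, 256) = 35, so leaves
 *	31..34 are checked, i.e. the 128 blocks at dmap offsets 992..1119.
 */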
1251 
1252 
1253 /*
1254  * NAME:	dbAllocAG()
1255  *
1256  * FUNCTION:	attempt to allocate the specified number of contiguous
1257  *		free blocks within the specified allocation group.
1258  *
1259  *		unless the allocation group size is equal to the number
1260  *		of blocks per dmap, the dmap control pages will be used to
1261  *		find the required free space, if available.  we start the
1262  *		search at the highest dmap control page level which
1263  *		distinctly describes the allocation group's free space
1264  *		(i.e. the highest level at which the allocation group's
1265  *		free space is not mixed in with that of any other group).
1266  *		in addition, we start the search within this level at a
1267  *		height of the dmapctl dmtree at which the nodes distinctly
1268  *		describe the allocation group's free space.  at this height,
1269  *		the allocation group's free space may be represented by one
1270  *		or two sub-trees, depending on the allocation group size.
1271  *		we search the top nodes of these subtrees left to right for
1272  *		sufficient free space.  if sufficient free space is found,
1273  *		the subtree is searched to find the leftmost leaf that
1274  *		has free space.  once we have made it to the leaf, we
1275  *		move the search to the next lower level dmap control page
1276  *		corresponding to this leaf.  we continue down the dmap control
1277  *		pages until we find the dmap that contains or starts the
1278  *		sufficient free space and we allocate at this dmap.
1279  *
1280  *		if the allocation group size is equal to the dmap size,
1281  *		we'll start at the dmap corresponding to the allocation
1282  *		group and attempt the allocation at this level.
1283  *
1284  *		the dmap control page search is also not performed if the
1285  *		allocation group is completely free and we go to the first
1286  *		dmap of the allocation group to do the allocation.  this is
1287  *		done because the allocation group may be part (not the first
1288  *		part) of a larger binary buddy system, causing the dmap
1289  *		control pages to indicate no free space (NOFREE) within
1290  *		the allocation group.
1291  *
1292  * PARAMETERS:
1293  *	bmp	-  pointer to bmap descriptor
1294  *	agno	- allocation group number.
1295  *	nblocks	-  actual number of contiguous free blocks desired.
1296  *	l2nb	-  log2 number of contiguous free blocks desired.
1297  *	results	-  on successful return, set to the starting block number
1298  *		   of the newly allocated range.
1299  *
1300  * RETURN VALUES:
1301  *	0	- success
1302  *	-ENOSPC	- insufficient disk resources
1303  *	-EIO	- i/o error
1304  *
1305  * note: IWRITE_LOCK(ipbmap) held on entry/exit;
1306  */
1307 static int
1308 dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
1309 {
1310 	struct metapage *mp;
1311 	struct dmapctl *dcp;
1312 	int rc, ti, i, k, m, n, agperlev;
1313 	s64 blkno, lblkno;
1314 	int budmin;
1315 
1316 	/* allocation request should not be for more than the
1317 	 * allocation group size.
1318 	 */
1319 	if (l2nb > bmp->db_agl2size) {
1320 		jfs_error(bmp->db_ipbmap->i_sb,
1321 			  "allocation request is larger than the allocation group size\n");
1322 		return -EIO;
1323 	}
1324 
1325 	/* determine the starting block number of the allocation
1326 	 * group.
1327 	 */
1328 	blkno = (s64) agno << bmp->db_agl2size;
1329 
1330 	/* check if the allocation group size is the minimum allocation
1331 	 * group size or if the allocation group is completely free. if
1332 	 * the allocation group size is the minimum size of BPERDMAP (i.e.
1333 	 * 1 dmap), there is no need to search the dmap control page (below)
1334 	 * that fully describes the allocation group since the allocation
1335 	 * group is already fully described by a dmap.  in this case, we
1336 	 * just call dbAllocCtl() to search the dmap tree and allocate the
1337 	 * required space if available.
1338 	 *
1339 	 * if the allocation group is completely free, dbAllocCtl() is
1340 	 * also called to allocate the required space.  this is done for
1341 	 * two reasons.  first, it makes no sense searching the dmap control
1342 	 * pages for free space when we know that free space exists.  second,
1343 	 * the dmap control pages may indicate that the allocation group
1344 	 * has no free space if the allocation group is part (not the first
1345 	 * part) of a larger binary buddy system.
1346 	 */
1347 	if (bmp->db_agsize == BPERDMAP
1348 	    || bmp->db_agfree[agno] == bmp->db_agsize) {
1349 		rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
1350 		if ((rc == -ENOSPC) &&
1351 		    (bmp->db_agfree[agno] == bmp->db_agsize)) {
1352 			printk(KERN_ERR "blkno = %Lx, blocks = %Lx\n",
1353 			       (unsigned long long) blkno,
1354 			       (unsigned long long) nblocks);
1355 			jfs_error(bmp->db_ipbmap->i_sb,
1356 				  "dbAllocCtl failed in free AG\n");
1357 		}
1358 		return (rc);
1359 	}
1360 
1361 	/* the buffer for the dmap control page that fully describes the
1362 	 * allocation group.
1363 	 */
1364 	lblkno = BLKTOCTL(blkno, bmp->db_l2nbperpage, bmp->db_aglevel);
1365 	mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
1366 	if (mp == NULL)
1367 		return -EIO;
1368 	dcp = (struct dmapctl *) mp->data;
1369 	budmin = dcp->budmin;
1370 
1371 	if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
1372 		jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmapctl page\n");
1373 		release_metapage(mp);
1374 		return -EIO;
1375 	}
1376 
1377 	/* search the subtree(s) of the dmap control page that describes
1378 	 * the allocation group, looking for sufficient free space.  to begin,
1379 	 * determine how many allocation groups are represented in a dmap
1380 	 * control page at the control page level (i.e. L0, L1, L2) that
1381 	 * fully describes an allocation group. next, determine the starting
1382 	 * tree index of this allocation group within the control page.
1383 	 */
1384 	agperlev =
1385 	    (1 << (L2LPERCTL - (bmp->db_agheight << 1))) / bmp->db_agwidth;
1386 	ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1));
1387 
1388 	/* dmap control page trees fan-out by 4 and a single allocation
1389 	 * group may be described by 1 or 2 subtrees within the ag level
1390 	 * dmap control page, depending upon the ag size. examine the ag's
1391 	 * subtrees for sufficient free space, starting with the leftmost
1392 	 * subtree.
1393 	 */
1394 	for (i = 0; i < bmp->db_agwidth; i++, ti++) {
1395 		/* is there sufficient free space ?
1396 		 */
1397 		if (l2nb > dcp->stree[ti])
1398 			continue;
1399 
1400 		/* sufficient free space found in a subtree. now search down
1401 		 * the subtree to find the leftmost leaf that describes this
1402 		 * free space.
1403 		 */
1404 		for (k = bmp->db_agheight; k > 0; k--) {
1405 			for (n = 0, m = (ti << 2) + 1; n < 4; n++) {
1406 				if (l2nb <= dcp->stree[m + n]) {
1407 					ti = m + n;
1408 					break;
1409 				}
1410 			}
1411 			if (n == 4) {
1412 				jfs_error(bmp->db_ipbmap->i_sb,
1413 					  "failed descending stree\n");
1414 				release_metapage(mp);
1415 				return -EIO;
1416 			}
1417 		}
1418 
1419 		/* determine the block number within the file system
1420 		 * that corresponds to this leaf.
1421 		 */
1422 		if (bmp->db_aglevel == 2)
1423 			blkno = 0;
1424 		else if (bmp->db_aglevel == 1)
1425 			blkno &= ~(MAXL1SIZE - 1);
1426 		else		/* bmp->db_aglevel == 0 */
1427 			blkno &= ~(MAXL0SIZE - 1);
1428 
1429 		blkno +=
1430 		    ((s64) (ti - le32_to_cpu(dcp->leafidx))) << budmin;
1431 
1432 		/* release the buffer in preparation for going down
1433 		 * the next level of dmap control pages.
1434 		 */
1435 		release_metapage(mp);
1436 
1437 		/* check if we need to continue to search down the lower
1438 		 * level dmap control pages.  we need to if the number of
1439 		 * blocks required is less than the maximum number of blocks
1440 		 * described at the next lower level.
1441 		 */
1442 		if (l2nb < budmin) {
1443 
1444 			/* search the lower level dmap control pages to get
1445 			 * the starting block number of the dmap that
1446 			 * contains or starts off the free space.
1447 			 */
1448 			if ((rc =
1449 			     dbFindCtl(bmp, l2nb, bmp->db_aglevel - 1,
1450 				       &blkno))) {
1451 				if (rc == -ENOSPC) {
1452 					jfs_error(bmp->db_ipbmap->i_sb,
1453 						  "control page inconsistent\n");
1454 					return -EIO;
1455 				}
1456 				return (rc);
1457 			}
1458 		}
1459 
1460 		/* allocate the blocks.
1461 		 */
1462 		rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
1463 		if (rc == -ENOSPC) {
1464 			jfs_error(bmp->db_ipbmap->i_sb,
1465 				  "unable to allocate blocks\n");
1466 			rc = -EIO;
1467 		}
1468 		return (rc);
1469 	}
1470 
1471 	/* no space in the allocation group.  release the buffer and
1472 	 * return -ENOSPC.
1473 	 */
1474 	release_metapage(mp);
1475 
1476 	return -ENOSPC;
1477 }
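/*
 *	worked example of the index arithmetic above, assuming
 *	L2LPERCTL == 10 (1024 leaves per dmapctl): with db_agheight == 1 and
 *	db_agwidth == 1,
 *
 *		agperlev = (1 << (10 - 2)) / 1 = 256
 *		ti	 = db_agstart + (agno & 255)
 *
 *	so one AG-level control page describes 256 allocation groups, each
 *	rooted at a single subtree index ti.
 */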
1478 
1479 
1480 /*
1481  * NAME:	dbAllocAny()
1482  *
1483  * FUNCTION:	attempt to allocate the specified number of contiguous
1484  *		free blocks anywhere in the file system.
1485  *
1486  *		dbAllocAny() attempts to find the sufficient free space by
1487  *		searching down the dmap control pages, starting with the
1488  *		highest level (i.e. L0, L1, L2) control page.  if free space
1489  *		large enough to satisfy the desired free space is found, the
1490  *		desired free space is allocated.
1491  *
1492  * PARAMETERS:
1493  *	bmp	-  pointer to bmap descriptor
1494  *	nblocks	 -  actual number of contiguous free blocks desired.
1495  *	l2nb	 -  log2 number of contiguous free blocks desired.
1496  *	results	-  on successful return, set to the starting block number
1497  *		   of the newly allocated range.
1498  *
1499  * RETURN VALUES:
1500  *	0	- success
1501  *	-ENOSPC	- insufficient disk resources
1502  *	-EIO	- i/o error
1503  *
1504  * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
1505  */
1506 static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results)
1507 {
1508 	int rc;
1509 	s64 blkno = 0;
1510 
1511 	/* starting with the top level dmap control page, search
1512 	 * down the dmap control levels for sufficient free space.
1513 	 * if free space is found, dbFindCtl() returns the starting
1514 	 * block number of the dmap that contains or starts off the
1515 	 * range of free space.
1516 	 */
1517 	if ((rc = dbFindCtl(bmp, l2nb, bmp->db_maxlevel, &blkno)))
1518 		return (rc);
1519 
1520 	/* allocate the blocks.
1521 	 */
1522 	rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
1523 	if (rc == -ENOSPC) {
1524 		jfs_error(bmp->db_ipbmap->i_sb, "unable to allocate blocks\n");
1525 		return -EIO;
1526 	}
1527 	return (rc);
1528 }
1529 
1530 
1531 /*
1532  * NAME:	dbDiscardAG()
1533  *
1534  * FUNCTION:	attempt to discard (TRIM) all free blocks of specific AG
1535  *
1536  *		algorithm:
1537  *		1) allocate blocks, as large as possible and save them
1538  *		   while holding IWRITE_LOCK on ipbmap
1539  *		2) trim all these saved block/length values
1540  *		3) mark the blocks free again
1541  *
1542  *		benefit:
1543  *		- we work on only one ag at a time, minimizing how long we
1544  *		  need to lock ipbmap
1545  *		- reading / writing the fs is possible most of the time,
1546  *		  even while trimming
1547  *
1548  *		downside:
1549  *		- we write two times to the dmapctl and dmap pages
1550  *		- but for me, this seems the best way, better ideas?
1551  *		/TR 2012
1552  *
1553  * PARAMETERS:
1554  *	ip	- pointer to in-core inode
1555  *	agno	- ag to trim
1556  *	minlen	- minimum value of contiguous blocks
1557  *
1558  * RETURN VALUES:
1559  *	s64	- actual number of blocks trimmed
1560  */
1561 s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen)
1562 {
1563 	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
1564 	struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
1565 	s64 nblocks, blkno;
1566 	u64 trimmed = 0;
1567 	int rc, l2nb;
1568 	struct super_block *sb = ipbmap->i_sb;
1569 
1570 	struct range2trim {
1571 		u64 blkno;
1572 		u64 nblocks;
1573 	} *totrim, *tt;
1574 
1575 	/* max blkno / nblocks pairs to trim */
1576 	int count = 0, range_cnt;
1577 	u64 max_ranges;
1578 
1579 	/* prevent others from writing new stuff here, while trimming */
1580 	IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
1581 
1582 	nblocks = bmp->db_agfree[agno];
1583 	max_ranges = nblocks;
1584 	do_div(max_ranges, minlen);
1585 	range_cnt = min_t(u64, max_ranges + 1, 32 * 1024);
1586 	totrim = kmalloc_array(range_cnt, sizeof(struct range2trim), GFP_NOFS);
1587 	if (totrim == NULL) {
1588 		jfs_error(bmp->db_ipbmap->i_sb, "no memory for trim array\n");
1589 		IWRITE_UNLOCK(ipbmap);
1590 		return 0;
1591 	}
1592 
1593 	tt = totrim;
1594 	while (nblocks >= minlen) {
1595 		l2nb = BLKSTOL2(nblocks);
1596 
1597 		/* 0 = okay, -EIO = fatal, -ENOSPC -> try smaller block */
1598 		rc = dbAllocAG(bmp, agno, nblocks, l2nb, &blkno);
1599 		if (rc == 0) {
1600 			tt->blkno = blkno;
1601 			tt->nblocks = nblocks;
1602 			tt++; count++;
1603 
1604 			/* the whole ag is free, trim now */
1605 			if (bmp->db_agfree[agno] == 0)
1606 				break;
1607 
1608 			/* give a hint for the next while */
1609 			nblocks = bmp->db_agfree[agno];
1610 			continue;
1611 		} else if (rc == -ENOSPC) {
1612 			/* search for next smaller log2 block */
1613 			l2nb = BLKSTOL2(nblocks) - 1;
1614 			if (unlikely(l2nb < 0))
1615 				break;
1616 			nblocks = 1LL << l2nb;
1617 		} else {
1618 			/* Trim any already allocated blocks */
1619 			jfs_error(bmp->db_ipbmap->i_sb, "-EIO\n");
1620 			break;
1621 		}
1622 
1623 		/* check, if our trim array is full */
1624 		if (unlikely(count >= range_cnt - 1))
1625 			break;
1626 	}
1627 	IWRITE_UNLOCK(ipbmap);
1628 
1629 	tt->nblocks = 0; /* mark the current end */
1630 	for (tt = totrim; tt->nblocks != 0; tt++) {
1631 		/* when mounted with online discard, dbFree() will
1632 		 * call jfs_issue_discard() itself */
1633 		if (!(JFS_SBI(sb)->flag & JFS_DISCARD))
1634 			jfs_issue_discard(ip, tt->blkno, tt->nblocks);
1635 		dbFree(ip, tt->blkno, tt->nblocks);
1636 		trimmed += tt->nblocks;
1637 	}
1638 	kfree(totrim);
1639 
1640 	return trimmed;
1641 }
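/*
 *	example of how the loop above backs off on -ENOSPC, with minlen == 64
 *	and db_agfree[agno] == 1000: the first pass asks for 1000 blocks
 *	(l2nb = BLKSTOL2(1000) = 10, since BLKSTOL2() rounds up as described
 *	in dbAlloc()); failing that, successive passes request 512, 256, 128
 *	and 64 blocks, and the loop stops once nblocks would drop below
 *	minlen.  the saved ranges are discarded and freed only after the
 *	IWRITE_LOCK has been released.
 */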
1642 
1643 /*
1644  * NAME:	dbFindCtl()
1645  *
1646  * FUNCTION:	starting at a specified dmap control page level and block
1647  *		number, search down the dmap control levels for a range of
1648  *		contiguous free blocks large enough to satisfy an allocation
1649  *		request for the specified number of free blocks.
1650  *
1651  *		if sufficient contiguous free blocks are found, this routine
1652  *		returns the starting block number within a dmap page that
1653  *		contains or starts a range of contiguous free blocks that
1654  *		is sufficient in size.
1655  *
1656  * PARAMETERS:
1657  *	bmp	-  pointer to bmap descriptor
1658  *	level	-  starting dmap control page level.
1659  *	l2nb	-  log2 number of contiguous free blocks desired.
1660  *	*blkno	-  on entry, starting block number for conducting the search.
1661  *		   on successful return, the first block within a dmap page
1662  *		   that contains or starts a range of contiguous free blocks.
1663  *
1664  * RETURN VALUES:
1665  *	0	- success
1666  *	-ENOSPC	- insufficient disk resources
1667  *	-EIO	- i/o error
1668  *
1669  * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
1670  */
1671 static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
1672 {
1673 	int rc, leafidx, lev;
1674 	s64 b, lblkno;
1675 	struct dmapctl *dcp;
1676 	int budmin;
1677 	struct metapage *mp;
1678 
1679 	/* starting at the specified dmap control page level and block
1680 	 * number, search down the dmap control levels for the starting
1681 	 * block number of a dmap page that contains or starts off
1682 	 * sufficient free blocks.
1683 	 */
1684 	for (lev = level, b = *blkno; lev >= 0; lev--) {
1685 		/* get the buffer of the dmap control page for the block
1686 		 * number and level (i.e. L0, L1, L2).
1687 		 */
1688 		lblkno = BLKTOCTL(b, bmp->db_l2nbperpage, lev);
1689 		mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
1690 		if (mp == NULL)
1691 			return -EIO;
1692 		dcp = (struct dmapctl *) mp->data;
1693 		budmin = dcp->budmin;
1694 
1695 		if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
1696 			jfs_error(bmp->db_ipbmap->i_sb,
1697 				  "Corrupt dmapctl page\n");
1698 			release_metapage(mp);
1699 			return -EIO;
1700 		}
1701 
1702 		/* search the tree within the dmap control page for
1703 		 * sufficient free space.  if sufficient free space is found,
1704 		 * dbFindLeaf() returns the index of the leaf at which
1705 		 * free space was found.
1706 		 */
1707 		rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx, true);
1708 
1709 		/* release the buffer.
1710 		 */
1711 		release_metapage(mp);
1712 
1713 		/* space found ?
1714 		 */
1715 		if (rc) {
1716 			if (lev != level) {
1717 				jfs_error(bmp->db_ipbmap->i_sb,
1718 					  "dmap inconsistent\n");
1719 				return -EIO;
1720 			}
1721 			return -ENOSPC;
1722 		}
1723 
1724 		/* adjust the block number to reflect the location within
1725 		 * the dmap control page (i.e. the leaf) at which free
1726 		 * space was found.
1727 		 */
1728 		b += (((s64) leafidx) << budmin);
1729 
1730 		/* we stop the search at this dmap control page level if
1731 		 * the number of blocks required is greater than or equal
1732 		 * to the maximum number of blocks described at the next
1733 		 * (lower) level.
1734 		 */
1735 		if (l2nb >= budmin)
1736 			break;
1737 	}
1738 
1739 	*blkno = b;
1740 	return (0);
1741 }
1742 
1743 
1744 /*
1745  * NAME:	dbAllocCtl()
1746  *
1747  * FUNCTION:	attempt to allocate a specified number of contiguous
1748  *		blocks starting within a specific dmap.
1749  *
1750  *		this routine is called by higher level routines that search
1751  *		the dmap control pages above the actual dmaps for contiguous
1752  *		free space.  the result of successful searches by these
1753  *		routines are the starting block numbers within dmaps, with
1754  *		the dmaps themselves containing the desired contiguous free
1755  *		space or starting a contiguous free space of desired size
1756  *		that is made up of the blocks of one or more dmaps. these
1757  *		calls should not fail due to insufficient resources.
1758  *
1759  *		this routine is called in some cases where it is not known
1760  *		whether it will fail due to insufficient resources.  more
1761  *		specifically, this occurs when allocating from an allocation
1762  *		group whose size is equal to the number of blocks per dmap.
1763  *		in this case, the dmap control pages are not examined prior
1764  *		to calling this routine (to save pathlength) and the call
1765  *		might fail.
1766  *
1767  *		for a request size that fits within a dmap, this routine relies
1768  *		upon the dmap's dmtree to find the requested contiguous free
1769  *		space.  for request sizes that are larger than a dmap, the
1770  *		requested free space will start at the first block of the
1771  *		first dmap (i.e. blkno).
1772  *
1773  * PARAMETERS:
1774  *	bmp	-  pointer to bmap descriptor
1775  *	nblocks	 -  actual number of contiguous free blocks to allocate.
1776  *	l2nb	 -  log2 number of contiguous free blocks to allocate.
1777  *	blkno	 -  starting block number of the dmap to start the allocation
1778  *		    from.
1779  *	results	-  on successful return, set to the starting block number
1780  *		   of the newly allocated range.
1781  *
1782  * RETURN VALUES:
1783  *	0	- success
1784  *	-ENOSPC	- insufficient disk resources
1785  *	-EIO	- i/o error
1786  *
1787  * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
1788  */
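/*
 * sketch of the two paths below: a request with l2nb <= L2BPERDMAP is handed
 * to dbAllocDmapLev() against the single dmap at blkno; a larger request
 * must start on a dmap boundary and is satisfied dmap by dmap, with every
 * dmap touched expected to be completely free (stree[ROOT] == L2BPERDMAP),
 * otherwise the partial allocation is backed out.
 */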
1789 static int
1790 dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
1791 {
1792 	int rc, nb;
1793 	s64 b, lblkno, n;
1794 	struct metapage *mp;
1795 	struct dmap *dp;
1796 
1797 	/* check if the allocation request is confined to a single dmap.
1798 	 */
1799 	if (l2nb <= L2BPERDMAP) {
1800 		/* get the buffer for the dmap.
1801 		 */
1802 		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
1803 		mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
1804 		if (mp == NULL)
1805 			return -EIO;
1806 		dp = (struct dmap *) mp->data;
1807 
1808 		if (dp->tree.budmin < 0) {
1809 			release_metapage(mp);	/* don't leak the buffer on error */
			return -EIO;
		}
1810 
1811 		/* try to allocate the blocks.
1812 		 */
1813 		rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results);
1814 		if (rc == 0)
1815 			mark_metapage_dirty(mp);
1816 
1817 		release_metapage(mp);
1818 
1819 		return (rc);
1820 	}
1821 
1822 	/* allocation request involving multiple dmaps. it must start on
1823 	 * a dmap boundary.
1824 	 */
1825 	assert((blkno & (BPERDMAP - 1)) == 0);
1826 
1827 	/* allocate the blocks dmap by dmap.
1828 	 */
1829 	for (n = nblocks, b = blkno; n > 0; n -= nb, b += nb) {
1830 		/* get the buffer for the dmap.
1831 		 */
1832 		lblkno = BLKTODMAP(b, bmp->db_l2nbperpage);
1833 		mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
1834 		if (mp == NULL) {
1835 			rc = -EIO;
1836 			goto backout;
1837 		}
1838 		dp = (struct dmap *) mp->data;
1839 
1840 		/* the dmap better be all free.
1841 		 */
1842 		if (dp->tree.stree[ROOT] != L2BPERDMAP) {
1843 			release_metapage(mp);
1844 			jfs_error(bmp->db_ipbmap->i_sb,
1845 				  "the dmap is not all free\n");
1846 			rc = -EIO;
1847 			goto backout;
1848 		}
1849 
1850 		/* determine how many blocks to allocate from this dmap.
1851 		 */
1852 		nb = min_t(s64, n, BPERDMAP);
1853 
1854 		/* allocate the blocks from the dmap.
1855 		 */
1856 		if ((rc = dbAllocDmap(bmp, dp, b, nb))) {
1857 			release_metapage(mp);
1858 			goto backout;
1859 		}
1860 
1861 		/* write the buffer.
1862 		 */
1863 		write_metapage(mp);
1864 	}
1865 
1866 	/* set the results (starting block number) and return.
1867 	 */
1868 	*results = blkno;
1869 	return (0);
1870 
1871 	/* something failed in handling an allocation request involving
1872 	 * multiple dmaps.  we'll try to clean up by backing out any
1873 	 * allocation that has already happened for this request.  if
1874 	 * we fail in backing out the allocation, we'll mark the file
1875 	 * system to indicate that blocks have been leaked.
1876 	 */
1877       backout:
1878 
1879 	/* try to backout the allocations dmap by dmap.
1880 	 */
1881 	for (n = nblocks - n, b = blkno; n > 0;
1882 	     n -= BPERDMAP, b += BPERDMAP) {
1883 		/* get the buffer for this dmap.
1884 		 */
1885 		lblkno = BLKTODMAP(b, bmp->db_l2nbperpage);
1886 		mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
1887 		if (mp == NULL) {
1888 			/* could not back out.  mark the file system
1889 			 * to indicate that we have leaked blocks.
1890 			 */
1891 			jfs_error(bmp->db_ipbmap->i_sb,
1892 				  "I/O Error: Block Leakage\n");
1893 			continue;
1894 		}
1895 		dp = (struct dmap *) mp->data;
1896 
1897 		/* free the blocks in this dmap.
1898 		 */
1899 		if (dbFreeDmap(bmp, dp, b, BPERDMAP)) {
1900 			/* could not back out.  mark the file system
1901 			 * to indicate that we have leaked blocks.
1902 			 */
1903 			release_metapage(mp);
1904 			jfs_error(bmp->db_ipbmap->i_sb, "Block Leakage\n");
1905 			continue;
1906 		}
1907 
1908 		/* write the buffer.
1909 		 */
1910 		write_metapage(mp);
1911 	}
1912 
1913 	return (rc);
1914 }
1915 
1916 
1917 /*
1918  * NAME:	dbAllocDmapLev()
1919  *
1920  * FUNCTION:	attempt to allocate a specified number of contiguous blocks
1921  *		from a specified dmap.
1922  *
1923  *		this routine checks if the contiguous blocks are available.
1924  *		if so, nblocks blocks are allocated; otherwise, -ENOSPC is
1925  *		returned.
1926  *
1927  * PARAMETERS:
1928  *	bmp	-  pointer to bmap descriptor
1929  *	dp	-  pointer to dmap to attempt to allocate blocks from.
1930  *	l2nb	-  log2 number of contiguous blocks desired.
1931  *	nblocks	-  actual number of contiguous blocks desired.
1932  *	results	-  on successful return, set to the starting block number
1933  *		   of the newly allocated range.
1934  *
1935  * RETURN VALUES:
1936  *	0	- success
1937  *	-ENOSPC	- insufficient disk resources
1938  *	-EIO	- i/o error
1939  *
1940  * serialization: IREAD_LOCK(ipbmap), e.g., from dbAlloc(), or
1941  *	IWRITE_LOCK(ipbmap), e.g., dbAllocCtl(), held on entry/exit;
1942  */
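/*
 * rough example of the leaf-to-block arithmetic below (assuming the standard
 * geometry DBWORD == 32 and L2DBWORD == BUDMIN == 5): leaf index 3 maps to
 * the dmap word covering blocks 96..127 of this dmap; if that word is only
 * partially free, dbFindBits() locates an aligned run of 1 << l2nb free bits
 * within it and that offset is added to the block number.
 */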
1943 static int
1944 dbAllocDmapLev(struct bmap * bmp,
1945 	       struct dmap * dp, int nblocks, int l2nb, s64 * results)
1946 {
1947 	s64 blkno;
1948 	int leafidx, rc;
1949 
1950 	/* can't be more than a dmap's worth of blocks */
1951 	assert(l2nb <= L2BPERDMAP);
1952 
1953 	/* search the tree within the dmap page for sufficient
1954 	 * free space.  if sufficient free space is found, dbFindLeaf()
1955 	 * returns the index of the leaf at which free space was found.
1956 	 */
1957 	if (dbFindLeaf((dmtree_t *) &dp->tree, l2nb, &leafidx, false))
1958 		return -ENOSPC;
1959 
1960 	if (leafidx < 0)
1961 		return -EIO;
1962 
1963 	/* determine the block number within the file system corresponding
1964 	 * to the leaf at which free space was found.
1965 	 */
1966 	blkno = le64_to_cpu(dp->start) + (leafidx << L2DBWORD);
1967 
1968 	/* if not all bits of the dmap word are free, get the starting
1969 	 * bit number within the dmap word of the required string of free
1970 	 * bits and adjust the block number with this value.
1971 	 */
1972 	if (dp->tree.stree[leafidx + LEAFIND] < BUDMIN)
1973 		blkno += dbFindBits(le32_to_cpu(dp->wmap[leafidx]), l2nb);
1974 
1975 	/* allocate the blocks */
1976 	if ((rc = dbAllocDmap(bmp, dp, blkno, nblocks)) == 0)
1977 		*results = blkno;
1978 
1979 	return (rc);
1980 }
1981 
1982 
1983 /*
1984  * NAME:	dbAllocDmap()
1985  *
1986  * FUNCTION:	adjust the disk allocation map to reflect the allocation
1987  *		of a specified block range within a dmap.
1988  *
1989  *		this routine allocates the specified blocks from the dmap
1990  *		through a call to dbAllocBits(). if the allocation of the
1991  *		block range causes the maximum string of free blocks within
1992  *		the dmap to change (i.e. the value of the root of the dmap's
1993  *		dmtree), this routine will cause this change to be reflected
1994  *		up through the appropriate levels of the dmap control pages
1995  *		by a call to dbAdjCtl() for the L0 dmap control page that
1996  *		covers this dmap.
1997  *
1998  * PARAMETERS:
1999  *	bmp	-  pointer to bmap descriptor
2000  *	dp	-  pointer to dmap to allocate the block range from.
2001  *	blkno	-  starting block number of the block to be allocated.
2002  *	nblocks	-  number of blocks to be allocated.
2003  *
2004  * RETURN VALUES:
2005  *	0	- success
2006  *	-EIO	- i/o error
2007  *
2008  * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2009  */
2010 static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
2011 		       int nblocks)
2012 {
2013 	s8 oldroot;
2014 	int rc;
2015 
2016 	/* save the current value of the root (i.e. maximum free string)
2017 	 * of the dmap tree.
2018 	 */
2019 	oldroot = dp->tree.stree[ROOT];
2020 
2021 	/* allocate the specified (blocks) bits */
2022 	dbAllocBits(bmp, dp, blkno, nblocks);
2023 
2024 	/* if the root has not changed, done. */
2025 	if (dp->tree.stree[ROOT] == oldroot)
2026 		return (0);
2027 
2028 	/* root changed. bubble the change up to the dmap control pages.
2029 	 * if the adjustment of the upper level control pages fails,
2030 	 * backout the bit allocation (thus making everything consistent).
2031 	 */
2032 	if ((rc = dbAdjCtl(bmp, blkno, dp->tree.stree[ROOT], 1, 0)))
2033 		dbFreeBits(bmp, dp, blkno, nblocks);
2034 
2035 	return (rc);
2036 }
2037 
2038 
2039 /*
2040  * NAME:	dbFreeDmap()
2041  *
2042  * FUNCTION:	adjust the disk allocation map to reflect the freeing
2043  *		of a specified block range within a dmap.
2044  *
2045  *		this routine frees the specified blocks from the dmap through
2046  *		a call to dbFreeBits(). if the deallocation of the block range
2047  *		causes the maximum string of free blocks within the dmap to
2048  *		change (i.e. the value of the root of the dmap's dmtree), this
2049  *		routine will cause this change to be reflected up through the
2050  *		appropriate levels of the dmap control pages by a call to
2051  *		dbAdjCtl() for the L0 dmap control page that covers this dmap.
2052  *
2053  * PARAMETERS:
2054  *	bmp	-  pointer to bmap descriptor
2055  *	dp	-  pointer to dmap to free the block range from.
2056  *	blkno	-  starting block number of the block to be freed.
2057  *	nblocks	-  number of blocks to be freed.
2058  *
2059  * RETURN VALUES:
2060  *	0	- success
2061  *	-EIO	- i/o error
2062  *
2063  * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2064  */
2065 static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
2066 		      int nblocks)
2067 {
2068 	s8 oldroot;
2069 	int rc = 0, word;
2070 
2071 	/* save the current value of the root (i.e. maximum free string)
2072 	 * of the dmap tree.
2073 	 */
2074 	oldroot = dp->tree.stree[ROOT];
2075 
2076 	/* free the specified (blocks) bits */
2077 	rc = dbFreeBits(bmp, dp, blkno, nblocks);
2078 
2079 	/* if error or the root has not changed, done. */
2080 	if (rc || (dp->tree.stree[ROOT] == oldroot))
2081 		return (rc);
2082 
2083 	/* root changed. bubble the change up to the dmap control pages.
2084 	 * if the adjustment of the upper level control pages fails,
2085 	 * backout the deallocation.
2086 	 */
2087 	if ((rc = dbAdjCtl(bmp, blkno, dp->tree.stree[ROOT], 0, 0))) {
2088 		word = (blkno & (BPERDMAP - 1)) >> L2DBWORD;
2089 
2090 		/* as part of backing out the deallocation, we will have
2091 		 * to back split the dmap tree if the deallocation caused
2092 		 * the freed blocks to become part of a larger binary buddy
2093 		 * system.
2094 		 */
2095 		if (dp->tree.stree[word] == NOFREE)
2096 			dbBackSplit((dmtree_t *)&dp->tree, word, false);
2097 
2098 		dbAllocBits(bmp, dp, blkno, nblocks);
2099 	}
2100 
2101 	return (rc);
2102 }
2103 
2104 
2105 /*
2106  * NAME:	dbAllocBits()
2107  *
2108  * FUNCTION:	allocate a specified block range from a dmap.
2109  *
2110  *		this routine updates the dmap to reflect the working
2111  *		state allocation of the specified block range. it directly
2112  *		updates the bits of the working map and causes the adjustment
2113  *		of the binary buddy system described by the dmap's dmtree
2114  *		leaves to reflect the bits allocated.  it also causes the
2115  *		dmap's dmtree, as a whole, to reflect the allocated range.
2116  *
2117  * PARAMETERS:
2118  *	bmp	-  pointer to bmap descriptor
2119  *	dp	-  pointer to dmap to allocate bits from.
2120  *	blkno	-  starting block number of the bits to be allocated.
2121  *	nblocks	-  number of bits to be allocated.
2122  *
2123  * RETURN VALUES: none
2124  *
2125  * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2126  */
2127 static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
2128 			int nblocks)
2129 {
2130 	int dbitno, word, rembits, nb, nwords, wbitno, nw, agno;
2131 	dmtree_t *tp = (dmtree_t *) & dp->tree;
2132 	int size;
2133 	s8 *leaf;
2134 
2135 	/* pick up a pointer to the leaves of the dmap tree */
2136 	leaf = dp->tree.stree + LEAFIND;
2137 
2138 	/* determine the bit number and word within the dmap of the
2139 	 * starting block.
2140 	 */
2141 	dbitno = blkno & (BPERDMAP - 1);
2142 	word = dbitno >> L2DBWORD;
2143 
2144 	/* block range better be within the dmap */
2145 	assert(dbitno + nblocks <= BPERDMAP);
2146 
2147 	/* allocate the bits of the dmap's words corresponding to the block
2148 	 * range. not all bits of the first and last words may be contained
2149 	 * within the block range.  if this is the case, we'll work against
2150 	 * those words (i.e. partial first and/or last) on an individual basis
2151 	 * (a single pass), allocating the bits of interest by hand and
2152 	 * updating the leaf corresponding to the dmap word. a single pass
2153 	 * will be used for all dmap words fully contained within the
2154 	 * specified range.  within this pass, the bits of all fully contained
2155 	 * dmap words will be marked as allocated in a single shot and the leaves
2156 	 * will be updated. a single leaf may describe the free space of
2157 	 * multiple dmap words, so we may update only a subset of the actual
2158 	 * leaves corresponding to the dmap words of the block range.
2159 	 */
2160 	for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
2161 		/* determine the bit number within the word and
2162 		 * the number of bits within the word.
2163 		 */
2164 		wbitno = dbitno & (DBWORD - 1);
2165 		nb = min(rembits, DBWORD - wbitno);
2166 
2167 		/* check if only part of a word is to be allocated.
2168 		 */
2169 		if (nb < DBWORD) {
2170 			/* allocate (set to 1) the appropriate bits within
2171 			 * this dmap word.
2172 			 */
2173 			dp->wmap[word] |= cpu_to_le32(ONES << (DBWORD - nb)
2174 						      >> wbitno);
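			/* e.g. (sketch) with wbitno == 4 and nb == 3 the
			 * expression yields 0x0e000000, i.e. the three bits
			 * starting at bit 4 counted from the most
			 * significant end of the word.
			 */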
2175 
2176 			/* update the leaf for this dmap word. in addition
2177 			 * to setting the leaf value to the binary buddy max
2178 			 * of the updated dmap word, dbSplit() will split
2179 			 * the binary system of the leaves if need be.
2180 			 */
2181 			dbSplit(tp, word, BUDMIN,
2182 				dbMaxBud((u8 *)&dp->wmap[word]), false);
2183 
2184 			word += 1;
2185 		} else {
2186 			/* one or more dmap words are fully contained
2187 			 * within the block range.  determine how many
2188 			 * words and allocate (set to 1) the bits of these
2189 			 * words.
2190 			 */
2191 			nwords = rembits >> L2DBWORD;
2192 			memset(&dp->wmap[word], (int) ONES, nwords * 4);
2193 
2194 			/* determine how many bits.
2195 			 */
2196 			nb = nwords << L2DBWORD;
2197 
2198 			/* now update the appropriate leaves to reflect
2199 			 * the allocated words.
2200 			 */
2201 			for (; nwords > 0; nwords -= nw) {
2202 				if (leaf[word] < BUDMIN) {
2203 					jfs_error(bmp->db_ipbmap->i_sb,
2204 						  "leaf page corrupt\n");
2205 					break;
2206 				}
2207 
2208 				/* determine what the leaf value should be
2209 				 * updated to as the minimum of the l2 number
2210 				 * of bits being allocated and the l2 number
2211 				 * of bits currently described by this leaf.
2212 				 */
2213 				size = min_t(int, leaf[word],
2214 					     NLSTOL2BSZ(nwords));
2215 
2216 				/* update the leaf to reflect the allocation.
2217 				 * in addition to setting the leaf value to
2218 				 * NOFREE, dbSplit() will split the binary
2219 				 * system of the leaves to reflect the current
2220 				 * allocation (size).
2221 				 */
2222 				dbSplit(tp, word, size, NOFREE, false);
2223 
2224 				/* get the number of dmap words handled */
2225 				nw = BUDSIZE(size, BUDMIN);
2226 				word += nw;
2227 			}
2228 		}
2229 	}
2230 
2231 	/* update the free count for this dmap */
2232 	le32_add_cpu(&dp->nfree, -nblocks);
2233 
2234 	BMAP_LOCK(bmp);
2235 
2236 	/* update the maximum (rightmost) active allocation group
2237 	 * number if this allocation group lies beyond the current
2238 	 * maximum.
2239 	 */
2240 	agno = blkno >> bmp->db_agl2size;
2241 	if (agno > bmp->db_maxag)
2242 		bmp->db_maxag = agno;
2243 
2244 	/* update the free count for the allocation group and map */
2245 	bmp->db_agfree[agno] -= nblocks;
2246 	bmp->db_nfree -= nblocks;
2247 
2248 	BMAP_UNLOCK(bmp);
2249 }
2250 
2251 
2252 /*
2253  * NAME:	dbFreeBits()
2254  *
2255  * FUNCTION:	free a specified block range from a dmap.
2256  *
2257  *		this routine updates the dmap to reflect the working
2258  *		state allocation of the specified block range. it directly
2259  *		updates the bits of the working map and causes the adjustment
2260  *		of the binary buddy system described by the dmap's dmtree
2261  *		leaves to reflect the bits freed.  it also causes the dmap's
2262  *		dmtree, as a whole, to reflect the deallocated range.
2263  *
2264  * PARAMETERS:
2265  *	bmp	-  pointer to bmap descriptor
2266  *	dp	-  pointer to dmap to free bits from.
2267  *	blkno	-  starting block number of the bits to be freed.
2268  *	nblocks	-  number of bits to be freed.
2269  *
2270  * RETURN VALUES: 0 for success, -EIO if the dmtree join fails
2271  *
2272  * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2273  */
2274 static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
2275 		       int nblocks)
2276 {
2277 	int dbitno, word, rembits, nb, nwords, wbitno, nw, agno;
2278 	dmtree_t *tp = (dmtree_t *) & dp->tree;
2279 	int rc = 0;
2280 	int size;
2281 
2282 	/* determine the bit number and word within the dmap of the
2283 	 * starting block.
2284 	 */
2285 	dbitno = blkno & (BPERDMAP - 1);
2286 	word = dbitno >> L2DBWORD;
2287 
2288 	/* block range better be within the dmap.
2289 	 */
2290 	assert(dbitno + nblocks <= BPERDMAP);
2291 
2292 	/* free the bits of the dmaps words corresponding to the block range.
2293 	 * not all bits of the first and last words may be contained within
2294 	 * the block range.  if this is the case, we'll work against those
2295 	 * words (i.e. partial first and/or last) on an individual basis
2296 	 * (a single pass), freeing the bits of interest by hand and updating
2297 	 * the leaf corresponding to the dmap word. a single pass will be used
2298 	 * for all dmap words fully contained within the specified range.
2299 	 * within this pass, the bits of all fully contained dmap words will
2300 	 * be marked as free in a single shot and the leaves will be updated. a
2301 	 * single leaf may describe the free space of multiple dmap words,
2302 	 * so we may update only a subset of the actual leaves corresponding
2303 	 * to the dmap words of the block range.
2304 	 *
2305 	 * dbJoin() is used to update leaf values and will join the binary
2306 	 * buddy system of the leaves if the new leaf values indicate this
2307 	 * should be done.
2308 	 */
2309 	for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
2310 		/* determine the bit number within the word and
2311 		 * the number of bits within the word.
2312 		 */
2313 		wbitno = dbitno & (DBWORD - 1);
2314 		nb = min(rembits, DBWORD - wbitno);
2315 
2316 		/* check if only part of a word is to be freed.
2317 		 */
2318 		if (nb < DBWORD) {
2319 			/* free (zero) the appropriate bits within this
2320 			 * dmap word.
2321 			 */
2322 			dp->wmap[word] &=
2323 			    cpu_to_le32(~(ONES << (DBWORD - nb)
2324 					  >> wbitno));
2325 
2326 			/* update the leaf for this dmap word.
2327 			 */
2328 			rc = dbJoin(tp, word,
2329 				    dbMaxBud((u8 *)&dp->wmap[word]), false);
2330 			if (rc)
2331 				return rc;
2332 
2333 			word += 1;
2334 		} else {
2335 			/* one or more dmap words are fully contained
2336 			 * within the block range.  determine how many
2337 			 * words and free (zero) the bits of these words.
2338 			 */
2339 			nwords = rembits >> L2DBWORD;
2340 			memset(&dp->wmap[word], 0, nwords * 4);
2341 
2342 			/* determine how many bits.
2343 			 */
2344 			nb = nwords << L2DBWORD;
2345 
2346 			/* now update the appropriate leaves to reflect
2347 			 * the freed words.
2348 			 */
2349 			for (; nwords > 0; nwords -= nw) {
2350 				/* determine what the leaf value should be
2351 				 * updated to as the minimum of the l2 number
2352 				 * of bits being freed and the l2 (max) number
2353 				 * of bits that can be described by this leaf.
2354 				 */
2355 				size =
2356 				    min(LITOL2BSZ
2357 					(word, L2LPERDMAP, BUDMIN),
2358 					NLSTOL2BSZ(nwords));
2359 
2360 				/* update the leaf.
2361 				 */
2362 				rc = dbJoin(tp, word, size, false);
2363 				if (rc)
2364 					return rc;
2365 
2366 				/* get the number of dmap words handled.
2367 				 */
2368 				nw = BUDSIZE(size, BUDMIN);
2369 				word += nw;
2370 			}
2371 		}
2372 	}
2373 
2374 	/* update the free count for this dmap.
2375 	 */
2376 	le32_add_cpu(&dp->nfree, nblocks);
2377 
2378 	BMAP_LOCK(bmp);
2379 
2380 	/* update the free count for the allocation group and
2381 	 * map.
2382 	 */
2383 	agno = blkno >> bmp->db_agl2size;
2384 	bmp->db_nfree += nblocks;
2385 	bmp->db_agfree[agno] += nblocks;
2386 
2387 	/* check if this allocation group is not completely free and
2388 	 * if it is currently the maximum (rightmost) allocation group.
2389 	 * if so, establish the new maximum allocation group number by
2390 	 * searching left for the first allocation group with allocation.
2391 	 */
2392 	if ((bmp->db_agfree[agno] == bmp->db_agsize && agno == bmp->db_maxag) ||
2393 	    (agno == bmp->db_numag - 1 &&
2394 	     bmp->db_agfree[agno] == (bmp-> db_mapsize & (BPERDMAP - 1)))) {
2395 		while (bmp->db_maxag > 0) {
2396 			bmp->db_maxag -= 1;
2397 			if (bmp->db_agfree[bmp->db_maxag] !=
2398 			    bmp->db_agsize)
2399 				break;
2400 		}
2401 
2402 		/* re-establish the allocation group preference if the
2403 		 * current preference is right of the maximum allocation
2404 		 * group.
2405 		 */
2406 		if (bmp->db_agpref > bmp->db_maxag)
2407 			bmp->db_agpref = bmp->db_maxag;
2408 	}
2409 
2410 	BMAP_UNLOCK(bmp);
2411 
2412 	return 0;
2413 }
2414 
2415 
2416 /*
2417  * NAME:	dbAdjCtl()
2418  *
2419  * FUNCTION:	adjust a dmap control page at a specified level to reflect
2420  *		the change in a lower level dmap or dmap control page's
2421  *		maximum string of free blocks (i.e. a change in the root
2422  *		of the lower level object's dmtree) due to the allocation
2423  *		or deallocation of a range of blocks within a single dmap.
2424  *
2425  *		on entry, this routine is provided with the new value of
2426  *		the lower level dmap or dmap control page root and the
2427  *		starting block number of the block range whose allocation
2428  *		or deallocation resulted in the root change.  this range
2429  *		is represented by a single leaf of the current dmapctl
2430  *		and the leaf will be updated with this value, possibly
2431  *		causing a binary buddy system within the leaves to be
2432  *		split or joined.  the update may also cause the dmapctl's
2433  *		dmtree to be updated.
2434  *
2435  *		if the adjustment of the dmap control page, itself, causes its
2436  *		root to change, this change will be bubbled up to the next dmap
2437  *		control level by a recursive call to this routine, specifying
2438  *		the new root value and the next dmap control page level to
2439  *		be adjusted.
2440  * PARAMETERS:
2441  *	bmp	-  pointer to bmap descriptor
2442  *	blkno	-  the first block of a block range within a dmap.  it is
2443  *		   the allocation or deallocation of this block range that
2444  *		   requires the dmap control page to be adjusted.
2445  *	newval	-  the new value of the lower level dmap or dmap control
2446  *		   page root.
2447  *	alloc	-  'true' if adjustment is due to an allocation.
2448  *	level	-  current level of dmap control page (i.e. L0, L1, L2) to
2449  *		   be adjusted.
2450  *
2451  * RETURN VALUES:
2452  *	0	- success
2453  *	-EIO	- i/o error
2454  *
2455  * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2456  */
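/*
 * sketch of a typical call chain: freeing the last allocated block of a dmap
 * raises that dmap's root, so dbFreeDmap() calls this routine at level 0 to
 * update the covering L0 leaf; if that changes the L0 root, the routine
 * recurses at level 1 (and possibly level 2), and at the top level only
 * bmp->db_maxfreebud is updated.
 */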
2457 static int
2458 dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
2459 {
2460 	struct metapage *mp;
2461 	s8 oldroot;
2462 	int oldval;
2463 	s64 lblkno;
2464 	struct dmapctl *dcp;
2465 	int rc, leafno, ti;
2466 
2467 	/* get the buffer for the dmap control page for the specified
2468 	 * block number and control page level.
2469 	 */
2470 	lblkno = BLKTOCTL(blkno, bmp->db_l2nbperpage, level);
2471 	mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
2472 	if (mp == NULL)
2473 		return -EIO;
2474 	dcp = (struct dmapctl *) mp->data;
2475 
2476 	if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
2477 		jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmapctl page\n");
2478 		release_metapage(mp);
2479 		return -EIO;
2480 	}
2481 
2482 	/* determine the leaf number corresponding to the block and
2483 	 * the index within the dmap control tree.
2484 	 */
2485 	leafno = BLKTOCTLLEAF(blkno, dcp->budmin);
2486 	ti = leafno + le32_to_cpu(dcp->leafidx);
2487 
2488 	/* save the current leaf value and the current root level (i.e.
2489 	 * maximum l2 free string described by this dmapctl).
2490 	 */
2491 	oldval = dcp->stree[ti];
2492 	oldroot = dcp->stree[ROOT];
2493 
2494 	/* check if this is a control page update for an allocation.
2495 	 * if so, update the leaf to reflect the new leaf value using
2496 	 * dbSplit(); otherwise (deallocation), use dbJoin() to update
2497 	 * the leaf with the new value.  in addition to updating the
2498 	 * leaf, dbSplit() will also split the binary buddy system of
2499 	 * the leaves, if required, and bubble new values within the
2500 	 * dmapctl tree, if required.  similarly, dbJoin() will join
2501 	 * the binary buddy system of leaves and bubble new values up
2502 	 * the dmapctl tree as required by the new leaf value.
2503 	 */
2504 	if (alloc) {
2505 		/* check if we are in the middle of a binary buddy
2506 		 * system.  this happens when we are performing the
2507 		 * first allocation out of an allocation group that
2508 		 * is part (not the first part) of a larger binary
2509 		 * buddy system.  if we are in the middle, back split
2510 		 * the system prior to calling dbSplit() which assumes
2511 		 * that it is at the front of a binary buddy system.
2512 		 */
2513 		if (oldval == NOFREE) {
2514 			rc = dbBackSplit((dmtree_t *)dcp, leafno, true);
2515 			if (rc) {
2516 				release_metapage(mp);
2517 				return rc;
2518 			}
2519 			oldval = dcp->stree[ti];
2520 		}
2521 		dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, newval, true);
2522 	} else {
2523 		rc = dbJoin((dmtree_t *) dcp, leafno, newval, true);
2524 		if (rc) {
2525 			release_metapage(mp);
2526 			return rc;
2527 		}
2528 	}
2529 
2530 	/* check if the root of the current dmap control page changed due
2531 	 * to the update and if the current dmap control page is not at
2532 	 * the current top level (i.e. L0, L1, L2) of the map.  if so (i.e.
2533 	 * root changed and this is not the top level), call this routine
2534 	 * again (recursion) for the next higher level of the mapping to
2535 	 * reflect the change in root for the current dmap control page.
2536 	 */
2537 	if (dcp->stree[ROOT] != oldroot) {
2538 		/* are we below the top level of the map.  if so,
2539 		 * bubble the root up to the next higher level.
2540 		 */
2541 		if (level < bmp->db_maxlevel) {
2542 			/* bubble up the new root of this dmap control page to
2543 			 * the next level.
2544 			 */
2545 			if ((rc =
2546 			     dbAdjCtl(bmp, blkno, dcp->stree[ROOT], alloc,
2547 				      level + 1))) {
2548 				/* something went wrong in bubbling up the new
2549 				 * root value, so backout the changes to the
2550 				 * current dmap control page.
2551 				 */
2552 				if (alloc) {
2553 					dbJoin((dmtree_t *) dcp, leafno,
2554 					       oldval, true);
2555 				} else {
2556 					/* the dbJoin() above might have
2557 					 * caused a larger binary buddy system
2558 					 * to form and we may now be in the
2559 					 * middle of it.  if this is the case,
2560 					 * back split the buddies.
2561 					 */
2562 					if (dcp->stree[ti] == NOFREE)
2563 						dbBackSplit((dmtree_t *)
2564 							    dcp, leafno, true);
2565 					dbSplit((dmtree_t *) dcp, leafno,
2566 						dcp->budmin, oldval, true);
2567 				}
2568 
2569 				/* release the buffer and return the error.
2570 				 */
2571 				release_metapage(mp);
2572 				return (rc);
2573 			}
2574 		} else {
2575 			/* we're at the top level of the map. update
2576 			 * the bmap control page to reflect the size
2577 			 * of the maximum free buddy system.
2578 			 */
2579 			assert(level == bmp->db_maxlevel);
2580 			if (bmp->db_maxfreebud != oldroot) {
2581 				jfs_error(bmp->db_ipbmap->i_sb,
2582 					  "the maximum free buddy is not the old root\n");
2583 			}
2584 			bmp->db_maxfreebud = dcp->stree[ROOT];
2585 		}
2586 	}
2587 
2588 	/* write the buffer.
2589 	 */
2590 	write_metapage(mp);
2591 
2592 	return (0);
2593 }
2594 
2595 
2596 /*
2597  * NAME:	dbSplit()
2598  *
2599  * FUNCTION:	update the leaf of a dmtree with a new value, splitting
2600  *		the leaf from the binary buddy system of the dmtree's
2601  *		leaves, as required.
2602  *
2603  * PARAMETERS:
2604  *	tp	- pointer to the tree containing the leaf.
2605  *	leafno	- the number of the leaf to be updated.
2606  *	splitsz	- the size the binary buddy system starting at the leaf
2607  *		  must be split to, specified as the log2 number of blocks.
2608  *	newval	- the new value for the leaf.
2609  *
2610  * RETURN VALUES: none
2611  *
2612  * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2613  */
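/*
 * rough example (assuming BUDSIZE(s, m) == 1 << ((s) - (m)) and budmin == 5):
 * splitting a leaf of value 7 (a 4-word buddy) down to splitsz == 5 first
 * gives its 2-word buddy (leafno ^ 2) the value 6, then its 1-word buddy
 * (leafno ^ 1) the value 5, and finally sets the leaf itself to newval.
 */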
2614 static void dbSplit(dmtree_t *tp, int leafno, int splitsz, int newval, bool is_ctl)
2615 {
2616 	int budsz;
2617 	int cursz;
2618 	s8 *leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx);
2619 
2620 	/* check if the leaf needs to be split.
2621 	 */
2622 	if (leaf[leafno] > tp->dmt_budmin) {
2623 		/* the split occurs by cutting the buddy system in half
2624 		 * at the specified leaf until we reach the specified
2625 		 * size.  pick up the starting split size (current size
2626 		 * - 1 in l2) and the corresponding buddy size.
2627 		 */
2628 		cursz = leaf[leafno] - 1;
2629 		budsz = BUDSIZE(cursz, tp->dmt_budmin);
2630 
2631 		/* split until we reach the specified size.
2632 		 */
2633 		while (cursz >= splitsz) {
2634 			/* update the buddy's leaf with its new value.
2635 			 */
2636 			dbAdjTree(tp, leafno ^ budsz, cursz, is_ctl);
2637 
2638 			/* on to the next size and buddy.
2639 			 */
2640 			cursz -= 1;
2641 			budsz >>= 1;
2642 		}
2643 	}
2644 
2645 	/* adjust the dmap tree to reflect the specified leaf's new
2646 	 * value.
2647 	 */
2648 	dbAdjTree(tp, leafno, newval, is_ctl);
2649 }
2650 
2651 
2652 /*
2653  * NAME:	dbBackSplit()
2654  *
2655  * FUNCTION:	back split the binary buddy system of dmtree leaves
2656  *		that hold a specified leaf until the specified leaf
2657  *		starts its own binary buddy system.
2658  *
2659  *		the allocators typically perform allocations at the start
2660  *		of binary buddy systems and dbSplit() is used to accomplish
2661  *		any required splits.  in some cases, however, allocation
2662  *		may occur in the middle of a binary system and requires a
2663  *		back split, with the split proceeding out from the middle of
2664  *		the system (less efficient) rather than the start of the
2665  *		system (more efficient).  the cases in which a back split
2666  *		is required are rare and are limited to the first allocation
2667  *		within an allocation group which is a part (not first part)
2668  *		of a larger binary buddy system and a few exception cases
2669  *		in which a previous join operation must be backed out.
2670  *
2671  * PARAMETERS:
2672  *	tp	- pointer to the tree containing the leaf.
2673  *	leafno	- the number of the leaf to be updated.
2674  *
2675  * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error (corrupt dmtree)
2676  *
2677  * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2678  */
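/*
 * conceptual sketch: while the leaf sits in the middle of a larger free
 * buddy (its own value is NOFREE), the loop below locates the leaf heading
 * the enclosing buddy system and uses dbSplit() to cut that system in half,
 * repeating until the specified leaf heads a buddy of its own natural size.
 */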
2679 static int dbBackSplit(dmtree_t *tp, int leafno, bool is_ctl)
2680 {
2681 	int budsz, bud, w, bsz, size;
2682 	int cursz;
2683 	s8 *leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx);
2684 
2685 	/* leaf should be part (not first part) of a binary
2686 	 * buddy system.
2687 	 */
2688 	assert(leaf[leafno] == NOFREE);
2689 
2690 	/* the back split is accomplished by iteratively finding the leaf
2691 	 * that starts the buddy system that contains the specified leaf and
2692 	 * splitting that system in two.  this iteration continues until
2693 	 * the specified leaf becomes the start of a buddy system.
2694 	 *
2695 	 * determine maximum possible l2 size for the specified leaf.
2696 	 */
2697 	size =
2698 	    LITOL2BSZ(leafno, le32_to_cpu(tp->dmt_l2nleafs),
2699 		      tp->dmt_budmin);
2700 
2701 	/* determine the number of leaves covered by this size.  this
2702 	 * is the buddy size that we will start with as we search for
2703 	 * the buddy system that contains the specified leaf.
2704 	 */
2705 	budsz = BUDSIZE(size, tp->dmt_budmin);
2706 
2707 	/* back split.
2708 	 */
2709 	while (leaf[leafno] == NOFREE) {
2710 		/* find the leftmost buddy leaf.
2711 		 */
2712 		for (w = leafno, bsz = budsz;; bsz <<= 1,
2713 		     w = (w < bud) ? w : bud) {
2714 			if (bsz >= le32_to_cpu(tp->dmt_nleafs)) {
2715 				jfs_err("JFS: block map error in dbBackSplit");
2716 				return -EIO;
2717 			}
2718 
2719 			/* determine the buddy.
2720 			 */
2721 			bud = w ^ bsz;
2722 
2723 			/* check if this buddy is the start of the system.
2724 			 */
2725 			if (leaf[bud] != NOFREE) {
2726 				/* split the leaf at the start of the
2727 				 * system in two.
2728 				 */
2729 				cursz = leaf[bud] - 1;
2730 				dbSplit(tp, bud, cursz, cursz, is_ctl);
2731 				break;
2732 			}
2733 		}
2734 	}
2735 
2736 	if (leaf[leafno] != size) {
2737 		jfs_err("JFS: wrong leaf value in dbBackSplit");
2738 		return -EIO;
2739 	}
2740 	return 0;
2741 }
2742 
2743 
2744 /*
2745  * NAME:	dbJoin()
2746  *
2747  * FUNCTION:	update the leaf of a dmtree with a new value, joining
2748  *		the leaf with other leaves of the dmtree into a multi-leaf
2749  *		binary buddy system, as required.
2750  *
2751  * PARAMETERS:
2752  *	tp	- pointer to the tree containing the leaf.
2753  *	leafno	- the number of the leaf to be updated.
2754  *	newval	- the new value for the leaf.
2755  *
2756  * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error (unexpected buddy value)
2757  */
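/*
 * rough example (budmin == 5): when a freed word raises a leaf to 5 and its
 * 1-word buddy already holds 5, the right-hand leaf of the pair is set to
 * NOFREE and the join is retried with value 6 against the 2-word buddy, and
 * so on until a buddy's value differs or the whole tree has been joined;
 * the surviving left leaf finally receives the accumulated value via
 * dbAdjTree().
 */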
2758 static int dbJoin(dmtree_t *tp, int leafno, int newval, bool is_ctl)
2759 {
2760 	int budsz, buddy;
2761 	s8 *leaf;
2762 
2763 	/* can the new leaf value require a join with other leaves ?
2764 	 */
2765 	if (newval >= tp->dmt_budmin) {
2766 		/* pickup a pointer to the leaves of the tree.
2767 		 */
2768 		leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx);
2769 
2770 		/* try to join the specified leaf into a large binary
2771 		 * buddy system.  the join proceeds by attempting to join
2772 		 * the specified leafno with its buddy (leaf) at new value.
2773 		 * if the join occurs, we attempt to join the left leaf
2774 		 * of the joined buddies with its buddy at new value + 1.
2775 		 * we continue to join until we find a buddy that cannot be
2776 		 * joined (does not have a value equal to the size of the
2777 		 * last join) or until all leaves have been joined into a
2778 		 * single system.
2779 		 *
2780 		 * get the buddy size (number of words covered) of
2781 		 * the new value.
2782 		 */
2783 		budsz = BUDSIZE(newval, tp->dmt_budmin);
2784 
2785 		/* try to join.
2786 		 */
2787 		while (budsz < le32_to_cpu(tp->dmt_nleafs)) {
2788 			/* get the buddy leaf.
2789 			 */
2790 			buddy = leafno ^ budsz;
2791 
2792 			/* if the leaf's new value is greater than its
2793 			 * buddy's value, we join no more.
2794 			 */
2795 			if (newval > leaf[buddy])
2796 				break;
2797 
2798 			/* It shouldn't be less */
2799 			if (newval < leaf[buddy])
2800 				return -EIO;
2801 
2802 			/* check which (leafno or buddy) is the left buddy.
2803 			 * the left buddy gets to claim the blocks resulting
2804 			 * from the join while the right gets to claim none.
2805 			 * the left buddy is also eligible to participate in
2806 			 * a join at the next higher level while the right
2807 			 * is not.
2808 			 *
2809 			 */
2810 			if (leafno < buddy) {
2811 				/* leafno is the left buddy.
2812 				 */
2813 				dbAdjTree(tp, buddy, NOFREE, is_ctl);
2814 			} else {
2815 				/* buddy is the left buddy and becomes
2816 				 * leafno.
2817 				 */
2818 				dbAdjTree(tp, leafno, NOFREE, is_ctl);
2819 				leafno = buddy;
2820 			}
2821 
2822 			/* on to try the next join.
2823 			 */
2824 			newval += 1;
2825 			budsz <<= 1;
2826 		}
2827 	}
2828 
2829 	/* update the leaf value.
2830 	 */
2831 	dbAdjTree(tp, leafno, newval, is_ctl);
2832 
2833 	return 0;
2834 }
2835 
2836 
2837 /*
2838  * NAME:	dbAdjTree()
2839  *
2840  * FUNCTION:	update a leaf of a dmtree with a new value, adjusting
2841  *		the dmtree, as required, to reflect the new leaf value.
2842  *		the combination of any buddies must already be done before
2843  *		this is called.
2844  *
2845  * PARAMETERS:
2846  *	tp	- pointer to the tree to be adjusted.
2847  *	leafno	- the number of the leaf to be updated.
2848  *	newval	- the new value for the leaf.
2849  *
2850  * RETURN VALUES: none
2851  */
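/*
 * index arithmetic sketch (assuming the usual dmap tree layout, where
 * LEAFIND == 85): stree[] stores a 4-ary tree, so the children of node i
 * live at 4*i + 1 .. 4*i + 4; e.g. leaf index lp == 85 belongs to the group
 * 85..88, whose parent is (85 - 1) >> 2 == 21.
 */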
2852 static void dbAdjTree(dmtree_t *tp, int leafno, int newval, bool is_ctl)
2853 {
2854 	int lp, pp, k;
2855 	int max, size;
2856 
2857 	size = is_ctl ? CTLTREESIZE : TREESIZE;
2858 
2859 	/* pick up the index of the leaf for this leafno.
2860 	 */
2861 	lp = leafno + le32_to_cpu(tp->dmt_leafidx);
2862 
2863 	if (WARN_ON_ONCE(lp >= size || lp < 0))
2864 		return;
2865 
2866 	/* is the current value the same as the old value ?  if so,
2867 	 * there is nothing to do.
2868 	 */
2869 	if (tp->dmt_stree[lp] == newval)
2870 		return;
2871 
2872 	/* set the new value.
2873 	 */
2874 	tp->dmt_stree[lp] = newval;
2875 
2876 	/* bubble the new value up the tree as required.
2877 	 */
2878 	for (k = 0; k < le32_to_cpu(tp->dmt_height); k++) {
2879 		if (lp == 0)
2880 			break;
2881 
2882 		/* get the index of the first leaf of the 4 leaf
2883 		 * group containing the specified leaf (leafno).
2884 		 */
2885 		lp = ((lp - 1) & ~0x03) + 1;
2886 
2887 		/* get the index of the parent of this 4 leaf group.
2888 		 */
2889 		pp = (lp - 1) >> 2;
2890 
2891 		/* determine the maximum of the 4 leaves.
2892 		 */
2893 		max = TREEMAX(&tp->dmt_stree[lp]);
2894 
2895 		/* if the maximum of the 4 is the same as the
2896 		 * parent's value, we're done.
2897 		 */
2898 		if (tp->dmt_stree[pp] == max)
2899 			break;
2900 
2901 		/* parent gets new value.
2902 		 */
2903 		tp->dmt_stree[pp] = max;
2904 
2905 		/* parent becomes leaf for next go-round.
2906 		 */
2907 		lp = pp;
2908 	}
2909 }
2910 
2911 
2912 /*
2913  * NAME:	dbFindLeaf()
2914  *
2915  * FUNCTION:	search a dmtree_t for sufficient free blocks, returning
2916  *		the index of a leaf describing the free blocks if
2917  *		sufficient free blocks are found.
2918  *
2919  *		the search starts at the top of the dmtree_t tree and
2920  *		proceeds down the tree to the leftmost leaf with sufficient
2921  *		free space.
2922  *
2923  * PARAMETERS:
2924  *	tp	- pointer to the tree to be searched.
2925  *	l2nb	- log2 number of free blocks to search for.
2926  *	leafidx	- return pointer to be set to the index of the leaf
2927  *		  describing at least l2nb free blocks if sufficient
2928  *		  free blocks are found.
2929  *	is_ctl	- determines if the tree is of type ctl
2930  *
2931  * RETURN VALUES:
2932  *	0	- success
2933  *	-ENOSPC	- insufficient free blocks.
2934  */
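/*
 * traversal sketch: the root is stree[0] and the children of node i are
 * stree[4*i + 1 .. 4*i + 4]; e.g. if node 3 is the leftmost child of the
 * root with enough space, the next level examined starts at index
 * (3 << 2) + 1 == 13, i.e. the four children of node 3.
 */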
2935 static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl)
2936 {
2937 	int ti, n = 0, k, x = 0;
2938 	int max_size, max_idx;
2939 
2940 	max_size = is_ctl ? CTLTREESIZE : TREESIZE;
2941 	max_idx = is_ctl ? LPERCTL : LPERDMAP;
2942 
2943 	/* first check the root of the tree to see if there is
2944 	 * sufficient free space.
2945 	 */
2946 	if (l2nb > tp->dmt_stree[ROOT])
2947 		return -ENOSPC;
2948 
2949 	/* sufficient free space available. now search down the tree
2950 	 * starting at the next level for the leftmost leaf that
2951 	 * describes sufficient free space.
2952 	 */
2953 	for (k = le32_to_cpu(tp->dmt_height), ti = 1;
2954 	     k > 0; k--, ti = ((ti + n) << 2) + 1) {
2955 		/* search the four nodes at this level, starting from
2956 		 * the left.
2957 		 */
2958 		for (x = ti, n = 0; n < 4; n++) {
2959 			/* sufficient free space found.  move to the next
2960 			 * level (or quit if this is the last level).
2961 			 */
2962 			if (x + n > max_size)
2963 				return -ENOSPC;
2964 			if (l2nb <= tp->dmt_stree[x + n])
2965 				break;
2966 		}
2967 
2968 		/* better have found something since the higher
2969 		 * levels of the tree said it was here.
2970 		 */
2971 		assert(n < 4);
2972 	}
2973 	if (le32_to_cpu(tp->dmt_leafidx) >= max_idx)
2974 		return -ENOSPC;
2975 
2976 	/* set the return to the leftmost leaf describing sufficient
2977 	 * free space.
2978 	 */
2979 	*leafidx = x + n - le32_to_cpu(tp->dmt_leafidx);
2980 
2981 	return (0);
2982 }
2983 
2984 
2985 /*
2986  * NAME:	dbFindBits()
2987  *
2988  * FUNCTION:	find a specified number of binary buddy free bits within a
2989  *		dmap bitmap word value.
2990  *
2991  *		this routine searches the bitmap value for (1 << l2nb) free
2992  *		bits at (1 << l2nb) alignments within the value.
2993  *
2994  * PARAMETERS:
2995  *	word	-  dmap bitmap word value.
2996  *	l2nb	-  number of free bits specified as a log2 number.
2997  *
2998  * RETURN VALUES:
2999  *	starting bit number of free bits.
3000  */
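/*
 * worked example: word == 0xf0ffffff and l2nb == 2 gives nb == 4 and
 * mask == 0xf0000000; the complemented word is 0x0f000000, the mask matches
 * at the second 4-bit position, and bitno == 4 is returned.
 */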
3001 static int dbFindBits(u32 word, int l2nb)
3002 {
3003 	int bitno, nb;
3004 	u32 mask;
3005 
3006 	/* get the number of bits.
3007 	 */
3008 	nb = 1 << l2nb;
3009 	assert(nb <= DBWORD);
3010 
3011 	/* complement the word so we can use a mask (i.e. 0s represent
3012 	 * free bits) and compute the mask.
3013 	 */
3014 	word = ~word;
3015 	mask = ONES << (DBWORD - nb);
3016 
3017 	/* scan the word for nb free bits at nb alignments.
3018 	 */
3019 	for (bitno = 0; mask != 0; bitno += nb, mask = (mask >> nb)) {
3020 		if ((mask & word) == mask)
3021 			break;
3022 	}
3023 
3024 	ASSERT(bitno < 32);
3025 
3026 	/* return the bit number.
3027 	 */
3028 	return (bitno);
3029 }
3030 
3031 
3032 /*
3033  * NAME:	dbMaxBud(u8 *cp)
3034  *
3035  * FUNCTION:	determine the largest binary buddy string of free
3036  *		bits within 32-bits of the map.
3037  *
3038  * PARAMETERS:
3039  *	cp	-  pointer to the 32-bit value.
3040  *
3041  * RETURN VALUES:
3042  *	largest binary buddy of free bits within a dmap word.
3043  */
3044 static int dbMaxBud(u8 * cp)
3045 {
3046 	signed char tmp1, tmp2;
3047 
3048 	/* check if the wmap word is all free. if so, the
3049 	 * free buddy size is BUDMIN.
3050 	 */
3051 	if (*((uint *) cp) == 0)
3052 		return (BUDMIN);
3053 
3054 	/* check if the wmap word is half free. if so, the
3055 	 * free buddy size is BUDMIN-1.
3056 	 */
3057 	if (*((u16 *) cp) == 0 || *((u16 *) cp + 1) == 0)
3058 		return (BUDMIN - 1);
3059 
3060 	/* not all free or half free. determine the free buddy
3061 	 * size thru table lookup using quarters of the wmap word.
3062 	 */
3063 	tmp1 = max(budtab[cp[2]], budtab[cp[3]]);
3064 	tmp2 = max(budtab[cp[0]], budtab[cp[1]]);
3065 	return (max(tmp1, tmp2));
3066 }
3067 
3068 
3069 /*
3070  * NAME:	cnttz(uint word)
3071  *
3072  * FUNCTION:	determine the number of trailing zeros within a 32-bit
3073  *		value.
3074  *
3075  * PARAMETERS:
3076  *	value	-  32-bit value to be examined.
3077  *
3078  * RETURN VALUES:
3079  *	count of trailing zeros
3080  */
3081 static int cnttz(u32 word)
3082 {
3083 	int n;
3084 
3085 	for (n = 0; n < 32; n++, word >>= 1) {
3086 		if (word & 0x01)
3087 			break;
3088 	}
3089 
3090 	return (n);
3091 }
3092 
3093 
3094 /*
3095  * NAME:	cntlz(u32 value)
3096  *
3097  * FUNCTION:	determine the number of leading zeros within a 32-bit
3098  *		value.
3099  *
3100  * PARAMETERS:
3101  *	value	-  32-bit value to be examined.
3102  *
3103  * RETURN VALUES:
3104  *	count of leading zeros
3105  */
3106 static int cntlz(u32 value)
3107 {
3108 	int n;
3109 
3110 	for (n = 0; n < 32; n++, value <<= 1) {
3111 		if (value & HIGHORDER)
3112 			break;
3113 	}
3114 	return (n);
3115 }
3116 
3117 
3118 /*
3119  * NAME:	blkstol2(s64 nb)
3120  *
3121  * FUNCTION:	convert a block count to its log2 value. if the block
3122  *		count is not a l2 multiple, it is rounded up to the next
3123  *		larger l2 multiple.
3124  *
3125  * PARAMETERS:
3126  *	nb	-  number of blocks
3127  *
3128  * RETURN VALUES:
3129  *	log2 number of blocks
3130  */
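/*
 * worked example: nb == 100 has its leading bit at 2**6; since the low bits
 * 100 - 64 == 36 are non-zero, the result is rounded up and 7 is returned
 * (i.e. 128 blocks).
 */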
3131 static int blkstol2(s64 nb)
3132 {
3133 	int l2nb;
3134 	s64 mask;		/* meant to be signed */
3135 
3136 	mask = (s64) 1 << (64 - 1);
3137 
3138 	/* count the leading bits.
3139 	 */
3140 	for (l2nb = 0; l2nb < 64; l2nb++, mask >>= 1) {
3141 		/* leading bit found.
3142 		 */
3143 		if (nb & mask) {
3144 			/* determine the l2 value.
3145 			 */
3146 			l2nb = (64 - 1) - l2nb;
3147 
3148 			/* check if we need to round up.
3149 			 */
3150 			if (~mask & nb)
3151 				l2nb++;
3152 
3153 			return (l2nb);
3154 		}
3155 	}
3156 	assert(0);
3157 	return 0;		/* fix compiler warning */
3158 }
3159 
3160 
3161 /*
3162  * NAME:	dbAllocBottomUp()
3163  *
3164  * FUNCTION:	allocate the specified block range from the working block
3165  *		allocation map.
3166  *
3167  *		the blocks will be allocated from the working map one dmap
3168  *		at a time.
3169  *
3170  * PARAMETERS:
3171  *	ip	-  pointer to in-core inode;
3172  *	blkno	-  starting block number of the range to be allocated.
3173  *	nblocks	-  number of blocks to be allocated.
3174  *
3175  * RETURN VALUES:
3176  *	0	- success
3177  *	-EIO	- i/o error
3178  */
3179 int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks)
3180 {
3181 	struct metapage *mp;
3182 	struct dmap *dp;
3183 	int nb, rc;
3184 	s64 lblkno, rem;
3185 	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
3186 	struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
3187 
3188 	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
3189 
3190 	/* block to be allocated better be within the mapsize. */
3191 	ASSERT(nblocks <= bmp->db_mapsize - blkno);
3192 
3193 	/*
3194 	 * allocate the blocks a dmap at a time.
3195 	 */
3196 	mp = NULL;
3197 	for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) {
3198 		/* release previous dmap if any */
3199 		if (mp) {
3200 			write_metapage(mp);
3201 		}
3202 
3203 		/* get the buffer for the current dmap. */
3204 		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
3205 		mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
3206 		if (mp == NULL) {
3207 			IREAD_UNLOCK(ipbmap);
3208 			return -EIO;
3209 		}
3210 		dp = (struct dmap *) mp->data;
3211 
3212 		/* determine the number of blocks to be allocated from
3213 		 * this dmap.
3214 		 */
3215 		nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));
3216 
3217 		/* allocate the blocks. */
3218 		if ((rc = dbAllocDmapBU(bmp, dp, blkno, nb))) {
3219 			release_metapage(mp);
3220 			IREAD_UNLOCK(ipbmap);
3221 			return (rc);
3222 		}
3223 	}
3224 
3225 	/* write the last buffer. */
3226 	write_metapage(mp);
3227 
3228 	IREAD_UNLOCK(ipbmap);
3229 
3230 	return (0);
3231 }
3232 
3233 
3234 static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno,
3235 			 int nblocks)
3236 {
3237 	int rc;
3238 	int dbitno, word, rembits, nb, nwords, wbitno, agno;
3239 	s8 oldroot;
3240 	struct dmaptree *tp = (struct dmaptree *) & dp->tree;
3241 
3242 	/* save the current value of the root (i.e. maximum free string)
3243 	 * of the dmap tree.
3244 	 */
3245 	oldroot = tp->stree[ROOT];
3246 
3247 	/* determine the bit number and word within the dmap of the
3248 	 * starting block.
3249 	 */
3250 	dbitno = blkno & (BPERDMAP - 1);
3251 	word = dbitno >> L2DBWORD;
3252 
3253 	/* block range better be within the dmap */
3254 	assert(dbitno + nblocks <= BPERDMAP);
3255 
3256 	/* allocate the bits of the dmap's words corresponding to the block
3257 	 * range. not all bits of the first and last words may be contained
3258 	 * within the block range.  if this is the case, we'll work against
3259 	 * those words (i.e. partial first and/or last) on an individual basis
3260 	 * (a single pass), allocating the bits of interest by hand and
3261 	 * updating the leaf corresponding to the dmap word. a single pass
3262 	 * will be used for all dmap words fully contained within the
3263 	 * specified range.  within this pass, the bits of all fully contained
3264 	 * dmap words will be marked as allocated in a single shot and the leaves
3265 	 * will be updated. a single leaf may describe the free space of
3266 	 * multiple dmap words, so we may update only a subset of the actual
3267 	 * leaves corresponding to the dmap words of the block range.
3268 	 */
3269 	for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
3270 		/* determine the bit number within the word and
3271 		 * the number of bits within the word.
3272 		 */
3273 		wbitno = dbitno & (DBWORD - 1);
3274 		nb = min(rembits, DBWORD - wbitno);
3275 
3276 		/* check if only part of a word is to be allocated.
3277 		 */
3278 		if (nb < DBWORD) {
3279 			/* allocate (set to 1) the appropriate bits within
3280 			 * this dmap word.
3281 			 */
3282 			dp->wmap[word] |= cpu_to_le32(ONES << (DBWORD - nb)
3283 						      >> wbitno);
3284 
3285 			word++;
3286 		} else {
3287 			/* one or more dmap words are fully contained
3288 			 * within the block range.  determine how many
3289 			 * words and allocate (set to 1) the bits of these
3290 			 * words.
3291 			 */
3292 			nwords = rembits >> L2DBWORD;
3293 			memset(&dp->wmap[word], (int) ONES, nwords * 4);
3294 
3295 			/* determine how many bits */
3296 			nb = nwords << L2DBWORD;
3297 			word += nwords;
3298 		}
3299 	}
3300 
3301 	/* update the free count for this dmap */
3302 	le32_add_cpu(&dp->nfree, -nblocks);
3303 
3304 	/* reconstruct summary tree */
3305 	dbInitDmapTree(dp);
3306 
3307 	BMAP_LOCK(bmp);
3308 
3309 	/* update the highest active allocation group number
3310 	 * if this allocation group lies beyond the current
3311 	 * maximum.
3312 	 */
3313 	agno = blkno >> bmp->db_agl2size;
3314 	if (agno > bmp->db_maxag)
3315 		bmp->db_maxag = agno;
3316 
3317 	/* update the free count for the allocation group and map */
3318 	bmp->db_agfree[agno] -= nblocks;
3319 	bmp->db_nfree -= nblocks;
3320 
3321 	BMAP_UNLOCK(bmp);
3322 
3323 	/* if the root has not changed, done. */
3324 	if (tp->stree[ROOT] == oldroot)
3325 		return (0);
3326 
3327 	/* root changed. bubble the change up to the dmap control pages.
3328 	 * if the adjustment of the upper level control pages fails,
3329 	 * backout the bit allocation (thus making everything consistent).
3330 	 */
3331 	if ((rc = dbAdjCtl(bmp, blkno, tp->stree[ROOT], 1, 0)))
3332 		dbFreeBits(bmp, dp, blkno, nblocks);
3333 
3334 	return (rc);
3335 }
3336 
3337 
3338 /*
3339  * NAME:	dbExtendFS()
3340  *
3341  * FUNCTION:	extend bmap from blkno for nblocks;
3342  *		dbExtendFS() updates bmap ready for dbAllocBottomUp();
3343  *
3344  * L2
3345  *  |
3346  *   L1---------------------------------L1
3347  *    |					 |
3348  *     L0---------L0---------L0		  L0---------L0---------L0
3349  *      |	   |	      |		   |	      |		 |
3350  *	 d0,...,dn  d0,...,dn  d0,...,dn    d0,...,dn  d0,...,dn  d0,.,dm;
3351  * L2L1L0d0,...,dnL0d0,...,dnL0d0,...,dnL1L0d0,...,dnL0d0,...,dnL0d0,..dm
3352  *
3353  * <---old---><----------------------------extend----------------------->
3354  */
3355 int dbExtendFS(struct inode *ipbmap, s64 blkno,	s64 nblocks)
3356 {
3357 	struct jfs_sb_info *sbi = JFS_SBI(ipbmap->i_sb);
3358 	int nbperpage = sbi->nbperpage;
3359 	int i, i0 = true, j, j0 = true, k, n;
3360 	s64 newsize;
3361 	s64 p;
3362 	struct metapage *mp, *l2mp, *l1mp = NULL, *l0mp = NULL;
3363 	struct dmapctl *l2dcp, *l1dcp, *l0dcp;
3364 	struct dmap *dp;
3365 	s8 *l0leaf, *l1leaf, *l2leaf;
3366 	struct bmap *bmp = sbi->bmap;
3367 	int agno, l2agsize, oldl2agsize;
3368 	s64 ag_rem;
3369 
3370 	newsize = blkno + nblocks;
3371 
3372 	jfs_info("dbExtendFS: blkno:%Ld nblocks:%Ld newsize:%Ld",
3373 		 (long long) blkno, (long long) nblocks, (long long) newsize);
3374 
3375 	/*
3376 	 *	initialize bmap control page.
3377 	 *
3378 	 * all the data in bmap control page should exclude
3379 	 * the mkfs hidden dmap page.
3380 	 */
3381 
3382 	/* update mapsize */
3383 	bmp->db_mapsize = newsize;
3384 	bmp->db_maxlevel = BMAPSZTOLEV(bmp->db_mapsize);
3385 
3386 	/* compute new AG size */
3387 	l2agsize = dbGetL2AGSize(newsize);
3388 	oldl2agsize = bmp->db_agl2size;
3389 
3390 	bmp->db_agl2size = l2agsize;
3391 	bmp->db_agsize = (s64)1 << l2agsize;
3392 
3393 	/* compute new number of AG */
3394 	agno = bmp->db_numag;
3395 	bmp->db_numag = newsize >> l2agsize;
3396 	bmp->db_numag += ((u32) newsize % (u32) bmp->db_agsize) ? 1 : 0;
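	/* illustrative example (assumed numbers, not from the original
	 * source): with newsize == 10,000,000 blocks and l2agsize == 17
	 * (an AG size of 131072 blocks), newsize >> l2agsize == 76 full
	 * AGs plus a remainder of 38,528 blocks, so db_numag becomes 77.
	 */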
3397 
3398 	/*
3399 	 *	reconfigure db_agfree[]
3400 	 * from old AG configuration to new AG configuration;
3401 	 *
3402 	 * coalesce contiguous k (newAGSize/oldAGSize) AGs;
3403 	 * i.e., (AGi, ..., AGj) where i = k*n and j = k*(n+1) - 1 to AGn;
3404 	 * note: new AG size = old AG size * (2**x).
3405 	 */
3406 	if (l2agsize == oldl2agsize)
3407 		goto extend;
3408 	k = 1 << (l2agsize - oldl2agsize);
3409 	ag_rem = bmp->db_agfree[0];	/* save agfree[0] */
3410 	for (i = 0, n = 0; i < agno; n++) {
3411 		bmp->db_agfree[n] = 0;	/* init collection point */
3412 
3413 		/* coalesce contiguous k AGs; */
3414 		for (j = 0; j < k && i < agno; j++, i++) {
3415 			/* merge AGi to AGn */
3416 			bmp->db_agfree[n] += bmp->db_agfree[i];
3417 		}
3418 	}
3419 	bmp->db_agfree[0] += ag_rem;	/* restore agfree[0] */
3420 
3421 	for (; n < MAXAG; n++)
3422 		bmp->db_agfree[n] = 0;
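	/* illustrative example (assumed numbers, not from the original
	 * source): if the AG size doubled twice (l2agsize - oldl2agsize
	 * == 2), then k == 4 and old AGs 0-3 are summed into new AG 0,
	 * old AGs 4-7 into new AG 1, and so on; db_agfree[] slots beyond
	 * the new AG count are cleared by the loop above.
	 */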
3423 
3424 	/*
3425 	 * update highest active ag number
3426 	 */
3427 
3428 	bmp->db_maxag = bmp->db_maxag / k;
3429 
3430 	/*
3431 	 *	extend bmap
3432 	 *
3433 	 * update bit maps and corresponding level control pages;
3434 	 * global control page db_nfree, db_agfree[agno], db_maxfreebud;
3435 	 */
3436       extend:
3437 	/* get L2 page */
3438 	p = BMAPBLKNO + nbperpage;	/* L2 page */
3439 	l2mp = read_metapage(ipbmap, p, PSIZE, 0);
3440 	if (!l2mp) {
3441 		jfs_error(ipbmap->i_sb, "L2 page could not be read\n");
3442 		return -EIO;
3443 	}
3444 	l2dcp = (struct dmapctl *) l2mp->data;
3445 
3446 	/* compute start L1 */
3447 	k = blkno >> L2MAXL1SIZE;
3448 	l2leaf = l2dcp->stree + CTLLEAFIND + k;
3449 	p = BLKTOL1(blkno, sbi->l2nbperpage);	/* L1 page */
3450 
3451 	/*
3452 	 * extend each L1 in L2
3453 	 */
3454 	for (; k < LPERCTL; k++, p += nbperpage) {
3455 		/* get L1 page */
3456 		if (j0) {
3457 			/* read in L1 page: (blkno & (MAXL1SIZE - 1)) */
3458 			l1mp = read_metapage(ipbmap, p, PSIZE, 0);
3459 			if (l1mp == NULL)
3460 				goto errout;
3461 			l1dcp = (struct dmapctl *) l1mp->data;
3462 
3463 			/* compute start L0 */
3464 			j = (blkno & (MAXL1SIZE - 1)) >> L2MAXL0SIZE;
3465 			l1leaf = l1dcp->stree + CTLLEAFIND + j;
3466 			p = BLKTOL0(blkno, sbi->l2nbperpage);
3467 			j0 = false;
3468 		} else {
3469 			/* assign/init L1 page */
3470 			l1mp = get_metapage(ipbmap, p, PSIZE, 0);
3471 			if (l1mp == NULL)
3472 				goto errout;
3473 
3474 			l1dcp = (struct dmapctl *) l1mp->data;
3475 
3476 			/* compute start L0 */
3477 			j = 0;
3478 			l1leaf = l1dcp->stree + CTLLEAFIND;
3479 			p += nbperpage;	/* 1st L0 of L1.k */
3480 		}
3481 
3482 		/*
3483 		 * extend each L0 in L1
3484 		 */
3485 		for (; j < LPERCTL; j++) {
3486 			/* get L0 page */
3487 			if (i0) {
3488 				/* read in L0 page: (blkno & (MAXL0SIZE - 1)) */
3489 
3490 				l0mp = read_metapage(ipbmap, p, PSIZE, 0);
3491 				if (l0mp == NULL)
3492 					goto errout;
3493 				l0dcp = (struct dmapctl *) l0mp->data;
3494 
3495 				/* compute start dmap */
3496 				i = (blkno & (MAXL0SIZE - 1)) >>
3497 				    L2BPERDMAP;
3498 				l0leaf = l0dcp->stree + CTLLEAFIND + i;
3499 				p = BLKTODMAP(blkno,
3500 					      sbi->l2nbperpage);
3501 				i0 = false;
3502 			} else {
3503 				/* assign/init L0 page */
3504 				l0mp = get_metapage(ipbmap, p, PSIZE, 0);
3505 				if (l0mp == NULL)
3506 					goto errout;
3507 
3508 				l0dcp = (struct dmapctl *) l0mp->data;
3509 
3510 				/* compute start dmap */
3511 				i = 0;
3512 				l0leaf = l0dcp->stree + CTLLEAFIND;
3513 				p += nbperpage;	/* 1st dmap of L0.j */
3514 			}
3515 
3516 			/*
3517 			 * extend each dmap in L0
3518 			 */
3519 			for (; i < LPERCTL; i++) {
3520 				/*
3521 				 * reconstruct the dmap page, and
3522 				 * initialize corresponding parent L0 leaf
3523 				 */
3524 				if ((n = blkno & (BPERDMAP - 1))) {
3525 					/* read in dmap page: */
3526 					mp = read_metapage(ipbmap, p,
3527 							   PSIZE, 0);
3528 					if (mp == NULL)
3529 						goto errout;
3530 					n = min(nblocks, (s64)BPERDMAP - n);
3531 				} else {
3532 					/* assign/init dmap page */
3533 					mp = read_metapage(ipbmap, p,
3534 							   PSIZE, 0);
3535 					if (mp == NULL)
3536 						goto errout;
3537 
3538 					n = min_t(s64, nblocks, BPERDMAP);
3539 				}
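				/* illustration (assumed values, not from
				 * the original source): if the old size
				 * ends 5000 blocks into this dmap, the
				 * existing page is re-read and only
				 * n == min(nblocks, BPERDMAP - 5000) new
				 * blocks are added to it; otherwise a whole
				 * dmap of up to BPERDMAP blocks is set up.
				 */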
3540 
3541 				dp = (struct dmap *) mp->data;
3542 				*l0leaf = dbInitDmap(dp, blkno, n);
3543 
3544 				bmp->db_nfree += n;
3545 				agno = le64_to_cpu(dp->start) >> l2agsize;
3546 				bmp->db_agfree[agno] += n;
3547 
3548 				write_metapage(mp);
3549 
3550 				l0leaf++;
3551 				p += nbperpage;
3552 
3553 				blkno += n;
3554 				nblocks -= n;
3555 				if (nblocks == 0)
3556 					break;
3557 			}	/* for each dmap in a L0 */
3558 
3559 			/*
3560 			 * build current L0 page from its leaves, and
3561 			 * initialize corresponding parent L1 leaf
3562 			 */
3563 			*l1leaf = dbInitDmapCtl(l0dcp, 0, ++i);
3564 			write_metapage(l0mp);
3565 			l0mp = NULL;
3566 
3567 			if (nblocks)
3568 				l1leaf++;	/* continue for next L0 */
3569 			else {
3570 				/* more than 1 L0 ? */
3571 				if (j > 0)
3572 					break;	/* build L1 page */
3573 				else {
3574 					/* summarize in global bmap page */
3575 					bmp->db_maxfreebud = *l1leaf;
3576 					release_metapage(l1mp);
3577 					release_metapage(l2mp);
3578 					goto finalize;
3579 				}
3580 			}
3581 		}		/* for each L0 in a L1 */
3582 
3583 		/*
3584 		 * build current L1 page from its leaves, and
3585 		 * initialize corresponding parent L2 leaf
3586 		 */
3587 		*l2leaf = dbInitDmapCtl(l1dcp, 1, ++j);
3588 		write_metapage(l1mp);
3589 		l1mp = NULL;
3590 
3591 		if (nblocks)
3592 			l2leaf++;	/* continue for next L1 */
3593 		else {
3594 			/* more than 1 L1 ? */
3595 			if (k > 0)
3596 				break;	/* build L2 page */
3597 			else {
3598 				/* summarize in global bmap page */
3599 				bmp->db_maxfreebud = *l2leaf;
3600 				release_metapage(l2mp);
3601 				goto finalize;
3602 			}
3603 		}
3604 	}			/* for each L1 in a L2 */
3605 
3606 	jfs_error(ipbmap->i_sb, "function has not returned as expected\n");
3607 errout:
3608 	if (l0mp)
3609 		release_metapage(l0mp);
3610 	if (l1mp)
3611 		release_metapage(l1mp);
3612 	release_metapage(l2mp);
3613 	return -EIO;
3614 
3615 	/*
3616 	 *	finalize bmap control page
3617 	 */
3618 finalize:
3619 
3620 	return 0;
3621 }
3622 
3623 
3624 /*
3625  *	dbFinalizeBmap()
3626  */
3627 void dbFinalizeBmap(struct inode *ipbmap)
3628 {
3629 	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
3630 	int actags, inactags, l2nl;
3631 	s64 ag_rem, actfree, inactfree, avgfree;
3632 	int i, n;
3633 
3634 	/*
3635 	 *	finalize bmap control page
3636 	 */
3637 //finalize:
3638 	/*
3639 	 * compute db_agpref: preferred ag to allocate from
3640 	 * (the leftmost ag with average free space in it);
3641 	 */
3642 //agpref:
3643 	/* get the number of active ags and inactive ags */
3644 	actags = bmp->db_maxag + 1;
3645 	inactags = bmp->db_numag - actags;
3646 	ag_rem = bmp->db_mapsize & (bmp->db_agsize - 1);	/* ??? */
3647 
3648 	/* determine how many blocks are in the inactive allocation
3649 	 * groups. in doing this, we must account for the fact that
3650 	 * the rightmost group might be a partial group (i.e. file
3651 	 * system size is not a multiple of the group size).
3652 	 */
3653 	inactfree = (inactags && ag_rem) ?
3654 	    (((s64)inactags - 1) << bmp->db_agl2size) + ag_rem
3655 	    : ((s64)inactags << bmp->db_agl2size);
3656 
3657 	/* determine how many free blocks are in the active
3658 	 * allocation groups plus the average number of free blocks
3659 	 * within the active ags.
3660 	 */
3661 	actfree = bmp->db_nfree - inactfree;
3662 	avgfree = (u32) actfree / (u32) actags;
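	/* worked example (assumed numbers, for illustration only): with
	 * db_numag == 8, db_maxag == 4 and an AG size of 8192 blocks that
	 * evenly divides db_mapsize (ag_rem == 0), actags == 5 and
	 * inactags == 3, so inactfree == 3 * 8192 == 24576; if
	 * db_nfree == 30000, then actfree == 5424 and avgfree == 1084.
	 */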
3663 
3664 	/* if the preferred allocation group does not have at least
3665 	 * average free space, re-establish the preferred group as the
3666 	 * leftmost group with at least average free space.
3667 	 */
3668 	if (bmp->db_agfree[bmp->db_agpref] < avgfree) {
3669 		for (bmp->db_agpref = 0; bmp->db_agpref < actags;
3670 		     bmp->db_agpref++) {
3671 			if (bmp->db_agfree[bmp->db_agpref] >= avgfree)
3672 				break;
3673 		}
3674 		if (bmp->db_agpref >= bmp->db_numag) {
3675 			jfs_error(ipbmap->i_sb,
3676 				  "cannot find ag with average freespace\n");
3677 		}
3678 	}
3679 
3680 	/*
3681 	 * compute db_aglevel, db_agheight, db_agwidth, db_agstart:
3682 	 * an ag is covered by a single dmapctl summary tree at level
3683 	 * db_aglevel, db_agheight levels up from the leaves, with
3684 	 * db_agwidth nodes per ag at that height, starting at index
3685 	 * db_agstart of the summary tree node array;
3686 	 */
3687 	bmp->db_aglevel = BMAPSZTOLEV(bmp->db_agsize);
3688 	l2nl =
3689 	    bmp->db_agl2size - (L2BPERDMAP + bmp->db_aglevel * L2LPERCTL);
3690 	bmp->db_agheight = l2nl >> 1;
3691 	bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheight << 1));
3692 	for (i = 5 - bmp->db_agheight, bmp->db_agstart = 0, n = 1; i > 0;
3693 	     i--) {
3694 		bmp->db_agstart += n;
3695 		n <<= 2;
3696 	}
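	/* worked example (for illustration, assuming a dmap covers 8192
	 * blocks, i.e. L2BPERDMAP == 13, and L2LPERCTL == 10): for an AG
	 * of exactly one dmap (db_agl2size == 13), db_aglevel == 0 and
	 * l2nl == 0, so db_agheight == 0 and db_agwidth == 1; the loop
	 * above then adds 1 + 4 + 16 + 64 + 256, giving db_agstart == 341,
	 * the index of the first leaf of a dmapctl summary tree.
	 */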
3697 
3698 }
3699 
3700 
3701 /*
3702  * NAME:	dbInitDmap()/ujfs_idmap_page()
3703  *
3704  * FUNCTION:	initialize working/persistent bitmap of the dmap page
3705  *		for the specified number of blocks:
3706  *
3707  *		at entry, the bitmaps had been initialized as free (ZEROS);
3708  *		The number of blocks will only account for the actually
3709  *		existing blocks. Blocks which don't actually exist in
3710  *		the aggregate will be marked as allocated (ONES);
3711  *
3712  * PARAMETERS:
3713  *	dp	- pointer to page of map
 *	Blkno	- starting block number covered by this dmap
3714  *	nblocks	- number of blocks this page
3715  *
3716  * RETURNS: max free string at the root of the dmap's summary tree
3717  */
3718 static int dbInitDmap(struct dmap * dp, s64 Blkno, int nblocks)
3719 {
3720 	int blkno, w, b, r, nw, nb, i;
3721 
3722 	/* starting block number within the dmap */
3723 	blkno = Blkno & (BPERDMAP - 1);
3724 
3725 	if (blkno == 0) {
3726 		dp->nblocks = dp->nfree = cpu_to_le32(nblocks);
3727 		dp->start = cpu_to_le64(Blkno);
3728 
3729 		if (nblocks == BPERDMAP) {
3730 			memset(&dp->wmap[0], 0, LPERDMAP * 4);
3731 			memset(&dp->pmap[0], 0, LPERDMAP * 4);
3732 			goto initTree;
3733 		}
3734 	} else {
3735 		le32_add_cpu(&dp->nblocks, nblocks);
3736 		le32_add_cpu(&dp->nfree, nblocks);
3737 	}
3738 
3739 	/* word number containing start block number */
3740 	w = blkno >> L2DBWORD;
3741 
3742 	/*
3743 	 * free the bits corresponding to the block range (ZEROS):
3744 	 * note: not all bits of the first and last words may be contained
3745 	 * within the block range.
3746 	 */
3747 	for (r = nblocks; r > 0; r -= nb, blkno += nb) {
3748 		/* number of bits preceding range to be freed in the word */
3749 		b = blkno & (DBWORD - 1);
3750 		/* number of bits to free in the word */
3751 		nb = min(r, DBWORD - b);
3752 
3753 		/* is partial word to be freed ? */
3754 		if (nb < DBWORD) {
3755 			/* free (set to 0) from the bitmap word */
3756 			dp->wmap[w] &= cpu_to_le32(~(ONES << (DBWORD - nb)
3757 						     >> b));
3758 			dp->pmap[w] &= cpu_to_le32(~(ONES << (DBWORD - nb)
3759 						     >> b));
3760 
3761 			/* skip the word freed */
3762 			w++;
3763 		} else {
3764 			/* free (set to 0) contiguous bitmap words */
3765 			nw = r >> L2DBWORD;
3766 			memset(&dp->wmap[w], 0, nw * 4);
3767 			memset(&dp->pmap[w], 0, nw * 4);
3768 
3769 			/* skip the words freed */
3770 			nb = nw << L2DBWORD;
3771 			w += nw;
3772 		}
3773 	}
3774 
3775 	/*
3776 	 * mark bits following the range to be freed (non-existing
3777 	 * blocks) as allocated (ONES)
3778 	 */
3779 
3780 	if (blkno == BPERDMAP)
3781 		goto initTree;
3782 
3783 	/* the first word beyond the end of existing blocks */
3784 	w = blkno >> L2DBWORD;
3785 
3786 	/* does nblocks fall on a 32-bit boundary ? */
3787 	b = blkno & (DBWORD - 1);
3788 	if (b) {
3789 		/* mark a partial word allocated */
3790 		dp->wmap[w] = dp->pmap[w] = cpu_to_le32(ONES >> b);
3791 		w++;
3792 	}
3793 
3794 	/* set the rest of the words in the page to allocated (ONES) */
3795 	for (i = w; i < LPERDMAP; i++)
3796 		dp->pmap[i] = dp->wmap[i] = cpu_to_le32(ONES);
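	/* worked example (illustration only): for a dmap that starts on a
	 * dmap boundary but only covers nblocks == 100 real blocks, the
	 * freeing loop zeroes words 0-2 and the 4 most significant bits of
	 * word 3, and the code just above then sets the low-order 28 bits
	 * of word 3 (ONES >> 4) and all of words 4..LPERDMAP-1 to ONES,
	 * marking the non-existent blocks as allocated.
	 */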
3797 
3798 	/*
3799 	 * init tree
3800 	 */
3801       initTree:
3802 	return (dbInitDmapTree(dp));
3803 }
3804 
3805 
3806 /*
3807  * NAME:	dbInitDmapTree()/ujfs_complete_dmap()
3808  *
3809  * FUNCTION:	initialize summary tree of the specified dmap:
3810  *
3811  *		at entry, bitmap of the dmap has been initialized;
3812  *
3813  * PARAMETERS:
3814  *	dp	- dmap to complete
3817  *
3818  * RETURNS:	max free string at the root of the tree
3819  */
3820 static int dbInitDmapTree(struct dmap * dp)
3821 {
3822 	struct dmaptree *tp;
3823 	s8 *cp;
3824 	int i;
3825 
3826 	/* init fixed info of tree */
3827 	tp = &dp->tree;
3828 	tp->nleafs = cpu_to_le32(LPERDMAP);
3829 	tp->l2nleafs = cpu_to_le32(L2LPERDMAP);
3830 	tp->leafidx = cpu_to_le32(LEAFIND);
3831 	tp->height = cpu_to_le32(4);
3832 	tp->budmin = BUDMIN;
3833 
3834 	/* init each leaf from corresponding wmap word:
3835 	 * note: leaf is set to NOFREE(-1) if all blocks of corresponding
3836 	 * bitmap word are allocated.
3837 	 */
3838 	cp = tp->stree + le32_to_cpu(tp->leafidx);
3839 	for (i = 0; i < LPERDMAP; i++)
3840 		*cp++ = dbMaxBud((u8 *) & dp->wmap[i]);
3841 
3842 	/* build the dmap's binary buddy summary tree */
3843 	return (dbInitTree(tp));
3844 }
3845 
3846 
3847 /*
3848  * NAME:	dbInitTree()/ujfs_adjtree()
3849  *
3850  * FUNCTION:	initialize binary buddy summary tree of a dmap or dmapctl.
3851  *
3852  *		at entry, the leaves of the tree have been initialized
3853  *		from corresponding bitmap word or root of summary tree
3854  *		of the child control page;
3855  *		configure binary buddy system at the leaf level, then
3856  *		bubble up the values of the leaf nodes up the tree.
3857  *
3858  * PARAMETERS:
3859  *	dtp	- pointer to the dmap or dmapctl summary tree to initialize
3863  *
3864  * RETURNS: max free string at the root of the tree
3865  */
3866 static int dbInitTree(struct dmaptree * dtp)
3867 {
3868 	int l2max, l2free, bsize, nextb, i;
3869 	int child, parent, nparent;
3870 	s8 *tp, *cp, *cp1;
3871 
3872 	tp = dtp->stree;
3873 
3874 	/* Determine the maximum free string possible for the leaves */
3875 	l2max = le32_to_cpu(dtp->l2nleafs) + dtp->budmin;
3876 
3877 	/*
3878 	 * configure the leaf level into binary buddy system
3879 	 *
3880 	 * Try to combine buddies starting with a buddy size of 1
3881 	 * (i.e. two leaves). At a buddy size of 1 two buddy leaves
3882 	 * can be combined if both buddies have a maximum free of l2min;
3883 	 * the combination will result in the left-most buddy leaf having
3884 	 * a maximum free of l2min+1.
3885 	 * After processing all buddies for a given size, process buddies
3886 	 * at the next higher buddy size (i.e. current size * 2) and
3887 	 * the next maximum free (current free + 1).
3888 	 * This continues until the maximum possible buddy combination
3889 	 * yields maximum free.
3890 	 */
3891 	for (l2free = dtp->budmin, bsize = 1; l2free < l2max;
3892 	     l2free++, bsize = nextb) {
3893 		/* get next buddy size == current buddy pair size */
3894 		nextb = bsize << 1;
3895 
3896 		/* scan each adjacent buddy pair at current buddy size */
3897 		for (i = 0, cp = tp + le32_to_cpu(dtp->leafidx);
3898 		     i < le32_to_cpu(dtp->nleafs);
3899 		     i += nextb, cp += nextb) {
3900 			/* coalesce if both adjacent buddies are max free */
3901 			if (*cp == l2free && *(cp + bsize) == l2free) {
3902 				*cp = l2free + 1;	/* left take right */
3903 				*(cp + bsize) = -1;	/* right give left */
3904 			}
3905 		}
3906 	}
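	/* illustrative example (not from the original source): suppose
	 * budmin == 5 and four adjacent leaves all start at 5 (each word
	 * fully free).  the first pass (bsize == 1) turns them into
	 * 6, -1, 6, -1; the second pass (bsize == 2) combines the two 6s
	 * into 7, -1, -1, -1, so a single leaf now advertises a free
	 * buddy of 2^7 == 128 contiguous blocks.
	 */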
3907 
3908 	/*
3909 	 * bubble summary information of leaves up the tree.
3910 	 *
3911 	 * Starting at the leaf node level, the four nodes described by
3912 	 * the higher level parent node are compared for a maximum free and
3913 	 * this maximum becomes the value of the parent node.
3914 	 * once all nodes of the current level have been processed this way,
3915 	 * move up to the next level (the parents become the children) and
3916 	 * repeat the process for that level.
3917 	 */
3918 	for (child = le32_to_cpu(dtp->leafidx),
3919 	     nparent = le32_to_cpu(dtp->nleafs) >> 2;
3920 	     nparent > 0; nparent >>= 2, child = parent) {
3921 		/* get index of 1st node of parent level */
3922 		parent = (child - 1) >> 2;
3923 
3924 		/* set the value of the parent node as the maximum
3925 		 * of the four nodes of the current level.
3926 		 */
3927 		for (i = 0, cp = tp + child, cp1 = tp + parent;
3928 		     i < nparent; i++, cp += 4, cp1++)
3929 			*cp1 = TREEMAX(cp);
3930 	}
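	/* illustration (assuming a dmap tree with 256 leaves starting at
	 * index 85): the first pass fills the 64 parents at indices 21-84,
	 * each with the maximum of its four children; the following passes
	 * fill indices 5-20, then 1-4, and finally the root at index 0.
	 */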
3931 
3932 	return (*tp);
3933 }
3934 
3935 
3936 /*
3937  *	dbInitDmapCtl()
3938  *
3939  * function: initialize dmapctl page
3940  */
3941 static int dbInitDmapCtl(struct dmapctl * dcp, int level, int i)
3942 {				/* start leaf index not covered by range */
3943 	s8 *cp;
3944 
3945 	dcp->nleafs = cpu_to_le32(LPERCTL);
3946 	dcp->l2nleafs = cpu_to_le32(L2LPERCTL);
3947 	dcp->leafidx = cpu_to_le32(CTLLEAFIND);
3948 	dcp->height = cpu_to_le32(5);
3949 	dcp->budmin = L2BPERDMAP + L2LPERCTL * level;
3950 
3951 	/*
3952 	 * initialize the leaves of the current level that are not covered
3953 	 * by the specified input block range (i.e. the leaves that have no
3954 	 * lower level dmapctl or dmap beneath them).
3955 	 */
3956 	cp = &dcp->stree[CTLLEAFIND + i];
3957 	for (; i < LPERCTL; i++)
3958 		*cp++ = NOFREE;
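	/* for illustration (assumed value): if the extended range only
	 * populates the first three children of this dmapctl, i == 3 on
	 * entry to the loop above and leaves 3..LPERCTL-1 are set to
	 * NOFREE, so no allocation is ever attempted beneath them.
	 */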
3959 
3960 	/* build the dmap's binary buddy summary tree */
3961 	return (dbInitTree((struct dmaptree *) dcp));
3962 }
3963 
3964 
3965 /*
3966  * NAME:	dbGetL2AGSize()/ujfs_getagl2size()
3967  *
3968  * FUNCTION:	Determine log2(allocation group size) from aggregate size
3969  *
3970  * PARAMETERS:
3971  *	nblocks	- Number of blocks in aggregate
3972  *
3973  * RETURNS: log2(allocation group size) in aggregate blocks
3974  */
3975 static int dbGetL2AGSize(s64 nblocks)
3976 {
3977 	s64 sz;
3978 	s64 m;
3979 	int l2sz;
3980 
3981 	if (nblocks < BPERDMAP * MAXAG)
3982 		return (L2BPERDMAP);
3983 
3984 	/* round up aggregate size to power of 2 */
3985 	m = ((u64) 1 << (64 - 1));
3986 	for (l2sz = 64; l2sz >= 0; l2sz--, m >>= 1) {
3987 		if (m & nblocks)
3988 			break;
3989 	}
3990 
3991 	sz = (s64) 1 << l2sz;
3992 	if (sz < nblocks)
3993 		l2sz += 1;
3994 
3995 	/* agsize = roundupSize/max_number_of_ag */
3996 	return (l2sz - L2MAXAG);
3997 }
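/*
 * worked example for dbGetL2AGSize() (illustration only, assuming
 * BPERDMAP == 8192, MAXAG == 128 and L2MAXAG == 7): for an aggregate of
 * 10,000,000 blocks the highest set bit gives l2sz == 24 (the size rounds
 * up to 16,777,216), so the function returns 24 - 7 == 17, i.e. an
 * allocation group size of 131072 blocks.
 */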
3998 
3999 
4000 /*
4001  * NAME:	dbMapFileSizeToMapSize()
4002  *
4003  * FUNCTION:	compute number of blocks the block allocation map file
4004  *		can cover from the map file size;
4005  *
4006  * RETURNS:	Number of blocks which can be covered by this block map file;
4007  */
4008 
4009 /*
4010  * maximum number of map pages at each level including control pages
4011  */
4012 #define MAXL0PAGES	(1 + LPERCTL)
4013 #define MAXL1PAGES	(1 + LPERCTL * MAXL0PAGES)
4014 
4015 /*
4016  * convert number of map pages to the zero origin top dmapctl level
4017  */
4018 #define BMAPPGTOLEV(npages)	\
4019 	(((npages) <= 3 + MAXL0PAGES) ? 0 : \
4020 	 ((npages) <= 2 + MAXL1PAGES) ? 1 : 2)
4021 
4022 s64 dbMapFileSizeToMapSize(struct inode * ipbmap)
4023 {
4024 	struct super_block *sb = ipbmap->i_sb;
4025 	s64 nblocks;
4026 	s64 npages, ndmaps;
4027 	int level, i;
4028 	int complete, factor;
4029 
4030 	nblocks = ipbmap->i_size >> JFS_SBI(sb)->l2bsize;
4031 	npages = nblocks >> JFS_SBI(sb)->l2nbperpage;
4032 	level = BMAPPGTOLEV(npages);
4033 
4034 	/* At each level, accumulate the number of dmap pages covered by
4035 	 * the number of full child levels below it;
4036 	 * repeat for the last incomplete child level.
4037 	 */
4038 	ndmaps = 0;
4039 	npages--;		/* skip the first global control page */
4040 	/* skip higher level control pages above top level covered by map */
4041 	npages -= (2 - level);
4042 	npages--;		/* skip top level's control page */
4043 	for (i = level; i >= 0; i--) {
4044 		factor =
4045 		    (i == 2) ? MAXL1PAGES : ((i == 1) ? MAXL0PAGES : 1);
4046 		complete = (u32) npages / factor;
4047 		ndmaps += complete * ((i == 2) ? LPERCTL * LPERCTL :
4048 				      ((i == 1) ? LPERCTL : 1));
4049 
4050 		/* pages in last/incomplete child */
4051 		npages = (u32) npages % factor;
4052 		/* skip incomplete child's level control page */
4053 		npages--;
4054 	}
4055 
4056 	/* convert the number of dmaps into the number of blocks
4057 	 * which can be covered by the dmaps;
4058 	 */
4059 	nblocks = ndmaps << L2BPERDMAP;
4060 
4061 	return (nblocks);
4062 }
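/*
 * worked example for dbMapFileSizeToMapSize() (illustration only, assuming
 * LPERCTL == 1024 and L2BPERDMAP == 13): a map file of 10 pages is level 0,
 * since 10 <= 3 + MAXL0PAGES.  the global control page, the two higher
 * level control pages above the covered level and the L0 control page are
 * skipped, leaving 6 dmap pages, so the map covers 6 << 13 == 49152 blocks.
 */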
4063