Lines Matching +full:block +full:-size (makefs ffs_alloc.c)
/* SPDX-License-Identifier: BSD-3-Clause */
/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(struct inode *ip, daddr_t lbn __unused, daddr_t bpref, int size,
    daddr_t *bnp)
{
        daddr_t bno;
        struct fs *fs = ip->i_fs;
        int cg;

        *bnp = 0;
        if (size > fs->fs_bsize || fragoff(fs, size) != 0) {
                errx(1, "ffs_alloc: bad size: bsize %d size %d",
                    fs->fs_bsize, size);
        }
        if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
                goto nospace;
        if (bpref >= fs->fs_size)
                bpref = 0;
        if (bpref == 0)
                cg = ino_to_cg(fs, ip->i_number);
        else
                cg = dtog(fs, bpref);
        bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg);
        if (bno > 0) {
                if (ip->i_fs->fs_magic == FS_UFS1_MAGIC)
                        ip->i_ffs1_blocks += size / DEV_BSIZE;
                else
                        ip->i_ffs2_blocks += size / DEV_BSIZE;
                *bnp = bno;
                return (0);
        }
nospace:
        return (ENOSPC);
}
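For context, this is roughly how makefs's ffs_balloc() drives ffs_alloc(): it computes a placement preference for the logical block with ffs_blkpref_ufs1() (defined below) and then asks for a full block. A hedged sketch only, not code from this file; alloc_data_block() is a made-up wrapper name, and i_ffs1_db refers to makefs's direct-block-pointer macro.

/*
 * Hypothetical caller (illustrative, not part of ffs_alloc.c):
 * allocate the data block backing logical block `lbn' of `ip'.
 */
static int
alloc_data_block(struct inode *ip, daddr_t lbn, daddr_t *bnp)
{
        struct fs *fs = ip->i_fs;
        daddr_t bpref;

        /* placement preference from the previous direct-block pointers */
        bpref = ffs_blkpref_ufs1(ip, lbn, (int)lbn, &ip->i_ffs1_db[0]);
        return (ffs_alloc(ip, lbn, bpref, fs->fs_bsize, bnp));
}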
/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 */
daddr_t
ffs_blkpref_ufs1(struct inode *ip, daddr_t lbn, int indx, int32_t *bap)
{
        struct fs *fs;
        u_int cg, startcg;
        int avgbfree;

        fs = ip->i_fs;
        if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
                if (lbn < UFS_NDADDR + NINDIR(fs)) {
                        cg = ino_to_cg(fs, ip->i_number);
                        return (fs->fs_fpg * cg + fs->fs_frag);
                }
                /*
                 * Find a cylinder with greater than average number of
                 * unused data blocks.
                 */
                if (indx == 0 || bap[indx - 1] == 0)
                        startcg =
                            ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
                else
                        startcg = dtog(fs,
                            ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
                startcg %= fs->fs_ncg;
                avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
                for (cg = startcg; cg < fs->fs_ncg; cg++)
                        if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
                                return (fs->fs_fpg * cg + fs->fs_frag);
                for (cg = 0; cg < startcg; cg++)
                        if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
                                return (fs->fs_fpg * cg + fs->fs_frag);
                return (0);
        }
        /*
         * We just always try to lay things out contiguously.
         */
        return ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}
daddr_t
ffs_blkpref_ufs2(struct inode *ip, daddr_t lbn, int indx, int64_t *bap)
{
        struct fs *fs;
        u_int cg, startcg;
        int avgbfree;

        fs = ip->i_fs;
        if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
                if (lbn < UFS_NDADDR + NINDIR(fs)) {
                        cg = ino_to_cg(fs, ip->i_number);
                        return (fs->fs_fpg * cg + fs->fs_frag);
                }
                /*
                 * Find a cylinder with greater than average number of
                 * unused data blocks.
                 */
                if (indx == 0 || bap[indx - 1] == 0)
                        startcg =
                            ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
                else
                        startcg = dtog(fs,
                            ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
                startcg %= fs->fs_ncg;
                avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
                for (cg = startcg; cg < fs->fs_ncg; cg++)
                        if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
                                return (fs->fs_fpg * cg + fs->fs_frag);
                        }
                for (cg = 0; cg < startcg; cg++)
                        if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
                                return (fs->fs_fpg * cg + fs->fs_frag);
                        }
                return (0);
        }
        /*
         * We just always try to lay things out contiguously.
         */
        return ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}
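The policy above can be exercised in isolation. The standalone sketch below replays the two branches, contiguous layout after the previous block versus sweeping for a cylinder group with at least the average number of free blocks, against a toy geometry. NCG, FPG, FRAG, MAXBPG, the nbfree[] table, and blkpref() itself are invented stand-ins for the fs fields; the inode-relative start of the sweep is simplified away.

#include <stdio.h>

#define NCG    8       /* cylinder groups (toy geometry) */
#define FPG    1024    /* fragments per cylinder group */
#define FRAG   8       /* fragments per block */
#define MAXBPG 256     /* fs_maxbpg: blocks per file section */

static int nbfree[NCG] = { 10, 2, 40, 5, 90, 7, 64, 33 };

/*
 * Preferred fragment number for block index `indx' of a file, where
 * bap[indx - 1] holds the previous allocation (0 if none).
 */
static long
blkpref(long lbn, int indx, const long *bap)
{
        long total;
        int avgbfree, cg, startcg;

        if (indx % MAXBPG == 0 || bap[indx - 1] == 0) {
                /* New section: sweep for an above-average group. */
                for (cg = 0, total = 0; cg < NCG; cg++)
                        total += nbfree[cg];
                avgbfree = total / NCG;
                if (indx == 0 || bap[indx - 1] == 0)
                        startcg = (int)(lbn / MAXBPG) % NCG;
                else
                        startcg = (int)((bap[indx - 1] + 1) / FPG) % NCG;
                for (cg = startcg; cg < NCG; cg++)
                        if (nbfree[cg] >= avgbfree)
                                return ((long)FPG * cg + FRAG);
                for (cg = 0; cg < startcg; cg++)
                        if (nbfree[cg] >= avgbfree)
                                return ((long)FPG * cg + FRAG);
                return (0);
        }
        /* Otherwise lay the block out right after the previous one. */
        return (bap[indx - 1] + FRAG);
}

int
main(void)
{
        long bap[2] = { 0, 0 };

        bap[0] = blkpref(0, 0, bap);
        bap[1] = blkpref(1, 1, bap);
        printf("block 0 -> frag %ld, block 1 -> frag %ld\n", bap[0], bap[1]);
        return (0);
}

With the table above, block 0 lands in cg 2 (the first group at or above the average of 31 free blocks) and block 1 is placed contiguously, one block later.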
/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * `size': size for data blocks, mode for inodes
 */
static daddr_t
ffs_hashalloc(struct inode *ip, u_int cg, daddr_t pref, int size,
    daddr_t (*allocator)(struct inode *, int, daddr_t, int))
{
        struct fs *fs;
        daddr_t result;
        u_int i, icg = cg;

        fs = ip->i_fs;
        /*
         * 1: preferred cylinder group
         */
        result = (*allocator)(ip, cg, pref, size);
        if (result)
                return (result);
        /*
         * 2: quadratic rehash
         */
        for (i = 1; i < fs->fs_ncg; i *= 2) {
                cg += i;
                if (cg >= fs->fs_ncg)
                        cg -= fs->fs_ncg;
                result = (*allocator)(ip, cg, 0, size);
                if (result)
                        return (result);
        }
        /*
         * 3: brute force search
         */
        cg = (icg + 2) % fs->fs_ncg;
        for (i = 2; i < fs->fs_ncg; i++) {
                result = (*allocator)(ip, cg, 0, size);
                if (result)
                        return (result);
                cg++;
                if (cg == fs->fs_ncg)
                        cg = 0;
        }
        return (0);
}
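The three passes are easiest to see by printing the probe sequence. This standalone sketch replays ffs_hashalloc()'s loop structure for an assumed 16-group file system; probe_order() is invented for the demo and allocates nothing, it only prints group numbers.

#include <stdio.h>

/*
 * Print the cylinder-group probe order used by the hashed search:
 * the preferred group, then cumulative power-of-two offsets, then a
 * linear sweep starting two groups past the preferred one.
 */
static void
probe_order(unsigned icg, unsigned ncg)
{
        unsigned cg = icg, i;

        printf("pass 1 (preferred): %u\n", cg);
        printf("pass 2 (quadratic rehash):");
        for (i = 1; i < ncg; i *= 2) {
                cg += i;
                if (cg >= ncg)
                        cg -= ncg;
                printf(" %u", cg);
        }
        printf("\npass 3 (brute force):");
        cg = (icg + 2) % ncg;
        for (i = 2; i < ncg; i++) {
                printf(" %u", cg);
                cg++;
                if (cg == ncg)
                        cg = 0;
        }
        printf("\n");
}

int
main(void)
{
        probe_order(3, 16);
        return (0);
}

For icg 3 and ncg 16 this prints groups 4, 6, 10, 2 for the rehash pass, then 5 through 2 for the exhaustive pass, so every group is eventually tried.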
/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
        struct cg *cgp;
        struct buf *bp;
        daddr_t bno, blkno;
        int error, frags, allocsiz, i;
        struct fs *fs = ip->i_fs;
        const int needswap = UFS_FSNEEDSWAP(fs);

        if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
                return (0);
        error = bread((void *)ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
            (int)fs->fs_cgsize, NULL, &bp);
        if (error) {
                return (0);
        }
        cgp = (struct cg *)bp->b_data;
        if (!cg_chkmagic_swap(cgp, needswap) ||
            (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
                brelse(bp);
                return (0);
        }
        if (size == fs->fs_bsize) {
                bno = ffs_alloccgblk(ip, bp, bpref);
                bdwrite(bp);
                return (bno);
        }
        /*
         * check to see if any fragments are already available
         * allocsiz is the size which will be allocated, hacking
         * it down to a smaller size if necessary
         */
        frags = numfrags(fs, size);
        for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
                if (cgp->cg_frsum[allocsiz] != 0)
                        break;
        if (allocsiz == fs->fs_frag) {
                /*
                 * no fragments were available, so a block will be
                 * allocated, and hacked up
                 */
                if (cgp->cg_cs.cs_nbfree == 0) {
                        brelse(bp);
                        return (0);
                }
                bno = ffs_alloccgblk(ip, bp, bpref);
                bpref = dtogd(fs, bno);
                for (i = frags; i < fs->fs_frag; i++)
                        setbit(cg_blksfree_swap(cgp, needswap), bpref + i);
                i = fs->fs_frag - frags;
                ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
                fs->fs_cstotal.cs_nffree += i;
                fs->fs_cs(fs, cg).cs_nffree += i;
                fs->fs_fmod = 1;
                ufs_add32(cgp->cg_frsum[i], 1, needswap);
                bdwrite(bp);
                return (bno);
        }
        bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
        for (i = 0; i < frags; i++)
                clrbit(cg_blksfree_swap(cgp, needswap), bno + i);
        ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
        fs->fs_cstotal.cs_nffree -= frags;
        fs->fs_cs(fs, cg).cs_nffree -= frags;
        fs->fs_fmod = 1;
        ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
        if (frags != allocsiz)
                ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
        blkno = cg * fs->fs_fpg + bno;
        bdwrite(bp);
        return (blkno);
}
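The cg_frsum[] updates above maintain a small invariant: when frags fragments are carved out of a free run of length allocsiz, the count of runs of the old length drops by one and, if anything is left over, the count for the remainder length rises by one, while nffree tracks the total. A toy model of just that bookkeeping; struct cgsum, alloc_frags(), and the 8-fragment geometry are invented stand-ins for the cg_cs/cg_frsum fields.

#include <stdio.h>

#define FRAG 8  /* fragments per block (assumed) */

/* frsum[n] counts free runs of exactly n fragments (1..FRAG-1). */
struct cgsum {
        int frsum[FRAG];
        int nffree;
};

/* Carve `frags' fragments out of a free run of length `allocsiz'. */
static void
alloc_frags(struct cgsum *cs, int frags, int allocsiz)
{
        cs->nffree -= frags;
        cs->frsum[allocsiz]--;                  /* the old run is gone... */
        if (frags != allocsiz)
                cs->frsum[allocsiz - frags]++;  /* ...a shorter one remains */
}

int
main(void)
{
        struct cgsum cs = { .frsum = { 0 }, .nffree = 6 };

        cs.frsum[6] = 1;           /* start with one free 6-fragment run */
        alloc_frags(&cs, 2, 6);    /* take 2 frags; a 4-frag run remains */
        printf("nffree %d, frsum[4] %d, frsum[6] %d\n",
            cs.nffree, cs.frsum[4], cs.frsum[6]);
        return (0);
}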
/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(struct inode *ip, struct buf *bp, daddr_t bpref)
{
        struct cg *cgp;
        daddr_t blkno;
        int32_t bno;
        struct fs *fs = ip->i_fs;
        const int needswap = UFS_FSNEEDSWAP(fs);
        u_int8_t *blksfree;

        cgp = (struct cg *)bp->b_data;
        blksfree = cg_blksfree_swap(cgp, needswap);
        if (bpref == 0 || (uint32_t)dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
                bpref = ufs_rw32(cgp->cg_rotor, needswap);
        } else {
                bpref = blknum(fs, bpref);
                bno = dtogd(fs, bpref);
                /*
                 * if the requested block is available, use it
                 */
                if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
                        goto gotit;
        }
        /*
         * Take the next available one in this cylinder group.
         */
        bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
        if (bno < 0)
                return (0);
        cgp->cg_rotor = ufs_rw32(bno, needswap);
gotit:
        blkno = fragstoblks(fs, bno);
        ffs_clrblock(fs, blksfree, (long)blkno);
        ffs_clusteracct(fs, cgp, blkno, -1);
        ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
        fs->fs_cstotal.cs_nbfree--;
        fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
        fs->fs_fmod = 1;
        blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
        return (blkno);
}
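cg_rotor implements next-fit: the search for "the next available block" resumes where the previous allocation succeeded instead of rescanning the map from the start, which keeps successive allocations marching forward through the group. A minimal standalone illustration of that idea; freemap[], rotor, and alloc_next() are invented for the demo.

#include <stdio.h>

#define NBLOCKS 16

static int freemap[NBLOCKS] = {         /* 1 = free */
        0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1
};
static int rotor;                       /* where the last search ended */

/* Next-fit: scan from the rotor, wrapping once around the map. */
static int
alloc_next(void)
{
        int i, b;

        for (i = 0; i < NBLOCKS; i++) {
                b = (rotor + i) % NBLOCKS;
                if (freemap[b]) {
                        freemap[b] = 0;
                        rotor = b;
                        return (b);
                }
        }
        return (-1);                    /* no space */
}

int
main(void)
{
        printf("%d %d %d\n", alloc_next(), alloc_next(), alloc_next());
        return (0);
}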
/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(struct inode *ip, daddr_t bno, long size)
{
        struct cg *cgp;
        struct buf *bp;
        int32_t fragno, cgbno;
        int i, error, cg, blk, frags, bbase;
        struct fs *fs = ip->i_fs;
        const int needswap = UFS_FSNEEDSWAP(fs);

        if (size > fs->fs_bsize || fragoff(fs, size) != 0 ||
            fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
                errx(1, "blkfree: bad size: bno %lld bsize %d size %ld",
                    (long long)bno, fs->fs_bsize, size);
        }
        cg = dtog(fs, bno);
        if (bno >= fs->fs_size) {
                warnx("bad block %lld, ino %ju", (long long)bno,
                    (uintmax_t)ip->i_number);
                return;
        }
        error = bread((void *)ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
            (int)fs->fs_cgsize, NULL, &bp);
        if (error) {
                return;
        }
        cgp = (struct cg *)bp->b_data;
        if (!cg_chkmagic_swap(cgp, needswap)) {
                brelse(bp);
                return;
        }
        cgbno = dtogd(fs, bno);
        if (size == fs->fs_bsize) {
                fragno = fragstoblks(fs, cgbno);
                if (!ffs_isfreeblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
                        errx(1, "blkfree: freeing free block %lld",
                            (long long)bno);
                }
                ffs_setblock(fs, cg_blksfree_swap(cgp, needswap), fragno);
                ffs_clusteracct(fs, cgp, fragno, 1);
                ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
                fs->fs_cstotal.cs_nbfree++;
                fs->fs_cs(fs, cg).cs_nbfree++;
        } else {
                bbase = cgbno - fragnum(fs, cgbno);
                /*
                 * decrement the counts associated with the old frags
                 */
                blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
                ffs_fragacct_swap(fs, blk, cgp->cg_frsum, -1, needswap);
                /*
                 * deallocate the fragment
                 */
                frags = numfrags(fs, size);
                for (i = 0; i < frags; i++) {
                        if (isset(cg_blksfree_swap(cgp, needswap), cgbno + i)) {
                                errx(1, "blkfree: freeing free frag: block %lld",
                                    (long long)(cgbno + i));
                        }
                        setbit(cg_blksfree_swap(cgp, needswap), cgbno + i);
                }
                ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
                fs->fs_cstotal.cs_nffree += i;
                fs->fs_cs(fs, cg).cs_nffree += i;
                /*
                 * add back in counts associated with the new frags
                 */
                blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
                ffs_fragacct_swap(fs, blk, cgp->cg_frsum, 1, needswap);
                /*
                 * if a complete block has been reassembled, account for it
                 */
                fragno = fragstoblks(fs, bbase);
                if (ffs_isblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
                        ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
                        fs->fs_cstotal.cs_nffree -= fs->fs_frag;
                        fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
                        ffs_clusteracct(fs, cgp, fragno, 1);
                        ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
                        fs->fs_cstotal.cs_nbfree++;
                        fs->fs_cs(fs, cg).cs_nbfree++;
                }
        }
        fs->fs_fmod = 1;
        bdwrite(bp);
}
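The reassembly path above is the inverse of fragment allocation: freed fragments are counted as nffree until the covering block becomes entirely free, at which point fs_frag fragment counts are traded for one nbfree block. A toy sketch of that bookkeeping, with map[], free_frags(), and the counters invented for the demo.

#include <stdio.h>

#define FRAG 8  /* fragments per block (assumed) */

static int map[FRAG] = { 1, 1, 1, 0, 0, 1, 1, 1 };  /* frags 3-4 in use */
static int nffree = 6, nbfree = 0;

/*
 * Free `frags' fragments starting at `off', then check whether the
 * whole block has been reassembled (the ffs_isblock() test): if so,
 * the loose fragments are recounted as one free block.
 */
static void
free_frags(int off, int frags)
{
        int i, whole = 1;

        for (i = 0; i < frags; i++)
                map[off + i] = 1;
        nffree += frags;
        for (i = 0; i < FRAG; i++)
                if (!map[i])
                        whole = 0;
        if (whole) {
                nffree -= FRAG; /* no longer counted as loose frags... */
                nbfree++;       /* ...but as one full free block */
        }
}

int
main(void)
{
        free_frags(3, 2);
        printf("nffree %d, nbfree %d\n", nffree, nbfree);  /* 0 and 1 */
        return (0);
}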
/*
 * Scan forward past bytes for which table[*cp] has none of the bits
 * in `mask' set; return how many bytes remain from the match to the
 * end of the region (0 if nothing matched).
 */
static int
scanc(u_int size, const u_char *cp, const u_char table[], int mask)
{
        const u_char *end = &cp[size];

        while (cp < end && (table[*cp] & mask) == 0)
                cp++;
        return (end - cp);
}
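scanc() is a table-driven byte scanner: the caller turns the returned count of remaining bytes into an offset from the end of the map. The standalone sketch below pairs it with a simplified stand-in for fragtbl[8]: here bit (n - 1) of table[b] says byte b contains a run of at least n free bits, and the mask 1 << (allocsiz - 1) coincides with the real mask when fs_frag is 8, since fs_frag % NBBY is then 0. The real fragtbl encoding is more involved; only the idea is kept.

#include <stdio.h>

typedef unsigned char u_char;

static u_char table[256];

static int
scanc(unsigned size, const u_char *cp, const u_char tab[], int mask)
{
        const u_char *end = &cp[size];

        while (cp < end && (tab[*cp] & mask) == 0)
                cp++;
        return (end - cp);
}

/* Toy table: bit (n - 1) set when the byte holds n consecutive 1s. */
static void
build_table(void)
{
        int b, i, n, run, best;

        for (b = 0; b < 256; b++) {
                best = run = 0;
                for (i = 0; i < 8; i++) {
                        run = (b & (1 << i)) ? run + 1 : 0;
                        if (run > best)
                                best = run;
                }
                for (n = 1; n <= best; n++)
                        table[b] |= 1 << (n - 1);
        }
}

int
main(void)
{
        u_char map[] = { 0x11, 0x49, 0x3c, 0xff };  /* free-frag bitmap */
        int allocsiz = 4, loc;

        build_table();
        loc = scanc(4, map, table, 1 << (allocsiz - 1));
        printf("first byte with a %d-frag run: %d from the end\n",
            allocsiz, loc);                         /* map[2] == 0x3c */
        return (0);
}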
/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
 */
static int32_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz)
{
        int32_t bno;
        int start, len, loc, i;
        int blk, field, subfield, pos;
        int ostart, olen;
        const int needswap = UFS_FSNEEDSWAP(fs);

        /*
         * find the fragment by searching through the free block
         * map for an appropriate bit pattern
         */
        if (bpref)
                start = dtogd(fs, bpref) / NBBY;
        else
                start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
        len = howmany(fs->fs_fpg, NBBY) - start;
        ostart = start;
        olen = len;
        loc = scanc((u_int)len,
            (const u_char *)&cg_blksfree_swap(cgp, needswap)[start],
            (const u_char *)fragtbl[fs->fs_frag],
            (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
        if (loc == 0) {
                len = start + 1;
                start = 0;
                loc = scanc((u_int)len,
                    (const u_char *)&cg_blksfree_swap(cgp, needswap)[0],
                    (const u_char *)fragtbl[fs->fs_frag],
                    (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
                if (loc == 0) {
                        errx(1,
                            "ffs_alloccg: map corrupted: start %d len %d offset %d %ld",
                            ostart, olen,
                            ufs_rw32(cgp->cg_freeoff, needswap),
                            (long)cg_blksfree_swap(cgp, needswap) - (long)cgp);
                        /* NOTREACHED */
                }
        }
        bno = (start + len - loc) * NBBY;
        cgp->cg_frotor = ufs_rw32(bno, needswap);
        /*
         * found the byte in the map
         * sift through the bits to find the selected frag
         */
        for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
                blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bno);
                blk <<= 1;
                field = around[allocsiz];
                subfield = inside[allocsiz];
                for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
                        if ((blk & field) == subfield)
                                return (bno + pos);
                        field <<= 1;
                        subfield <<= 1;
                }
        }
        errx(1, "ffs_alloccg: block not in map: bno %lld", (long long)bno);
        return (-1);
}
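The "sift through the bits" loop slides a mask across each fragment map, testing successive positions for a usable run. The real code uses the around[]/inside[] mask pair to match runs of exactly the wanted length so larger runs are not split needlessly; the standalone sketch below simplifies to "at least allocsiz free bits" and invents find_run() and the 8-fragment geometry.

#include <stdio.h>

#define FRAG 8  /* fragments per block (assumed) */

/*
 * Find the first position of `allocsiz' consecutive 1 (free) bits in
 * an 8-bit fragment map, scanning from the least significant bit.
 */
static int
find_run(unsigned blk, int allocsiz)
{
        unsigned field = (1u << allocsiz) - 1;  /* allocsiz ones */
        int pos;

        for (pos = 0; pos <= FRAG - allocsiz; pos++) {
                if ((blk & field) == field)
                        return (pos);
                field <<= 1;                    /* slide the window */
        }
        return (-1);
}

int
main(void)
{
        unsigned blk = 0x78;    /* 01111000: frags 3-6 free */

        printf("run of 3 at %d, run of 5 at %d\n",
            find_run(blk, 3), find_run(blk, 5));  /* 3 and -1 */
        return (0);
}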
/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
void
ffs_clusteracct(struct fs *fs, struct cg *cgp, int32_t blkno, int cnt)
{
        int32_t *sump;
        int32_t *lp;
        u_char *freemapp, *mapp;
        int i, start, end, forw, back, map, bit;
        const int needswap = UFS_FSNEEDSWAP(fs);

        if (fs->fs_contigsumsize <= 0)
                return;
        freemapp = cg_clustersfree_swap(cgp, needswap);
        sump = cg_clustersum_swap(cgp, needswap);
        /*
         * Allocate or clear the actual block.
         */
        if (cnt > 0)
                setbit(freemapp, blkno);
        else
                clrbit(freemapp, blkno);
        /*
         * Find the size of the cluster going forward.
         */
        start = blkno + 1;
        end = start + fs->fs_contigsumsize;
        if ((unsigned)end >= ufs_rw32(cgp->cg_nclusterblks, needswap))
                end = ufs_rw32(cgp->cg_nclusterblks, needswap);
        mapp = &freemapp[start / NBBY];
        map = *mapp++;
        bit = 1 << (start % NBBY);
        for (i = start; i < end; i++) {
                if ((map & bit) == 0)
                        break;
                if ((i & (NBBY - 1)) != (NBBY - 1)) {
                        bit <<= 1;
                } else {
                        map = *mapp++;
                        bit = 1;
                }
        }
        forw = i - start;
        /*
         * Find the size of the cluster going backward.
         */
        start = blkno - 1;
        end = start - fs->fs_contigsumsize;
        if (end < 0)
                end = -1;
        mapp = &freemapp[start / NBBY];
        map = *mapp--;
        bit = 1 << (start % NBBY);
        for (i = start; i > end; i--) {
                if ((map & bit) == 0)
                        break;
                if ((i & (NBBY - 1)) != 0) {
                        bit >>= 1;
                } else {
                        map = *mapp--;
                        bit = 1 << (NBBY - 1);
                }
        }
        back = start - i;
        /*
         * Account for old cluster and the possibly new forward and
         * back clusters.
         */
        i = back + forw + 1;
        if (i > fs->fs_contigsumsize)
                i = fs->fs_contigsumsize;
        ufs_add32(sump[i], cnt, needswap);
        if (back > 0)
                ufs_add32(sump[back], -cnt, needswap);
        if (forw > 0)
                ufs_add32(sump[forw], -cnt, needswap);
        /*
         * Update cluster summary information.
         */
        lp = &sump[fs->fs_contigsumsize];
        for (i = fs->fs_contigsumsize; i > 0; i--)
                if (ufs_rw32(*lp--, needswap) > 0)
                        break;
        fs->fs_maxcluster[ufs_rw32(cgp->cg_cgx, needswap)] = i;
}
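The forward and backward scans exist to keep sump[] consistent: toggling a single block joins or splits the neighboring free runs, so the old forward and backward runs must be uncounted and the combined (or remaining) run counted, with lengths clamped at fs_contigsumsize. A standalone rerun of that accounting on a toy map; clustermap[], CONTIGSUM, and clusteracct() here are invented stand-ins.

#include <stdio.h>

#define NCLUSTER  16    /* cluster map size (assumed) */
#define CONTIGSUM 4     /* fs_contigsumsize (assumed) */

static int clustermap[NCLUSTER];        /* 1 = block free */
static int sump[CONTIGSUM + 1];         /* sump[n]: free runs of length n */

/* Recount the run around `blkno' after a free (cnt 1) or alloc (cnt -1). */
static void
clusteracct(int blkno, int cnt)
{
        int i, forw, back;

        clustermap[blkno] = cnt > 0;
        for (i = blkno + 1, forw = 0; i < NCLUSTER && clustermap[i]; i++)
                forw++;
        for (i = blkno - 1, back = 0; i >= 0 && clustermap[i]; i--)
                back++;
        if (forw > CONTIGSUM)
                forw = CONTIGSUM;
        if (back > CONTIGSUM)
                back = CONTIGSUM;
        i = back + forw + 1;
        if (i > CONTIGSUM)
                i = CONTIGSUM;
        sump[i] += cnt;                 /* the joined or split run */
        if (back > 0)
                sump[back] -= cnt;      /* old backward run */
        if (forw > 0)
                sump[forw] -= cnt;      /* old forward run */
}

int
main(void)
{
        int i;

        clusteracct(5, 1);      /* free block 5: one run of 1 */
        clusteracct(6, 1);      /* free block 6: runs merge into 2 */
        for (i = 1; i <= CONTIGSUM; i++)
                printf("sump[%d] = %d\n", i, sump[i]);
        return (0);
}

After the two frees, sump[1] is back to 0 and sump[2] is 1: the map holds exactly one free run of length two, which is what the summary is meant to report.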