xref: /linux/fs/hpfs/alloc.c (revision 498495dba268b20e8eadd7fe93c140c68b6cc9d2)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/hpfs/alloc.c
4  *
5  *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
6  *
7  *  HPFS bitmap operations
8  */
9 
10 #include "hpfs_fn.h"
11 
/*
 * Account one allocated sector in the cached free-sector count.
 * A count of (unsigned)-1 means "unknown" and is left alone; an impossible
 * underflow invalidates the count and reports a filesystem error.
 */
static void hpfs_claim_alloc(struct super_block *s, secno sec)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);

	if (sbi->sb_n_free == (unsigned)-1)
		return;	/* count not tracked */
	if (unlikely(sbi->sb_n_free == 0)) {
		hpfs_error(s, "free count underflow, allocating sector %08x", sec);
		sbi->sb_n_free = -1;
		return;
	}
	sbi->sb_n_free--;
}
24 
/*
 * Account one freed sector in the cached free-sector count.
 * A count of (unsigned)-1 means "unknown" and is left alone; growing past
 * the filesystem size invalidates the count and reports an error.
 */
static void hpfs_claim_free(struct super_block *s, secno sec)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);

	if (sbi->sb_n_free == (unsigned)-1)
		return;	/* count not tracked */
	if (unlikely(sbi->sb_n_free >= sbi->sb_fs_size)) {
		hpfs_error(s, "free count overflow, freeing sector %08x", sec);
		sbi->sb_n_free = -1;
		return;
	}
	sbi->sb_n_free++;
}
37 
/*
 * Account one allocated dnode in the cached directory-band free count.
 * A count of (unsigned)-1 means "unknown" and is left alone; an impossible
 * underflow invalidates the count and reports a filesystem error.
 */
static void hpfs_claim_dirband_alloc(struct super_block *s, secno sec)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);

	if (sbi->sb_n_free_dnodes == (unsigned)-1)
		return;	/* count not tracked */
	if (unlikely(sbi->sb_n_free_dnodes == 0)) {
		hpfs_error(s, "dirband free count underflow, allocating sector %08x", sec);
		sbi->sb_n_free_dnodes = -1;
		return;
	}
	sbi->sb_n_free_dnodes--;
}
50 
/*
 * Account one freed dnode in the cached directory-band free count.
 * A count of (unsigned)-1 means "unknown" and is left alone; growing past
 * the band capacity (one dnode per 4 sectors) invalidates the count.
 */
static void hpfs_claim_dirband_free(struct super_block *s, secno sec)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);

	if (sbi->sb_n_free_dnodes == (unsigned)-1)
		return;	/* count not tracked */
	if (unlikely(sbi->sb_n_free_dnodes >= sbi->sb_dirband_size / 4)) {
		hpfs_error(s, "dirband free count overflow, freeing sector %08x", sec);
		sbi->sb_n_free_dnodes = -1;
		return;
	}
	sbi->sb_n_free_dnodes++;
}
63 
64 /*
65  * Check if a sector is allocated in bitmap
66  * This is really slow. Turned on only if chk==2
67  */
68 
/*
 * Verify that @sec is marked allocated (bit clear — a set bit means "free")
 * in the main bitmap, and, when it lies inside the directory band, also in
 * the dnode bitmap.  Returns 0 if allocated, 1 (after hpfs_error) otherwise.
 */
static int chk_if_allocated(struct super_block *s, secno sec, char *msg)
{
	struct quad_buffer_head qbh;
	__le32 *bmp;
	/* sec >> 14 selects the 16384-sector bitmap; low 14 bits index into it */
	if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail;
	/* bit set would mean the sector is free — that is the error case here */
	if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) {
		hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec);
		goto fail1;
	}
	hpfs_brelse4(&qbh);
	if (sec >= hpfs_sb(s)->sb_dirband_start && sec < hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) {
		/* the directory band is tracked at dnode (4-sector) granularity */
		unsigned ssec = (sec - hpfs_sb(s)->sb_dirband_start) / 4;
		if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto fail;
		if ((le32_to_cpu(bmp[ssec >> 5]) >> (ssec & 0x1f)) & 1) {
			hpfs_error(s, "sector '%s' - %08x not allocated in directory bitmap", msg, sec);
			goto fail1;
		}
		hpfs_brelse4(&qbh);
	}
	return 0;
	fail1:
	hpfs_brelse4(&qbh);
	fail:
	return 1;
}
94 
95 /*
96  * Check if sector(s) have proper number and additionally check if they're
97  * allocated in bitmap.
98  */
99 
/*
 * Validate that the run [start, start+len) lies inside the data area of the
 * filesystem; with sb_chk >= 2 additionally verify every sector is marked
 * allocated.  Returns 0 on success, 1 (after hpfs_error) on any problem.
 */
int hpfs_chk_sectors(struct super_block *s, secno start, int len, char *msg)
{
	secno end = start + len;

	/* reject wrap-around, the reserved area below 0x12, and overruns */
	if (end < start || start < 0x12 || end > hpfs_sb(s)->sb_fs_size) {
		hpfs_error(s, "sector(s) '%s' badly placed at %08x", msg, start);
		return 1;
	}
	if (hpfs_sb(s)->sb_chk >= 2) {
		int i;

		for (i = 0; i < len; i++) {
			if (chk_if_allocated(s, start + i, msg))
				return 1;
		}
	}
	return 0;
}
114 
/*
 * Allocate @n consecutive, n-aligned sectors (n must be 1 or 4) inside the
 * single bitmap that contains @near, requiring at least @forward additional
 * free sectors immediately after the run.  When the high bits of @near are
 * all ones (bs == ~0x3fff) the dnode bitmap is used instead of a disk
 * bitmap.  Returns the first allocated sector (bitmap-relative for the
 * dnode bitmap), or 0 on failure.  Note: in HPFS bitmaps a SET bit means
 * the sector is free.
 */
static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigned forward)
{
	struct quad_buffer_head qbh;
	__le32 *bmp;
	unsigned bs = near & ~0x3fff;		/* base sector of this bitmap */
	unsigned nr = (near & 0x3fff) & ~(n - 1);	/* n-aligned in-bitmap hint */
	/*unsigned mnr;*/
	unsigned i, q;
	int a, b;
	secno ret = 0;
	if (n != 1 && n != 4) {
		hpfs_error(s, "Bad allocation size: %d", n);
		return 0;
	}
	if (bs != ~0x3fff) {
		if (!(bmp = hpfs_map_bitmap(s, near >> 14, &qbh, "aib"))) goto uls;
	} else {
		if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto uls;
	}
	/* fast path: the hint position itself is free (with forward slack) */
	if (!tstbits(bmp, nr, n + forward)) {
		ret = bs + nr;
		goto rt;
	}
	/* scan forward from the hint; once we leave the hint's 32-bit word,
	   wrap back to the word start and scan up to the hint (b flags that) */
	q = nr + n; b = 0;
	while ((a = tstbits(bmp, q, n + forward)) != 0) {
		q += a;		/* a = offset of the first busy bit + 1 */
		if (n != 1) q = ((q-1)&~(n-1))+n;	/* re-align q to n */
		if (!b) {
			if (q>>5 != nr>>5) {
				b = 1;
				q = nr & 0x1f;
			}
		} else if (q > nr) break;
	}
	if (!a) {
		ret = bs + q;
		goto rt;
	}
	/* slow path: walk all 512 words of the bitmap, starting at the hint's
	   word and wrapping around */
	nr >>= 5;
	/*for (i = nr + 1; i != nr; i++, i &= 0x1ff) */
	i = nr;
	do {
		if (!le32_to_cpu(bmp[i])) goto cont;	/* no free bit in word */
		/* for long requests only completely free words are worth it */
		if (n + forward >= 0x3f && le32_to_cpu(bmp[i]) != 0xffffffff) goto cont;
		q = i<<5;
		if (i > 0) {
			/* extend the candidate run backwards over free bits
			   spilling in from the end of the previous word */
			unsigned k = le32_to_cpu(bmp[i-1]);
			while (k & 0x80000000) {
				q--; k <<= 1;
			}
		}
		if (n != 1) q = ((q-1)&~(n-1))+n;
		while ((a = tstbits(bmp, q, n + forward)) != 0) {
			q += a;
			if (n != 1) q = ((q-1)&~(n-1))+n;
			if (q>>5 > i) break;
		}
		if (!a) {
			ret = bs + q;
			goto rt;
		}
		cont:
		i++, i &= 0x1ff;	/* wrap at 512 words = 16384 bits */
	} while (i != nr);
	rt:
	if (ret) {
		/* paranoia (sb_chk): confirm the chosen bits really are free
		   and inside this bitmap before clearing them */
		if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (le32_to_cpu(bmp[(ret & 0x3fff) >> 5]) | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) {
			hpfs_error(s, "Allocation doesn't work! Wanted %d, allocated at %08x", n, ret);
			ret = 0;
			goto b;
		}
		/* clear the n bits = mark the sectors allocated */
		bmp[(ret & 0x3fff) >> 5] &= cpu_to_le32(~(((1 << n) - 1) << (ret & 0x1f)));
		hpfs_mark_4buffers_dirty(&qbh);
	}
	b:
	hpfs_brelse4(&qbh);
	uls:
	return ret;
}
194 
195 /*
196  * Allocation strategy:	1) search place near the sector specified
197  *			2) search bitmap where free sectors last found
198  *			3) search all bitmaps
199  *			4) search all bitmaps ignoring number of pre-allocated
200  *				sectors
201  */
202 
/*
 * Allocate @n consecutive sectors somewhere on the filesystem, preferring
 * the area near @near and trying to keep @forward more sectors free after
 * the run for future growth.  A negative @forward requests that those
 * forward sectors be allocated (preallocated) immediately.  Returns the
 * first allocated sector or 0 on failure.
 */
secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward)
{
	secno sec;
	int i;
	unsigned n_bmps;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	int f_p = 0;	/* "forced prealloc": set when caller passed forward < 0 */
	int near_bmp;
	if (forward < 0) {
		forward = -forward;
		f_p = 1;
	}
	n_bmps = (sbi->sb_fs_size + 0x4000 - 1) >> 14;	/* number of disk bitmaps */
	if (near && near < sbi->sb_fs_size) {
		/* strategy 1: allocate right next to the hint */
		if ((sec = alloc_in_bmp(s, near, n, f_p ? forward : forward/4))) goto ret;
		near_bmp = near >> 14;
	} else near_bmp = n_bmps / 2;	/* no usable hint: start in the middle */
	/*
	if (b != -1) {
		if ((sec = alloc_in_bmp(s, b<<14, n, f_p ? forward : forward/2))) {
			b &= 0x0fffffff;
			goto ret;
		}
		if (b > 0x10000000) if ((sec = alloc_in_bmp(s, (b&0xfffffff)<<14, n, f_p ? forward : 0))) goto ret;
	*/
	if (!f_p) if (forward > sbi->sb_max_fwd_alloc) forward = sbi->sb_max_fwd_alloc;
	less_fwd:
	/* strategies 2+3: fan out from near_bmp across all bitmaps */
	for (i = 0; i < n_bmps; i++) {
		if (near_bmp+i < n_bmps && ((sec = alloc_in_bmp(s, (near_bmp+i) << 14, n, forward)))) {
			sbi->sb_c_bitmap = near_bmp+i;	/* remember where space was found */
			goto ret;
		}
		if (!forward) {
			/* no forward requirement: also search backwards */
			if (near_bmp-i-1 >= 0 && ((sec = alloc_in_bmp(s, (near_bmp-i-1) << 14, n, forward)))) {
				sbi->sb_c_bitmap = near_bmp-i-1;
				goto ret;
			}
		} else {
			/* with forward requirement: wrap around past the last bitmap */
			if (near_bmp+i >= n_bmps && ((sec = alloc_in_bmp(s, (near_bmp+i-n_bmps) << 14, n, forward)))) {
				sbi->sb_c_bitmap = near_bmp+i-n_bmps;
				goto ret;
			}
		}
		/* early on, retry the bitmap where free space was last found */
		if (i == 1 && sbi->sb_c_bitmap != -1 && ((sec = alloc_in_bmp(s, (sbi->sb_c_bitmap) << 14, n, forward)))) {
			goto ret;
		}
	}
	if (!f_p) {
		/* strategy 4: halve the forward-slack requirement and rescan */
		if (forward) {
			sbi->sb_max_fwd_alloc = forward * 3 / 4;
			forward /= 2;
			goto less_fwd;
		}
	}
	sec = 0;
	ret:
	if (sec) {
		/* account each of the n sectors just taken */
		i = 0;
		do
			hpfs_claim_alloc(s, sec + i);
		while (unlikely(++i < n));
	}
	if (sec && f_p) {
		/* forced prealloc: grab the forward sectors right away too */
		for (i = 0; i < forward; i++) {
			if (!hpfs_alloc_if_possible(s, sec + n + i)) {
				hpfs_error(s, "Prealloc doesn't work! Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i);
				sec = 0;
				break;
			}
		}
	}
	return sec;
}
276 
/*
 * Allocate one dnode (4 sectors) from the directory band, near the sector
 * hint @near.  Returns the first sector of the dnode, or 0 when the band
 * has no space left.
 */
static secno alloc_in_dirband(struct super_block *s, secno near)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	unsigned nr = near;
	secno sec;

	/* clamp the hint into the band, then convert to a dnode index */
	if (nr < sbi->sb_dirband_start)
		nr = sbi->sb_dirband_start;
	if (nr >= sbi->sb_dirband_start + sbi->sb_dirband_size)
		nr = sbi->sb_dirband_start + sbi->sb_dirband_size - 4;
	nr = (nr - sbi->sb_dirband_start) >> 2;
	/* high bits all set select the dnode bitmap in alloc_in_bmp() */
	sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0);
	if (!sec)
		return 0;
	hpfs_claim_dirband_alloc(s, sec);
	/* map the dnode-bitmap index back to an absolute sector number */
	return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start;
}
293 
294 /* Alloc sector if it's free */
295 
/*
 * Allocate the specific sector @sec if it is currently free.
 * Returns 1 when the sector was free and is now allocated, 0 otherwise
 * (already allocated, or the bitmap could not be mapped).
 */
int hpfs_alloc_if_possible(struct super_block *s, secno sec)
{
	struct quad_buffer_head qbh;
	__le32 *bmp;
	unsigned idx, mask;

	bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip");
	if (!bmp)
		return 0;
	idx = (sec & 0x3fff) >> 5;
	mask = 1 << (sec & 0x1f);
	if (!(le32_to_cpu(bmp[idx]) & mask)) {
		/* bit already clear: the sector is taken */
		hpfs_brelse4(&qbh);
		return 0;
	}
	bmp[idx] &= cpu_to_le32(~mask);		/* clear bit = allocate */
	hpfs_mark_4buffers_dirty(&qbh);
	hpfs_brelse4(&qbh);
	hpfs_claim_alloc(s, sec);
	return 1;
}
312 
313 /* Free sectors in bitmaps */
314 
/*
 * Free @n consecutive sectors starting at @sec by setting their bits in the
 * disk bitmap(s).  Stops with hpfs_error on an attempt to free a reserved
 * sector or a sector that is already free.
 */
void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n)
{
	struct quad_buffer_head qbh;
	__le32 *bmp;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	/*pr_info("2 - ");*/
	if (!n) return;
	if (sec < 0x12) {
		hpfs_error(s, "Trying to free reserved sector %08x", sec);
		return;
	}
	/* freed space makes larger forward preallocations worthwhile again */
	sbi->sb_max_fwd_alloc += n > 0xffff ? 0xffff : n;
	if (sbi->sb_max_fwd_alloc > 0xffffff) sbi->sb_max_fwd_alloc = 0xffffff;
	new_map:
	if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "free"))) {
		return;
	}
	new_tst:
	/* a set bit means "free": finding it already set is a double free */
	if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f) & 1)) {
		hpfs_error(s, "sector %08x not allocated", sec);
		hpfs_brelse4(&qbh);
		return;
	}
	bmp[(sec & 0x3fff) >> 5] |= cpu_to_le32(1 << (sec & 0x1f));
	hpfs_claim_free(s, sec);
	if (!--n) {
		hpfs_mark_4buffers_dirty(&qbh);
		hpfs_brelse4(&qbh);
		return;
	}
	/* crossed into the next 16384-sector bitmap: flush and remap */
	if (!(++sec & 0x3fff)) {
		hpfs_mark_4buffers_dirty(&qbh);
		hpfs_brelse4(&qbh);
		goto new_map;
	}
	goto new_tst;
}
352 
353 /*
354  * Check if there are at least n free dnodes on the filesystem.
355  * Called before adding to dnode. If we run out of space while
356  * splitting dnodes, it would corrupt dnode tree.
357  */
358 
/*
 * Return 0 when at least @n free dnodes are available (in the dnode bitmap
 * or as 4-sector aligned free runs in the disk bitmaps), 1 otherwise.
 */
int hpfs_check_free_dnodes(struct super_block *s, int n)
{
	int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14;
	int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff;
	int i, j;
	__le32 *bmp;
	struct quad_buffer_head qbh;
	/* first count free dnodes in the directory-band (dnode) bitmap */
	if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
		for (j = 0; j < 512; j++) {
			unsigned k;
			if (!le32_to_cpu(bmp[j])) continue;
			/* each set bit is one free dnode */
			for (k = le32_to_cpu(bmp[j]); k; k >>= 1) if (k & 1) if (!--n) {
				hpfs_brelse4(&qbh);
				return 0;
			}
		}
	}
	hpfs_brelse4(&qbh);
	i = 0;
	/* then scan the disk bitmaps, starting with the cached one if valid */
	if (hpfs_sb(s)->sb_c_bitmap != -1) {
		bmp = hpfs_map_bitmap(s, b, &qbh, "chkdn1");
		goto chk_bmp;
	}
	chk_next:
	if (i == b) i++;	/* skip b: the cached bitmap was scanned first */
	if (i >= n_bmps) return 1;	/* every bitmap scanned, still short */
	bmp = hpfs_map_bitmap(s, i, &qbh, "chkdn2");
	chk_bmp:
	if (bmp) {
		for (j = 0; j < 512; j++) {
			u32 k;
			if (!le32_to_cpu(bmp[j])) continue;
			/* a dnode needs 4 aligned free sectors: test each nibble */
			for (k = 0xf; k; k <<= 4)
				if ((le32_to_cpu(bmp[j]) & k) == k) {
					if (!--n) {
						hpfs_brelse4(&qbh);
						return 0;
					}
				}
		}
		hpfs_brelse4(&qbh);
	}
	i++;
	goto chk_next;
}
404 
/*
 * Free one dnode: inside the directory band, set its bit in the dnode
 * bitmap; outside the band, free its 4 sectors in the disk bitmaps.
 */
void hpfs_free_dnode(struct super_block *s, dnode_secno dno)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);

	if (sbi->sb_chk && (dno & 3)) {
		hpfs_error(s, "hpfs_free_dnode: dnode %08x not aligned", dno);
		return;
	}
	if (dno >= sbi->sb_dirband_start &&
	    dno < sbi->sb_dirband_start + sbi->sb_dirband_size) {
		struct quad_buffer_head qbh;
		__le32 *bmp;
		/* dnode bitmap tracks the band at 4-sector granularity */
		unsigned ssec = (dno - sbi->sb_dirband_start) / 4;

		bmp = hpfs_map_dnode_bitmap(s, &qbh);
		if (!bmp)
			return;
		bmp[ssec >> 5] |= cpu_to_le32(1 << (ssec & 0x1f));
		hpfs_mark_4buffers_dirty(&qbh);
		hpfs_brelse4(&qbh);
		hpfs_claim_dirband_free(s, dno);
	} else {
		hpfs_free_sectors(s, dno, 4);
	}
}
427 
/*
 * Allocate and initialize a new empty dnode near @near.  On success *dno
 * receives its sector number and the 4-sector buffer is returned mapped
 * through *qbh; returns NULL on failure.
 */
struct dnode *hpfs_alloc_dnode(struct super_block *s, secno near,
			 dnode_secno *dno, struct quad_buffer_head *qbh)
{
	struct dnode *d;

	/*
	 * Prefer the directory band while it still has plenty of free
	 * dnodes; otherwise try the general area first.  Either way, the
	 * other source is the fallback.
	 */
	if (hpfs_get_free_dnodes(s) > FREE_DNODES_ADD) {
		*dno = alloc_in_dirband(s, near);
		if (!*dno)
			*dno = hpfs_alloc_sector(s, near, 4, 0);
	} else {
		*dno = hpfs_alloc_sector(s, near, 4, 0);
		if (!*dno)
			*dno = alloc_in_dirband(s, near);
	}
	if (!*dno)
		return NULL;
	d = hpfs_get_4sectors(s, *dno, qbh);
	if (!d) {
		hpfs_free_dnode(s, *dno);
		return NULL;
	}
	memset(d, 0, 2048);
	d->magic = cpu_to_le32(DNODE_MAGIC);
	d->first_free = cpu_to_le32(52);
	/* pre-built empty dirent area — byte meanings follow the on-disk
	   dirent layout; see the HPFS structure definitions */
	d->dirent[0] = 32;
	d->dirent[2] = 8;
	d->dirent[30] = 1;
	d->dirent[31] = 255;
	d->self = cpu_to_le32(*dno);
	return d;
}
453 
/*
 * Allocate and initialize a new empty fnode near @near.  On success *fno
 * receives its sector number and the buffer is returned mapped through
 * *bh; returns NULL on failure.
 */
struct fnode *hpfs_alloc_fnode(struct super_block *s, secno near, fnode_secno *fno,
			  struct buffer_head **bh)
{
	struct fnode *f;

	*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD);
	if (!*fno)
		return NULL;
	f = hpfs_get_sector(s, *fno, bh);
	if (!f) {
		/* could not map the sector: give it back */
		hpfs_free_sectors(s, *fno, 1);
		return NULL;
	}
	memset(f, 0, 512);
	f->magic = cpu_to_le32(FNODE_MAGIC);
	f->ea_offs = cpu_to_le16(0xc4);
	f->btree.n_free_nodes = 8;
	f->btree.first_free = cpu_to_le16(8);
	return f;
}
470 
/*
 * Allocate and initialize a new empty anode near @near.  On success *ano
 * receives its sector number and the buffer is returned mapped through
 * *bh; returns NULL on failure.
 */
struct anode *hpfs_alloc_anode(struct super_block *s, secno near, anode_secno *ano,
			  struct buffer_head **bh)
{
	struct anode *a;

	*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD);
	if (!*ano)
		return NULL;
	a = hpfs_get_sector(s, *ano, bh);
	if (!a) {
		/* could not map the sector: give it back */
		hpfs_free_sectors(s, *ano, 1);
		return NULL;
	}
	memset(a, 0, 512);
	a->magic = cpu_to_le32(ANODE_MAGIC);
	a->self = cpu_to_le32(*ano);
	a->btree.n_free_nodes = 40;
	a->btree.n_used_nodes = 0;
	a->btree.first_free = cpu_to_le16(8);
	return a;
}
488 
/*
 * Find the next run of free (set) bits in @bmp at or after *idx.
 * On success *idx is advanced to the start of the run and its length is
 * returned; returns 0 when no free bit remains before bit 0x4000.
 */
static unsigned find_run(__le32 *bmp, unsigned *idx)
{
	unsigned len;
	/* skip allocated bits; tstbits(..., 1) != 0 means the bit is busy */
	while (tstbits(bmp, *idx, 1)) {
		(*idx)++;
		if (unlikely(*idx >= 0x4000))
			return 0;
	}
	len = 1;
	/* grow the run; presumably tstbits itself bounds-checks indices
	 * >= 0x4000 and returns nonzero there, terminating this loop —
	 * verify against tstbits in hpfs_fn.h */
	while (!tstbits(bmp, *idx + len, 1))
		len++;
	return len;
}
502 
/*
 * Discard the part of the free run [start, start+len) that falls inside
 * [limit_start, limit_end), provided the clipped piece is at least @minlen
 * sectors.  Adds the number of discarded sectors to *result.  Returns 0 on
 * success (including "nothing to do"), -EINTR on a fatal signal, or the
 * error from sb_issue_discard().
 */
static int do_trim(struct super_block *s, secno start, unsigned len, secno limit_start, secno limit_end, unsigned minlen, unsigned *result)
{
	secno end = start + len;
	int err;

	if (fatal_signal_pending(current))
		return -EINTR;
	/* clip the run to the requested window */
	if (start < limit_start)
		start = limit_start;
	if (end > limit_end)
		end = limit_end;
	if (start >= end || end - start < minlen)
		return 0;
	err = sb_issue_discard(s, start, end - start, GFP_NOFS, 0);
	if (err)
		return err;
	*result += end - start;
	return 0;
}
524 
/*
 * FITRIM implementation: walk the dnode bitmap and every disk bitmap the
 * range [start, end) touches, issuing discards for free runs of at least
 * @minlen sectors.  *result accumulates the number of discarded sectors.
 * Returns 0 or the first error encountered (-EROFS, -EIO, -EINTR, ...).
 */
int hpfs_trim_fs(struct super_block *s, u64 start, u64 end, u64 minlen, unsigned *result)
{
	int err = 0;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	unsigned idx, len, start_bmp, end_bmp;
	__le32 *bmp;
	struct quad_buffer_head qbh;

	*result = 0;
	if (!end || end > sbi->sb_fs_size)
		end = sbi->sb_fs_size;
	if (start >= sbi->sb_fs_size)
		return 0;
	if (minlen > 0x4000)
		return 0;	/* sanity cap on the minimum run length */
	if (start < sbi->sb_dirband_start + sbi->sb_dirband_size && end > sbi->sb_dirband_start) {
		/* the range overlaps the directory band: trim via the dnode bitmap */
		hpfs_lock(s);
		if (sb_rdonly(s)) {
			err = -EROFS;
			goto unlock_1;
		}
		if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
			err = -EIO;
			goto unlock_1;
		}
		idx = 0;
		/* each dnode-bitmap bit covers 4 sectors, hence the *4 scaling */
		while ((len = find_run(bmp, &idx)) && !err) {
			err = do_trim(s, sbi->sb_dirband_start + idx * 4, len * 4, start, end, minlen, result);
			idx += len;
		}
		hpfs_brelse4(&qbh);
unlock_1:
		hpfs_unlock(s);
	}
	start_bmp = start >> 14;
	end_bmp = (end + 0x3fff) >> 14;
	/* walk every disk bitmap the range touches, one lock cycle each */
	while (start_bmp < end_bmp && !err) {
		hpfs_lock(s);
		if (sb_rdonly(s)) {
			err = -EROFS;
			goto unlock_2;
		}
		if (!(bmp = hpfs_map_bitmap(s, start_bmp, &qbh, "trim"))) {
			err = -EIO;
			goto unlock_2;
		}
		idx = 0;
		while ((len = find_run(bmp, &idx)) && !err) {
			err = do_trim(s, (start_bmp << 14) + idx, len, start, end, minlen, result);
			idx += len;
		}
		hpfs_brelse4(&qbh);
unlock_2:
		hpfs_unlock(s);
		start_bmp++;
	}
	return err;
}
583