/*
 *  linux/fs/hfsplus/bitmap.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of allocation file
 */

#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

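/*
 * Number of allocation bitmap bits held by one page of the allocation
 * file's page cache: PAGE_CACHE_SIZE bytes of 8 bits each.
 */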
#define PAGE_CACHE_BITS	(PAGE_CACHE_SIZE * 8)

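/*
 * hfsplus_block_allocate() - allocate a run of consecutive blocks
 *
 * Scan the allocation bitmap for up to *max clear bits, starting the
 * search at bit @offset.  @size is the total number of bits in the
 * bitmap, i.e. the volume's total allocation blocks.  On success the
 * first block of the run is returned and *max is updated with the
 * number of blocks actually allocated; on failure (bitmap full or
 * read error) @size is returned.  The bitmap is big-endian: block 0
 * maps to the most significant bit of the first 32-bit word.
 *
 * A caller looking for @wanted blocks anywhere on the volume would do
 * roughly (sketch):
 *
 *	len = wanted;
 *	start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len);
 *	if (start >= sbi->total_blocks)
 *		return -ENOSPC;
 */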
int hfsplus_block_allocate(struct super_block *sb, u32 size,
		u32 offset, u32 *max)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, start, len, n;
	__be32 val;
	int i;

	len = *max;
	if (!len)
		return size;

	hfs_dbg(BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
	if (IS_ERR(page)) {
		start = size;
		goto out;
	}
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	i = offset % 32;
	offset &= ~(PAGE_CACHE_BITS - 1);
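	/*
	 * If the bitmap continues beyond this page, scan the whole
	 * page; otherwise stop at the u32 holding the bitmap's last bit.
	 */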
	if ((size ^ offset) / PAGE_CACHE_BITS)
		end = pptr + PAGE_CACHE_BITS / 32;
	else
		end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;

	/* scan the first partial u32 for zero bits */
	val = *curr;
	if (~val) {
		n = be32_to_cpu(val);
		mask = (1U << 31) >> i;
		for (; i < 32; mask >>= 1, i++) {
			if (!(n & mask))
				goto found;
		}
	}
	curr++;

	/* scan complete u32s for the first zero bit */
	while (1) {
		while (curr < end) {
			val = *curr;
			if (~val) {
				n = be32_to_cpu(val);
				mask = 1U << 31;
				for (i = 0; i < 32; mask >>= 1, i++) {
					if (!(n & mask))
						goto found;
				}
			}
			curr++;
		}
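		/* no clear bit in this page, move on to the next one */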
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		if (offset >= size)
			break;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		curr = pptr = kmap(page);
		if ((size ^ offset) / PAGE_CACHE_BITS)
			end = pptr + PAGE_CACHE_BITS / 32;
		else
			end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
	}
	hfs_dbg(BITMAP, "bitmap full\n");
	start = size;
	goto out;

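/*
 * A clear bit was found: curr, mask and i identify it within the
 * currently mapped page.  Set bits from here on until len blocks are
 * allocated, the end of the bitmap is reached or a set bit is hit.
 */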
found:
	start = offset + (curr - pptr) * 32 + i;
	if (start >= size) {
		hfs_dbg(BITMAP, "bitmap full\n");
		goto out;
	}
	/* do any partial u32 at the start */
	len = min(size - start, len);
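	/*
	 * Set bits within the current u32 up to the word boundary,
	 * bailing out to done if the run completes or hits a set bit
	 * first.  The loop does not count the bit set right at the
	 * boundary, hence the extra --len below.
	 */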
	while (1) {
		n |= mask;
		if (++i >= 32)
			break;
		mask >>= 1;
		if (!--len || n & mask)
			goto done;
	}
	if (!--len)
		goto done;
	*curr++ = cpu_to_be32(n);
	/* do full u32s */
	while (1) {
		while (curr < end) {
			n = be32_to_cpu(*curr);
			if (len < 32)
				goto last;
			if (n) {
				len = 32;
				goto last;
			}
			*curr++ = cpu_to_be32(0xffffffff);
			len -= 32;
		}
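		/* this page is full now; flush it and continue on the next */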
		set_page_dirty(page);
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
last:
	/* do any partial u32 at end */
	mask = 1U << 31;
	for (i = 0; i < len; i++) {
		if (n & mask)
			break;
		n |= mask;
		mask >>= 1;
	}
done:
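	/* write back the last modified u32 and report the run length */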
	*curr = cpu_to_be32(n);
	set_page_dirty(page);
	kunmap(page);
	*max = offset + (curr - pptr) * 32 + i - start;
	sbi->free_blocks -= *max;
	hfsplus_mark_mdb_dirty(sb);
	hfs_dbg(BITMAP, "-> %u,%u\n", start, *max);
out:
	mutex_unlock(&sbi->alloc_mutex);
	return start;
}

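/*
 * hfsplus_block_free() - mark a run of allocation blocks as free
 *
 * Clear @count bits in the allocation bitmap, starting at bit
 * @offset.  Returns 0 on success, -ENOENT if the run extends past the
 * volume's total_blocks, or -EIO if the bitmap cannot be read.
 */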
int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, len, pnr;
	int i;

	/* is there any actual work to be done? */
	if (!count)
		return 0;

	hfs_dbg(BITMAP, "block_free: %u,%u\n", offset, count);
	/* are all of the bits in range? */
	if ((offset + count) > sbi->total_blocks)
		return -ENOENT;

	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	pnr = offset / PAGE_CACHE_BITS;
	page = read_mapping_page(mapping, pnr, NULL);
	if (IS_ERR(page))
		goto kaboom;
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	end = pptr + PAGE_CACHE_BITS / 32;
	len = count;

	/* do any partial u32 at the start */
	i = offset % 32;
	if (i) {
		int j = 32 - i;
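		/*
		 * Preserve the i bits above the freed range; if the
		 * run also ends inside this u32, preserve the bits
		 * below it too.
		 */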
		mask = 0xffffffffU << j;
		if (j > count) {
			mask |= 0xffffffffU >> (i + count);
			*curr++ &= cpu_to_be32(mask);
			goto out;
		}
		*curr++ &= cpu_to_be32(mask);
		count -= j;
	}

	/* do full u32s */
	while (1) {
		while (curr < end) {
			if (count < 32)
				goto done;
			*curr++ = 0;
			count -= 32;
		}
		if (!count)
			break;
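		/* the run continues into the next bitmap page */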
		set_page_dirty(page);
		kunmap(page);
		page = read_mapping_page(mapping, ++pnr, NULL);
		if (IS_ERR(page))
			goto kaboom;
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
done:
	/* do any partial u32 at end */
	if (count) {
		mask = 0xffffffffU >> count;
		*curr &= cpu_to_be32(mask);
	}
out:
	set_page_dirty(page);
	kunmap(page);
	sbi->free_blocks += len;
	hfsplus_mark_mdb_dirty(sb);
	mutex_unlock(&sbi->alloc_mutex);

	return 0;

kaboom:
	pr_crit("unable to mark blocks free: error %ld\n", PTR_ERR(page));
	mutex_unlock(&sbi->alloc_mutex);

	return -EIO;
}