xref: /linux/fs/minix/itree_common.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
// SPDX-License-Identifier: GPL-2.0
/* Generic part */

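/*
 * One element of a lookup path through the block tree: the location of a
 * block pointer (p), a snapshot of its value (key) and the buffer that
 * holds it (bh; NULL for pointers embedded in the inode's i_data).
 */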
typedef struct {
	block_t	*p;
	block_t	key;
	struct buffer_head *bh;
} Indirect;

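/*
 * Serializes updates of block pointers in the tree (splice_branch(),
 * find_shared()) against readers walking a chain (get_branch()).
 */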
static DEFINE_RWLOCK(pointers_lock);

static inline void add_chain(Indirect *p, struct buffer_head *bh, block_t *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

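/*
 * Check that the snapshotted keys in [from, to] still match the pointers
 * they were read from, i.e. nobody changed the branch under us.
 */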
static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}

static inline block_t *block_end(struct buffer_head *bh)
{
	return (block_t *)((char *)bh->b_data + bh->b_size);
}

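/*
 * Walk the chain of indirect blocks for a file block, filling chain[]
 * top-down and reading indirect blocks as needed.  Returns NULL when the
 * whole depth was resolved; otherwise it returns the last valid element,
 * with *err set to 0 for a hole, -EIO for a failed read, or -EAGAIN if
 * the chain changed while we slept in sb_bread() (caller should retry).
 */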
static inline Indirect *get_branch(struct inode *inode,
					int depth,
					int *offsets,
					Indirect chain[DEPTH],
					int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, i_data(inode) + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, block_to_cpu(p->key));
		if (!bh)
			goto failure;
		read_lock(&pointers_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (block_t *)bh->b_data + *++offsets);
		read_unlock(&pointers_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&pointers_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}

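/*
 * Allocate the missing part of a branch: one block per level, the last
 * being the data block itself.  The new indirect blocks are chained
 * together in private buffers but are not yet visible in the tree; on
 * failure everything allocated here is freed again and an error returned.
 */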
static int alloc_branch(struct inode *inode,
			     int num,
			     int *offsets,
			     Indirect *branch)
{
	int n = 0;
	int i;
	int parent = minix_new_block(inode);
	int err = -ENOSPC;

	branch[0].key = cpu_to_block(parent);
	if (parent) for (n = 1; n < num; n++) {
		struct buffer_head *bh;
		/* Allocate the next block */
		int nr = minix_new_block(inode);
		if (!nr)
			break;
		branch[n].key = cpu_to_block(nr);
		bh = sb_getblk(inode->i_sb, parent);
		if (!bh) {
			minix_free_block(inode, nr);
			err = -ENOMEM;
			break;
		}
		lock_buffer(bh);
		memset(bh->b_data, 0, bh->b_size);
		branch[n].bh = bh;
		branch[n].p = (block_t *)bh->b_data + offsets[n];
		*branch[n].p = branch[n].key;
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mmb_mark_buffer_dirty(bh, &minix_i(inode)->i_metadata_bhs);
		parent = nr;
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < n; i++)
		minix_free_block(inode, block_to_cpu(branch[i].key));
	return err;
}

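/*
 * Link a freshly allocated branch into the tree: re-check the splice
 * point under pointers_lock, store the new key there, then update ctime
 * and dirty the containing indirect block (or the inode).  Returns
 * -EAGAIN, freeing the new branch, if the splice point changed or got
 * filled in the meantime.
 */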
static inline int splice_branch(struct inode *inode,
				     Indirect chain[DEPTH],
				     Indirect *where,
				     int num)
{
	int i;

	write_lock(&pointers_lock);

	/* Verify that place we are splicing to is still there and vacant */
	if (!verify_chain(chain, where-1) || *where->p)
		goto changed;

	*where->p = where->key;

	write_unlock(&pointers_lock);

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode_set_ctime_current(inode);

	/* had we spliced it onto indirect block? */
	if (where->bh)
		mmb_mark_buffer_dirty(where->bh,
				      &minix_i(inode)->i_metadata_bhs);

	mark_inode_dirty(inode);
	return 0;

changed:
	write_unlock(&pointers_lock);
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		minix_free_block(inode, block_to_cpu(where[i].key));
	return -EAGAIN;
}

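/*
 * Map a file block to a device block for the generic buffer/page code.
 * Looks the block up via get_branch(); if it is a hole and 'create' is
 * set, the missing part of the branch is allocated and spliced in,
 * retrying from scratch when a concurrent truncate invalidated the chain.
 */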
static int get_block(struct inode *inode, sector_t block,
			struct buffer_head *bh, int create)
{
	int err = -EIO;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	int left;
	int depth = block_to_path(inode, block, offsets);

	if (depth == 0)
		goto out;

reread:
	partial = get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
got_it:
		map_bh(bh, inode->i_sb, block_to_cpu(chain[depth-1].key));
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it. Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	left = (chain + depth) - partial;
	err = alloc_branch(inode, left, offsets+(partial-chain), partial);
	if (err)
		goto cleanup;

	if (splice_branch(inode, chain, partial, left) < 0)
		goto changed;

	set_buffer_new(bh);
	goto got_it;

changed:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}

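/* True if all block pointers in [p, q) are zero. */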
static inline int all_zeroes(block_t *p, block_t *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

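/*
 * Find where the branch containing the truncation point diverges from the
 * blocks being cut off.  If a whole subtree can be detached here, its root
 * pointer is cleared under pointers_lock and the old key returned via
 * *top; the return value is the chain position from which the caller
 * clears the tails of the shared indirect blocks.
 */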
static Indirect *find_shared(struct inode *inode,
				int depth,
				int offsets[DEPTH],
				Indirect chain[DEPTH],
				block_t *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = get_branch(inode, k, offsets, chain, &err);

	write_lock(&pointers_lock);
	if (!partial)
		partial = chain + k-1;
	if (!partial->key && *partial->p) {
		write_unlock(&pointers_lock);
		goto no_top;
	}
	for (p=partial;p>chain && all_zeroes((block_t*)p->bh->b_data,p->p);p--)
		;
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&pointers_lock);

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

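/* Free the data blocks pointed to by [p, q) and clear the pointers. */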
static inline void free_data(struct inode *inode, block_t *p, block_t *q)
{
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = block_to_cpu(*p);
		if (nr) {
			*p = 0;
			minix_free_block(inode, nr);
		}
	}
}

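/*
 * Recursively free the subtrees rooted at the pointers in [p, q):
 * 'depth' levels of indirect blocks, then the data blocks underneath.
 */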
static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
{
	struct buffer_head *bh;
	unsigned long nr;

	if (depth--) {
		for ( ; p < q ; p++) {
			nr = block_to_cpu(*p);
			if (!nr)
				continue;
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			if (!bh)
				continue;
			free_branches(inode, (block_t *)bh->b_data,
				      block_end(bh), depth);
			bforget(bh);
			minix_free_block(inode, nr);
			mark_inode_dirty(inode);
		}
	} else
		free_data(inode, p, q);
}

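/*
 * Truncate the block tree to the current i_size: zero the tail of the
 * last partial block, free the data and indirect blocks past the new
 * end of file, and update the timestamps.
 */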
static inline void truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	block_t *idata = i_data(inode);
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	block_t nr = 0;
	int n;
	int first_whole;
	long iblock;

	iblock = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	block_truncate_page(inode->i_mapping, inode->i_size, get_block);

	n = block_to_path(inode, iblock, offsets);
	if (!n)
		return;

	if (n == 1) {
		free_data(inode, idata+offsets[0], idata + DIRECT);
		first_whole = 0;
		goto do_indirects;
	}

	first_whole = offsets[0] + 1 - DIRECT;
	partial = find_shared(inode, n, offsets, chain, &nr);
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mmb_mark_buffer_dirty(partial->bh,
					      &minix_i(inode)->i_metadata_bhs);
		free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		free_branches(inode, partial->p + 1, block_end(partial->bh),
				(chain+n-1) - partial);
		mmb_mark_buffer_dirty(partial->bh,
				      &minix_i(inode)->i_metadata_bhs);
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	while (first_whole < DEPTH-1) {
		nr = idata[DIRECT+first_whole];
		if (nr) {
			idata[DIRECT+first_whole] = 0;
			mark_inode_dirty(inode);
			free_branches(inode, &nr, &nr+1, first_whole+1);
		}
		first_whole++;
	}
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);
}

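/*
 * Worst-case number of filesystem blocks, indirect blocks included,
 * needed to hold 'size' bytes.
 */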
static inline unsigned nblocks(loff_t size, struct super_block *sb)
{
	int k = sb->s_blocksize_bits - 10;
	unsigned blocks, res, direct = DIRECT, i = DEPTH;
	blocks = (size + sb->s_blocksize - 1) >> (BLOCK_SIZE_BITS + k);
	res = blocks;
	while (--i && blocks > direct) {
		blocks -= direct;
		blocks += sb->s_blocksize/sizeof(block_t) - 1;
		blocks /= sb->s_blocksize/sizeof(block_t);
		res += blocks;
		direct = 1;
	}
	return res;
}