xref: /linux/fs/jffs2/nodemgmt.c (revision 04b43ea325d21c4c98e831383a1b7d540721898a)
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched/signal.h>
#include <linux/string_choices.h>
#include "nodelist.h"
#include "debug.h"

/*
 * Check whether the user is allowed to write.
 */
static int jffs2_rp_can_write(struct jffs2_sb_info *c)
{
	uint32_t avail;
	struct jffs2_mount_opts *opts = &c->mount_opts;

	avail = c->dirty_size + c->free_size + c->unchecked_size +
		c->erasing_size - c->resv_blocks_write * c->sector_size
		- c->nospc_dirty_size;

	if (avail < 2 * opts->rp_size)
		jffs2_dbg(1, "rpsize %u, dirty_size %u, free_size %u, "
			  "erasing_size %u, unchecked_size %u, "
			  "nr_erasing_blocks %u, avail %u, resrv %u\n",
			  opts->rp_size, c->dirty_size, c->free_size,
			  c->erasing_size, c->unchecked_size,
			  c->nr_erasing_blocks, avail, c->nospc_dirty_size);

	if (avail > opts->rp_size)
		return 1;

	/* Always allow root */
	if (capable(CAP_SYS_RESOURCE))
		return 1;

	jffs2_dbg(1, "forbid writing\n");
	return 0;
}
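
/*
 * Worked example (hypothetical numbers): with 64KiB eraseblocks,
 * resv_blocks_write = 5 and rp_size = 1MiB, the "avail" computed above
 * is all free plus reclaimable space, minus the five reserved blocks
 * (320KiB) and the nospc watermark. Once avail falls to rp_size or
 * below, ordinary users get -ENOSPC from the write path, while a
 * CAP_SYS_RESOURCE holder may still write into the reserved pool.
 */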

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);

/**
 *	jffs2_reserve_space - request physical space to write nodes to flash
 *	@c: superblock info
 *	@minsize: Minimum acceptable size of allocation
 *	@len: Returned value of allocation length
 *	@prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *	@sumsize: summary size requested or JFFS2_SUMMARY_NOSUM_SIZE for no summary
 *
 *	Requests a block of physical space on the flash.
 *
 *	Returns: %0 for success, with the actual length of the allocation
 *	stored in @len, or -ENOSPC or another negative error code on failure.
 *
 *	If it returns %0, jffs2_reserve_space() also downs the per-filesystem
 *	allocation semaphore, to prevent more than one allocation from being
 *	active at any time. The semaphore is later released by jffs2_complete_reservation().
 *
 *	jffs2_reserve_space() may trigger garbage collection in order to make room
 *	for the requested allocation.
 */

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
	mutex_lock(&c->alloc_sem);

	jffs2_dbg(1, "%s(): alloc sem got\n", __func__);

	spin_lock(&c->erase_completion_lock);

	/*
	 * Check if the free space is greater than the size of the reserved pool.
	 * If not, only allow root to proceed with writing.
	 */
	if (prio != ALLOC_DELETION && !jffs2_rp_can_write(c)) {
		ret = -ENOSPC;
		goto out;
	}

	/* this needs a little more thought (true <tglx> :)) */
	while (ret == -EAGAIN) {
		while (c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			uint32_t dirty, avail;

			/* Calculate the real dirty size:
			 * dirty_size contains blocks on the erase_pending_list;
			 * those blocks are counted in c->nr_erasing_blocks.
			 * Once a block is actually being erased, it is no longer counted
			 * as dirty_space, but it is still counted in c->nr_erasing_blocks,
			 * so we add erasing_size and subtract
			 * c->nr_erasing_blocks * c->sector_size again.
			 * Blocks on the erasable_list are counted in dirty_size, but not
			 * in c->nr_erasing_blocks. This helps us to force GC and
			 * eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as GC first finishes checking
			 * of nodes.
			 */
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}
				jffs2_dbg(1, "dirty size 0x%08x (including unchecked_size 0x%08x) < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size,
					  c->nospc_dirty_size);

				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. "Possibly available" means that we
			 * don't know whether the unchecked size contains obsoleted nodes,
			 * which could give us some more usable space. This will affect the
			 * sum only once, as GC first finishes checking of nodes.
			 * Return -ENOSPC if the maximum possibly available space is less
			 * than or equal to blocksneeded * sector_size.
			 * This prevents endless GC looping on a filesystem which is nearly
			 * full, even if the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}

				jffs2_dbg(1, "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size);
				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			mutex_unlock(&c->alloc_sem);

			jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks,
				  c->free_size, c->dirty_size, c->wasted_size,
				  c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size +
				  c->wasted_size + c->used_size +
				  c->erasing_size + c->bad_size,
				  c->flash_size);
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);

			if (ret == -EAGAIN) {
				spin_lock(&c->erase_completion_lock);
				if (c->nr_erasing_blocks &&
				    list_empty(&c->erase_pending_list) &&
				    list_empty(&c->erase_complete_list)) {
					DECLARE_WAITQUEUE(wait, current);
					set_current_state(TASK_UNINTERRUPTIBLE);
					add_wait_queue(&c->erase_wait, &wait);
					jffs2_dbg(1, "%s waiting for erase to complete\n",
						  __func__);
					spin_unlock(&c->erase_completion_lock);

					schedule();
					remove_wait_queue(&c->erase_wait, &wait);
				} else
					spin_unlock(&c->erase_completion_lock);
			} else if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			mutex_lock(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
		}
	}

out:
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		mutex_unlock(&c->alloc_sem);
	return ret;
}
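
/*
 * Illustrative sketch only (not part of this file): the intended calling
 * sequence around jffs2_reserve_space(), loosely modelled on the writers
 * in fs/jffs2/write.c. The helper name and its error handling are
 * hypothetical; real callers do considerably more work on write failure.
 */
#if 0
static int example_write_one_node(struct jffs2_sb_info *c,
				  struct jffs2_inode_info *f,
				  const void *node, uint32_t nodelen)
{
	struct jffs2_raw_node_ref *ref;
	uint32_t alloclen, flash_ofs;
	size_t retlen;
	int ret;

	/* Reserve space; on success this takes c->alloc_sem */
	ret = jffs2_reserve_space(c, nodelen, &alloclen, ALLOC_NORMAL,
				  JFFS2_SUMMARY_INODE_SIZE);
	if (ret)
		return ret;

	/* The new node goes at the current write point of c->nextblock */
	flash_ofs = c->nextblock->offset +
		    (c->sector_size - c->nextblock->free_size);

	ret = jffs2_flash_write(c, flash_ofs, nodelen, &retlen, node);
	if (ret || retlen != nodelen) {
		jffs2_complete_reservation(c);
		return ret ? ret : -EIO;
	}

	/* Account for the node we just wrote */
	ref = jffs2_add_physical_node_ref(c, flash_ofs | REF_NORMAL,
					  PAD(nodelen), f->inocache);
	if (IS_ERR(ref)) {
		jffs2_complete_reservation(c);
		return PTR_ERR(ref);
	}

	/* Release c->alloc_sem and poke the GC thread if necessary */
	jffs2_complete_reservation(c);
	return 0;
}
#endif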
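
/*
 * Unlike jffs2_reserve_space() above, this GC-path variant does not take
 * c->alloc_sem (the garbage collector is expected to hold it already) and
 * does not itself trigger further GC passes; it simply retries
 * jffs2_do_reserve_space() until it stops returning -EAGAIN.
 */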
int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret;
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);

	while (true) {
		spin_lock(&c->erase_completion_lock);
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): looping, ret is %d\n",
				  __func__, ret);
		}
		spin_unlock(&c->erase_completion_lock);

		if (ret == -EAGAIN)
			cond_resched();
		else
			break;
	}
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}


/* Classify nextblock (clean, dirty or verydirty) and force selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	if (c->nextblock == NULL) {
		jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n",
			  __func__, jeb->offset);
		return;
	}
	/* Check if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY(jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;
}

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
			jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n",
				  __func__, ejeb->offset);
		}

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_pending_wbuf_list)) {
			jffs2_dbg(1, "%s(): Flushing write buffer\n",
				  __func__);
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				c->nr_erasing_blocks, c->nr_free_blocks,
				str_yes_no(list_empty(&c->erasable_list)),
				str_yes_no(list_empty(&c->erasing_list)),
				str_yes_no(list_empty(&c->erase_pending_list)));
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	/* adjust the write buffer offset, else we get a non-contiguous write bug */
	if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
		c->wbuf_ofs = 0xffffffff;
#endif

	jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
		  __func__, c->nextblock->offset);

	return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;				/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
						/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d, jeb->free=%d, "
						"summary->size=%d, sumsize=%d\n",
						minsize, jeb->free_size,
						c->summary->sum_size, sumsize);
		}

		/* Is there enough space to write out the current node, or do we
		   have to write out the summary information now, close this jeb
		   and select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Write out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the
				   summary information: disable summary for this jeb
				   and free the collected information
				 */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* always keep a valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				jffs2_dbg(1, "%s(): Flushing write buffer\n",
					  __func__);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);

			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			if (ret)
				return ret;

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* FIXME: that made it count as dirty. Convert to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			pr_warn("Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n",
				jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* The only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero, but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n",
		  __func__,
		  *len, jeb->offset + (c->sector_size - jeb->free_size));
	return 0;
}

/**
 *	jffs2_add_physical_node_ref - add a physical node reference to the list
 *	@c: superblock info
 *	@ofs: offset in the block
 *	@len: length of this physical node
 *	@ic: inode cache pointer
 *
 *	Should only be used to report nodes for which space has been allocated
 *	by jffs2_reserve_space().
 *
 *	Must be called with the alloc_sem held.
 *
 *	Returns: pointer to new node on success or -errno code on error
 */

struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n",
		  __func__, ofs & ~3, ofs & 3, len);
#if 1
	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		pr_warn("argh. node added in wrong place at 0x%08x(%d)\n",
			ofs & ~3, ofs & 3);
		if (c->nextblock)
			pr_warn("nextblock 0x%08x", c->nextblock->offset);
		else
			pr_warn("No nextblock");
		pr_cont(", expected at %08x\n",
			jeb->offset + (c->sector_size - jeb->free_size));
		return ERR_PTR(-EINVAL);
	}
#endif
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}


void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	jffs2_dbg(1, "jffs2_complete_reservation()\n");
	spin_lock(&c->erase_completion_lock);
	jffs2_garbage_collect_trigger(c);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			jffs2_dbg(1, "%p is on list at %p\n", obj, head);
			return 1;
		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if (unlikely(!ref)) {
		pr_notice("EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n",
			  __func__, ref_offset(ref));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		pr_notice("raw node at 0x%08x is off the end of device!\n",
			  ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		mutex_lock(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
				pr_notice("raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
					  freed_len, blocknr,
					  ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
				ref_offset(ref), freed_len);
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
				pr_notice("raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
					  freed_len, blocknr,
					  ref->flash_offset, jeb->used_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
				ref_offset(ref), freed_len);
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	/* Take care that wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		jffs2_dbg(1, "Dirtying\n");
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset);
				addedsize = 0; /* To fool the refiling code later */
			} else {
				jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset);
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		jffs2_dbg(1, "Wasting\n");
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n",
			  jeb->offset);
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n",
				  jeb->offset);
			c->gcblock = NULL;
		} else {
			jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n",
				  jeb->offset);
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n");
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				jffs2_dbg(1, "...and adding to erase_pending_list\n");
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_garbage_collect_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				jffs2_dbg(1, "...and adding to erasable_list\n");
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		jffs2_dbg(1, "Done OK\n");
	} else if (jeb == c->gcblock) {
		jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n",
			  jeb->offset);
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to dirty_list\n");
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to very_dirty_list\n");
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
		(c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

	jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n",
		  ref_offset(ref));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Read error reading from obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short read from obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		pr_warn("Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n",
			je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n",
			  ref_offset(ref), je16_to_cpu(n.nodetype));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Write error in obliterating obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short write in obliterating obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
			case RAWNODE_CLASS_XATTR_DATUM:
				jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
				break;
			case RAWNODE_CLASS_XATTR_REF:
				jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
				break;
#endif
			default:
				if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
					jffs2_del_ino_cache(c, ic);
				break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	mutex_unlock(&c->erase_free_sem);
}

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;
	int nr_very_dirty = 0;
	struct jffs2_eraseblock *jeb;

	if (!list_empty(&c->erase_complete_list) ||
	    !list_empty(&c->erase_pending_list))
		return 1;

	if (c->unchecked_size) {
		jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, check_ino #%d\n",
			  c->unchecked_size, c->check_ino);
		return 1;
	}

	/* dirty_size contains blocks on the erase_pending_list;
	 * those blocks are counted in c->nr_erasing_blocks.
	 * Once a block is actually being erased, it is no longer counted as
	 * dirty_space, but it is still counted in c->nr_erasing_blocks, so we
	 * add erasing_size and subtract c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on the erasable_list are counted in dirty_size, but not in
	 * c->nr_erasing_blocks. This helps us to force GC and eventually pick
	 * a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
			(dirty > c->nospc_dirty_size))
		ret = 1;

	list_for_each_entry(jeb, &c->very_dirty_list, list) {
		nr_very_dirty++;
		if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
			ret = 1;
			/* In debug mode, actually go through and count them all */
			D1(continue);
			break;
		}
	}

	jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
		  __func__, c->nr_free_blocks, c->nr_erasing_blocks,
		  c->dirty_size, nr_very_dirty, str_yes_no(ret));

	return ret;
}
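
/*
 * Illustrative sketch only (hypothetical, not part of this file): how a
 * background thread can poll jffs2_thread_should_wake(), loosely
 * following the loop in fs/jffs2/background.c. The real thread also
 * handles signals and freezing, which are omitted here.
 */
#if 0
static int example_gc_thread(void *_c)
{
	struct jffs2_sb_info *c = _c;

	while (!kthread_should_stop()) {
		/* Sleep unless there is work: erases pending, unchecked
		   space left to scan, or enough dirty space to make a GC
		   pass worthwhile */
		set_current_state(TASK_INTERRUPTIBLE);
		if (!jffs2_thread_should_wake(c))
			schedule();
		__set_current_state(TASK_RUNNING);

		/* One pass; -ENOSPC means there is nothing left to collect */
		if (jffs2_garbage_collect_pass(c) == -ENOSPC)
			break;
		cond_resched();
	}
	return 0;
}
#endif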