xref: /linux/fs/jffs2/nodemgmt.c (revision 9ce7677cfd7cd871adb457c80bea3b581b839641)
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: nodemgmt.c,v 1.127 2005/09/20 15:49:12 dedekind Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"

/**
 *	jffs2_reserve_space - request physical space to write nodes to flash
 *	@c: superblock info
 *	@minsize: Minimum acceptable size of allocation
 *	@ofs: Returned value of node offset
 *	@len: Returned value of allocation length
 *	@prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *	@sumsize: Amount of summary space to reserve, or
 *	JFFS2_SUMMARY_NOSUM_SIZE for no summary
 *
 *	Requests a block of physical space on the flash. Returns zero for success
 *	and puts 'ofs' and 'len' into the appropriate place, or returns -ENOSPC
 *	or other error if appropriate.
 *
 *	If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 *	allocation semaphore, to prevent more than one allocation from being
 *	active at any time. The semaphore is later released by jffs2_complete_reservation().
 *
 *	jffs2_reserve_space() may trigger garbage collection in order to make room
 *	for the requested allocation.
 */

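/*
 * Illustrative call sequence (a sketch modelled on callers such as those
 * in write.c; 'ri' and 'datalen' here are hypothetical):
 *
 *	uint32_t ofs, alloclen;
 *	int ret;
 *
 *	ret = jffs2_reserve_space(c, sizeof(ri) + datalen, &ofs, &alloclen,
 *				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 *	if (ret)
 *		return ret;
 *	(write at most 'alloclen' bytes of node data at offset 'ofs', then
 *	 report the new node with jffs2_add_physical_node_ref())
 *	jffs2_complete_reservation(c);
 */
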
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
					uint32_t *ofs, uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
	down(&c->alloc_sem);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while (ret == -EAGAIN) {
		while (c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			int ret;
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If one block is actually erased, it is no longer counted as dirty_space
			 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
			 * with c->nr_erasing_blocks * c->sector_size again.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
			 * This helps us to force gc and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 */
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
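			/* Worked example (hypothetical numbers, 64KiB sectors):
			 * dirty_size = 0x30000, erasing_size = 0x20000 with
			 * nr_erasing_blocks = 2, unchecked_size = 0x10000 gives
			 * dirty = 0x30000 + 0x20000 - 0x20000 + 0x10000 = 0x40000;
			 * the two erasing blocks cancel out, so only genuinely
			 * reclaimable plus unchecked space is counted. */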
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
					break;
				}
				D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size, c->nospc_dirty_size));

				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. Possibly available means that we
			 * don't know, if unchecked size contains obsoleted nodes, which could give us some
			 * more usable space. This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 * Return -ENOSPC, if the maximum possibly available space is less than or equal to
			 * blocksneeded * sector_size.
			 * This blocks endless gc looping on a filesystem, which is nearly full, even if
			 * the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
					break;
				}

				D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size));
				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			up(&c->alloc_sem);

			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);
			if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			down(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (ret)
		up(&c->alloc_sem);
	return ret;
}

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
			uint32_t *len, uint32_t sumsize)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

	spin_lock(&c->erase_completion_lock);
	while (ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	return ret;
}
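
/* Note: unlike jffs2_reserve_space(), this GC-path variant neither takes
   c->alloc_sem nor triggers a further GC pass: it is called from the
   garbage collector, which already holds the allocation semaphore. */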


/* Classify nextblock (clean, dirty or verydirty) and force the selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	/* Check if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY(jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
		  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;
}
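
/* A rough guide to the thresholds used above (see the ISDIRTY() and
   VERYDIRTY() macros in nodelist.h; the exact definitions are an
   assumption and may differ by revision): a block counts as "dirty" once
   its dirty space exceeds what a single small node could account for, and
   as "very dirty" once around half of the eraseblock is dirty. */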

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_del(&ejeb->list);
			list_add_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_erase_pending_trigger(c);
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
				  ejeb->offset));
		}

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_pending_wbuf_list)) {
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				   c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
				   list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

	D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));

	return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size; 			/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
							/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d , jeb->free=%d ,"
						"summary->size=%d , sumsize=%d\n",
						minsize, jeb->free_size,
						c->summary->sum_size, sumsize);
		}

		/* Is there enough space for writing out the current node, or do we
		   have to write out summary information now, close this jeb and
		   select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the summary
				   information. Disable summary for this jeb and free the
				   collected information.
				 */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* always keep a valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			c->wasted_size += jeb->free_size;
			c->free_size -= jeb->free_size;
			jeb->wasted_size += jeb->free_size;
			jeb->free_size = 0;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*ofs = jeb->offset + (c->sector_size - jeb->free_size);
	*len = jeb->free_size - reserved_size;
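	/* Example (hypothetical numbers): a 64KiB sector at offset 0x40000
	 * with 0x8000 bytes still free and no summary space reserved gives
	 * *ofs = 0x40000 + (0x10000 - 0x8000) = 0x48000 and *len = 0x8000. */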

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero, but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
	return 0;
}

/**
 *	jffs2_add_physical_node_ref - add a physical node reference to the list
 *	@c: superblock info
 *	@new: new node reference to add
 *
 *	Should only be used to report nodes for which space has been allocated
 *	by jffs2_reserve_space.
 *
 *	Must be called with the alloc_sem held.
 */

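/*
 * Illustrative usage (a sketch of the write-path pattern; the exact field
 * setup here is an assumption, not lifted verbatim from write.c):
 *
 *	struct jffs2_raw_node_ref *raw = jffs2_alloc_raw_node_ref();
 *
 *	raw->flash_offset = ofs;	(as returned by jffs2_reserve_space())
 *	raw->__totlen = PAD(totlen);
 *	raw->next_phys = NULL;
 *	(... write the node to flash at 'ofs' ...)
 *	jffs2_add_physical_node_ref(c, raw);
 */
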
int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
{
	struct jffs2_eraseblock *jeb;
	uint32_t len;

	jeb = &c->blocks[new->flash_offset / c->sector_size];
	len = ref_totlen(c, jeb, new);

	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
#if 1
	/* we could get some obsolete nodes after nextblock was refiled
	   in wbuf.c */
	if ((c->nextblock || !ref_obsolete(new))
	    && (jeb != c->nextblock || ref_offset(new) != jeb->offset + (c->sector_size - jeb->free_size))) {
		printk(KERN_WARNING "argh. node added in wrong place\n");
		jffs2_free_raw_node_ref(new);
		return -EINVAL;
	}
#endif
	spin_lock(&c->erase_completion_lock);

	if (!jeb->first_node)
		jeb->first_node = new;
	if (jeb->last_node)
		jeb->last_node->next_phys = new;
	jeb->last_node = new;

	jeb->free_size -= len;
	c->free_size -= len;
	if (ref_obsolete(new)) {
		jeb->dirty_size += len;
		c->dirty_size += len;
	} else {
		jeb->used_size += len;
		c->used_size += len;
	}

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return 0;
}


void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	jffs2_garbage_collect_trigger(c);
	up(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			D1(printk("%p is on list at %p\n", obj, head));
			return 1;
		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;

	if (!ref) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		down(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
		jeb->unchecked_size -= ref_totlen(c, jeb, ref);
		c->unchecked_size -= ref_totlen(c, jeb, ref);
	} else {
		D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
		jeb->used_size -= ref_totlen(c, jeb, ref);
		c->used_size -= ref_totlen(c, jeb, ref);
	}

	/* Take care that wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) {
		D1(printk(KERN_DEBUG "Dirtying\n"));
		addedsize = ref_totlen(c, jeb, ref);
		jeb->dirty_size += ref_totlen(c, jeb, ref);
		c->dirty_size += ref_totlen(c, jeb, ref);

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset));
				addedsize = 0; /* To fool the refiling code later */
			} else {
				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset));
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		D1(printk(KERN_DEBUG "Wasting\n"));
		addedsize = 0;
		jeb->wasted_size += ref_totlen(c, jeb, ref);
		c->wasted_size += ref_totlen(c, jeb, ref);
	}
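	/* At this point the node's length has moved out of used_size (or
	 * unchecked_size) into either dirty_size or wasted_size; 'addedsize'
	 * records how much dirty space this call added, so the refiling
	 * checks below can tell whether the block has just crossed the
	 * ISDIRTY()/VERYDIRTY() thresholds. */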
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
		(c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_all_node_refs() in erase.c. Which is nice. */

	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref));
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		if (ic->nodes == (void *)ic && ic->nlink == 0)
			jffs2_del_ino_cache(c, ic);

		spin_unlock(&c->erase_completion_lock);
	}


	/* Merge with the next node in the physical list, if there is one
	   and if it's also obsolete and if it doesn't belong to any inode */
	if (ref->next_phys && ref_obsolete(ref->next_phys) &&
	    !ref->next_phys->next_in_ino) {
		struct jffs2_raw_node_ref *n = ref->next_phys;

		spin_lock(&c->erase_completion_lock);

		ref->__totlen += n->__totlen;
		ref->next_phys = n->next_phys;
		if (jeb->last_node == n)
			jeb->last_node = ref;
		if (jeb->gc_node == n) {
			/* gc will be happy continuing gc on this node */
			jeb->gc_node = ref;
		}
		spin_unlock(&c->erase_completion_lock);

		jffs2_free_raw_node_ref(n);
	}

	/* Also merge with the previous node in the list, if there is one
	   and that one is obsolete */
	if (ref != jeb->first_node) {
		struct jffs2_raw_node_ref *p = jeb->first_node;

		spin_lock(&c->erase_completion_lock);

		while (p->next_phys != ref)
			p = p->next_phys;

		if (ref_obsolete(p) && !ref->next_in_ino) {
			p->__totlen += ref->__totlen;
			if (jeb->last_node == ref) {
				jeb->last_node = p;
			}
			if (jeb->gc_node == ref) {
				/* gc will be happy continuing gc on this node */
				jeb->gc_node = p;
			}
			p->next_phys = ref->next_phys;
			jffs2_free_raw_node_ref(ref);
		}
		spin_unlock(&c->erase_completion_lock);
	}
 out_erase_sem:
	up(&c->erase_free_sem);
}

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;

	if (c->unchecked_size) {
		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino));
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually erased, it is no longer counted as dirty_space
	 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
	 * with c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
	 * This helps us to force gc and eventually pick a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
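	/* Example (hypothetical numbers): with resv_blocks_gctrigger = 4,
	 * two free blocks, one erasing block and dirty space above
	 * nospc_dirty_size, the test below fires (2 + 1 < 4) and the GC
	 * thread is woken. */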

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
			(dirty > c->nospc_dirty_size))
		ret = 1;

	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));

	return ret;
}
765