/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: nodemgmt.c,v 1.127 2005/09/20 15:49:12 dedekind Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"

/**
 *	jffs2_reserve_space - request physical space to write nodes to flash
 *	@c: superblock info
 *	@minsize: Minimum acceptable size of allocation
 *	@len: Returned value of allocation length
 *	@prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *
 *	Requests a block of physical space on the flash. Returns zero for success
 *	and puts 'len' into the appropriate place, or returns -ENOSPC or other
 *	error if appropriate.
 *
 *	If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 *	allocation semaphore, to prevent more than one allocation from being
 *	active at any time. The semaphore is later released by
 *	jffs2_complete_reservation().
 *
 *	jffs2_reserve_space() may trigger garbage collection in order to make room
 *	for the requested allocation.
 */
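
/*
 * A typical call sequence, sketched for illustration only (variable
 * names and the write step are hypothetical; the real callers live in
 * write.c):
 *
 *	uint32_t alloclen;
 *	int ret = jffs2_reserve_space(c, write_len, &alloclen,
 *				      ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 *	if (ret)
 *		return ret;
 *	... write at most 'alloclen' bytes at the spot handed out by
 *	    jffs2_do_reserve_space(), report the node with
 *	    jffs2_add_physical_node_ref(), then ...
 *	jffs2_complete_reservation(c);
 */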

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
	down(&c->alloc_sem);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			int ret;
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If a block is actually erased, it is no longer counted as dirty_space,
			 * but it is still counted in c->nr_erasing_blocks, so we include
			 * erasing_size and subtract c->nr_erasing_blocks * c->sector_size.
			 * Blocks on erasable_list are counted in dirty_size, but not in c->nr_erasing_blocks.
			 * This helps us to force gc and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as gc finishes checking
			 * all nodes first.
			 */
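			/* Worked example with hypothetical numbers: with
			 * 64KiB sectors, dirty_size = 192KiB of which two
			 * whole blocks sit on erase_pending_list,
			 * erasing_size = 0 and nr_erasing_blocks = 2:
			 *   dirty = 192K + 0 - 2*64K + unchecked_size
			 *         = 64K + unchecked_size,
			 * i.e. only the dirty space NOT already queued for
			 * erase counts towards the GC decision below.
			 */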
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
					break;
				}
				D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size, c->nospc_dirty_size));

				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. "Possibly available" means that we
			 * don't know whether the unchecked size contains obsoleted nodes, which could
			 * give us some more usable space. This will affect the sum only once, as gc
			 * finishes checking all nodes first.
			 * Return -ENOSPC if the maximum possibly available space is less than or
			 * equal to blocksneeded * sector_size.
			 * This blocks endless gc looping on a filesystem which is nearly full, even
			 * if the check above passes.
			 */
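			/* E.g. (hypothetical numbers): sixteen 64KiB blocks
			 * with blocksneeded = 5 means we need avail of at
			 * least six full blocks (384KiB); with anything less
			 * we bail out here instead of garbage-collecting
			 * forever. */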
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
					break;
				}

				D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size));
				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			up(&c->alloc_sem);

			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);
			if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			down(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		up(&c->alloc_sem);
	return ret;
}

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}

/* Classify nextblock (clean, dirty or verydirty) and force selection of a new one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	/* Check if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
		  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;
}

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_del(&ejeb->list);
			list_add_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_erase_pending_trigger(c);
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
				  ejeb->offset));
		}

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_pending_wbuf_list)) {
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				   c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
				   list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

	D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));

	return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;				/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
							/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d , jeb->free=%d ,"
						"summary->size=%d , sumsize=%d\n",
						minsize, jeb->free_size,
						c->summary->sum_size, sumsize);
		}

		/* Is there enough space for writing out the current node, or do we have to
		   write out the summary information now, close this jeb and select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the summary
				   information; disable summary for this jeb and free the
				   collected information */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* always keep a valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
			if (ret)
				return ret;
			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* FIXME: that made it count as dirty. Convert to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {
		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero, but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n",
		  *len, jeb->offset + (c->sector_size - jeb->free_size)));
	return 0;
}

/**
 *	jffs2_add_physical_node_ref - add a physical node reference to the list
 *	@c: superblock info
 *	@ofs: physical location of the node, with the REF_ status bits in the
 *	      two least significant bits
 *	@len: length of this physical node
 *	@ic: inode cache to associate the node with (may be NULL)
 *
 *	Should only be used to report nodes for which space has been allocated
 *	by jffs2_reserve_space.
 *
 *	Must be called with the alloc_sem held.
 */
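
/*
 * Illustrative call sketch (the variable names here are hypothetical;
 * the real callers live in write.c and wbuf.c): after writing a node at
 * 'flash_ofs' within space obtained from jffs2_reserve_space(), report
 * it with the REF_ state encoded in the low two bits of the offset:
 *
 *	struct jffs2_raw_node_ref *new;
 *	new = jffs2_add_physical_node_ref(c, flash_ofs | REF_NORMAL,
 *					  PAD(totlen), ic);
 *	if (IS_ERR(new))
 *		return PTR_ERR(new);
 */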

struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n",
		  ofs & ~3, ofs & 3, len));
#if 1
	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		printk(KERN_WARNING "argh. node added in wrong place\n");
		return ERR_PTR(-EINVAL);
	}
#endif
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}

void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	jffs2_garbage_collect_trigger(c);
	up(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			D1(printk("%p is on list at %p\n", obj, head));
			return 1;
		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if (unlikely(!ref)) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		down(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	/* Make sure the wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		D1(printk("Dirtying\n"));
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset));
				addedsize = 0; /* To fool the refiling code later */
			} else {
				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset));
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		D1(printk("Wasting\n"));
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
		(c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		/* It seems we should never call jffs2_mark_node_obsolete() for
		   XATTR nodes.... yet. Make sure we notice if/when we change
		   that :) */
		BUG_ON(ic->class != RAWNODE_CLASS_INODE_CACHE);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		if (ic->nodes == (void *)ic && ic->nlink == 0)
			jffs2_del_ino_cache(c, ic);

		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	up(&c->erase_free_sem);
}

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;

	if (c->unchecked_size) {
		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino));
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If a block is actually erased, it is no longer counted as dirty_space,
	 * but it is still counted in c->nr_erasing_blocks, so we include
	 * erasing_size and subtract c->nr_erasing_blocks * c->sector_size.
	 * Blocks on erasable_list are counted in dirty_size, but not in c->nr_erasing_blocks.
	 * This helps us to force gc and eventually pick a clean block to spread the load.
	 */
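	/* Same accounting as in jffs2_reserve_space() above, minus the
	 * unchecked_size term; see the worked example there for how the
	 * blocks already queued for erase cancel out of the sum. */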
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
			(dirty > c->nospc_dirty_size))
		ret = 1;

	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));

	return ret;
}