/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched/signal.h>
#include "nodelist.h"
#include "debug.h"

/*
 * Check whether the user is allowed to write.
 */
static int jffs2_rp_can_write(struct jffs2_sb_info *c)
{
	uint32_t avail;
	struct jffs2_mount_opts *opts = &c->mount_opts;

	avail = c->dirty_size + c->free_size + c->unchecked_size +
		c->erasing_size - c->resv_blocks_write * c->sector_size
		- c->nospc_dirty_size;

	if (avail < 2 * opts->rp_size)
		jffs2_dbg(1, "rpsize %u, dirty_size %u, free_size %u, "
			  "erasing_size %u, unchecked_size %u, "
			  "nr_erasing_blocks %u, avail %u, resrv %u\n",
			  opts->rp_size, c->dirty_size, c->free_size,
			  c->erasing_size, c->unchecked_size,
			  c->nr_erasing_blocks, avail, c->nospc_dirty_size);

	if (avail > opts->rp_size)
		return 1;

	/* Always allow root */
	if (capable(CAP_SYS_RESOURCE))
		return 1;

	jffs2_dbg(1, "forbid writing\n");
	return 0;
}
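
/*
 * A worked example of the reserved-pool check above (illustrative numbers
 * only, not taken from any real mount): with rp_size set to 1 MiB, a task
 * without CAP_SYS_RESOURCE is refused with -ENOSPC as soon as the writable
 * space computed in 'avail' drops to 1 MiB or below, while root may keep
 * consuming the reserved pool down to the ordinary no-space limits.
 */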

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);

/**
 *	jffs2_reserve_space - request physical space to write nodes to flash
 *	@c: superblock info
 *	@minsize: Minimum acceptable size of allocation
 *	@len: Returned value of allocation length
 *	@prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *	@sumsize: summary size requested or JFFS2_SUMMARY_NOSUM_SIZE for no summary
 *
 *	Requests a block of physical space on the flash.
 *
 *	Returns: %0 for success, with the allocated length stored in @len,
 *	or -ENOSPC or another error code as appropriate.
 *
 *	If it returns %0, jffs2_reserve_space() also downs the per-filesystem
 *	allocation semaphore, to prevent more than one allocation from being
 *	active at any time. The semaphore is later released by jffs2_complete_reservation().
 *
 *	jffs2_reserve_space() may trigger garbage collection in order to make room
 *	for the requested allocation.
 */

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
	mutex_lock(&c->alloc_sem);

	jffs2_dbg(1, "%s(): alloc sem got\n", __func__);

	spin_lock(&c->erase_completion_lock);

	/*
	 * Check if the free space is greater than the size of the reserved pool.
	 * If not, only allow root to proceed with writing.
	 */
	if (prio != ALLOC_DELETION && !jffs2_rp_can_write(c)) {
		ret = -ENOSPC;
		goto out;
	}

	/* this needs a little more thought (true <tglx> :)) */
	while (ret == -EAGAIN) {
		while (c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			uint32_t dirty, avail;

			/* Calculate the real dirty size:
			 * dirty_size contains blocks on the erase_pending_list;
			 * those blocks are counted in c->nr_erasing_blocks.
			 * Once a block is actually being erased, it is no longer
			 * counted as dirty_size but as erasing_size, so we add
			 * erasing_size and subtract c->nr_erasing_blocks *
			 * c->sector_size again.
			 * Blocks on the erasable_list are counted in dirty_size,
			 * but not in c->nr_erasing_blocks; this helps us to force
			 * GC and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find
			 * some space to use. This will affect the sum only once,
			 * as GC first finishes checking of nodes.
			 */
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}
				jffs2_dbg(1, "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size,
					  c->nospc_dirty_size);

				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calculate the possibly available space. "Possibly
			 * available" means that we don't know whether the
			 * unchecked size contains obsoleted nodes, which could
			 * give us some more usable space. This will affect the
			 * sum only once, as GC first finishes checking of nodes.
			 * Return -ENOSPC if the maximum possibly available space
			 * is less than or equal to blocksneeded * sector_size.
			 * This prevents endless GC looping on a filesystem which
			 * is nearly full, even if the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}

				jffs2_dbg(1, "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size);
				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			mutex_unlock(&c->alloc_sem);

			jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks,
				  c->free_size, c->dirty_size, c->wasted_size,
				  c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size +
				  c->wasted_size + c->used_size +
				  c->erasing_size + c->bad_size,
				  c->flash_size);
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);

			if (ret == -EAGAIN) {
				spin_lock(&c->erase_completion_lock);
				if (c->nr_erasing_blocks &&
				    list_empty(&c->erase_pending_list) &&
				    list_empty(&c->erase_complete_list)) {
					DECLARE_WAITQUEUE(wait, current);
					set_current_state(TASK_UNINTERRUPTIBLE);
					add_wait_queue(&c->erase_wait, &wait);
					jffs2_dbg(1, "%s waiting for erase to complete\n",
						  __func__);
					spin_unlock(&c->erase_completion_lock);

					schedule();
					remove_wait_queue(&c->erase_wait, &wait);
				} else
					spin_unlock(&c->erase_completion_lock);
			} else if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			mutex_lock(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
		}
	}

out:
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		mutex_unlock(&c->alloc_sem);
	return ret;
}
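
/*
 * Typical caller pattern (a sketch modelled on the write paths in
 * fs/jffs2/write.c; the exact node size, summary constant and error
 * handling vary by node type):
 *
 *	uint32_t alloclen;
 *	int ret;
 *
 *	ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &alloclen,
 *				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 *	if (ret)
 *		return ret;
 *	// ... write at most 'alloclen' bytes of node data to flash ...
 *	// ... report the node with jffs2_add_physical_node_ref() ...
 *	jffs2_complete_reservation(c);	// releases c->alloc_sem
 */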

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret;
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);

	while (true) {
		spin_lock(&c->erase_completion_lock);
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): looping, ret is %d\n",
				  __func__, ret);
		}
		spin_unlock(&c->erase_completion_lock);

		if (ret == -EAGAIN)
			cond_resched();
		else
			break;
	}
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}
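
/*
 * Note: unlike jffs2_reserve_space(), the GC variant above neither takes
 * c->alloc_sem nor triggers further garbage collection; the garbage
 * collector already holds the allocation semaphore when it calls here, so
 * this function only loops on -EAGAIN until jffs2_do_reserve_space() finds
 * room.
 */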

/* Classify nextblock (clean, dirty or verydirty) and force selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	if (c->nextblock == NULL) {
		jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n",
			  __func__, jeb->offset);
		return;
	}
	/* Check if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY(jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;
}

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
			jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n",
				  __func__, ejeb->offset);
		}

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_pending_wbuf_list)) {
			jffs2_dbg(1, "%s(): Flushing write buffer\n",
				  __func__);
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				c->nr_erasing_blocks, c->nr_free_blocks,
				list_empty(&c->erasable_list) ? "yes" : "no",
				list_empty(&c->erasing_list) ? "yes" : "no",
				list_empty(&c->erase_pending_list) ? "yes" : "no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	/* adjust write buffer offset, else we get a non-contiguous write bug */
	if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
		c->wbuf_ofs = 0xffffffff;
#endif

	jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
		  __func__, c->nextblock->offset);

	return 0;
}
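
/*
 * The -EAGAIN returns above are part of a retry protocol: whenever this
 * function has to drop erase_completion_lock (to flush the write buffer or
 * to erase a block synchronously), the world may have changed underneath
 * it, so the caller is expected to re-evaluate and call again rather than
 * trust any partial state.
 */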

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;				/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
						/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d, jeb->free=%d, "
				    "summary->size=%d, sumsize=%d\n",
				    minsize, jeb->free_size,
				    c->summary->sum_size, sumsize);
		}

		/* Is there enough space for writing out the current node, or do
		   we have to write out the summary information now, close this
		   jeb and select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the
				   summary information, disabling summary for this
				   jeb and freeing the collected information */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* always keep a valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				jffs2_dbg(1, "%s(): Flushing write buffer\n",
					  __func__);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);

			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			if (ret)
				return ret;

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* FIXME: that made it count as dirty. Convert to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			pr_warn("Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n",
				jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* The only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero, but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n",
		  __func__,
		  *len, jeb->offset + (c->sector_size - jeb->free_size));
	return 0;
}
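
/*
 * A rough picture of what jffs2_do_reserve_space() hands back while summary
 * collection is active (a sketch, not to scale):
 *
 *   jeb->offset                                jeb->offset + sector_size
 *   |                                                                  |
 *   v                                                                  v
 *   +------------------------+--------------------+-------------------+
 *   | nodes already written  |  *len usable space |   reserved_size   |
 *   +------------------------+--------------------+-------------------+
 *                            ^                    ^
 *                       write point      summary node goes here
 *
 * With summaries disabled, reserved_size is 0 and *len runs to the end of
 * the block.
 */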

/**
 *	jffs2_add_physical_node_ref - add a physical node reference to the list
 *	@c: superblock info
 *	@ofs: offset in the block
 *	@len: length of this physical node
 *	@ic: inode cache pointer
 *
 *	Should only be used to report nodes for which space has been allocated
 *	by jffs2_reserve_space().
 *
 *	Must be called with the alloc_sem held.
 *
 *	Returns: pointer to the new node on success, or an ERR_PTR-encoded
 *	-errno code on error
 */

struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n",
		  __func__, ofs & ~3, ofs & 3, len);
#if 1
	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		pr_warn("argh. node added in wrong place at 0x%08x(%d)\n",
			ofs & ~3, ofs & 3);
		if (c->nextblock)
			pr_warn("nextblock 0x%08x", c->nextblock->offset);
		else
			pr_warn("No nextblock");
		pr_cont(", expected at %08x\n",
			jeb->offset + (c->sector_size - jeb->free_size));
		return ERR_PTR(-EINVAL);
	}
#endif
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}
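
/*
 * The low two bits of @ofs carry the REF_* state bits (hence the 'ofs & ~3'
 * and 'ofs & 3' games above). A hypothetical caller that has just written a
 * node at flash offset 'flash_ofs' would do something like:
 *
 *	new = jffs2_add_physical_node_ref(c, flash_ofs | REF_NORMAL,
 *					  PAD(totlen), f->inocache);
 *	if (IS_ERR(new))
 *		return PTR_ERR(new);
 */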


void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	jffs2_dbg(1, "jffs2_complete_reservation()\n");
	spin_lock(&c->erase_completion_lock);
	jffs2_garbage_collect_trigger(c);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			jffs2_dbg(1, "%p is on list at %p\n", obj, head);
			return 1;
		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if (unlikely(!ref)) {
		pr_notice("EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n",
			  __func__, ref_offset(ref));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		pr_notice("raw node at 0x%08x is off the end of device!\n",
			  ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		mutex_lock(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
			pr_notice("raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
				  freed_len, blocknr,
				  ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
			  ref_offset(ref), freed_len);
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
			pr_notice("raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
				  freed_len, blocknr,
				  ref->flash_offset, jeb->used_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
			  ref_offset(ref), freed_len);
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	/* Take care that the wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		jffs2_dbg(1, "Dirtying\n");
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset);
				addedsize = 0; /* To fool the refiling code later */
			} else {
				jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset);
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		jffs2_dbg(1, "Wasting\n");
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n",
			  jeb->offset);
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n",
				  jeb->offset);
			c->gcblock = NULL;
		} else {
			jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n",
				  jeb->offset);
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n");
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				jffs2_dbg(1, "...and adding to erase_pending_list\n");
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_garbage_collect_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				jffs2_dbg(1, "...and adding to erasable_list\n");
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		jffs2_dbg(1, "Done OK\n");
	} else if (jeb == c->gcblock) {
		jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n",
			  jeb->offset);
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to dirty_list\n");
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to very_dirty_list\n");
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
	    (c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

	jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n",
		  ref_offset(ref));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Read error reading from obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short read from obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		pr_warn("Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n",
			je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n",
			  ref_offset(ref), je16_to_cpu(n.nodetype));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Write error in obliterating obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short write in obliterating obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
			case RAWNODE_CLASS_XATTR_DATUM:
				jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
				break;
			case RAWNODE_CLASS_XATTR_REF:
				jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
				break;
#endif
			default:
				if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
					jffs2_del_ino_cache(c, ic);
				break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	mutex_unlock(&c->erase_free_sem);
}
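
/*
 * Summary of the refiling decisions above (a reading aid, not new policy):
 * a fully dirtied block normally goes to the erase_pending_list (or to the
 * erasable_pending_wbuf_list while the write buffer is dirty), with an
 * occasional detour via the erasable_list to spread the load; a block
 * crossing the ISDIRTY threshold moves clean_list -> dirty_list; one
 * crossing the VERYDIRTY threshold moves dirty_list -> very_dirty_list;
 * c->nextblock and c->gcblock are never refiled from here.
 */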

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;
	int nr_very_dirty = 0;
	struct jffs2_eraseblock *jeb;

	if (!list_empty(&c->erase_complete_list) ||
	    !list_empty(&c->erase_pending_list))
		return 1;

	if (c->unchecked_size) {
		jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, check_ino #%d\n",
			  c->unchecked_size, c->check_ino);
		return 1;
	}

	/* dirty_size contains blocks on the erase_pending_list;
	 * those blocks are counted in c->nr_erasing_blocks.
	 * Once a block is actually being erased, it is no longer counted as
	 * dirty_size but as erasing_size, so we add erasing_size and subtract
	 * c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on the erasable_list are counted in dirty_size, but not in
	 * c->nr_erasing_blocks; this helps us to force GC and eventually pick
	 * a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
	    (dirty > c->nospc_dirty_size))
		ret = 1;

	list_for_each_entry(jeb, &c->very_dirty_list, list) {
		nr_very_dirty++;
		if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
			ret = 1;
			/* In debug mode, actually go through and count them all */
			D1(continue);
			break;
		}
	}

	jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
		  __func__, c->nr_free_blocks, c->nr_erasing_blocks,
		  c->dirty_size, nr_very_dirty, ret ? "yes" : "no");

	return ret;
}
890