/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <[email protected]>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: nodemgmt.c,v 1.124 2005/07/20 15:32:28 dedekind Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"

/**
 *	jffs2_reserve_space - request physical space to write nodes to flash
 *	@c: superblock info
 *	@minsize: Minimum acceptable size of allocation
 *	@ofs: Returned value of node offset
 *	@len: Returned value of allocation length
 *	@prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *
 *	Requests a block of physical space on the flash. Returns zero for success
 *	and puts 'ofs' and 'len' into the appropriate place, or returns -ENOSPC
 *	or other error if appropriate.
 *
 *	If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 *	allocation semaphore, to prevent more than one allocation from being
 *	active at any time. The semaphore is later released by
 *	jffs2_complete_reservation().
 *
 *	jffs2_reserve_space() may trigger garbage collection in order to make room
 *	for the requested allocation.
 */
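
/*
 * Typical caller sequence (an illustrative sketch, not a verbatim call site;
 * 'raw' is a hypothetical jffs2_raw_node_ref the caller has filled in):
 *
 *	uint32_t ofs, alloclen;
 *	ret = jffs2_reserve_space(c, minsize, &ofs, &alloclen, ALLOC_NORMAL);
 *	if (!ret) {
 *		... write the new node to flash at 'ofs' ...
 *		jffs2_add_physical_node_ref(c, raw);
 *		jffs2_complete_reservation(c);
 *	}
 */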

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
	down(&c->alloc_sem);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			int ret;
			uint32_t dirty, avail;
			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If one block is actually erased, it is no longer counted as dirty_space
			 * but it is still counted in c->nr_erasing_blocks, so we add it and subtract it
			 * again with c->nr_erasing_blocks * c->sector_size.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
			 * This helps us to force gc and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as gc first finishes the checking
			 * of nodes.
			 */
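			/* Worked example with hypothetical numbers (0x10000-byte sectors):
			 * dirty_size 0x30000, erasing_size 0x20000 with two blocks
			 * currently erasing (nr_erasing_blocks == 2), unchecked_size 0:
			 * dirty = 0x30000 + 0x20000 - 2 * 0x10000 + 0 = 0x30000.
			 */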
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
					break;
				}
				D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size, c->nospc_dirty_size));

				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. Possibly available means that we
			 * don't know whether the unchecked size contains obsoleted nodes, which could
			 * give us some more usable space. This will affect the sum only once, as gc
			 * first finishes the checking of nodes.
			 * Return -ENOSPC if the maximum possibly available space is less than or
			 * equal to blocksneeded * sector_size.
			 * This blocks endless gc looping on a filesystem which is nearly full, even if
			 * the check above passes.
			 */
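			/* E.g. (hypothetical numbers): with avail == 0x50000, 0x10000-byte
			 * sectors and blocksneeded == 5, 0x50000 / 0x10000 == 5 <= 5, so we
			 * give up here rather than loop in GC forever.
			 */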
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
					break;
				}

				D1(printk(KERN_DEBUG "max. available size 0x%08x  < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size));
				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			up(&c->alloc_sem);

			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);
			if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			down(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, ofs, len);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (ret)
		up(&c->alloc_sem);
	return ret;
}

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, ofs, len);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	return ret;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
{
	struct jffs2_eraseblock *jeb = c->nextblock;

 restart:
	if (jeb && minsize > jeb->free_size) {
		/* Skip the end of this block and file it as having some dirty space */
		/* If there's a pending write to it, flush now */
		if (jffs2_wbuf_dirty(c)) {
			spin_unlock(&c->erase_completion_lock);
			D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			jeb = c->nextblock;
			goto restart;
		}
		c->wasted_size += jeb->free_size;
		c->free_size -= jeb->free_size;
		jeb->wasted_size += jeb->free_size;
		jeb->free_size = 0;

		/* Check whether we have a dirty block now, or whether it was dirty already */
		if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
			c->dirty_size += jeb->wasted_size;
			c->wasted_size -= jeb->wasted_size;
			jeb->dirty_size += jeb->wasted_size;
			jeb->wasted_size = 0;
			if (VERYDIRTY(c, jeb->dirty_size)) {
				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
				list_add_tail(&jeb->list, &c->very_dirty_list);
			} else {
				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
				list_add_tail(&jeb->list, &c->dirty_list);
			}
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->clean_list);
		}
		c->nextblock = jeb = NULL;
	}

	if (!jeb) {
		struct list_head *next;
		/* Take the next block off the 'free' list */

		if (list_empty(&c->free_list)) {

			if (!c->nr_erasing_blocks &&
			    !list_empty(&c->erasable_list)) {
				struct jffs2_eraseblock *ejeb;

				ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
				list_del(&ejeb->list);
				list_add_tail(&ejeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n",
					  ejeb->offset));
			}

			if (!c->nr_erasing_blocks &&
			    !list_empty(&c->erasable_pending_wbuf_list)) {
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				/* c->nextblock is NULL, no update to c->nextblock allowed */
				spin_unlock(&c->erase_completion_lock);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				/* Have another go. It'll be on the erasable_list now */
				return -EAGAIN;
			}

			if (!c->nr_erasing_blocks) {
				/* Ouch. We're in GC, or we wouldn't have got here.
				   And there's no space left. At all. */
				printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				       c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
				       list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
				return -ENOSPC;
			}

			spin_unlock(&c->erase_completion_lock);
			/* Don't wait for it; just erase one right now */
			jffs2_erase_pending_blocks(c, 1);
			spin_lock(&c->erase_completion_lock);

			/* An erase may have failed, decreasing the
			   amount of free space available. So we must
			   restart from the beginning */
			return -EAGAIN;
		}

		next = c->free_list.next;
		list_del(next);
		c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list);
		c->nr_free_blocks--;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*ofs = jeb->offset + (c->sector_size - jeb->free_size);
	*len = jeb->free_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* The only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero, but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
	return 0;
}

/**
 *	jffs2_add_physical_node_ref - add a physical node reference to the list
 *	@c: superblock info
 *	@new: new node reference to add
 *
 *	The length and dirty state of the new node are derived from the
 *	reference itself, via ref_totlen() and ref_obsolete().
 *
 *	Should only be used to report nodes for which space has been allocated
 *	by jffs2_reserve_space.
 *
 *	Must be called with the alloc_sem held.
 */

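/* Note: callers are expected to have written the node into the space handed
 * out by jffs2_reserve_space() before reporting it here; this function only
 * updates the accounting and the per-block node list (and may flush the
 * write buffer when a block fills up).
 */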
int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
{
	struct jffs2_eraseblock *jeb;
	uint32_t len;

	jeb = &c->blocks[new->flash_offset / c->sector_size];
	len = ref_totlen(c, jeb, new);

	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
#if 1
	/* we could get some obsolete nodes after nextblock was refiled
	   in wbuf.c */
	if ((c->nextblock || !ref_obsolete(new)) &&
	    (jeb != c->nextblock || ref_offset(new) != jeb->offset + (c->sector_size - jeb->free_size))) {
		printk(KERN_WARNING "argh. node added in wrong place\n");
		jffs2_free_raw_node_ref(new);
		return -EINVAL;
	}
#endif
	spin_lock(&c->erase_completion_lock);

	if (!jeb->first_node)
		jeb->first_node = new;
	if (jeb->last_node)
		jeb->last_node->next_phys = new;
	jeb->last_node = new;

	jeb->free_size -= len;
	c->free_size -= len;
	if (ref_obsolete(new)) {
		jeb->dirty_size += len;
		c->dirty_size += len;
	} else {
		jeb->used_size += len;
		c->used_size += len;
	}

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return 0;
}

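/*
 * Release the reservation taken by a successful jffs2_reserve_space() call:
 * kick the garbage-collector thread (it may have useful work to do now that
 * the new node is on flash) and drop the per-filesystem alloc_sem.
 */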
void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	jffs2_garbage_collect_trigger(c);
	up(&c->alloc_sem);
}

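/* Debug helper: linearly scan @head and report whether @obj is linked on it. */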
static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			D1(printk("%p is on list at %p\n", obj, head));
			return 1;
		}
	}
	return 0;
}

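/*
 * Mark a node obsolete and fold its space into the dirty/wasted accounting.
 * Roughly, in order: adjust the size counters of the node's eraseblock,
 * refile the block onto the appropriate list (erase_pending, erasable,
 * dirty, very_dirty, ...), then, where the flash type allows it, clear the
 * ACCURATE bit of the node header on the medium, unlink the ref from its
 * per-inode chain, and coalesce it with adjacent obsolete refs.
 */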
void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;

	if(!ref) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		down(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %#x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
		jeb->unchecked_size -= ref_totlen(c, jeb, ref);
		c->unchecked_size -= ref_totlen(c, jeb, ref);
	} else {
		D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
		jeb->used_size -= ref_totlen(c, jeb, ref);
		c->used_size -= ref_totlen(c, jeb, ref);
	}

	/* Take care that wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) {
		D1(printk(KERN_DEBUG "Dirtying\n"));
		addedsize = ref_totlen(c, jeb, ref);
		jeb->dirty_size += ref_totlen(c, jeb, ref);
		c->dirty_size += ref_totlen(c, jeb, ref);

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset));
				addedsize = 0; /* To fool the refiling code later */
			} else {
				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset));
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		D1(printk(KERN_DEBUG "Wasting\n"));
		addedsize = 0;
		jeb->wasted_size += ref_totlen(c, jeb, ref);
		c->wasted_size += ref_totlen(c, jeb, ref);
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
		(c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_all_node_refs() in erase.c. Which is nice. */

	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (unsigned char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref));
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (unsigned char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		if (ic->nodes == (void *)ic && ic->nlink == 0)
			jffs2_del_ino_cache(c, ic);

		spin_unlock(&c->erase_completion_lock);
	}


	/* Merge with the next node in the physical list, if there is one
	   and if it's also obsolete and if it doesn't belong to any inode */
	if (ref->next_phys && ref_obsolete(ref->next_phys) &&
	    !ref->next_phys->next_in_ino) {
		struct jffs2_raw_node_ref *n = ref->next_phys;

		spin_lock(&c->erase_completion_lock);

		ref->__totlen += n->__totlen;
		ref->next_phys = n->next_phys;
		if (jeb->last_node == n)
			jeb->last_node = ref;
		if (jeb->gc_node == n) {
			/* gc will be happy continuing gc on this node */
			jeb->gc_node = ref;
		}
		spin_unlock(&c->erase_completion_lock);

		jffs2_free_raw_node_ref(n);
	}

	/* Also merge with the previous node in the list, if there is one
	   and that one is obsolete */
	if (ref != jeb->first_node) {
		struct jffs2_raw_node_ref *p = jeb->first_node;

		spin_lock(&c->erase_completion_lock);

		while (p->next_phys != ref)
			p = p->next_phys;

		if (ref_obsolete(p) && !ref->next_in_ino) {
			p->__totlen += ref->__totlen;
			if (jeb->last_node == ref) {
				jeb->last_node = p;
			}
			if (jeb->gc_node == ref) {
				/* gc will be happy continuing gc on this node */
				jeb->gc_node = p;
			}
			p->next_phys = ref->next_phys;
			jffs2_free_raw_node_ref(ref);
		}
		spin_unlock(&c->erase_completion_lock);
	}
 out_erase_sem:
	up(&c->erase_free_sem);
}

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;

	if (c->unchecked_size) {
		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino));
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually erased, it is no longer counted as dirty_space
	 * but it is still counted in c->nr_erasing_blocks, so we add it and subtract it
	 * again with c->nr_erasing_blocks * c->sector_size.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
	 * This helps us to force gc and eventually pick a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

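	/* In words: wake the GC thread once the pool of free and soon-to-be-free
	 * blocks drops below the resv_blocks_gctrigger threshold, provided there
	 * is more than nospc_dirty_size of reclaimable dirty space for GC to eat.
	 */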
	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
			(dirty > c->nospc_dirty_size))
		ret = 1;

	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));

	return ret;
}