Matching lines in fs/jffs2/nodemgmt.c (search query: +full:1 +full:c)

25 static int jffs2_rp_can_write(struct jffs2_sb_info *c)
28 struct jffs2_mount_opts *opts = &c->mount_opts;
30 avail = c->dirty_size + c->free_size + c->unchecked_size +
31 c->erasing_size - c->resv_blocks_write * c->sector_size
32 - c->nospc_dirty_size;
35 jffs2_dbg(1, "rpsize %u, dirty_size %u, free_size %u, "
38 opts->rp_size, c->dirty_size, c->free_size,
39 c->erasing_size, c->unchecked_size,
40 c->nr_erasing_blocks, avail, c->nospc_dirty_size);
43 return 1;
47 return 1;
49 jffs2_dbg(1, "forbid writing\n");
53 static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
58 * @c: superblock info
78 int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
82 int blocksneeded = c->resv_blocks_write;
86 jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
87 mutex_lock(&c->alloc_sem);
89 jffs2_dbg(1, "%s(): alloc sem got\n", __func__);
91 spin_lock(&c->erase_completion_lock);
97 if (prio != ALLOC_DELETION && !jffs2_rp_can_write(c)) {
104 while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
109 * those blocks are counted in c->nr_erasing_blocks.
111 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
112 * with c->nr_erasing_blocks * c->sector_size again.
113 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
119 dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
120 if (dirty < c->nospc_dirty_size) {
121 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
122 jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n",
126 jffs2_dbg(1, "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
127 dirty, c->unchecked_size,
128 c->sector_size);
130 spin_unlock(&c->erase_completion_lock);
131 mutex_unlock(&c->alloc_sem);
144 avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
145 if ( (avail / c->sector_size) <= blocksneeded) {
146 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
147 jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n",
152 jffs2_dbg(1, "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
153 avail, blocksneeded * c->sector_size);
154 spin_unlock(&c->erase_completion_lock);
155 mutex_unlock(&c->alloc_sem);
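The gate above differs from the dirty-space check at line 120: there the question is whether enough reclaimable dirty space exists to make a GC pass worthwhile, here it is whether whole blocks would remain even after GC. A small illustration under assumed values (not from the source):

/* Illustrative: avail = free + dirty + erasing + unchecked = 0x90000 and
 * sector_size = 0x20000, so avail / c->sector_size = 4; with blocksneeded
 * (resv_blocks_write) = 5 the request fails with -ENOSPC, unless it is a
 * deletion and enough free plus erasing blocks are still available. */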
159 mutex_unlock(&c->alloc_sem);
161 jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
162 c->nr_free_blocks, c->nr_erasing_blocks,
163 c->free_size, c->dirty_size, c->wasted_size,
164 c->used_size, c->erasing_size, c->bad_size,
165 c->free_size + c->dirty_size +
166 c->wasted_size + c->used_size +
167 c->erasing_size + c->bad_size,
168 c->flash_size);
169 spin_unlock(&c->erase_completion_lock);
171 ret = jffs2_garbage_collect_pass(c);
174 spin_lock(&c->erase_completion_lock);
175 if (c->nr_erasing_blocks &&
176 list_empty(&c->erase_pending_list) &&
177 list_empty(&c->erase_complete_list)) {
180 add_wait_queue(&c->erase_wait, &wait);
181 jffs2_dbg(1, "%s waiting for erase to complete\n",
183 spin_unlock(&c->erase_completion_lock);
186 remove_wait_queue(&c->erase_wait, &wait);
188 spin_unlock(&c->erase_completion_lock);
197 mutex_lock(&c->alloc_sem);
198 spin_lock(&c->erase_completion_lock);
201 ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
203 jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
208 spin_unlock(&c->erase_completion_lock);
210 ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
212 mutex_unlock(&c->alloc_sem);
216 int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
222 jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
225 spin_lock(&c->erase_completion_lock);
226 ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
228 jffs2_dbg(1, "%s(): looping, ret is %d\n",
231 spin_unlock(&c->erase_completion_lock);
239 ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
247 static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
250 if (c->nextblock == NULL) {
251 jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n",
257 c->dirty_size += jeb->wasted_size;
258 c->wasted_size -= jeb->wasted_size;
261 if (VERYDIRTY(c, jeb->dirty_size)) {
262 jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
265 list_add_tail(&jeb->list, &c->very_dirty_list);
267 jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
270 list_add_tail(&jeb->list, &c->dirty_list);
273 jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
276 list_add_tail(&jeb->list, &c->clean_list);
278 c->nextblock = NULL;
284 static int jffs2_find_nextblock(struct jffs2_sb_info *c)
290 if (list_empty(&c->free_list)) {
292 if (!c->nr_erasing_blocks &&
293 !list_empty(&c->erasable_list)) {
296 ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
297 list_move_tail(&ejeb->list, &c->erase_pending_list);
298 c->nr_erasing_blocks++;
299 jffs2_garbage_collect_trigger(c);
300 jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n",
304 if (!c->nr_erasing_blocks &&
305 !list_empty(&c->erasable_pending_wbuf_list)) {
306 jffs2_dbg(1, "%s(): Flushing write buffer\n",
308 /* c->nextblock is NULL, no update to c->nextblock allowed */
309 spin_unlock(&c->erase_completion_lock);
310 jffs2_flush_wbuf_pad(c);
311 spin_lock(&c->erase_completion_lock);
316 if (!c->nr_erasing_blocks) {
320 c->nr_erasing_blocks, c->nr_free_blocks,
321 str_yes_no(list_empty(&c->erasable_list)),
322 str_yes_no(list_empty(&c->erasing_list)),
323 str_yes_no(list_empty(&c->erase_pending_list)));
327 spin_unlock(&c->erase_completion_lock);
329 jffs2_erase_pending_blocks(c, 1);
330 spin_lock(&c->erase_completion_lock);
338 next = c->free_list.next;
340 c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
341 c->nr_free_blocks--;
343 jffs2_sum_reset_collected(c->summary); /* reset collected summary */
347 if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
348 c->wbuf_ofs = 0xffffffff;
351 jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
352 __func__, c->nextblock->offset);
358 static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
361 struct jffs2_eraseblock *jeb = c->nextblock;
372 reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
376 c->summary->sum_size, sumsize);
381 if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
385 if (jffs2_sum_is_disabled(c->summary)) {
392 ret = jffs2_sum_write_sumnode(c);
397 if (jffs2_sum_is_disabled(c->summary)) {
405 jffs2_close_nextblock(c, jeb);
408 reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
417 if (jffs2_wbuf_dirty(c)) {
418 spin_unlock(&c->erase_completion_lock);
419 jffs2_dbg(1, "%s(): Flushing write buffer\n",
421 jffs2_flush_wbuf_pad(c);
422 spin_lock(&c->erase_completion_lock);
423 jeb = c->nextblock;
427 spin_unlock(&c->erase_completion_lock);
429 ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
432 we hold c->alloc_sem anyway. In fact, it's not entirely clear why
433 we hold c->erase_completion_lock in the majority of this function...
435 spin_lock(&c->erase_completion_lock);
441 jffs2_link_node_ref(c, jeb,
442 (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
446 c->dirty_size -= waste;
448 c->wasted_size += waste;
450 jffs2_close_nextblock(c, jeb);
457 ret = jffs2_find_nextblock(c);
461 jeb = c->nextblock;
463 if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
469 /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
473 if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
478 already set c->nextblock so that jffs2_mark_node_obsolete()
481 spin_unlock(&c->erase_completion_lock);
482 jffs2_mark_node_obsolete(c, jeb->first_node);
483 spin_lock(&c->erase_completion_lock);
486 jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n",
488 *len, jeb->offset + (c->sector_size - jeb->free_size));
494 * @c: superblock info
507 struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
514 jeb = &c->blocks[ofs / c->sector_size];
516 jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n",
518 #if 1
519 /* Allow non-obsolete nodes only to be added at the end of c->nextblock,
520 if c->nextblock is set. Note that wbuf.c will file obsolete nodes
521 even after refiling c->nextblock */
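/* Context for the test below: the low two bits of ofs carry the node's ref
 * state (REF_OBSOLETE and friends), so (ofs & 3) reads the state while
 * (ofs & ~3) recovers the raw flash offset for the comparison against the
 * end of c->nextblock. */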
522 if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
523 && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
526 if (c->nextblock)
527 pr_warn("nextblock 0x%08x", c->nextblock->offset);
531 jeb->offset + (c->sector_size - jeb->free_size));
535 spin_lock(&c->erase_completion_lock);
537 new = jffs2_link_node_ref(c, jeb, ofs, len, ic);
541 jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
544 if (jffs2_wbuf_dirty(c)) {
546 spin_unlock(&c->erase_completion_lock);
547 jffs2_flush_wbuf_pad(c);
548 spin_lock(&c->erase_completion_lock);
551 list_add_tail(&jeb->list, &c->clean_list);
552 c->nextblock = NULL;
554 jffs2_dbg_acct_sanity_check_nolock(c,jeb);
555 jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
557 spin_unlock(&c->erase_completion_lock);
563 void jffs2_complete_reservation(struct jffs2_sb_info *c)
565 jffs2_dbg(1, "jffs2_complete_reservation()\n");
566 spin_lock(&c->erase_completion_lock);
567 jffs2_garbage_collect_trigger(c);
568 spin_unlock(&c->erase_completion_lock);
569 mutex_unlock(&c->alloc_sem);
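jffs2_reserve_space() and jffs2_complete_reservation() bracket every write to the medium; a minimal caller sketch under assumed sizes (ri and datalen are placeholders, error handling is elided):

	uint32_t alloc_len;
	int ret;

	/* Ask for room for the node header plus payload. */
	ret = jffs2_reserve_space(c, sizeof(ri) + datalen, &alloc_len,
				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
	if (ret)
		return ret;

	/* ... write the node into the space just reserved ... */

	/* Drop alloc_sem and let the GC thread re-evaluate its trigger. */
	jffs2_complete_reservation(c);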
578 jffs2_dbg(1, "%p is on list at %p\n", obj, head); in on_list()
579 return 1; in on_list()
586 void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref) in jffs2_mark_node_obsolete() argument
600 jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n", in jffs2_mark_node_obsolete()
604 blocknr = ref->flash_offset / c->sector_size; in jffs2_mark_node_obsolete()
605 if (blocknr >= c->nr_blocks) { in jffs2_mark_node_obsolete()
610 jeb = &c->blocks[blocknr]; in jffs2_mark_node_obsolete()
612 if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) && in jffs2_mark_node_obsolete()
613 !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) { in jffs2_mark_node_obsolete()
620 mutex_lock(&c->erase_free_sem); in jffs2_mark_node_obsolete()
623 spin_lock(&c->erase_completion_lock); in jffs2_mark_node_obsolete()
625 freed_len = ref_totlen(c, jeb, ref); in jffs2_mark_node_obsolete()
634 jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n", in jffs2_mark_node_obsolete()
637 c->unchecked_size -= freed_len; in jffs2_mark_node_obsolete()
645 jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ", in jffs2_mark_node_obsolete()
648 c->used_size -= freed_len; in jffs2_mark_node_obsolete()
652 if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) { in jffs2_mark_node_obsolete()
653 jffs2_dbg(1, "Dirtying\n"); in jffs2_mark_node_obsolete()
656 c->dirty_size += freed_len; in jffs2_mark_node_obsolete()
660 if (on_list(&jeb->list, &c->bad_used_list)) { in jffs2_mark_node_obsolete()
661 jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n", in jffs2_mark_node_obsolete()
665 jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n", in jffs2_mark_node_obsolete()
669 c->dirty_size += jeb->wasted_size; in jffs2_mark_node_obsolete()
670 c->wasted_size -= jeb->wasted_size; in jffs2_mark_node_obsolete()
675 jffs2_dbg(1, "Wasting\n"); in jffs2_mark_node_obsolete()
678 c->wasted_size += freed_len; in jffs2_mark_node_obsolete()
682 jffs2_dbg_acct_sanity_check_nolock(c, jeb); in jffs2_mark_node_obsolete()
683 jffs2_dbg_acct_paranoia_check_nolock(c, jeb); in jffs2_mark_node_obsolete()
685 if (c->flags & JFFS2_SB_FLAG_SCANNING) { in jffs2_mark_node_obsolete()
691 spin_unlock(&c->erase_completion_lock); in jffs2_mark_node_obsolete()
696 if (jeb == c->nextblock) { in jffs2_mark_node_obsolete()
700 if (jeb == c->gcblock) { in jffs2_mark_node_obsolete()
701 jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", in jffs2_mark_node_obsolete()
703 c->gcblock = NULL; in jffs2_mark_node_obsolete()
705 jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", in jffs2_mark_node_obsolete()
709 if (jffs2_wbuf_dirty(c)) { in jffs2_mark_node_obsolete()
710 jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n"); in jffs2_mark_node_obsolete()
711 list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list); in jffs2_mark_node_obsolete()
716 jffs2_dbg(1, "...and adding to erase_pending_list\n"); in jffs2_mark_node_obsolete()
717 list_add_tail(&jeb->list, &c->erase_pending_list); in jffs2_mark_node_obsolete()
718 c->nr_erasing_blocks++; in jffs2_mark_node_obsolete()
719 jffs2_garbage_collect_trigger(c); in jffs2_mark_node_obsolete()
723 jffs2_dbg(1, "...and adding to erasable_list\n"); in jffs2_mark_node_obsolete()
724 list_add_tail(&jeb->list, &c->erasable_list); in jffs2_mark_node_obsolete()
727 jffs2_dbg(1, "Done OK\n"); in jffs2_mark_node_obsolete()
728 } else if (jeb == c->gcblock) { in jffs2_mark_node_obsolete()
732 jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", in jffs2_mark_node_obsolete()
735 jffs2_dbg(1, "...and adding to dirty_list\n"); in jffs2_mark_node_obsolete()
736 list_add_tail(&jeb->list, &c->dirty_list); in jffs2_mark_node_obsolete()
737 } else if (VERYDIRTY(c, jeb->dirty_size) && in jffs2_mark_node_obsolete()
738 !VERYDIRTY(c, jeb->dirty_size - addedsize)) { in jffs2_mark_node_obsolete()
739 jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", in jffs2_mark_node_obsolete()
742 jffs2_dbg(1, "...and adding to very_dirty_list\n"); in jffs2_mark_node_obsolete()
743 list_add_tail(&jeb->list, &c->very_dirty_list); in jffs2_mark_node_obsolete()
745 jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
750 spin_unlock(&c->erase_completion_lock);
752 if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
753 (c->flags & JFFS2_SB_FLAG_BUILDING)) {
761 by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */
763 jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n",
765 ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
782 jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n",
788 ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
814 spin_lock(&c->erase_completion_lock);
826 jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
829 jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
834 jffs2_del_ino_cache(c, ic);
837 spin_unlock(&c->erase_completion_lock);
841 mutex_unlock(&c->erase_free_sem);
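Reading the matched lines above (many intervening lines are elided in this listing): the first half of jffs2_mark_node_obsolete() adjusts the space accounting, moving the node's length from used or unchecked into dirty or wasted, and refiles the eraseblock onto the appropriate list under erase_completion_lock; when jffs2_can_mark_obsolete() holds and the filesystem is writable outside scan or build, it then also reads the node header back from flash and rewrites it so the node is recognisably obsolete on the medium after a remount or crash.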
844 int jffs2_thread_should_wake(struct jffs2_sb_info *c)
851 if (!list_empty(&c->erase_complete_list) ||
852 !list_empty(&c->erase_pending_list))
853 return 1;
855 if (c->unchecked_size) {
856 jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, check_ino #%d\n",
857 c->unchecked_size, c->check_ino);
858 return 1;
862 * those blocks are counted in c->nr_erasing_blocks.
864 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
865 * with c->nr_erasing_blocks * c->sector_size again.
866 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
869 dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
871 if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
872 (dirty > c->nospc_dirty_size))
873 ret = 1;
875 list_for_each_entry(jeb, &c->very_dirty_list, list) {
877 if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
878 ret = 1;
885 jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
886 __func__, c->nr_free_blocks, c->nr_erasing_blocks,
887 c->dirty_size, nr_very_dirty, str_yes_no(ret));
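In summary, the matched lines show the GC thread being woken for any of: erases pending or completed, unchecked space still awaiting CRC checking, a shortage of free plus erasing blocks while enough reclaimable dirty space exists (dirty > nospc_dirty_size), or the very_dirty_list reaching vdirty_blocks_gctrigger entries.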