fs/jffs2/nodemgmt.c
1 /*
2  * JFFS2 -- Journalling Flash File System, Version 2.
3  *
4  * Copyright © 2001-2007 Red Hat, Inc.
5  *
6  * Created by David Woodhouse <dwmw2@infradead.org>
7  *
8  * For licensing information, see the file 'LICENCE' in this directory.
9  *
10  */
11
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 #include <linux/mtd/mtd.h>
15 #include <linux/compiler.h>
16 #include <linux/sched.h> /* For cond_resched() */
17 #include "nodelist.h"
18 #include "debug.h"
19
20 /**
21  *      jffs2_reserve_space - request physical space to write nodes to flash
22  *      @c: superblock info
23  *      @minsize: Minimum acceptable size of allocation
24  *      @len: Returned value of allocation length
25  *      @prio: Allocation type - ALLOC_{NORMAL,DELETION}
26  *
27  *      Requests a block of physical space on the flash. Returns zero for success
28  *      and puts the actual allocation length into @len, or returns -ENOSPC
29  *      or another error if appropriate.
30  *
31  *      If it returns zero, jffs2_reserve_space() also downs the per-filesystem
32  *      allocation semaphore, to prevent more than one allocation from being
33  *      active at any time. The semaphore is later released by jffs2_complete_reservation().
34  *
35  *      jffs2_reserve_space() may trigger garbage collection in order to make room
36  *      for the requested allocation.
37  */
38
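/*
 * Editorial sketch (not part of the original file): roughly how callers such as
 * write.c use the reservation protocol described above. The variable names are
 * illustrative assumptions, not code from this driver:
 *
 *      uint32_t alloc_len;
 *      int ret = jffs2_reserve_space(c, total_node_len, &alloc_len,
 *                                    ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 *      if (ret)
 *              return ret;
 *      ... write at most alloc_len bytes with jffs2_flash_write(), then report
 *          the new node with jffs2_add_physical_node_ref() ...
 *      jffs2_complete_reservation(c);   releases c->alloc_sem, may wake the GC thread
 */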
39 static int jffs2_do_reserve_space(struct jffs2_sb_info *c,  uint32_t minsize,
40                                   uint32_t *len, uint32_t sumsize);
41
42 int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
43                         uint32_t *len, int prio, uint32_t sumsize)
44 {
45         int ret = -EAGAIN;
46         int blocksneeded = c->resv_blocks_write;
47         /* align it */
48         minsize = PAD(minsize);
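        /*
         * Editorial note: PAD() (from nodelist.h) rounds a length up to the next
         * 4-byte boundary, e.g. PAD(0x11) == 0x14 and PAD(0x14) == 0x14, so every
         * reservation handed out below is word-aligned on flash.
         */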
49
50         D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
51         mutex_lock(&c->alloc_sem);
52
53         D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));
54
55         spin_lock(&c->erase_completion_lock);
56
57         /* this needs a little more thought (true <tglx> :)) */
58         while(ret == -EAGAIN) {
59                 while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
60                         uint32_t dirty, avail;
61
62                         /* calculate real dirty size
63                          * dirty_size contains blocks on erase_pending_list
64                          * those blocks are counted in c->nr_erasing_blocks.
65                          * Once a block is actually erased, it is no longer counted as dirty_space,
66                          * but it is still counted in c->nr_erasing_blocks, so we add erasing_size and
67                          * then subtract c->nr_erasing_blocks * c->sector_size again.
68                          * Blocks on the erasable_list are counted in dirty_size, but not in c->nr_erasing_blocks.
69                          * This helps us to force gc and eventually pick a clean block to spread the load.
70                          * We add unchecked_size here, as we hopefully will find some space to use.
71                          * This will affect the sum only once, as gc first finishes checking
72                          * of nodes.
73                          */
74                         dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
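                        /*
                         * Editorial example with made-up numbers (64KiB eraseblocks assumed):
                         * dirty_size = 0x30000, erasing_size = 0x20000, nr_erasing_blocks = 2
                         * and unchecked_size = 0x4000 give
                         *   dirty = 0x30000 + 0x20000 - 2 * 0x10000 + 0x4000 = 0x34000,
                         * i.e. the two blocks being erased cancel out and only the remaining
                         * dirty and unchecked space is counted.
                         */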
75                         if (dirty < c->nospc_dirty_size) {
76                                 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
77                                         D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
78                                         break;
79                                 }
80                                 D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
81                                           dirty, c->unchecked_size, c->nospc_dirty_size));
82
83                                 spin_unlock(&c->erase_completion_lock);
84                                 mutex_unlock(&c->alloc_sem);
85                                 return -ENOSPC;
86                         }
87
88                         /* Calc possibly available space. Possibly available means that we
89                          * don't know whether the unchecked size contains obsoleted nodes, which could give
90                          * us some more usable space. This will affect the sum only once, as gc first
91                          * finishes checking of nodes.
92                          * Return -ENOSPC if the maximum possibly available space is less than or equal to
93                          * blocksneeded * sector_size.
94                          * This prevents endless gc looping on a filesystem which is nearly full, even if
95                          * the check above passes.
96                          */
97                         avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
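                        /*
                         * Editorial example, continuing the made-up numbers above with
                         * free_size = 0x50000:
                         *   avail = 0x50000 + 0x30000 + 0x20000 + 0x4000 = 0xa4000,
                         * i.e. 10 whole 64KiB blocks; with blocksneeded = 10 the check below
                         * would still return -ENOSPC, since even perfect GC could not recover
                         * more space than that.
                         */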
98                         if ( (avail / c->sector_size) <= blocksneeded) {
99                                 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
100                                         D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
101                                         break;
102                                 }
103
104                                 D1(printk(KERN_DEBUG "max. available size 0x%08x  < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
105                                           avail, blocksneeded * c->sector_size));
106                                 spin_unlock(&c->erase_completion_lock);
107                                 mutex_unlock(&c->alloc_sem);
108                                 return -ENOSPC;
109                         }
110
111                         mutex_unlock(&c->alloc_sem);
112
113                         D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
114                                   c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
115                                   c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
116                         spin_unlock(&c->erase_completion_lock);
117
118                         ret = jffs2_garbage_collect_pass(c);
119
120                         if (ret == -EAGAIN)
121                                 jffs2_erase_pending_blocks(c, 1);
122                         else if (ret)
123                                 return ret;
124
125                         cond_resched();
126
127                         if (signal_pending(current))
128                                 return -EINTR;
129
130                         mutex_lock(&c->alloc_sem);
131                         spin_lock(&c->erase_completion_lock);
132                 }
133
134                 ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
135                 if (ret) {
136                         D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
137                 }
138         }
139         spin_unlock(&c->erase_completion_lock);
140         if (!ret)
141                 ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
142         if (ret)
143                 mutex_unlock(&c->alloc_sem);
144         return ret;
145 }
146
147 int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
148                            uint32_t *len, uint32_t sumsize)
149 {
150         int ret = -EAGAIN;
151         minsize = PAD(minsize);
152
153         D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));
154
155         spin_lock(&c->erase_completion_lock);
156         while(ret == -EAGAIN) {
157                 ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
158                 if (ret) {
159                         D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
160                 }
161         }
162         spin_unlock(&c->erase_completion_lock);
163         if (!ret)
164                 ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
165
166         return ret;
167 }
168
169
170 /* Classify nextblock (clean, dirty or verydirty) and force selection of another one */
171
172 static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
173 {
174
175         if (c->nextblock == NULL) {
176                 D1(printk(KERN_DEBUG "jffs2_close_nextblock: Erase block at 0x%08x has already been placed in a list\n",
177                   jeb->offset));
178                 return;
179         }
180         /* Check if we have a dirty block now, or if it was dirty already */
181         if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
182                 c->dirty_size += jeb->wasted_size;
183                 c->wasted_size -= jeb->wasted_size;
184                 jeb->dirty_size += jeb->wasted_size;
185                 jeb->wasted_size = 0;
186                 if (VERYDIRTY(c, jeb->dirty_size)) {
187                         D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
188                           jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
189                         list_add_tail(&jeb->list, &c->very_dirty_list);
190                 } else {
191                         D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
192                           jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
193                         list_add_tail(&jeb->list, &c->dirty_list);
194                 }
195         } else {
196                 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
197                   jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
198                 list_add_tail(&jeb->list, &c->clean_list);
199         }
200         c->nextblock = NULL;
201
202 }
203
204 /* Select a new jeb for nextblock */
205
206 static int jffs2_find_nextblock(struct jffs2_sb_info *c)
207 {
208         struct list_head *next;
209
210         /* Take the next block off the 'free' list */
211
212         if (list_empty(&c->free_list)) {
213
214                 if (!c->nr_erasing_blocks &&
215                         !list_empty(&c->erasable_list)) {
216                         struct jffs2_eraseblock *ejeb;
217
218                         ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
219                         list_move_tail(&ejeb->list, &c->erase_pending_list);
220                         c->nr_erasing_blocks++;
221                         jffs2_erase_pending_trigger(c);
222                         D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
223                                   ejeb->offset));
224                 }
225
226                 if (!c->nr_erasing_blocks &&
227                         !list_empty(&c->erasable_pending_wbuf_list)) {
228                         D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
229                         /* c->nextblock is NULL, no update to c->nextblock allowed */
230                         spin_unlock(&c->erase_completion_lock);
231                         jffs2_flush_wbuf_pad(c);
232                         spin_lock(&c->erase_completion_lock);
233                         /* Have another go. It'll be on the erasable_list now */
234                         return -EAGAIN;
235                 }
236
237                 if (!c->nr_erasing_blocks) {
238                         /* Ouch. We're in GC, or we wouldn't have got here.
239                            And there's no space left. At all. */
240                         printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
241                                    c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
242                                    list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
243                         return -ENOSPC;
244                 }
245
246                 spin_unlock(&c->erase_completion_lock);
247                 /* Don't wait for it; just erase one right now */
248                 jffs2_erase_pending_blocks(c, 1);
249                 spin_lock(&c->erase_completion_lock);
250
251                 /* An erase may have failed, decreasing the
252                    amount of free space available. So we must
253                    restart from the beginning */
254                 return -EAGAIN;
255         }
256
257         next = c->free_list.next;
258         list_del(next);
259         c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
260         c->nr_free_blocks--;
261
262         jffs2_sum_reset_collected(c->summary); /* reset collected summary */
263
264         /* adjust write buffer offset, else we get a non-contiguous write bug */
265         if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
266                 c->wbuf_ofs = 0xffffffff;
267
268         D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));
269
270         return 0;
271 }
272
273 /* Called with alloc sem _and_ erase_completion_lock */
274 static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
275                                   uint32_t *len, uint32_t sumsize)
276 {
277         struct jffs2_eraseblock *jeb = c->nextblock;
278         uint32_t reserved_size;                         /* for summary information at the end of the jeb */
279         int ret;
280
281  restart:
282         reserved_size = 0;
283
284         if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
285                                                         /* NOSUM_SIZE means not to generate summary */
286
287                 if (jeb) {
288                         reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
289                         dbg_summary("minsize=%d , jeb->free=%d ,"
290                                                 "summary->size=%d , sumsize=%d\n",
291                                                 minsize, jeb->free_size,
292                                                 c->summary->sum_size, sumsize);
293                 }
294
295                 /* Is there enough space to write out the current node, or do we have to write
296                    out the summary information now, close this jeb and select a new nextblock? */
297                 if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
298                                         JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {
299
300                         /* Has summary been disabled for this jeb? */
301                         if (jffs2_sum_is_disabled(c->summary)) {
302                                 sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
303                                 goto restart;
304                         }
305
306                         /* Writing out the collected summary information */
307                         dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
308                         ret = jffs2_sum_write_sumnode(c);
309
310                         if (ret)
311                                 return ret;
312
313                         if (jffs2_sum_is_disabled(c->summary)) {
314                                 /* jffs2_sum_write_sumnode() couldn't write out the summary information;
315                                    disable summary for this jeb and free the collected information
316                                  */
317                                 sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
318                                 goto restart;
319                         }
320
321                         jffs2_close_nextblock(c, jeb);
322                         jeb = NULL;
323                         /* always keep a valid value in reserved_size */
324                         reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
325                 }
326         } else {
327                 if (jeb && minsize > jeb->free_size) {
328                         uint32_t waste;
329
330                         /* Skip the end of this block and file it as having some dirty space */
331                         /* If there's a pending write to it, flush now */
332
333                         if (jffs2_wbuf_dirty(c)) {
334                                 spin_unlock(&c->erase_completion_lock);
335                                 D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
336                                 jffs2_flush_wbuf_pad(c);
337                                 spin_lock(&c->erase_completion_lock);
338                                 jeb = c->nextblock;
339                                 goto restart;
340                         }
341
342                         spin_unlock(&c->erase_completion_lock);
343
344                         ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
345                         if (ret)
346                                 return ret;
347                         /* Just lock it again and continue. Nothing much can change because
348                            we hold c->alloc_sem anyway. In fact, it's not entirely clear why
349                            we hold c->erase_completion_lock in the majority of this function...
350                            but that's a question for another (more caffeine-rich) day. */
351                         spin_lock(&c->erase_completion_lock);
352
353                         waste = jeb->free_size;
354                         jffs2_link_node_ref(c, jeb,
355                                             (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
356                                             waste, NULL);
357                         /* FIXME: that made it count as dirty. Convert to wasted */
358                         jeb->dirty_size -= waste;
359                         c->dirty_size -= waste;
360                         jeb->wasted_size += waste;
361                         c->wasted_size += waste;
362
363                         jffs2_close_nextblock(c, jeb);
364                         jeb = NULL;
365                 }
366         }
367
368         if (!jeb) {
369
370                 ret = jffs2_find_nextblock(c);
371                 if (ret)
372                         return ret;
373
374                 jeb = c->nextblock;
375
376                 if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
377                         printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
378                         goto restart;
379                 }
380         }
381         /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
382            enough space */
383         *len = jeb->free_size - reserved_size;
384
385         if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
386             !jeb->first_node->next_in_ino) {
387                 /* Only node in it beforehand was a CLEANMARKER node (we think).
388                    So mark it obsolete now that there's going to be another node
389                    in the block. This will reduce used_size to zero but we've
390                    already set c->nextblock so that jffs2_mark_node_obsolete()
391                    won't try to refile it to the dirty_list.
392                 */
393                 spin_unlock(&c->erase_completion_lock);
394                 jffs2_mark_node_obsolete(c, jeb->first_node);
395                 spin_lock(&c->erase_completion_lock);
396         }
397
398         D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n",
399                   *len, jeb->offset + (c->sector_size - jeb->free_size)));
400         return 0;
401 }
402
403 /**
404  *      jffs2_add_physical_node_ref - add a physical node reference to the list
405  *      @c: superblock info
406  *      @ofs: flash offset of the new node, with the REF_* status flags in its low two bits
407  *      @len: length of this physical node
408  *      @ic: inode cache to link the new node reference into, or NULL
409  *      Should only be used to report nodes for which space has been allocated
410  *      by jffs2_reserve_space.
411  *
412  *      Must be called with the alloc_sem held.
413  */
414
415 struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
416                                                        uint32_t ofs, uint32_t len,
417                                                        struct jffs2_inode_cache *ic)
418 {
419         struct jffs2_eraseblock *jeb;
420         struct jffs2_raw_node_ref *new;
421
422         jeb = &c->blocks[ofs / c->sector_size];
423
424         D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n",
425                   ofs & ~3, ofs & 3, len));
426 #if 1
427         /* Allow non-obsolete nodes only to be added at the end of c->nextblock, 
428            if c->nextblock is set. Note that wbuf.c will file obsolete nodes
429            even after refiling c->nextblock */
430         if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
431             && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
432                 printk(KERN_WARNING "argh. node added in wrong place at 0x%08x(%d)\n", ofs & ~3, ofs & 3);
433                 if (c->nextblock)
434                         printk(KERN_WARNING "nextblock 0x%08x", c->nextblock->offset);
435                 else
436                         printk(KERN_WARNING "No nextblock");
437                 printk(", expected at %08x\n", jeb->offset + (c->sector_size - jeb->free_size));
438                 return ERR_PTR(-EINVAL);
439         }
440 #endif
441         spin_lock(&c->erase_completion_lock);
442
443         new = jffs2_link_node_ref(c, jeb, ofs, len, ic);
444
445         if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
446                 /* If it lives on the dirty_list, jffs2_reserve_space will put it there */
447                 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
448                           jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
449                 if (jffs2_wbuf_dirty(c)) {
450                         /* Flush the last write in the block if it's outstanding */
451                         spin_unlock(&c->erase_completion_lock);
452                         jffs2_flush_wbuf_pad(c);
453                         spin_lock(&c->erase_completion_lock);
454                 }
455
456                 list_add_tail(&jeb->list, &c->clean_list);
457                 c->nextblock = NULL;
458         }
459         jffs2_dbg_acct_sanity_check_nolock(c,jeb);
460         jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
461
462         spin_unlock(&c->erase_completion_lock);
463
464         return new;
465 }
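/*
 * Editorial sketch (not from this file): how the write path typically pairs
 * jffs2_add_physical_node_ref() with jffs2_reserve_space(). The variable names
 * and the write_ofs() helper are illustrative assumptions; the REF_* state is
 * carried in the low two bits of the offset argument:
 *
 *      jffs2_reserve_space(c, len, &alloc_len, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 *      flash_ofs = write_ofs(c);
 *      jffs2_flash_write(c, flash_ofs, len, &retlen, buf);
 *      jffs2_add_physical_node_ref(c, flash_ofs | REF_NORMAL, PAD(len), f->inocache);
 *      jffs2_complete_reservation(c);
 */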
466
467
468 void jffs2_complete_reservation(struct jffs2_sb_info *c)
469 {
470         D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
471         jffs2_garbage_collect_trigger(c);
472         mutex_unlock(&c->alloc_sem);
473 }
474
475 static inline int on_list(struct list_head *obj, struct list_head *head)
476 {
477         struct list_head *this;
478
479         list_for_each(this, head) {
480                 if (this == obj) {
481                         D1(printk("%p is on list at %p\n", obj, head));
482                         return 1;
483
484                 }
485         }
486         return 0;
487 }
488
489 void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
490 {
491         struct jffs2_eraseblock *jeb;
492         int blocknr;
493         struct jffs2_unknown_node n;
494         int ret, addedsize;
495         size_t retlen;
496         uint32_t freed_len;
497
498         if(unlikely(!ref)) {
499                 printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
500                 return;
501         }
502         if (ref_obsolete(ref)) {
503                 D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
504                 return;
505         }
506         blocknr = ref->flash_offset / c->sector_size;
507         if (blocknr >= c->nr_blocks) {
508                 printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
509                 BUG();
510         }
511         jeb = &c->blocks[blocknr];
512
513         if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
514             !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
515                 /* Hm. This may confuse static lock analysis. If any of the above
516                    three conditions is false, we're going to return from this
517                    function without actually obliterating any nodes or freeing
518                    any jffs2_raw_node_refs. So we don't need to stop erases from
519                    happening, or protect against people holding an obsolete
520                    jffs2_raw_node_ref without the erase_completion_lock. */
521                 mutex_lock(&c->erase_free_sem);
522         }
523
524         spin_lock(&c->erase_completion_lock);
525
526         freed_len = ref_totlen(c, jeb, ref);
527
528         if (ref_flags(ref) == REF_UNCHECKED) {
529                 D1(if (unlikely(jeb->unchecked_size < freed_len)) {
530                         printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
531                                freed_len, blocknr, ref->flash_offset, jeb->unchecked_size);
532                         BUG();
533                 })
534                 D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
535                 jeb->unchecked_size -= freed_len;
536                 c->unchecked_size -= freed_len;
537         } else {
538                 D1(if (unlikely(jeb->used_size < freed_len)) {
539                         printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
540                                freed_len, blocknr, ref->flash_offset, jeb->used_size);
541                         BUG();
542                 })
543                 D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
544                 jeb->used_size -= freed_len;
545                 c->used_size -= freed_len;
546         }
547
548         // Take care that wasted size is taken into account
549         if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
550                 D1(printk("Dirtying\n"));
551                 addedsize = freed_len;
552                 jeb->dirty_size += freed_len;
553                 c->dirty_size += freed_len;
554
555                 /* Convert wasted space to dirty, if not a bad block */
556                 if (jeb->wasted_size) {
557                         if (on_list(&jeb->list, &c->bad_used_list)) {
558                                 D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
559                                           jeb->offset));
560                                 addedsize = 0; /* To fool the refiling code later */
561                         } else {
562                                 D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
563                                           jeb->wasted_size, jeb->offset));
564                                 addedsize += jeb->wasted_size;
565                                 jeb->dirty_size += jeb->wasted_size;
566                                 c->dirty_size += jeb->wasted_size;
567                                 c->wasted_size -= jeb->wasted_size;
568                                 jeb->wasted_size = 0;
569                         }
570                 }
571         } else {
572                 D1(printk("Wasting\n"));
573                 addedsize = 0;
574                 jeb->wasted_size += freed_len;
575                 c->wasted_size += freed_len;
576         }
577         ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
578
579         jffs2_dbg_acct_sanity_check_nolock(c, jeb);
580         jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
581
582         if (c->flags & JFFS2_SB_FLAG_SCANNING) {
583                 /* Flash scanning is in progress. Don't muck about with the block
584                    lists because they're not ready yet, and don't actually
585                    obliterate nodes that look obsolete. If they weren't
586                    marked obsolete on the flash at the time they _became_
587                    obsolete, there was probably a reason for that. */
588                 spin_unlock(&c->erase_completion_lock);
589                 /* We didn't lock the erase_free_sem */
590                 return;
591         }
592
593         if (jeb == c->nextblock) {
594                 D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
595         } else if (!jeb->used_size && !jeb->unchecked_size) {
596                 if (jeb == c->gcblock) {
597                         D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
598                         c->gcblock = NULL;
599                 } else {
600                         D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
601                         list_del(&jeb->list);
602                 }
603                 if (jffs2_wbuf_dirty(c)) {
604                         D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
605                         list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
606                 } else {
607                         if (jiffies & 127) {
608                                 /* Most of the time, we just erase it immediately. Otherwise we
609                                    spend ages scanning it on mount, etc. */
610                                 D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
611                                 list_add_tail(&jeb->list, &c->erase_pending_list);
612                                 c->nr_erasing_blocks++;
613                                 jffs2_erase_pending_trigger(c);
614                         } else {
615                                 /* Sometimes, however, we leave it elsewhere so it doesn't get
616                                    immediately reused, and we spread the load a bit. */
617                                 D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
618                                 list_add_tail(&jeb->list, &c->erasable_list);
619                         }
620                 }
621                 D1(printk(KERN_DEBUG "Done OK\n"));
622         } else if (jeb == c->gcblock) {
623                 D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
624         } else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
625                 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
626                 list_del(&jeb->list);
627                 D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
628                 list_add_tail(&jeb->list, &c->dirty_list);
629         } else if (VERYDIRTY(c, jeb->dirty_size) &&
630                    !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
631                 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
632                 list_del(&jeb->list);
633                 D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
634                 list_add_tail(&jeb->list, &c->very_dirty_list);
635         } else {
636                 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
637                           jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
638         }
639
640         spin_unlock(&c->erase_completion_lock);
641
642         if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
643                 (c->flags & JFFS2_SB_FLAG_BUILDING)) {
644                 /* We didn't lock the erase_free_sem */
645                 return;
646         }
647
648         /* The erase_free_sem is locked, and has been since before we marked the node obsolete
649            and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
650            the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
651            by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */
652
653         D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
654         ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
655         if (ret) {
656                 printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
657                 goto out_erase_sem;
658         }
659         if (retlen != sizeof(n)) {
660                 printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
661                 goto out_erase_sem;
662         }
663         if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
664                 printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
665                 goto out_erase_sem;
666         }
667         if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
668                 D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
669                 goto out_erase_sem;
670         }
671         /* XXX FIXME: This is ugly now */
672         n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
673         ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
674         if (ret) {
675                 printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
676                 goto out_erase_sem;
677         }
678         if (retlen != sizeof(n)) {
679                 printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
680                 goto out_erase_sem;
681         }
682
683         /* Nodes which have been marked obsolete no longer need to be
684            associated with any inode. Remove them from the per-inode list.
685
686            Note we can't do this for NAND at the moment because we need
687            obsolete dirent nodes to stay on the lists, because of the
688            horridness in jffs2_garbage_collect_deletion_dirent(). Also
689            because we delete the inocache, and on NAND we need that to
690            stay around until all the nodes are actually erased, in order
691            to stop us from giving the same inode number to another newly
692            created inode. */
693         if (ref->next_in_ino) {
694                 struct jffs2_inode_cache *ic;
695                 struct jffs2_raw_node_ref **p;
696
697                 spin_lock(&c->erase_completion_lock);
698
699                 ic = jffs2_raw_ref_to_ic(ref);
700                 for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
701                         ;
702
703                 *p = ref->next_in_ino;
704                 ref->next_in_ino = NULL;
705
706                 switch (ic->class) {
707 #ifdef CONFIG_JFFS2_FS_XATTR
708                         case RAWNODE_CLASS_XATTR_DATUM:
709                                 jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
710                                 break;
711                         case RAWNODE_CLASS_XATTR_REF:
712                                 jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
713                                 break;
714 #endif
715                         default:
716                                 if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
717                                         jffs2_del_ino_cache(c, ic);
718                                 break;
719                 }
720                 spin_unlock(&c->erase_completion_lock);
721         }
722
723  out_erase_sem:
724         mutex_unlock(&c->erase_free_sem);
725 }
726
727 int jffs2_thread_should_wake(struct jffs2_sb_info *c)
728 {
729         int ret = 0;
730         uint32_t dirty;
731         int nr_very_dirty = 0;
732         struct jffs2_eraseblock *jeb;
733
734         if (c->unchecked_size) {
735                 D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
736                           c->unchecked_size, c->checked_ino));
737                 return 1;
738         }
739
740         /* dirty_size contains blocks on erase_pending_list
741          * those blocks are counted in c->nr_erasing_blocks.
742          * Once a block is actually erased, it is no longer counted as dirty_space,
743          * but it is still counted in c->nr_erasing_blocks, so we add erasing_size and
744          * then subtract c->nr_erasing_blocks * c->sector_size again.
745          * Blocks on the erasable_list are counted in dirty_size, but not in c->nr_erasing_blocks.
746          * This helps us to force gc and eventually pick a clean block to spread the load.
747          */
748         dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
749
750         if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
751                         (dirty > c->nospc_dirty_size))
752                 ret = 1;
753
754         list_for_each_entry(jeb, &c->very_dirty_list, list) {
755                 nr_very_dirty++;
756                 if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
757                         ret = 1;
758                         /* In debug mode, actually go through and count them all */
759                         D1(continue);
760                         break;
761                 }
762         }
763
764         D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
765                   c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, nr_very_dirty, ret?"yes":"no"));
766
767         return ret;
768 }