diff -auNrp mtd_9_28_EBH_1:1_ect/fs/jffs2/build.c mtd_9_28_EBH_1:1_ect_wl/fs/jffs2/build.c --- mtd_9_28_EBH_1:1_ect/fs/jffs2/build.c 2005-09-29 09:34:08.000000000 +0800 +++ mtd_9_28_EBH_1:1_ect_wl/fs/jffs2/build.c 2005-09-29 13:24:02.000000000 +0800 @@ -305,6 +305,19 @@ static void jffs2_calc_trigger_levels(st c->nospc_dirty_size); } +static void jffs2_init_hash_tables(struct jffs2_sb_info *c) +{ + int i; + + for (i=0; i<HASH_SIZE; i++) { + INIT_LIST_HEAD(&(c->used_blocks[i].chain)); + INIT_LIST_HEAD(&(c->free_blocks[i].chain)); + } + c->used_blocks_current_index = HASH_SIZE; + c->free_blocks_current_index = HASH_SIZE; + return; +} + int jffs2_do_mount_fs(struct jffs2_sb_info *c) { int ret; @@ -330,6 +343,8 @@ int jffs2_do_mount_fs(struct jffs2_sb_in c->highest_ino = 1; c->summary = NULL; + jffs2_init_hash_tables(c); + ret = jffs2_sum_init(c); if (ret) { jffs2_free_eraseblocks(c); diff -auNrp mtd_9_28_EBH_1:1_ect/fs/jffs2/erase.c mtd_9_28_EBH_1:1_ect_wl/fs/jffs2/erase.c --- mtd_9_28_EBH_1:1_ect/fs/jffs2/erase.c 2005-09-29 11:41:19.000000000 +0800 +++ mtd_9_28_EBH_1:1_ect_wl/fs/jffs2/erase.c 2005-09-29 13:49:13.000000000 +0800 @@ -455,6 +455,7 @@ static void jffs2_mark_erased_block(stru jffs2_dbg_acct_paranoia_check_nolock(c, jeb); list_add_tail(&jeb->list, &c->free_list); + jffs2_add_to_hash_table(c, jeb, 2); c->nr_erasing_blocks--; c->nr_free_blocks++; spin_unlock(&c->erase_completion_lock); diff -auNrp mtd_9_28_EBH_1:1_ect/fs/jffs2/gc.c mtd_9_28_EBH_1:1_ect_wl/fs/jffs2/gc.c --- mtd_9_28_EBH_1:1_ect/fs/jffs2/gc.c 2005-09-29 09:34:08.000000000 +0800 +++ mtd_9_28_EBH_1:1_ect_wl/fs/jffs2/gc.c 2005-09-29 15:18:36.000000000 +0800 @@ -45,6 +45,7 @@ static struct jffs2_eraseblock *jffs2_fi struct jffs2_eraseblock *ret; struct list_head *nextlist = NULL; int n = jiffies % 128; + int flag = 0; /* Pick an eraseblock to garbage collect next. This is where we'll put the clever wear-levelling algorithms. Eventually. */ @@ -59,23 +60,28 @@ again: So don't favour the erasable_list _too_ much. 
*/ D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next\n")); nextlist = &c->erasable_list; - } else if (n < 110 && !list_empty(&c->very_dirty_list)) { + } else if (n < 112 && !list_empty(&c->very_dirty_list)) { /* Most of the time, pick one off the very_dirty list */ D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next\n")); nextlist = &c->very_dirty_list; - } else if (n < 126 && !list_empty(&c->dirty_list)) { + flag = 1; + } else if (n < 128 && !list_empty(&c->dirty_list)) { D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next\n")); nextlist = &c->dirty_list; + flag = 1; } else if (!list_empty(&c->clean_list)) { D1(printk(KERN_DEBUG "Picking block from clean_list to GC next\n")); nextlist = &c->clean_list; + flag = 1; } else if (!list_empty(&c->dirty_list)) { D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next (clean_list was empty)\n")); nextlist = &c->dirty_list; + flag = 1; } else if (!list_empty(&c->very_dirty_list)) { D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n")); nextlist = &c->very_dirty_list; + flag = 1; } else if (!list_empty(&c->erasable_list)) { D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n")); @@ -95,6 +101,9 @@ again: ret = list_entry(nextlist->next, struct jffs2_eraseblock, list); list_del(&ret->list); + if (flag == 1) { + jffs2_remove_from_hash_table(c, ret, 1); + } c->gcblock = ret; ret->gc_node = ret->first_node; if (!ret->gc_node) { @@ -114,6 +123,51 @@ again: return ret; } +static int jffs2_should_pick_used_block(struct jffs2_sb_info *c) +{ + static uint8_t seqno = 99; + + if (((c->max_erase_count >> BUCKET_RANGE_BIT_LEN) - c->used_blocks_current_index) + <= WL_DELTA/BUCKET_RANGE) { + return 0; + } + seqno++; + if (seqno == 100) { + seqno = 0; + return 1; + } + return 0; +} + +static struct jffs2_eraseblock *jffs2_find_gc_block_with_wl(struct jffs2_sb_info *c) +{ 
+ struct jffs2_eraseblock *ret; + + if (jffs2_should_pick_used_block(c)) { + ret = jffs2_get_used_block(c); + if (ret == NULL) { + return NULL; + } + c->gcblock = ret; + ret->gc_node = ret->first_node; + if (!ret->gc_node) { + printk(KERN_WARNING "Eep. ret->gc_node for block at 0x%08x is NULL\n", ret->offset); + BUG(); + } + if (ret->wasted_size) { + D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size)); + ret->dirty_size += ret->wasted_size; + c->wasted_size -= ret->wasted_size; + c->dirty_size += ret->wasted_size; + ret->wasted_size = 0; + } + } else { + ret = jffs2_find_gc_block(c); + } + + return ret; +} + /* jffs2_garbage_collect_pass * Make a single attempt to progress GC. Move one node, and possibly * start erasing one eraseblock. @@ -209,7 +263,7 @@ int jffs2_garbage_collect_pass(struct jf jeb = c->gcblock; if (!jeb) - jeb = jffs2_find_gc_block(c); + jeb = jffs2_find_gc_block_with_wl(c); if (!jeb) { D1 (printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n")); diff -auNrp mtd_9_28_EBH_1:1_ect/fs/jffs2/Makefile.common mtd_9_28_EBH_1:1_ect_wl/fs/jffs2/Makefile.common --- mtd_9_28_EBH_1:1_ect/fs/jffs2/Makefile.common 2005-09-29 09:34:09.000000000 +0800 +++ mtd_9_28_EBH_1:1_ect_wl/fs/jffs2/Makefile.common 2005-09-29 13:25:44.000000000 +0800 @@ -9,7 +9,7 @@ obj-$(CONFIG_JFFS2_FS) += jffs2.o jffs2-y := compr.o dir.o file.o ioctl.o nodelist.o malloc.o jffs2-y += read.o nodemgmt.o readinode.o write.o scan.o gc.o jffs2-y += symlink.o build.o erase.o background.o fs.o writev.o -jffs2-y += super.o debug.o +jffs2-y += super.o debug.o wear_leveling.o jffs2-$(CONFIG_JFFS2_FS_WRITEBUFFER) += wbuf.o jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rubin.o diff -auNrp mtd_9_28_EBH_1:1_ect/fs/jffs2/malloc.c mtd_9_28_EBH_1:1_ect_wl/fs/jffs2/malloc.c --- mtd_9_28_EBH_1:1_ect/fs/jffs2/malloc.c 2005-09-29 09:34:09.000000000 +0800 +++ mtd_9_28_EBH_1:1_ect_wl/fs/jffs2/malloc.c 2005-09-29 13:23:14.000000000 +0800 @@ -247,6 +247,7 @@ int 
jffs2_alloc_eraseblocks(struct jffs2 for (i=0; i<c->nr_blocks; i++) { INIT_LIST_HEAD(&c->blocks[i]->list); + INIT_LIST_HEAD(&c->blocks[i]->hash_list); c->blocks[i]->offset = i * c->sector_size; c->blocks[i]->free_size = c->sector_size; c->blocks[i]->first_node = NULL; diff -auNrp mtd_9_28_EBH_1:1_ect/fs/jffs2/nodelist.h mtd_9_28_EBH_1:1_ect_wl/fs/jffs2/nodelist.h --- mtd_9_28_EBH_1:1_ect/fs/jffs2/nodelist.h 2005-09-29 11:51:43.000000000 +0800 +++ mtd_9_28_EBH_1:1_ect_wl/fs/jffs2/nodelist.h 2005-09-29 13:34:20.000000000 +0800 @@ -183,6 +183,7 @@ struct jffs2_node_frag struct jffs2_eraseblock { struct list_head list; + struct list_head hash_list; uint16_t bad_count; uint16_t flags; uint32_t offset; /* of this block in the MTD */ @@ -421,6 +422,12 @@ int jffs2_check_nand_cleanmarker_ebh(str int jffs2_write_nand_ebh(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); #endif +/* wear_leveling.c */ +void jffs2_add_to_hash_table(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint8_t flag); +void jffs2_remove_from_hash_table(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint8_t flag); +struct jffs2_eraseblock *jffs2_get_free_block(struct jffs2_sb_info *c); +struct jffs2_eraseblock *jffs2_get_used_block(struct jffs2_sb_info *c); + #include "debug.h" #endif /* __JFFS2_NODELIST_H__ */ diff -auNrp mtd_9_28_EBH_1:1_ect/fs/jffs2/nodemgmt.c mtd_9_28_EBH_1:1_ect_wl/fs/jffs2/nodemgmt.c --- mtd_9_28_EBH_1:1_ect/fs/jffs2/nodemgmt.c 2005-09-29 09:34:09.000000000 +0800 +++ mtd_9_28_EBH_1:1_ect_wl/fs/jffs2/nodemgmt.c 2005-09-29 14:53:29.000000000 +0800 @@ -188,6 +188,7 @@ static void jffs2_close_nextblock(struct jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); list_add_tail(&jeb->list, &c->clean_list); } + jffs2_add_to_hash_table(c, jeb, 1); c->nextblock = NULL; } @@ -196,8 +197,6 @@ static void jffs2_close_nextblock(struct static int jffs2_find_nextblock(struct jffs2_sb_info *c) { - struct list_head *next; - /* Take the next block off the 'free' list */ 
if (list_empty(&c->free_list)) { @@ -246,10 +245,7 @@ static int jffs2_find_nextblock(struct j return -EAGAIN; } - next = c->free_list.next; - list_del(next); - c->nextblock = list_entry(next, struct jffs2_eraseblock, list); - c->nr_free_blocks--; + c->nextblock = jffs2_get_free_block(c); jffs2_sum_reset_collected(c->summary); /* reset collected summary */ @@ -578,6 +574,7 @@ void jffs2_mark_node_obsolete(struct jff } else { D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset)); list_del(&jeb->list); + jffs2_remove_from_hash_table(c, jeb, 1); } if (jffs2_wbuf_dirty(c)) { D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n")); diff -auNrp mtd_9_28_EBH_1:1_ect/fs/jffs2/scan.c mtd_9_28_EBH_1:1_ect_wl/fs/jffs2/scan.c --- mtd_9_28_EBH_1:1_ect/fs/jffs2/scan.c 2005-09-29 11:52:39.000000000 +0800 +++ mtd_9_28_EBH_1:1_ect_wl/fs/jffs2/scan.c 2005-09-29 13:45:59.000000000 +0800 @@ -150,6 +150,7 @@ int jffs2_scan_medium(struct jffs2_sb_in if (!jeb->dirty_size) { /* It's actually free */ list_add(&jeb->list, &c->free_list); + jffs2_add_to_hash_table(c, jeb, 2); c->nr_free_blocks++; } else { /* Dirt */ @@ -162,6 +163,7 @@ int jffs2_scan_medium(struct jffs2_sb_in case BLK_STATE_CLEAN: /* Full (or almost full) of clean data. 
Clean list */ list_add(&jeb->list, &c->clean_list); + jffs2_add_to_hash_table(c, jeb, 1); break; case BLK_STATE_PARTDIRTY: @@ -182,6 +184,7 @@ int jffs2_scan_medium(struct jffs2_sb_in } else { list_add(&c->nextblock->list, &c->dirty_list); } + jffs2_add_to_hash_table(c, c->nextblock, 1); /* deleting summary information of the old nextblock */ jffs2_sum_reset_collected(c->summary); } @@ -200,6 +203,7 @@ int jffs2_scan_medium(struct jffs2_sb_in } else { list_add(&jeb->list, &c->dirty_list); } + jffs2_add_to_hash_table(c, jeb, 1); } break; diff -auNrp mtd_9_28_EBH_1:1_ect/fs/jffs2/wbuf.c mtd_9_28_EBH_1:1_ect_wl/fs/jffs2/wbuf.c --- mtd_9_28_EBH_1:1_ect/fs/jffs2/wbuf.c 2005-09-29 11:53:49.000000000 +0800 +++ mtd_9_28_EBH_1:1_ect_wl/fs/jffs2/wbuf.c 2005-09-29 13:52:13.000000000 +0800 @@ -137,8 +137,10 @@ static void jffs2_block_refile(struct jf /* File the existing block on the bad_used_list.... */ if (c->nextblock == jeb) c->nextblock = NULL; - else /* Not sure this should ever happen... need more coffee */ + else { /* Not sure this should ever happen... 
need more coffee */ list_del(&jeb->list); + jffs2_remove_from_hash_table(c, jeb, 1); + } if (jeb->first_node) { D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset)); list_add(&jeb->list, &c->bad_used_list); diff -auNrp mtd_9_28_EBH_1:1_ect/fs/jffs2/wear_leveling.c mtd_9_28_EBH_1:1_ect_wl/fs/jffs2/wear_leveling.c --- mtd_9_28_EBH_1:1_ect/fs/jffs2/wear_leveling.c 1970-01-01 08:00:00.000000000 +0800 +++ mtd_9_28_EBH_1:1_ect_wl/fs/jffs2/wear_leveling.c 2005-09-29 13:28:45.000000000 +0800 @@ -0,0 +1,98 @@ +#include "nodelist.h" + +void jffs2_add_to_hash_table(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint8_t flag) +{ + struct jffs2_blocks_bucket *hash_table; + uint32_t index, *current_index_p; + + if (flag == 1) { + hash_table = c->used_blocks; + current_index_p = &(c->used_blocks_current_index); + }else if (flag == 2) { + hash_table = c->free_blocks; + current_index_p = &(c->free_blocks_current_index); + }else { + return; + } + + index = (jeb->erase_count >> BUCKET_RANGE_BIT_LEN); + if (index >= HASH_SIZE) { + return; + } + if (index < *current_index_p) { + *current_index_p = index; + } + hash_table[index].number++; + list_add_tail(&jeb->hash_list, &(hash_table[index].chain)); + return; +} + +void jffs2_remove_from_hash_table(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint8_t flag) +{ + struct jffs2_blocks_bucket *hash_table; + uint32_t index, *current_index_p, i; + + if (flag == 1) { + hash_table = c->used_blocks; + current_index_p = &(c->used_blocks_current_index); + }else if (flag == 2) { + hash_table = c->free_blocks; + current_index_p = &(c->free_blocks_current_index); + }else { + return; + } + + index = (jeb->erase_count >> BUCKET_RANGE_BIT_LEN); + if (index >= HASH_SIZE) { + return; + } + hash_table[index].number--; + list_del(&jeb->hash_list); + + if (hash_table[index].number == 0) { + for (i=index+1; i<HASH_SIZE; i++) { + if (hash_table[i].number != 0) { + break; + } + } + *current_index_p = i; + } + return; +} + +struct jffs2_eraseblock *jffs2_get_free_block(struct jffs2_sb_info *c) +{ + struct list_head *next; + struct jffs2_eraseblock *jeb; + + if (c->free_blocks_current_index == HASH_SIZE) { + return NULL; + } + next = 
c->free_blocks[c->free_blocks_current_index].chain.next; + jeb = list_entry(next, struct jffs2_eraseblock, hash_list); + list_del(&jeb->list); + jffs2_remove_from_hash_table(c, jeb, 2); + c->nr_free_blocks--; + + return jeb; +} + +struct jffs2_eraseblock *jffs2_get_used_block(struct jffs2_sb_info *c) +{ + struct list_head *next; + struct jffs2_eraseblock *jeb; + + if (c->used_blocks_current_index == HASH_SIZE) { + return NULL; + } + next = c->used_blocks[c->used_blocks_current_index].chain.next; + jeb = list_entry(next, struct jffs2_eraseblock, hash_list); + list_del(&jeb->list); + jffs2_remove_from_hash_table(c, jeb, 1); + + return jeb; +} + diff -auNrp mtd_9_28_EBH_1:1_ect/include/linux/jffs2_fs_sb.h mtd_9_28_EBH_1:1_ect_wl/include/linux/jffs2_fs_sb.h --- mtd_9_28_EBH_1:1_ect/include/linux/jffs2_fs_sb.h 2005-09-29 11:40:59.000000000 +0800 +++ mtd_9_28_EBH_1:1_ect_wl/include/linux/jffs2_fs_sb.h 2005-09-29 13:15:10.000000000 +0800 @@ -17,6 +17,20 @@ #define JFFS2_SB_FLAG_SCANNING 2 /* Flash scanning is in progress */ #define JFFS2_SB_FLAG_BUILDING 4 /* File system building is in progress */ +#define MAX_ERASE_COUNT_BIT_LEN 18 +#define MAX_ERASE_COUNT (1 << MAX_ERASE_COUNT_BIT_LEN) /* The maximum guaranteed erase cycles for NAND and NOR are ~ 100K at the moment */ +#define WL_DELTA_BIT_LEN 10 +#define WL_DELTA (1 << WL_DELTA_BIT_LEN) /* This is wear-leveling delta, which is defined as "maximum of all erase counts - minimum of all erase counts" */ +#define HASH_SIZE_BIT_LEN (MAX_ERASE_COUNT_BIT_LEN - WL_DELTA_BIT_LEN + 1) /* The range size of per-bucket is half of WL_DELTA */ +#define HASH_SIZE (1 << HASH_SIZE_BIT_LEN) +#define BUCKET_RANGE_BIT_LEN (MAX_ERASE_COUNT_BIT_LEN - HASH_SIZE_BIT_LEN) +#define BUCKET_RANGE (1 << BUCKET_RANGE_BIT_LEN) + +struct jffs2_blocks_bucket { + uint32_t number; /* The number of erase blocks in this bucket*/ + struct list_head chain; /* The head of erase blocks in this bucket */ +}; + struct jffs2_inodirty; /* A struct for the overall 
file system control. Pointers to @@ -120,6 +134,12 @@ struct jffs2_sb_info { uint32_t total_erase_count; /* The summary erase count of all erase blocks */ uint32_t nr_blocks_with_ebh; /* The number of erase blocks, which has eraseblock header on it */ uint32_t max_erase_count; /* The maximum erase count of all erase blocks */ + + uint32_t used_blocks_current_index; + uint32_t free_blocks_current_index; + struct jffs2_blocks_bucket used_blocks[HASH_SIZE]; /* The hash table for both dirty and clean erase blocks */ + struct jffs2_blocks_bucket free_blocks[HASH_SIZE]; /* The hash table for free erase blocks */ + /* OS-private pointer for getting back to master superblock info */ void *os_priv; };