diff -auNrp mtd_9_28_EBH/fs/jffs2/build.c mtd_9_28_EBH_1:1/fs/jffs2/build.c --- mtd_9_28_EBH/fs/jffs2/build.c 2005-09-28 10:54:01.000000000 +0800 +++ mtd_9_28_EBH_1:1/fs/jffs2/build.c 2005-09-28 14:51:04.000000000 +0800 @@ -308,27 +308,13 @@ static void jffs2_calc_trigger_levels(st int jffs2_do_mount_fs(struct jffs2_sb_info *c) { int ret; - int i; - int size; c->free_size = c->flash_size; c->nr_blocks = c->flash_size / c->sector_size; - size = sizeof(struct jffs2_eraseblock) * c->nr_blocks; -#ifndef __ECOS - if (jffs2_blocks_use_vmalloc(c)) - c->blocks = vmalloc(size); - else -#endif - c->blocks = kmalloc(size, GFP_KERNEL); - if (!c->blocks) - return -ENOMEM; - - memset(c->blocks, 0, size); - for (i=0; i<c->nr_blocks; i++) { - INIT_LIST_HEAD(&c->blocks[i].list); - c->blocks[i].offset = i * c->sector_size; - c->blocks[i].free_size = c->sector_size; - } + + ret = jffs2_alloc_eraseblocks(c); + if (ret) + return ret; INIT_LIST_HEAD(&c->clean_list); INIT_LIST_HEAD(&c->very_dirty_list); @@ -345,20 +331,16 @@ int jffs2_do_mount_fs(struct jffs2_sb_in c->summary = NULL; ret = jffs2_sum_init(c); - if (ret) + if (ret) { + jffs2_free_eraseblocks(c); return ret; + } if (jffs2_build_filesystem(c)) { dbg_fsbuild("build_fs failed\n"); jffs2_free_ino_caches(c); jffs2_free_raw_node_refs(c); -#ifndef __ECOS - if (jffs2_blocks_use_vmalloc(c)) - vfree(c->blocks); - else -#endif - kfree(c->blocks); - + jffs2_free_eraseblocks(c); return -EIO; } diff -auNrp mtd_9_28_EBH/fs/jffs2/fs.c mtd_9_28_EBH_1:1/fs/jffs2/fs.c --- mtd_9_28_EBH/fs/jffs2/fs.c 2005-09-28 11:51:53.000000000 +0800 +++ mtd_9_28_EBH_1:1/fs/jffs2/fs.c 2005-09-28 15:03:27.000000000 +0800 @@ -519,10 +519,7 @@ int jffs2_do_fill_super(struct super_blo iput(root_i); jffs2_free_ino_caches(c); jffs2_free_raw_node_refs(c); - if (jffs2_blocks_use_vmalloc(c)) - vfree(c->blocks); - else - kfree(c->blocks); + jffs2_free_eraseblocks(c); out_inohash: kfree(c->inocache_list); out_wbuf: diff -auNrp mtd_9_28_EBH/fs/jffs2/gc.c 
mtd_9_28_EBH_1:1/fs/jffs2/gc.c --- mtd_9_28_EBH/fs/jffs2/gc.c 2005-09-28 10:52:30.000000000 +0800 +++ mtd_9_28_EBH_1:1/fs/jffs2/gc.c 2005-09-28 14:48:32.000000000 +0800 @@ -616,7 +616,7 @@ static int jffs2_garbage_collect_pristin if (!retried && (nraw = jffs2_alloc_raw_node_ref())) { /* Try to reallocate space and retry */ uint32_t dummy; - struct jffs2_eraseblock *jeb = &c->blocks[phys_ofs / c->sector_size]; + struct jffs2_eraseblock *jeb = c->blocks[phys_ofs / c->sector_size]; retried = 1; @@ -1129,7 +1129,7 @@ static int jffs2_garbage_collect_dnode(s struct jffs2_raw_node_ref *raw = frag->node->raw; struct jffs2_eraseblock *jeb; - jeb = &c->blocks[raw->flash_offset / c->sector_size]; + jeb = c->blocks[raw->flash_offset / c->sector_size]; if (jeb == c->gcblock) { D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n", @@ -1179,7 +1179,7 @@ static int jffs2_garbage_collect_dnode(s struct jffs2_raw_node_ref *raw = frag->node->raw; struct jffs2_eraseblock *jeb; - jeb = &c->blocks[raw->flash_offset / c->sector_size]; + jeb = c->blocks[raw->flash_offset / c->sector_size]; if (jeb == c->gcblock) { D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n", diff -auNrp mtd_9_28_EBH/fs/jffs2/malloc.c mtd_9_28_EBH_1:1/fs/jffs2/malloc.c --- mtd_9_28_EBH/fs/jffs2/malloc.c 2005-09-28 10:52:30.000000000 +0800 +++ mtd_9_28_EBH_1:1/fs/jffs2/malloc.c 2005-09-28 16:53:04.000000000 +0800 @@ -14,6 +14,7 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> +#include <linux/vmalloc.h> #include <linux/jffs2.h> #include "nodelist.h" @@ -26,6 +27,12 @@ static kmem_cache_t *tmp_dnode_info_slab static kmem_cache_t *raw_node_ref_slab; static kmem_cache_t *node_frag_slab; static kmem_cache_t *inode_cache_slab; +static kmem_cache_t *eraseblock_slab; + +static inline int jffs2_blocks_use_vmalloc(struct jffs2_sb_info *c) +{ + return ((c->flash_size / c->sector_size) * sizeof(void*)) > (128 * 1024); +} int __init jffs2_create_slab_caches(void) { @@ -65,6 +72,12 @@ int __init 
jffs2_create_slab_caches(void if (!node_frag_slab) goto err; + eraseblock_slab = kmem_cache_create("jffs2_eraseblock", + sizeof(struct jffs2_eraseblock), + 0, 0, NULL, NULL); + if (!eraseblock_slab) + goto err; + inode_cache_slab = kmem_cache_create("jffs2_inode_cache", sizeof(struct jffs2_inode_cache), 0, 0, NULL, NULL); @@ -91,6 +104,8 @@ void jffs2_destroy_slab_caches(void) kmem_cache_destroy(node_frag_slab); if(inode_cache_slab) kmem_cache_destroy(inode_cache_slab); + if (eraseblock_slab) + kmem_cache_destroy(eraseblock_slab); } struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize) @@ -205,3 +220,57 @@ void jffs2_free_inode_cache(struct jffs2 dbg_memalloc("%p\n", x); kmem_cache_free(inode_cache_slab, x); } + +int jffs2_alloc_eraseblocks(struct jffs2_sb_info *c) +{ + uint32_t i; +#ifndef __ECOS + if (jffs2_blocks_use_vmalloc(c)) + c->blocks = vmalloc(sizeof(void *) * c->nr_blocks); + else +#endif + c->blocks = kmalloc(sizeof(void *) * c->nr_blocks, GFP_KERNEL); + if (!c->blocks) + return -ENOMEM; + memset(c->blocks, 0, sizeof(void *) * c->nr_blocks); + + for (i=0; i<c->nr_blocks; i++) { + c->blocks[i] = kmem_cache_alloc(eraseblock_slab, GFP_KERNEL); + dbg_memalloc("%p\n", c->blocks[i]); + if (!c->blocks[i]) { + jffs2_free_eraseblocks(c); + return -ENOMEM; + } + memset(c->blocks[i], 0, sizeof(struct jffs2_eraseblock)); + } + + + for (i=0; i<c->nr_blocks; i++) { + INIT_LIST_HEAD(&c->blocks[i]->list); + c->blocks[i]->offset = i * c->sector_size; + c->blocks[i]->free_size = c->sector_size; + c->blocks[i]->first_node = NULL; + c->blocks[i]->last_node = NULL; + } + + return 0; +} + +void jffs2_free_eraseblocks(struct jffs2_sb_info *c) +{ + uint32_t i; + + for (i=0; i<c->nr_blocks; i++) { + if (c->blocks[i]) { + dbg_memalloc("%p\n", c->blocks[i]); + kmem_cache_free(eraseblock_slab, c->blocks[i]); + } + } +#ifndef __ECOS + if (jffs2_blocks_use_vmalloc(c)) + vfree(c->blocks); + else +#endif + kfree(c->blocks); +} + diff -auNrp mtd_9_28_EBH/fs/jffs2/nodelist.c 
mtd_9_28_EBH_1:1/fs/jffs2/nodelist.c --- mtd_9_28_EBH/fs/jffs2/nodelist.c 2005-09-28 10:52:30.000000000 +0800 +++ mtd_9_28_EBH_1:1/fs/jffs2/nodelist.c 2005-09-28 14:48:32.000000000 +0800 @@ -482,7 +482,7 @@ static int check_node_data(struct jffs2_ } adj_acc: - jeb = &c->blocks[ref->flash_offset / c->sector_size]; + jeb = c->blocks[ref->flash_offset / c->sector_size]; len = ref_totlen(c, jeb, ref); /* @@ -950,13 +950,13 @@ void jffs2_free_raw_node_refs(struct jff struct jffs2_raw_node_ref *this, *next; for (i=0; i<c->nr_blocks; i++) { - this = c->blocks[i].first_node; + this = c->blocks[i]->first_node; while(this) { next = this->next_phys; jffs2_free_raw_node_ref(this); this = next; } - c->blocks[i].first_node = c->blocks[i].last_node = NULL; + c->blocks[i]->first_node = c->blocks[i]->last_node = NULL; } } diff -auNrp mtd_9_28_EBH/fs/jffs2/nodelist.h mtd_9_28_EBH_1:1/fs/jffs2/nodelist.h --- mtd_9_28_EBH/fs/jffs2/nodelist.h 2005-09-28 13:32:19.000000000 +0800 +++ mtd_9_28_EBH_1:1/fs/jffs2/nodelist.h 2005-09-28 15:05:58.000000000 +0800 @@ -205,11 +205,6 @@ struct jffs2_eraseblock #define CLR_EBFLAGS_HAS_EBH(jeb) (jeb->flags &= ~1) #define EBFLAGS_HAS_EBH(jeb) ((jeb->flags & 1) == 1) -static inline int jffs2_blocks_use_vmalloc(struct jffs2_sb_info *c) -{ - return ((c->flash_size / c->sector_size) * sizeof (struct jffs2_eraseblock)) > (128 * 1024); -} - /* Calculate totlen from surrounding nodes or eraseblock */ static inline uint32_t __ref_totlen(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, @@ -221,7 +216,7 @@ static inline uint32_t __ref_totlen(stru ref_end = ref_offset(ref->next_phys); else { if (!jeb) - jeb = &c->blocks[ref->flash_offset / c->sector_size]; + jeb = c->blocks[ref->flash_offset / c->sector_size]; /* Last node in block. 
Use free_space */ BUG_ON(ref != jeb->last_node); @@ -237,9 +232,9 @@ static inline uint32_t ref_totlen(struct uint32_t ret; #if CONFIG_JFFS2_FS_DEBUG > 0 - if (jeb && jeb != &c->blocks[ref->flash_offset / c->sector_size]) { + if (jeb && jeb != c->blocks[ref->flash_offset / c->sector_size]) { printk(KERN_CRIT "ref_totlen called with wrong block -- at 0x%08x instead of 0x%08x; ref 0x%08x\n", - jeb->offset, c->blocks[ref->flash_offset / c->sector_size].offset, ref_offset(ref)); + jeb->offset, c->blocks[ref->flash_offset / c->sector_size]->offset, ref_offset(ref)); BUG(); } #endif @@ -254,7 +249,7 @@ static inline uint32_t ref_totlen(struct ref, ref_offset(ref), ref_offset(ref)+ref->__totlen, ret, ref->__totlen); if (!jeb) - jeb = &c->blocks[ref->flash_offset / c->sector_size]; + jeb = c->blocks[ref->flash_offset / c->sector_size]; jffs2_dbg_dump_node_refs_nolock(c, jeb); BUG(); } @@ -381,7 +376,8 @@ struct jffs2_node_frag *jffs2_alloc_node void jffs2_free_node_frag(struct jffs2_node_frag *); struct jffs2_inode_cache *jffs2_alloc_inode_cache(void); void jffs2_free_inode_cache(struct jffs2_inode_cache *); - +int jffs2_alloc_eraseblocks(struct jffs2_sb_info *c); +void jffs2_free_eraseblocks(struct jffs2_sb_info *c); /* gc.c */ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c); diff -auNrp mtd_9_28_EBH/fs/jffs2/nodemgmt.c mtd_9_28_EBH_1:1/fs/jffs2/nodemgmt.c --- mtd_9_28_EBH/fs/jffs2/nodemgmt.c 2005-09-28 13:42:53.000000000 +0800 +++ mtd_9_28_EBH_1:1/fs/jffs2/nodemgmt.c 2005-09-28 14:48:32.000000000 +0800 @@ -392,7 +392,7 @@ int jffs2_add_physical_node_ref(struct j struct jffs2_eraseblock *jeb; uint32_t len; - jeb = &c->blocks[new->flash_offset / c->sector_size]; + jeb = c->blocks[new->flash_offset / c->sector_size]; len = ref_totlen(c, jeb, new); D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len)); @@ -489,7 +489,7 @@ void jffs2_mark_node_obsolete(struct jff printk(KERN_NOTICE "raw node at 
0x%08x is off the end of device!\n", ref->flash_offset); BUG(); } - jeb = &c->blocks[blocknr]; + jeb = c->blocks[blocknr]; if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) && !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) { diff -auNrp mtd_9_28_EBH/fs/jffs2/readinode.c mtd_9_28_EBH_1:1/fs/jffs2/readinode.c --- mtd_9_28_EBH/fs/jffs2/readinode.c 2005-09-28 10:52:30.000000000 +0800 +++ mtd_9_28_EBH_1:1/fs/jffs2/readinode.c 2005-09-28 14:48:32.000000000 +0800 @@ -294,7 +294,7 @@ static inline int read_dnode(struct jffs struct jffs2_eraseblock *jeb; dbg_readinode("the node has no data.\n"); - jeb = &c->blocks[ref->flash_offset / c->sector_size]; + jeb = c->blocks[ref->flash_offset / c->sector_size]; len = ref_totlen(c, jeb, ref); spin_lock(&c->erase_completion_lock); diff -auNrp mtd_9_28_EBH/fs/jffs2/scan.c mtd_9_28_EBH_1:1/fs/jffs2/scan.c --- mtd_9_28_EBH/fs/jffs2/scan.c 2005-09-28 13:31:56.000000000 +0800 +++ mtd_9_28_EBH_1:1/fs/jffs2/scan.c 2005-09-28 14:48:32.000000000 +0800 @@ -117,7 +117,7 @@ int jffs2_scan_medium(struct jffs2_sb_in } for (i=0; inr_blocks; i++) { - struct jffs2_eraseblock *jeb = &c->blocks[i]; + struct jffs2_eraseblock *jeb = c->blocks[i]; /* reset summary info for next eraseblock scan */ jffs2_sum_reset_collected(s); diff -auNrp mtd_9_28_EBH/fs/jffs2/summary.c mtd_9_28_EBH_1:1/fs/jffs2/summary.c --- mtd_9_28_EBH/fs/jffs2/summary.c 2005-09-28 14:45:13.000000000 +0800 +++ mtd_9_28_EBH_1:1/fs/jffs2/summary.c 2005-09-28 14:48:32.000000000 +0800 @@ -234,7 +234,7 @@ int jffs2_sum_add_kvec(struct jffs2_sb_i struct jffs2_eraseblock *jeb; node = invecs[0].iov_base; - jeb = &c->blocks[ofs / c->sector_size]; + jeb = c->blocks[ofs / c->sector_size]; ofs -= jeb->offset; switch (je16_to_cpu(node->u.nodetype)) { diff -auNrp mtd_9_28_EBH/fs/jffs2/super.c mtd_9_28_EBH_1:1/fs/jffs2/super.c --- mtd_9_28_EBH/fs/jffs2/super.c 2005-09-28 10:52:30.000000000 +0800 +++ mtd_9_28_EBH_1:1/fs/jffs2/super.c 2005-09-28 14:48:32.000000000 +0800 @@ 
-287,10 +287,7 @@ static void jffs2_put_super (struct supe jffs2_free_ino_caches(c); jffs2_free_raw_node_refs(c); - if (jffs2_blocks_use_vmalloc(c)) - vfree(c->blocks); - else - kfree(c->blocks); + jffs2_free_eraseblocks(c); jffs2_flash_cleanup(c); kfree(c->inocache_list); if (c->mtd->sync) diff -auNrp mtd_9_28_EBH/fs/jffs2/super-v24.c mtd_9_28_EBH_1:1/fs/jffs2/super-v24.c --- mtd_9_28_EBH/fs/jffs2/super-v24.c 2005-09-28 10:52:30.000000000 +0800 +++ mtd_9_28_EBH_1:1/fs/jffs2/super-v24.c 2005-09-28 15:07:39.000000000 +0800 @@ -103,10 +103,7 @@ static void jffs2_put_super (struct supe jffs2_free_ino_caches(c); jffs2_free_raw_node_refs(c); - if (jffs2_blocks_use_vmalloc(c)) - vfree(c->blocks); - else - kfree(c->blocks); + jffs2_free_eraseblocks(c); jffs2_flash_cleanup(c); kfree(c->inocache_list); if (c->mtd->sync) diff -auNrp mtd_9_28_EBH/fs/jffs2/wbuf.c mtd_9_28_EBH_1:1/fs/jffs2/wbuf.c --- mtd_9_28_EBH/fs/jffs2/wbuf.c 2005-09-28 13:33:21.000000000 +0800 +++ mtd_9_28_EBH_1:1/fs/jffs2/wbuf.c 2005-09-28 14:48:32.000000000 +0800 @@ -176,7 +176,7 @@ static void jffs2_wbuf_recover(struct jf spin_lock(&c->erase_completion_lock); - jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; + jeb = c->blocks[c->wbuf_ofs / c->sector_size]; jffs2_block_refile(c, jeb, REFILE_NOTEMPTY); @@ -336,7 +336,7 @@ static void jffs2_wbuf_recover(struct jf } /* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */ - new_jeb = &c->blocks[ofs / c->sector_size]; + new_jeb = c->blocks[ofs / c->sector_size]; spin_lock(&c->erase_completion_lock); if (new_jeb->first_node) { @@ -485,7 +485,7 @@ static int __jffs2_flush_wbuf(struct jff if (pad && !jffs2_dataflash(c)) { struct jffs2_eraseblock *jeb; - jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; + jeb = c->blocks[c->wbuf_ofs / c->sector_size]; D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n", (jeb==c->nextblock)?"next":"", jeb->offset)); @@ -790,7 +790,7 @@ int jffs2_flash_writev(struct 
jffs2_sb_i spin_lock(&c->erase_completion_lock); - jeb = &c->blocks[outvec_to / c->sector_size]; + jeb = c->blocks[outvec_to / c->sector_size]; jffs2_block_refile(c, jeb, REFILE_ANYWAY); *retlen = 0; diff -auNrp mtd_9_28_EBH/fs/jffs2/write.c mtd_9_28_EBH_1:1/fs/jffs2/write.c --- mtd_9_28_EBH/fs/jffs2/write.c 2005-09-28 10:52:30.000000000 +0800 +++ mtd_9_28_EBH_1:1/fs/jffs2/write.c 2005-09-28 14:48:32.000000000 +0800 @@ -143,7 +143,7 @@ struct jffs2_full_dnode *jffs2_write_dno if (!retried && alloc_mode != ALLOC_NORETRY && (raw = jffs2_alloc_raw_node_ref())) { /* Try to reallocate space and retry */ uint32_t dummy; - struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size]; + struct jffs2_eraseblock *jeb = c->blocks[flash_ofs / c->sector_size]; retried = 1; @@ -291,7 +291,7 @@ struct jffs2_full_dirent *jffs2_write_di if (!retried && (raw = jffs2_alloc_raw_node_ref())) { /* Try to reallocate space and retry */ uint32_t dummy; - struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size]; + struct jffs2_eraseblock *jeb = c->blocks[flash_ofs / c->sector_size]; retried = 1; diff -auNrp mtd_9_28_EBH/include/linux/jffs2_fs_sb.h mtd_9_28_EBH_1:1/include/linux/jffs2_fs_sb.h --- mtd_9_28_EBH/include/linux/jffs2_fs_sb.h 2005-09-28 11:58:03.000000000 +0800 +++ mtd_9_28_EBH_1:1/include/linux/jffs2_fs_sb.h 2005-09-28 14:48:32.000000000 +0800 @@ -64,7 +64,7 @@ struct jffs2_sb_info { uint32_t nospc_dirty_size; uint32_t nr_blocks; - struct jffs2_eraseblock *blocks; /* The whole array of blocks. Used for getting blocks + struct jffs2_eraseblock **blocks; /* The whole array of blocks. Used for getting blocks * from the offset (blocks[ofs / sector_size]) */ struct jffs2_eraseblock *nextblock; /* The block we're currently filling */