@@ ... @@
 /*
  * Dummy cache handling routines for machines without board caches
  */
-static void no_sc_noop(void) {}
+static void cache_noop(void) {}
 
 static struct bcache_ops no_sc_ops = {
-       .bc_enable = (void *)no_sc_noop,
-       .bc_disable = (void *)no_sc_noop,
-       .bc_wback_inv = (void *)no_sc_noop,
-       .bc_inv = (void *)no_sc_noop
+       .bc_enable = (void *)cache_noop,
+       .bc_disable = (void *)cache_noop,
+       .bc_wback_inv = (void *)cache_noop,
+       .bc_inv = (void *)cache_noop
 };
 
 struct bcache_ops *bcops = &no_sc_ops;
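
The hunk above is a null-object pattern: rather than testing for a missing board cache at every call site, each slot in the ops structure points at one shared no-op, so callers can invoke bcops->bc_*() unconditionally. A minimal stand-alone sketch of the idea, with hypothetical names rather than the kernel's:

    struct ops {
            void (*enable)(void);
            void (*wback_inv)(unsigned long addr, unsigned long size);
    };

    static void noop(void) {}

    /* One no-op function, cast to fit every slot, as in the patch. */
    static struct ops null_ops = {
            .enable    = noop,
            .wback_inv = (void (*)(unsigned long, unsigned long))noop,
    };

    static struct ops *ops = &null_ops;

    int main(void)
    {
            ops->enable();              /* safe even with no cache */
            ops->wback_inv(0x1000, 64); /* arguments simply ignored */
            return 0;
    }

Calling through a mismatched function-pointer type is formally undefined in ISO C; the (void *) casts above get away with it because the ignored arguments just sit in argument registers on MIPS.
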
@@ ... @@ static inline void r4k_blast_dcache_page_setup(void)
 {
        unsigned long  dc_lsize = cpu_dcache_line_size();
 
-       if (dc_lsize == 16)
+       if (dc_lsize == 0)
+               r4k_blast_dcache_page = (void *)cache_noop;
+       else if (dc_lsize == 16)
                r4k_blast_dcache_page = blast_dcache16_page;
        else if (dc_lsize == 32)
                r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
@@ ... @@ static inline void r4k_blast_dcache_page_indexed_setup(void)
 {
        unsigned long dc_lsize = cpu_dcache_line_size();
 
-       if (dc_lsize == 16)
+       if (dc_lsize == 0)
+               r4k_blast_dcache_page_indexed = (void *)cache_noop;
+       else if (dc_lsize == 16)
                r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
        else if (dc_lsize == 32)
                r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
@@ ... @@ static inline void r4k_blast_dcache_setup(void)
 {
        unsigned long dc_lsize = cpu_dcache_line_size();
 
-       if (dc_lsize == 16)
+       if (dc_lsize == 0)
+               r4k_blast_dcache = (void *)cache_noop;
+       else if (dc_lsize == 16)
                r4k_blast_dcache = blast_dcache16;
        else if (dc_lsize == 32)
                r4k_blast_dcache = blast_dcache32;
@@ ... @@ static inline void r4k_blast_icache_page_setup(void)
 {
        unsigned long ic_lsize = cpu_icache_line_size();
 
-       if (ic_lsize == 16)
+       if (ic_lsize == 0)
+               r4k_blast_icache_page = (void *)cache_noop;
+       else if (ic_lsize == 16)
                r4k_blast_icache_page = blast_icache16_page;
        else if (ic_lsize == 32)
                r4k_blast_icache_page = blast_icache32_page;
@@ ... @@ static inline void r4k_blast_icache_page_indexed_setup(void)
 {
        unsigned long ic_lsize = cpu_icache_line_size();
 
-       if (ic_lsize == 16)
+       if (ic_lsize == 0)
+               r4k_blast_icache_page_indexed = (void *)cache_noop;
+       else if (ic_lsize == 16)
                r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
        else if (ic_lsize == 32) {
                if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
@@ ... @@ static inline void r4k_blast_icache_setup(void)
 {
        unsigned long ic_lsize = cpu_icache_line_size();
 
-       if (ic_lsize == 16)
+       if (ic_lsize == 0)
+               r4k_blast_icache = (void *)cache_noop;
+       else if (ic_lsize == 16)
                r4k_blast_icache = blast_icache16;
        else if (ic_lsize == 32) {
                if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
@@ ... @@ static inline void r4k_blast_scache_page_setup(void)
        unsigned long sc_lsize = cpu_scache_line_size();
 
        if (scache_size == 0)
-               r4k_blast_scache_page = (void *)no_sc_noop;
+               r4k_blast_scache_page = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache_page = blast_scache16_page;
        else if (sc_lsize == 32)
@@ ... @@ static inline void r4k_blast_scache_page_indexed_setup(void)
        unsigned long sc_lsize = cpu_scache_line_size();
 
        if (scache_size == 0)
-               r4k_blast_scache_page_indexed = (void *)no_sc_noop;
+               r4k_blast_scache_page_indexed = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
        else if (sc_lsize == 32)
@@ ... @@ static inline void r4k_blast_scache_setup(void)
        unsigned long sc_lsize = cpu_scache_line_size();
 
        if (scache_size == 0)
-               r4k_blast_scache = (void *)no_sc_noop;
+               r4k_blast_scache = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache = blast_scache16;
        else if (sc_lsize == 32)
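
The nine *_setup() hunks above all share one shape: probe the line size (or, for the S-cache, the probed scache_size) once at initialisation, then bind a global function pointer, with zero now mapping to cache_noop instead of leaving the pointer unset. Previously, a CPU reporting no cache at some level would have left the pointer NULL and crashed on the first flush. A stand-alone sketch of the pattern, using hypothetical names:

    #include <stdio.h>

    static void (*blast_dcache)(void);

    static void cache_noop(void)     {}
    static void blast_dcache16(void) { puts("flush, 16-byte lines"); }
    static void blast_dcache32(void) { puts("flush, 32-byte lines"); }

    static void blast_setup(unsigned long lsize)
    {
            if (lsize == 0)
                    blast_dcache = cache_noop;      /* no cache: flushing is free */
            else if (lsize == 16)
                    blast_dcache = blast_dcache16;
            else if (lsize == 32)
                    blast_dcache = blast_dcache32;
    }

    int main(void)
    {
            blast_setup(0);         /* e.g. a core without a D-cache */
            blast_dcache();         /* previously a call through NULL */
            return 0;
    }
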
@@ ... @@ static inline void local_r4k_flush_icache_range(void *args)
        unsigned long end = fir_args->end;
 
        if (!cpu_has_ic_fills_f_dc) {
-               if (end - start > dcache_size) {
+               if (end - start >= dcache_size) {
                        r4k_blast_dcache();
                } else {
                        R4600_HIT_CACHEOP_WAR_IMPL;
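
Two things follow from the switch to >=: a range exactly as large as the cache now takes the whole-cache blast, which costs no more than walking it line by line, and when dcache_size is 0 the condition holds for every range, even an empty one, so the per-line path can never run with dc_lsize == 0. That matters because rounding an address with addr & ~(lsize - 1) collapses it to 0 when the line size is 0; the same hazard is what the new if (dc_lsize) and if (ic_lsize) guards in the sigtramp hunk below prevent. A compact illustration of both points (not kernel code):

    #include <stdio.h>

    static unsigned long dcache_size;       /* probed at boot; 0 if absent */

    static void flush_range(unsigned long start, unsigned long end)
    {
            if (end - start >= dcache_size) /* ">" let an empty range slip through */
                    puts("blast whole cache (cache_noop when size is 0)");
            else
                    puts("walk the range line by line");
    }

    int main(void)
    {
            unsigned long addr = 0x12345678, lsize = 0;

            dcache_size = 0;
            flush_range(0x1000, 0x1000);    /* empty range: 0 >= 0 now blasts */

            /* With lsize == 0, lsize - 1 is ULONG_MAX, ~ULONG_MAX is 0,
             * and the "rounded" address collapses to 0. */
            printf("%#lx\n", addr & ~(lsize - 1));  /* prints 0 */
            return 0;
    }
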
@@ ... @@ static void local_r4k_flush_cache_sigtramp(void * arg)
        unsigned long addr = (unsigned long) arg;
 
        R4600_HIT_CACHEOP_WAR_IMPL;
-       protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
+       if (dc_lsize)
+               protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
        if (!cpu_icache_snoops_remote_store && scache_size)
                protected_writeback_scache_line(addr & ~(sc_lsize - 1));
-       protected_flush_icache_line(addr & ~(ic_lsize - 1));
+       if (ic_lsize)
+               protected_flush_icache_line(addr & ~(ic_lsize - 1));
        if (MIPS4K_ICACHE_REFILL_WAR) {
                __asm__ __volatile__ (
                        ".set push\n\t"
@@ ... @@ static void __init probe_pcache(void)
        c->icache.waysize = icache_size / c->icache.ways;
        c->dcache.waysize = dcache_size / c->dcache.ways;
 
-       c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
-       c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);
+       c->icache.sets = c->icache.linesz ?
+               icache_size / (c->icache.linesz * c->icache.ways) : 0;
+       c->dcache.sets = c->dcache.linesz ?
+               dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
 
        /*
         * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
@@ ... @@ void __init r4k_cache_init(void)
         * This code supports virtually indexed processors and will be
         * unnecessarily inefficient on physically indexed processors.
         */
-       shm_align_mask = max_t( unsigned long,
-                               c->dcache.sets * c->dcache.linesz - 1,
-                               PAGE_SIZE - 1);
-
+       if (c->dcache.linesz)
+               shm_align_mask = max_t( unsigned long,
+                                       c->dcache.sets * c->dcache.linesz - 1,
+                                       PAGE_SIZE - 1);
+       else
+               shm_align_mask = PAGE_SIZE - 1;
        flush_cache_all         = r4k_flush_cache_all;
        __flush_cache_all       = r4k___flush_cache_all;
        flush_cache_mm          = r4k_flush_cache_mm;
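
These last two hunks close two arithmetic hazards. probe_pcache() no longer divides by linesz * ways when the line size is 0 (it records 0 sets instead of trapping on a divide by zero), and r4k_cache_init() no longer feeds those 0 sets into shm_align_mask, where the old expression sets * linesz - 1 would have wrapped to ULONG_MAX and demanded an impossible mapping alignment. shm_align_mask is the colouring constraint for shared mappings: one D-cache way expressed as a mask, but never less than a page. A sketch of both cases, assuming 4 kB pages and hypothetical cache geometry:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static unsigned long align_mask(unsigned long sets, unsigned long linesz)
    {
            unsigned long way;

            if (!linesz)
                    return PAGE_SIZE - 1;   /* no D-cache: page-sized colouring */

            way = sets * linesz - 1;        /* one cache way, as a mask */
            return way > PAGE_SIZE - 1 ? way : PAGE_SIZE - 1;
    }

    int main(void)
    {
            printf("%#lx\n", align_mask(512, 32));  /* 16 kB way -> 0x3fff */
            printf("%#lx\n", align_mask(0, 0));     /* no cache  -> 0xfff  */
            return 0;
    }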