The glibc function mmap(..., O_SYNC) maps pages as _PAGE_UNCACHE +
_PAGE_SO, but strong-order pages cannot support unaligned access.
So remove _PAGE_SO from _PAGE_UNCACHE (callers that need strong
ordering, such as ioremap, now add _PAGE_SO explicitly), and also
sync abiv1 with the _PAGE_SO macro definition.
Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Reported-by: Liu Renwei <Renwei.Liu@verisilicon.com>
Tested-by: Yuan Qiyun <qiyun_yuan@c-sky.com>
 
 #define _PAGE_CACHE            (3<<9)
 #define _PAGE_UNCACHE          (2<<9)
+#define _PAGE_SO               _PAGE_UNCACHE
 
 #define _CACHE_MASK            (7<<9)
 
 
 #define _CACHE_MASK            _PAGE_CACHE
 
 #define _CACHE_CACHED          (_PAGE_VALID | _PAGE_CACHE | _PAGE_BUF)
-#define _CACHE_UNCACHED                (_PAGE_VALID | _PAGE_SO)
+#define _CACHE_UNCACHED                (_PAGE_VALID)
 
 #endif /* __ASM_CSKY_PGTABLE_BITS_H */
 
        vaddr = (unsigned long)area->addr;
 
        prot = __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE |
-                       _PAGE_GLOBAL | _CACHE_UNCACHED);
+                       _PAGE_GLOBAL | _CACHE_UNCACHED | _PAGE_SO);
 
        if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
                free_vm_area(area);