return (void __iomem *)(u32)paddr;
return ioremap_prot(paddr, size,
- pgprot_val(pgprot_noncached(PAGE_KERNEL)));
+ pgprot_noncached(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap);
* might need finer access control (R/W/X)
*/
void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
- unsigned long flags)
+ pgprot_t prot)
{
- pgprot_t prot = __pgprot(flags);
-
/* force uncached */
return generic_ioremap_prot(paddr, size, pgprot_noncached(prot));
}
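For callers that already hold a pgprot_t, the conversion simply drops the
pgprot_val() unwrapping, as the arc hunk above shows. A minimal before/after
sketch of a typical call site (base is a void __iomem *):

	/* before: the pgprot had to be flattened to unsigned long */
	base = ioremap_prot(paddr, size,
			    pgprot_val(pgprot_noncached(PAGE_KERNEL)));

	/* after: the pgprot_t is passed through unmodified */
	base = ioremap_prot(paddr, size, pgprot_noncached(PAGE_KERNEL));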
#define _PAGE_IOREMAP PROT_DEVICE_nGnRE
#define ioremap_wc(addr, size) \
- ioremap_prot((addr), (size), PROT_NORMAL_NC)
+ ioremap_prot((addr), (size), __pgprot(PROT_NORMAL_NC))
#define ioremap_np(addr, size) \
- ioremap_prot((addr), (size), PROT_DEVICE_nGnRnE)
+ ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
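The conversion also runs in the other direction: where a header passed raw
protection constants such as PROT_NORMAL_NC, which are plain integer
expressions rather than pgprot_t, the constant is now wrapped in __pgprot()
at the call site. __pgprot() only changes the C type, not the value, so the
generated code should be unchanged; a sketch of what the macro above expands to:

	/* raw attribute bits become a pgprot_t at the API boundary */
	void __iomem *p = ioremap_prot(addr, size, __pgprot(PROT_NORMAL_NC));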
/*
* io{read,write}{16,32,64}be() macros
if (pfn_is_map_memory(__phys_to_pfn(addr)))
return (void __iomem *)__phys_to_virt(addr);
- return ioremap_prot(addr, size, PROT_NORMAL);
+ return ioremap_prot(addr, size, __pgprot(PROT_NORMAL));
}
/*
prot = __acpi_get_writethrough_mem_attribute();
}
}
- return ioremap_prot(phys, size, pgprot_val(prot));
+ return ioremap_prot(phys, size, prot);
}
/*
}
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
- unsigned long prot)
+ pgprot_t pgprot)
{
unsigned long last_addr = phys_addr + size - 1;
- pgprot_t pgprot = __pgprot(prot);
/* Don't allow outside PHYS_MASK */
if (last_addr & ~PHYS_MASK)
*/
#define ioremap_wc(addr, size) \
ioremap_prot((addr), (size), \
- (_PAGE_IOREMAP & ~_CACHE_MASK) | _CACHE_UNCACHED)
+ __pgprot((_PAGE_IOREMAP & ~_CACHE_MASK) | _CACHE_UNCACHED))
#include <asm-generic/io.h>
#ifdef CONFIG_ARCH_IOREMAP
static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
- unsigned long prot_val)
+ pgprot_t prot)
{
- switch (prot_val & _CACHE_MASK) {
+ switch (pgprot_val(prot) & _CACHE_MASK) {
case _CACHE_CC:
return (void __iomem *)(unsigned long)(CACHE_BASE + offset);
case _CACHE_SUC:
}
#define ioremap(offset, size) \
- ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_SUC))
+ ioremap_prot((offset), (size), PAGE_KERNEL_SUC)
#define iounmap(addr) ((void)(addr))
*/
#define ioremap_wc(offset, size) \
ioremap_prot((offset), (size), \
- pgprot_val(wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC))
+ wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC)
#define ioremap_cache(offset, size) \
- ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL))
+ ioremap_prot((offset), (size), PAGE_KERNEL)
#define mmiowb() wmb()
}
void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
- unsigned long prot_val);
+ pgprot_t prot);
void iounmap(const volatile void __iomem *addr);
/*
* address.
*/
#define ioremap(offset, size) \
- ioremap_prot((offset), (size), _CACHE_UNCACHED)
+ ioremap_prot((offset), (size), __pgprot(_CACHE_UNCACHED))
/*
* ioremap_cache - map bus memory into CPU space
* memory-like regions on I/O busses.
*/
#define ioremap_cache(offset, size) \
- ioremap_prot((offset), (size), _page_cachable_default)
+ ioremap_prot((offset), (size), __pgprot(_page_cachable_default))
/*
* ioremap_wc - map bus memory into CPU space
* _CACHE_UNCACHED option (see cpu_probe() method).
*/
#define ioremap_wc(offset, size) \
- ioremap_prot((offset), (size), boot_cpu_data.writecombine)
+ ioremap_prot((offset), (size), __pgprot(boot_cpu_data.writecombine))
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
#define war_io_reorder_wmb() wmb()
* ioremap_prot gives the caller control over cache coherency attributes (CCA)
*/
void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
- unsigned long prot_val)
+ pgprot_t prot)
{
- unsigned long flags = prot_val & _CACHE_MASK;
+ unsigned long flags = pgprot_val(prot) & _CACHE_MASK;
unsigned long offset, pfn, last_pfn;
struct vm_struct *area;
phys_addr_t last_addr;
#include <ioremap.h>
void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
- unsigned long prot_val)
+ pgprot_t prot)
{
- unsigned long flags = prot_val & _CACHE_MASK;
+ unsigned long flags = pgprot_val(prot) & _CACHE_MASK;
u64 base = (flags == _CACHE_UNCACHED ? IO_BASE : UNCAC_BASE);
void __iomem *addr;
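With the pgprot_t signature, the MIPS implementations recover the raw CCA
bits via pgprot_val() before masking, as both hunks above do. A hypothetical
helper illustrating the extraction pattern (cca_of is not in the patch):

	static inline unsigned long cca_of(pgprot_t prot)
	{
		/* isolate the cache coherency attribute field */
		return pgprot_val(prot) & _CACHE_MASK;
	}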
_PAGE_ACCESSED | _PAGE_NO_CACHE)
#define ioremap_wc(addr, size) \
- ioremap_prot((addr), (size), _PAGE_IOREMAP)
+ ioremap_prot((addr), (size), __pgprot(_PAGE_IOREMAP))
#define pci_iounmap pci_iounmap
#include <linux/mm.h>
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
- unsigned long prot)
+ pgprot_t prot)
{
#ifdef CONFIG_EISA
unsigned long end = phys_addr + size - 1;
}
}
- return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
+ return generic_ioremap_prot(phys_addr, size, prot);
}
EXPORT_SYMBOL(ioremap_prot);
void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size);
#define ioremap_cache(addr, size) \
- ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL))
+ ioremap_prot((addr), (size), PAGE_KERNEL)
#define iounmap iounmap
return __ioremap_caller(addr, size, prot, caller);
}
-void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags)
+void __iomem *ioremap_prot(phys_addr_t addr, size_t size, pgprot_t prot)
{
- pte_t pte = __pte(flags);
+ pte_t pte = __pte(pgprot_val(prot));
void *caller = __builtin_return_address(0);
/* writeable implies dirty for kernel addresses */
static int __init setup_areas(struct spu *spu)
{
struct table {char* name; unsigned long addr; unsigned long size;};
- unsigned long shadow_flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL_RO));
spu_pdata(spu)->shadow = ioremap_prot(spu_pdata(spu)->shadow_addr,
- sizeof(struct spe_shadow), shadow_flags);
+ sizeof(struct spe_shadow),
+ pgprot_noncached_wc(PAGE_KERNEL_RO));
if (!spu_pdata(spu)->shadow) {
pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);
goto fail_ioremap;
#ifdef CONFIG_MMU
#define arch_memremap_wb(addr, size) \
- ((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL))
+ ((__force void *)ioremap_prot((addr), (size), __pgprot(_PAGE_KERNEL)))
#endif
#endif /* _ASM_RISCV_IO_H */
}
}
- return ioremap_prot(phys, size, pgprot_val(prot));
+ return ioremap_prot(phys, size, prot);
}
#ifdef CONFIG_PCI
#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL)
#define ioremap_wc(addr, size) \
- ioremap_prot((addr), (size), pgprot_val(pgprot_writecombine(PAGE_KERNEL)))
+ ioremap_prot((addr), (size), pgprot_writecombine(PAGE_KERNEL))
#define ioremap_wt(addr, size) \
- ioremap_prot((addr), (size), pgprot_val(pgprot_writethrough(PAGE_KERNEL)))
+ ioremap_prot((addr), (size), pgprot_writethrough(PAGE_KERNEL))
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
}
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
- unsigned long prot)
+ pgprot_t prot)
{
/*
* When PCI MIO instructions are unavailable the "physical" address
if (!static_branch_unlikely(&have_mio))
return (void __iomem *)phys_addr;
- return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
+ return generic_ioremap_prot(phys_addr, size, prot);
}
EXPORT_SYMBOL(ioremap_prot);
/* open I/O area window */
paddrbase = virt_to_phys((void *)PA_AREA5_IO);
prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
- cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, pgprot_val(prot));
+ cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, prot);
if (!cf_ide_base) {
printk("allocate_cf_area : can't open CF I/O window!\n");
return -ENOMEM;
paddrbase = virt_to_phys((void*)PA_AREA5_IO);
psize = PAGE_SIZE;
prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
- cf0_io_base = (u32)ioremap_prot(paddrbase, psize, pgprot_val(prot));
+ cf0_io_base = (u32)ioremap_prot(paddrbase, psize, prot);
if (!cf0_io_base) {
printk(KERN_ERR "%s : can't open CF I/O window!\n" , __func__ );
return -ENOMEM;
/* open I/O area window */
paddrbase = virt_to_phys((void *)PA_AREA5_IO);
prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
- cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, pgprot_val(prot));
+ cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, prot);
if (!cf_ide_base) {
printk("allocate_cf_area : can't open CF I/O window!\n");
return -ENOMEM;
#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL_NOCACHE)
#define ioremap_cache(addr, size) \
- ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL))
+ ioremap_prot((addr), (size), PAGE_KERNEL)
#endif /* CONFIG_MMU */
#include <asm-generic/io.h>
#endif /* CONFIG_29BIT */
void __iomem __ref *ioremap_prot(phys_addr_t phys_addr, size_t size,
- unsigned long prot)
+ pgprot_t pgprot)
{
void __iomem *mapped;
- pgprot_t pgprot = __pgprot(prot);
mapped = __ioremap_trapped(phys_addr, size);
if (mapped)
#define ioremap_uc ioremap_uc
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
#define ioremap_cache ioremap_cache
-extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val);
+extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, pgprot_t prot);
#define ioremap_prot ioremap_prot
extern void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size);
#define ioremap_encrypted ioremap_encrypted
EXPORT_SYMBOL(ioremap_cache);
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
- unsigned long prot_val)
+ pgprot_t prot)
{
return __ioremap_caller(phys_addr, size,
- pgprot2cachemode(__pgprot(prot_val)),
+ pgprot2cachemode(prot),
__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);
* I/O memory mapping functions.
*/
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
- unsigned long prot);
+ pgprot_t prot);
#define ioremap_prot ioremap_prot
#define iounmap iounmap
return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
else
return ioremap_prot(offset, size,
- pgprot_val(pgprot_noncached(PAGE_KERNEL)));
+ pgprot_noncached(PAGE_KERNEL));
}
#define ioremap ioremap
&& offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
else
- return ioremap_prot(offset, size, pgprot_val(PAGE_KERNEL));
+ return ioremap_prot(offset, size, PAGE_KERNEL);
}
#define ioremap_cache ioremap_cache
#include <asm/io.h>
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
- unsigned long prot)
+ pgprot_t prot)
{
unsigned long pfn = __phys_to_pfn((phys_addr));
WARN_ON(pfn_valid(pfn));
- return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
+ return generic_ioremap_prot(phys_addr, size, prot);
}
EXPORT_SYMBOL(ioremap_prot);
pgprot_t prot);
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
- unsigned long prot);
+ pgprot_t prot);
void iounmap(volatile void __iomem *addr);
void generic_iounmap(volatile void __iomem *addr);
static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
/* _PAGE_IOREMAP needs to be supplied by the architecture */
- return ioremap_prot(addr, size, _PAGE_IOREMAP);
+ return ioremap_prot(addr, size, __pgprot(_PAGE_IOREMAP));
}
#endif
#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */
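The CONFIG_GENERIC_IOREMAP contract is preserved: each architecture still
supplies _PAGE_IOREMAP as raw protection bits, and the generic ioremap()
above is now the single place where those bits become a pgprot_t. The two
sides, using sh's definition from earlier as the example:

	/* arch side: raw bits, deliberately not a pgprot_t */
	#define _PAGE_IOREMAP	pgprot_val(PAGE_KERNEL_NOCACHE)

	/* generic side: wrapped exactly once before the call */
	return ioremap_prot(addr, size, __pgprot(_PAGE_IOREMAP));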
#ifndef ioremap_prot
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
- unsigned long prot)
+ pgprot_t prot)
{
- return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
+ return generic_ioremap_prot(phys_addr, size, prot);
}
EXPORT_SYMBOL(ioremap_prot);
#endif
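Architectures that provide their own ioremap_prot() suppress this generic
fallback by defining the macro to itself in their asm/io.h, as the x86 and
xtensa declarations above do. A minimal sketch, with the architecture name
hypothetical:

	/* arch/foo/include/asm/io.h */
	void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
				   pgprot_t prot);
	#define ioremap_prot ioremap_prot	/* defeats the #ifndef above */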
void *buf, int len, int write)
{
resource_size_t phys_addr;
- unsigned long prot = 0;
+ pgprot_t prot = __pgprot(0);
void __iomem *maddr;
int offset = offset_in_page(addr);
int ret = -EINVAL;
retry:
if (follow_pfnmap_start(&args))
return -EINVAL;
- prot = pgprot_val(args.pgprot);
+ prot = args.pgprot;
phys_addr = (resource_size_t)args.pfn << PAGE_SHIFT;
writable = args.writable;
follow_pfnmap_end(&args);
if (follow_pfnmap_start(&args))
goto out_unmap;
- if ((prot != pgprot_val(args.pgprot)) ||
+ if ((pgprot_val(prot) != pgprot_val(args.pgprot)) ||
(phys_addr != (args.pfn << PAGE_SHIFT)) ||
(writable != args.writable)) {
follow_pfnmap_end(&args);
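One subtlety in this final hunk: pgprot_t is an opaque type (a single-member
struct on many configurations), so the saved value can no longer be compared
with != directly; both sides go through pgprot_val() instead. A minimal
sketch of the comparison, with the helper name hypothetical:

	static bool pgprot_same(pgprot_t a, pgprot_t b)
	{
		/* pgprot_t has no ==/!= operators; compare raw values */
		return pgprot_val(a) == pgprot_val(b);
	}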