--- a/drivers/media/pci/intel/ipu6/ipu6-mmu.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-mmu.c
 	return pt;
 }
-static size_t l2_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
-		       phys_addr_t dummy, size_t size)
+static void l2_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+		     phys_addr_t dummy, size_t size)
 {
 	unsigned int l2_entries;
 	unsigned int l2_idx;
 	WARN_ON_ONCE(size);
 	spin_unlock_irqrestore(&mmu_info->lock, flags);
-	return l2_entries << ISP_PAGE_SHIFT;
 }
 static int l2_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
 	return l2_map(mmu_info, iova_start, paddr, size);
 }
-static size_t __ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info,
-			       unsigned long iova, size_t size)
+static void __ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info,
+			     unsigned long iova, size_t size)
 {
-	return l2_unmap(mmu_info, iova, 0, size);
+	l2_unmap(mmu_info, iova, 0, size);
 }
 static int allocate_trash_buffer(struct ipu6_mmu *mmu)
 	return phy_addr;
 }
-size_t ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
-		      size_t size)
+void ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+		    size_t size)
 {
 	unsigned int min_pagesz;
 	if (!IS_ALIGNED(iova | size, min_pagesz)) {
 		dev_err(NULL, "unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
 			iova, size, min_pagesz);
-		return 0;
+		return;
 	}
-	return __ipu6_mmu_unmap(mmu_info, iova, size);
+	__ipu6_mmu_unmap(mmu_info, iova, size);
 }
 int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
--- a/drivers/media/pci/intel/ipu6/ipu6-mmu.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-mmu.h
 void ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu);
 int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
 		 phys_addr_t paddr, size_t size);
-size_t ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
-		      size_t size);
+void ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+		    size_t size);
 phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info,
 				  dma_addr_t iova);
 #endif
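
Call-site impact, sketched for illustration: with the size_t return gone,
callers invoke ipu6_mmu_unmap() purely for its side effect. The helper and
its name below are hypothetical, not taken from the driver; the alignment
remark restates the check visible in the hunk above.

/*
 * Hypothetical caller sketch -- not part of this patch or the driver.
 * The removed return value (l2_entries << ISP_PAGE_SHIFT) appears to
 * count entries from only the most recently walked L2 table, so it was
 * not a reliable "bytes unmapped" figure anyway.
 */
static void example_unmap_buffer(struct ipu6_mmu_info *mmu_info,
				 unsigned long iova, size_t size)
{
	/*
	 * iova and size must be aligned to the minimum supported page
	 * size; otherwise ipu6_mmu_unmap() logs an error and returns
	 * without unmapping anything.
	 */
	ipu6_mmu_unmap(mmu_info, iova, size);
}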