goto out_free_pages;
if (force_dma_unencrypted(dev)) {
err = set_memory_decrypted((unsigned long)ret,
- 1 << get_order(size));
+ PFN_UP(size));
if (err)
goto out_free_pages;
}
ret = page_address(page);
if (force_dma_unencrypted(dev)) {
err = set_memory_decrypted((unsigned long)ret,
- 1 << get_order(size));
+ PFN_UP(size));
if (err)
goto out_free_pages;
}
out_encrypt_pages:
if (force_dma_unencrypted(dev)) {
err = set_memory_encrypted((unsigned long)page_address(page),
- 1 << get_order(size));
+ PFN_UP(size));
/* If memory cannot be re-encrypted, it must be leaked */
if (err)
return NULL;
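
Each hunk above makes the same substitution: the page count handed to set_memory_decrypted() and set_memory_encrypted() changes from the power-of-two 1 << get_order(size) to the exact PFN_UP(size). The two expressions agree only when size is already a power-of-two number of pages; otherwise the old calculation rounds up to the next order and flips the encryption attribute on pages the buffer may not even own, since not every allocator behind this code is guaranteed to round the allocation up to a full order. A minimal userspace sketch of the arithmetic, using stand-in helpers that mirror the kernel's PFN_UP() and get_order() and an assumed 4 KiB page size (the demo_* names and PAGE_SHIFT value are illustrative, not kernel code):

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Stand-in for the kernel's PFN_UP(): pages needed to cover @size. */
static unsigned long demo_pfn_up(unsigned long size)
{
	return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

/* Stand-in for the kernel's get_order(): smallest order covering @size. */
static unsigned int demo_get_order(unsigned long size)
{
	unsigned int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	/* 132 KiB: 33 pages are enough, but the next order covers 64. */
	unsigned long size = 132 * 1024;

	printf("PFN_UP(size)         = %lu pages\n", demo_pfn_up(size));
	printf("1 << get_order(size) = %lu pages\n",
	       1UL << demo_get_order(size));
	return 0;
}

For this 132 KiB example the old expression would change the attribute on 64 pages while the buffer only spans 33, so roughly half of the touched pages could lie outside the allocation.
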
void dma_direct_free(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
- unsigned int page_order = get_order(size);
-
if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
!force_dma_unencrypted(dev)) {
/* cpu_addr is a struct page cookie, not a kernel address */
return;
if (force_dma_unencrypted(dev))
- set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+ set_memory_encrypted((unsigned long)cpu_addr, PFN_UP(size));
if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
vunmap(cpu_addr);
ret = page_address(page);
if (force_dma_unencrypted(dev)) {
- if (set_memory_decrypted((unsigned long)ret,
- 1 << get_order(size)))
+ if (set_memory_decrypted((unsigned long)ret, PFN_UP(size)))
goto out_free_pages;
}
memset(ret, 0, size);
struct page *page, dma_addr_t dma_addr,
enum dma_data_direction dir)
{
- unsigned int page_order = get_order(size);
void *vaddr = page_address(page);
/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
return;
if (force_dma_unencrypted(dev))
- set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
+ set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
dma_free_contiguous(dev, page, size);
}
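
The free side mirrors the allocation side: whatever count was decrypted at allocation time must be re-encrypted on release, so dma_direct_free() and the page-based free routine at the end now derive that count from size with the same PFN_UP(size). With the count computed directly from size, the cached page_order locals that only existed to feed 1 << page_order become dead, which is why the patch also deletes them.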