static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
	iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL, 1);
-
-	return iommu->cmd_sem ? 0 : -ENOMEM;
+	if (!iommu->cmd_sem)
+		return -ENOMEM;
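+	/* Cache the semaphore's physical address for completion-wait writeback. */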
+	iommu->cmd_sem_paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
+	return 0;
+}
+
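+/*
+ * The previous kernel left its event buffer base address programmed in
+ * the MMIO registers; map that buffer rather than allocating a new one.
+ */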
+static int __init remap_event_buffer(struct amd_iommu *iommu)
+{
+	u64 paddr;
+
+	pr_info_once("Re-using event buffer from the previous kernel\n");
+	paddr = readq(iommu->mmio_base + MMIO_EVT_BUF_OFFSET) & PM_ADDR_MASK;
+	iommu->evt_buf = iommu_memremap(paddr, EVT_BUFFER_SIZE);
+
+	return iommu->evt_buf ? 0 : -ENOMEM;
+}
+
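+/*
+ * Likewise, map the command buffer at the address the previous kernel
+ * programmed into the command buffer base register.
+ */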
+static int __init remap_command_buffer(struct amd_iommu *iommu)
+{
+	u64 paddr;
+
+	pr_info_once("Re-using command buffer from the previous kernel\n");
+	paddr = readq(iommu->mmio_base + MMIO_CMD_BUF_OFFSET) & PM_ADDR_MASK;
+	iommu->cmd_buf = iommu_memremap(paddr, CMD_BUFFER_SIZE);
+
+	return iommu->cmd_buf ? 0 : -ENOMEM;
+}
+
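+/*
+ * Under SNP the completion wait buffer address can be read back from the
+ * hardware and re-used; otherwise a fresh semaphore page is allocated.
+ */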
+static int __init remap_or_alloc_cwwb_sem(struct amd_iommu *iommu)
+{
+	u64 paddr;
+
+	if (check_feature(FEATURE_SNP)) {
+		/*
+		 * When SNP is enabled, the exclusion base register is used for the
+		 * completion wait buffer (CWB) address. Read and re-use it.
+		 */
+		pr_info_once("Re-using CWB buffers from the previous kernel\n");
+		paddr = readq(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET) & PM_ADDR_MASK;
+		iommu->cmd_sem = iommu_memremap(paddr, PAGE_SIZE);
+		if (!iommu->cmd_sem)
+			return -ENOMEM;
+		iommu->cmd_sem_paddr = paddr;
+	} else {
+		return alloc_cwwb_sem(iommu);
+	}
+
+	return 0;
+}
+
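+/*
+ * Single helper to set up the CWB semaphore, command buffer and event
+ * buffer, picking the remap (kdump) or fresh-allocation path.
+ */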
+static int __init alloc_iommu_buffers(struct amd_iommu *iommu)
+{
+	int ret;
+
+	/*
+	 * Re-use/remap the completion wait, command and event buffers
+	 * allocated by the previous kernel for kdump boot.
+	 */
+	if (is_kdump_kernel()) {
+		ret = remap_or_alloc_cwwb_sem(iommu);
+		if (ret)
+			return ret;
+
+		ret = remap_command_buffer(iommu);
+		if (ret)
+			return ret;
+
+		ret = remap_event_buffer(iommu);
+		if (ret)
+			return ret;
+	} else {
+		ret = alloc_cwwb_sem(iommu);
+		if (ret)
+			return ret;
+
+		ret = alloc_command_buffer(iommu);
+		if (ret)
+			return ret;
+
+		ret = alloc_event_buffer(iommu);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
}
static void __init free_cwwb_sem(struct amd_iommu *iommu)

@@ ... @@
		pr_err("The address of old device table is above 4G, not trustworthy!\n");
		return false;
	}
-	old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
-		    ? (__force void *)ioremap_encrypted(old_devtb_phys,
-							pci_seg->dev_table_size)
-		    : memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB);
-
-	if (!old_devtb)
-		return false;
-
-	pci_seg->old_dev_tbl_cpy = iommu_alloc_pages_sz(
-		GFP_KERNEL | GFP_DMA32, pci_seg->dev_table_size);
+	/*
+	 * Re-use the previous kernel's device table for kdump.
+	 */
+	pci_seg->old_dev_tbl_cpy = iommu_memremap(old_devtb_phys, pci_seg->dev_table_size);
	if (pci_seg->old_dev_tbl_cpy == NULL) {
-		pr_err("Failed to allocate memory for copying old device table!\n");
-		memunmap(old_devtb);
+		pr_err("Failed to remap memory for reusing old device table!\n");
		return false;
	}