DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
        DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
 
-       /* enable PCIE atomic ops */
-       r = pci_enable_atomic_ops_to_root(adev->pdev,
-                                         PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
-                                         PCI_EXP_DEVCAP2_ATOMIC_COMP64);
-       if (r) {
-               adev->have_atomics_support = false;
-               DRM_INFO("PCIE atomic ops is not supported\n");
-       } else {
-               adev->have_atomics_support = true;
-       }
-
        amdgpu_device_get_pcie_info(adev);
 
        if (amdgpu_mcbp)
        if (r)
                return r;
 
+       /* enable PCIE atomic ops */
+       if (amdgpu_sriov_vf(adev))
+               adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
+                       adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_enabled_flags ==
+                       (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+       else
+               adev->have_atomics_support =
+                       !pci_enable_atomic_ops_to_root(adev->pdev,
+                                         PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
+                                         PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+       if (!adev->have_atomics_support)
+               dev_info(adev->dev, "PCIE atomic ops is not supported\n");
+
        /* doorbell bar mapping and doorbell index init*/
        amdgpu_device_doorbell_init(adev);
 
 
        } mm_bw_management[AMD_SRIOV_MSG_RESERVE_VCN_INST];
        /* UUID info */
        struct amd_sriov_msg_uuid_info uuid_info;
+       /* PCIe atomic ops info */
+       uint32_t pcie_atomic_ops_enabled_flags;
        /* reserved */
-       uint32_t reserved[256 - 47];
+       uint32_t reserved[256 - 48];
 };
 
 struct amd_sriov_msg_vf2pf_info_header {