return psp_execute_ip_fw_load(&adev->psp, &ucode);
}
+
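+/*
+ * sysfs show callback for the vcn_reset_mask attribute: reports the reset
+ * types supported by the VCN block, formatted by amdgpu_show_reset_mask().
+ * Userspace can read it from the DRM device node, e.g.
+ * /sys/class/drm/card0/device/vcn_reset_mask (the card index varies).
+ */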
+static ssize_t amdgpu_get_vcn_reset_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (!adev)
+ return -ENODEV;
+
+ return amdgpu_show_reset_mask(buf, adev->vcn.supported_reset);
+}
+
+static DEVICE_ATTR(vcn_reset_mask, 0444,
+ amdgpu_get_vcn_reset_mask, NULL);
+
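+/**
+ * amdgpu_vcn_sysfs_reset_mask_init - create the vcn_reset_mask sysfs file
+ * @adev: amdgpu device pointer
+ *
+ * The attribute is only created when at least one VCN instance is present.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */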
+int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev)
+{
+ if (adev->vcn.num_vcn_inst)
+ return device_create_file(adev->dev, &dev_attr_vcn_reset_mask);
+
+ return 0;
+}
+
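+/**
+ * amdgpu_vcn_sysfs_reset_mask_fini - remove the vcn_reset_mask sysfs file
+ * @adev: amdgpu device pointer
+ */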
+void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev)
+{
+ if (adev->vcn.num_vcn_inst)
+ device_remove_file(adev->dev, &dev_attr_vcn_reset_mask);
+}
/* IP reg dump */
uint32_t *ip_dump;
+
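+ /* supported reset types, exposed via the vcn_reset_mask sysfs file */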
+ uint32_t supported_reset;
};
struct amdgpu_fw_shared_rb_ptrs_struct {
int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
enum AMDGPU_UCODE_ID ucode_id);
int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev);
+int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev);
+void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev);
#endif
vcn_v4_0_fw_shared_init(adev, i);
}
+ /* TODO: Add queue reset mask when FW fully supports it */
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
adev->vcn.ip_dump = ptr;
}
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
return 0;
}
if (r)
return r;
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
r = amdgpu_vcn_sw_fini(adev);
kfree(adev->vcn.ip_dump);
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
}
+ /* TODO: Add queue reset mask when FW fully supports it */
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
adev->vcn.ip_dump = ptr;
}
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
return 0;
}
if (r)
return r;
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
r = amdgpu_vcn_sw_fini(adev);
kfree(adev->vcn.ip_dump);
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
}
+ /* TODO: Add queue reset mask when FW fully supports it */
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
} else {
adev->vcn.ip_dump = ptr;
}
+
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
return 0;
}
if (r)
return r;
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
r = amdgpu_vcn_sw_fini(adev);
kfree(adev->vcn.ip_dump);