	ret = vpe_init_microcode(vpe);
	if (ret)
		goto out;
+
+	/* TODO: Add queue reset mask when FW fully supports it */
+	adev->vpe.supported_reset =
+		amdgpu_get_soft_full_reset_mask(&adev->vpe.ring);
+	ret = amdgpu_vpe_sysfs_reset_mask_init(adev);
+	if (ret)
+		goto out;
out:
	return ret;
}
	release_firmware(vpe->fw);
	vpe->fw = NULL;
+	amdgpu_vpe_sysfs_reset_mask_fini(adev);
	vpe_ring_fini(vpe);
	amdgpu_bo_free_kernel(&adev->vpe.cmdbuf_obj,
			      &adev->vpe.cmdbuf_gpu_addr,
			      (void **)&adev->vpe.cmdbuf_cpu_addr);
	schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}
+static ssize_t amdgpu_get_vpe_reset_mask(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+
+	if (!adev)
+		return -ENODEV;
+
+	return amdgpu_show_reset_mask(buf, adev->vpe.supported_reset);
+}
+
+static DEVICE_ATTR(vpe_reset_mask, 0444,
+		   amdgpu_get_vpe_reset_mask, NULL);
+
+int amdgpu_vpe_sysfs_reset_mask_init(struct amdgpu_device *adev)
+{
+	int r = 0;
+
+	if (adev->vpe.num_instances) {
+		r = device_create_file(adev->dev, &dev_attr_vpe_reset_mask);
+		if (r)
+			return r;
+	}
+
+	return r;
+}
+
+void amdgpu_vpe_sysfs_reset_mask_fini(struct amdgpu_device *adev)
+{
+	if (adev->vpe.num_instances)
+		device_remove_file(adev->dev, &dev_attr_vpe_reset_mask);
+}
+
static const struct amdgpu_ring_funcs vpe_ring_funcs = {
	.type = AMDGPU_RING_TYPE_VPE,
	.align_mask = 0xf,
	uint32_t num_instances;
	bool collaborate_mode;
+	uint32_t supported_reset;
};
int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev);
int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe);
int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe);
int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe);
+void amdgpu_vpe_sysfs_reset_mask_fini(struct amdgpu_device *adev);
+int amdgpu_vpe_sysfs_reset_mask_init(struct amdgpu_device *adev);
#define vpe_ring_init(vpe) ((vpe)->funcs->ring_init ? (vpe)->funcs->ring_init((vpe)) : 0)
#define vpe_ring_start(vpe) ((vpe)->funcs->ring_start ? (vpe)->funcs->ring_start((vpe)) : 0)
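
For reference, a minimal userspace sketch (not part of the patch) of how the new attribute could be read once the driver exposes it. The sysfs path and card index are assumptions and vary per system; the attribute is created on the device that cardX/device points at, and only when adev->vpe.num_instances is non-zero.

/*
 * Hypothetical userspace example, not part of the patch: read the
 * vpe_reset_mask attribute and print the reported reset types.  The
 * sysfs path is an assumption; the DRM card index differs per system.
 */
#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/class/drm/card0/device/vpe_reset_mask", "r");

	if (!f) {
		perror("fopen vpe_reset_mask");
		return 1;
	}

	/* The attribute reports the supported reset types as a short string. */
	if (fgets(buf, sizeof(buf), f))
		printf("VPE supported resets: %s", buf);

	fclose(f);
	return 0;
}

Reading the same file with cat from a shell gives the same output.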