u64 frame_start, frame_end;
bool crc_pending;
- spin_lock_irq(&out->state_lock);
+ spin_lock_irq(&out->crc_lock);
frame_start = crtc_state->frame_start;
frame_end = crtc_state->frame_end;
crc_pending = crtc_state->crc_pending;
crtc_state->frame_start = 0;
crtc_state->frame_end = 0;
crtc_state->crc_pending = false;
- spin_unlock_irq(&out->state_lock);
+ spin_unlock_irq(&out->crc_lock);
/*
 * We raced with the vblank hrtimer and previous work already computed
 * the crc, nothing to do.
 */
/* update frame_start only if a queued vkms_crc_work_handle()
 * has read the data
 */
- spin_lock(&output->state_lock);
+ spin_lock(&output->crc_lock);
if (!state->crc_pending)
state->frame_start = frame;
else
state->frame_start, frame);
state->frame_end = frame;
state->crc_pending = true;
- spin_unlock(&output->state_lock);
+ spin_unlock(&output->crc_lock);
ret = queue_work(output->crc_workq, &state->crc_work);
if (!ret)
drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
spin_lock_init(&vkms_out->lock);
- spin_lock_init(&vkms_out->state_lock);
+ spin_lock_init(&vkms_out->crc_lock);
vkms_out->crc_workq = alloc_ordered_workqueue("vkms_crc_workq", 0);
if (!vkms_out->crc_workq)
struct drm_crtc_state base;
struct work_struct crc_work;
+ /* below three are protected by vkms_output.crc_lock */
bool crc_pending;
u64 frame_start;
u64 frame_end;
struct workqueue_struct *crc_workq;
/* protects concurrent access to crc_data */
spinlock_t lock;
- /* protects concurrent access to crtc_state */
- spinlock_t state_lock;
+
+ spinlock_t crc_lock;
};
struct vkms_device {