struct workqueue_struct *wq;
struct work_struct work;
u32 pending;
+ struct mutex mutex;
} super;
#define NVKM_DISP_EVENT_CHAN_AWAKEN BIT(0)
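For orientation before the hunks below: the new mutex sits alongside the existing supervisor workqueue state, presumably so paths outside this excerpt can serialize against the supervisor handler. A minimal sketch of the resulting shape, assuming hypothetical names (my_disp, my_super_handler) in place of the nvkm types:

    #include <linux/kernel.h>
    #include <linux/mutex.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct my_disp {
        struct {
            struct workqueue_struct *wq;
            struct work_struct work;
            u32 pending;
            struct mutex mutex;    /* serializes supervisor handling */
        } super;
    };

    static void my_super_handler(struct work_struct *work)
    {
        struct my_disp *disp = container_of(work, struct my_disp, super.work);

        mutex_lock(&disp->super.mutex);
        /* sample status registers, service the pending stage, ack */
        mutex_unlock(&disp->super.mutex);
    }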
@@ ... @@
nvkm_gpuobj_del(&disp->inst);
nvkm_event_fini(&disp->uevent);
- if (disp->super.wq)
+
+ if (disp->super.wq) {
destroy_workqueue(disp->super.wq);
+ mutex_destroy(&disp->super.mutex);
+ }
nvkm_event_fini(&disp->vblank);
nvkm_event_fini(&disp->hpd);
@@ ... @@
return -ENOMEM;
INIT_WORK(&disp->super.work, func->super);
+ mutex_init(&disp->super.mutex);
}
return nvkm_event_init(func->uevent, 1, ARRAY_SIZE(disp->chan), &disp->uevent);
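The init/fini hunks above pair up: mutex_init() runs on the same path that creates the supervisor workqueue, and mutex_destroy() sits inside the if (disp->super.wq) guard so teardown only touches a mutex that was actually initialized. A hedged sketch of that pairing; the workqueue name and the creation call are placeholders, since the excerpt does not show them:

    /* init path: supervisor state exists only when a handler does */
    disp->super.wq = create_singlethread_workqueue("disp-super");
    if (!disp->super.wq)
        return -ENOMEM;

    INIT_WORK(&disp->super.work, my_super_handler);
    mutex_init(&disp->super.mutex);

    /* fini path: tear down inside the same guard */
    if (disp->super.wq) {
        destroy_workqueue(disp->super.wq);
        mutex_destroy(&disp->super.mutex);
    }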
@@ ... @@
u32 mask[4];
nvkm_debug(subdev, "supervisor %d\n", ffs(disp->super.pending));
+ mutex_lock(&disp->super.mutex);
+
list_for_each_entry(head, &disp->heads, head) {
mask[head->id] = nvkm_rd32(device, 0x6101d4 + (head->id * 0x800));
HEAD_DBG(head, "%08x", mask[head->id]);
@@ ... @@
list_for_each_entry(head, &disp->heads, head)
nvkm_wr32(device, 0x6101d4 + (head->id * 0x800), 0x00000000);
+
nvkm_wr32(device, 0x6101d0, 0x80000000);
+ mutex_unlock(&disp->super.mutex);
}
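Note the lock placement in this handler: mutex_lock() comes after the initial nvkm_debug(), which only reads disp->super.pending, and mutex_unlock() comes only after the per-head registers at 0x6101d4 + (head->id * 0x800) are cleared and the 0x80000000 ack is written to 0x6101d0, so the whole sample/handle/ack sequence executes under the mutex.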
@@ ... @@
void
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_head *head;
- u32 stat = nvkm_rd32(device, 0x6107a8);
- u32 mask[4];
+ u32 stat, mask[4];
+
+ mutex_lock(&disp->super.mutex);
+ stat = nvkm_rd32(device, 0x6107a8);
nvkm_debug(subdev, "supervisor %d: %08x\n", ffs(disp->super.pending), stat);
list_for_each_entry(head, &disp->heads, head) {
@@ ... @@
list_for_each_entry(head, &disp->heads, head)
nvkm_wr32(device, 0x6107ac + (head->id * 4), 0x00000000);
+
nvkm_wr32(device, 0x6107a8, 0x80000000);
+ mutex_unlock(&disp->super.mutex);
}
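Here the declaration split is what lets the status read move into the critical section: stat is declared up front and nvkm_rd32(device, 0x6107a8) is issued only after mutex_lock(), so the supervisor status can no longer be sampled while another holder of the mutex is mid-update.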
@@ ... @@
static void
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_head *head;
- u32 super = nvkm_rd32(device, 0x610030);
+ u32 super;
+
+ mutex_lock(&disp->super.mutex);
+ super = nvkm_rd32(device, 0x610030);
nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super.pending, super);
@@ ... @@
}
nvkm_wr32(device, 0x610030, 0x80000000);
+ mutex_unlock(&disp->super.mutex);
}
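The third handler repeats the pattern, with mutex_unlock() again placed directly after the 0x80000000 ack write. For completeness, a hypothetical sketch of the producer side implied by super.pending and super.wq; the real nvkm interrupt code is not part of this excerpt:

    /* interrupt side (hypothetical): record which supervisor stage
     * fired, then kick the work item that the handlers above run */
    disp->super.pending = stat;
    queue_work(disp->super.wq, &disp->super.work);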
const struct nvkm_enum