        etnaviv_core_dump_header(iter, type, iter->data + size);
 }
 
-void etnaviv_core_dump(struct etnaviv_gpu *gpu)
+void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
 {
+       struct etnaviv_gpu *gpu = submit->gpu;
        struct core_dump_iterator iter;
-       struct etnaviv_vram_mapping *vram;
        struct etnaviv_gem_object *obj;
-       struct etnaviv_gem_submit *submit;
-       struct drm_sched_job *s_job;
        unsigned int n_obj, n_bomap_pages;
        size_t file_size, mmu_size;
        __le64 *bomap, *bomap_start;
+       int i;
 
        /* Only catch the first event, or when manually re-armed */
        if (!etnaviv_dump_core)
                return;
        etnaviv_dump_core = false;
 
        mmu_size = etnaviv_iommu_dump_size(gpu->mmu);
 
-       /* We always dump registers, mmu, ring and end marker */
-       n_obj = 4;
+       /* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
+       n_obj = 5;
        n_bomap_pages = 0;
        file_size = ARRAY_SIZE(etnaviv_dump_registers) *
                        sizeof(struct etnaviv_dump_registers) +
-                   mmu_size + gpu->buffer.size;
-
-       /* Add in the active command buffers */
-       list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
-               submit = to_etnaviv_submit(s_job);
-               file_size += submit->cmdbuf.size;
-               n_obj++;
-       }
+                   mmu_size + gpu->buffer.size + submit->cmdbuf.size;
 
        /* Add in the active buffer objects */
-       list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
-               if (!vram->use)
-                       continue;
-
-               obj = vram->object;
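+       /* Dump only the buffer objects attached to the hanging submit */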
+       for (i = 0; i < submit->nr_bos; i++) {
+               obj = submit->bos[i].obj;
                file_size += obj->base.size;
                n_bomap_pages += obj->base.size >> PAGE_SHIFT;
                n_obj++;
        }
 
        etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
                              gpu->buffer.size,
                              etnaviv_cmdbuf_get_va(&gpu->buffer));
 
-       list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
-               submit = to_etnaviv_submit(s_job);
-               etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
-                                     submit->cmdbuf.vaddr, submit->cmdbuf.size,
-                                     etnaviv_cmdbuf_get_va(&submit->cmdbuf));
-       }
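+       /* Dump the command buffer of the hanging submit */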
+       etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
+                             submit->cmdbuf.vaddr, submit->cmdbuf.size,
+                             etnaviv_cmdbuf_get_va(&submit->cmdbuf));
 
        /* Reserve space for the bomap */
        if (n_bomap_pages) {
                bomap_start = bomap = iter.data;
                memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
                etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
                                         bomap + n_bomap_pages);
        } else {
                /* Silence warning */
                bomap_start = bomap = NULL;
        }
 
-       list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
+       for (i = 0; i < submit->nr_bos; i++) {
+               struct etnaviv_vram_mapping *vram;
                struct page **pages;
                void *vaddr;
 
-               if (vram->use == 0)
-                       continue;
-
-               obj = vram->object;
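+               /* BO and mapping are taken from the submit, not the MMU list */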
+               obj = submit->bos[i].obj;
+               vram = submit->bos[i].mapping;
 
                mutex_lock(&obj->lock);
                pages = etnaviv_gem_get_pages(obj);