We are traversing the linked list, invalid_list, freeing each entry with
kvm_mmu_free_page().  Since entries are deleted during the traversal, the
_safe iterator variant, list_for_each_entry_safe(), is the right tool for
this case.
Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                                    struct list_head *invalid_list)
 {
-       struct kvm_mmu_page *sp;
+       struct kvm_mmu_page *sp, *nsp;
 
        if (list_empty(invalid_list))
                return;
         */
        kvm_flush_remote_tlbs(kvm);
 
-       do {
-               sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
+       list_for_each_entry_safe(sp, nsp, invalid_list, link) {
                WARN_ON(!sp->role.invalid || sp->root_count);
                kvm_mmu_free_page(sp);
-       } while (!list_empty(invalid_list));
+       }
 }
 
 /*