 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		struct kvm_dirty_log *log)
 {
-	int r, i;
+	int r;
 	unsigned long n;
 	struct kvm_memory_slot *memslot;
-	unsigned long is_dirty = 0;
+	int is_dirty = 0;
 
 	mutex_lock(&kvm->slots_lock);
 
 		goto out;
 
 	kvm_ia64_sync_dirty_log(kvm, memslot);
-
-	n = kvm_dirty_bitmap_bytes(memslot);
-
-	for (i = 0; !is_dirty && i < n/sizeof(long); ++i)
-		is_dirty = memslot->dirty_bitmap[i];
+	r = kvm_get_dirty_log(kvm, log, &is_dirty);
+	if (r)
+		goto out;
 
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
 		kvm_flush_remote_tlbs(kvm);
-		r = -EFAULT;
-		if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
-			goto out;
+		n = kvm_dirty_bitmap_bytes(memslot);
 		memset(memslot->dirty_bitmap, 0, n);
-	} else {
-		r = -EFAULT;
-		if (clear_user(log->dirty_bitmap, n))
-			goto out;
 	}
 	r = 0;
 out:
 	struct kvm_memory_slot *memslot;
 	struct kvm_vcpu *vcpu;
 	ulong ga, ga_end;
-	unsigned long is_dirty = 0;
-	int r, i;
+	int is_dirty = 0;
+	int r;
 	unsigned long n;
 
 	mutex_lock(&kvm->slots_lock);
 
-	r = -EINVAL;
-	if (log->slot >= KVM_MEMORY_SLOTS)
-		goto out;
-
-	memslot = &kvm->memslots->memslots[log->slot];
-	r = -ENOENT;
-	if (!memslot->dirty_bitmap)
+	r = kvm_get_dirty_log(kvm, log, &is_dirty);
+	if (r)
 		goto out;
 
-	n = kvm_dirty_bitmap_bytes(memslot);
-
-	for (i = 0; !is_dirty && i < n/sizeof(long); ++i)
-		is_dirty = memslot->dirty_bitmap[i];
-
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
+		memslot = &kvm->memslots->memslots[log->slot];
+
 		ga = memslot->base_gfn << PAGE_SHIFT;
 		ga_end = ga + (memslot->npages << PAGE_SHIFT);
 
 		kvm_for_each_vcpu(n, vcpu, kvm)
 			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
-		r = -EFAULT;
-		if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
-			goto out;
-
+		n = kvm_dirty_bitmap_bytes(memslot);
 		memset(memslot->dirty_bitmap, 0, n);
-	} else {
-		r = -EFAULT;
-		if (clear_user(log->dirty_bitmap, n))
-			goto out;
 	}
 	r = 0;
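
Both converted handlers end up with the same shape (ia64 additionally syncs its arch-side dirty state first): take slots_lock, let kvm_get_dirty_log() validate the slot and copy the bitmap out to user space, and only when something was actually dirty flush the architecture's cached translations and clear the slot's bitmap. For orientation only, a sketch of that shape, not a literal copy of either hunk; arch_flush_dirty_slot() is a placeholder for the arch-specific step (kvm_flush_remote_tlbs() on ia64, kvmppc_mmu_pte_pflush() over the slot's range on powerpc), and the unlock path stands in for the elided context:

	int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
	{
		struct kvm_memory_slot *memslot;
		int is_dirty = 0;
		unsigned long n;
		int r;

		mutex_lock(&kvm->slots_lock);

		r = kvm_get_dirty_log(kvm, log, &is_dirty);
		if (r)
			goto out;

		if (is_dirty) {
			memslot = &kvm->memslots->memslots[log->slot];

			arch_flush_dirty_slot(kvm, memslot);	/* placeholder, not a real KVM helper */

			n = kvm_dirty_bitmap_bytes(memslot);
			memset(memslot->dirty_bitmap, 0, n);
		}
		r = 0;
	out:
		mutex_unlock(&kvm->slots_lock);
		return r;
	}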
 
 int kvm_dev_ioctl_check_extension(long ext);
+int kvm_get_dirty_log(struct kvm *kvm,
+			struct kvm_dirty_log *log, int *is_dirty);
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 			struct kvm_dirty_log *log);
 
 	return kvm_set_memory_region(kvm, mem, user_alloc);
 }
 
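+/*
+ * kvm_get_dirty_log - copy a memslot's dirty bitmap to user space
+ *
+ * Validates log->slot, copies the slot's dirty bitmap to log->dirty_bitmap,
+ * and sets *is_dirty if any bit in the bitmap is set.  The bitmap itself is
+ * not cleared here; the callers above clear it (after flushing any stale
+ * translations) only when *is_dirty reports that something was logged.
+ */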
+int kvm_get_dirty_log(struct kvm *kvm,
+			struct kvm_dirty_log *log, int *is_dirty)
+{
+	struct kvm_memory_slot *memslot;
+	int r, i;
+	unsigned long n;
+	unsigned long any = 0;
+
+	r = -EINVAL;
+	if (log->slot >= KVM_MEMORY_SLOTS)
+		goto out;
+
+	memslot = &kvm->memslots->memslots[log->slot];
+	r = -ENOENT;
+	if (!memslot->dirty_bitmap)
+		goto out;
+
+	n = kvm_dirty_bitmap_bytes(memslot);
+
+	for (i = 0; !any && i < n/sizeof(long); ++i)
+		any = memslot->dirty_bitmap[i];
+
+	r = -EFAULT;
+	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
+		goto out;
+
+	if (any)
+		*is_dirty = 1;
+
+	r = 0;
+out:
+	return r;
+}
+
 void kvm_disable_largepages(void)
 {
 	largepages_enabled = false;
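
For reference, the path added here is reached from user space through the KVM_GET_DIRTY_LOG vm ioctl. Below is a minimal user-space sketch, not part of the patch: error handling is trimmed, and it assumes vm_fd, the slot number, and the slot's size in pages are already known, that the slot was registered via KVM_SET_USER_MEMORY_REGION with KVM_MEM_LOG_DIRTY_PAGES set, and that longs are 64 bits (matching the long-sized rounding of kvm_dirty_bitmap_bytes()).

#include <linux/kvm.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

/* Read and count the pages dirtied in one slot since the previous call. */
static long count_dirty_pages(int vm_fd, __u32 slot, __u64 npages)
{
	/* The kernel copies whole longs, so size the buffer the same way. */
	size_t bytes = ((npages + 63) / 64) * 8;
	unsigned long *bitmap = calloc(1, bytes);
	struct kvm_dirty_log log;
	long dirty = 0;
	__u64 i;

	if (!bitmap)
		return -1;

	memset(&log, 0, sizeof(log));
	log.slot = slot;
	log.dirty_bitmap = bitmap;

	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
		free(bitmap);
		return -1;
	}

	/* One bit per guest page, scanned in long-sized chunks, mirroring the
	 * loop in kvm_get_dirty_log() above. */
	for (i = 0; i < npages; i++)
		if (bitmap[i / 64] & (1UL << (i % 64)))
			dirty++;

	free(bitmap);
	return dirty;
}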