There's no need for user_alloc to be an int; it only ever holds a boolean.
Convert it to bool and move it to the end of the struct for better alignment.
Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
                                        kvm_mem.guest_phys_addr;
                kvm_userspace_mem.memory_size = kvm_mem.memory_size;
                r = kvm_vm_ioctl_set_memory_region(kvm,
-                                       &kvm_userspace_mem, 0);
+                                       &kvm_userspace_mem, false);
                if (r)
                        goto out;
                break;
                struct kvm_memory_slot *memslot,
                struct kvm_memory_slot old,
                struct kvm_userspace_memory_region *mem,
-               int user_alloc)
+               bool user_alloc)
 {
        unsigned long i;
        unsigned long pfn;
 void kvm_arch_commit_memory_region(struct kvm *kvm,
                struct kvm_userspace_memory_region *mem,
                struct kvm_memory_slot old,
-               int user_alloc)
+               bool user_alloc)
 {
        return;
 }
 
                                    struct kvm_memory_slot *memslot,
                                    struct kvm_memory_slot old,
                                    struct kvm_userspace_memory_region *mem,
-                                   int user_alloc)
+                                   bool user_alloc)
 {
        return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
 }
 void kvm_arch_commit_memory_region(struct kvm *kvm,
                struct kvm_userspace_memory_region *mem,
                struct kvm_memory_slot old,
-               int user_alloc)
+               bool user_alloc)
 {
        kvmppc_core_commit_memory_region(kvm, mem, old);
 }
 
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
-                                  int user_alloc)
+                                  bool user_alloc)
 {
        /* A few sanity checks. We can have exactly one memory slot which has
           to start at guest virtual zero and which has to be located at a
 void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
-                               int user_alloc)
+                               bool user_alloc)
 {
        int rc;
 
 
        kvm_userspace_mem.flags = 0;
        kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
        kvm_userspace_mem.memory_size = PAGE_SIZE;
-       r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
+       r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, false);
        if (r)
                goto out;
 
        kvm_userspace_mem.guest_phys_addr =
                kvm->arch.ept_identity_map_addr;
        kvm_userspace_mem.memory_size = PAGE_SIZE;
-       r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
+       r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, false);
        if (r)
                goto out;
 
                .flags = 0,
        };
 
-       ret = kvm_set_memory_region(kvm, &tss_mem, 0);
+       ret = kvm_set_memory_region(kvm, &tss_mem, false);
        if (ret)
                return ret;
        kvm->arch.tss_addr = addr;
 
                                struct kvm_memory_slot *memslot,
                                struct kvm_memory_slot old,
                                struct kvm_userspace_memory_region *mem,
-                               int user_alloc)
+                               bool user_alloc)
 {
        int npages = memslot->npages;
        int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
 void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
-                               int user_alloc)
+                               bool user_alloc)
 {
 
        int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
 
        unsigned long *dirty_bitmap;
        struct kvm_arch_memory_slot arch;
        unsigned long userspace_addr;
-       int user_alloc;
        int id;
+       bool user_alloc;
 };
 
 static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
 
 int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
-                         int user_alloc);
+                         bool user_alloc);
 int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
-                           int user_alloc);
+                           bool user_alloc);
 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont);
 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
                                struct kvm_memory_slot *memslot,
                                struct kvm_memory_slot old,
                                struct kvm_userspace_memory_region *mem,
-                               int user_alloc);
+                               bool user_alloc);
 void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
-                               int user_alloc);
+                               bool user_alloc);
 bool kvm_largepages_enabled(void);
 void kvm_disable_largepages(void);
 /* flush all memory translations */
 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct
                                   kvm_userspace_memory_region *mem,
-                                  int user_alloc);
+                                  bool user_alloc);
 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
 long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);
 
  */
 int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
-                           int user_alloc)
+                           bool user_alloc)
 {
        int r;
        gfn_t base_gfn;
 
 int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
-                         int user_alloc)
+                         bool user_alloc)
 {
        int r;
 
 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct
                                   kvm_userspace_memory_region *mem,
-                                  int user_alloc)
+                                  bool user_alloc)
 {
        if (mem->slot >= KVM_USER_MEM_SLOTS)
                return -EINVAL;
                                                sizeof kvm_userspace_mem))
                        goto out;
 
-               r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
+               r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, true);
                break;
        }
        case KVM_GET_DIRTY_LOG: {