Unlike normal 'int' functions returning '0' on success, kvm_setup_async_pf()/
kvm_arch_setup_async_pf() return '1' when a job to handle page fault
asynchronously was scheduled and '0' otherwise. To avoid the confusion
change return type to 'bool'.
No functional change intended.
Suggested-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20200615121334.91300-1-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
        return true;
 }
 
-static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
+static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
 {
        hva_t hva;
        struct kvm_arch_async_pf arch;
-       int rc;
 
        if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
-               return 0;
+               return false;
        if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
            vcpu->arch.pfault_compare)
-               return 0;
+               return false;
        if (psw_extint_disabled(vcpu))
-               return 0;
+               return false;
        if (kvm_s390_vcpu_has_irq(vcpu, 0))
-               return 0;
+               return false;
        if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
-               return 0;
+               return false;
        if (!vcpu->arch.gmap->pfault_enabled)
-               return 0;
+               return false;
 
        hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
        hva += current->thread.gmap_addr & ~PAGE_MASK;
        if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
-               return 0;
+               return false;
 
-       rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
-       return rc;
+       return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
 }
 
 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 
        walk_shadow_page_lockless_end(vcpu);
 }
 
-static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-                                  gfn_t gfn)
+static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+                                   gfn_t gfn)
 {
        struct kvm_arch_async_pf arch;
 
 
 
 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
 void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-                      unsigned long hva, struct kvm_arch_async_pf *arch);
+bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+                       unsigned long hva, struct kvm_arch_async_pf *arch);
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 #endif
 
 
        }
 }
 
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-                      unsigned long hva, struct kvm_arch_async_pf *arch)
+/*
+ * Try to schedule a job to handle page fault asynchronously. Returns 'true' on
+ * success, 'false' on failure (page fault has to be handled synchronously).
+ */
+bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+                       unsigned long hva, struct kvm_arch_async_pf *arch)
 {
        struct kvm_async_pf *work;
 
        if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
-               return 0;
+               return false;
 
        /* Arch specific code should not do async PF in this case */
        if (unlikely(kvm_is_error_hva(hva)))
-               return 0;
+               return false;
 
        /*
         * do alloc nowait since if we are going to sleep anyway we
         */
        work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
        if (!work)
-               return 0;
+               return false;
 
        work->wakeup_all = false;
        work->vcpu = vcpu;
 
        schedule_work(&work->work);
 
-       return 1;
+       return true;
 }
 
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)