Make svm_copy_vmrun_state()/svm_copy_vmloadsave_state() take their
arguments in 'memcpy(dest, src)' order to avoid any confusion.
No functional change intended.
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210719090322.625277-1-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
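
For reference, a minimal sketch of the convention this patch adopts: the
destination argument comes first and the source second, exactly as in
memcpy(dest, src). The struct and helper below are hypothetical and only
illustrate the ordering:

    #include <string.h>

    struct state { unsigned long rip, rsp; };

    /* memcpy()-style ordering: destination first, source second. */
    static void copy_state(struct state *to, const struct state *from)
    {
            memcpy(to, from, sizeof(*to));
    }

    /*
     * The call site then reads left to right as "to <- from", matching
     * memcpy(dest, src):
     *
     *         copy_state(&vmcb01_state, &vmcb12_state);
     */
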
 }
 
 /* Copy state save area fields which are handled by VMRUN */
-void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
-                         struct vmcb_save_area *to_save)
+void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
+                         struct vmcb_save_area *from_save)
 {
        to_save->es = from_save->es;
        to_save->cs = from_save->cs;
        to_save->cpl = 0;
 }
 
-void svm_copy_vmloadsave_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
+void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
 {
        to_vmcb->save.fs = from_vmcb->save.fs;
        to_vmcb->save.gs = from_vmcb->save.gs;
 
        svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
 
-       svm_copy_vmrun_state(save, &svm->vmcb01.ptr->save);
+       svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
        nested_load_control_from_vmcb12(svm, ctl);
 
        svm_switch_vmcb(svm, &svm->nested.vmcb02);
 
        ret = kvm_skip_emulated_instruction(vcpu);
 
        if (vmload) {
-               svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
+               svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
                svm->sysenter_eip_hi = 0;
                svm->sysenter_esp_hi = 0;
        } else {
-               svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
+               svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
        }
 
        kvm_vcpu_unmap(vcpu, &map, true);
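
(Illustrative note, not part of the patch: with the new ordering both
branches above read as memcpy(dest, src), with the copy direction
tracking the emulated instruction.)

    /*
     * VMLOAD: guest-provided state flows from vmcb12 into the current
     * VMCB, so the current VMCB is the destination:
     *
     *         svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
     *
     * VMSAVE: the direction reverses and vmcb12 is the destination:
     *
     *         svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
     */
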
 
                BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
 
-               svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
-                                    map_save.hva + 0x400);
+               svm_copy_vmrun_state(map_save.hva + 0x400,
+                                    &svm->vmcb01.ptr->save);
 
                kvm_vcpu_unmap(vcpu, &map_save, true);
        }
                                         &map_save) == -EINVAL)
                                return 1;
 
-                       svm_copy_vmrun_state(map_save.hva + 0x400,
-                                            &svm->vmcb01.ptr->save);
+                       svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
+                                            map_save.hva + 0x400);
 
                        kvm_vcpu_unmap(vcpu, &map_save, true);
                }
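
(Again illustrative only: the two SMM hunks above are mirror images.
Entering SMM saves vmcb01's state out to the SMM state-save area, and
leaving SMM restores it, so the destination argument swaps between the
two call sites.)

    /*
     * Enter SMM: the save area inside the mapped SMM state page (the
     * vmcb_save_area at offset 0x400, per the BUILD_BUG_ON above) is
     * the destination:
     *
     *         svm_copy_vmrun_state(map_save.hva + 0x400,
     *                              &svm->vmcb01.ptr->save);
     *
     * Leave SMM: vmcb01's save area is the destination:
     *
     *         svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
     *                              map_save.hva + 0x400);
     */
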
 
 void svm_free_nested(struct vcpu_svm *svm);
 int svm_allocate_nested(struct vcpu_svm *svm);
 int nested_svm_vmrun(struct kvm_vcpu *vcpu);
-void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
-                         struct vmcb_save_area *to_save);
-void svm_copy_vmloadsave_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
+void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
+                         struct vmcb_save_area *from_save);
+void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
 int nested_svm_vmexit(struct vcpu_svm *svm);
 
 static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)