 KVM_X86_OP_NULL(request_immediate_exit)
 KVM_X86_OP(sched_in)
 KVM_X86_OP_NULL(update_cpu_dirty_logging)
-KVM_X86_OP_NULL(pre_block)
-KVM_X86_OP_NULL(post_block)
 KVM_X86_OP_NULL(vcpu_blocking)
 KVM_X86_OP_NULL(vcpu_unblocking)
 KVM_X86_OP_NULL(update_pi_irte)
 
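For context on the hunk above: kvm-x86-ops.h is an X-macro style list, where each KVM_X86_OP()/KVM_X86_OP_NULL() entry is expanded by whichever file includes the header with its own definition of those macros (declarations, ops-table plumbing, static-call wiring, and so on). The sketch below is a hypothetical, self-contained illustration of that pattern only; DEMO_OPS, demo_* and the callback-parameter style are invented for brevity and are not the kernel's actual macros.

/*
 * Hypothetical illustration of the X-macro pattern.  DEMO_OPS() plays
 * the role of the op list; each expansion pass defines the per-entry
 * macro differently.  None of these names exist in KVM.
 */
#include <stdio.h>

#define DEMO_OPS(OP)        \
	OP(sched_in)        \
	OP(vcpu_blocking)   \
	OP(vcpu_unblocking)

/* Pass 1: generate one stub function per op. */
#define DEFINE_OP(name) \
	static void demo_##name(void) { printf("%s called\n", #name); }
DEMO_OPS(DEFINE_OP)
#undef DEFINE_OP

/* Pass 2: generate a table of op names from the same list. */
#define OP_NAME(name) #name,
static const char *demo_op_names[] = { DEMO_OPS(OP_NAME) };
#undef OP_NAME

int main(void)
{
	size_t i;

	demo_sched_in();
	demo_vcpu_blocking();
	demo_vcpu_unblocking();

	for (i = 0; i < sizeof(demo_op_names) / sizeof(demo_op_names[0]); i++)
		printf("op %zu: %s\n", i, demo_op_names[i]);
	return 0;
}

The point of the pattern is that dropping an entry from the list removes every generated artifact for that hook in one place, which is why pre_block/post_block disappear from this header and from the struct definition below without any other declaration churn.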
        const struct kvm_pmu_ops *pmu_ops;
        const struct kvm_x86_nested_ops *nested_ops;
 
-       /*
-        * Architecture specific hooks for vCPU blocking due to
-        * HLT instruction.
-        * Returns for .pre_block():
-        *    - 0 means continue to block the vCPU.
-        *    - 1 means we cannot block the vCPU since some event
-        *        happens during this period, such as, 'ON' bit in
-        *        posted-interrupts descriptor is set.
-        */
-       int (*pre_block)(struct kvm_vcpu *vcpu);
-       void (*post_block)(struct kvm_vcpu *vcpu);
-
        void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
        void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);
 
 
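The struct hunk above leaves vcpu_blocking/vcpu_unblocking as the only blocking-related callbacks, and both are listed with the _NULL variant, i.e. a vendor may leave them unimplemented. Below is a minimal stand-alone sketch of that "optional hook in an ops table" shape, with invented demo_* names: a NULL member is simply skipped by the caller, which is roughly what the guarded call sites do in KVM.

/*
 * Hypothetical sketch of an ops table with optional callbacks: a NULL
 * member means "vendor does not care", and callers skip it.  struct
 * demo_ops, demo_blocking() etc. are invented for this example.
 */
#include <stdio.h>
#include <stddef.h>

struct demo_vcpu { int id; };

struct demo_ops {
	void (*vcpu_blocking)(struct demo_vcpu *vcpu);   /* may be NULL */
	void (*vcpu_unblocking)(struct demo_vcpu *vcpu); /* may be NULL */
};

static void vendor_vcpu_blocking(struct demo_vcpu *vcpu)
{
	printf("vcpu %d about to block\n", vcpu->id);
}

/* A vendor that only implements the blocking side. */
static const struct demo_ops vendor_ops = {
	.vcpu_blocking = vendor_vcpu_blocking,
	/* .vcpu_unblocking deliberately left NULL */
};

static void demo_blocking(const struct demo_ops *ops, struct demo_vcpu *vcpu)
{
	if (ops->vcpu_blocking)
		ops->vcpu_blocking(vcpu);
}

static void demo_unblocking(const struct demo_ops *ops, struct demo_vcpu *vcpu)
{
	if (ops->vcpu_unblocking)
		ops->vcpu_unblocking(vcpu);
}

int main(void)
{
	struct demo_vcpu vcpu = { .id = 0 };

	demo_blocking(&vendor_ops, &vcpu);
	demo_unblocking(&vendor_ops, &vcpu); /* no-op: hook is NULL */
	return 0;
}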
                secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML);
 }
 
-static int vmx_pre_block(struct kvm_vcpu *vcpu)
-{
-       return 0;
-}
-
-static void vmx_post_block(struct kvm_vcpu *vcpu)
-{
-
-}
-
 static void vmx_setup_mce(struct kvm_vcpu *vcpu)
 {
        if (vcpu->arch.mcg_cap & MCG_LMCE_P)
        .cpu_dirty_log_size = PML_ENTITY_NUM,
        .update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
 
-       .pre_block = vmx_pre_block,
-       .post_block = vmx_post_block,
-
        .pmu_ops = &intel_pmu_ops,
        .nested_ops = &vmx_nested_ops,
 
 
 {
        bool hv_timer;
 
-       if (!kvm_arch_vcpu_runnable(vcpu) &&
-           (!kvm_x86_ops.pre_block || static_call(kvm_x86_pre_block)(vcpu) == 0)) {
+       if (!kvm_arch_vcpu_runnable(vcpu)) {
                /*
                 * Switch to the software timer before halt-polling/blocking as
                 * the guest's timer may be a break event for the vCPU, and the
                if (hv_timer)
                        kvm_lapic_switch_to_hv_timer(vcpu);
 
-               if (kvm_x86_ops.post_block)
-                       static_call(kvm_x86_post_block)(vcpu);
-
                if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
                        return 1;
        }
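The last hunk is the behavioural core of the change: the old condition let an optional pre_block() hook veto blocking (non-zero meaning an event such as a posted interrupt arrived), while the new condition relies on kvm_arch_vcpu_runnable() alone. A small stand-alone sketch of that before/after logic is below; should_block_old()/should_block_new(), veto_everything() and struct demo_vcpu are invented names, and only the shape of the check mirrors the diff.

/*
 * Sketch (not kernel code) of the condition change in the x86.c hunk
 * above: an optional veto hook versus a plain runnability check.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stddef.h>

struct demo_vcpu { bool runnable; };

typedef int (*pre_block_fn)(struct demo_vcpu *vcpu);

/* Old shape: an optional hook may veto blocking by returning non-zero. */
static bool should_block_old(struct demo_vcpu *vcpu, pre_block_fn pre_block)
{
	return !vcpu->runnable && (!pre_block || pre_block(vcpu) == 0);
}

/* New shape: the runnability check alone decides. */
static bool should_block_new(struct demo_vcpu *vcpu)
{
	return !vcpu->runnable;
}

/* Pretend a wake event (e.g. a posted interrupt) is always pending. */
static int veto_everything(struct demo_vcpu *vcpu)
{
	(void)vcpu;
	return 1;
}

int main(void)
{
	struct demo_vcpu vcpu = { .runnable = false };

	printf("old, no hook:   block=%d\n", should_block_old(&vcpu, NULL));
	printf("old, veto hook: block=%d\n", should_block_old(&vcpu, veto_everything));
	printf("new:            block=%d\n", should_block_new(&vcpu));
	return 0;
}

As the vmx.c hunk shows, the only implementation of the hook always returned 0 and post_block was an empty stub, so the veto path was dead code; removing the hooks lets the caller drop both the NULL guard and the static call.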