        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        struct kvmppc_44x_tlbe *tlbe;
        unsigned int gtlb_index;
+       int idx;
 
        gtlb_index = kvmppc_get_gpr(vcpu, ra);
        if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
                return EMULATE_FAIL;
        }
 
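+       /* kvmppc_mmu_map() below resolves guest frames through the
+        * memslot array, which is protected by kvm->srcu.
+        */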
+       idx = srcu_read_lock(&vcpu->kvm->srcu);
+
        if (tlbe_is_host_safe(vcpu, tlbe)) {
                gva_t eaddr;
                gpa_t gpaddr;
                kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
        }
 
+       srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
        trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
                             tlbe->word2);
 
 
 {
        int r = RESUME_HOST;
        int s;
+       int idx;
 
        /* update before a new last_exit_type is rewritten */
        kvmppc_update_timing_stats(vcpu);
                        break;
                }
 
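+               /* The gfn lookups that follow walk the memslot array;
+                * hold the SRCU read lock across them.
+                */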
+               idx = srcu_read_lock(&vcpu->kvm->srcu);
+
                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;
 
                        kvmppc_account_exit(vcpu, MMIO_EXITS);
                }
 
+               srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        }
 
 
                kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
 
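+               /* As in the DTLB miss path, the memslot lookups below
+                * need the SRCU read lock.
+                */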
+               idx = srcu_read_lock(&vcpu->kvm->srcu);
+
                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;
 
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
                }
 
+               srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        }
 
 
        struct kvm_book3e_206_tlb_entry *gtlbe;
        int tlbsel, esel;
        int recal = 0;
+       int idx;
 
        tlbsel = get_tlb_tlbsel(vcpu);
        esel = get_tlb_esel(vcpu, tlbsel);
                        kvmppc_set_tlb1map_range(vcpu, gtlbe);
        }
 
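+       /* The shadow-map update below goes through the
+        * kvm->srcu-protected memslots.
+        */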
+       idx = srcu_read_lock(&vcpu->kvm->srcu);
+
        /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
        if (tlbe_is_host_safe(vcpu, gtlbe)) {
                u64 eaddr = get_tlb_eaddr(gtlbe);
                kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
        }
 
+       srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
        kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
        return EMULATE_DONE;
 }