 u32 kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
 
 extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
-                                          struct kvm_vcpu *vcpu);
+                                          struct kvm_vcpu *vcpu,
+                                          bool write_fault);
 
 extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
                                              struct kvm_vcpu *vcpu);
 
 extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
                                                struct kvm_mips_tlb *tlb,
-                                               unsigned long gva);
+                                               unsigned long gva,
+                                               bool write_fault);
 
 extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
                                                     u32 *opc,
                                                     struct kvm_run *run,
-                                                    struct kvm_vcpu *vcpu);
+                                                    struct kvm_vcpu *vcpu,
+                                                    bool write_fault);
 
 extern enum emulation_result kvm_mips_handle_tlbmod(u32 cause,
                                                    u32 *opc,
                                                    struct kvm_run *run,
                                                    struct kvm_vcpu *vcpu);
 
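For reference, callers derive the new write_fault argument from the type of
the faulting exception. A minimal sketch of that derivation (the helper name
is hypothetical and not part of this patch; CAUSEB_EXCCODE and the EXCCODE_*
constants are real definitions from <asm/mipsregs.h>):

	/* Hypothetical helper, not in this patch: TLB store misses and
	 * TLB modified exceptions are write faults; load misses are reads. */
	static inline bool fault_is_write(u32 cause)
	{
		u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;

		return exccode == EXCCODE_TLBS || exccode == EXCCODE_MOD;
	}
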
 enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
                                              u32 *opc,
                                              struct kvm_run *run,
-                                             struct kvm_vcpu *vcpu)
+                                             struct kvm_vcpu *vcpu,
+                                             bool write_fault)
 {
        enum emulation_result er = EMULATE_DONE;
        u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
                         * OK we have a Guest TLB entry, now inject it into the
                         * shadow host TLB
                         */
-                       if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
-                                                                va)) {
+                       if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, va,
+                                                                write_fault)) {
                                kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
                                        __func__, va, index, vcpu,
                                        read_c0_entryhi());
 
  * kvm_mips_map_page() - Map a guest physical page.
  * @vcpu:              VCPU pointer.
  * @gpa:               Guest physical address of fault.
+ * @write_fault:       Whether the fault was due to a write.
  * @out_entry:         New PTE for @gpa (written on success unless NULL).
  * @out_buddy:         New PTE for @gpa's buddy (written on success unless
  *                     NULL).
  *             as an MMIO access.
  */
 static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
+                            bool write_fault,
                             pte_t *out_entry, pte_t *out_buddy)
 {
        struct kvm *kvm = vcpu->kvm;
 
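The hunk above only changes the signature; the diff does not show how
kvm_mips_map_page() actually consumes write_fault. One plausible use, assuming
a follow-up change switches the PFN lookup to KVM's generic gfn_to_pfn_prot()
helper (the helper exists in KVM core; its use here is an assumption, not
something this patch shows):

	/* Sketch only: request a writable host mapping when the guest
	 * fault was a write, and learn whether the page is writable. */
	bool writable;
	kvm_pfn_t pfn = gfn_to_pfn_prot(vcpu->kvm, gpa >> PAGE_SHIFT,
					write_fault, &writable);
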
 /* XXXKYMA: Must be called with interrupts disabled */
 int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
-                                   struct kvm_vcpu *vcpu)
+                                   struct kvm_vcpu *vcpu,
+                                   bool write_fault)
 {
        unsigned long gpa;
        kvm_pfn_t pfn0, pfn1;
        gpa = KVM_GUEST_CPHYSADDR(badvaddr & (PAGE_MASK << 1));
        vaddr = badvaddr & (PAGE_MASK << 1);
 
-       if (kvm_mips_map_page(vcpu, gpa, &pte_gpa[0], NULL) < 0)
+       if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[0], NULL) < 0)
                return -1;
 
-       if (kvm_mips_map_page(vcpu, gpa | PAGE_SIZE, &pte_gpa[1], NULL) < 0)
+       if (kvm_mips_map_page(vcpu, gpa | PAGE_SIZE, write_fault, &pte_gpa[1],
+                             NULL) < 0)
                return -1;
 
        pfn0 = pte_pfn(pte_gpa[0]);
 
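The two kvm_mips_map_page() calls above fill an even/odd page pair: badvaddr
is rounded down to a double-page boundary, so gpa covers the even half and
gpa | PAGE_SIZE the odd half. A worked illustration, assuming 4 KiB pages:

	/* With PAGE_SHIFT == 12, PAGE_MASK << 1 clears the low 13 bits:
	 * badvaddr = 0x40001abc  ->  vaddr = 0x40000000 (pair base), so
	 * the even half maps gpa and the odd half maps gpa | 0x1000. */
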
 int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
                                         struct kvm_mips_tlb *tlb,
-                                        unsigned long gva)
+                                        unsigned long gva,
+                                        bool write_fault)
 {
        kvm_pfn_t pfn;
        long tlb_lo = 0;
                tlb_lo = tlb->tlb_lo[idx];
 
        /* Find host PFN */
-       if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo), &pte_gpa,
-                             NULL) < 0)
+       if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo), write_fault,
+                             &pte_gpa, NULL) < 0)
                return -1;
        pfn = pte_pfn(pte_gpa);
 
        int index;
 
        if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) {
-               if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu) < 0)
+               if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0)
                        return KVM_MIPS_GPA;
        } else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) ||
                   KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) {
                if (write && !TLB_IS_DIRTY(*tlb, gva))
                        return KVM_MIPS_TLBMOD;
 
-               if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva))
+               if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva, write))
                        return KVM_MIPS_GPA;
        } else {
                return KVM_MIPS_GVA;
 
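The TLBMOD short-circuit above is what makes the write flag matter here: a
write through a valid but clean guest TLB entry must surface to the guest as
a TLB modified exception rather than being mapped writable behind its back.
TLB_IS_DIRTY picks whichever even/odd half covers the address and tests its
dirty (write-enable) bit; roughly (a hedged sketch, not the kernel's exact
definition):

	/* Sketch: select tlb_lo[0] or tlb_lo[1] by the odd-page bit of
	 * the VA, then test the EntryLo dirty/write-enable bit. */
	#define TLB_IS_DIRTY(tlb, va)					\
		((tlb).tlb_lo[((va) >> PAGE_SHIFT) & 1] & MIPS_ENTRYLO_D)
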
                 *     into the shadow host TLB
                 */
 
-               er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+               er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
                if (er == EMULATE_DONE)
                        ret = RESUME_GUEST;
                else {
                 * not expect to ever get them
                 */
                if (kvm_mips_handle_kseg0_tlb_fault
-                   (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
+                   (vcpu->arch.host_cp0_badvaddr, vcpu, store) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
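
Finally, the store flag used in the last two hunks comes from the
trap-and-emulate exit path, where each TLB exception type has a thin wrapper.
A hedged sketch of that pattern (the wrapper and callee names follow the
kernel's kvm_trap_emul_* convention, but the surrounding code is omitted from
this diff, so treat them as illustrative):

	/* Illustrative wrappers: store records whether the exit was a
	 * TLB store miss (write) or a TLB load miss (read). */
	static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
	{
		return kvm_trap_emul_handle_tlb_miss(vcpu, true);
	}

	static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
	{
		return kvm_trap_emul_handle_tlb_miss(vcpu, false);
	}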