if ((le16_to_cpu(reply_desc->reply_flags) &
                    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
                        break;
+#ifndef CONFIG_PREEMPT_RT
                /*
                 * Exit completion loop to avoid CPU lockup
                 * Ensure remaining completion happens from threaded ISR.
                 */
                if (num_op_reply > mrioc->max_host_ios) {
                        op_reply_q->enable_irq_poll = true;
                        break;
                }
-
+#endif
        } while (1);
 
        writel(reply_ci,
                return IRQ_NONE;
 }
 
+#ifndef CONFIG_PREEMPT_RT
+
 static irqreturn_t mpi3mr_isr(int irq, void *privdata)
 {
        struct mpi3mr_intr_info *intr_info = privdata;
        return IRQ_HANDLED;
 }
 
+#endif
+
 /**
  * mpi3mr_request_irq - Request IRQ and register ISR
  * @mrioc: Adapter instance reference
        snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
            mrioc->driver_name, mrioc->id, index);
 
+#ifndef CONFIG_PREEMPT_RT
        retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
            mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
+#else
+       retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr_primary,
+           NULL, IRQF_SHARED, intr_info->name, intr_info);
+#endif
        if (retval) {
                ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
                    intr_info->name, pci_irq_vector(pdev, index));
                pi = 0;
        op_req_q->pi = pi;
 
+#ifndef CONFIG_PREEMPT_RT
        if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
            > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
                mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
+#else
+       atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios);
+#endif
 
        writel(op_req_q->pi,
            &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);