 #include <linux/iopoll.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
+#include <linux/ktime.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/pinctrl/consumer.h>
  * @dma_buf_size: DMA buffer size
  * @is_curr_dma_xfer: indicates active DMA transfer
  * @dma_complete: DMA completion notifier
+ * @is_curr_atomic_xfer: indicates active atomic transfer
  */
 struct tegra_i2c_dev {
        struct device *dev;
        unsigned int dma_buf_size;
        bool is_curr_dma_xfer;
        struct completion dma_complete;
+       bool is_curr_atomic_xfer;
 };
 
 static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val,
                reg_offset = tegra_i2c_reg_addr(i2c_dev, I2C_CONFIG_LOAD);
                addr = i2c_dev->base + reg_offset;
                i2c_writel(i2c_dev, I2C_MSTR_CONFIG_LOAD, I2C_CONFIG_LOAD);
-               if (in_interrupt())
+
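+               /* atomic transfers can't sleep, poll without sleeping */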
+               if (i2c_dev->is_curr_atomic_xfer)
                        err = readl_poll_timeout_atomic(addr, val, val == 0,
                                                        1000,
                                                        I2C_CONFIG_LOAD_TIMEOUT);
        i2c_writel(i2c_dev, val, reg);
 }
 
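+/*
+ * Atomic transfers can't rely on the completion interrupt, hence poll the
+ * interrupt status register and run the ISR by hand until the completion
+ * fires or the timeout expires. The return value matches
+ * wait_for_completion_timeout(): remaining time in jiffies (at least 1) on
+ * success and 0 on timeout.
+ */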
+static unsigned long
+tegra_i2c_poll_completion_timeout(struct tegra_i2c_dev *i2c_dev,
+                                 struct completion *complete,
+                                 unsigned int timeout_ms)
+{
+       ktime_t ktime = ktime_get();
+       ktime_t ktimeout = ktime_add_ms(ktime, timeout_ms);
+
+       do {
+               u32 status = i2c_readl(i2c_dev, I2C_INT_STATUS);
+
+               if (status) {
+                       tegra_i2c_isr(i2c_dev->irq, i2c_dev);
+
+                       if (completion_done(complete)) {
+                               s64 delta = ktime_ms_delta(ktimeout, ktime);
+
+                               return msecs_to_jiffies(delta) ?: 1;
+                       }
+               }
+
+               ktime = ktime_get();
+
+       } while (ktime_before(ktime, ktimeout));
+
+       return 0;
+}
+
 static unsigned long
 tegra_i2c_wait_completion_timeout(struct tegra_i2c_dev *i2c_dev,
                                  struct completion *complete,
 {
        unsigned long ret;
 
-       enable_irq(i2c_dev->irq);
-       ret = wait_for_completion_timeout(complete,
-                                         msecs_to_jiffies(timeout_ms));
-       disable_irq(i2c_dev->irq);
+       if (i2c_dev->is_curr_atomic_xfer) {
+               ret = tegra_i2c_poll_completion_timeout(i2c_dev, complete,
+                                                       timeout_ms);
+       } else {
+               enable_irq(i2c_dev->irq);
+               ret = wait_for_completion_timeout(complete,
+                                                 msecs_to_jiffies(timeout_ms));
+               disable_irq(i2c_dev->irq);
 
-       /*
-        * There is a chance that completion may happen after IRQ
-        * synchronization, which is done by disable_irq().
-        */
-       if (ret == 0 && completion_done(complete)) {
-               dev_warn(i2c_dev->dev, "completion done after timeout\n");
-               ret = 1;
+               /*
+                * There is a chance that completion may happen after IRQ
+                * synchronization, which is done by disable_irq().
+                */
+               if (ret == 0 && completion_done(complete)) {
+                       dev_warn(i2c_dev->dev,
+                                "completion done after timeout\n");
+                       ret = 1;
+               }
        }
 
        return ret;
 
        xfer_size = ALIGN(xfer_size, BYTES_PER_FIFO_WORD);
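+       /* atomic transfers are polled, hence DMA isn't used for them */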
        i2c_dev->is_curr_dma_xfer = (xfer_size > I2C_PIO_MODE_MAX_LEN) &&
-                                   i2c_dev->dma_buf;
+                                   i2c_dev->dma_buf &&
+                                   !i2c_dev->is_curr_atomic_xfer;
        tegra_i2c_config_fifo_trig(i2c_dev, xfer_size);
        dma = i2c_dev->is_curr_dma_xfer;
        /*
        return ret ?: i;
 }
 
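+/*
+ * Atomic variant of tegra_i2c_xfer(): the flag makes the transfer code poll
+ * for completion instead of waiting for the interrupt and keeps the
+ * transfer in PIO mode.
+ */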
+static int tegra_i2c_xfer_atomic(struct i2c_adapter *adap,
+                                struct i2c_msg msgs[], int num)
+{
+       struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
+       int ret;
+
+       i2c_dev->is_curr_atomic_xfer = true;
+       ret = tegra_i2c_xfer(adap, msgs, num);
+       i2c_dev->is_curr_atomic_xfer = false;
+
+       return ret;
+}
+
 static u32 tegra_i2c_func(struct i2c_adapter *adap)
 {
        struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
 }
 
 static const struct i2c_algorithm tegra_i2c_algo = {
-       .master_xfer    = tegra_i2c_xfer,
-       .functionality  = tegra_i2c_func,
+       .master_xfer            = tegra_i2c_xfer,
+       .master_xfer_atomic     = tegra_i2c_xfer_atomic,
+       .functionality          = tegra_i2c_func,
 };
 
 /* payload size is only 12 bit */
                goto unprepare_fast_clk;
        }
 
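+       /*
+        * Atomic transfers may run with interrupts disabled, hence the
+        * runtime PM callbacks need to be IRQ-safe in order to allow
+        * resuming the controller from atomic context.
+        */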
+       pm_runtime_irq_safe(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
        if (!pm_runtime_enabled(&pdev->dev)) {
                ret = tegra_i2c_runtime_resume(&pdev->dev);