www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
bus: mhi: ep: Fix chained transfer handling in read path
authorSumit Kumar <sumit.kumar@oss.qualcomm.com>
Wed, 10 Sep 2025 12:41:09 +0000 (18:11 +0530)
committerManivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
Fri, 12 Sep 2025 10:38:41 +0000 (16:08 +0530)
The mhi_ep_read_channel function incorrectly assumes the End of Transfer
(EOT) bit is present for each packet in a chained transaction, causing
it to advance mhi_chan->rd_offset beyond wr_offset during host-to-device
transfers when EOT has not yet arrived. This leads to access of unmapped
host memory, causing IOMMU faults and processing of stale TREs.

Modify the loop condition to ensure the transfer ring queue is not empty, allowing the
function to process only valid TREs up to the current write pointer to
prevent premature reads and ensure safe traversal of chained TREs.

Due to this change, buf_left needs to be removed from the while loop
condition to avoid exiting prematurely before reading the ring completely,
and also remove write_offset since it will always be zero because the new
cache buffer is allocated every time.

Fixes: 5301258899773 ("bus: mhi: ep: Add support for reading from the host")
Co-developed-by: Akhil Vinod <akhil.vinod@oss.qualcomm.com>
Signed-off-by: Akhil Vinod <akhil.vinod@oss.qualcomm.com>
Signed-off-by: Sumit Kumar <sumit.kumar@oss.qualcomm.com>
[mani: reworded description slightly]
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
Reviewed-by: Krishna Chaitanya Chundru <krishna.chundru@oss.qualcomm.com>
Cc: stable@vger.kernel.org
Link: https://patch.msgid.link/20250910-final_chained-v3-1-ec77c9d88ace@oss.qualcomm.com
drivers/bus/mhi/ep/main.c

index b3eafcf2a2c50d95e3efd3afb27038ecf55552a5..cdea24e9291959ae0a92487c1b9698dc8164d2f1 100644 (file)
@@ -403,17 +403,13 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
 {
        struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
-       size_t tr_len, read_offset, write_offset;
+       size_t tr_len, read_offset;
        struct mhi_ep_buf_info buf_info = {};
        u32 len = MHI_EP_DEFAULT_MTU;
        struct mhi_ring_element *el;
-       bool tr_done = false;
        void *buf_addr;
-       u32 buf_left;
        int ret;
 
-       buf_left = len;
-
        do {
                /* Don't process the transfer ring if the channel is not in RUNNING state */
                if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
@@ -426,24 +422,23 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
                /* Check if there is data pending to be read from previous read operation */
                if (mhi_chan->tre_bytes_left) {
                        dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
-                       tr_len = min(buf_left, mhi_chan->tre_bytes_left);
+                       tr_len = min(len, mhi_chan->tre_bytes_left);
                } else {
                        mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
                        mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
                        mhi_chan->tre_bytes_left = mhi_chan->tre_size;
 
-                       tr_len = min(buf_left, mhi_chan->tre_size);
+                       tr_len = min(len, mhi_chan->tre_size);
                }
 
                read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
-               write_offset = len - buf_left;
 
                buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL);
                if (!buf_addr)
                        return -ENOMEM;
 
                buf_info.host_addr = mhi_chan->tre_loc + read_offset;
-               buf_info.dev_addr = buf_addr + write_offset;
+               buf_info.dev_addr = buf_addr;
                buf_info.size = tr_len;
                buf_info.cb = mhi_ep_read_completion;
                buf_info.cb_buf = buf_addr;
@@ -459,16 +454,12 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
                        goto err_free_buf_addr;
                }
 
-               buf_left -= tr_len;
                mhi_chan->tre_bytes_left -= tr_len;
 
-               if (!mhi_chan->tre_bytes_left) {
-                       if (MHI_TRE_DATA_GET_IEOT(el))
-                               tr_done = true;
-
+               if (!mhi_chan->tre_bytes_left)
                        mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
-               }
-       } while (buf_left && !tr_done);
+       /* Read until the ring becomes empty */
+       } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
 
        return 0;
 
@@ -502,15 +493,11 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring)
                mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
        } else {
                /* UL channel */
-               do {
-                       ret = mhi_ep_read_channel(mhi_cntrl, ring);
-                       if (ret < 0) {
-                               dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
-                               return ret;
-                       }
-
-                       /* Read until the ring becomes empty */
-               } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
+               ret = mhi_ep_read_channel(mhi_cntrl, ring);
+               if (ret < 0) {
+                       dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
+                       return ret;
+               }
        }
 
        return 0;