if (xe_tt->sg)
                return 0;
 
-       ret = sg_alloc_table_from_pages(&xe_tt->sgt, tt->pages, num_pages,
-                                       0, (u64)num_pages << PAGE_SHIFT,
-                                       GFP_KERNEL);
+       ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
+                                               num_pages, 0,
+                                               (u64)num_pages << PAGE_SHIFT,
+                                               xe_sg_segment_size(xe_tt->dev),
+                                               GFP_KERNEL);
        if (ret)
                return ret;
 
 
 
 struct sg_table *xe_bo_get_sg(struct xe_bo *bo);
 
+/**
+ * xe_sg_segment_size() - Provides upper limit for sg segment size.
+ * @dev: device pointer
+ *
+ * Return: the maximum segment size for the 'struct scatterlist'
+ * elements.
+ */
+static inline unsigned int xe_sg_segment_size(struct device *dev)
+{
+       struct scatterlist __maybe_unused sg;
+       /* Largest value representable by sg.length (all bits set) */
+       size_t max = BIT_ULL(sizeof(sg.length) * 8) - 1;
+
+       /* Clamp to what the DMA layer can actually map in one segment */
+       max = min_t(size_t, max, dma_max_mapping_size(dev));
+
+       /*
+        * The iommu_dma_map_sg() function ensures iova allocation doesn't
+        * cross dma segment boundary. It does so by padding some sg elements.
+        * This can cause overflow, ending up with sg->length being set to 0.
+        * Avoid this by ensuring maximum segment size is half of 'max'
+        * rounded down to PAGE_SIZE.
+        */
+       return round_down(max / 2, PAGE_SIZE);
+}
+
 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
 /**
  * xe_bo_is_mem_type - Whether the bo currently resides in the given
 
 #include "regs/xe_engine_regs.h"
 #include "regs/xe_gt_regs.h"
 #include "regs/xe_regs.h"
+#include "xe_bo.h"
 #include "xe_device.h"
 #include "xe_gt.h"
 #include "xe_gt_mcr.h"
        unsigned int mask_size = xe->info.dma_mask_size;
        int err;
 
-       /*
-        * We don't have a max segment size, so set it to the max so sg's
-        * debugging layer doesn't complain
-        */
-       dma_set_max_seg_size(xe->drm.dev, UINT_MAX);
+       dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev));
 
        err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
        if (err)
 
        if (ret)
                goto out;
 
-       ret = sg_alloc_table_from_pages(&vma->userptr.sgt, pages, pinned,
-                                       0, (u64)pinned << PAGE_SHIFT,
-                                       GFP_KERNEL);
+       ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
+                                               pinned, 0,
+                                               (u64)pinned << PAGE_SHIFT,
+                                               xe_sg_segment_size(xe->drm.dev),
+                                               GFP_KERNEL);
        if (ret) {
                vma->userptr.sg = NULL;
                goto out;