        .dma_supported = swiotlb_dma_supported,
        .map_page = swiotlb_map_page,
        .unmap_page = swiotlb_unmap_page,
-       .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
-       .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
+       .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+       .sync_single_for_device = swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = swiotlb_sync_sg_for_device,
        .mapping_error = swiotlb_dma_mapping_error,
 
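(Note, not part of the diff: with the range-specific ops gone from struct dma_map_ops, the generic layer can implement the dma_sync_single_range_* API on top of ->sync_single_for_cpu/->sync_single_for_device by folding the offset into the DMA address before the indirect call. A minimal sketch, assuming the get_dma_ops() helper of this era:)

/*
 * Sketch (not from the patch): how the core dma_sync_single_range_*
 * helpers can be built on the plain sync_single ops.  The offset is
 * folded into the bus address, so the backend never sees it.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
}
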
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
 }
 
-static inline void dma_direct_sync_single_range(struct device *dev,
-               dma_addr_t dma_handle, unsigned long offset, size_t size,
-               enum dma_data_direction direction)
+static inline void dma_direct_sync_single(struct device *dev,
+                                         dma_addr_t dma_handle, size_t size,
+                                         enum dma_data_direction direction)
 {
-       __dma_sync(bus_to_virt(dma_handle+offset), size, direction);
+       __dma_sync(bus_to_virt(dma_handle), size, direction);
 }
 #endif
 
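(Again outside the diff: dma_direct_sync_single() loses its offset argument because the core has already added the offset to dma_handle by the time the op is invoked, so a partial sync arrives as a plain (address, size) pair. A hypothetical caller syncing 128 bytes starting 64 bytes into a mapping:)

	/* Hypothetical driver snippet: partial sync of a streaming
	 * mapping.  The wrapper sketched above passes handle + 64 down
	 * to the op, so dma_direct_sync_single() ends up syncing from
	 * bus_to_virt(handle + 64) for 128 bytes. */
	dma_sync_single_range_for_cpu(dev, handle, 64, 128,
				      DMA_FROM_DEVICE);
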
        .map_page       = dma_direct_map_page,
        .unmap_page     = dma_direct_unmap_page,
 #ifdef CONFIG_NOT_COHERENT_CACHE
-       .sync_single_range_for_cpu      = dma_direct_sync_single_range,
-       .sync_single_range_for_device   = dma_direct_sync_single_range,
+       .sync_single_for_cpu            = dma_direct_sync_single,
+       .sync_single_for_device         = dma_direct_sync_single,
        .sync_sg_for_cpu                = dma_direct_sync_sg,
        .sync_sg_for_device             = dma_direct_sync_sg,
 #endif
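
(These callbacks are only populated under CONFIG_NOT_COHERENT_CACHE; on cache-coherent platforms they stay NULL, which is why the wrapper sketched above tests ops->sync_single_for_cpu before making the indirect call.)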