unsigned long mask, gfp_t flag, int node);
 extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                                void *vaddr, dma_addr_t dma_handle);
-extern dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
-                                  void *vaddr, size_t size, unsigned long mask,
-                                  enum dma_data_direction direction,
-                                  struct dma_attrs *attrs);
-extern void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
-                              size_t size, enum dma_data_direction direction,
-                              struct dma_attrs *attrs);
+extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
+                                struct page *page, unsigned long offset,
+                                size_t size, unsigned long mask,
+                                enum dma_data_direction direction,
+                                struct dma_attrs *attrs);
+extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
+                            size_t size, enum dma_data_direction direction,
+                            struct dma_attrs *attrs);
 
 extern void iommu_init_early_pSeries(void);
 extern void iommu_init_early_iSeries(void);
 
 }
 
 /* Creates TCEs for a user provided buffer.  The user buffer must be
- * contiguous real kernel storage (not vmalloc).  The address of the buffer
- * passed here is the kernel (virtual) address of the buffer.  The buffer
- * need not be page aligned, the dma_addr_t returned will point to the same
- * byte within the page as vaddr.
+ * contiguous real kernel storage (not vmalloc).  The address passed here
+ * comprises a page address and an offset into that page.  The dma_addr_t
+ * returned will point to the same byte within the page as was passed in.
  */
-static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
-                                      size_t size,
-                                      enum dma_data_direction direction,
-                                      struct dma_attrs *attrs)
+static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
+                                    unsigned long offset, size_t size,
+                                    enum dma_data_direction direction,
+                                    struct dma_attrs *attrs)
 {
-       return iommu_map_single(dev, dev->archdata.dma_data, vaddr, size,
-                               device_to_mask(dev), direction, attrs);
+       return iommu_map_page(dev, dev->archdata.dma_data, page, offset, size,
+                             device_to_mask(dev), direction, attrs);
 }
 
 
-static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
-                                  size_t size,
-                                  enum dma_data_direction direction,
-                                  struct dma_attrs *attrs)
+static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
+                                size_t size, enum dma_data_direction direction,
+                                struct dma_attrs *attrs)
 {
-       iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction,
-                          attrs);
+       iommu_unmap_page(dev->archdata.dma_data, dma_handle, size, direction,
+                        attrs);
 }
 
 
 struct dma_mapping_ops dma_iommu_ops = {
        .alloc_coherent = dma_iommu_alloc_coherent,
        .free_coherent  = dma_iommu_free_coherent,
-       .map_single     = dma_iommu_map_single,
-       .unmap_single   = dma_iommu_unmap_single,
        .map_sg         = dma_iommu_map_sg,
        .unmap_sg       = dma_iommu_unmap_sg,
        .dma_supported  = dma_iommu_dma_supported,
+       .map_page       = dma_iommu_map_page,
+       .unmap_page     = dma_iommu_unmap_page,
 };
 EXPORT_SYMBOL(dma_iommu_ops);
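With map_page() as the primitive, a map_single-style call is only a
page/offset decomposition away, which is what makes this conversion safe for
existing callers.  A minimal sketch, assuming the generic virt_to_page() and
offset_in_page() helpers; the wrapper name is illustrative and not part of
this patch:

/* Sketch only: a map_single-style wrapper expressed via map_page.
 * virt_to_page()/offset_in_page() require directly mapped kernel
 * memory, matching the "contiguous real kernel storage (not vmalloc)"
 * rule documented above.
 */
static inline dma_addr_t dma_map_single_sketch(struct device *dev, void *ptr,
					       size_t size,
					       enum dma_data_direction dir,
					       struct dma_attrs *attrs)
{
	return dma_iommu_ops.map_page(dev, virt_to_page(ptr),
				      offset_in_page(ptr), size, dir, attrs);
}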
 
        kfree(vaddr);
 }
 
-static dma_addr_t ibmebus_map_single(struct device *dev,
-                                    void *ptr,
-                                    size_t size,
-                                    enum dma_data_direction direction,
-                                    struct dma_attrs *attrs)
+static dma_addr_t ibmebus_map_page(struct device *dev,
+                                  struct page *page,
+                                  unsigned long offset,
+                                  size_t size,
+                                  enum dma_data_direction direction,
+                                  struct dma_attrs *attrs)
 {
-       return (dma_addr_t)(ptr);
+       return (dma_addr_t)(page_address(page) + offset);
 }
 
-static void ibmebus_unmap_single(struct device *dev,
-                                dma_addr_t dma_addr,
-                                size_t size,
-                                enum dma_data_direction direction,
-                                struct dma_attrs *attrs)
+static void ibmebus_unmap_page(struct device *dev,
+                              dma_addr_t dma_addr,
+                              size_t size,
+                              enum dma_data_direction direction,
+                              struct dma_attrs *attrs)
 {
        return;
 }
 static struct dma_mapping_ops ibmebus_dma_ops = {
        .alloc_coherent = ibmebus_alloc_coherent,
        .free_coherent  = ibmebus_free_coherent,
-       .map_single     = ibmebus_map_single,
-       .unmap_single   = ibmebus_unmap_single,
        .map_sg         = ibmebus_map_sg,
        .unmap_sg       = ibmebus_unmap_sg,
        .dma_supported  = ibmebus_dma_supported,
+       .map_page       = ibmebus_map_page,
+       .unmap_page     = ibmebus_unmap_page,
 };
 
 static int ibmebus_match_path(struct device *dev, void *data)
 
 }
 
 /* Creates TCEs for a user provided buffer.  The user buffer must be
- * contiguous real kernel storage (not vmalloc).  The address of the buffer
- * passed here is the kernel (virtual) address of the buffer.  The buffer
- * need not be page aligned, the dma_addr_t returned will point to the same
- * byte within the page as vaddr.
+ * contiguous real kernel storage (not vmalloc).  The address passed here
+ * comprises a page address and an offset into that page.  The dma_addr_t
+ * returned will point to the same byte within the page as was passed in.
  */
-dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
-                           void *vaddr, size_t size, unsigned long mask,
-               enum dma_data_direction direction, struct dma_attrs *attrs)
+dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
+                         struct page *page, unsigned long offset, size_t size,
+                         unsigned long mask, enum dma_data_direction direction,
+                         struct dma_attrs *attrs)
 {
        dma_addr_t dma_handle = DMA_ERROR_CODE;
+       void *vaddr;
        unsigned long uaddr;
        unsigned int npages, align;
 
        BUG_ON(direction == DMA_NONE);
 
+       vaddr = page_address(page) + offset;
        uaddr = (unsigned long)vaddr;
        npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);
 
        return dma_handle;
 }
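For reference, iommu_num_pages() counts every IOMMU page touched by
[uaddr, uaddr + size), so an unaligned buffer may need one more TCE than its
length alone suggests.  A hedged sketch of the equivalent arithmetic
(iommu_num_pages() itself remains the authoritative helper):

/* Sketch of the page-count arithmetic: add the offset into the first
 * IOMMU page to the length, then round up to whole IOMMU pages.
 * E.g. a 4096-byte buffer starting 0x800 into a 4K IOMMU page touches
 * two pages and therefore needs two TCEs.
 */
static inline unsigned int iommu_npages_sketch(unsigned long uaddr,
					       size_t size)
{
	return ((uaddr & (IOMMU_PAGE_SIZE - 1)) + size + IOMMU_PAGE_SIZE - 1)
		>> IOMMU_PAGE_SHIFT;
}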
 
-void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
-               size_t size, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
+                     size_t size, enum dma_data_direction direction,
+                     struct dma_attrs *attrs)
 {
        unsigned int npages;
 
 
        vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
 }
 
-static dma_addr_t vio_dma_iommu_map_single(struct device *dev, void *vaddr,
-                                           size_t size,
-                                           enum dma_data_direction direction,
-                                           struct dma_attrs *attrs)
+static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
+                                         unsigned long offset, size_t size,
+                                         enum dma_data_direction direction,
+                                         struct dma_attrs *attrs)
 {
        struct vio_dev *viodev = to_vio_dev(dev);
        dma_addr_t ret = DMA_ERROR_CODE;
                return ret;
        }
 
-       ret = dma_iommu_ops.map_single(dev, vaddr, size, direction, attrs);
+       ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
        if (unlikely(dma_mapping_error(dev, ret))) {
                vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
                atomic_inc(&viodev->cmo.allocs_failed);
        return ret;
 }
 
-static void vio_dma_iommu_unmap_single(struct device *dev,
-               dma_addr_t dma_handle, size_t size,
-               enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
+                                    size_t size,
+                                    enum dma_data_direction direction,
+                                    struct dma_attrs *attrs)
 {
        struct vio_dev *viodev = to_vio_dev(dev);
 
-       dma_iommu_ops.unmap_single(dev, dma_handle, size, direction, attrs);
+       dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
 
        vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
 }
 struct dma_mapping_ops vio_dma_mapping_ops = {
        .alloc_coherent = vio_dma_iommu_alloc_coherent,
        .free_coherent  = vio_dma_iommu_free_coherent,
-       .map_single     = vio_dma_iommu_map_single,
-       .unmap_single   = vio_dma_iommu_unmap_single,
        .map_sg         = vio_dma_iommu_map_sg,
        .unmap_sg       = vio_dma_iommu_unmap_sg,
+       .map_page       = vio_dma_iommu_map_page,
+       .unmap_page     = vio_dma_iommu_unmap_page,
 };
 
 /**
 
                dma_direct_ops.free_coherent(dev, size, vaddr, dma_handle);
 }
 
-static dma_addr_t dma_fixed_map_single(struct device *dev, void *ptr,
-                                      size_t size,
-                                      enum dma_data_direction direction,
-                                      struct dma_attrs *attrs)
+static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
+                                    unsigned long offset, size_t size,
+                                    enum dma_data_direction direction,
+                                    struct dma_attrs *attrs)
 {
        if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
-               return dma_direct_ops.map_single(dev, ptr, size, direction,
-                                                attrs);
+               return dma_direct_ops.map_page(dev, page, offset, size,
+                                              direction, attrs);
        else
-               return iommu_map_single(dev, cell_get_iommu_table(dev), ptr,
-                                       size, device_to_mask(dev), direction,
-                                       attrs);
+               return iommu_map_page(dev, cell_get_iommu_table(dev), page,
+                                     offset, size, device_to_mask(dev),
+                                     direction, attrs);
 }
 
-static void dma_fixed_unmap_single(struct device *dev, dma_addr_t dma_addr,
-                                  size_t size,
-                                  enum dma_data_direction direction,
-                                  struct dma_attrs *attrs)
+static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
+                                size_t size, enum dma_data_direction direction,
+                                struct dma_attrs *attrs)
 {
        if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
-               dma_direct_ops.unmap_single(dev, dma_addr, size, direction,
-                                           attrs);
+               dma_direct_ops.unmap_page(dev, dma_addr, size, direction,
+                                         attrs);
        else
-               iommu_unmap_single(cell_get_iommu_table(dev), dma_addr, size,
-                                  direction, attrs);
+               iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size,
+                                direction, attrs);
 }
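dma_fixed_map_page() and dma_fixed_unmap_page() dispatch on whether the
caller's weak-ordering attribute matches how the fixed window was configured
(iommu_fixed_is_weak).  A minimal sketch of requesting a weakly ordered
mapping through the dma_attrs interface from linux/dma-attrs.h; calling
dma_fixed_map_page() directly is for illustration only, real drivers go
through the dma_mapping_ops:

/* Sketch: ask for a weakly ordered mapping.  If this matches
 * iommu_fixed_is_weak, the direct window is used; otherwise the
 * mapping falls back to the iommu path above.
 */
static dma_addr_t map_weak_sketch(struct device *dev, struct page *page,
				  size_t size)
{
	struct dma_attrs attrs;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
	return dma_fixed_map_page(dev, page, 0, size, DMA_TO_DEVICE, &attrs);
}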
 
 static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
 struct dma_mapping_ops dma_iommu_fixed_ops = {
        .alloc_coherent = dma_fixed_alloc_coherent,
        .free_coherent  = dma_fixed_free_coherent,
-       .map_single     = dma_fixed_map_single,
-       .unmap_single   = dma_fixed_unmap_single,
        .map_sg         = dma_fixed_map_sg,
        .unmap_sg       = dma_fixed_unmap_sg,
        .dma_supported  = dma_fixed_dma_supported,
        .set_dma_mask   = dma_set_mask_and_switch,
+       .map_page       = dma_fixed_map_page,
+       .unmap_page     = dma_fixed_unmap_page,
 };
 
 static void cell_dma_dev_setup_fixed(struct device *dev);
 
 dma_addr_t iseries_hv_map(void *vaddr, size_t size,
                        enum dma_data_direction direction)
 {
-       return iommu_map_single(NULL, &vio_iommu_table, vaddr, size,
-                               DMA_32BIT_MASK, direction, NULL);
+       return iommu_map_page(NULL, &vio_iommu_table, virt_to_page(vaddr),
+                             offset_in_page(vaddr), size,
+                             DMA_32BIT_MASK, direction, NULL);
 }
 
 void iseries_hv_unmap(dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
 {
-       iommu_unmap_single(&vio_iommu_table, dma_handle, size, direction, NULL);
+       iommu_unmap_page(&vio_iommu_table, dma_handle, size, direction, NULL);
 }
 
 void __init iommu_vio_init(void)
 
 }
 
 /* Creates TCEs for a user provided buffer.  The user buffer must be
- * contiguous real kernel storage (not vmalloc).  The address of the buffer
- * passed here is the kernel (virtual) address of the buffer.  The buffer
- * need not be page aligned, the dma_addr_t returned will point to the same
- * byte within the page as vaddr.
+ * contiguous real kernel storage (not vmalloc).  The address passed here
+ * comprises a page address and an offset into that page.  The dma_addr_t
+ * returned will point to the same byte within the page as was passed in.
  */
 
-static dma_addr_t ps3_sb_map_single(struct device *_dev, void *ptr, size_t size,
-       enum dma_data_direction direction, struct dma_attrs *attrs)
+static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page,
+       unsigned long offset, size_t size, enum dma_data_direction direction,
+       struct dma_attrs *attrs)
 {
        struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
        int result;
        unsigned long bus_addr;
+       void *ptr = page_address(page) + offset;
 
        result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
                             &bus_addr,
        return bus_addr;
 }
 
-static dma_addr_t ps3_ioc0_map_single(struct device *_dev, void *ptr,
-                                     size_t size,
-                                     enum dma_data_direction direction,
-                                     struct dma_attrs *attrs)
+static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page,
+                                   unsigned long offset, size_t size,
+                                   enum dma_data_direction direction,
+                                   struct dma_attrs *attrs)
 {
        struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
        int result;
        unsigned long bus_addr;
        u64 iopte_flag;
+       void *ptr = page_address(page) + offset;
 
        iopte_flag = IOPTE_M;
        switch (direction) {
        return bus_addr;
 }
 
-static void ps3_unmap_single(struct device *_dev, dma_addr_t dma_addr,
+static void ps3_unmap_page(struct device *_dev, dma_addr_t dma_addr,
        size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
 {
        struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
 static struct dma_mapping_ops ps3_sb_dma_ops = {
        .alloc_coherent = ps3_alloc_coherent,
        .free_coherent = ps3_free_coherent,
-       .map_single = ps3_sb_map_single,
-       .unmap_single = ps3_unmap_single,
        .map_sg = ps3_sb_map_sg,
        .unmap_sg = ps3_sb_unmap_sg,
-       .dma_supported = ps3_dma_supported
+       .dma_supported = ps3_dma_supported,
+       .map_page = ps3_sb_map_page,
+       .unmap_page = ps3_unmap_page,
 };
 
 static struct dma_mapping_ops ps3_ioc0_dma_ops = {
        .alloc_coherent = ps3_alloc_coherent,
        .free_coherent = ps3_free_coherent,
-       .map_single = ps3_ioc0_map_single,
-       .unmap_single = ps3_unmap_single,
        .map_sg = ps3_ioc0_map_sg,
        .unmap_sg = ps3_ioc0_unmap_sg,
-       .dma_supported = ps3_dma_supported
+       .dma_supported = ps3_dma_supported,
+       .map_page = ps3_ioc0_map_page,
+       .unmap_page = ps3_unmap_page,
 };
 
 /**