dma_addr_t
 dma_map_single_attrs(struct device *dev, void *cpu_addr, size_t size,
                     enum dma_data_direction dir,
-                    struct dma_attrs *attrs)
+                    unsigned long attrs)
 
 void
 dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
                       size_t size, enum dma_data_direction dir,
-                      struct dma_attrs *attrs)
+                      unsigned long attrs)
 
 int
 dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                 int nents, enum dma_data_direction dir,
-                struct dma_attrs *attrs)
+                unsigned long attrs)
 
 void
 dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
                   int nents, enum dma_data_direction dir,
-                  struct dma_attrs *attrs)
+                  unsigned long attrs)
 
 The four functions above are just like the counterpart functions
 without the _attrs suffixes, except that they pass an optional
-struct dma_attrs*.
-
-struct dma_attrs encapsulates a set of "DMA attributes". For the
-definition of struct dma_attrs see linux/dma-attrs.h.
+dma_attrs.
 
 The interpretation of DMA attributes is architecture-specific, and
 each attribute should be documented in Documentation/DMA-attributes.txt.
 
-If struct dma_attrs* is NULL, the semantics of each of these
-functions is identical to those of the corresponding function
+If dma_attrs is 0, the semantics of each of these functions
+are identical to those of the corresponding function
 without the _attrs suffix. As a result dma_map_single_attrs()
 can generally replace dma_map_single(), etc.
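 
 For instance (an illustrative sketch, not part of the patch; dev, buf
 and len stand in for a driver's own device and buffer), passing 0 for
 the attributes behaves exactly like the plain call:
 
        dma_addr_t handle;
 
        /* equivalent to dma_map_single(dev, buf, len, DMA_TO_DEVICE) */
        handle = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, 0);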
 
 For example, you could pass an attribute DMA_ATTR_FOO when mapping
 memory for DMA:
 
-#include <linux/dma-attrs.h>
-/* DMA_ATTR_FOO should be defined in linux/dma-attrs.h and
+#include <linux/dma-mapping.h>
+/* DMA_ATTR_FOO should be defined in linux/dma-mapping.h and
  * documented in Documentation/DMA-attributes.txt */
 ...
 
-       DEFINE_DMA_ATTRS(attrs);
-       dma_set_attr(DMA_ATTR_FOO, &attrs);
+       unsigned long attr = 0;
+       attr |= DMA_ATTR_FOO;
        ....
-       n = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, &attr);
+       n = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, attr);
        ....
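 
 Because the attributes are now simply bits in an unsigned long, several
 of them can be combined with bitwise OR (a sketch reusing the variables
 above; DMA_ATTR_FOO is still the hypothetical example attribute,
 DMA_ATTR_SKIP_CPU_SYNC a real one from linux/dma-mapping.h):
 
        unsigned long attrs = DMA_ATTR_FOO | DMA_ATTR_SKIP_CPU_SYNC;
        n = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, attrs);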
 
 Architectures that care about DMA_ATTR_FOO would check for its
 presence in their implementations of the mapping and allocation
 functions, e.g.:
 
 void whizco_dma_map_sg_attrs(struct device *dev, dma_addr_t dma_addr,
                             size_t size, enum dma_data_direction dir,
-                            struct dma_attrs *attrs)
+                            unsigned long attrs)
 {
        ....
-       int foo =  dma_get_attr(DMA_ATTR_FOO, attrs);
-       ....
-       if (foo)
+       if (attrs & DMA_ATTR_FOO)
                /* twizzle the frobnozzle */
        ....
 
 
                        ==============
 
 This document describes the semantics of the DMA attributes that are
-defined in linux/dma-attrs.h.
+defined in linux/dma-mapping.h.
 
 DMA_ATTR_WRITE_BARRIER
 ----------------------
 
 #ifndef _ALPHA_DMA_MAPPING_H
 #define _ALPHA_DMA_MAPPING_H
 
-#include <linux/dma-attrs.h>
-
 extern struct dma_map_ops *dma_ops;
 
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 
 
 static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t gfp,
-                                      struct dma_attrs *attrs)
+                                      unsigned long attrs)
 {
        void *ret;
 
 
 static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
-                                    struct dma_attrs *attrs)
+                                    unsigned long attrs)
 {
        struct pci_dev *pdev = alpha_gendev_to_pci(dev);
        int dac_allowed;
 
 static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
                                 size_t size, enum dma_data_direction dir,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
        unsigned long flags;
        struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 
 static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
                                      dma_addr_t *dma_addrp, gfp_t gfp,
-                                     struct dma_attrs *attrs)
+                                     unsigned long attrs)
 {
        struct pci_dev *pdev = alpha_gendev_to_pci(dev);
        void *cpu_addr;
 
 static void alpha_pci_free_coherent(struct device *dev, size_t size,
                                    void *cpu_addr, dma_addr_t dma_addr,
-                                   struct dma_attrs *attrs)
+                                   unsigned long attrs)
 {
        struct pci_dev *pdev = alpha_gendev_to_pci(dev);
        pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
 
 static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
                            int nents, enum dma_data_direction dir,
-                           struct dma_attrs *attrs)
+                           unsigned long attrs)
 {
        struct pci_dev *pdev = alpha_gendev_to_pci(dev);
        struct scatterlist *start, *end, *out;
 
 static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
                               int nents, enum dma_data_direction dir,
-                              struct dma_attrs *attrs)
+                              unsigned long attrs)
 {
        struct pci_dev *pdev = alpha_gendev_to_pci(dev);
        unsigned long flags;
 
 
 
 static void *arc_dma_alloc(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+               dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
        unsigned long order = get_order(size);
        struct page *page;
         *   (vs. always going to memory - thus are faster)
         */
        if ((is_isa_arcv2() && ioc_exists) ||
-           dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+           (attrs & DMA_ATTR_NON_CONSISTENT))
                need_coh = 0;
 
        /*
 }
 
 static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
-               dma_addr_t dma_handle, struct dma_attrs *attrs)
+               dma_addr_t dma_handle, unsigned long attrs)
 {
        phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);
        struct page *page = virt_to_page(paddr);
        int is_non_coh = 1;
 
-       is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) ||
+       is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
                        (is_isa_arcv2() && ioc_exists);
 
        if (PageHighMem(page) || !is_non_coh)
 
 static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        phys_addr_t paddr = page_to_phys(page) + offset;
        _dma_cache_sync(paddr, size, dir);
 }
 
 static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
-          int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+          int nents, enum dma_data_direction dir, unsigned long attrs)
 {
        struct scatterlist *s;
        int i;
 
  */
 static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        dma_addr_t dma_addr;
        int ret;
  * should be)
  */
 static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
-               enum dma_data_direction dir, struct dma_attrs *attrs)
+               enum dma_data_direction dir, unsigned long attrs)
 {
        struct safe_buffer *buf;
 
 
 
 #include <linux/mm_types.h>
 #include <linux/scatterlist.h>
-#include <linux/dma-attrs.h>
 #include <linux/dma-debug.h>
 
 #include <asm/memory.h>
  * to be the device-viewed address.
  */
 extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-                          gfp_t gfp, struct dma_attrs *attrs);
+                          gfp_t gfp, unsigned long attrs);
 
 /**
  * arm_dma_free - free memory allocated by arm_dma_alloc
  * during and after this call executing are illegal.
  */
 extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
-                        dma_addr_t handle, struct dma_attrs *attrs);
+                        dma_addr_t handle, unsigned long attrs);
 
 /**
  * arm_dma_mmap - map a coherent DMA allocation into user space
  */
 extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
-                       struct dma_attrs *attrs);
+                       unsigned long attrs);
 
 /*
  * This can be called during early boot to increase the size of the atomic
  * The scatter list versions of the above methods.
  */
 extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
-               enum dma_data_direction, struct dma_attrs *attrs);
+               enum dma_data_direction, unsigned long attrs);
 extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
-               enum dma_data_direction, struct dma_attrs *attrs);
+               enum dma_data_direction, unsigned long attrs);
 extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
 extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
 extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
-               struct dma_attrs *attrs);
+               unsigned long attrs);
 
 #endif /* __KERNEL__ */
 #endif
 
 #define _ASM_ARM_XEN_PAGE_COHERENT_H
 
 #include <asm/page.h>
-#include <linux/dma-attrs.h>
 #include <linux/dma-mapping.h>
 
 void __xen_dma_map_page(struct device *hwdev, struct page *page,
             dma_addr_t dev_addr, unsigned long offset, size_t size,
-            enum dma_data_direction dir, struct dma_attrs *attrs);
+            enum dma_data_direction dir, unsigned long attrs);
 void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
-               struct dma_attrs *attrs);
+               unsigned long attrs);
 void __xen_dma_sync_single_for_cpu(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir);
 
                dma_addr_t handle, size_t size, enum dma_data_direction dir);
 
 static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
-               dma_addr_t *dma_handle, gfp_t flags,
-               struct dma_attrs *attrs)
+               dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
 {
        return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
 }
 
 static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
-               void *cpu_addr, dma_addr_t dma_handle,
-               struct dma_attrs *attrs)
+               void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
 {
        __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
 }
 
 static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
             dma_addr_t dev_addr, unsigned long offset, size_t size,
-            enum dma_data_direction dir, struct dma_attrs *attrs)
+            enum dma_data_direction dir, unsigned long attrs)
 {
        unsigned long page_pfn = page_to_xen_pfn(page);
        unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
 }
 
 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-               size_t size, enum dma_data_direction dir,
-               struct dma_attrs *attrs)
+               size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
        unsigned long pfn = PFN_DOWN(handle);
        /*
 
  */
 static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size, enum dma_data_direction dir,
-            struct dma_attrs *attrs)
+            unsigned long attrs)
 {
-       if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+       if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __dma_page_cpu_to_dev(page, offset, size, dir);
        return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }
 
 static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size, enum dma_data_direction dir,
-            struct dma_attrs *attrs)
+            unsigned long attrs)
 {
        return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }
  * whatever the device wrote there.
  */
 static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
-               size_t size, enum dma_data_direction dir,
-               struct dma_attrs *attrs)
+               size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-       if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+       if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
                                      handle & ~PAGE_MASK, size, dir);
 }
 EXPORT_SYMBOL(arm_dma_ops);
 
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
-       dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
+       dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
-                                 dma_addr_t handle, struct dma_attrs *attrs);
+                                 dma_addr_t handle, unsigned long attrs);
 static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
-                struct dma_attrs *attrs);
+                unsigned long attrs);
 
 struct dma_map_ops arm_coherent_dma_ops = {
        .alloc                  = arm_coherent_dma_alloc,
        dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
 
-static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
+static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
 {
-       prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
-                           pgprot_writecombine(prot) :
-                           pgprot_dmacoherent(prot);
+       prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
+                       pgprot_writecombine(prot) :
+                       pgprot_dmacoherent(prot);
        return prot;
 }
 
 
 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                         gfp_t gfp, pgprot_t prot, bool is_coherent,
-                        struct dma_attrs *attrs, const void *caller)
+                        unsigned long attrs, const void *caller)
 {
        u64 mask = get_coherent_dma_mask(dev);
        struct page *page = NULL;
                .gfp = gfp,
                .prot = prot,
                .caller = caller,
-               .want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
+               .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
                .coherent_flag = is_coherent ? COHERENT : NORMAL,
        };
 
  * virtual and bus address for that space.
  */
 void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-                   gfp_t gfp, struct dma_attrs *attrs)
+                   gfp_t gfp, unsigned long attrs)
 {
        pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 
 }
 
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
-       dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+       dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
 {
        return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
                           attrs, __builtin_return_address(0));
 
 static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
-                struct dma_attrs *attrs)
+                unsigned long attrs)
 {
        int ret = -ENXIO;
 #ifdef CONFIG_MMU
  */
 static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
-                struct dma_attrs *attrs)
+                unsigned long attrs)
 {
        return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
 
 int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
-                struct dma_attrs *attrs)
+                unsigned long attrs)
 {
 #ifdef CONFIG_MMU
        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
  * Free a buffer as defined by the above mapping.
  */
 static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
-                          dma_addr_t handle, struct dma_attrs *attrs,
+                          dma_addr_t handle, unsigned long attrs,
                           bool is_coherent)
 {
        struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
                .size = PAGE_ALIGN(size),
                .cpu_addr = cpu_addr,
                .page = page,
-               .want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
+               .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
        };
 
        buf = arm_dma_buffer_find(cpu_addr);
 }
 
 void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
-                 dma_addr_t handle, struct dma_attrs *attrs)
+                 dma_addr_t handle, unsigned long attrs)
 {
        __arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
 }
 
 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
-                                 dma_addr_t handle, struct dma_attrs *attrs)
+                                 dma_addr_t handle, unsigned long attrs)
 {
        __arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
 }
 
 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                 void *cpu_addr, dma_addr_t handle, size_t size,
-                struct dma_attrs *attrs)
+                unsigned long attrs)
 {
        struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
        int ret;
  * here.
  */
 int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-               enum dma_data_direction dir, struct dma_attrs *attrs)
+               enum dma_data_direction dir, unsigned long attrs)
 {
        struct dma_map_ops *ops = get_dma_ops(dev);
        struct scatterlist *s;
  * rules concerning calls here are the same as for dma_unmap_single().
  */
 void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-               enum dma_data_direction dir, struct dma_attrs *attrs)
+               enum dma_data_direction dir, unsigned long attrs)
 {
        struct dma_map_ops *ops = get_dma_ops(dev);
        struct scatterlist *s;
 static const int iommu_order_array[] = { 9, 8, 4, 0 };
 
 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
-                                         gfp_t gfp, struct dma_attrs *attrs,
+                                         gfp_t gfp, unsigned long attrs,
                                          int coherent_flag)
 {
        struct page **pages;
        if (!pages)
                return NULL;
 
-       if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+       if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                unsigned long order = get_order(size);
                struct page *page;
        }
 
        /* Go straight to 4K chunks if caller says it's OK. */
-       if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs))
+       if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
                order_idx = ARRAY_SIZE(iommu_order_array) - 1;
 
        /*
 }
 
 static int __iommu_free_buffer(struct device *dev, struct page **pages,
-                              size_t size, struct dma_attrs *attrs)
+                              size_t size, unsigned long attrs)
 {
        int count = size >> PAGE_SHIFT;
        int i;
 
-       if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+       if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                dma_release_from_contiguous(dev, pages[0], count);
        } else {
                for (i = 0; i < count; i++)
        return (struct page **)page;
 }
 
-static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
+static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
 {
        struct vm_struct *area;
 
        if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
                return __atomic_get_pages(cpu_addr);
 
-       if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+       if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
                return cpu_addr;
 
        area = find_vm_area(cpu_addr);
 }
 
 static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
-           dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs,
+           dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
            int coherent_flag)
 {
        pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
        if (*handle == DMA_ERROR_CODE)
                goto err_buffer;
 
-       if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+       if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
                return pages;
 
        addr = __iommu_alloc_remap(pages, size, gfp, prot,
 }
 
 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
-                   dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+           dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
 {
        return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
 }
 
 static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
-                   dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+                   dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
 {
        return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
 }
 
 static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                    void *cpu_addr, dma_addr_t dma_addr, size_t size,
-                   struct dma_attrs *attrs)
+                   unsigned long attrs)
 {
        unsigned long uaddr = vma->vm_start;
        unsigned long usize = vma->vm_end - vma->vm_start;
 }
 static int arm_iommu_mmap_attrs(struct device *dev,
                struct vm_area_struct *vma, void *cpu_addr,
-               dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+               dma_addr_t dma_addr, size_t size, unsigned long attrs)
 {
        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
 
 
 static int arm_coherent_iommu_mmap_attrs(struct device *dev,
                struct vm_area_struct *vma, void *cpu_addr,
-               dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+               dma_addr_t dma_addr, size_t size, unsigned long attrs)
 {
        return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
  * Must not be called with IRQs disabled.
  */
 void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
-       dma_addr_t handle, struct dma_attrs *attrs, int coherent_flag)
+       dma_addr_t handle, unsigned long attrs, int coherent_flag)
 {
        struct page **pages;
        size = PAGE_ALIGN(size);
                return;
        }
 
-       if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
+       if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) {
                dma_common_free_remap(cpu_addr, size,
                        VM_ARM_DMA_CONSISTENT | VM_USERMAP);
        }
 }
 
 void arm_iommu_free_attrs(struct device *dev, size_t size,
-                   void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+                   void *cpu_addr, dma_addr_t handle, unsigned long attrs)
 {
        __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
 }
 
 void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
-                   void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+                   void *cpu_addr, dma_addr_t handle, unsigned long attrs)
 {
        __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
 }
 
 static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
                                 void *cpu_addr, dma_addr_t dma_addr,
-                                size_t size, struct dma_attrs *attrs)
+                                size_t size, unsigned long attrs)
 {
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page **pages = __iommu_get_pages(cpu_addr, attrs);
  */
 static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
                          size_t size, dma_addr_t *handle,
-                         enum dma_data_direction dir, struct dma_attrs *attrs,
+                         enum dma_data_direction dir, unsigned long attrs,
                          bool is_coherent)
 {
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
                phys_addr_t phys = page_to_phys(sg_page(s));
                unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
-               if (!is_coherent &&
-                       !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+               if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                        __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
                prot = __dma_direction_to_prot(dir);
 }
 
 static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-                    enum dma_data_direction dir, struct dma_attrs *attrs,
+                    enum dma_data_direction dir, unsigned long attrs,
                     bool is_coherent)
 {
        struct scatterlist *s = sg, *dma = sg, *start = sg;
  * obtained via sg_dma_{address,length}.
  */
 int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
-               int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+               int nents, enum dma_data_direction dir, unsigned long attrs)
 {
        return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
 }
  * sg_dma_{address,length}.
  */
 int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
-               int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+               int nents, enum dma_data_direction dir, unsigned long attrs)
 {
        return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
 }
 
 static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
-               int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
-               bool is_coherent)
+               int nents, enum dma_data_direction dir,
+               unsigned long attrs, bool is_coherent)
 {
        struct scatterlist *s;
        int i;
                if (sg_dma_len(s))
                        __iommu_remove_mapping(dev, sg_dma_address(s),
                                               sg_dma_len(s));
-               if (!is_coherent &&
-                   !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+               if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                        __dma_page_dev_to_cpu(sg_page(s), s->offset,
                                              s->length, dir);
        }
  * rules concerning calls here are the same as for dma_unmap_single().
  */
 void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
-               int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+               int nents, enum dma_data_direction dir, unsigned long attrs)
 {
        __iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
 }
  * rules concerning calls here are the same as for dma_unmap_single().
  */
 void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-                       enum dma_data_direction dir, struct dma_attrs *attrs)
+                       enum dma_data_direction dir, unsigned long attrs)
 {
        __iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
 }
  */
 static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size, enum dma_data_direction dir,
-            struct dma_attrs *attrs)
+            unsigned long attrs)
 {
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        dma_addr_t dma_addr;
  */
 static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size, enum dma_data_direction dir,
-            struct dma_attrs *attrs)
+            unsigned long attrs)
 {
-       if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+       if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __dma_page_cpu_to_dev(page, offset, size, dir);
 
        return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
  * Coherent IOMMU aware version of arm_dma_unmap_page()
  */
 static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
-               size_t size, enum dma_data_direction dir,
-               struct dma_attrs *attrs)
+               size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        dma_addr_t iova = handle & PAGE_MASK;
  * IOMMU aware version of arm_dma_unmap_page()
  */
 static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
-               size_t size, enum dma_data_direction dir,
-               struct dma_attrs *attrs)
+               size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        dma_addr_t iova = handle & PAGE_MASK;
        if (!iova)
                return;
 
-       if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+       if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __dma_page_dev_to_cpu(page, offset, size, dir);
 
        iommu_unmap(mapping->domain, iova, len);
 
 
 void __xen_dma_map_page(struct device *hwdev, struct page *page,
             dma_addr_t dev_addr, unsigned long offset, size_t size,
-            enum dma_data_direction dir, struct dma_attrs *attrs)
+            enum dma_data_direction dir, unsigned long attrs)
 {
        if (is_device_dma_coherent(hwdev))
                return;
-       if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+       if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                return;
 
        __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
 
 void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 
 {
        if (is_device_dma_coherent(hwdev))
                return;
-       if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+       if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                return;
 
        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
 
 
 static int swiotlb __read_mostly;
 
-static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
+static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
                                 bool coherent)
 {
-       if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+       if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
                return pgprot_writecombine(prot);
        return prot;
 }
 
 static void *__dma_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags,
-                                 struct dma_attrs *attrs)
+                                 unsigned long attrs)
 {
        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
 
 static void __dma_free_coherent(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle,
-                               struct dma_attrs *attrs)
+                               unsigned long attrs)
 {
        bool freed;
        phys_addr_t paddr = dma_to_phys(dev, dma_handle);
 
 static void *__dma_alloc(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flags,
-                        struct dma_attrs *attrs)
+                        unsigned long attrs)
 {
        struct page *page;
        void *ptr, *coherent_ptr;
 
 static void __dma_free(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle,
-                      struct dma_attrs *attrs)
+                      unsigned long attrs)
 {
        void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
 
 static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
-                                    struct dma_attrs *attrs)
+                                    unsigned long attrs)
 {
        dma_addr_t dev_addr;
 
 
 static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
                                 size_t size, enum dma_data_direction dir,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
        if (!is_device_dma_coherent(dev))
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 
 static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                  int nelems, enum dma_data_direction dir,
-                                 struct dma_attrs *attrs)
+                                 unsigned long attrs)
 {
        struct scatterlist *sg;
        int i, ret;
 static void __swiotlb_unmap_sg_attrs(struct device *dev,
                                     struct scatterlist *sgl, int nelems,
                                     enum dma_data_direction dir,
-                                    struct dma_attrs *attrs)
+                                    unsigned long attrs)
 {
        struct scatterlist *sg;
        int i;
 static int __swiotlb_mmap(struct device *dev,
                          struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size,
-                         struct dma_attrs *attrs)
+                         unsigned long attrs)
 {
        int ret = -ENXIO;
        unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
 
 static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
                                 void *cpu_addr, dma_addr_t handle, size_t size,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
        int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
 
 
 static void *__dummy_alloc(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
-                          struct dma_attrs *attrs)
+                          unsigned long attrs)
 {
        return NULL;
 }
 
 static void __dummy_free(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle,
-                        struct dma_attrs *attrs)
+                        unsigned long attrs)
 {
 }
 
 static int __dummy_mmap(struct device *dev,
                        struct vm_area_struct *vma,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
-                       struct dma_attrs *attrs)
+                       unsigned long attrs)
 {
        return -ENXIO;
 }
 static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
-                                  struct dma_attrs *attrs)
+                                  unsigned long attrs)
 {
        return DMA_ERROR_CODE;
 }
 
 static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
-                              struct dma_attrs *attrs)
+                              unsigned long attrs)
 {
 }
 
 static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
                          int nelems, enum dma_data_direction dir,
-                         struct dma_attrs *attrs)
+                         unsigned long attrs)
 {
        return 0;
 }
 static void __dummy_unmap_sg(struct device *dev,
                             struct scatterlist *sgl, int nelems,
                             enum dma_data_direction dir,
-                            struct dma_attrs *attrs)
+                            unsigned long attrs)
 {
 }
 
 
 static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                                 dma_addr_t *handle, gfp_t gfp,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
        bool coherent = is_device_dma_coherent(dev);
        int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
 }
 
 static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
-                              dma_addr_t handle, struct dma_attrs *attrs)
+                              dma_addr_t handle, unsigned long attrs)
 {
        size_t iosize = size;
 
         * Hence how dodgy the below logic looks...
         */
        if (__in_atomic_pool(cpu_addr, size)) {
-               iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
+               iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
                __free_from_pool(cpu_addr, size);
        } else if (is_vmalloc_addr(cpu_addr)){
                struct vm_struct *area = find_vm_area(cpu_addr);
                iommu_dma_free(dev, area->pages, iosize, &handle);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else {
-               iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
+               iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
                __free_pages(virt_to_page(cpu_addr), get_order(size));
        }
 }
 
 static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr, size_t size,
-                             struct dma_attrs *attrs)
+                             unsigned long attrs)
 {
        struct vm_struct *area;
        int ret;
 
 static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
                               void *cpu_addr, dma_addr_t dma_addr,
-                              size_t size, struct dma_attrs *attrs)
+                              size_t size, unsigned long attrs)
 {
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct vm_struct *area = find_vm_area(cpu_addr);
 static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
-                                  struct dma_attrs *attrs)
+                                  unsigned long attrs)
 {
        bool coherent = is_device_dma_coherent(dev);
        int prot = dma_direction_to_prot(dir, coherent);
        dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
 
        if (!iommu_dma_mapping_error(dev, dev_addr) &&
-           !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+           (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_single_for_device(dev, dev_addr, size, dir);
 
        return dev_addr;
 
 static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
-                              struct dma_attrs *attrs)
+                              unsigned long attrs)
 {
-       if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+       if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_single_for_cpu(dev, dev_addr, size, dir);
 
        iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
 
 static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                int nelems, enum dma_data_direction dir,
-                               struct dma_attrs *attrs)
+                               unsigned long attrs)
 {
        bool coherent = is_device_dma_coherent(dev);
 
-       if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+       if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_sg_for_device(dev, sgl, nelems, dir);
 
        return iommu_dma_map_sg(dev, sgl, nelems,
 static void __iommu_unmap_sg_attrs(struct device *dev,
                                   struct scatterlist *sgl, int nelems,
                                   enum dma_data_direction dir,
-                                  struct dma_attrs *attrs)
+                                  unsigned long attrs)
 {
-       if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+       if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);
 
        iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
 
 }
 
 static void *avr32_dma_alloc(struct device *dev, size_t size,
-               dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+               dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
 {
        struct page *page;
        dma_addr_t phys;
                return NULL;
        phys = page_to_phys(page);
 
-       if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) {
+       if (attrs & DMA_ATTR_WRITE_COMBINE) {
                /* Now, map the page into P3 with write-combining turned on */
                *handle = phys;
                return __ioremap(phys, size, _PAGE_BUFFER);
 }
 
 static void avr32_dma_free(struct device *dev, size_t size,
-               void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+               void *cpu_addr, dma_addr_t handle, unsigned long attrs)
 {
        struct page *page;
 
-       if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) {
+       if (attrs & DMA_ATTR_WRITE_COMBINE) {
                iounmap(cpu_addr);
 
                page = phys_to_page(handle);
 
 static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size,
-               enum dma_data_direction direction, struct dma_attrs *attrs)
+               enum dma_data_direction direction, unsigned long attrs)
 {
        void *cpu_addr = page_address(page) + offset;
 
 
 static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                int nents, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        int i;
        struct scatterlist *sg;
 
 }
 
 static void *bfin_dma_alloc(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+               dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
        void *ret;
 
 }
 
 static void bfin_dma_free(struct device *dev, size_t size, void *vaddr,
-                 dma_addr_t dma_handle, struct dma_attrs *attrs)
+                 dma_addr_t dma_handle, unsigned long attrs)
 {
        __free_dma_pages((unsigned long)vaddr, get_pages(size));
 }
 
 static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list,
                int nents, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        struct scatterlist *sg;
        int i;
 
 static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        dma_addr_t handle = (dma_addr_t)(page_address(page) + offset);
 
 
 
 extern void coherent_mem_init(u32 start, u32 size);
 void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-               gfp_t gfp, struct dma_attrs *attrs);
+               gfp_t gfp, unsigned long attrs);
 void c6x_dma_free(struct device *dev, size_t size, void *vaddr,
-               dma_addr_t dma_handle, struct dma_attrs *attrs);
+               dma_addr_t dma_handle, unsigned long attrs);
 
 #endif /* _ASM_C6X_DMA_MAPPING_H */
 
 
 static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        dma_addr_t handle = virt_to_phys(page_address(page) + offset);
 
 }
 
 static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle,
-               size_t size, enum dma_data_direction dir, struct dma_attrs *attrs)
+               size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
        c6x_dma_sync(handle, size, dir);
 }
 
 static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
-               int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+               int nents, enum dma_data_direction dir, unsigned long attrs)
 {
        struct scatterlist *sg;
        int i;
 }
 
 static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
-                 int nents, enum dma_data_direction dir,
-                 struct dma_attrs *attrs)
+                 int nents, enum dma_data_direction dir, unsigned long attrs)
 {
        struct scatterlist *sg;
        int i;
 
  * virtual and DMA address for that space.
  */
 void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-               gfp_t gfp, struct dma_attrs *attrs)
+               gfp_t gfp, unsigned long attrs)
 {
        u32 paddr;
        int order;
  * Free DMA coherent memory as defined by the above mapping.
  */
 void c6x_dma_free(struct device *dev, size_t size, void *vaddr,
-               dma_addr_t dma_handle, struct dma_attrs *attrs)
+               dma_addr_t dma_handle, unsigned long attrs)
 {
        int order;
 
 
 #include <asm/io.h>
 
 static void *v32_dma_alloc(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, gfp_t gfp,  struct dma_attrs *attrs)
+               dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
        void *ret;
 
 }
 
 static void v32_dma_free(struct device *dev, size_t size, void *vaddr,
-               dma_addr_t dma_handle, struct dma_attrs *attrs)
+               dma_addr_t dma_handle, unsigned long attrs)
 {
        free_pages((unsigned long)vaddr, get_order(size));
 }
 
 static inline dma_addr_t v32_dma_map_page(struct device *dev,
                struct page *page, unsigned long offset, size_t size,
-               enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               enum dma_data_direction direction, unsigned long attrs)
 {
        return page_to_phys(page) + offset;
 }
 
 static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        printk("Map sg\n");
        return nents;
 
 static LIST_HEAD(dma_alloc_list);
 
 static void *frv_dma_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
-               gfp_t gfp, struct dma_attrs *attrs)
+               gfp_t gfp, unsigned long attrs)
 {
        struct dma_alloc_record *new;
        struct list_head *this = &dma_alloc_list;
 }
 
 static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr,
-               dma_addr_t dma_handle, struct dma_attrs *attrs)
+               dma_addr_t dma_handle, unsigned long attrs)
 {
        struct dma_alloc_record *rec;
        unsigned long flags;
 
 static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                int nents, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        int i;
        struct scatterlist *sg;
 
 static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size,
-               enum dma_data_direction direction, struct dma_attrs *attrs)
+               enum dma_data_direction direction, unsigned long attrs)
 {
        BUG_ON(direction == DMA_NONE);
        flush_dcache_page(page);
 
 #include <asm/io.h>
 
 static void *frv_dma_alloc(struct device *hwdev, size_t size,
-               dma_addr_t *dma_handle, gfp_t gfp,
-               struct dma_attrs *attrs)
+               dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
        void *ret;
 
 }
 
 static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr,
-               dma_addr_t dma_handle, struct dma_attrs *attrs)
+               dma_addr_t dma_handle, unsigned long attrs)
 {
        consistent_free(vaddr);
 }
 
 static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                int nents, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        unsigned long dampr2;
        void *vaddr;
 
 static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size,
-               enum dma_data_direction direction, struct dma_attrs *attrs)
+               enum dma_data_direction direction, unsigned long attrs)
 {
        flush_dcache_page(page);
        return (dma_addr_t) page_to_phys(page) + offset;
 
 
 static void *dma_alloc(struct device *dev, size_t size,
                       dma_addr_t *dma_handle, gfp_t gfp,
-                      struct dma_attrs *attrs)
+                      unsigned long attrs)
 {
        void *ret;
 
 
 static void dma_free(struct device *dev, size_t size,
                     void *vaddr, dma_addr_t dma_handle,
-                    struct dma_attrs *attrs)
+                    unsigned long attrs)
 
 {
        free_pages((unsigned long)vaddr, get_order(size));
 static dma_addr_t map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t size,
                                  enum dma_data_direction direction,
-                                 struct dma_attrs *attrs)
+                                 unsigned long attrs)
 {
        return page_to_phys(page) + offset;
 }
 
 static int map_sg(struct device *dev, struct scatterlist *sgl,
                  int nents, enum dma_data_direction direction,
-                 struct dma_attrs *attrs)
+                 unsigned long attrs)
 {
        struct scatterlist *sg;
        int i;
 
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-debug.h>
-#include <linux/dma-attrs.h>
 #include <asm/io.h>
 
 struct device;
 
 
 static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t flag,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
        void *ret;
 
 }
 
 static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr,
-                                 dma_addr_t dma_addr, struct dma_attrs *attrs)
+                                 dma_addr_t dma_addr, unsigned long attrs)
 {
        gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
 }
 
 static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,
                          int nents, enum dma_data_direction dir,
-                         struct dma_attrs *attrs)
+                         unsigned long attrs)
 {
        struct scatterlist *s;
        int i;
 static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
-                                  struct dma_attrs *attrs)
+                                  unsigned long attrs)
 {
        dma_addr_t bus = page_to_phys(page) + offset;
        WARN_ON(size == 0);
 
 static dma_addr_t sba_map_page(struct device *dev, struct page *page,
                               unsigned long poff, size_t size,
                               enum dma_data_direction dir,
-                              struct dma_attrs *attrs)
+                              unsigned long attrs)
 {
        struct ioc *ioc;
        void *addr = page_address(page) + poff;
 
 static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
                                       size_t size, enum dma_data_direction dir,
-                                      struct dma_attrs *attrs)
+                                      unsigned long attrs)
 {
        return sba_map_page(dev, virt_to_page(addr),
                            (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
  * See Documentation/DMA-API-HOWTO.txt
  */
 static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
-                          enum dma_data_direction dir, struct dma_attrs *attrs)
+                          enum dma_data_direction dir, unsigned long attrs)
 {
        struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
 }
 
 void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-                           enum dma_data_direction dir, struct dma_attrs *attrs)
+                           enum dma_data_direction dir, unsigned long attrs)
 {
        sba_unmap_page(dev, iova, size, dir, attrs);
 }
  */
 static void *
 sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                  gfp_t flags, struct dma_attrs *attrs)
+                  gfp_t flags, unsigned long attrs)
 {
        struct ioc *ioc;
        void *addr;
         * device to map single to get an iova mapping.
         */
        *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
-                                          size, 0, NULL);
+                                          size, 0, 0);
 
        return addr;
 }
  * See Documentation/DMA-API-HOWTO.txt
  */
 static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
-                             dma_addr_t dma_handle, struct dma_attrs *attrs)
+                             dma_addr_t dma_handle, unsigned long attrs)
 {
-       sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
+       sba_unmap_single_attrs(dev, dma_handle, size, 0, 0);
        free_pages((unsigned long) vaddr, get_order(size));
 }
 
 
 static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
                               int nents, enum dma_data_direction dir,
-                              struct dma_attrs *attrs);
+                              unsigned long attrs);
 /**
  * sba_map_sg - map Scatter/Gather list
  * @dev: instance of PCI owned by the driver that's asking.
  */
 static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
                            int nents, enum dma_data_direction dir,
-                           struct dma_attrs *attrs)
+                           unsigned long attrs)
 {
        struct ioc *ioc;
        int coalesced, filled = 0;
  */
 static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
                               int nents, enum dma_data_direction dir,
-                              struct dma_attrs *attrs)
+                              unsigned long attrs)
 {
 #ifdef ASSERT_PDIR_SANITY
        struct ioc *ioc;
 
 struct task_struct;
 struct pci_dev;
 struct msi_desc;
-struct dma_attrs;
 
 typedef void ia64_mv_setup_t (char **);
 typedef void ia64_mv_cpu_init_t (void);
 
 
 static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
                                         dma_addr_t *dma_handle, gfp_t gfp,
-                                        struct dma_attrs *attrs)
+                                        unsigned long attrs)
 {
        if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
                gfp |= GFP_DMA;
 
 static void ia64_swiotlb_free_coherent(struct device *dev, size_t size,
                                       void *vaddr, dma_addr_t dma_addr,
-                                      struct dma_attrs *attrs)
+                                      unsigned long attrs)
 {
        swiotlb_free_coherent(dev, size, vaddr, dma_addr);
 }
 
  */
 static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t * dma_handle, gfp_t flags,
-                                  struct dma_attrs *attrs)
+                                  unsigned long attrs)
 {
        void *cpuaddr;
        unsigned long phys_addr;
  * any associated IOMMU mappings.
  */
 static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-                                dma_addr_t dma_handle, struct dma_attrs *attrs)
+                                dma_addr_t dma_handle, unsigned long attrs)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
 static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t size,
                                  enum dma_data_direction dir,
-                                 struct dma_attrs *attrs)
+                                 unsigned long attrs)
 {
        void *cpu_addr = page_address(page) + offset;
        dma_addr_t dma_addr;
        unsigned long phys_addr;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
-       int dmabarr;
-
-       dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
 
        BUG_ON(!dev_is_pci(dev));
 
        phys_addr = __pa(cpu_addr);
-       if (dmabarr)
+       if (attrs & DMA_ATTR_WRITE_BARRIER)
                dma_addr = provider->dma_map_consistent(pdev, phys_addr,
                                                        size, SN_DMA_ADDR_PHYS);
        else
  */
 static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
                              size_t size, enum dma_data_direction dir,
-                             struct dma_attrs *attrs)
+                             unsigned long attrs)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
  */
 static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
                            int nhwentries, enum dma_data_direction dir,
-                           struct dma_attrs *attrs)
+                           unsigned long attrs)
 {
        int i;
        struct pci_dev *pdev = to_pci_dev(dev);
  */
 static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
                         int nhwentries, enum dma_data_direction dir,
-                        struct dma_attrs *attrs)
+                        unsigned long attrs)
 {
        unsigned long phys_addr;
        struct scatterlist *saved_sg = sgl, *sg;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
        int i;
-       int dmabarr;
-
-       dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
 
        BUG_ON(!dev_is_pci(dev));
 
        for_each_sg(sgl, sg, nhwentries, i) {
                dma_addr_t dma_addr;
                phys_addr = SG_ENT_PHYS_ADDRESS(sg);
-               if (dmabarr)
+               if (attrs & DMA_ATTR_WRITE_BARRIER)
                        dma_addr = provider->dma_map_consistent(pdev,
                                                                phys_addr,
                                                                sg->length,
 
 #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
 
 static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-               gfp_t flag, struct dma_attrs *attrs)
+               gfp_t flag, unsigned long attrs)
 {
        struct page *page, **map;
        pgprot_t pgprot;
 }
 
 static void m68k_dma_free(struct device *dev, size_t size, void *addr,
-               dma_addr_t handle, struct dma_attrs *attrs)
+               dma_addr_t handle, unsigned long attrs)
 {
        pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
        vfree(addr);
 #include <asm/cacheflush.h>
 
 static void *m68k_dma_alloc(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+               dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
        void *ret;
        /* ignore region specifiers */
 }
 
 static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
-               dma_addr_t dma_handle, struct dma_attrs *attrs)
+               dma_addr_t dma_handle, unsigned long attrs)
 {
        free_pages((unsigned long)vaddr, get_order(size));
 }
 
 static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        dma_addr_t handle = page_to_phys(page) + offset;
 
 }
 
 static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
-               int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+               int nents, enum dma_data_direction dir, unsigned long attrs)
 {
        int i;
        struct scatterlist *sg;
 
  * virtual and bus address for that space.
  */
 static void *metag_dma_alloc(struct device *dev, size_t size,
-               dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+               dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
 {
        struct page *page;
        struct metag_vm_region *c;
  * free a page as defined by the above mapping.
  */
 static void metag_dma_free(struct device *dev, size_t size, void *vaddr,
-               dma_addr_t dma_handle, struct dma_attrs *attrs)
+               dma_addr_t dma_handle, unsigned long attrs)
 {
        struct metag_vm_region *c;
        unsigned long flags, addr;
 
 static int metag_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        unsigned long flags, user_size, kern_size;
        struct metag_vm_region *c;
        int ret = -ENXIO;
 
-       if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+       if (attrs & DMA_ATTR_WRITE_COMBINE)
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        else
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
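
For callers, selecting the write-combined path is now a matter of
setting one bit in the attrs word handed to dma_mmap_attrs(). A minimal
driver-side sketch (dev, vma, cpu_addr, handle and size are assumed to
come from the surrounding driver):

        unsigned long attrs = 0;

        attrs |= DMA_ATTR_WRITE_COMBINE;
        if (dma_mmap_attrs(dev, vma, cpu_addr, handle, size, attrs))
                return -ENXIO;  /* mapping failed; sketch only */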
 
 static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size,
-               enum dma_data_direction direction, struct dma_attrs *attrs)
+               enum dma_data_direction direction, unsigned long attrs)
 {
        dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
                            direction);
 
 static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                size_t size, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
 }
 
 static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                int nents, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        struct scatterlist *sg;
        int i;
 
 static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                int nhwentries, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        struct scatterlist *sg;
        int i;
 
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-debug.h>
-#include <linux/dma-attrs.h>
 #include <asm/io.h>
 #include <asm/cacheflush.h>
 
 
 
 static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag,
-                                      struct dma_attrs *attrs)
+                                      unsigned long attrs)
 {
 #ifdef NOT_COHERENT_CACHE
        return consistent_alloc(flag, size, dma_handle);
 
 static void dma_direct_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle,
-                                    struct dma_attrs *attrs)
+                                    unsigned long attrs)
 {
 #ifdef NOT_COHERENT_CACHE
        consistent_free(size, vaddr);
 
 static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
-                            struct dma_attrs *attrs)
+                            unsigned long attrs)
 {
        struct scatterlist *sg;
        int i;
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction direction,
-                                            struct dma_attrs *attrs)
+                                            unsigned long attrs)
 {
        __dma_sync(page_to_phys(page) + offset, size, direction);
        return page_to_phys(page) + offset;
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
-                                        struct dma_attrs *attrs)
+                                        unsigned long attrs)
 {
 /* It is not necessary to do cache cleanup
  *
 static
 int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                             void *cpu_addr, dma_addr_t handle, size_t size,
-                            struct dma_attrs *attrs)
+                            unsigned long attrs)
 {
 #ifdef CONFIG_MMU
        unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 
 
 static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction,
-       struct dma_attrs *attrs)
+       unsigned long attrs)
 {
        dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
                                            direction, attrs);
 }
 
 static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg,
-       int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
+       int nents, enum dma_data_direction direction, unsigned long attrs)
 {
        int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs);
        mb();
 }
 
 static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
-       dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+       dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
        void *ret;
 
 }
 
 static void octeon_dma_free_coherent(struct device *dev, size_t size,
-       void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
+       void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
 {
        swiotlb_free_coherent(dev, size, vaddr, dma_handle);
 }
 
 #include <dma-coherence.h>
 
 static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+               dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
        void *ret;
 
 }
 
 static void loongson_dma_free_coherent(struct device *dev, size_t size,
-               void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
+               void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
 {
        swiotlb_free_coherent(dev, size, vaddr, dma_handle);
 }
 static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction dir,
-                               struct dma_attrs *attrs)
+                               unsigned long attrs)
 {
        dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
                                        dir, attrs);
 
 static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction dir,
-                               struct dma_attrs *attrs)
+                               unsigned long attrs)
 {
-       int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, NULL);
+       int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, 0);
        mb();
 
        return r;
 
 }
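
Note that loongson_dma_map_sg() already discarded the caller's
attributes before this patch (it passed NULL, not attrs, to
swiotlb_map_sg_attrs()), and the conversion faithfully preserves that
by passing 0. Forwarding them would be the one-line sketch below, but
that would be a behaviour change outside the scope of this mechanical
conversion:

        int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, attrs);

The same pattern recurs in the or1k scatter/gather loops later in the
patch.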
 
 static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
-       dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+       dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
        void *ret;
        struct page *page = NULL;
         * XXX: seems like the coherent and non-coherent implementations could
         * be consolidated.
         */
-       if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+       if (attrs & DMA_ATTR_NON_CONSISTENT)
                return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp);
 
        gfp = massage_gfp_flags(dev, gfp);
 }
 
 static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-       dma_addr_t dma_handle, struct dma_attrs *attrs)
+       dma_addr_t dma_handle, unsigned long attrs)
 {
        unsigned long addr = (unsigned long) vaddr;
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page *page = NULL;
 
-       if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+       if (attrs & DMA_ATTR_NON_CONSISTENT) {
                mips_dma_free_noncoherent(dev, size, vaddr, dma_handle);
                return;
        }
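
Since allocation and freeing now dispatch on the same bit, a driver
opting into the non-consistent path must pass an identical attrs word
to both halves. A minimal usage sketch against the generic wrappers in
linux/dma-mapping.h (dev and size are assumed):

        dma_addr_t handle;
        void *cpu;

        cpu = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL,
                              DMA_ATTR_NON_CONSISTENT);
        ....
        dma_free_attrs(dev, size, cpu, handle, DMA_ATTR_NON_CONSISTENT);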
 
 static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
        void *cpu_addr, dma_addr_t dma_addr, size_t size,
-       struct dma_attrs *attrs)
+       unsigned long attrs)
 {
        unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
        pfn = page_to_pfn(virt_to_page((void *)addr));
 
-       if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+       if (attrs & DMA_ATTR_WRITE_COMBINE)
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        else
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 }
 
 static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
-       size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
+       size_t size, enum dma_data_direction direction, unsigned long attrs)
 {
        if (cpu_needs_post_dma_flush(dev))
                __dma_sync(dma_addr_to_page(dev, dma_addr),
 }
 
 static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
-       int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
+       int nents, enum dma_data_direction direction, unsigned long attrs)
 {
        int i;
        struct scatterlist *sg;
 
 static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction,
-       struct dma_attrs *attrs)
+       unsigned long attrs)
 {
        if (!plat_device_is_coherent(dev))
                __dma_sync(page, offset, size, direction);
 
 static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
        int nhwentries, enum dma_data_direction direction,
-       struct dma_attrs *attrs)
+       unsigned long attrs)
 {
        int i;
        struct scatterlist *sg;
 
 static char *nlm_swiotlb;
 
 static void *nlm_dma_alloc_coherent(struct device *dev, size_t size,
-       dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+       dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
 }
 
 static void nlm_dma_free_coherent(struct device *dev, size_t size,
-       void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
+       void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
 {
        swiotlb_free_coherent(dev, size, vaddr, dma_handle);
 }
 
 static unsigned long pci_sram_allocated = 0xbc000000;
 
 static void *mn10300_dma_alloc(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+               dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
        unsigned long addr;
        void *ret;
 }
 
 static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr,
-               dma_addr_t dma_handle, struct dma_attrs *attrs)
+               dma_addr_t dma_handle, unsigned long attrs)
 {
        unsigned long addr = (unsigned long) vaddr & ~0x20000000;
 
 
 static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                int nents, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        struct scatterlist *sg;
        int i;
 
 static dma_addr_t mn10300_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size,
-               enum dma_data_direction direction, struct dma_attrs *attrs)
+               enum dma_data_direction direction, unsigned long attrs)
 {
        return page_to_bus(page) + offset;
 }
 
 }
 
 static void *nios2_dma_alloc(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+               dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
        void *ret;
 
 }
 
 static void nios2_dma_free(struct device *dev, size_t size, void *vaddr,
-               dma_addr_t dma_handle, struct dma_attrs *attrs)
+               dma_addr_t dma_handle, unsigned long attrs)
 {
        unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);
 
 
 static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        int i;
 
 static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
                        unsigned long offset, size_t size,
                        enum dma_data_direction direction,
-                       struct dma_attrs *attrs)
+                       unsigned long attrs)
 {
        void *addr = page_address(page) + offset;
 
 
 static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                size_t size, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        __dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
 }
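
These two hooks are the halves of the usual streaming-DMA contract:
mapping makes the buffer visible to the device, unmapping hands it back
to the CPU, and attrs now travels through both as a plain word. Seen
from a driver (buf and len are assumed; 0 means no attributes):

        dma_addr_t h;

        h = dma_map_single_attrs(dev, buf, len, DMA_FROM_DEVICE, 0);
        if (dma_mapping_error(dev, h))
                return -ENOMEM;
        /* ... device writes into buf ... */
        dma_unmap_single_attrs(dev, h, len, DMA_FROM_DEVICE, 0);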
 
 static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                int nhwentries, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        void *addr;
        int i;
 
 #include <linux/dma-mapping.h>
 #include <linux/dma-debug.h>
 #include <linux/export.h>
-#include <linux/dma-attrs.h>
 
 #include <asm/cpuinfo.h>
 #include <asm/spr_defs.h>
 static void *
 or1k_dma_alloc(struct device *dev, size_t size,
               dma_addr_t *dma_handle, gfp_t gfp,
-              struct dma_attrs *attrs)
+              unsigned long attrs)
 {
        unsigned long va;
        void *page;
 
        va = (unsigned long)page;
 
-       if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+       if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
                /*
                 * We need to iterate through the pages, clearing the dcache for
                 * them and setting the cache-inhibit bit.
 
 static void
 or1k_dma_free(struct device *dev, size_t size, void *vaddr,
-             dma_addr_t dma_handle, struct dma_attrs *attrs)
+             dma_addr_t dma_handle, unsigned long attrs)
 {
        unsigned long va = (unsigned long)vaddr;
        struct mm_walk walk = {
                .mm = &init_mm
        };
 
-       if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+       if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
                /* walk_page_range shouldn't be able to fail here */
                WARN_ON(walk_page_range(va, va + size, &walk));
        }
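
The (attrs & DMA_ATTR_NON_CONSISTENT) == 0 spelling keeps the sense of
the old !dma_get_attr(...) test: perform the cache-inhibit and flush
work only when the caller did not ask for a non-consistent mapping. The
more common kernel idiom is the negated-mask form, which is equivalent:

        if (!(attrs & DMA_ATTR_NON_CONSISTENT)) {
                /* same condition, spelled with ! instead of == 0 */
        }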
 or1k_map_page(struct device *dev, struct page *page,
              unsigned long offset, size_t size,
              enum dma_data_direction dir,
-             struct dma_attrs *attrs)
+             unsigned long attrs)
 {
        unsigned long cl;
        dma_addr_t addr = page_to_phys(page) + offset;
 static void
 or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
                size_t size, enum dma_data_direction dir,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        /* Nothing special to do here... */
 }
 static int
 or1k_map_sg(struct device *dev, struct scatterlist *sg,
            int nents, enum dma_data_direction dir,
-           struct dma_attrs *attrs)
+           unsigned long attrs)
 {
        struct scatterlist *s;
        int i;
 
        for_each_sg(sg, s, nents, i) {
                s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
-                                              s->length, dir, NULL);
+                                              s->length, dir, 0);
        }
 
        return nents;
 static void
 or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
              int nents, enum dma_data_direction dir,
-             struct dma_attrs *attrs)
+             unsigned long attrs)
 {
        struct scatterlist *s;
        int i;
 
        for_each_sg(sg, s, nents, i) {
-               or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, NULL);
+               or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, 0);
        }
 }
 
 
 __initcall(pcxl_dma_init);
 
 static void *pa11_dma_alloc(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
+               dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
 {
        unsigned long vaddr;
        unsigned long paddr;
 }
 
 static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
-               dma_addr_t dma_handle, struct dma_attrs *attrs)
+               dma_addr_t dma_handle, unsigned long attrs)
 {
        int order;
 
 
 static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size,
-               enum dma_data_direction direction, struct dma_attrs *attrs)
+               enum dma_data_direction direction, unsigned long attrs)
 {
        void *addr = page_address(page) + offset;
        BUG_ON(direction == DMA_NONE);
 
 static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
                size_t size, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        BUG_ON(direction == DMA_NONE);
 
 
 static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                int nents, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        int i;
        struct scatterlist *sg;
 
 static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                int nents, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        int i;
        struct scatterlist *sg;
 };
 
 static void *pcx_dma_alloc(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
+               dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
 {
        void *addr;
 
-       if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+       if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
                return NULL;
 
        addr = (void *)__get_free_pages(flag, get_order(size));
 }
 
 static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
-               dma_addr_t iova, struct dma_attrs *attrs)
+               dma_addr_t iova, unsigned long attrs)
 {
        free_pages((unsigned long)vaddr, get_order(size));
        return;
 
 /* need struct page definitions */
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
-#include <linux/dma-attrs.h>
 #include <linux/dma-debug.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
 /* Some dma direct funcs must be visible for use in other dma_ops */
 extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
                                         dma_addr_t *dma_handle, gfp_t flag,
-                                        struct dma_attrs *attrs);
+                                        unsigned long attrs);
 extern void __dma_direct_free_coherent(struct device *dev, size_t size,
                                       void *vaddr, dma_addr_t dma_handle,
-                                      struct dma_attrs *attrs);
+                                      unsigned long attrs);
 extern int dma_direct_mmap_coherent(struct device *dev,
                                    struct vm_area_struct *vma,
                                    void *cpu_addr, dma_addr_t handle,
-                                   size_t size, struct dma_attrs *attrs);
+                                   size_t size, unsigned long attrs);
 
 #ifdef CONFIG_NOT_COHERENT_CACHE
 /*
 
                        long index, long npages,
                        unsigned long uaddr,
                        enum dma_data_direction direction,
-                       struct dma_attrs *attrs);
+                       unsigned long attrs);
 #ifdef CONFIG_IOMMU_API
        /*
         * Exchanges existing TCE with new TCE plus direction bits;
                            struct scatterlist *sglist, int nelems,
                            unsigned long mask,
                            enum dma_data_direction direction,
-                           struct dma_attrs *attrs);
+                           unsigned long attrs);
 extern void ppc_iommu_unmap_sg(struct iommu_table *tbl,
                               struct scatterlist *sglist,
                               int nelems,
                               enum dma_data_direction direction,
-                              struct dma_attrs *attrs);
+                              unsigned long attrs);
 
 extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
                                  size_t size, dma_addr_t *dma_handle,
                                 struct page *page, unsigned long offset,
                                 size_t size, unsigned long mask,
                                 enum dma_data_direction direction,
-                                struct dma_attrs *attrs);
+                                unsigned long attrs);
 extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
                             size_t size, enum dma_data_direction direction,
-                            struct dma_attrs *attrs);
+                            unsigned long attrs);
 
 extern void iommu_init_early_pSeries(void);
 extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
 
  */
 static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
                                      dma_addr_t *dma_handle, gfp_t flag,
-                                     struct dma_attrs *attrs)
+                                     unsigned long attrs)
 {
        return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
                                    dma_handle, dev->coherent_dma_mask, flag,
 
 static void dma_iommu_free_coherent(struct device *dev, size_t size,
                                    void *vaddr, dma_addr_t dma_handle,
-                                   struct dma_attrs *attrs)
+                                   unsigned long attrs)
 {
        iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
 }
 static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction direction,
-                                    struct dma_attrs *attrs)
+                                    unsigned long attrs)
 {
        return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
                              size, device_to_mask(dev), direction, attrs);
 
 static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
                                 size_t size, enum dma_data_direction direction,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
        iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
                         attrs);
 
 static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction,
-                           struct dma_attrs *attrs)
+                           unsigned long attrs)
 {
        return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
                                device_to_mask(dev), direction, attrs);
 
 static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
                int nelems, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
                           direction, attrs);
 
 
 void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flag,
-                                 struct dma_attrs *attrs)
+                                 unsigned long attrs)
 {
        void *ret;
 #ifdef CONFIG_NOT_COHERENT_CACHE
 
 void __dma_direct_free_coherent(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle,
-                               struct dma_attrs *attrs)
+                               unsigned long attrs)
 {
 #ifdef CONFIG_NOT_COHERENT_CACHE
        __dma_free_coherent(size, vaddr);
 
 static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag,
-                                      struct dma_attrs *attrs)
+                                      unsigned long attrs)
 {
        struct iommu_table *iommu;
 
 
 static void dma_direct_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle,
-                                    struct dma_attrs *attrs)
+                                    unsigned long attrs)
 {
        struct iommu_table *iommu;
 
 
 int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                             void *cpu_addr, dma_addr_t handle, size_t size,
-                            struct dma_attrs *attrs)
+                            unsigned long attrs)
 {
        unsigned long pfn;
 
 
 static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
-                            struct dma_attrs *attrs)
+                            unsigned long attrs)
 {
        struct scatterlist *sg;
        int i;
 
 static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction,
-                               struct dma_attrs *attrs)
+                               unsigned long attrs)
 {
 }
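
Even hooks with empty bodies grow the new parameter, because the
corresponding dma_map_ops member changes type for every implementation
at once. For reference, the member now reads roughly as follows (a
sketch of the declaration in linux/dma-mapping.h):

        void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
                         int nents, enum dma_data_direction dir,
                         unsigned long attrs);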
 
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction dir,
-                                            struct dma_attrs *attrs)
+                                            unsigned long attrs)
 {
        BUG_ON(dir == DMA_NONE);
        __dma_sync_page(page, offset, size, dir);
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
-                                        struct dma_attrs *attrs)
+                                        unsigned long attrs)
 {
 }
 
 
                                    size_t size,
                                    dma_addr_t *dma_handle,
                                    gfp_t flag,
-                                   struct dma_attrs *attrs)
+                                   unsigned long attrs)
 {
        void *mem;
 
 static void ibmebus_free_coherent(struct device *dev,
                                  size_t size, void *vaddr,
                                  dma_addr_t dma_handle,
-                                 struct dma_attrs *attrs)
+                                 unsigned long attrs)
 {
        kfree(vaddr);
 }
                                   unsigned long offset,
                                   size_t size,
                                   enum dma_data_direction direction,
-                                  struct dma_attrs *attrs)
+                                  unsigned long attrs)
 {
        return (dma_addr_t)(page_address(page) + offset);
 }
                               dma_addr_t dma_addr,
                               size_t size,
                               enum dma_data_direction direction,
-                              struct dma_attrs *attrs)
+                              unsigned long attrs)
 {
        return;
 }
 static int ibmebus_map_sg(struct device *dev,
                          struct scatterlist *sgl,
                          int nents, enum dma_data_direction direction,
-                         struct dma_attrs *attrs)
+                         unsigned long attrs)
 {
        struct scatterlist *sg;
        int i;
 static void ibmebus_unmap_sg(struct device *dev,
                             struct scatterlist *sg,
                             int nents, enum dma_data_direction direction,
-                            struct dma_attrs *attrs)
+                            unsigned long attrs)
 {
        return;
 }
 
                              void *page, unsigned int npages,
                              enum dma_data_direction direction,
                              unsigned long mask, unsigned int align_order,
-                             struct dma_attrs *attrs)
+                             unsigned long attrs)
 {
        unsigned long entry;
        dma_addr_t ret = DMA_ERROR_CODE;
 int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                     struct scatterlist *sglist, int nelems,
                     unsigned long mask, enum dma_data_direction direction,
-                    struct dma_attrs *attrs)
+                    unsigned long attrs)
 {
        dma_addr_t dma_next = 0, dma_addr;
        struct scatterlist *s, *outs, *segstart;
 
 void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                        int nelems, enum dma_data_direction direction,
-                       struct dma_attrs *attrs)
+                       unsigned long attrs)
 {
        struct scatterlist *sg;
 
 dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
                          struct page *page, unsigned long offset, size_t size,
                          unsigned long mask, enum dma_data_direction direction,
-                         struct dma_attrs *attrs)
+                         unsigned long attrs)
 {
        dma_addr_t dma_handle = DMA_ERROR_CODE;
        void *vaddr;
 
 void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
                      size_t size, enum dma_data_direction direction,
-                     struct dma_attrs *attrs)
+                     unsigned long attrs)
 {
        unsigned int npages;
 
        nio_pages = size >> tbl->it_page_shift;
        io_order = get_iommu_order(size, tbl);
        mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
-                             mask >> tbl->it_page_shift, io_order, NULL);
+                             mask >> tbl->it_page_shift, io_order, 0);
        if (mapping == DMA_ERROR_CODE) {
                free_pages((unsigned long)ret, order);
                return NULL;
 
 
 static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
                                          dma_addr_t *dma_handle, gfp_t flag,
-                                         struct dma_attrs *attrs)
+                                         unsigned long attrs)
 {
        struct vio_dev *viodev = to_vio_dev(dev);
        void *ret;
 
 static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
                                        void *vaddr, dma_addr_t dma_handle,
-                                       struct dma_attrs *attrs)
+                                       unsigned long attrs)
 {
        struct vio_dev *viodev = to_vio_dev(dev);
 
 static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
                                          unsigned long offset, size_t size,
                                          enum dma_data_direction direction,
-                                         struct dma_attrs *attrs)
+                                         unsigned long attrs)
 {
        struct vio_dev *viodev = to_vio_dev(dev);
        struct iommu_table *tbl;
 static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
                                     size_t size,
                                     enum dma_data_direction direction,
-                                    struct dma_attrs *attrs)
+                                    unsigned long attrs)
 {
        struct vio_dev *viodev = to_vio_dev(dev);
        struct iommu_table *tbl;
 
 static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                                 int nelems, enum dma_data_direction direction,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
        struct vio_dev *viodev = to_vio_dev(dev);
        struct iommu_table *tbl;
 static void vio_dma_iommu_unmap_sg(struct device *dev,
                struct scatterlist *sglist, int nelems,
                enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        struct vio_dev *viodev = to_vio_dev(dev);
        struct iommu_table *tbl;
 
 
 static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
                unsigned long uaddr, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        int i;
        unsigned long *io_pte, base_pte;
        base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
                CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask);
 #endif
-       if (unlikely(dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)))
+       if (unlikely(attrs & DMA_ATTR_WEAK_ORDERING))
                base_pte &= ~CBE_IOPTE_SO_RW;
 
        io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
 
        __set_bit(0, window->table.it_map);
        tce_build_cell(&window->table, window->table.it_offset, 1,
-                      (unsigned long)iommu->pad_page, DMA_TO_DEVICE, NULL);
+                      (unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0);
 
        return window;
 }
 
 static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
                                      dma_addr_t *dma_handle, gfp_t flag,
-                                     struct dma_attrs *attrs)
+                                     unsigned long attrs)
 {
        if (iommu_fixed_is_weak)
                return iommu_alloc_coherent(dev, cell_get_iommu_table(dev),
 
 static void dma_fixed_free_coherent(struct device *dev, size_t size,
                                    void *vaddr, dma_addr_t dma_handle,
-                                   struct dma_attrs *attrs)
+                                   unsigned long attrs)
 {
        if (iommu_fixed_is_weak)
                iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
 static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction direction,
-                                    struct dma_attrs *attrs)
+                                    unsigned long attrs)
 {
-       if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
+       if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
                return dma_direct_ops.map_page(dev, page, offset, size,
                                               direction, attrs);
        else
 
 static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
                                 size_t size, enum dma_data_direction direction,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
-       if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
+       if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
                dma_direct_ops.unmap_page(dev, dma_addr, size, direction,
                                          attrs);
        else
 
 static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
                           int nents, enum dma_data_direction direction,
-                          struct dma_attrs *attrs)
+                          unsigned long attrs)
 {
-       if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
+       if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
                return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs);
        else
                return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg,
 
 static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
                               int nents, enum dma_data_direction direction,
-                              struct dma_attrs *attrs)
+                              unsigned long attrs)
 {
-       if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
+       if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
                dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs);
        else
                ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents,
        pciep = of_find_node_by_type(NULL, "pcie-endpoint");
 
        if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0))
-               iommu_fixed_is_weak = 1;
+               iommu_fixed_is_weak = DMA_ATTR_WEAK_ORDERING;
 
        of_node_put(pciep);
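
This is the one place the cell conversion has to adjust a value rather
than a type. dma_fixed_map_page() and friends compare
iommu_fixed_is_weak against (attrs & DMA_ATTR_WEAK_ORDERING), and a
masked bit evaluates to 0 or to the flag's own value, never to 1, so
the variable must hold the flag value itself. A worked illustration,
assuming for the sketch that DMA_ATTR_WEAK_ORDERING is (1UL << 1):

        unsigned long attrs = DMA_ATTR_WEAK_ORDERING;   /* a weak request */

        /* new convention: (1UL << 1) == (attrs & (1UL << 1)) is true,
         * so the weak request takes the direct-ops fast path */
        bool weak_match = (iommu_fixed_is_weak ==
                           (attrs & DMA_ATTR_WEAK_ORDERING));

        /* with the old value 1, the comparison could never be true for
         * a weak request, because 1 != (1UL << 1) */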
 
 
 static int iobmap_build(struct iommu_table *tbl, long index,
                         long npages, unsigned long uaddr,
                         enum dma_data_direction direction,
-                        struct dma_attrs *attrs)
+                        unsigned long attrs)
 {
        u32 *ip;
        u32 rpn;
 
 
 static void *dma_npu_alloc(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flag,
-                          struct dma_attrs *attrs)
+                          unsigned long attrs)
 {
        NPU_DMA_OP_UNSUPPORTED();
        return NULL;
 
 static void dma_npu_free(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle,
-                        struct dma_attrs *attrs)
+                        unsigned long attrs)
 {
        NPU_DMA_OP_UNSUPPORTED();
 }
 static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction direction,
-                                  struct dma_attrs *attrs)
+                                  unsigned long attrs)
 {
        NPU_DMA_OP_UNSUPPORTED();
        return 0;
 
 static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
                          int nelems, enum dma_data_direction direction,
-                         struct dma_attrs *attrs)
+                         unsigned long attrs)
 {
        NPU_DMA_OP_UNSUPPORTED();
        return 0;
 
 static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
                long npages, unsigned long uaddr,
                enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
                        attrs);
 static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
                long npages, unsigned long uaddr,
                enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
                        attrs);
 
 
 int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
                unsigned long uaddr, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        u64 proto_tce = iommu_direction_to_tce_perm(direction);
        u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
 
 extern struct pci_ops pnv_pci_ops;
 extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
                unsigned long uaddr, enum dma_data_direction direction,
-               struct dma_attrs *attrs);
+               unsigned long attrs);
 extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
 extern int pnv_tce_xchg(struct iommu_table *tbl, long index,
                unsigned long *hpa, enum dma_data_direction *direction);
 
  */
 static void * ps3_alloc_coherent(struct device *_dev, size_t size,
                                 dma_addr_t *dma_handle, gfp_t flag,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
        int result;
        struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
 }
 
 static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr,
-                             dma_addr_t dma_handle, struct dma_attrs *attrs)
+                             dma_addr_t dma_handle, unsigned long attrs)
 {
        struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
 
 
 static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction,
-       struct dma_attrs *attrs)
+       unsigned long attrs)
 {
        struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
        int result;
 static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page,
                                    unsigned long offset, size_t size,
                                    enum dma_data_direction direction,
-                                   struct dma_attrs *attrs)
+                                   unsigned long attrs)
 {
        struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
        int result;
 }
 
 static void ps3_unmap_page(struct device *_dev, dma_addr_t dma_addr,
-       size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
+       size_t size, enum dma_data_direction direction, unsigned long attrs)
 {
        struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
        int result;
 }
 
 static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl,
-       int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
+       int nents, enum dma_data_direction direction, unsigned long attrs)
 {
 #if defined(CONFIG_PS3_DYNAMIC_DMA)
        BUG_ON("do");
 static int ps3_ioc0_map_sg(struct device *_dev, struct scatterlist *sg,
                           int nents,
                           enum dma_data_direction direction,
-                          struct dma_attrs *attrs)
+                          unsigned long attrs)
 {
        BUG();
        return 0;
 }
 
 static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg,
-       int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
+       int nents, enum dma_data_direction direction, unsigned long attrs)
 {
 #if defined(CONFIG_PS3_DYNAMIC_DMA)
        BUG_ON("do");
 
 static void ps3_ioc0_unmap_sg(struct device *_dev, struct scatterlist *sg,
                            int nents, enum dma_data_direction direction,
-                           struct dma_attrs *attrs)
+                           unsigned long attrs)
 {
        BUG();
 }
 
 static int tce_build_pSeries(struct iommu_table *tbl, long index,
                              long npages, unsigned long uaddr,
                              enum dma_data_direction direction,
-                             struct dma_attrs *attrs)
+                             unsigned long attrs)
 {
        u64 proto_tce;
        __be64 *tcep, *tces;
 static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
                                long npages, unsigned long uaddr,
                                enum dma_data_direction direction,
-                               struct dma_attrs *attrs)
+                               unsigned long attrs)
 {
        u64 rc = 0;
        u64 proto_tce, tce;
 static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                                     long npages, unsigned long uaddr,
                                     enum dma_data_direction direction,
-                                    struct dma_attrs *attrs)
+                                    unsigned long attrs)
 {
        u64 rc = 0;
        u64 proto_tce;
 
 static int dart_build(struct iommu_table *tbl, long index,
                       long npages, unsigned long uaddr,
                       enum dma_data_direction direction,
-                      struct dma_attrs *attrs)
+                      unsigned long attrs)
 {
        unsigned int *dp, *orig_dp;
        unsigned int rpn;
 
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
-#include <linux/dma-attrs.h>
 #include <linux/dma-debug.h>
 #include <linux/io.h>
 
 
 static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction direction,
-                                    struct dma_attrs *attrs)
+                                    unsigned long attrs)
 {
        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
        unsigned long nr_pages, iommu_page_index;
 
 static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
                                 size_t size, enum dma_data_direction direction,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
        unsigned long iommu_page_index;
 
 static void *s390_dma_alloc(struct device *dev, size_t size,
                            dma_addr_t *dma_handle, gfp_t flag,
-                           struct dma_attrs *attrs)
+                           unsigned long attrs)
 {
        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
        struct page *page;
        pa = page_to_phys(page);
        memset((void *) pa, 0, size);
 
-       map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, NULL);
+       map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
        if (dma_mapping_error(dev, map)) {
                free_pages(pa, get_order(size));
                return NULL;
 
 static void s390_dma_free(struct device *dev, size_t size,
                          void *pa, dma_addr_t dma_handle,
-                         struct dma_attrs *attrs)
+                         unsigned long attrs)
 {
        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 
        size = PAGE_ALIGN(size);
        atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
-       s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
+       s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
        free_pages((unsigned long) pa, get_order(size));
 }
 
 static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
                           int nr_elements, enum dma_data_direction dir,
-                          struct dma_attrs *attrs)
+                          unsigned long attrs)
 {
        int mapped_elements = 0;
        struct scatterlist *s;
        for_each_sg(sg, s, nr_elements, i) {
                struct page *page = sg_page(s);
                s->dma_address = s390_dma_map_pages(dev, page, s->offset,
-                                                   s->length, dir, NULL);
+                                                   s->length, dir, 0);
                if (!dma_mapping_error(dev, s->dma_address)) {
                        s->dma_length = s->length;
                        mapped_elements++;
        for_each_sg(sg, s, mapped_elements, i) {
                if (s->dma_address)
                        s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
-                                            dir, NULL);
+                                            dir, 0);
                s->dma_address = 0;
                s->dma_length = 0;
        }
 
 static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                              int nr_elements, enum dma_data_direction dir,
-                             struct dma_attrs *attrs)
+                             unsigned long attrs)
 {
        struct scatterlist *s;
        int i;
 
        for_each_sg(sg, s, nr_elements, i) {
-               s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
+               s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir,
+                                    0);
                s->dma_address = 0;
                s->dma_length = 0;
        }
 
 /* arch/sh/mm/consistent.c */
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_addr, gfp_t flag,
-                                       struct dma_attrs *attrs);
+                                       unsigned long attrs);
 extern void dma_generic_free_coherent(struct device *dev, size_t size,
                                      void *vaddr, dma_addr_t dma_handle,
-                                     struct dma_attrs *attrs);
+                                     unsigned long attrs);
 
 #endif /* __ASM_SH_DMA_MAPPING_H */
 
 static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction dir,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
        dma_addr_t addr = page_to_phys(page) + offset;
 
 
 static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir,
-                       struct dma_attrs *attrs)
+                       unsigned long attrs)
 {
        struct scatterlist *s;
        int i;
 
 
 void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_handle, gfp_t gfp,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
        void *ret, *ret_nocache;
        int order = get_order(size);
 
 void dma_generic_free_coherent(struct device *dev, size_t size,
                               void *vaddr, dma_addr_t dma_handle,
-                              struct dma_attrs *attrs)
+                              unsigned long attrs)
 {
        int order = get_order(size);
        unsigned long pfn = dma_handle >> PAGE_SHIFT;
 
 
 static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_addrp, gfp_t gfp,
-                                  struct dma_attrs *attrs)
+                                  unsigned long attrs)
 {
        unsigned long order, first_page;
        struct iommu *iommu;
 
 static void dma_4u_free_coherent(struct device *dev, size_t size,
                                 void *cpu, dma_addr_t dvma,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
        struct iommu *iommu;
        unsigned long order, npages;
 static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t sz,
                                  enum dma_data_direction direction,
-                                 struct dma_attrs *attrs)
+                                 unsigned long attrs)
 {
        struct iommu *iommu;
        struct strbuf *strbuf;
 
 static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
                              size_t sz, enum dma_data_direction direction,
-                             struct dma_attrs *attrs)
+                             unsigned long attrs)
 {
        struct iommu *iommu;
        struct strbuf *strbuf;
 
 static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
                         int nelems, enum dma_data_direction direction,
-                        struct dma_attrs *attrs)
+                        unsigned long attrs)
 {
        struct scatterlist *s, *outs, *segstart;
        unsigned long flags, handle, prot, ctx;
 
 static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction,
-                           struct dma_attrs *attrs)
+                           unsigned long attrs)
 {
        unsigned long flags, ctx;
        struct scatterlist *sg;
 
  */
 static void *sbus_alloc_coherent(struct device *dev, size_t len,
                                 dma_addr_t *dma_addrp, gfp_t gfp,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
        struct platform_device *op = to_platform_device(dev);
        unsigned long len_total = PAGE_ALIGN(len);
 }
 
 static void sbus_free_coherent(struct device *dev, size_t n, void *p,
-                              dma_addr_t ba, struct dma_attrs *attrs)
+                              dma_addr_t ba, unsigned long attrs)
 {
        struct resource *res;
        struct page *pgv;
 static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
                                unsigned long offset, size_t len,
                                enum dma_data_direction dir,
-                               struct dma_attrs *attrs)
+                               unsigned long attrs)
 {
        void *va = page_address(page) + offset;
 
 }
 
 static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
-                           enum dma_data_direction dir, struct dma_attrs *attrs)
+                           enum dma_data_direction dir, unsigned long attrs)
 {
        mmu_release_scsi_one(dev, ba, n);
 }
 
 static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
-                      enum dma_data_direction dir, struct dma_attrs *attrs)
+                      enum dma_data_direction dir, unsigned long attrs)
 {
        mmu_get_scsi_sgl(dev, sg, n);
        return n;
 }
 
 static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
-                         enum dma_data_direction dir, struct dma_attrs *attrs)
+                         enum dma_data_direction dir, unsigned long attrs)
 {
        mmu_release_scsi_sgl(dev, sg, n);
 }
  */
 static void *pci32_alloc_coherent(struct device *dev, size_t len,
                                  dma_addr_t *pba, gfp_t gfp,
-                                 struct dma_attrs *attrs)
+                                 unsigned long attrs)
 {
        unsigned long len_total = PAGE_ALIGN(len);
        void *va;
  * past this call are illegal.
  */
 static void pci32_free_coherent(struct device *dev, size_t n, void *p,
-                               dma_addr_t ba, struct dma_attrs *attrs)
+                               dma_addr_t ba, unsigned long attrs)
 {
        struct resource *res;
 
 static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction dir,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
        /* IIep is write-through, not flushing. */
        return page_to_phys(page) + offset;
 }
 
 static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
-                            enum dma_data_direction dir, struct dma_attrs *attrs)
+                            enum dma_data_direction dir, unsigned long attrs)
 {
        if (dir != PCI_DMA_TODEVICE)
                dma_make_coherent(ba, PAGE_ALIGN(size));
  */
 static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
                        int nents, enum dma_data_direction dir,
-                       struct dma_attrs *attrs)
+                       unsigned long attrs)
 {
        struct scatterlist *sg;
        int n;
  */
 static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
                           int nents, enum dma_data_direction dir,
-                          struct dma_attrs *attrs)
+                          unsigned long attrs)
 {
        struct scatterlist *sg;
        int n;
 
 
 static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_addrp, gfp_t gfp,
-                                  struct dma_attrs *attrs)
+                                  unsigned long attrs)
 {
        unsigned long flags, order, first_page, npages, n;
        struct iommu *iommu;
 }
 
 static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
-                                dma_addr_t dvma, struct dma_attrs *attrs)
+                                dma_addr_t dvma, unsigned long attrs)
 {
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
 static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t sz,
                                  enum dma_data_direction direction,
-                                 struct dma_attrs *attrs)
+                                 unsigned long attrs)
 {
        struct iommu *iommu;
        unsigned long flags, npages, oaddr;
 
 static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
                              size_t sz, enum dma_data_direction direction,
-                             struct dma_attrs *attrs)
+                             unsigned long attrs)
 {
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
 
 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                         int nelems, enum dma_data_direction direction,
-                        struct dma_attrs *attrs)
+                        unsigned long attrs)
 {
        struct scatterlist *s, *outs, *segstart;
        unsigned long flags, handle, prot;
 
 static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction,
-                           struct dma_attrs *attrs)
+                           unsigned long attrs)
 {
        struct pci_pbm_info *pbm;
        struct scatterlist *sg;
 
 
 static void *tile_dma_alloc_coherent(struct device *dev, size_t size,
                                     dma_addr_t *dma_handle, gfp_t gfp,
-                                    struct dma_attrs *attrs)
+                                    unsigned long attrs)
 {
        u64 dma_mask = (dev && dev->coherent_dma_mask) ?
                dev->coherent_dma_mask : DMA_BIT_MASK(32);
  */
 static void tile_dma_free_coherent(struct device *dev, size_t size,
                                   void *vaddr, dma_addr_t dma_handle,
-                                  struct dma_attrs *attrs)
+                                  unsigned long attrs)
 {
        homecache_free_pages((unsigned long)vaddr, get_order(size));
 }
 
 static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                           int nents, enum dma_data_direction direction,
-                          struct dma_attrs *attrs)
+                          unsigned long attrs)
 {
        struct scatterlist *sg;
        int i;
 
 static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                              int nents, enum dma_data_direction direction,
-                             struct dma_attrs *attrs)
+                             unsigned long attrs)
 {
        struct scatterlist *sg;
        int i;
 static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page,
                                    unsigned long offset, size_t size,
                                    enum dma_data_direction direction,
-                                   struct dma_attrs *attrs)
+                                   unsigned long attrs)
 {
        BUG_ON(!valid_dma_direction(direction));
 
 
 static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                                size_t size, enum dma_data_direction direction,
-                               struct dma_attrs *attrs)
+                               unsigned long attrs)
 {
        BUG_ON(!valid_dma_direction(direction));
 
 
 static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size,
                                         dma_addr_t *dma_handle, gfp_t gfp,
-                                        struct dma_attrs *attrs)
+                                        unsigned long attrs)
 {
        int node = dev_to_node(dev);
        int order = get_order(size);
  */
 static void tile_pci_dma_free_coherent(struct device *dev, size_t size,
                                       void *vaddr, dma_addr_t dma_handle,
-                                      struct dma_attrs *attrs)
+                                      unsigned long attrs)
 {
        homecache_free_pages((unsigned long)vaddr, get_order(size));
 }
 
 static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                               int nents, enum dma_data_direction direction,
-                              struct dma_attrs *attrs)
+                              unsigned long attrs)
 {
        struct scatterlist *sg;
        int i;
 static void tile_pci_dma_unmap_sg(struct device *dev,
                                  struct scatterlist *sglist, int nents,
                                  enum dma_data_direction direction,
-                                 struct dma_attrs *attrs)
+                                 unsigned long attrs)
 {
        struct scatterlist *sg;
        int i;
 static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page,
                                        unsigned long offset, size_t size,
                                        enum dma_data_direction direction,
-                                       struct dma_attrs *attrs)
+                                       unsigned long attrs)
 {
        BUG_ON(!valid_dma_direction(direction));
 
 static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                                    size_t size,
                                    enum dma_data_direction direction,
-                                   struct dma_attrs *attrs)
+                                   unsigned long attrs)
 {
        BUG_ON(!valid_dma_direction(direction));
 
 #ifdef CONFIG_SWIOTLB
 static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size,
                                         dma_addr_t *dma_handle, gfp_t gfp,
-                                        struct dma_attrs *attrs)
+                                        unsigned long attrs)
 {
        gfp |= GFP_DMA;
        return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
 
 static void tile_swiotlb_free_coherent(struct device *dev, size_t size,
                                       void *vaddr, dma_addr_t dma_addr,
-                                      struct dma_attrs *attrs)
+                                      unsigned long attrs)
 {
        swiotlb_free_coherent(dev, size, vaddr, dma_addr);
 }
 
 
 static void *unicore_swiotlb_alloc_coherent(struct device *dev, size_t size,
                                            dma_addr_t *dma_handle, gfp_t flags,
-                                           struct dma_attrs *attrs)
+                                           unsigned long attrs)
 {
        return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
 }
 
 static void unicore_swiotlb_free_coherent(struct device *dev, size_t size,
                                          void *vaddr, dma_addr_t dma_addr,
-                                         struct dma_attrs *attrs)
+                                         unsigned long attrs)
 {
        swiotlb_free_coherent(dev, size, vaddr, dma_addr);
 }
 
 #include <linux/kmemcheck.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-debug.h>
-#include <linux/dma-attrs.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
 #include <linux/dma-contiguous.h>
 
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_addr, gfp_t flag,
-                                       struct dma_attrs *attrs);
+                                       unsigned long attrs);
 
 extern void dma_generic_free_coherent(struct device *dev, size_t size,
                                      void *vaddr, dma_addr_t dma_addr,
-                                     struct dma_attrs *attrs);
+                                     unsigned long attrs);
 
 #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
 extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
 
 
 extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                                        dma_addr_t *dma_handle, gfp_t flags,
-                                       struct dma_attrs *attrs);
+                                       unsigned long attrs);
 extern void x86_swiotlb_free_coherent(struct device *dev, size_t size,
                                        void *vaddr, dma_addr_t dma_addr,
-                                       struct dma_attrs *attrs);
+                                       unsigned long attrs);
 
 #endif /* _ASM_X86_SWIOTLB_H */
 
 #define _ASM_X86_XEN_PAGE_COHERENT_H
 
 #include <asm/page.h>
-#include <linux/dma-attrs.h>
 #include <linux/dma-mapping.h>
 
 static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
                dma_addr_t *dma_handle, gfp_t flags,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        void *vstart = (void*)__get_free_pages(flags, get_order(size));
        *dma_handle = virt_to_phys(vstart);
 
 static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
                void *cpu_addr, dma_addr_t dma_handle,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        free_pages((unsigned long) cpu_addr, get_order(size));
 }
 
 static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
             dma_addr_t dev_addr, unsigned long offset, size_t size,
-            enum dma_data_direction dir, struct dma_attrs *attrs) { }
+            enum dma_data_direction dir, unsigned long attrs) { }
 
 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
-               struct dma_attrs *attrs) { }
+               unsigned long attrs) { }
 
 static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
 
 static dma_addr_t gart_map_page(struct device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction dir,
-                               struct dma_attrs *attrs)
+                               unsigned long attrs)
 {
        unsigned long bus;
        phys_addr_t paddr = page_to_phys(page) + offset;
  */
 static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
                            size_t size, enum dma_data_direction dir,
-                           struct dma_attrs *attrs)
+                           unsigned long attrs)
 {
        unsigned long iommu_page;
        int npages;
  * Wrapper for pci_unmap_single working with scatterlists.
  */
 static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-                         enum dma_data_direction dir, struct dma_attrs *attrs)
+                         enum dma_data_direction dir, unsigned long attrs)
 {
        struct scatterlist *s;
        int i;
        for_each_sg(sg, s, nents, i) {
                if (!s->dma_length || !s->length)
                        break;
-               gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
+               gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
        }
 }
 
                        addr = dma_map_area(dev, addr, s->length, dir, 0);
                        if (addr == bad_dma_addr) {
                                if (i > 0)
-                                       gart_unmap_sg(dev, sg, i, dir, NULL);
+                                       gart_unmap_sg(dev, sg, i, dir, 0);
                                nents = 0;
                                sg[0].dma_length = 0;
                                break;
  * Merge chunks that have page aligned sizes into a contiguous mapping.
  */
 static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-                      enum dma_data_direction dir, struct dma_attrs *attrs)
+                      enum dma_data_direction dir, unsigned long attrs)
 {
        struct scatterlist *s, *ps, *start_sg, *sgmap;
        int need = 0, nextneed, i, out, start;
 
 error:
        flush_gart();
-       gart_unmap_sg(dev, sg, out, dir, NULL);
+       gart_unmap_sg(dev, sg, out, dir, 0);
 
        /* When it was forced or merged try again in a dumb way */
        if (force_iommu || iommu_merge) {
 /* allocate and map a coherent mapping */
 static void *
 gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
-                   gfp_t flag, struct dma_attrs *attrs)
+                   gfp_t flag, unsigned long attrs)
 {
        dma_addr_t paddr;
        unsigned long align_mask;
 /* free a coherent mapping */
 static void
 gart_free_coherent(struct device *dev, size_t size, void *vaddr,
-                  dma_addr_t dma_addr, struct dma_attrs *attrs)
+                  dma_addr_t dma_addr, unsigned long attrs)
 {
-       gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
+       gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
        dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
 }
 
 
 
 static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist,
                             int nelems, enum dma_data_direction dir,
-                            struct dma_attrs *attrs)
+                            unsigned long attrs)
 {
        struct iommu_table *tbl = find_iommu_table(dev);
        struct scatterlist *s;
 
 static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
                          int nelems, enum dma_data_direction dir,
-                         struct dma_attrs *attrs)
+                         unsigned long attrs)
 {
        struct iommu_table *tbl = find_iommu_table(dev);
        struct scatterlist *s;
 
        return nelems;
 error:
-       calgary_unmap_sg(dev, sg, nelems, dir, NULL);
+       calgary_unmap_sg(dev, sg, nelems, dir, 0);
        for_each_sg(sg, s, nelems, i) {
                sg->dma_address = DMA_ERROR_CODE;
                sg->dma_length = 0;
 static dma_addr_t calgary_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
-                                  struct dma_attrs *attrs)
+                                  unsigned long attrs)
 {
        void *vaddr = page_address(page) + offset;
        unsigned long uaddr;
 
 static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
                               size_t size, enum dma_data_direction dir,
-                              struct dma_attrs *attrs)
+                              unsigned long attrs)
 {
        struct iommu_table *tbl = find_iommu_table(dev);
        unsigned int npages;
 }
 
 static void *calgary_alloc_coherent(struct device *dev, size_t size,
-       dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
+       dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
 {
        void *ret = NULL;
        dma_addr_t mapping;
 
 static void calgary_free_coherent(struct device *dev, size_t size,
                                  void *vaddr, dma_addr_t dma_handle,
-                                 struct dma_attrs *attrs)
+                                 unsigned long attrs)
 {
        unsigned int npages;
        struct iommu_table *tbl = find_iommu_table(dev);
 
 }
 void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t flag,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
        unsigned long dma_mask;
        struct page *page;
 }
 
 void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
-                              dma_addr_t dma_addr, struct dma_attrs *attrs)
+                              dma_addr_t dma_addr, unsigned long attrs)
 {
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page *page = virt_to_page(vaddr);
 
 static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction dir,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
        dma_addr_t bus = page_to_phys(page) + offset;
        WARN_ON(size == 0);
  */
 static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir,
-                       struct dma_attrs *attrs)
+                       unsigned long attrs)
 {
        struct scatterlist *s;
        int i;
 
 
 void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                                        dma_addr_t *dma_handle, gfp_t flags,
-                                       struct dma_attrs *attrs)
+                                       unsigned long attrs)
 {
        void *vaddr;
 
 
 void x86_swiotlb_free_coherent(struct device *dev, size_t size,
                                      void *vaddr, dma_addr_t dma_addr,
-                                     struct dma_attrs *attrs)
+                                     unsigned long attrs)
 {
        if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
                swiotlb_free_coherent(dev, size, vaddr, dma_addr);
 
                                            size_t size,
                                            dma_addr_t *dma_handle,
                                            gfp_t flags,
-                                           struct dma_attrs *attrs)
+                                           unsigned long attrs)
 {
        void *vaddr;
 
 
 }
 
 static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
-                      gfp_t flag, struct dma_attrs *attrs)
+                      gfp_t flag, unsigned long attrs)
 {
        return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag,
                                       attrs);
 }
 
 static void vmd_free(struct device *dev, size_t size, void *vaddr,
-                    dma_addr_t addr, struct dma_attrs *attrs)
+                    dma_addr_t addr, unsigned long attrs)
 {
        return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr,
                                      attrs);
 
 static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
                    void *cpu_addr, dma_addr_t addr, size_t size,
-                   struct dma_attrs *attrs)
+                   unsigned long attrs)
 {
        return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr,
                                      size, attrs);
 
 static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
                           void *cpu_addr, dma_addr_t addr, size_t size,
-                          struct dma_attrs *attrs)
+                          unsigned long attrs)
 {
        return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr,
                                             addr, size, attrs);
 static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
                               unsigned long offset, size_t size,
                               enum dma_data_direction dir,
-                              struct dma_attrs *attrs)
+                              unsigned long attrs)
 {
        return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size,
                                          dir, attrs);
 }
 
 static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
-                          enum dma_data_direction dir, struct dma_attrs *attrs)
+                          enum dma_data_direction dir, unsigned long attrs)
 {
        vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs);
 }
 
 static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-                     enum dma_data_direction dir, struct dma_attrs *attrs)
+                     enum dma_data_direction dir, unsigned long attrs)
 {
        return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
 }
 
 static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-                        enum dma_data_direction dir, struct dma_attrs *attrs)
+                        enum dma_data_direction dir, unsigned long attrs)
 {
        vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
 }
 
 
 static void *xtensa_dma_alloc(struct device *dev, size_t size,
                              dma_addr_t *handle, gfp_t flag,
-                             struct dma_attrs *attrs)
+                             unsigned long attrs)
 {
        unsigned long ret;
        unsigned long uncached = 0;
 }
 
 static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr,
-                           dma_addr_t dma_handle, struct dma_attrs *attrs)
+                           dma_addr_t dma_handle, unsigned long attrs)
 {
        unsigned long addr = (unsigned long)vaddr +
                XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
 static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t size,
                                  enum dma_data_direction dir,
-                                 struct dma_attrs *attrs)
+                                 unsigned long attrs)
 {
        dma_addr_t dma_handle = page_to_phys(page) + offset;
 
 
 static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle,
                              size_t size, enum dma_data_direction dir,
-                             struct dma_attrs *attrs)
+                             unsigned long attrs)
 {
        xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
 }
 
 static int xtensa_map_sg(struct device *dev, struct scatterlist *sg,
                         int nents, enum dma_data_direction dir,
-                        struct dma_attrs *attrs)
+                        unsigned long attrs)
 {
        struct scatterlist *s;
        int i;
 static void xtensa_unmap_sg(struct device *dev,
                            struct scatterlist *sg, int nents,
                            enum dma_data_direction dir,
-                           struct dma_attrs *attrs)
+                           unsigned long attrs)
 {
        struct scatterlist *s;
        int i;
 
 
        ret = dma_mmap_attrs(to_dma_dev(helper->dev), vma, exynos_gem->cookie,
                             exynos_gem->dma_addr, exynos_gem->size,
-                            &exynos_gem->dma_attrs);
+                            exynos_gem->dma_attrs);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
 
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 #include <linux/dma-mapping.h>
-#include <linux/dma-attrs.h>
 #include <linux/of.h>
 
 #include <drm/drmP.h>
        struct mutex                    cmdlist_mutex;
        dma_addr_t                      cmdlist_pool;
        void                            *cmdlist_pool_virt;
-       struct dma_attrs                cmdlist_dma_attrs;
+       unsigned long                   cmdlist_dma_attrs;
 
        /* runqueue*/
        struct g2d_runqueue_node        *runqueue_node;
        int ret;
        struct g2d_buf_info *buf_info;
 
-       init_dma_attrs(&g2d->cmdlist_dma_attrs);
-       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
+       g2d->cmdlist_dma_attrs = DMA_ATTR_WRITE_COMBINE;
 
        g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(subdrv->drm_dev),
                                                G2D_CMDLIST_POOL_SIZE,
                                                &g2d->cmdlist_pool, GFP_KERNEL,
-                                               &g2d->cmdlist_dma_attrs);
+                                               g2d->cmdlist_dma_attrs);
        if (!g2d->cmdlist_pool_virt) {
                dev_err(dev, "failed to allocate dma memory\n");
                return -ENOMEM;
 err:
        dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE,
                        g2d->cmdlist_pool_virt,
-                       g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
+                       g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
        return ret;
 }
 
                dma_free_attrs(to_dma_dev(subdrv->drm_dev),
                                G2D_CMDLIST_POOL_SIZE,
                                g2d->cmdlist_pool_virt,
-                               g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
+                               g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
        }
 }
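
The g2d code above (like the mtk and rockchip GEM code further down)
keeps the attributes in driver-private state across the allocate, mmap
and free paths. With the scalar type this reduces to a plain field and
ordinary assignments; a minimal sketch using hypothetical names
(struct foo_gem, foo_dev):

	struct foo_gem {
		void *cookie;
		dma_addr_t dma_addr;
		unsigned long dma_attrs;	/* was: struct dma_attrs */
	};

	foo->dma_attrs = DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_KERNEL_MAPPING;
	foo->cookie = dma_alloc_attrs(foo_dev, size, &foo->dma_addr,
				      GFP_KERNEL, foo->dma_attrs);
	....
	dma_free_attrs(foo_dev, size, foo->cookie, foo->dma_addr,
		       foo->dma_attrs);

Note that the same attribute value used at allocation time is passed
back on the free path, as the drivers below do.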
 
 
 static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
 {
        struct drm_device *dev = exynos_gem->base.dev;
-       enum dma_attr attr;
+       unsigned long attr;
        unsigned int nr_pages;
        struct sg_table sgt;
        int ret = -ENOMEM;
                return 0;
        }
 
-       init_dma_attrs(&exynos_gem->dma_attrs);
+       exynos_gem->dma_attrs = 0;
 
        /*
         * if EXYNOS_BO_CONTIG, fully physically contiguous memory
         * as possible.
         */
        if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
-               dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &exynos_gem->dma_attrs);
+               exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
 
        /*
         * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
        else
                attr = DMA_ATTR_NON_CONSISTENT;
 
-       dma_set_attr(attr, &exynos_gem->dma_attrs);
-       dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &exynos_gem->dma_attrs);
+       exynos_gem->dma_attrs |= attr;
+       exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
 
        nr_pages = exynos_gem->size >> PAGE_SHIFT;
 
 
        exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
                                             &exynos_gem->dma_addr, GFP_KERNEL,
-                                            &exynos_gem->dma_attrs);
+                                            exynos_gem->dma_attrs);
        if (!exynos_gem->cookie) {
                DRM_ERROR("failed to allocate buffer.\n");
                goto err_free;
 
        ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
                                    exynos_gem->dma_addr, exynos_gem->size,
-                                   &exynos_gem->dma_attrs);
+                                   exynos_gem->dma_attrs);
        if (ret < 0) {
                DRM_ERROR("failed to get sgtable.\n");
                goto err_dma_free;
        sg_free_table(&sgt);
 err_dma_free:
        dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
-                      exynos_gem->dma_addr, &exynos_gem->dma_attrs);
+                      exynos_gem->dma_addr, exynos_gem->dma_attrs);
 err_free:
        drm_free_large(exynos_gem->pages);
 
 
        dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
                        (dma_addr_t)exynos_gem->dma_addr,
-                       &exynos_gem->dma_attrs);
+                       exynos_gem->dma_attrs);
 
        drm_free_large(exynos_gem->pages);
 }
 
        ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
                             exynos_gem->dma_addr, exynos_gem->size,
-                            &exynos_gem->dma_attrs);
+                            exynos_gem->dma_attrs);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
 
        void                    *cookie;
        void __iomem            *kvaddr;
        dma_addr_t              dma_addr;
-       struct dma_attrs        dma_attrs;
+       unsigned long           dma_attrs;
        struct page             **pages;
        struct sg_table         *sgt;
 };
 
 
        obj = &mtk_gem->base;
 
-       init_dma_attrs(&mtk_gem->dma_attrs);
-       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &mtk_gem->dma_attrs);
+       mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE;
 
        if (!alloc_kmap)
-               dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &mtk_gem->dma_attrs);
+               mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
 
        mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size,
                                          &mtk_gem->dma_addr, GFP_KERNEL,
-                                         &mtk_gem->dma_attrs);
+                                         mtk_gem->dma_attrs);
        if (!mtk_gem->cookie) {
                DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size);
                ret = -ENOMEM;
                drm_prime_gem_destroy(obj, mtk_gem->sg);
        else
                dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie,
-                              mtk_gem->dma_addr, &mtk_gem->dma_attrs);
+                              mtk_gem->dma_addr, mtk_gem->dma_attrs);
 
        /* release file pointer to gem object. */
        drm_gem_object_release(obj);
        vma->vm_pgoff = 0;
 
        ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
-                            mtk_gem->dma_addr, obj->size, &mtk_gem->dma_attrs);
+                            mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
        if (ret)
                drm_gem_vm_close(vma);
 
 
        ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie,
                                    mtk_gem->dma_addr, obj->size,
-                                   &mtk_gem->dma_attrs);
+                                   mtk_gem->dma_attrs);
        if (ret) {
                DRM_ERROR("failed to allocate sgt, %d\n", ret);
                kfree(sgt);
 
        void                    *cookie;
        void                    *kvaddr;
        dma_addr_t              dma_addr;
-       struct dma_attrs        dma_attrs;
+       unsigned long           dma_attrs;
        struct sg_table         *sg;
 };
 
 
        }
 
        if (priv->vram.paddr) {
-               DEFINE_DMA_ATTRS(attrs);
-               dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+               unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
                drm_mm_takedown(&priv->vram.mm);
                dma_free_attrs(dev, priv->vram.size, NULL,
-                              priv->vram.paddr, &attrs);
+                              priv->vram.paddr, attrs);
        }
 
        component_unbind_all(dev, ddev);
        }
 
        if (size) {
-               DEFINE_DMA_ATTRS(attrs);
+               unsigned long attrs = 0;
                void *p;
 
                priv->vram.size = size;
 
                drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
 
-               dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
-               dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+               attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
+               attrs |= DMA_ATTR_WRITE_COMBINE;
 
                /* note that for no-kernel-mapping, the vaddr returned
                 * is bogus, but non-null if allocation succeeded:
                 */
                p = dma_alloc_attrs(dev->dev, size,
-                               &priv->vram.paddr, GFP_KERNEL, &attrs);
+                               &priv->vram.paddr, GFP_KERNEL, attrs);
                if (!p) {
                        dev_err(dev->dev, "failed to allocate VRAM\n");
                        priv->vram.paddr = 0;
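
The comment above captures the practical meaning of
DMA_ATTR_NO_KERNEL_MAPPING: the pointer returned by dma_alloc_attrs()
is only an opaque cookie to be handed back to dma_free_attrs() or
dma_mmap_attrs(), never dereferenced. A minimal sketch of that
contract, assuming a device dev and a buffer of size bytes:

	unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
	dma_addr_t paddr;
	void *cookie;

	cookie = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL, attrs);
	if (cookie) {
		/* cookie identifies the buffer but is not a usable
		 * virtual address; the device works on paddr, and CPU
		 * access would require dma_mmap_attrs() */
		....
		dma_free_attrs(dev, size, cookie, paddr, attrs);
	}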
 
        u16 iommu_bit;
 
        /* Only used by DMA API */
-       struct dma_attrs attrs;
+       unsigned long attrs;
 };
 #define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
 
                goto out;
 
        dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr,
-                      node->handle, &imem->attrs);
+                      node->handle, imem->attrs);
 
 out:
        return node;
 
        node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
                                           &node->handle, GFP_KERNEL,
-                                          &imem->attrs);
+                                          imem->attrs);
        if (!node->base.vaddr) {
                nvkm_error(subdev, "cannot allocate DMA memory\n");
                return -ENOMEM;
 
                nvkm_info(&imem->base.subdev, "using IOMMU\n");
        } else {
-               init_dma_attrs(&imem->attrs);
-               dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
-               dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
-               dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
+               imem->attrs = DMA_ATTR_NON_CONSISTENT |
+                             DMA_ATTR_WEAK_ORDERING |
+                             DMA_ATTR_WRITE_COMBINE;
 
                nvkm_info(&imem->base.subdev, "using DMA API\n");
        }
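
The three-way OR above works because each attribute is now a distinct
bit in the unsigned long mask. In the converted linux/dma-mapping.h the
attribute constants are defined along these lines (a sketch of the
post-conversion header, not a verbatim copy):

	#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
	#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
	#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
	#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
	#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
	#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
	#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
	#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)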
 
 #include <drm/drm_gem.h>
 #include <drm/drm_vma_manager.h>
 
-#include <linux/dma-attrs.h>
-
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_gem.h"
 
        struct drm_gem_object *obj = &rk_obj->base;
        struct drm_device *drm = obj->dev;
 
-       init_dma_attrs(&rk_obj->dma_attrs);
-       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs);
+       rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;
 
        if (!alloc_kmap)
-               dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs);
+               rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
 
        rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
                                         &rk_obj->dma_addr, GFP_KERNEL,
-                                        &rk_obj->dma_attrs);
+                                        rk_obj->dma_attrs);
        if (!rk_obj->kvaddr) {
                DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
                return -ENOMEM;
        struct drm_device *drm = obj->dev;
 
        dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
-                      &rk_obj->dma_attrs);
+                      rk_obj->dma_attrs);
 }
 
 static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
        vma->vm_pgoff = 0;
 
        ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
-                            obj->size, &rk_obj->dma_attrs);
+                            obj->size, rk_obj->dma_attrs);
        if (ret)
                drm_gem_vm_close(vma);
 
 
        ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
                                    rk_obj->dma_addr, obj->size,
-                                   &rk_obj->dma_attrs);
+                                   rk_obj->dma_attrs);
        if (ret) {
                DRM_ERROR("failed to allocate sgt, %d\n", ret);
                kfree(sgt);
 {
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
 
-       if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs))
+       if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
                return NULL;
 
        return rk_obj->kvaddr;
 
 
        void *kvaddr;
        dma_addr_t dma_addr;
-       struct dma_attrs dma_attrs;
+       unsigned long dma_attrs;
 };
 
 struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj);
 
 #include <linux/sched.h>
 #include <linux/export.h>
 #include <linux/hugetlb.h>
-#include <linux/dma-attrs.h>
 #include <linux/slab.h>
 #include <rdma/ib_umem_odp.h>
 
        unsigned long npages;
        int ret;
        int i;
-       DEFINE_DMA_ATTRS(attrs);
+       unsigned long dma_attrs = 0;
        struct scatterlist *sg, *sg_list_start;
        int need_release = 0;
 
        if (dmasync)
-               dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+               dma_attrs |= DMA_ATTR_WRITE_BARRIER;
 
        if (!size)
                return ERR_PTR(-EINVAL);
                                  umem->sg_head.sgl,
                                  umem->npages,
                                  DMA_BIDIRECTIONAL,
-                                 &attrs);
+                                 dma_attrs);
 
        if (umem->nmap <= 0) {
                ret = -ENOMEM;
 
 static dma_addr_t map_page(struct device *dev, struct page *page,
                           unsigned long offset, size_t size,
                           enum dma_data_direction dir,
-                          struct dma_attrs *attrs)
+                          unsigned long attrs)
 {
        phys_addr_t paddr = page_to_phys(page) + offset;
        struct protection_domain *domain;
  * The exported unmap_single function for dma_ops.
  */
 static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
-                      enum dma_data_direction dir, struct dma_attrs *attrs)
+                      enum dma_data_direction dir, unsigned long attrs)
 {
        struct protection_domain *domain;
        struct dma_ops_domain *dma_dom;
  */
 static int map_sg(struct device *dev, struct scatterlist *sglist,
                  int nelems, enum dma_data_direction direction,
-                 struct dma_attrs *attrs)
+                 unsigned long attrs)
 {
        int mapped_pages = 0, npages = 0, prot = 0, i;
        struct protection_domain *domain;
  */
 static void unmap_sg(struct device *dev, struct scatterlist *sglist,
                     int nelems, enum dma_data_direction dir,
-                    struct dma_attrs *attrs)
+                    unsigned long attrs)
 {
        struct protection_domain *domain;
        struct dma_ops_domain *dma_dom;
  */
 static void *alloc_coherent(struct device *dev, size_t size,
                            dma_addr_t *dma_addr, gfp_t flag,
-                           struct dma_attrs *attrs)
+                           unsigned long attrs)
 {
        u64 dma_mask = dev->coherent_dma_mask;
        struct protection_domain *domain;
  */
 static void free_coherent(struct device *dev, size_t size,
                          void *virt_addr, dma_addr_t dma_addr,
-                         struct dma_attrs *attrs)
+                         unsigned long attrs)
 {
        struct protection_domain *domain;
        struct dma_ops_domain *dma_dom;
 
  *        or NULL on failure.
  */
 struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
-               struct dma_attrs *attrs, int prot, dma_addr_t *handle,
+               unsigned long attrs, int prot, dma_addr_t *handle,
                void (*flush_page)(struct device *, const void *, phys_addr_t))
 {
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        } else {
                size = ALIGN(size, min_size);
        }
-       if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs))
+       if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
                alloc_sizes = min_size;
 
        count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 }
 
 void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
-               enum dma_data_direction dir, struct dma_attrs *attrs)
+               enum dma_data_direction dir, unsigned long attrs)
 {
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
 }
 }
 
 void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-               enum dma_data_direction dir, struct dma_attrs *attrs)
+               enum dma_data_direction dir, unsigned long attrs)
 {
        /*
         * The scatterlist segments are mapped into a single
 
 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction dir,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
        return __intel_map_single(dev, page_to_phys(page) + offset, size,
                                  dir, *dev->dma_mask);
 
 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
                             size_t size, enum dma_data_direction dir,
-                            struct dma_attrs *attrs)
+                            unsigned long attrs)
 {
        intel_unmap(dev, dev_addr, size);
 }
 
 static void *intel_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags,
-                                 struct dma_attrs *attrs)
+                                 unsigned long attrs)
 {
        struct page *page = NULL;
        int order;
 }
 
 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
-                               dma_addr_t dma_handle, struct dma_attrs *attrs)
+                               dma_addr_t dma_handle, unsigned long attrs)
 {
        int order;
        struct page *page = virt_to_page(vaddr);
 
 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
                           int nelems, enum dma_data_direction dir,
-                          struct dma_attrs *attrs)
+                          unsigned long attrs)
 {
        dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
        unsigned long nrpages = 0;
 }
 
 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
-                       enum dma_data_direction dir, struct dma_attrs *attrs)
+                       enum dma_data_direction dir, unsigned long attrs)
 {
        int i;
        struct dmar_domain *domain;
 
  */
 void bdisp_hw_free_nodes(struct bdisp_ctx *ctx)
 {
-       if (ctx && ctx->node[0]) {
-               DEFINE_DMA_ATTRS(attrs);
-
-               dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+       if (ctx && ctx->node[0])
                dma_free_attrs(ctx->bdisp_dev->dev,
                               sizeof(struct bdisp_node) * MAX_NB_NODE,
-                              ctx->node[0], ctx->node_paddr[0], &attrs);
-       }
+                              ctx->node[0], ctx->node_paddr[0],
+                              DMA_ATTR_WRITE_COMBINE);
 }
 
 /**
        unsigned int i, node_size = sizeof(struct bdisp_node);
        void *base;
        dma_addr_t paddr;
-       DEFINE_DMA_ATTRS(attrs);
 
        /* Allocate all the nodes within a single memory page */
-       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
        base = dma_alloc_attrs(dev, node_size * MAX_NB_NODE, &paddr,
-                              GFP_KERNEL | GFP_DMA, &attrs);
+                              GFP_KERNEL | GFP_DMA, DMA_ATTR_WRITE_COMBINE);
        if (!base) {
                dev_err(dev, "%s no mem\n", __func__);
                return -ENOMEM;
 {
        int size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);
 
-       if (bdisp_h_filter[0].virt) {
-               DEFINE_DMA_ATTRS(attrs);
-
-               dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+       if (bdisp_h_filter[0].virt)
                dma_free_attrs(dev, size, bdisp_h_filter[0].virt,
-                              bdisp_h_filter[0].paddr, &attrs);
-       }
+                              bdisp_h_filter[0].paddr, DMA_ATTR_WRITE_COMBINE);
 }
 
 /**
        unsigned int i, size;
        void *base;
        dma_addr_t paddr;
-       DEFINE_DMA_ATTRS(attrs);
 
        /* Allocate all the filters within a single memory page */
        size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);
-       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
-       base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA, &attrs);
+       base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA,
+                              DMA_ATTR_WRITE_COMBINE);
        if (!base)
                return -ENOMEM;
 
 
        unsigned long                   size;
        void                            *cookie;
        dma_addr_t                      dma_addr;
-       struct dma_attrs                attrs;
+       unsigned long                   attrs;
        enum dma_data_direction         dma_dir;
        struct sg_table                 *dma_sgt;
        struct frame_vector             *vec;
                kfree(buf->sgt_base);
        }
        dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
-                       &buf->attrs);
+                      buf->attrs);
        put_device(buf->dev);
        kfree(buf);
 }
 
-static void *vb2_dc_alloc(struct device *dev, const struct dma_attrs *attrs,
+static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
                          unsigned long size, enum dma_data_direction dma_dir,
                          gfp_t gfp_flags)
 {
                return ERR_PTR(-ENOMEM);
 
        if (attrs)
-               buf->attrs = *attrs;
+               buf->attrs = attrs;
        buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
-                                       GFP_KERNEL | gfp_flags, &buf->attrs);
+                                       GFP_KERNEL | gfp_flags, buf->attrs);
        if (!buf->cookie) {
                dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }
 
-       if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->attrs))
+       if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
                buf->vaddr = buf->cookie;
 
        /* Prevent the device from being released while the buffer is used */
        vma->vm_pgoff = 0;
 
        ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
-               buf->dma_addr, buf->size, &buf->attrs);
+               buf->dma_addr, buf->size, buf->attrs);
 
        if (ret) {
                pr_err("Remapping memory failed, error: %d\n", ret);
        }
 
        ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
-               buf->size, &buf->attrs);
+               buf->size, buf->attrs);
        if (ret < 0) {
                dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
                kfree(sgt);
        struct page **pages;
 
        if (sgt) {
-               DEFINE_DMA_ATTRS(attrs);
-
-               dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
                /*
                 * No need to sync to CPU, it's already synced to the CPU
                 * since the finish() memop will have been called before this.
                 */
                dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-                                  buf->dma_dir, &attrs);
+                                  buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
                pages = frame_vector_pages(buf->vec);
                /* sgt should exist only if vector contains pages... */
                BUG_ON(IS_ERR(pages));
        struct sg_table *sgt;
        unsigned long contig_size;
        unsigned long dma_align = dma_get_cache_alignment();
-       DEFINE_DMA_ATTRS(attrs);
-
-       dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
        /* Only cache aligned DMA transfers are reliable */
        if (!IS_ALIGNED(vaddr | size, dma_align)) {
         * prepare() memop is called.
         */
        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-                                     buf->dma_dir, &attrs);
+                                     buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (sgt->nents <= 0) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
 
 fail_map_sg:
        dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-                          buf->dma_dir, &attrs);
+                          buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 
 fail_sgt_init:
        sg_free_table(sgt);
 
        return 0;
 }
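
As the comments in the preceding hunks note, videobuf2 maps its
scatterlists with DMA_ATTR_SKIP_CPU_SYNC and does the cache maintenance
itself from the prepare()/finish() memops, once the buffer actually
changes hands. The idiom, sketched with generic names (dev, sgt, dir):

	/* map once, skipping the implicit CPU cache sync */
	sgt->nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir,
				      DMA_ATTR_SKIP_CPU_SYNC);
	....
	/* sync explicitly when handing the buffer to the device */
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
	....
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir,
			   DMA_ATTR_SKIP_CPU_SYNC);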
 
-static void *vb2_dma_sg_alloc(struct device *dev, const struct dma_attrs *dma_attrs,
+static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
                              unsigned long size, enum dma_data_direction dma_dir,
                              gfp_t gfp_flags)
 {
        struct sg_table *sgt;
        int ret;
        int num_pages;
-       DEFINE_DMA_ATTRS(attrs);
-
-       dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
        if (WARN_ON(dev == NULL))
                return NULL;
         * prepare() memop is called.
         */
        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-                                     buf->dma_dir, &attrs);
+                                     buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (!sgt->nents)
                goto fail_map;
 
        int i = buf->num_pages;
 
        if (atomic_dec_and_test(&buf->refcount)) {
-               DEFINE_DMA_ATTRS(attrs);
-
-               dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
                dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
                        buf->num_pages);
                dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-                                  buf->dma_dir, &attrs);
+                                  buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
                if (buf->vaddr)
                        vm_unmap_ram(buf->vaddr, buf->num_pages);
                sg_free_table(buf->dma_sgt);
 {
        struct vb2_dma_sg_buf *buf;
        struct sg_table *sgt;
-       DEFINE_DMA_ATTRS(attrs);
        struct frame_vector *vec;
 
-       dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return NULL;
         * prepare() memop is called.
         */
        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-                                     buf->dma_dir, &attrs);
+                                     buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (!sgt->nents)
                goto userptr_fail_map;
 
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = &buf->sg_table;
        int i = buf->num_pages;
-       DEFINE_DMA_ATTRS(attrs);
-
-       dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
        dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
               __func__, buf->num_pages);
        dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
-                          &attrs);
+                          DMA_ATTR_SKIP_CPU_SYNC);
        if (buf->vaddr)
                vm_unmap_ram(buf->vaddr, buf->num_pages);
        sg_free_table(buf->dma_sgt);
 
 
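The videobuf2 hunks above all follow one mechanical pattern: the on-stack struct dma_attrs plus its dma_set_attr() call collapse into a plain bitmask passed directly at the call site. A minimal before/after sketch, using the names from the hunks above:

        /* before: attributes lived in an on-stack struct dma_attrs */
        DEFINE_DMA_ATTRS(attrs);
        dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                      buf->dma_dir, &attrs);

        /* after: attributes are bits in an unsigned long */
        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
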
 static void vb2_vmalloc_put(void *buf_priv);
 
-static void *vb2_vmalloc_alloc(struct device *dev, const struct dma_attrs *attrs,
+static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
                               unsigned long size, enum dma_data_direction dma_dir,
                               gfp_t gfp_flags)
 {
 
 static dma_addr_t
 _mic_dma_map_page(struct device *dev, struct page *page,
                  unsigned long offset, size_t size,
-                 enum dma_data_direction dir, struct dma_attrs *attrs)
+                 enum dma_data_direction dir, unsigned long attrs)
 {
        void *va = phys_to_virt(page_to_phys(page)) + offset;
        struct mic_device *mdev = vpdev_to_mdev(dev);
 
 static void _mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
                                size_t size, enum dma_data_direction dir,
-                               struct dma_attrs *attrs)
+                               unsigned long attrs)
 {
        struct mic_device *mdev = vpdev_to_mdev(dev);
 
 
 static void *__mic_dma_alloc(struct device *dev, size_t size,
                             dma_addr_t *dma_handle, gfp_t gfp,
-                            struct dma_attrs *attrs)
+                            unsigned long attrs)
 {
        struct scif_hw_dev *scdev = dev_get_drvdata(dev);
        struct mic_device *mdev = scdev_to_mdev(scdev);
 }
 
 static void __mic_dma_free(struct device *dev, size_t size, void *vaddr,
-                          dma_addr_t dma_handle, struct dma_attrs *attrs)
+                          dma_addr_t dma_handle, unsigned long attrs)
 {
        struct scif_hw_dev *scdev = dev_get_drvdata(dev);
        struct mic_device *mdev = scdev_to_mdev(scdev);
 static dma_addr_t
 __mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset,
                   size_t size, enum dma_data_direction dir,
-                  struct dma_attrs *attrs)
+                  unsigned long attrs)
 {
        void *va = phys_to_virt(page_to_phys(page)) + offset;
        struct scif_hw_dev *scdev = dev_get_drvdata(dev);
 static void
 __mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
                     size_t size, enum dma_data_direction dir,
-                    struct dma_attrs *attrs)
+                    unsigned long attrs)
 {
        struct scif_hw_dev *scdev = dev_get_drvdata(dev);
        struct mic_device *mdev = scdev_to_mdev(scdev);
 
 static int __mic_dma_map_sg(struct device *dev, struct scatterlist *sg,
                            int nents, enum dma_data_direction dir,
-                           struct dma_attrs *attrs)
+                           unsigned long attrs)
 {
        struct scif_hw_dev *scdev = dev_get_drvdata(dev);
        struct mic_device *mdev = scdev_to_mdev(scdev);
 static void __mic_dma_unmap_sg(struct device *dev,
                               struct scatterlist *sg, int nents,
                               enum dma_data_direction dir,
-                              struct dma_attrs *attrs)
+                              unsigned long attrs)
 {
        struct scif_hw_dev *scdev = dev_get_drvdata(dev);
        struct mic_device *mdev = scdev_to_mdev(scdev);
 static dma_addr_t
 mic_dma_map_page(struct device *dev, struct page *page,
                 unsigned long offset, size_t size, enum dma_data_direction dir,
-                struct dma_attrs *attrs)
+                unsigned long attrs)
 {
        void *va = phys_to_virt(page_to_phys(page)) + offset;
        struct mic_device *mdev = dev_get_drvdata(dev->parent);
 static void
 mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
                   size_t size, enum dma_data_direction dir,
-                  struct dma_attrs *attrs)
+                  unsigned long attrs)
 {
        struct mic_device *mdev = dev_get_drvdata(dev->parent);
        mic_unmap_single(mdev, dma_addr, size);
 
 static dma_addr_t
 ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
                size_t size, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        return ccio_map_single(dev, page_address(page) + offset, size,
                        direction);
  */
 static void 
 ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
-               enum dma_data_direction direction, struct dma_attrs *attrs)
+               enum dma_data_direction direction, unsigned long attrs)
 {
        struct ioc *ioc;
        unsigned long flags; 
  */
 static void * 
 ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
       void *ret;
 #if 0
  */
 static void 
 ccio_free(struct device *dev, size_t size, void *cpu_addr,
-               dma_addr_t dma_handle, struct dma_attrs *attrs)
+               dma_addr_t dma_handle, unsigned long attrs)
 {
-       ccio_unmap_page(dev, dma_handle, size, 0, NULL);
+       ccio_unmap_page(dev, dma_handle, size, 0, 0);
        free_pages((unsigned long)cpu_addr, get_order(size));
 }
 
  */
 static int
 ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents, 
-           enum dma_data_direction direction, struct dma_attrs *attrs)
+           enum dma_data_direction direction, unsigned long attrs)
 {
        struct ioc *ioc;
        int coalesced, filled = 0;
  */
 static void 
 ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, 
-             enum dma_data_direction direction, struct dma_attrs *attrs)
+             enum dma_data_direction direction, unsigned long attrs)
 {
        struct ioc *ioc;
 
                ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
 #endif
                ccio_unmap_page(dev, sg_dma_address(sglist),
-                                 sg_dma_len(sglist), direction, NULL);
+                                 sg_dma_len(sglist), direction, 0);
                ++sglist;
        }
 
 
 static dma_addr_t
 sba_map_page(struct device *dev, struct page *page, unsigned long offset,
                size_t size, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
+               unsigned long attrs)
 {
        return sba_map_single(dev, page_address(page) + offset, size,
                        direction);
  */
 static void
 sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
-               enum dma_data_direction direction, struct dma_attrs *attrs)
+               enum dma_data_direction direction, unsigned long attrs)
 {
        struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
  * See Documentation/DMA-API-HOWTO.txt
  */
 static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
-               gfp_t gfp, struct dma_attrs *attrs)
+               gfp_t gfp, unsigned long attrs)
 {
        void *ret;
 
  */
 static void
 sba_free(struct device *hwdev, size_t size, void *vaddr,
-                   dma_addr_t dma_handle, struct dma_attrs *attrs)
+                   dma_addr_t dma_handle, unsigned long attrs)
 {
-       sba_unmap_page(hwdev, dma_handle, size, 0, NULL);
+       sba_unmap_page(hwdev, dma_handle, size, 0, 0);
        free_pages((unsigned long) vaddr, get_order(size));
 }
 
  */
 static int
 sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
-          enum dma_data_direction direction, struct dma_attrs *attrs)
+          enum dma_data_direction direction, unsigned long attrs)
 {
        struct ioc *ioc;
        int coalesced, filled = 0;
  */
 static void 
 sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
-            enum dma_data_direction direction, struct dma_attrs *attrs)
+            enum dma_data_direction direction, unsigned long attrs)
 {
        struct ioc *ioc;
 #ifdef ASSERT_PDIR_SANITY
        while (sg_dma_len(sglist) && nents--) {
 
                sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
-                               direction, NULL);
+                               direction, 0);
 #ifdef SBA_COLLECT_STATS
                ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
                ioc->usingle_calls--;   /* kluge since call is unmap_sg() */
 
 
 static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
 {
-       DEFINE_DMA_ATTRS(attrs);
+       unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
        dma_addr_t phys;
        void *ptr;
        int ret;
 
-       dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);
-       ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, &attrs);
+       ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs);
        if (!ptr) {
                dev_err(qproc->dev, "failed to allocate mdt buffer\n");
                return -ENOMEM;
        else if (ret < 0)
                dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);
 
-       dma_free_attrs(qproc->dev, fw->size, ptr, phys, &attrs);
+       dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs);
 
        return ret < 0 ? ret : 0;
 }
 
        }
 
        dma_free_attrs(fbdev->dev, rg->size, rg->token, rg->dma_handle,
-                       &rg->attrs);
+                       rg->attrs);
 
        rg->token = NULL;
        rg->vaddr = NULL;
        struct omapfb2_device *fbdev = ofbi->fbdev;
        struct omapfb2_mem_region *rg;
        void *token;
-       DEFINE_DMA_ATTRS(attrs);
+       unsigned long attrs;
        dma_addr_t dma_handle;
        int r;
 
 
        size = PAGE_ALIGN(size);
 
-       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+       attrs = DMA_ATTR_WRITE_COMBINE;
 
        if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
-               dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+               attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
 
        DBG("allocating %lu bytes for fb %d\n", size, ofbi->id);
 
        token = dma_alloc_attrs(fbdev->dev, size, &dma_handle,
-                       GFP_KERNEL, &attrs);
+                       GFP_KERNEL, attrs);
 
        if (token == NULL) {
                dev_err(fbdev->dev, "failed to allocate framebuffer\n");
                r = omap_vrfb_request_ctx(&rg->vrfb);
                if (r) {
                        dma_free_attrs(fbdev->dev, size, token, dma_handle,
-                                       &attrs);
+                                       attrs);
                        dev_err(fbdev->dev, "vrfb create ctx failed\n");
                        return r;
                }
 
 #endif
 
 #include <linux/rwsem.h>
-#include <linux/dma-attrs.h>
 #include <linux/dma-mapping.h>
 
 #include <video/omapfb_dss.h>
 
 struct omapfb2_mem_region {
        int             id;
-       struct dma_attrs attrs;
+       unsigned long   attrs;
        void            *token;
        dma_addr_t      dma_handle;
        u32             paddr;
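
When the allocation and the free happen in different functions, the mask is simply stored in the owning structure, which is what the new unsigned long attrs member above is for. A minimal sketch of that pattern (my_region and rg are hypothetical stand-ins for the omapfb types):

        struct my_region {
                unsigned long   attrs;
                void            *token;
                dma_addr_t      dma_handle;
        };

        /* allocation path: record the mask that was used */
        rg->attrs = DMA_ATTR_WRITE_COMBINE;
        rg->token = dma_alloc_attrs(dev, size, &rg->dma_handle,
                                    GFP_KERNEL, rg->attrs);
        ....
        /* free path: dma_free_attrs() must see the same mask */
        dma_free_attrs(dev, size, rg->token, rg->dma_handle, rg->attrs);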
 
 void *
 xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
-                          struct dma_attrs *attrs)
+                          unsigned long attrs)
 {
        void *ret;
        int order = get_order(size);
 
 void
 xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-                         dma_addr_t dev_addr, struct dma_attrs *attrs)
+                         dma_addr_t dev_addr, unsigned long attrs)
 {
        int order = get_order(size);
        phys_addr_t phys;
 dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction dir,
-                               struct dma_attrs *attrs)
+                               unsigned long attrs)
 {
        phys_addr_t map, phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = xen_phys_to_bus(phys);
  */
 static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
                             size_t size, enum dma_data_direction dir,
-                                struct dma_attrs *attrs)
+                            unsigned long attrs)
 {
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);
 
 
 void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                            size_t size, enum dma_data_direction dir,
-                           struct dma_attrs *attrs)
+                           unsigned long attrs)
 {
        xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
 }
 int
 xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                         int nelems, enum dma_data_direction dir,
-                        struct dma_attrs *attrs)
+                        unsigned long attrs)
 {
        struct scatterlist *sg;
        int i;
 void
 xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                           int nelems, enum dma_data_direction dir,
-                          struct dma_attrs *attrs)
+                          unsigned long attrs)
 {
        struct scatterlist *sg;
        int i;
 
+++ /dev/null
-#ifndef _DMA_ATTR_H
-#define _DMA_ATTR_H
-
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
-#include <linux/bug.h>
-
-/**
- * an enum dma_attr represents an attribute associated with a DMA
- * mapping. The semantics of each attribute should be defined in
- * Documentation/DMA-attributes.txt.
- */
-enum dma_attr {
-       DMA_ATTR_WRITE_BARRIER,
-       DMA_ATTR_WEAK_ORDERING,
-       DMA_ATTR_WRITE_COMBINE,
-       DMA_ATTR_NON_CONSISTENT,
-       DMA_ATTR_NO_KERNEL_MAPPING,
-       DMA_ATTR_SKIP_CPU_SYNC,
-       DMA_ATTR_FORCE_CONTIGUOUS,
-       DMA_ATTR_ALLOC_SINGLE_PAGES,
-       DMA_ATTR_MAX,
-};
-
-#define __DMA_ATTRS_LONGS BITS_TO_LONGS(DMA_ATTR_MAX)
-
-/**
- * struct dma_attrs - an opaque container for DMA attributes
- * @flags - bitmask representing a collection of enum dma_attr
- */
-struct dma_attrs {
-       unsigned long flags[__DMA_ATTRS_LONGS];
-};
-
-#define DEFINE_DMA_ATTRS(x)                                    \
-       struct dma_attrs x = {                                  \
-               .flags = { [0 ... __DMA_ATTRS_LONGS-1] = 0 },   \
-       }
-
-static inline void init_dma_attrs(struct dma_attrs *attrs)
-{
-       bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS);
-}
-
-/**
- * dma_set_attr - set a specific attribute
- * @attr: attribute to set
- * @attrs: struct dma_attrs (may be NULL)
- */
-static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
-{
-       if (attrs == NULL)
-               return;
-       BUG_ON(attr >= DMA_ATTR_MAX);
-       __set_bit(attr, attrs->flags);
-}
-
-/**
- * dma_get_attr - check for a specific attribute
- * @attr: attribute to set
- * @attrs: struct dma_attrs (may be NULL)
- */
-static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
-{
-       if (attrs == NULL)
-               return 0;
-       BUG_ON(attr >= DMA_ATTR_MAX);
-       return test_bit(attr, attrs->flags);
-}
-
-#endif /* _DMA_ATTR_H */
 
  * the arch code to take care of attributes and cache maintenance
  */
 struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
-               struct dma_attrs *attrs, int prot, dma_addr_t *handle,
+               unsigned long attrs, int prot, dma_addr_t *handle,
                void (*flush_page)(struct device *, const void *, phys_addr_t));
 void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
                dma_addr_t *handle);
  * directly as DMA mapping callbacks for simplicity
  */
 void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
-               enum dma_data_direction dir, struct dma_attrs *attrs);
+               enum dma_data_direction dir, unsigned long attrs);
 void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-               enum dma_data_direction dir, struct dma_attrs *attrs);
+               enum dma_data_direction dir, unsigned long attrs);
 int iommu_dma_supported(struct device *dev, u64 mask);
 int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 
 
 #include <linux/string.h>
 #include <linux/device.h>
 #include <linux/err.h>
-#include <linux/dma-attrs.h>
 #include <linux/dma-debug.h>
 #include <linux/dma-direction.h>
 #include <linux/scatterlist.h>
 #include <linux/kmemcheck.h>
 #include <linux/bug.h>
 
+/**
+ * List of possible attributes associated with a DMA mapping. The semantics
+ * of each attribute should be defined in Documentation/DMA-attributes.txt.
+ *
+ * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
+ * forces all pending DMA writes to complete.
+ */
+#define DMA_ATTR_WRITE_BARRIER         (1UL << 0)
+/*
+ * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
+ * may be weakly ordered; that is, reads and writes may pass each other.
+ */
+#define DMA_ATTR_WEAK_ORDERING         (1UL << 1)
+/*
+ * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
+ * buffered to improve performance.
+ */
+#define DMA_ATTR_WRITE_COMBINE         (1UL << 2)
+/*
+ * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
+ * consistent or non-consistent memory as it sees fit.
+ */
+#define DMA_ATTR_NON_CONSISTENT                (1UL << 3)
+/*
+ * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
+ * virtual mapping for the allocated buffer.
+ */
+#define DMA_ATTR_NO_KERNEL_MAPPING     (1UL << 4)
+/*
+ * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
+ * the CPU cache for the given buffer, assuming that it has already been
+ * transferred to the 'device' domain.
+ */
+#define DMA_ATTR_SKIP_CPU_SYNC         (1UL << 5)
+/*
+ * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
+ * in physical memory.
+ */
+#define DMA_ATTR_FORCE_CONTIGUOUS      (1UL << 6)
+/*
+ * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
+ * that it's probably not worth the time to try to allocate memory in a way
+ * that gives better TLB efficiency.
+ */
+#define DMA_ATTR_ALLOC_SINGLE_PAGES    (1UL << 7)
+
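Since each attribute now occupies a single bit, callers build a mask with bitwise OR and implementations test it with bitwise AND. A minimal sketch (the surrounding variables and the condition are hypothetical):

        unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

        if (!need_kernel_mapping)       /* hypothetical condition */
                attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

        buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, attrs);
        ....
        /* on the implementation side: */
        if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
                /* skip setting up a kernel virtual address */
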
 /*
  * A dma_addr_t can hold any valid DMA or bus address for the platform.
  * It can be given to a device to use as a DMA source or target.  A CPU cannot
 struct dma_map_ops {
        void* (*alloc)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp,
-                               struct dma_attrs *attrs);
+                               unsigned long attrs);
        void (*free)(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle,
-                             struct dma_attrs *attrs);
+                             unsigned long attrs);
        int (*mmap)(struct device *, struct vm_area_struct *,
-                         void *, dma_addr_t, size_t, struct dma_attrs *attrs);
+                         void *, dma_addr_t, size_t,
+                         unsigned long attrs);
 
        int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
-                          dma_addr_t, size_t, struct dma_attrs *attrs);
+                          dma_addr_t, size_t, unsigned long attrs);
 
        dma_addr_t (*map_page)(struct device *dev, struct page *page,
                               unsigned long offset, size_t size,
                               enum dma_data_direction dir,
-                              struct dma_attrs *attrs);
+                              unsigned long attrs);
        void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction dir,
-                          struct dma_attrs *attrs);
+                          unsigned long attrs);
        /*
         * map_sg returns 0 on error and a value > 0 on success.
         * It should never return a value < 0.
         */
        int (*map_sg)(struct device *dev, struct scatterlist *sg,
                      int nents, enum dma_data_direction dir,
-                     struct dma_attrs *attrs);
+                     unsigned long attrs);
        void (*unmap_sg)(struct device *dev,
                         struct scatterlist *sg, int nents,
                         enum dma_data_direction dir,
-                        struct dma_attrs *attrs);
+                        unsigned long attrs);
        void (*sync_single_for_cpu)(struct device *dev,
                                    dma_addr_t dma_handle, size_t size,
                                    enum dma_data_direction dir);
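
Per the map_sg comment above, a failed mapping is reported as 0, never as a negative value, so the caller-side check is a plain test for zero (a minimal sketch):

        ents = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, 0);
        if (!ents)
                return -EIO;    /* nothing was mapped, nothing to unmap */
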
 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
                                              size_t size,
                                              enum dma_data_direction dir,
-                                             struct dma_attrs *attrs)
+                                             unsigned long attrs)
 {
        struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;
 static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
                                          size_t size,
                                          enum dma_data_direction dir,
-                                         struct dma_attrs *attrs)
+                                         unsigned long attrs)
 {
        struct dma_map_ops *ops = get_dma_ops(dev);
 
  */
 static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                                   int nents, enum dma_data_direction dir,
-                                  struct dma_attrs *attrs)
+                                  unsigned long attrs)
 {
        struct dma_map_ops *ops = get_dma_ops(dev);
        int i, ents;
 
 static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
                                      int nents, enum dma_data_direction dir,
-                                     struct dma_attrs *attrs)
+                                     unsigned long attrs)
 {
        struct dma_map_ops *ops = get_dma_ops(dev);
 
 
        kmemcheck_mark_initialized(page_address(page) + offset, size);
        BUG_ON(!valid_dma_direction(dir));
-       addr = ops->map_page(dev, page, offset, size, dir, NULL);
+       addr = ops->map_page(dev, page, offset, size, dir, 0);
        debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 
        return addr;
 
        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
-               ops->unmap_page(dev, addr, size, dir, NULL);
+               ops->unmap_page(dev, addr, size, dir, 0);
        debug_dma_unmap_page(dev, addr, size, dir, false);
 }
 
 
 }
 
-#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
-#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
-#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
-#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
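
Because the no-attribute case is now the literal 0 rather than a NULL pointer, these short-hand wrappers remain drop-in compatible. A minimal usage sketch:

        dma_addr_t addr;

        addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, addr))
                return -ENOMEM;
        ....
        dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);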
 
 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                           void *cpu_addr, dma_addr_t dma_addr, size_t size);
  */
 static inline int
 dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
-              dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+              dma_addr_t dma_addr, size_t size, unsigned long attrs)
 {
        struct dma_map_ops *ops = get_dma_ops(dev);
        BUG_ON(!ops);
        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
 }
 
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
 
 int
 dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
 
 static inline int
 dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
-                     dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+                     dma_addr_t dma_addr, size_t size,
+                     unsigned long attrs)
 {
        struct dma_map_ops *ops = get_dma_ops(dev);
        BUG_ON(!ops);
        return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
 }
 
-#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
 
 #ifndef arch_dma_alloc_attrs
 #define arch_dma_alloc_attrs(dev, flag)        (true)
 
 static inline void *dma_alloc_attrs(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag,
-                                      struct dma_attrs *attrs)
+                                      unsigned long attrs)
 {
        struct dma_map_ops *ops = get_dma_ops(dev);
        void *cpu_addr;
 
 static inline void dma_free_attrs(struct device *dev, size_t size,
                                     void *cpu_addr, dma_addr_t dma_handle,
-                                    struct dma_attrs *attrs)
+                                    unsigned long attrs)
 {
        struct dma_map_ops *ops = get_dma_ops(dev);
 
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t flag)
 {
-       return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
+       return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
 }
 
 static inline void dma_free_coherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_handle)
 {
-       return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
+       return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
 }
 
 static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp)
 {
-       DEFINE_DMA_ATTRS(attrs);
-
-       dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
-       return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
+       return dma_alloc_attrs(dev, size, dma_handle, gfp,
+                              DMA_ATTR_NON_CONSISTENT);
 }
 
 static inline void dma_free_noncoherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_handle)
 {
-       DEFINE_DMA_ATTRS(attrs);
-
-       dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
-       dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+       dma_free_attrs(dev, size, cpu_addr, dma_handle,
+                      DMA_ATTR_NON_CONSISTENT);
 }
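
dma_alloc_noncoherent() is thus reduced to dma_alloc_attrs() with DMA_ATTR_NON_CONSISTENT ORed in. Users of such memory still have to order CPU accesses explicitly, e.g. via dma_cache_sync() as described in DMA-API.txt; a minimal sketch:

        vaddr = dma_alloc_noncoherent(dev, size, &handle, GFP_KERNEL);
        ....
        /* make CPU writes visible before handing the buffer to the device */
        dma_cache_sync(dev, vaddr, size, DMA_TO_DEVICE);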
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 static inline void *dma_alloc_wc(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t gfp)
 {
-       DEFINE_DMA_ATTRS(attrs);
-       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
-       return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
+       return dma_alloc_attrs(dev, size, dma_addr, gfp,
+                              DMA_ATTR_WRITE_COMBINE);
 }
 #ifndef dma_alloc_writecombine
 #define dma_alloc_writecombine dma_alloc_wc
 static inline void dma_free_wc(struct device *dev, size_t size,
                               void *cpu_addr, dma_addr_t dma_addr)
 {
-       DEFINE_DMA_ATTRS(attrs);
-       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
-       return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
+       return dma_free_attrs(dev, size, cpu_addr, dma_addr,
+                             DMA_ATTR_WRITE_COMBINE);
 }
 #ifndef dma_free_writecombine
 #define dma_free_writecombine dma_free_wc
                              void *cpu_addr, dma_addr_t dma_addr,
                              size_t size)
 {
-       DEFINE_DMA_ATTRS(attrs);
-       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
-       return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+       return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
+                             DMA_ATTR_WRITE_COMBINE);
 }
 #ifndef dma_mmap_writecombine
 #define dma_mmap_writecombine dma_mmap_wc
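
The write-combine helpers likewise expand to the _attrs variants with DMA_ATTR_WRITE_COMBINE. A typical pairing of the allocator with the matching mmap helper might look like this (fb and its fields are hypothetical driver state):

        fb->vaddr = dma_alloc_wc(dev, fb->size, &fb->dma_handle, GFP_KERNEL);
        ....
        /* later, in the driver's mmap handler: */
        return dma_mmap_wc(dev, vma, fb->vaddr, fb->dma_handle, fb->size);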
 
 #include <linux/types.h>
 
 struct device;
-struct dma_attrs;
 struct page;
 struct scatterlist;
 
 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
-                                  struct dma_attrs *attrs);
+                                  unsigned long attrs);
 extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
-                              struct dma_attrs *attrs);
+                              unsigned long attrs);
 
 extern int
 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
 
 extern int
 swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
-                    enum dma_data_direction dir, struct dma_attrs *attrs);
+                    enum dma_data_direction dir,
+                    unsigned long attrs);
 
 extern void
 swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                       int nelems, enum dma_data_direction dir,
-                      struct dma_attrs *attrs);
+                      unsigned long attrs);
 
 extern void
 swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
 
  *    #) Required ops for DMABUF types: attach_dmabuf, detach_dmabuf, map_dmabuf, unmap_dmabuf.
  */
 struct vb2_mem_ops {
-       void            *(*alloc)(struct device *dev, const struct dma_attrs *attrs,
+       void            *(*alloc)(struct device *dev, unsigned long attrs,
                                  unsigned long size, enum dma_data_direction dma_dir,
                                  gfp_t gfp_flags);
        void            (*put)(void *buf_priv);
  * @io_modes:  supported io methods (see vb2_io_modes enum)
  * @dev:       device to use for the default allocation context if the driver
  *             doesn't fill in the @alloc_devs array.
- * @dma_attrs: DMA attributes to use for the DMA. May be NULL.
+ * @dma_attrs: DMA attributes to use for the DMA.
  * @fileio_read_once:          report EOF after reading the first buffer
  * @fileio_write_immediately:  queue buffer after each write() call
  * @allow_zero_bytesused:      allow bytesused == 0 to be passed to the driver
        unsigned int                    type;
        unsigned int                    io_modes;
        struct device                   *dev;
-       const struct dma_attrs          *dma_attrs;
+       unsigned long                   dma_attrs;
        unsigned                        fileio_read_once:1;
        unsigned                        fileio_write_immediately:1;
        unsigned                        allow_zero_bytesused:1;
 
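A videobuf2 driver now opts into attributes by filling this field with a mask instead of pointing it at a struct. A minimal sketch of the queue setup (the surrounding driver context is hypothetical):

        q->dev = &pdev->dev;
        q->dma_attrs = DMA_ATTR_NO_KERNEL_MAPPING;
        ret = vb2_queue_init(q);
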
 #include <media/videobuf2-v4l2.h>
 #include <linux/dma-mapping.h>
 
-struct dma_attrs;
-
 static inline dma_addr_t
 vb2_dma_contig_plane_dma_addr(struct vb2_buffer *vb, unsigned int plane_no)
 {
 
 static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
                                          void *cpu_addr, size_t size,
                                          enum dma_data_direction direction,
-                                         struct dma_attrs *attrs)
+                                         unsigned long dma_attrs)
 {
        return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
-                                   direction, attrs);
+                                   direction, dma_attrs);
 }
 
 static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
                                             u64 addr, size_t size,
                                             enum dma_data_direction direction,
-                                            struct dma_attrs *attrs)
+                                            unsigned long dma_attrs)
 {
        return dma_unmap_single_attrs(dev->dma_device, addr, size,
-                                     direction, attrs);
+                                     direction, dma_attrs);
 }
 
 /**
 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
                                      struct scatterlist *sg, int nents,
                                      enum dma_data_direction direction,
-                                     struct dma_attrs *attrs)
+                                     unsigned long dma_attrs)
 {
-       return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
+       return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
+                               dma_attrs);
 }
 
 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
                                         struct scatterlist *sg, int nents,
                                         enum dma_data_direction direction,
-                                        struct dma_attrs *attrs)
+                                        unsigned long dma_attrs)
 {
-       dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
+       dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
 }
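
The ib_dma_* wrappers forward the mask unchanged to the core helpers, so a verbs consumer can request, say, weak ordering the same way as any other caller (a hypothetical sketch):

        n = ib_dma_map_sg_attrs(ibdev, sg, nents, DMA_TO_DEVICE,
                                DMA_ATTR_WEAK_ORDERING);
        if (!n)
                return -ENOMEM;
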
 /**
  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 
 extern void
 *xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                            dma_addr_t *dma_handle, gfp_t flags,
-                           struct dma_attrs *attrs);
+                           unsigned long attrs);
 
 extern void
 xen_swiotlb_free_coherent(struct device *hwdev, size_t size,
                          void *vaddr, dma_addr_t dma_handle,
-                         struct dma_attrs *attrs);
+                         unsigned long attrs);
 
 extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
                                       unsigned long offset, size_t size,
                                       enum dma_data_direction dir,
-                                      struct dma_attrs *attrs);
+                                      unsigned long attrs);
 
 extern void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                                   size_t size, enum dma_data_direction dir,
-                                  struct dma_attrs *attrs);
+                                  unsigned long attrs);
 extern int
 xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                         int nelems, enum dma_data_direction dir,
-                        struct dma_attrs *attrs);
+                        unsigned long attrs);
 
 extern void
 xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                           int nelems, enum dma_data_direction dir,
-                          struct dma_attrs *attrs);
+                          unsigned long attrs);
 
 extern void
 xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
 
 
 static void *dma_noop_alloc(struct device *dev, size_t size,
                            dma_addr_t *dma_handle, gfp_t gfp,
-                           struct dma_attrs *attrs)
+                           unsigned long attrs)
 {
        void *ret;
 
 
 static void dma_noop_free(struct device *dev, size_t size,
                          void *cpu_addr, dma_addr_t dma_addr,
-                         struct dma_attrs *attrs)
+                         unsigned long attrs)
 {
        free_pages((unsigned long)cpu_addr, get_order(size));
 }
 static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction dir,
-                                     struct dma_attrs *attrs)
+                                     unsigned long attrs)
 {
        return page_to_phys(page) + offset;
 }
 
 static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
-                            enum dma_data_direction dir, struct dma_attrs *attrs)
+                            enum dma_data_direction dir,
+                            unsigned long attrs)
 {
        int i;
        struct scatterlist *sg;
 
 dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
                            unsigned long offset, size_t size,
                            enum dma_data_direction dir,
-                           struct dma_attrs *attrs)
+                           unsigned long attrs)
 {
        phys_addr_t map, phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = phys_to_dma(dev, phys);
 
 void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                        size_t size, enum dma_data_direction dir,
-                       struct dma_attrs *attrs)
+                       unsigned long attrs)
 {
        unmap_single(hwdev, dev_addr, size, dir);
 }
  */
 int
 swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
-                    enum dma_data_direction dir, struct dma_attrs *attrs)
+                    enum dma_data_direction dir, unsigned long attrs)
 {
        struct scatterlist *sg;
        int i;
 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
               enum dma_data_direction dir)
 {
-       return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+       return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, 0);
 }
 EXPORT_SYMBOL(swiotlb_map_sg);
 
  */
 void
 swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
-                      int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
+                      int nelems, enum dma_data_direction dir,
+                      unsigned long attrs)
 {
        struct scatterlist *sg;
        int i;
 swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
                 enum dma_data_direction dir)
 {
-       return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+       return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, 0);
 }
 EXPORT_SYMBOL(swiotlb_unmap_sg);