}
        }
 
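+       /* Only this slow path still takes the domain lock */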
+       spin_lock_irqsave(&dma_dom->domain.lock, flags);
+
        /* First take the bitmap_lock and then publish the range */
-       spin_lock_irqsave(&range->bitmap_lock, flags);
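+       /* IRQs are already disabled by the domain lock above */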
+       spin_lock(&range->bitmap_lock);
 
        old_size                 = dma_dom->aperture_size;
        dma_dom->aperture[index] = range;
 
        update_domain(&dma_dom->domain);
 
-       spin_unlock_irqrestore(&range->bitmap_lock, flags);
+       spin_unlock(&range->bitmap_lock);
+
+       spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
 
        return 0;
 
                           enum dma_data_direction dir,
                           struct dma_attrs *attrs)
 {
-       unsigned long flags;
+       phys_addr_t paddr = page_to_phys(page) + offset;
        struct protection_domain *domain;
-       dma_addr_t addr;
        u64 dma_mask;
-       phys_addr_t paddr = page_to_phys(page) + offset;
 
        INC_STATS_COUNTER(cnt_map_single);
 
 
        dma_mask = *dev->dma_mask;
 
-       spin_lock_irqsave(&domain->lock, flags);
-
-       addr = __map_single(dev, domain->priv, paddr, size, dir, false,
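+       /* __map_single() returns DMA_ERROR_CODE on failure */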
+       return __map_single(dev, domain->priv, paddr, size, dir, false,
                            dma_mask);
-       if (addr == DMA_ERROR_CODE)
-               goto out;
-
-       domain_flush_complete(domain);
-
-out:
-       spin_unlock_irqrestore(&domain->lock, flags);
-
-       return addr;
 }
 
 /*
 static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
                       enum dma_data_direction dir, struct dma_attrs *attrs)
 {
-       unsigned long flags;
        struct protection_domain *domain;
 
        INC_STATS_COUNTER(cnt_unmap_single);
        if (IS_ERR(domain))
                return;
 
-       spin_lock_irqsave(&domain->lock, flags);
-
        __unmap_single(domain->priv, dma_addr, size, dir);
-
-       domain_flush_complete(domain);
-
-       spin_unlock_irqrestore(&domain->lock, flags);
 }
 
 /*
                  int nelems, enum dma_data_direction dir,
                  struct dma_attrs *attrs)
 {
-       unsigned long flags;
        struct protection_domain *domain;
        int i;
        struct scatterlist *s;
 
        dma_mask = *dev->dma_mask;
 
-       spin_lock_irqsave(&domain->lock, flags);
-
        for_each_sg(sglist, s, nelems, i) {
                paddr = sg_phys(s);
 
                        goto unmap;
        }
 
-       domain_flush_complete(domain);
-
-out:
-       spin_unlock_irqrestore(&domain->lock, flags);
-
        return mapped_elems;
+
 unmap:
        for_each_sg(sglist, s, mapped_elems, i) {
                if (s->dma_address)
                        __unmap_single(domain->priv, s->dma_address,
                                       s->dma_length, dir);
                s->dma_address = s->dma_length = 0;
        }
 
-       mapped_elems = 0;
-
-       goto out;
+       return 0;
 }
 
 /*
                     int nelems, enum dma_data_direction dir,
                     struct dma_attrs *attrs)
 {
-       unsigned long flags;
        struct protection_domain *domain;
        struct scatterlist *s;
        int i;
        if (IS_ERR(domain))
                return;
 
-       spin_lock_irqsave(&domain->lock, flags);
-
        for_each_sg(sglist, s, nelems, i) {
                __unmap_single(domain->priv, s->dma_address,
                               s->dma_length, dir);
                s->dma_address = s->dma_length = 0;
        }
-
-       domain_flush_complete(domain);
-
-       spin_unlock_irqrestore(&domain->lock, flags);
 }
 
 /*
 {
        u64 dma_mask = dev->coherent_dma_mask;
        struct protection_domain *domain;
-       unsigned long flags;
        struct page *page;
 
        INC_STATS_COUNTER(cnt_alloc_coherent);
        if (!dma_mask)
                dma_mask = *dev->dma_mask;
 
-       spin_lock_irqsave(&domain->lock, flags);
-
        *dma_addr = __map_single(dev, domain->priv, page_to_phys(page),
                                 size, DMA_BIDIRECTIONAL, true, dma_mask);
 
-       if (*dma_addr == DMA_ERROR_CODE) {
-               spin_unlock_irqrestore(&domain->lock, flags);
+       if (*dma_addr == DMA_ERROR_CODE)
                goto out_free;
-       }
-
-       domain_flush_complete(domain);
-
-       spin_unlock_irqrestore(&domain->lock, flags);
 
        return page_address(page);
 
                          struct dma_attrs *attrs)
 {
        struct protection_domain *domain;
-       unsigned long flags;
        struct page *page;
 
        INC_STATS_COUNTER(cnt_free_coherent);
        if (IS_ERR(domain))
                goto free_mem;
 
-       spin_lock_irqsave(&domain->lock, flags);
-
        __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
 
-       domain_flush_complete(domain);
-
-       spin_unlock_irqrestore(&domain->lock, flags);
-
 free_mem:
        if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
                __free_pages(page, get_order(size));