 static struct ddcb_requ *ddcb_requ_alloc(void)
 {
        struct ddcb_requ *req;
 
-       req = kzalloc(sizeof(*req), GFP_ATOMIC);
+       req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return NULL;
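
This hunk, and the two below it, swap GFP_ATOMIC for GFP_KERNEL. These
allocations sit on ioctl, mmap, and memory-pinning paths that run in
process context and are allowed to sleep, so GFP_KERNEL lets the
allocator reclaim memory under pressure instead of dipping into the
small atomic reserve. A minimal sketch of the rule, using hypothetical
function names:

    #include <linux/slab.h>

    /* Process context (ioctl, open, mmap, ...): sleeping is allowed. */
    static long my_ioctl_handler(void)
    {
            void *buf = kzalloc(256, GFP_KERNEL);   /* may sleep/reclaim */

            if (!buf)
                    return -ENOMEM;
            kfree(buf);
            return 0;
    }

    /* Atomic context (IRQ handler, spinlock held): must not sleep. */
    static void my_irq_handler(void)
    {
            void *buf = kzalloc(256, GFP_ATOMIC);   /* no sleeping */

            kfree(buf);
    }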
 
 
        if (get_order(vsize) > MAX_ORDER)
                return -ENOMEM;
 
-       dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC);
+       dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
        if (dma_map == NULL)
                return -ENOMEM;
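
The unchanged guard above the allocation rejects mappings whose backing
buffer would be too large for a single contiguous allocation:
get_order() converts a byte count into a page order, and anything past
MAX_ORDER is beyond what the buddy allocator hands out, so the driver
fails early with -ENOMEM. A sketch of the same pattern (note that newer
kernels rename MAX_ORDER to MAX_PAGE_ORDER):

    #include <linux/mm.h>   /* MAX_ORDER, get_order() */

    static int check_vsize(size_t vsize)
    {
            /* With 4 KiB pages, get_order(8192) == 1. */
            if (get_order(vsize) > MAX_ORDER)
                    return -ENOMEM; /* too big for one buddy allocation */
            return 0;
    }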
 
        map_addr = (m->addr & PAGE_MASK);
        map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
 
-       dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC);
+       dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
        if (dma_map == NULL)
                return -ENOMEM;
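
The two context lines before this hunk page-align an arbitrary user
range before it is mapped: the start address is rounded down to a page
boundary, and the in-page offset is added back to the length before
rounding it up. For example, with 4 KiB pages, m->addr = 0x10000234 and
m->size = 0x2000 yield map_addr = 0x10000000 and map_size =
round_up(0x2000 + 0x234, 0x1000) = 0x3000, i.e. three pages, which is
exactly what the range touches.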
 
 
        if (get_order(size) > MAX_ORDER)
                return NULL;
 
-       return pci_alloc_consistent(cd->pci_dev, size, dma_handle);
+       return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
+                                 GFP_KERNEL);
 }
 
 void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
			       void *vaddr, dma_addr_t dma_handle)
 {
        if (vaddr == NULL)
                return;
 
-       pci_free_consistent(cd->pci_dev, size, vaddr, dma_handle);
+       dma_free_coherent(&cd->pci_dev->dev, size, vaddr, dma_handle);
 }
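
The last two hunks move off the legacy PCI DMA wrappers.
pci_alloc_consistent() and pci_free_consistent() were thin shims over
the generic DMA API (and the alloc side hard-coded GFP_ATOMIC); they
have since been removed from the kernel. The replacements operate on
the embedded struct device and take an explicit gfp_t, which also lets
this driver pass GFP_KERNEL here, consistent with the hunks above. A
sketch of the pattern, with hypothetical helper names:

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    /* Allocate a coherent (consistent) DMA buffer for a PCI device. */
    static void *my_alloc_dma_buf(struct pci_dev *pdev, size_t size,
                                  dma_addr_t *dma_handle)
    {
            /* Process context, so GFP_KERNEL is fine. */
            return dma_alloc_coherent(&pdev->dev, size, dma_handle,
                                      GFP_KERNEL);
    }

    static void my_free_dma_buf(struct pci_dev *pdev, size_t size,
                                void *vaddr, dma_addr_t dma_handle)
    {
            dma_free_coherent(&pdev->dev, size, vaddr, dma_handle);
    }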
 
 static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list,