enum {
        MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
+       MLX5E_SQ_STATE_BF_ENABLE,
 };
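
Together with the hunks below, this bit replaces the old "is uar_bf_map
non-NULL?" test: it is set once when the SQ's UAR gets a write-combining
mapping and checked in the xmit hot path. A minimal sketch of the idiom,
stitched together from the hunks in this patch:

	/* at SQ create time: BF is usable iff the WC mapping succeeded */
	if (sq->uar.bf_map)
		set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);

	/* in the xmit path: test the bit instead of chasing a pointer */
	if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state))
		bf_sz = wi->num_wqebbs << 3;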
 
 struct mlx5e_sq {
        struct mlx5_wq_cyc         wq;
        u32                        dma_fifo_mask;
        void __iomem              *uar_map;
-       void __iomem              *uar_bf_map;
        struct netdev_queue       *txq;
        u32                        sqn;
        u16                        bf_buf_size;
         * doorbell
         */
        wmb();
-
-       if (bf_sz) {
-               __iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);
-
-               /* flush the write-combining mapped buffer */
-               wmb();
-
-       } else {
+       if (bf_sz)
+               __iowrite64_copy(sq->uar_map + ofst, &wqe->ctrl, bf_sz);
+       else
                mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
-       }
+       /* flush the write-combining mapped buffer */
+       wmb();
 
        sq->bf_offset ^= sq->bf_buf_size;
 }
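
With BF enabled, sq->uar_map now points at the write-combining BF mapping,
so both branches ring through the same pointer and the trailing wmb() is
what flushes the WC buffer. A condensed sketch of the resulting notify path
(simplified: the hypothetical ring_doorbell() name and the plain
sq->bf_offset stand in for the driver's inline helper):

	static void ring_doorbell(struct mlx5e_sq *sq,
				  struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
	{
		u16 ofst = sq->bf_offset;	/* current BF buffer */

		wmb();	/* doorbell record visible before ringing */
		if (bf_sz)
			/* Blue Flame: burst the whole WQE through WC */
			__iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz);
		else
			/* plain doorbell: one 8-byte ctrl-segment write */
			mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL);
		wmb();	/* flush the write-combining buffer */

		sq->bf_offset ^= sq->bf_buf_size;	/* alternate BF halves */
	}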
 
        int txq_ix;
        int err;
 
-       err = mlx5_alloc_map_uar(mdev, &sq->uar);
+       err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
        if (err)
                return err;
 
                goto err_unmap_free_uar;
 
        sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
-       sq->uar_map     = sq->uar.map;
-       sq->uar_bf_map  = sq->uar.bf_map;
+       if (sq->uar.bf_map) {
+               set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
+               sq->uar_map = sq->uar.bf_map;
+       } else {
+               sq->uar_map = sq->uar.map;
+       }
        sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
        sq->max_inline  = param->max_inline;
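
Note the asymmetry between the two mlx5_alloc_map_uar() callers this patch
touches: the SQ path asks for write combining so Blue Flame can work, while
the CQ doorbell UAR does not. Side by side (sketch, error handling elided):

	/* SQ: request a WC mapping; BF is enabled only if it succeeds */
	err = mlx5_alloc_map_uar(mdev, &sq->uar, true);

	/* CQ doorbell: a plain uncached mapping is all that is needed */
	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);

If ioremap_wc() failed inside the allocator, sq->uar.bf_map stays NULL, the
MLX5E_SQ_STATE_BF_ENABLE bit is never set, and doorbells silently go
through the regular mapping.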
 
 
        priv = netdev_priv(netdev);
 
-       err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
+       err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
        if (err) {
                mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
                goto err_free_netdev;
 
        if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
                int bf_sz = 0;
 
-               if (bf && sq->uar_bf_map)
+               if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state))
                        bf_sz = wi->num_wqebbs << 3;
 
                cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
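
bf_sz is the WQE size in 8-byte words, which is exactly the count that
__iowrite64_copy() takes: one WQE basic block (WQEBB) is 64 bytes, i.e. 8
quadwords, hence the << 3. A worked sketch:

	/* 2 WQEBBs = 128 bytes = 16 quadwords pushed through the WC buffer */
	int bf_sz = wi->num_wqebbs << 3;	/* WQEBBs -> 64-bit words */

	__iowrite64_copy(sq->uar_map + sq->bf_offset, &wqe->ctrl, bf_sz);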
 
        return -ENOTSUPP;
 }
 
-static int map_bf_area(struct mlx5_core_dev *dev)
-{
-       resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
-       resource_size_t bf_len = pci_resource_len(dev->pdev, 0);
-
-       dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);
-
-       return dev->priv.bf_mapping ? 0 : -ENOMEM;
-}
-
-static void unmap_bf_area(struct mlx5_core_dev *dev)
-{
-       if (dev->priv.bf_mapping)
-               io_mapping_free(dev->priv.bf_mapping);
-}
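
The two helpers removed above implemented the old scheme: one device-wide
struct io_mapping covering the whole UAR BAR, from which each UAR borrowed
a WC view via io_mapping_map_wc(). The replacement maps each UAR page
write-combined on its own, so there is no global mapping left to create or
tear down. Roughly (a sketch with placeholder bar_start/bar_len/uar_index
names, not the exact driver code):

	/* old: shared WC io_mapping for BAR 0, per-UAR views on demand */
	struct io_mapping *bfm = io_mapping_create_wc(bar_start, bar_len);
	void __iomem *bf = io_mapping_map_wc(bfm, uar_index << PAGE_SHIFT);

	/* new: map just this UAR page write-combined, no shared state */
	void __iomem *bf2 = ioremap_wc(bar_start + (uar_index << PAGE_SHIFT),
				       PAGE_SIZE);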
-
 static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
 {
        struct mlx5_device_context *dev_ctx;
                goto err_stop_eqs;
        }
 
-       if (map_bf_area(dev))
-               dev_err(&pdev->dev, "Failed to map blue flame area\n");
-
        err = mlx5_irq_set_affinity_hints(dev);
-       if (err) {
+       if (err)
                dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
-               goto err_unmap_bf_area;
-       }
 
        MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
 
        mlx5_cleanup_qp_table(dev);
        mlx5_cleanup_cq_table(dev);
        mlx5_irq_clear_affinity_hints(dev);
-
-err_unmap_bf_area:
-       unmap_bf_area(dev);
-
        free_comp_eqs(dev);
 
 err_stop_eqs:
        mlx5_cleanup_qp_table(dev);
        mlx5_cleanup_cq_table(dev);
        mlx5_irq_clear_affinity_hints(dev);
-       unmap_bf_area(dev);
        free_comp_eqs(dev);
        mlx5_stop_eqs(dev);
        mlx5_free_uuars(dev, &priv->uuari);
 
        return 0;
 }
 
-int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
+                      bool map_wc)
 {
        phys_addr_t pfn;
        phys_addr_t uar_bar_start;
 
        uar_bar_start = pci_resource_start(mdev->pdev, 0);
        pfn           = (uar_bar_start >> PAGE_SHIFT) + uar->index;
-       uar->map      = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
-       if (!uar->map) {
-               mlx5_core_warn(mdev, "ioremap() failed, %d\n", err);
-               err = -ENOMEM;
-               goto err_free_uar;
-       }
 
-       if (mdev->priv.bf_mapping)
-               uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping,
-                                               uar->index << PAGE_SHIFT);
+       if (map_wc) {
+               uar->bf_map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
+               if (!uar->bf_map) {
+                       mlx5_core_warn(mdev, "ioremap_wc() failed\n");
+                       uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+                       if (!uar->map)
+                               goto err_free_uar;
+               }
+       } else {
+               uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+               if (!uar->map)
+                       goto err_free_uar;
+       }
 
        return 0;
 
 err_free_uar:
+       mlx5_core_warn(mdev, "ioremap() failed\n");
+       err = -ENOMEM;
        mlx5_cmd_free_uar(mdev, uar->index);
 
        return err;
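
The reworked allocator thus keeps the invariant that at most one of the two
mappings exists at a time. A condensed sketch of the resulting control flow
(uar->index is allocated in context elided from this hunk):

	if (map_wc) {
		uar->bf_map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
		if (!uar->bf_map)	/* WC refused: fall back to UC */
			uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
	} else {
		uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
	}

	if (!uar->map && !uar->bf_map) {	/* both attempts failed */
		mlx5_cmd_free_uar(mdev, uar->index);
		return -ENOMEM;
	}
	return 0;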
 
 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
 {
-       io_mapping_unmap(uar->bf_map);
-       iounmap(uar->map);
+       if (uar->map)
+               iounmap(uar->map);
+       else
+               iounmap(uar->bf_map);
        mlx5_cmd_free_uar(mdev, uar->index);
 }
 EXPORT_SYMBOL(mlx5_unmap_free_uar);
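
Since exactly one of the two mappings is ever created, teardown unmaps
whichever side is set before the UAR index is returned to firmware; calling
iounmap() on the NULL side is not guaranteed safe on every architecture,
hence the if/else. For a caller, the pairing looks like this (sketch):

	struct mlx5_uar uar = {};

	err = mlx5_alloc_map_uar(mdev, &uar, true);	/* sets map or bf_map */
	if (err)
		return err;
	/* ... use the UAR ... */
	mlx5_unmap_free_uar(mdev, &uar);	/* unmaps whichever is set */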
 
        struct mlx5_uuar_info   uuari;
        MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
 
-       struct io_mapping       *bf_mapping;
-
        /* pages stuff */
        struct workqueue_struct *pg_wq;
        struct rb_root          page_root;
 int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
 int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
 int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
-int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
+                      bool map_wc);
 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
 void mlx5_health_cleanup(struct mlx5_core_dev *dev);
 int mlx5_health_init(struct mlx5_core_dev *dev);