#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
 
+/* IB access flags that are valid for an MR registered over device memory. */
+#define MLX5_IB_DM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
+                                  IB_ACCESS_REMOTE_WRITE  |\
+                                  IB_ACCESS_REMOTE_READ   |\
+                                  IB_ACCESS_REMOTE_ATOMIC |\
+                                  IB_ZERO_BASED)
+
 struct mlx5_ib_mr {
        struct ib_mr            ibmr;
        void                    *descs;
                               struct ib_dm_alloc_attr *attr,
                               struct uverbs_attr_bundle *attrs);
 int mlx5_ib_dealloc_dm(struct ib_dm *ibdm);
+struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
+                               struct ib_dm_mr_attr *attr,
+                               struct uverbs_attr_bundle *attrs);
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
 
        mr->access_flags = access_flags;
 }
 
+/*
+ * mlx5_ib_get_memic_mr() - create an MR covering a range of device memory
+ * (MEMIC).
+ * @pd:         protection domain the new MR belongs to
+ * @memic_addr: absolute device-memory address of the start of the range
+ * @length:     length of the range, in bytes
+ * @acc:        IB_ACCESS_* flags requested by the caller
+ *
+ * Builds a create_mkey command with MEMIC access mode and executes it on
+ * the core device.  Returns the new &struct ib_mr, or an ERR_PTR() on
+ * allocation or firmware-command failure.
+ */
+static struct ib_mr *mlx5_ib_get_memic_mr(struct ib_pd *pd, u64 memic_addr,
+                                         u64 length, int acc)
+{
+       struct mlx5_ib_dev *dev = to_mdev(pd->device);
+       int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+       struct mlx5_core_dev *mdev = dev->mdev;
+       struct mlx5_ib_mr *mr;
+       void *mkc;
+       u32 *in;
+       int err;
+
+       mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+       if (!mr)
+               return ERR_PTR(-ENOMEM);
+
+       /* Command mailbox for the create_mkey firmware command. */
+       in = kzalloc(inlen, GFP_KERNEL);
+       if (!in) {
+               err = -ENOMEM;
+               goto err_free;
+       }
+
+       mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+       /*
+        * The access-mode field is split across two mkey-context fields:
+        * bits [1:0] in access_mode_1_0 and bits [4:2] in access_mode_4_2.
+        */
+       MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MEMIC & 0x3);
+       MLX5_SET(mkc, mkc, access_mode_4_2,
+                (MLX5_MKC_ACCESS_MODE_MEMIC >> 2) & 0x7);
+       /* Translate the requested IB access flags into mkey permission bits;
+        * local read (lr) is always granted.
+        */
+       MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
+       MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
+       MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
+       MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
+       MLX5_SET(mkc, mkc, lr, 1);
+
+       MLX5_SET64(mkc, mkc, len, length);
+       MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+       /* qpn 0xffffff: mkey is not bound to a specific QP — presumably the
+        * "any QP" encoding used by other mkey-creation paths; confirm
+        * against the PRM.
+        */
+       MLX5_SET(mkc, mkc, qpn, 0xffffff);
+       /* The device expects start_addr relative to the beginning of BAR 0,
+        * so subtract the BAR 0 base from the absolute device address.
+        */
+       MLX5_SET64(mkc, mkc, start_addr,
+                  memic_addr - pci_resource_start(dev->mdev->pdev, 0));
+
+       err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
+       if (err)
+               goto err_in;
+
+       kfree(in);
+
+       /* No userspace memory backs a device-memory MR. */
+       mr->umem = NULL;
+       /* NOTE(review): "set_mr_fileds" is the existing helper's (misspelled)
+        * name elsewhere in this file — keep in sync if it is ever renamed.
+        */
+       set_mr_fileds(dev, mr, 0, length, acc);
+
+       return &mr->ibmr;
+
+err_in:
+       kfree(in);
+
+err_free:
+       kfree(mr);
+
+       return ERR_PTR(err);
+}
+
+/*
+ * mlx5_ib_reg_dm_mr() - register an MR over a device-memory (DM) allocation.
+ * @pd:    protection domain for the new MR
+ * @dm:    device-memory allocation the MR maps into
+ * @attr:  offset, length and access flags requested by the caller
+ * @attrs: uverbs attribute bundle (unused here)
+ *
+ * Validates the requested access flags and delegates MR creation to
+ * mlx5_ib_get_memic_mr().  Returns the new MR or an ERR_PTR().
+ */
+struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
+                               struct ib_dm_mr_attr *attr,
+                               struct uverbs_attr_bundle *attrs)
+{
+       struct mlx5_ib_dm *mdm = to_mdm(dm);
+
+       /* Reject any access flag outside the set valid for device memory. */
+       if (attr->access_flags & ~MLX5_IB_DM_ALLOWED_ACCESS)
+               return ERR_PTR(-EINVAL);
+
+       /* The MR starts attr->offset bytes into the DM region. */
+       return mlx5_ib_get_memic_mr(pd, mdm->dev_addr + attr->offset,
+                                   attr->length, attr->access_flags);
+}
+
 struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata)