props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
        if (MLX5_CAP_GEN(mdev, xrc))
                props->device_cap_flags |= IB_DEVICE_XRC;
+       if (MLX5_CAP_GEN(mdev, imaicl)) {
+               props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
+                                          IB_DEVICE_MEM_WINDOW_TYPE_2B;
+               props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
+       }
        props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        if (MLX5_CAP_GEN(mdev, sho)) {
                props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
 
        mlx5_ib_internal_fill_odp_caps(dev);
 
+       if (MLX5_CAP_GEN(mdev, imaicl)) {
+               dev->ib_dev.alloc_mw            = mlx5_ib_alloc_mw;
+               dev->ib_dev.dealloc_mw          = mlx5_ib_dealloc_mw;
+               dev->ib_dev.uverbs_cmd_mask |=
+                       (1ull << IB_USER_VERBS_CMD_ALLOC_MW)    |
+                       (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
+       }
+
        if (MLX5_CAP_GEN(mdev, xrc)) {
                dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
                dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
 
 #include <linux/mlx5/srq.h>
 #include <linux/types.h>
 #include <linux/mlx5/transobj.h>
+#include <rdma/ib_user_verbs.h>
 
 #define mlx5_ib_dbg(dev, format, arg...)                               \
 pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,   \
        int                     access_flags; /* Needed for rereg MR */
 };
 
+/*
+ * Driver-private memory window object: wraps the uverbs-visible ib_mw
+ * together with the mlx5 core mkey that backs it in hardware.
+ */
+struct mlx5_ib_mw {
+       struct ib_mw            ibmw;   /* embedded uverbs MW; must be first for to_mmw() */
+       struct mlx5_core_mkey   mmkey;  /* HW mkey created via mlx5_core_create_mkey() */
+};
+
 struct mlx5_ib_umr_context {
        enum ib_wc_status       status;
        struct completion       done;
        return container_of(ibmr, struct mlx5_ib_mr, ibmr);
 }
 
+/* Convert a uverbs ib_mw pointer to its containing mlx5_ib_mw. */
+static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
+{
+       return container_of(ibmw, struct mlx5_ib_mw, ibmw);
+}
+
 struct mlx5_ib_ah {
        struct ib_ah            ibah;
        struct mlx5_av          av;
 struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata);
+struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+                              struct ib_udata *udata);
+int mlx5_ib_dealloc_mw(struct ib_mw *mw);
 int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
                       int npages, int zap);
 int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 
 #include <rdma/ib_umem_odp.h>
 #include <rdma/ib_verbs.h>
 #include "mlx5_ib.h"
+#include "user.h"
 
 enum {
        MAX_PENDING_REG_MR = 8,
        return ERR_PTR(err);
 }
 
+/*
+ * mlx5_ib_alloc_mw() - allocate a type 1 or type 2 memory window.
+ * @pd:    protection domain the window is associated with
+ * @type:  IB_MW_TYPE_1 or IB_MW_TYPE_2
+ * @udata: user command buffer, expected to carry struct mlx5_ib_alloc_mw
+ *
+ * Creates an unbound (status FREE) KLM-mode mkey in hardware and returns
+ * it wrapped in an ib_mw.  Returns an ERR_PTR on failure; all error paths
+ * release the mkey and free the temporary mailbox and the mw object.
+ */
+struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+                              struct ib_udata *udata)
+{
+       struct mlx5_ib_dev *dev = to_mdev(pd->device);
+       struct mlx5_create_mkey_mbox_in *in = NULL;
+       struct mlx5_ib_mw *mw = NULL;
+       int ndescs;
+       int err;
+       struct mlx5_ib_alloc_mw req = {};       /* zeroed so a short copy leaves the tail 0 */
+       struct {
+               __u32   comp_mask;
+               __u32   response_length;
+       } resp = {};
+
+       /* Copy at most sizeof(req): older userspace may pass a shorter struct. */
+       err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
+       if (err)
+               return ERR_PTR(err);
+
+       /* No extension bits are defined yet; reserved fields must be zero. */
+       if (req.comp_mask || req.reserved1 || req.reserved2)
+               return ERR_PTR(-EOPNOTSUPP);
+
+       /*
+        * Newer userspace may pass a larger struct; accept it only if the
+        * part we do not understand is all zeroes (forward compatibility).
+        */
+       if (udata->inlen > sizeof(req) &&
+           !ib_is_udata_cleared(udata, sizeof(req),
+                                udata->inlen - sizeof(req)))
+               return ERR_PTR(-EOPNOTSUPP);
+
+       /* Round the KLM count up to a multiple of 4; at least one group. */
+       ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
+
+       mw = kzalloc(sizeof(*mw), GFP_KERNEL);
+       in = kzalloc(sizeof(*in), GFP_KERNEL);
+       if (!mw || !in) {
+               err = -ENOMEM;
+               goto free;
+       }
+
+       /* Build the create_mkey mailbox: a free (unbound) KLM-mode mkey. */
+       in->seg.status = MLX5_MKEY_STATUS_FREE;
+       in->seg.xlt_oct_size = cpu_to_be32(ndescs);
+       in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
+       in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_KLM |
+               MLX5_PERM_LOCAL_READ;
+       /* Type 2 windows support remote invalidation (see IBA MW type 2B). */
+       if (type == IB_MW_TYPE_2)
+               in->seg.flags_pd |= cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
+       /* NOTE(review): 0xffffff appears to be the "any QP" QPN — confirm. */
+       in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+
+       err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, sizeof(*in),
+                                   NULL, NULL, NULL);
+       if (err)
+               goto free;
+
+       mw->ibmw.rkey = mw->mmkey.key;
+
+       /* Report back only as much of resp as userspace has room for. */
+       resp.response_length = min(offsetof(typeof(resp), response_length) +
+                                  sizeof(resp.response_length), udata->outlen);
+       if (resp.response_length) {
+               err = ib_copy_to_udata(udata, &resp, resp.response_length);
+               if (err) {
+                       /* Undo the HW mkey before falling into the free path. */
+                       mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
+                       goto free;
+               }
+       }
+
+       kfree(in);      /* mailbox is only needed for the create call */
+       return &mw->ibmw;
+
+free:
+       kfree(mw);
+       kfree(in);
+       return ERR_PTR(err);
+}
+
+/*
+ * mlx5_ib_dealloc_mw() - destroy a memory window.
+ *
+ * Destroys the backing HW mkey and, only on success, frees the driver
+ * object.  NOTE(review): on destroy failure the mlx5_ib_mw is deliberately
+ * kept alive and the error is propagated — confirm the caller retries or
+ * otherwise owns the object in that case.
+ */
+int mlx5_ib_dealloc_mw(struct ib_mw *mw)
+{
+       struct mlx5_ib_mw *mmw = to_mmw(mw);
+       int err;
+
+       err =  mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
+                                     &mmw->mmkey);
+       if (!err)
+               kfree(mmw);
+       return err;
+}
+
 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
                            struct ib_mr_status *mr_status)
 {
 
        __u32   uuar_index;
 };
 
+/*
+ * User/kernel ABI command for ALLOC_MW.  Layout is fixed; reserved fields
+ * and unknown comp_mask bits must be zero (enforced in mlx5_ib_alloc_mw()).
+ */
+struct mlx5_ib_alloc_mw {
+       __u32   comp_mask;      /* extension bitmask; no bits defined yet */
+       __u8    num_klms;       /* requested number of KLM translation entries */
+       __u8    reserved1;      /* must be zero */
+       __u16   reserved2;      /* must be zero */
+};
+
 static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
                                    struct mlx5_ib_create_qp *ucmd,
                                    int inlen,
 
        u8         cd[0x1];
        u8         reserved_at_22c[0x1];
        u8         apm[0x1];
-       u8         reserved_at_22e[0x7];
+       u8         reserved_at_22e[0x2];
+       u8         imaicl[0x1];
+       u8         reserved_at_231[0x4];
        u8         qkv[0x1];
        u8         pkv[0x1];
        u8         set_deth_sqpn[0x1];