www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
IB/Shared PD support from Oracle
authorEli Cohen <eli@mellanox.co.il>
Sun, 5 Jun 2011 12:36:46 +0000 (15:36 +0300)
committerMukesh Kacker <mukesh.kacker@oracle.com>
Fri, 11 Sep 2015 17:14:19 +0000 (10:14 -0700)
Orabug: 21496696

Signed-off-by: Arun Kaimalettu <arun.kaimalettu@oracle.com>
Signed-off-by: Eli Cohen <eli@mellanox.co.il>
(Ported from UEK2/OFED 1.5.5)

Signed-off-by: Mukesh Kacker <mukesh.kacker@oracle.com>
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/amso1100/c2_provider.c
drivers/infiniband/hw/ehca/ehca_main.c
drivers/infiniband/hw/ipath/ipath_verbs.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mthca/mthca_provider.c
include/rdma/ib_verbs.h
include/uapi/rdma/ib_user_verbs.h

index b716b08156446e186c9ae608f3f4e6343c6f200f..55a0ac08b91e23690d962cad6cca6bca8f2d2ef5 100644 (file)
@@ -165,6 +165,7 @@ struct ib_ucq_object {
 
 extern spinlock_t ib_uverbs_idr_lock;
 extern struct idr ib_uverbs_pd_idr;
+extern struct idr ib_uverbs_shpd_idr;
 extern struct idr ib_uverbs_mr_idr;
 extern struct idr ib_uverbs_mw_idr;
 extern struct idr ib_uverbs_ah_idr;
@@ -250,6 +251,8 @@ IB_UVERBS_DECLARE_CMD(destroy_srq);
 IB_UVERBS_DECLARE_CMD(create_xsrq);
 IB_UVERBS_DECLARE_CMD(open_xrcd);
 IB_UVERBS_DECLARE_CMD(close_xrcd);
+IB_UVERBS_DECLARE_CMD(alloc_shpd);
+IB_UVERBS_DECLARE_CMD(share_pd);
 
 #define IB_UVERBS_DECLARE_EX_CMD(name)                         \
        int ib_uverbs_ex_##name(struct ib_uverbs_file *file,    \
index 9825b41a1f0cae67f9f850b3755d0c37a76ba3b3..2f5d5441f7673271fcf796f105002a12acf710f9 100644 (file)
@@ -49,6 +49,7 @@ struct uverbs_lock_class {
 };
 
 static struct uverbs_lock_class pd_lock_class  = { .name = "PD-uobj" };
+static struct uverbs_lock_class shpd_lock_class = { .name = "SHPD-uobj" };
 static struct uverbs_lock_class mr_lock_class  = { .name = "MR-uobj" };
 static struct uverbs_lock_class mw_lock_class  = { .name = "MW-uobj" };
 static struct uverbs_lock_class cq_lock_class  = { .name = "CQ-uobj" };
@@ -216,6 +217,11 @@ static void put_pd_read(struct ib_pd *pd)
        put_uobj_read(pd->uobject);
 }
 
+/* Release the write reference on a PD's uobject taken via idr_write_uobj() */
+static void put_pd_write(struct ib_pd *pd)
+{
+       put_uobj_write(pd->uobject);
+}
+
 static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
 {
        return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
@@ -561,6 +567,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
 
        pd->device  = file->device->ib_dev;
        pd->uobject = uobj;
+       pd->shpd  = NULL;    /* will be filled in when pd is shared */
        atomic_set(&pd->usecnt, 0);
 
        uobj->object = pd;
@@ -598,25 +605,264 @@ err:
        return ret;
 }
 
+/*
+ * ALLOC_SHPD: publish an existing PD in the device-global shared-PD idr
+ * under a caller-chosen 64-bit key so that other processes can attach
+ * to the same device PD with SHARE_PD.
+ */
+ssize_t ib_uverbs_alloc_shpd(struct ib_uverbs_file *file,
+                            const char __user *buf,
+                            int in_len, int out_len)
+{
+       struct ib_uverbs_alloc_shpd cmd;
+       struct ib_uverbs_alloc_shpd_resp resp;
+       struct ib_uobject       *uobj;
+       struct ib_uobject       *shuobj = NULL;
+       struct ib_pd              *pd;
+       struct ib_shpd          *shpd = NULL;
+       int                       ret;
+
+       /* PD sharing requires all three driver hooks */
+       if (!file->device->ib_dev->alloc_shpd ||
+                       !file->device->ib_dev->share_pd ||
+                       !file->device->ib_dev->remove_shpd)
+               return -ENOSYS;
+
+       if (copy_from_user(&cmd, buf, sizeof(cmd)))
+               return -EFAULT;
+
+       uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
+       if (!uobj)
+               return -EINVAL;
+
+       pd = uobj->object;
+
+       /* pd can be shared only once */
+       if (pd->shpd) {
+               ret = -EINVAL;
+               goto err_pd;
+       }
+
+       /* create a new uobj; ucontext is 0 because the shpd is device-global,
+        * not owned by any single process */
+       shuobj = kmalloc(sizeof(*shuobj), GFP_KERNEL);
+       if (!shuobj) {
+               ret = -ENOMEM;
+               goto err_pd;
+       }
+
+       init_uobj(shuobj, 0, 0/* global */, &shpd_lock_class);
+       down_write(&shuobj->mutex);
+
+       /* alloc shared pd from device driver */
+       shpd = file->device->ib_dev->alloc_shpd(file->device->ib_dev, pd);
+       if (IS_ERR(shpd)) {
+               ret = PTR_ERR(shpd);
+               goto err_shobj;
+       }
+
+       shpd->device = file->device->ib_dev;
+       shpd->uobject = shuobj;
+       shpd->share_key = cmd.share_key;
+
+       /*
+        * The owning pd counts as one sharer: ib_uverbs_dealloc_pd()
+        * decrements ->shared for every pd linked to an shpd (including
+        * this original one) and tears the shpd down when it reaches
+        * zero.  Starting at 0 would either free the shared pd number
+        * while sharers still use it, or leak the shpd entirely.
+        */
+       atomic_set(&shpd->shared, 1);
+
+       shuobj->object = shpd;
+
+       /* link new uobj to device level list */
+       ret = idr_add_uobj(&ib_uverbs_shpd_idr, shuobj);
+       if (ret)
+               goto err_idr;
+
+       /* return pd_handle */
+       memset(&resp, 0, sizeof(resp));
+       resp.shpd_handle = shuobj->id;
+
+       if (copy_to_user((void __user *) (unsigned long) cmd.response,
+                        &resp, sizeof(resp))) {
+               ret = -EFAULT;
+               goto err_copy;
+       }
+
+       shuobj->live = 1;
+
+       /* mark pd as shared */
+       pd->shpd = shpd;
+
+       up_write(&shuobj->mutex);
+       put_pd_write(pd);
+
+       return in_len;
+
+err_copy:
+       idr_remove_uobj(&ib_uverbs_shpd_idr, shuobj);
+
+err_idr:
+       /* atinit=1: the pd number is still owned by the original pd */
+       file->device->ib_dev->remove_shpd(file->device->ib_dev, shpd, 1);
+
+err_shobj:
+       put_uobj_write(shuobj);
+
+err_pd:
+       put_pd_write(pd);
+
+       return ret;
+}
+
+/*
+ * SHARE_PD: attach the calling process to a previously published shpd,
+ * creating a new pd in this process backed by the same device pd.
+ */
+ssize_t ib_uverbs_share_pd(struct ib_uverbs_file *file,
+                          const char __user *buf,
+                          int in_len, int out_len)
+{
+       struct ib_uverbs_share_pd cmd;
+       struct ib_uverbs_share_pd_resp resp;
+       struct ib_udata                udata;
+       struct ib_uobject       *uobj = NULL;
+       struct ib_uobject       *shuobj;
+       struct ib_pd              *pd;
+       struct ib_shpd          *shpd;
+       int                       ret;
+
+       if (copy_from_user(&cmd, buf, sizeof(cmd)))
+               return -EFAULT;
+
+       INIT_UDATA(&udata, buf + sizeof(cmd),
+                  (unsigned long) cmd.response + sizeof(resp),
+                  in_len - sizeof(cmd), out_len - sizeof(resp));
+
+       /* get global uobject for the shared pd */
+       shuobj = idr_read_uobj(&ib_uverbs_shpd_idr, cmd.shpd_handle,
+                              0/* global */, 0);
+       if (!shuobj)
+               return -EINVAL;
+
+       shpd = shuobj->object;
+
+       /* check the key; ratelimited since a mismatch is user-triggerable */
+       if (shpd->share_key != cmd.share_key) {
+               pr_warn_ratelimited("WARNING : invalid shared pd key\n");
+               ret = -EINVAL;
+               goto err_putshpd;
+       }
+
+       /* a pd may only be shared within the device that owns it */
+       if (strncmp(file->device->ib_dev->name, shpd->device->name,
+                   IB_DEVICE_NAME_MAX)) {
+               ret = -EINVAL;
+               goto err_putshpd;
+       }
+
+       /* allocate a new user object */
+       uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
+       if (!uobj) {
+               ret = -ENOMEM;
+               goto err_putshpd;
+       }
+
+       init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
+       down_write(&uobj->mutex);
+
+       /* share the pd at device driver level */
+       pd = file->device->ib_dev->share_pd(file->device->ib_dev,
+                       file->ucontext, &udata, shpd);
+       if (IS_ERR(pd)) {
+               ret = PTR_ERR(pd);
+               goto err_putuobj;
+       }
+
+       pd->device  = file->device->ib_dev;
+       pd->uobject = uobj;
+       pd->shpd  = shpd;
+       atomic_set(&pd->usecnt, 0);
+
+       /* initialize uobj and return pd_handle */
+       uobj->object = pd;
+       ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
+       if (ret)
+               goto err_idr;
+
+       memset(&resp, 0, sizeof(resp));
+       resp.pd_handle = uobj->id;
+
+       if (copy_to_user((void __user *) (unsigned long) cmd.response,
+                        &resp, sizeof(resp))) {
+               ret = -EFAULT;
+               goto err_copy;
+       }
+
+       mutex_lock(&file->mutex);
+       list_add_tail(&uobj->list, &file->ucontext->pd_list);
+       mutex_unlock(&file->mutex);
+
+       uobj->live = 1;
+       /* safe vs. the dec in dealloc_pd: shuobj is still held read-locked */
+       atomic_inc(&shpd->shared);
+
+       up_write(&uobj->mutex);
+
+       put_uobj_read(shuobj);
+
+       return in_len;
+
+err_copy:
+       idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
+
+err_idr:
+       /* pd->shpd is set, so the driver will not free the shared pd number */
+       ib_dealloc_pd(pd);
+
+err_putuobj:
+       put_uobj_write(uobj);
+
+err_putshpd:
+       put_uobj_read(shuobj);
+
+       return ret;
+}
+
 ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
                             const char __user *buf,
                             int in_len, int out_len)
 {
        struct ib_uverbs_dealloc_pd cmd;
        struct ib_uobject          *uobj;
-       int                         ret;
+       int                         ret = 0;
+       struct ib_uobject          *shuobj = 0;
+       struct ib_pd               *pd = NULL;
+       struct ib_shpd             *shpd = NULL;
 
-       if (copy_from_user(&cmd, buf, sizeof cmd))
+       if (copy_from_user(&cmd, buf, sizeof(cmd)))
                return -EFAULT;
 
        uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;
 
+       pd = uobj->object;
+
+       /* is pd shared ?*/
+       if (pd->shpd) {
+               shpd = pd->shpd;
+               shuobj = shpd->uobject;
+       }
+
        ret = ib_dealloc_pd(uobj->object);
        if (!ret)
                uobj->live = 0;
 
+       if (!ret && shpd) {
+               down_write(&shuobj->mutex);
+               atomic_dec(&shpd->shared);
+
+               /* if this shpd is no longer shared */
+               if (!atomic_read(&shpd->shared)) {
+                       /* free the shpd info from device driver */
+                       file->device->ib_dev->remove_shpd(file->device->ib_dev,
+                                                         shpd, 0);
+                       shuobj->live = 0;
+                       up_write(&shuobj->mutex);
+                       idr_remove_uobj(&ib_uverbs_shpd_idr, shuobj);
+                       put_uobj(shuobj);
+               } else
+                       up_write(&shuobj->mutex);
+       }
+
        put_uobj_write(uobj);
 
        if (ret)
index 6be3f0faa17a223f3fee72382a46ecac842f6184..32c654abfc2f50835ce4bcce49c5c7e526ca2c3e 100644 (file)
@@ -66,6 +66,7 @@ static struct class *uverbs_class;
 
 DEFINE_SPINLOCK(ib_uverbs_idr_lock);
 DEFINE_IDR(ib_uverbs_pd_idr);
+DEFINE_IDR(ib_uverbs_shpd_idr);
 DEFINE_IDR(ib_uverbs_mr_idr);
 DEFINE_IDR(ib_uverbs_mw_idr);
 DEFINE_IDR(ib_uverbs_ah_idr);
@@ -116,6 +117,13 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
        [IB_USER_VERBS_CMD_CLOSE_XRCD]          = ib_uverbs_close_xrcd,
        [IB_USER_VERBS_CMD_CREATE_XSRQ]         = ib_uverbs_create_xsrq,
        [IB_USER_VERBS_CMD_OPEN_QP]             = ib_uverbs_open_qp,
+       /*
+        * Upstream verbs index 0-40 above.
+        * Oracle additions to verbs start here with some
+        * space (index 46)
+        */
+       [IB_USER_VERBS_CMD_ALLOC_SHPD]          = ib_uverbs_alloc_shpd,
+       [IB_USER_VERBS_CMD_SHARE_PD]            = ib_uverbs_share_pd,
 };
 
 static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
@@ -1058,6 +1066,7 @@ static void __exit ib_uverbs_cleanup(void)
        if (overflow_maj)
                unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES);
        idr_destroy(&ib_uverbs_pd_idr);
+       idr_destroy(&ib_uverbs_shpd_idr);
        idr_destroy(&ib_uverbs_mr_idr);
        idr_destroy(&ib_uverbs_mw_idr);
        idr_destroy(&ib_uverbs_ah_idr);
index 83185752cc025b71115955c2fda96ba591713664..f3e7694d00fc99ead71e369e622f7c38bbd6991f 100644 (file)
@@ -835,6 +835,10 @@ int c2_register_device(struct c2_dev *dev)
        dev->ibdev.post_send = c2_post_send;
        dev->ibdev.post_recv = c2_post_receive;
 
+       dev->ibdev.alloc_shpd           = NULL;
+       dev->ibdev.share_pd             = NULL;
+       dev->ibdev.remove_shpd          = NULL;
+
        dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
        if (dev->ibdev.iwcm == NULL) {
                ret = -ENOMEM;
index cd8d290a09fc2029f5542660c42e652b9216fe3e..d3aeda8351aef8f7f2293dcc3d2b3b94dd9b0ac0 100644 (file)
@@ -510,6 +510,9 @@ static int ehca_init_device(struct ehca_shca *shca)
        shca->ib_device.process_mad         = ehca_process_mad;
        shca->ib_device.mmap                = ehca_mmap;
        shca->ib_device.dma_ops             = &ehca_dma_mapping_ops;
+       shca->ib_device.alloc_shpd          = NULL;
+       shca->ib_device.share_pd            = NULL;
+       shca->ib_device.remove_shpd         = NULL;
 
        if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
                shca->ib_device.uverbs_cmd_mask |=
index 9f1bf4d6dd65acbcc892addb3fb292863c8e8f91..3f4fe9e9d73ba0b62164364f2ddbb08c795cf865 100644 (file)
@@ -2180,6 +2180,9 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
        dev->process_mad = ipath_process_mad;
        dev->mmap = ipath_mmap;
        dev->dma_ops = &ipath_dma_mapping_ops;
+       dev->alloc_shpd = NULL;
+       dev->share_pd = NULL;
+       dev->remove_shpd = NULL;
 
        snprintf(dev->node_desc, sizeof(dev->node_desc),
                 IPATH_IDSTR " %s", init_utsname()->nodename);
index f4787e8c294ff437e991392f09da2c363dfce9b7..aefe6027aaec8d71ccb58f60b42d4c0f2a41949e 100644 (file)
@@ -765,10 +765,71 @@ static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
        return &pd->ibpd;
 }
 
+/*
+ * Create the driver-private shared-pd object for an existing pd.
+ * Only the pd number is recorded; ownership of the pdn stays with the
+ * original pd until it is deallocated (see mlx4_ib_dealloc_pd /
+ * mlx4_ib_remove_shpd).
+ */
+static struct ib_shpd *mlx4_ib_alloc_shpd(struct ib_device *ibdev,
+                                         struct ib_pd *pd)
+{
+       struct mlx4_ib_shpd *shpd;
+
+       shpd = kzalloc(sizeof(*shpd), GFP_KERNEL);
+       if (!shpd)
+               return ERR_PTR(-ENOMEM);
+
+       shpd->pdn = to_mpd(pd)->pdn;
+
+       return &shpd->ibshpd;
+}
+
+/*
+ * Instantiate a new pd in the caller's context that reuses the pd
+ * number recorded in the shpd.  For userspace consumers the pdn is
+ * copied back through udata, mirroring mlx4_ib_alloc_pd.
+ */
+static struct ib_pd *mlx4_ib_share_pd(struct ib_device *ibdev,
+                                     struct ib_ucontext *context,
+                                     struct ib_udata *udata,
+                                     struct ib_shpd *shpd)
+{
+       struct mlx4_ib_pd *pd;
+
+       pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+       if (!pd)
+               return ERR_PTR(-ENOMEM);
+
+       /* reuse the pd number of the shared pd */
+       pd->pdn = to_mshpd(shpd)->pdn;
+
+       if (context) {
+               if (ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
+                       kfree(pd);
+                       return ERR_PTR(-EFAULT);
+               }
+       }
+
+       return &pd->ibpd;
+}
+
+/*
+ * Destroy a shared-pd object.  @atinit distinguishes the error path in
+ * shpd creation from the normal teardown of the last sharer.
+ */
+static int mlx4_ib_remove_shpd(struct ib_device *ibdev,
+                              struct ib_shpd *shpd, int atinit)
+{
+
+       /*
+        * if remove shpd is called during shpd creation time itself, then
+        * pd should not be freed from device. it will be freed when dealloc_pd
+        * is called
+        */
+       if (!atinit)
+               mlx4_pd_free(to_mdev(ibdev)->dev, to_mshpd(shpd)->pdn);
+       kfree(shpd);
+
+       return 0;
+}
+
 static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
 {
-       mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
-       kfree(pd);
+       struct ib_shpd *shpd = pd->shpd;
+
+       if (shpd) {
+               /*
+                * if pd is shared, pd number will be freed by remove_shpd call
+                */
+               kfree(pd);
+       } else {
+               mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
+               kfree(pd);
+       }
 
        return 0;
 }
@@ -2234,7 +2295,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
                (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)         |
-               (1ull << IB_USER_VERBS_CMD_OPEN_QP);
+               (1ull << IB_USER_VERBS_CMD_OPEN_QP)             |
+               (1ull << IB_USER_VERBS_CMD_ALLOC_SHPD)          |
+               (1ull << IB_USER_VERBS_CMD_SHARE_PD);
 
        ibdev->ib_dev.query_device      = mlx4_ib_query_device;
        ibdev->ib_dev.query_port        = mlx4_ib_query_port;
@@ -2278,6 +2341,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
        ibdev->ib_dev.attach_mcast      = mlx4_ib_mcg_attach;
        ibdev->ib_dev.detach_mcast      = mlx4_ib_mcg_detach;
        ibdev->ib_dev.process_mad       = mlx4_ib_process_mad;
+       ibdev->ib_dev.alloc_shpd        = mlx4_ib_alloc_shpd;
+       ibdev->ib_dev.share_pd          = mlx4_ib_share_pd;
+       ibdev->ib_dev.remove_shpd       = mlx4_ib_remove_shpd;
 
        if (!mlx4_is_slave(ibdev->dev)) {
                ibdev->ib_dev.alloc_fmr         = mlx4_ib_fmr_alloc;
index 99f4cd4079892b7de00849658a74bf36a3086536..432ef2eb9212eceeb988a65d7fc5d89add234267 100644 (file)
@@ -85,6 +85,11 @@ struct mlx4_ib_pd {
        u32                     pdn;
 };
 
+struct mlx4_ib_shpd {
+       struct ib_shpd          ibshpd;
+       u32                     pdn;
+};
+
 struct mlx4_ib_xrcd {
        struct ib_xrcd          ibxrcd;
        u32                     xrcdn;
@@ -573,6 +578,11 @@ static inline struct mlx4_ib_pd *to_mpd(struct ib_pd *ibpd)
        return container_of(ibpd, struct mlx4_ib_pd, ibpd);
 }
 
+static inline struct mlx4_ib_shpd *to_mshpd(struct ib_shpd *ibshpd)
+{
+       return container_of(ibshpd, struct mlx4_ib_shpd, ibshpd);
+}
+
 static inline struct mlx4_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
 {
        return container_of(ibxrcd, struct mlx4_ib_xrcd, ibxrcd);
index a6c68dac901e5fc29aa710fdbf6f65944a1f36a5..c17cee0fec1d0bf27a2ae991d496b0340a7b7c8b 100644 (file)
@@ -1349,6 +1349,10 @@ int mthca_register_device(struct mthca_dev *dev)
                dev->ib_dev.post_recv     = mthca_tavor_post_receive;
        }
 
+       dev->ib_dev.alloc_shpd          = NULL;
+       dev->ib_dev.share_pd            = NULL;
+       dev->ib_dev.remove_shpd         = NULL;
+
        mutex_init(&dev->cap_mask_mutex);
 
        ret = ib_register_device(&dev->ib_dev, NULL);
index 744c3f244584bf13f4f5cefa318cfdc5185292d4..25c43efb607e9b3f7cfd46716c5cd1c744fd73a5 100644 (file)
@@ -1200,9 +1200,18 @@ struct ib_udata {
 struct ib_pd {
        struct ib_device       *device;
        struct ib_uobject      *uobject;
+       struct ib_shpd         *shpd;    /* global uobj id if this
+                                           pd is shared */
        atomic_t                usecnt; /* count all resources */
 };
 
+struct ib_shpd {
+       struct ib_device       *device;
+       struct ib_uobject      *uobject;
+       atomic_t                shared; /* count procs sharing the pd*/
+       u64                     share_key;
+};
+
 struct ib_xrcd {
        struct ib_device       *device;
        atomic_t                usecnt; /* count all exposed resources */
@@ -1658,6 +1667,15 @@ struct ib_device {
        int                        (*destroy_flow)(struct ib_flow *flow_id);
        int                        (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
                                                      struct ib_mr_status *mr_status);
+       struct ib_shpd            *(*alloc_shpd)(struct ib_device *ibdev,
+                                                struct ib_pd *pd);
+       struct ib_pd              *(*share_pd)(struct ib_device *ibdev,
+                                              struct ib_ucontext *context,
+                                              struct ib_udata *udata,
+                                              struct ib_shpd *shpd);
+       int                        (*remove_shpd)(struct ib_device *ibdev,
+                                                 struct ib_shpd *shpd,
+                                                 int atinit);
 
        struct ib_dma_mapping_ops   *dma_ops;
 
index d6f5a147350e410f4dc4908bdc836954ff5328be..016b14164468d1fcf1ba4882354b20c60a550987 100644 (file)
@@ -86,7 +86,24 @@ enum {
        IB_USER_VERBS_CMD_OPEN_XRCD,
        IB_USER_VERBS_CMD_CLOSE_XRCD,
        IB_USER_VERBS_CMD_CREATE_XSRQ,
-       IB_USER_VERBS_CMD_OPEN_QP,
+       IB_USER_VERBS_CMD_OPEN_QP, /* =40 */
+       /*
+        * Note: 0-40 verbs defined above
+        * Start oracle verb additions leaving a gap
+        * for upstream verbs growth.
+        *
+        * We start at 46 which is the starting value used
+        * for these verbs in UEK2 and add them in same
+        * order.
+        *
+        * (Even if we dont care about aligning with UEK2 values,
+        *  cannot go beyond 63 because of "struct ib_device"
+        *  has uverbs_cmd_mask which is 64 bits wide!)
+        */
+#define IB_USER_VERBS_CMD_ORACLE_ADDS_START 46
+       IB_USER_VERBS_CMD_ALLOC_SHPD = IB_USER_VERBS_CMD_ORACLE_ADDS_START,
+                                       /* =46 */
+       IB_USER_VERBS_CMD_SHARE_PD, /* =47 */
 };
 
 enum {
@@ -264,6 +281,26 @@ struct ib_uverbs_alloc_pd_resp {
        __u32 pd_handle;
 };
 
+/*
+ * NOTE(review): in both request structs below a __u64 follows a lone
+ * __u32, leaving 4 bytes of implicit padding on common ABIs.  uverbs
+ * uAPI convention is explicit padding / natural 64-bit alignment so
+ * 32- and 64-bit userspace lay out identically — confirm the layout
+ * matches the UEK2 userspace these were ported from before changing.
+ */
+struct ib_uverbs_alloc_shpd {
+       __u64 response;
+       __u32 pd_handle;
+       __u64 share_key;
+};
+
+struct ib_uverbs_alloc_shpd_resp {
+       __u32 shpd_handle;
+};
+
+struct ib_uverbs_share_pd {
+       __u64 response;
+       __u32 shpd_handle;
+       __u64 share_key;
+};
+
+struct ib_uverbs_share_pd_resp {
+       __u32 pd_handle;
+};
+
+
 struct ib_uverbs_dealloc_pd {
        __u32 pd_handle;
 };