}
 
 static void irdma_fill_qos_info(struct irdma_l2params *l2params,
-                               struct iidc_qos_params *qos_info)
+                               struct iidc_rdma_qos_params *qos_info)
 {
        int i;
 
        }
 }
 
-static void irdma_iidc_event_handler(struct ice_pf *pf, struct iidc_event *event)
+static void irdma_iidc_event_handler(struct ice_pf *pf, struct iidc_rdma_event *event)
 {
        struct irdma_device *iwdev = dev_get_drvdata(&pf->adev->dev);
        struct irdma_l2params l2params = {};
 
-       if (*event->type & BIT(IIDC_EVENT_AFTER_MTU_CHANGE)) {
+       if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE)) {
                ibdev_dbg(&iwdev->ibdev, "CLNT: new MTU = %d\n", iwdev->netdev->mtu);
                if (iwdev->vsi.mtu != iwdev->netdev->mtu) {
                        l2params.mtu = iwdev->netdev->mtu;
                        irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
                        irdma_change_l2params(&iwdev->vsi, &l2params);
                }
-       } else if (*event->type & BIT(IIDC_EVENT_BEFORE_TC_CHANGE)) {
+       } else if (*event->type & BIT(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE)) {
                if (iwdev->vsi.tc_change_pending)
                        return;
 
                irdma_prep_tc_change(iwdev);
-       } else if (*event->type & BIT(IIDC_EVENT_AFTER_TC_CHANGE)) {
-               struct iidc_qos_params qos_info = {};
+       } else if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_TC_CHANGE)) {
+               struct iidc_rdma_qos_params qos_info = {};
 
                if (!iwdev->vsi.tc_change_pending)
                        return;
                if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
                        iwdev->dcb_vlan_mode = qos_info.num_tc > 1 && !l2params.dscp_mode;
                irdma_change_l2params(&iwdev->vsi, &l2params);
-       } else if (*event->type & BIT(IIDC_EVENT_CRIT_ERR)) {
+       } else if (*event->type & BIT(IIDC_RDMA_EVENT_CRIT_ERR)) {
                ibdev_warn(&iwdev->ibdev, "ICE OICR event notification: oicr = 0x%08x\n",
                           event->reg);
                if (event->reg & IRDMAPFINT_OICR_PE_CRITERR_M) {
 
 static void irdma_remove(struct auxiliary_device *aux_dev)
 {
-       struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev,
-                                                           struct iidc_auxiliary_dev,
-                                                           adev);
-       struct ice_pf *pf = iidc_adev->pf;
        struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
+       struct iidc_rdma_core_auxiliary_dev *iidc_adev;
+       struct ice_pf *pf;
+
+       iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+       pf = iidc_adev->pf;
 
        irdma_ib_unregister_device(iwdev);
        ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, false);
 
 static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
 {
-       struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev,
-                                                           struct iidc_auxiliary_dev,
-                                                           adev);
-       struct ice_pf *pf = iidc_adev->pf;
-       struct ice_vsi *vsi = ice_get_main_vsi(pf);
-       struct iidc_qos_params qos_info = {};
+       struct iidc_rdma_core_auxiliary_dev *iidc_adev;
+       struct iidc_rdma_qos_params qos_info = {};
+       struct irdma_l2params l2params = {};
        struct irdma_device *iwdev;
        struct irdma_pci_f *rf;
-       struct irdma_l2params l2params = {};
+       struct ice_vsi *vsi;
+       struct ice_pf *pf;
        int err;
 
+       iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+       pf = iidc_adev->pf;
+       vsi = ice_get_main_vsi(pf);
+
        if (!vsi)
                return -EIO;
        iwdev = ib_alloc_device(irdma_device, ibdev);
 
 MODULE_DEVICE_TABLE(auxiliary, irdma_auxiliary_id_table);
 
-static struct iidc_auxiliary_drv irdma_auxiliary_drv = {
+static struct iidc_rdma_core_auxiliary_drv irdma_auxiliary_drv = {
        .adrv = {
            .id_table = irdma_auxiliary_id_table,
            .probe = irdma_probe,
 
        struct ice_aqc_port_ets_elem buf = { 0 };
        struct ice_dcbx_cfg *old_cfg, *curr_cfg;
        struct device *dev = ice_pf_to_dev(pf);
+       struct iidc_rdma_event *event;
        int ret = ICE_DCB_NO_HW_CHG;
-       struct iidc_event *event;
        struct ice_vsi *pf_vsi;
 
        curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
                goto free_cfg;
        }
 
-       set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type);
+       set_bit(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE, event->type);
        ice_send_event_to_aux(pf, event);
        kfree(event);
 
 void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
 {
        struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
-       struct iidc_event *event;
+       struct iidc_rdma_event *event;
        u8 tc_map = 0;
        int v, ret;
 
                if (!event)
                        return;
 
-               set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
+               set_bit(IIDC_RDMA_EVENT_AFTER_TC_CHANGE, event->type);
                ice_send_event_to_aux(pf, event);
                kfree(event);
        }
 
  * This function has to be called with a device_lock on the
  * pf->adev.dev to avoid race conditions.
  */
-static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
+static
+struct iidc_rdma_core_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
 {
        struct auxiliary_device *adev;
 
        if (!adev || !adev->dev.driver)
                return NULL;
 
-       return container_of(adev->dev.driver, struct iidc_auxiliary_drv,
-                           adrv.driver);
+       return container_of(adev->dev.driver,
+                           struct iidc_rdma_core_auxiliary_drv, adrv.driver);
 }
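
The comment above ice_get_auxiliary_drv() is the key constraint for callers: the aux driver's ops may only be dereferenced under the device lock, so the RDMA driver cannot unbind while an event is in flight. A minimal caller sketch follows, assuming pf->adev holds the auxiliary device plugged by ice_plug_aux_dev(); the helper name is illustrative and not part of this patch.

/* Illustrative only: deliver an event while holding the device lock, as
 * the comment above ice_get_auxiliary_drv() requires.  pf->adev is assumed
 * to be the device plugged by ice_plug_aux_dev(); any extra serialization
 * around pf->adev itself is left to the caller.
 */
static void example_deliver_event(struct ice_pf *pf,
                                  struct iidc_rdma_event *event)
{
        struct iidc_rdma_core_auxiliary_drv *iadrv;

        if (!pf->adev)
                return;

        device_lock(&pf->adev->dev);
        iadrv = ice_get_auxiliary_drv(pf);
        if (iadrv && iadrv->event_handler)
                iadrv->event_handler(pf, event);
        device_unlock(&pf->adev->dev);
}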
 
 /**
  * @pf: pointer to PF struct
  * @event: event struct
  */
-void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
+void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_rdma_event *event)
 {
-       struct iidc_auxiliary_drv *iadrv;
+       struct iidc_rdma_core_auxiliary_drv *iadrv;
 
        if (WARN_ON_ONCE(!in_task()))
                return;
  * @pf: struct for PF
  * @reset_type: type of reset
  */
-int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type)
+int ice_rdma_request_reset(struct ice_pf *pf,
+                          enum iidc_rdma_reset_type reset_type)
 {
        enum ice_reset_req reset;
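
        /*
         * Hedged sketch (not necessarily the committed body): map the IIDC
         * reset type onto the ice driver's own enum ice_reset_req and
         * schedule it.  ICE_RESET_* and ice_schedule_reset() come from the
         * ice driver, not from this patch, and are assumptions here.
         */
        switch (reset_type) {
        case IIDC_PFR:
                reset = ICE_RESET_PFR;
                break;
        case IIDC_CORER:
                reset = ICE_RESET_CORER;
                break;
        case IIDC_GLOBR:
                reset = ICE_RESET_GLOBR;
                break;
        default:
                return -EINVAL;
        }

        return ice_schedule_reset(pf, reset);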
 
  * @pf: pointer to PF struct
  * @qos: set of QoS values
  */
-void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
+void ice_get_qos_params(struct ice_pf *pf, struct iidc_rdma_qos_params *qos)
 {
        struct ice_dcbx_cfg *dcbx_cfg;
        unsigned int i;
  */
 static void ice_adev_release(struct device *dev)
 {
-       struct iidc_auxiliary_dev *iadev;
+       struct iidc_rdma_core_auxiliary_dev *iadev;
 
-       iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev);
+       iadev = container_of(dev, struct iidc_rdma_core_auxiliary_dev,
+                            adev.dev);
        kfree(iadev);
 }
 
  */
 int ice_plug_aux_dev(struct ice_pf *pf)
 {
-       struct iidc_auxiliary_dev *iadev;
+       struct iidc_rdma_core_auxiliary_dev *iadev;
        struct auxiliary_device *adev;
        int ret;
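
        /*
         * Hedged sketch of how the rest of this function plausibly proceeds:
         * allocate the wrapper, publish the PF pointer for the consumer's
         * container_of(), and register with the auxiliary bus.  The id/name
         * values are illustrative; the fields ice really programs may differ.
         */
        iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
        if (!iadev)
                return -ENOMEM;

        adev = &iadev->adev;
        iadev->pf = pf;
        adev->id = pf->aux_idx;            /* assumption: per-PF id */
        adev->name = "roce";               /* illustrative name */
        adev->dev.parent = &pf->pdev->dev; /* assumption: PCI parent */
        adev->dev.release = ice_adev_release;

        ret = auxiliary_device_init(adev);
        if (ret) {
                kfree(iadev);
                return ret;
        }

        ret = auxiliary_device_add(adev);
        if (ret) {
                auxiliary_device_uninit(adev);
                return ret;
        }

        pf->adev = adev;        /* assumption: remembered for event dispatch */

        return 0;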
 
 
 
 struct ice_pf;
 
-void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event);
+void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_rdma_event *event);
 
 #endif /* !_ICE_IDC_INT_H_ */
 
        }
 
        if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
-               struct iidc_event *event;
+               struct iidc_rdma_event *event;
 
                event = kzalloc(sizeof(*event), GFP_KERNEL);
                if (event) {
-                       set_bit(IIDC_EVENT_CRIT_ERR, event->type);
+                       set_bit(IIDC_RDMA_EVENT_CRIT_ERR, event->type);
                        /* report the entire OICR value to AUX driver */
                        swap(event->reg, pf->oicr_err_reg);
                        ice_send_event_to_aux(pf, event);
                ice_plug_aux_dev(pf);
 
        if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
-               struct iidc_event *event;
+               struct iidc_rdma_event *event;
 
                event = kzalloc(sizeof(*event), GFP_KERNEL);
                if (event) {
-                       set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
+                       set_bit(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE, event->type);
                        ice_send_event_to_aux(pf, event);
                        kfree(event);
                }
 
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 
-enum iidc_event_type {
-       IIDC_EVENT_BEFORE_MTU_CHANGE,
-       IIDC_EVENT_AFTER_MTU_CHANGE,
-       IIDC_EVENT_BEFORE_TC_CHANGE,
-       IIDC_EVENT_AFTER_TC_CHANGE,
-       IIDC_EVENT_CRIT_ERR,
-       IIDC_EVENT_NBITS                /* must be last */
+enum iidc_rdma_event_type {
+       IIDC_RDMA_EVENT_BEFORE_MTU_CHANGE,
+       IIDC_RDMA_EVENT_AFTER_MTU_CHANGE,
+       IIDC_RDMA_EVENT_BEFORE_TC_CHANGE,
+       IIDC_RDMA_EVENT_AFTER_TC_CHANGE,
+       IIDC_RDMA_EVENT_CRIT_ERR,
+       IIDC_RDMA_EVENT_NBITS           /* must be last */
 };
 
-enum iidc_reset_type {
+enum iidc_rdma_reset_type {
        IIDC_PFR,
        IIDC_CORER,
        IIDC_GLOBR,
        u8 tc; /* TC branch the Qset should belong to */
 };
 
-struct iidc_qos_info {
+struct iidc_rdma_qos_info {
        u64 tc_ctx;
        u8 rel_bw;
        u8 prio_type;
 };
 
 /* Struct to pass QoS info */
-struct iidc_qos_params {
-       struct iidc_qos_info tc_info[IEEE_8021QAZ_MAX_TCS];
+struct iidc_rdma_qos_params {
+       struct iidc_rdma_qos_info tc_info[IEEE_8021QAZ_MAX_TCS];
        u8 up2tc[IIDC_MAX_USER_PRIORITY];
        u8 vport_relative_bw;
        u8 vport_priority_type;
        u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
 };
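
This struct is the QoS snapshot that ice fills in ice_get_qos_params() and that a consumer such as irdma_fill_qos_info() (top of this patch, body elided here) walks. A minimal consumer sketch, using only fields visible in this patch (num_tc, up2tc[], tc_info[].rel_bw); the helper name and the prints are illustrative.

/* Illustrative consumer: dump the QoS snapshot handed over by ice. */
static void example_dump_qos(struct iidc_rdma_qos_params *qos)
{
        unsigned int i;

        pr_debug("num_tc %u\n", qos->num_tc);
        for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
                pr_debug("UP %u -> TC %u\n", i, qos->up2tc[i]);
        for (i = 0; i < qos->num_tc; i++)
                pr_debug("TC %u relative bandwidth %u%%\n", i,
                         qos->tc_info[i].rel_bw);
}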
 
-struct iidc_event {
-       DECLARE_BITMAP(type, IIDC_EVENT_NBITS);
+struct iidc_rdma_event {
+       DECLARE_BITMAP(type, IIDC_RDMA_EVENT_NBITS);
        u32 reg;
 };
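
The event object is just a type bitmap plus one register value, and this patch repeats the same producer pattern in ice_dcb_lib.c and ice_main.c: allocate, mark the event type, hand off, free. A compact sketch of that pattern (the helper name is illustrative, not part of the patch):

/* Illustrative producer: the allocate/set_bit/send/free pattern above. */
static void example_notify_aux(struct ice_pf *pf,
                               enum iidc_rdma_event_type type)
{
        struct iidc_rdma_event *event;

        event = kzalloc(sizeof(*event), GFP_KERNEL);
        if (!event)
                return;

        set_bit(type, event->type);
        ice_send_event_to_aux(pf, event);
        kfree(event);
}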
 
 
 int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset);
 int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset);
-int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type);
+int ice_rdma_request_reset(struct ice_pf *pf,
+                          enum iidc_rdma_reset_type reset_type);
 int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable);
-void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos);
+void ice_get_qos_params(struct ice_pf *pf,
+                       struct iidc_rdma_qos_params *qos);
 int ice_alloc_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry);
 void ice_free_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry);
 
  * instance of this struct dedicated to it.
  */
 
-struct iidc_auxiliary_dev {
+struct iidc_rdma_core_auxiliary_dev {
        struct auxiliary_device adev;
        struct ice_pf *pf;
 };
  * driver will access these ops by performing a container_of on the
  * auxiliary_device->dev.driver.
  */
-struct iidc_auxiliary_drv {
+struct iidc_rdma_core_auxiliary_drv {
        struct auxiliary_driver adrv;
        /* This event_handler is meant to be a blocking call.  For instance,
         * when a BEFORE_MTU_CHANGE event comes in, the event_handler will not
         * return until the auxiliary driver is ready for the MTU change to
         * happen.
         */
-       void (*event_handler)(struct ice_pf *pf, struct iidc_event *event);
+       void (*event_handler)(struct ice_pf *pf, struct iidc_rdma_event *event);
 };
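
On the RDMA side, the driver embeds its auxiliary_driver in this struct and supplies the event_handler that ice_get_auxiliary_drv() recovers via container_of() on dev.driver. A minimal sketch, assuming registration goes through auxiliary_driver_register() on the embedded .adrv; the handler below is illustrative (irdma's real one is irdma_iidc_event_handler earlier in this patch).

/* Illustrative wiring, not irdma's actual module init. */
static void example_event_handler(struct ice_pf *pf,
                                  struct iidc_rdma_event *event)
{
        if (test_bit(IIDC_RDMA_EVENT_CRIT_ERR, event->type))
                pr_warn("aux: critical error, reg 0x%08x\n", event->reg);
}

static struct iidc_rdma_core_auxiliary_drv example_adrv = {
        .adrv = {
                .id_table = irdma_auxiliary_id_table,
                .probe = irdma_probe,
                .remove = irdma_remove,
        },
        .event_handler = example_event_handler,
};

/* module init would then call: auxiliary_driver_register(&example_adrv.adrv); */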
 
 #endif /* _IIDC_RDMA_H_ */