        return readq(g->bar0_va + offset);
 }
 
-static void mana_gd_init_registers(struct pci_dev *pdev)
+static void mana_gd_init_pf_regs(struct pci_dev *pdev)
+{
+       struct gdma_context *gc = pci_get_drvdata(pdev);
+       void __iomem *sriov_base_va;
+       u64 sriov_base_off;
+
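+       /* In PF mode the doorbell page size and base come from
+        * PF-specific BAR0 registers.
+        */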
+       gc->db_page_size = mana_gd_r32(gc, GDMA_PF_REG_DB_PAGE_SIZE) & 0xFFFF;
+       gc->db_page_base = gc->bar0_va +
+                               mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
+
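+       /* BAR0 publishes the offset of the SR-IOV config region, and that
+        * region in turn publishes the shared-memory offset.
+        */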
+       sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);
+
+       sriov_base_va = gc->bar0_va + sriov_base_off;
+       gc->shm_base = sriov_base_va +
+                       mana_gd_r64(gc, sriov_base_off + GDMA_PF_REG_SHM_OFF);
+}
+
+static void mana_gd_init_vf_regs(struct pci_dev *pdev)
 {
        struct gdma_context *gc = pci_get_drvdata(pdev);
 
        gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
 }
 
+static void mana_gd_init_registers(struct pci_dev *pdev)
+{
+       struct gdma_context *gc = pci_get_drvdata(pdev);
+
+       if (gc->is_pf)
+               mana_gd_init_pf_regs(pdev);
+       else
+               mana_gd_init_vf_regs(pdev);
+}
+
 static int mana_gd_query_max_resources(struct pci_dev *pdev)
 {
        struct gdma_context *gc = pci_get_drvdata(pdev);
        mana_gd_remove_irqs(pdev);
 }
 
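+/* The PF and the VF are exposed with distinct PCI device IDs. */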
+static bool mana_is_pf(unsigned short dev_id)
+{
+       return dev_id == MANA_PF_DEVICE_ID;
+}
+
 static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct gdma_context *gc;
        if (!bar0_va)
                goto free_gc;
 
+       gc->is_pf = mana_is_pf(pdev->device);
        gc->bar0_va = bar0_va;
        gc->dev = &pdev->dev;
 
-
        err = mana_gd_setup(pdev);
        if (err)
                goto unmap_bar;
 #endif
 
 static const struct pci_device_id mana_id_table[] = {
-       { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, 0x00BA) },
+       { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_PF_DEVICE_ID) },
+       { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_VF_DEVICE_ID) },
        { }
 };
 
 
                        hwc->rxq->msg_buf->gpa_mkey = val;
                        hwc->txq->msg_buf->gpa_mkey = val;
                        break;
+
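+               /* In PF mode the HW reports the destination vRQ and vRCQ
+                * that HWC sends must be addressed to.
+                */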
+               case HWC_INIT_DATA_PF_DEST_RQ_ID:
+                       hwc->pf_dest_vrq_id = val;
+                       break;
+
+               case HWC_INIT_DATA_PF_DEST_CQ_ID:
+                       hwc->pf_dest_vrcq_id = val;
+                       break;
                }
 
                break;
 int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
                          const void *req, u32 resp_len, void *resp)
 {
+       struct gdma_context *gc = hwc->gdma_dev->gdma_context;
        struct hwc_work_request *tx_wr;
        struct hwc_wq *txq = hwc->txq;
        struct gdma_req_hdr *req_msg;
        struct hwc_caller_ctx *ctx;
+       u32 dest_vrcq = 0;
+       u32 dest_vrq = 0;
        u16 msg_id;
        int err;
 
 
        tx_wr->msg_size = req_len;
 
-       err = mana_hwc_post_tx_wqe(txq, tx_wr, 0, 0, false);
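+       /* A VF sends to vRQ/vRCQ 0; the PF must target the destination
+        * IDs the HW provided at HWC init time.
+        */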
+       if (gc->is_pf) {
+               dest_vrq = hwc->pf_dest_vrq_id;
+               dest_vrcq = hwc->pf_dest_vrcq_id;
+       }
+
+       err = mana_hwc_post_tx_wqe(txq, tx_wr, dest_vrq, dest_vrcq, false);
        if (err) {
                dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
                goto out;
 
        unsigned int num_queues;
 
        mana_handle_t port_handle;
+       mana_handle_t pf_filter_handle;
 
        u16 port_idx;
 
        MANA_FENCE_RQ           = 0x20006,
        MANA_CONFIG_VPORT_RX    = 0x20007,
        MANA_QUERY_VPORT_CONFIG = 0x20008,
+
+       /* Privileged commands for the PF mode */
+       MANA_REGISTER_FILTER    = 0x28000,
+       MANA_DEREGISTER_FILTER  = 0x28001,
+       MANA_REGISTER_HW_PORT   = 0x28003,
+       MANA_DEREGISTER_HW_PORT = 0x28004,
 };
 
 /* Query Device Configuration */
        struct gdma_resp_hdr hdr;
 }; /* HW DATA */
 
+/* Register HW vPort */
+struct mana_register_hw_vport_req {
+       struct gdma_req_hdr hdr;
+       u16 attached_gfid;
+       u8 is_pf_default_vport;
+       u8 reserved1;
+       u8 allow_all_ether_types;
+       u8 reserved2;
+       u8 reserved3;
+       u8 reserved4;
+}; /* HW DATA */
+
+struct mana_register_hw_vport_resp {
+       struct gdma_resp_hdr hdr;
+       mana_handle_t hw_vport_handle;
+}; /* HW DATA */
+
+/* Deregister HW vPort */
+struct mana_deregister_hw_vport_req {
+       struct gdma_req_hdr hdr;
+       mana_handle_t hw_vport_handle;
+}; /* HW DATA */
+
+struct mana_deregister_hw_vport_resp {
+       struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+/* Register filter */
+struct mana_register_filter_req {
+       struct gdma_req_hdr hdr;
+       mana_handle_t vport;
+       u8 mac_addr[6];
+       u8 reserved1;
+       u8 reserved2;
+       u8 reserved3;
+       u8 reserved4;
+       u16 reserved5;
+       u32 reserved6;
+       u32 reserved7;
+       u32 reserved8;
+}; /* HW DATA */
+
+struct mana_register_filter_resp {
+       struct gdma_resp_hdr hdr;
+       mana_handle_t filter_handle;
+}; /* HW DATA */
+
+/* Deregister filter */
+struct mana_deregister_filter_req {
+       struct gdma_req_hdr hdr;
+       mana_handle_t filter_handle;
+}; /* HW DATA */
+
+struct mana_deregister_filter_resp {
+       struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
 #define MANA_MAX_NUM_QUEUES 64
 
 #define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
 
        return 0;
 }
 
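+/* Register the PF's default HW vPort and save the returned handle as the
+ * port handle.
+ */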
+static int mana_pf_register_hw_vport(struct mana_port_context *apc)
+{
+       struct mana_register_hw_vport_resp resp = {};
+       struct mana_register_hw_vport_req req = {};
+       int err;
+
+       mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
+                            sizeof(req), sizeof(resp));
+       req.attached_gfid = 1;
+       req.is_pf_default_vport = 1;
+       req.allow_all_ether_types = 1;
+
+       err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+                               sizeof(resp));
+       if (err) {
+               netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
+               return err;
+       }
+
+       err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
+                                  sizeof(resp));
+       if (err || resp.hdr.status) {
+               netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
+                          err, resp.hdr.status);
+               return err ? err : -EPROTO;
+       }
+
+       apc->port_handle = resp.hw_vport_handle;
+       return 0;
+}
+
+static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
+{
+       struct mana_deregister_hw_vport_resp resp = {};
+       struct mana_deregister_hw_vport_req req = {};
+       int err;
+
+       mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
+                            sizeof(req), sizeof(resp));
+       req.hw_vport_handle = apc->port_handle;
+
+       err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+                               sizeof(resp));
+       if (err) {
+               netdev_err(apc->ndev, "Failed to deregister hw vPort: %d\n",
+                          err);
+               return;
+       }
+
+       err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
+                                  sizeof(resp));
+       if (err || resp.hdr.status)
+               netdev_err(apc->ndev,
+                          "Failed to deregister hw vPort: %d, 0x%x\n",
+                          err, resp.hdr.status);
+}
+
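+/* Install a filter for the port's MAC address on the vPort, keeping the
+ * returned handle for deregistration at teardown.
+ */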
+static int mana_pf_register_filter(struct mana_port_context *apc)
+{
+       struct mana_register_filter_resp resp = {};
+       struct mana_register_filter_req req = {};
+       int err;
+
+       mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
+                            sizeof(req), sizeof(resp));
+       req.vport = apc->port_handle;
+       memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);
+
+       err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+                               sizeof(resp));
+       if (err) {
+               netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
+               return err;
+       }
+
+       err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
+                                  sizeof(resp));
+       if (err || resp.hdr.status) {
+               netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
+                          err, resp.hdr.status);
+               return err ? err : -EPROTO;
+       }
+
+       apc->pf_filter_handle = resp.filter_handle;
+       return 0;
+}
+
+static void mana_pf_deregister_filter(struct mana_port_context *apc)
+{
+       struct mana_deregister_filter_resp resp = {};
+       struct mana_deregister_filter_req req = {};
+       int err;
+
+       mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
+                            sizeof(req), sizeof(resp));
+       req.filter_handle = apc->pf_filter_handle;
+
+       err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+                               sizeof(resp));
+       if (err) {
+               netdev_err(apc->ndev, "Failed to deregister filter: %d\n",
+                          err);
+               return;
+       }
+
+       err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
+                                  sizeof(resp));
+       if (err || resp.hdr.status)
+               netdev_err(apc->ndev,
+                          "Failed to deregister filter: %d, 0x%x\n",
+                          err, resp.hdr.status);
+}
+
 static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
                                 u32 proto_minor_ver, u32 proto_micro_ver,
                                 u16 *max_num_vports)
 
 static void mana_destroy_vport(struct mana_port_context *apc)
 {
+       struct gdma_dev *gd = apc->ac->gdma_dev;
        struct mana_rxq *rxq;
        u32 rxq_idx;
 
        }
 
        mana_destroy_txq(apc);
+
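+       /* A PF also tears down the HW vPort it registered at creation. */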
+       if (gd->gdma_context->is_pf)
+               mana_pf_deregister_hw_vport(apc);
 }
 
 static int mana_create_vport(struct mana_port_context *apc,
 
        apc->default_rxobj = INVALID_MANA_HANDLE;
 
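+       /* In PF mode, register the HW vPort before configuring the vPort. */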
+       if (gd->gdma_context->is_pf) {
+               err = mana_pf_register_hw_vport(apc);
+               if (err)
+                       return err;
+       }
+
        err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
        if (err)
                return err;
 int mana_alloc_queues(struct net_device *ndev)
 {
        struct mana_port_context *apc = netdev_priv(ndev);
+       struct gdma_dev *gd = apc->ac->gdma_dev;
        int err;
 
        err = mana_create_vport(apc, ndev);
        if (err)
                goto destroy_vport;
 
+       if (gd->gdma_context->is_pf) {
+               err = mana_pf_register_filter(apc);
+               if (err)
+                       goto destroy_vport;
+       }
+
        mana_chn_setxdp(apc, mana_xdp_get(apc));
 
        return 0;
 static int mana_dealloc_queues(struct net_device *ndev)
 {
        struct mana_port_context *apc = netdev_priv(ndev);
+       struct gdma_dev *gd = apc->ac->gdma_dev;
        struct mana_txq *txq;
        int i, err;
 
 
        mana_chn_setxdp(apc, NULL);
 
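+       /* In PF mode, remove the MAC filter before draining the queues. */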
+       if (gd->gdma_context->is_pf)
+               mana_pf_deregister_filter(apc);
+
        /* No packet can be transmitted now since apc->port_is_up is false.
         * There is still a tiny chance that mana_poll_tx_cq() can re-enable
         * a txq because it may not timely see apc->port_is_up being cleared
        apc->max_queues = gc->max_num_queues;
        apc->num_queues = gc->max_num_queues;
        apc->port_handle = INVALID_MANA_HANDLE;
+       apc->pf_filter_handle = INVALID_MANA_HANDLE;
        apc->port_idx = port_idx;
 
        ndev->netdev_ops = &mana_devops;