msg_rsp)                                 \
 M(CPT_CTX_CACHE_SYNC,   0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp)    \
 M(CPT_LF_RESET,         0xA08, cpt_lf_reset, cpt_lf_rst_req, msg_rsp)  \
+M(CPT_FLT_ENG_INFO,     0xA09, cpt_flt_eng_info, cpt_flt_eng_info_req, \
+                              cpt_flt_eng_info_rsp)                    \
 /* SDP mbox IDs (range 0x1000 - 0x11FF) */                             \
 M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \
 M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \
        u32 rsvd;
 };
 
+/* Mailbox message format to request info on CPT faulted engines */
+struct cpt_flt_eng_info_req {
+       struct mbox_msghdr hdr;
+       int blkaddr;
+       bool reset;
+       u32 rsvd;
+};
+
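+/* Mailbox message format with CPT faulted and recovered engines info */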
+struct cpt_flt_eng_info_rsp {
+       struct mbox_msghdr hdr;
+       u64 flt_eng_map[CPT_10K_AF_INT_VEC_RVU];
+       u64 rcvrd_eng_map[CPT_10K_AF_INT_VEC_RVU];
+       u64 rsvd;
+};
+
 struct sdp_node_info {
        /* Node to which this PF belongs */
        u8 node_id;
 
        u64  lfreset_reg;
        unsigned char name[NAME_SIZE];
        struct rvu *rvu;
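+       /* Per-vector bitmaps of CPT engines that faulted and recovered */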
+       u64 cpt_flt_eng_map[3];
+       u64 cpt_rcvrd_eng_map[3];
 };
 
 struct rvu {
        struct list_head        mcs_intrq_head;
        /* mcs interrupt queue lock */
        spinlock_t              mcs_intrq_lock;
+       /* CPT interrupt lock, protects faulted/recovered engine maps */
+       spinlock_t              cpt_intr_lock;
 };
 
 static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
 
 
                rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), grp);
                rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val | 1ULL);
+
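+               /* Record fault and recovery status for this engine */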
+               spin_lock(&rvu->cpt_intr_lock);
+               block->cpt_flt_eng_map[vec] |= BIT_ULL(i);
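+               /* Mark the engine recovered if STS bits [1:0] read 0x1 or 0x2 */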
+               val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(eng));
+               val = val & 0x3;
+               if (val == 0x1 || val == 0x2)
+                       block->cpt_rcvrd_eng_map[vec] |= BIT_ULL(i);
+               spin_unlock(&rvu->cpt_intr_lock);
        }
        rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(vec), reg);
 
        return 0;
 }
 
+int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_req *req,
+                                     struct cpt_flt_eng_info_rsp *rsp)
+{
+       struct rvu_block *block;
+       unsigned long flags;
+       int blkaddr, vec;
+
+       blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+       if (blkaddr < 0)
+               return blkaddr;
+
+       block = &rvu->hw->block[blkaddr];
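+       /* Copy out fault/recovery maps; clear them if requested */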
+       for (vec = 0; vec < CPT_10K_AF_INT_VEC_RVU; vec++) {
+               spin_lock_irqsave(&rvu->cpt_intr_lock, flags);
+               rsp->flt_eng_map[vec] = block->cpt_flt_eng_map[vec];
+               rsp->rcvrd_eng_map[vec] = block->cpt_rcvrd_eng_map[vec];
+               if (req->reset) {
+                       block->cpt_flt_eng_map[vec] = 0x0;
+                       block->cpt_rcvrd_eng_map[vec] = 0x0;
+               }
+               spin_unlock_irqrestore(&rvu->cpt_intr_lock, flags);
+       }
+       return 0;
+}
+
 static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
 {
        struct cpt_rxc_time_cfg_req req, prev;
 int rvu_cpt_init(struct rvu *rvu)
 {
        /* Retrieve CPT PF number */
        rvu->cpt_pf_num = get_cpt_pf_num(rvu);
+       spin_lock_init(&rvu->cpt_intr_lock);
+
        return 0;
 }