#include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/if_ether.h>
 
 /* PCIe device related definition. */
 #define ERDMA_PCI_WIDTH 64
        CMDQ_OPCODE_REG_MR = 8,
        CMDQ_OPCODE_DEREG_MR = 9,
        CMDQ_OPCODE_SET_GID = 14,
+       CMDQ_OPCODE_CREATE_AH = 15,
+       CMDQ_OPCODE_DESTROY_AH = 16,
 };
 
 enum CMDQ_COMMON_OPCODE {
        u32 cfg;
 };
 
+/* create_av cfg0 */
+#define ERDMA_CMD_CREATE_AV_FL_MASK GENMASK(19, 0)
+#define ERDMA_CMD_CREATE_AV_NTYPE_MASK BIT(20)
+
+/*
+ * Address-vector layout consumed by the CREATE_AH command.  cfg0 packs the
+ * 20-bit flow label and the network-type bit via the ERDMA_CMD_CREATE_AV_*
+ * masks above.  NOTE(review): rsvd/padding are never written by the driver;
+ * presumably the device expects them to be zero -- confirm against the
+ * firmware interface spec.
+ */
+struct erdma_av_cfg {
+       u32 cfg0;                /* ERDMA_CMD_CREATE_AV_{FL,NTYPE}_MASK */
+       u8 traffic_class;
+       u8 hop_limit;
+       u8 sl;                   /* service level */
+       u8 rsvd;
+       u16 udp_sport;           /* RoCEv2 UDP source port */
+       u16 sgid_index;
+       u8 dmac[ETH_ALEN];       /* destination MAC address */
+       u8 padding[2];
+       u8 dgid[ERDMA_ROCEV2_GID_SIZE];  /* destination GID, raw bytes */
+};
+
+/* Command-queue request body for CMDQ_OPCODE_CREATE_AH. */
+struct erdma_cmdq_create_ah_req {
+       u64 hdr;        /* common cmdq request header */
+       u32 pdn;        /* protection domain number */
+       u32 ahn;        /* address handle number */
+       struct erdma_av_cfg av_cfg;
+};
+
+/* Command-queue request body for CMDQ_OPCODE_DESTROY_AH. */
+struct erdma_cmdq_destroy_ah_req {
+       u64 hdr;        /* common cmdq request header */
+       u32 pdn;        /* protection domain number */
+       u32 ahn;        /* address handle number being destroyed */
+};
+
 /* modify qp cfg */
 #define ERDMA_CMD_MODIFY_QP_STATE_MASK GENMASK(31, 24)
 #define ERDMA_CMD_MODIFY_QP_CC_MASK GENMASK(23, 20)
 #define ERDMA_CMD_DEV_CAP_MAX_CQE_MASK GENMASK_ULL(47, 40)
 #define ERDMA_CMD_DEV_CAP_FLAGS_MASK GENMASK_ULL(31, 24)
 #define ERDMA_CMD_DEV_CAP_MAX_RECV_WR_MASK GENMASK_ULL(23, 16)
+#define ERDMA_CMD_DEV_CAP_MAX_AH_MASK GENMASK_ULL(15, 8)
 #define ERDMA_CMD_DEV_CAP_MAX_MR_SIZE_MASK GENMASK_ULL(7, 0)
 
 /* cap qword 1 definition */
 
        dev->attrs.max_mw = 1 << ERDMA_GET_CAP(MAX_MW, cap1);
        dev->attrs.max_recv_wr = 1 << ERDMA_GET_CAP(MAX_RECV_WR, cap0);
        dev->attrs.max_gid = 1 << ERDMA_GET_CAP(MAX_GID, cap0);
+       dev->attrs.max_ah = 1 << ERDMA_GET_CAP(MAX_AH, cap0);
        dev->attrs.local_dma_key = ERDMA_GET_CAP(DMA_LOCAL_KEY, cap1);
        dev->attrs.cc = ERDMA_GET_CAP(DEFAULT_CC, cap1);
        dev->attrs.max_qp = ERDMA_NQP_PER_QBLOCK * ERDMA_GET_CAP(QBLOCK, cap1);
 
        dev->res_cb[ERDMA_RES_TYPE_PD].max_cap = ERDMA_MAX_PD;
        dev->res_cb[ERDMA_RES_TYPE_STAG_IDX].max_cap = dev->attrs.max_mr;
+       dev->res_cb[ERDMA_RES_TYPE_AH].max_cap = dev->attrs.max_ah;
 
        erdma_cmdq_build_reqhdr(&req_hdr, CMDQ_SUBMOD_COMMON,
                                CMDQ_OPCODE_QUERY_FW_INFO);
        .add_gid = erdma_add_gid,
        .del_gid = erdma_del_gid,
        .query_pkey = erdma_query_pkey,
+       .create_ah = erdma_create_ah,
+       .destroy_ah = erdma_destroy_ah,
 };
 
 static const struct ib_device_ops erdma_device_ops_iwarp = {
 
        attr->max_fast_reg_page_list_len = ERDMA_MAX_FRMR_PA;
        attr->page_size_cap = ERDMA_PAGE_SIZE_SUPPORT;
 
-       if (erdma_device_rocev2(dev))
+       if (erdma_device_rocev2(dev)) {
                attr->max_pkeys = ERDMA_MAX_PKEYS;
+               attr->max_ah = dev->attrs.max_ah;
+       }
 
        if (dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_ATOMIC)
                attr->atomic_cap = IB_ATOMIC_GLOB;
        *pkey = ERDMA_DEFAULT_PKEY;
        return 0;
 }
+
+/*
+ * erdma_create_ah() - Create a RoCEv2 address handle.
+ * @ibah: core AH object, embedded in struct erdma_ah.
+ * @init_attr: address attributes supplied by the RDMA core.
+ * @udata: user-space data, unused here.
+ *
+ * Allocates a device AH number, caches the address vector in software so
+ * erdma_query_ah() can report it back, then installs the AV in hardware
+ * through the command queue.  Returns 0 on success or a negative errno.
+ *
+ * NOTE(review): init_attr->flags (RDMA_CREATE_AH_SLEEPABLE) is not
+ * consulted -- confirm erdma_post_cmd_wait() is safe from every context
+ * the core may invoke this in.
+ */
+int erdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+                   struct ib_udata *udata)
+{
+       const struct ib_global_route *grh =
+               rdma_ah_read_grh(init_attr->ah_attr);
+       struct erdma_dev *dev = to_edev(ibah->device);
+       struct erdma_pd *pd = to_epd(ibah->pd);
+       struct erdma_ah *ah = to_eah(ibah);
+       struct erdma_cmdq_create_ah_req req;
+       u32 udp_sport;
+       int ret;
+
+       ret = erdma_check_gid_attr(grh->sgid_attr);
+       if (ret)
+               return ret;
+
+       ret = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_AH]);
+       if (ret < 0)
+               return ret;
+
+       ah->ahn = ret;
+
+       /*
+        * Derive the UDP source port from the flow label when one is set;
+        * otherwise spread AHs across the valid RoCEv2 source-port range.
+        */
+       if (grh->flow_label)
+               udp_sport = rdma_flow_label_to_udp_sport(grh->flow_label);
+       else
+               udp_sport =
+                       IB_ROCE_UDP_ENCAP_VALID_PORT_MIN + (ah->ahn & 0x3FFF);
+
+       ah->av.port = rdma_ah_get_port_num(init_attr->ah_attr);
+       ah->av.sgid_index = grh->sgid_index;
+       ah->av.hop_limit = grh->hop_limit;
+       ah->av.traffic_class = grh->traffic_class;
+       ah->av.sl = rdma_ah_get_sl(init_attr->ah_attr);
+       ah->av.flow_label = grh->flow_label;
+       ah->av.udp_sport = udp_sport;
+
+       ether_addr_copy(ah->av.dmac, init_attr->ah_attr->roce.dmac);
+       memcpy(ah->av.dgid, grh->dgid.raw, ERDMA_ROCEV2_GID_SIZE);
+
+       if (ipv6_addr_v4mapped((struct in6_addr *)&grh->dgid))
+               ah->av.ntype = ERDMA_NETWORK_TYPE_IPV4;
+       else
+               ah->av.ntype = ERDMA_NETWORK_TYPE_IPV6;
+
+       /*
+        * Zero the whole request so the rsvd/padding bytes of the AV wire
+        * layout do not leak uninitialized stack contents to the device.
+        */
+       memset(&req, 0, sizeof(req));
+
+       erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+                               CMDQ_OPCODE_CREATE_AH);
+
+       req.pdn = pd->pdn;
+       req.ahn = ah->ahn;
+
+       req.av_cfg.cfg0 =
+               FIELD_PREP(ERDMA_CMD_CREATE_AV_FL_MASK, ah->av.flow_label) |
+               FIELD_PREP(ERDMA_CMD_CREATE_AV_NTYPE_MASK, ah->av.ntype);
+       req.av_cfg.traffic_class = ah->av.traffic_class;
+       req.av_cfg.hop_limit = ah->av.hop_limit;
+       req.av_cfg.sl = ah->av.sl;
+       req.av_cfg.udp_sport = ah->av.udp_sport;
+       req.av_cfg.sgid_index = ah->av.sgid_index;
+       ether_addr_copy(req.av_cfg.dmac, ah->av.dmac);
+       memcpy(req.av_cfg.dgid, ah->av.dgid, ERDMA_ROCEV2_GID_SIZE);
+
+       ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+       if (ret) {
+               /* Undo the index allocation if the device rejected the AH. */
+               erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_AH], ah->ahn);
+               return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * erdma_destroy_ah() - Tear down an address handle in hardware and
+ * recycle its device AH number.  Returns 0 on success or a negative
+ * errno; on command failure the AH number is deliberately kept
+ * allocated, since the device may still reference it.
+ */
+int erdma_destroy_ah(struct ib_ah *ibah, u32 flags)
+{
+       struct erdma_cmdq_destroy_ah_req req;
+       struct erdma_dev *dev = to_edev(ibah->device);
+       struct erdma_pd *pd = to_epd(ibah->pd);
+       struct erdma_ah *ah = to_eah(ibah);
+       int err;
+
+       erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+                               CMDQ_OPCODE_DESTROY_AH);
+       req.pdn = pd->pdn;
+       req.ahn = ah->ahn;
+
+       err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+       if (!err)
+               erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_AH], ah->ahn);
+
+       return err;
+}
+
+/*
+ * erdma_query_ah() - Report the cached address vector of an AH back to
+ * the RDMA core.  Always succeeds; only RoCE-type attributes are filled.
+ */
+int erdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
+{
+       struct erdma_av *av = &to_eah(ibah)->av;
+
+       memset(ah_attr, 0, sizeof(*ah_attr));
+       ah_attr->type = RDMA_AH_ATTR_TYPE_ROCE;
+
+       rdma_ah_set_port_num(ah_attr, av->port);
+       rdma_ah_set_sl(ah_attr, av->sl);
+       rdma_ah_set_ah_flags(ah_attr, IB_AH_GRH);
+       rdma_ah_set_grh(ah_attr, NULL, av->flow_label, av->sgid_index,
+                       av->hop_limit, av->traffic_class);
+       rdma_ah_set_dgid_raw(ah_attr, av->dgid);
+
+       return 0;
+}
 
        int refcnt;
 };
 
+/* Software copy of an address vector, cached so erdma_query_ah() can
+ * answer without touching hardware.
+ */
+struct erdma_av {
+       u8 port;
+       u8 hop_limit;
+       u8 traffic_class;
+       u8 sl;                   /* service level */
+       u8 sgid_index;
+       u16 udp_sport;           /* RoCEv2 UDP source port */
+       u32 flow_label;
+       u8 dmac[ETH_ALEN];       /* destination MAC address */
+       u8 dgid[ERDMA_ROCEV2_GID_SIZE];  /* destination GID, raw bytes */
+       enum erdma_network_type ntype;   /* IPv4-mapped vs. IPv6 dgid */
+};
+
+/* Driver-private address handle wrapping the core ib_ah. */
+struct erdma_ah {
+       struct ib_ah ibah;
+       struct erdma_av av;      /* cached address vector */
+       u32 ahn;                 /* device AH number (ERDMA_RES_TYPE_AH idx) */
+};
+
 struct erdma_uqp {
        struct erdma_mem sq_mem;
        struct erdma_mem rq_mem;
        return container_of(ibcq, struct erdma_cq, ibcq);
 }
 
+/* Convert a core ib_ah to its containing driver-private erdma_ah. */
+static inline struct erdma_ah *to_eah(struct ib_ah *ibah)
+{
+       return container_of(ibah, struct erdma_ah, ibah);
+}
+
 static inline int erdma_check_gid_attr(const struct ib_gid_attr *attr)
 {
        u8 ntype = rdma_gid_attr_network_type(attr);
 int erdma_add_gid(const struct ib_gid_attr *attr, void **context);
 int erdma_del_gid(const struct ib_gid_attr *attr, void **context);
 int erdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
+int erdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+                   struct ib_udata *udata);
+int erdma_destroy_ah(struct ib_ah *ibah, u32 flags);
+int erdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
 
 #endif