}
 
 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
-                         unsigned int size, enum hns_desc_type type)
+                         unsigned int size, unsigned int type)
 {
 #define HNS3_LIKELY_BD_NUM     1
 
        int k, sizeoflast;
        dma_addr_t dma;
 
-       if (type == DESC_TYPE_FRAGLIST_SKB ||
-           type == DESC_TYPE_SKB) {
+       if (type & (DESC_TYPE_FRAGLIST_SKB | DESC_TYPE_SKB)) {
                struct sk_buff *skb = (struct sk_buff *)priv;
 
                dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
 
[...]
        for (i = 0; i < ring->desc_num; i++) {
                struct hns3_desc *desc = &ring->desc[ring->next_to_use];
+               struct hns3_desc_cb *desc_cb;
 
                memset(desc, 0, sizeof(*desc));
 
                /* rollback one */
                ring_ptr_move_bw(ring, next_to_use);
 
-               if (!ring->desc_cb[ring->next_to_use].dma)
+               desc_cb = &ring->desc_cb[ring->next_to_use];
+
+               if (!desc_cb->dma)
                        continue;
 
                /* unmap the descriptor dma address */
-               if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB ||
-                   ring->desc_cb[ring->next_to_use].type ==
-                   DESC_TYPE_FRAGLIST_SKB)
-                       dma_unmap_single(dev,
-                                        ring->desc_cb[ring->next_to_use].dma,
-                                       ring->desc_cb[ring->next_to_use].length,
-                                       DMA_TO_DEVICE);
-               else if (ring->desc_cb[ring->next_to_use].length)
-                       dma_unmap_page(dev,
-                                      ring->desc_cb[ring->next_to_use].dma,
-                                      ring->desc_cb[ring->next_to_use].length,
+               if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB))
+                       dma_unmap_single(dev, desc_cb->dma, desc_cb->length,
+                                        DMA_TO_DEVICE);
+               else if (desc_cb->length)
+                       dma_unmap_page(dev, desc_cb->dma, desc_cb->length,
                                       DMA_TO_DEVICE);
 
-               ring->desc_cb[ring->next_to_use].length = 0;
-               ring->desc_cb[ring->next_to_use].dma = 0;
-               ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN;
+               desc_cb->length = 0;
+               desc_cb->dma = 0;
+               desc_cb->type = DESC_TYPE_UNKNOWN;
        }
 }
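
For context on the conversion itself: the equality-to-bitmask change above is only sound if every DESC_TYPE_* value occupies its own bit, with DESC_TYPE_UNKNOWN left as 0 so a freshly cleared desc_cb can never match a mask test. A minimal standalone sketch of that invariant; the flag values here are an assumption modeled on the checks in this patch (the real definitions live in hns3_enet.h):

#include <stdio.h>

/* assumed one-hot layout; DESC_TYPE_UNKNOWN stays 0 on purpose */
enum hns_desc_type_model {
	DESC_TYPE_UNKNOWN	= 0,
	DESC_TYPE_SKB		= 1 << 0,
	DESC_TYPE_FRAGLIST_SKB	= 1 << 1,
	DESC_TYPE_PAGE		= 1 << 2,
};

/* equivalent to "type == DESC_TYPE_SKB || type == DESC_TYPE_FRAGLIST_SKB"
 * as long as each descriptor carries exactly one type bit
 */
static int is_linear_skb_data(unsigned int type)
{
	return !!(type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB));
}

int main(void)
{
	printf("%d %d %d %d\n",
	       is_linear_skb_data(DESC_TYPE_SKB),		/* 1 */
	       is_linear_skb_data(DESC_TYPE_FRAGLIST_SKB),	/* 1 */
	       is_linear_skb_data(DESC_TYPE_PAGE),		/* 0 */
	       is_linear_skb_data(DESC_TYPE_UNKNOWN));		/* 0 */
	return 0;
}

Widening the parameter from enum hns_desc_type to unsigned int fits the same scheme: a plain integer can carry any OR-combination of type bits, so new descriptor kinds can be added later without changing these signatures again.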
 
 static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
-                                struct sk_buff *skb, enum hns_desc_type type)
+                                struct sk_buff *skb, unsigned int type)
 {
        unsigned int size = skb_headlen(skb);
        struct sk_buff *frag_skb;
[...]
 static void hns3_free_buffer(struct hns3_enet_ring *ring,
                             struct hns3_desc_cb *cb, int budget)
 {
-       if (cb->type == DESC_TYPE_SKB)
+       if (cb->type & DESC_TYPE_SKB)
                napi_consume_skb(cb->priv, budget);
        else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias)
                __page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
[...]
 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
                              struct hns3_desc_cb *cb)
 {
-       if (cb->type == DESC_TYPE_SKB || cb->type == DESC_TYPE_FRAGLIST_SKB)
+       if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB))
                dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
                                 ring_to_dma_dir(ring));
        else if (cb->length)
                dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
                               ring_to_dma_dir(ring));
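
hns3_unmap_buffer() has to mirror how each buffer was mapped: linear skb data (DESC_TYPE_SKB, DESC_TYPE_FRAGLIST_SKB) was mapped with dma_map_single(), while page frags went through skb_frag_dma_map(), a dma_map_page() wrapper, and so need dma_unmap_page(). A compilable userspace model of that dispatch; the *_model names are illustrative stand-ins, not the kernel DMA API:

#include <stdio.h>

enum { TYPE_SKB = 1 << 0, TYPE_FRAGLIST_SKB = 1 << 1, TYPE_PAGE = 1 << 2 };

struct cb_model {
	unsigned int type;
	unsigned int length;	/* 0 means nothing was mapped */
};

/* stand-ins for dma_unmap_single() / dma_unmap_page() */
static void unmap_single_model(void) { puts("dma_unmap_single"); }
static void unmap_page_model(void)   { puts("dma_unmap_page"); }

static void unmap_buffer_model(const struct cb_model *cb)
{
	if (cb->type & (TYPE_SKB | TYPE_FRAGLIST_SKB))
		unmap_single_model();	/* was dma_map_single() */
	else if (cb->length)
		unmap_page_model();	/* was skb_frag_dma_map() */
}

int main(void)
{
	struct cb_model head = { .type = TYPE_SKB,  .length = 66 };
	struct cb_model frag = { .type = TYPE_PAGE, .length = 4096 };

	unmap_buffer_model(&head);	/* prints dma_unmap_single */
	unmap_buffer_model(&frag);	/* prints dma_unmap_page */
	return 0;
}

Stashing the type bits in desc_cb at map time is what lets this teardown path pick the matching unmap primitive without re-deriving where the buffer came from.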
 
[...]
                desc_cb = &ring->desc_cb[ntc];
 
-               if (desc_cb->type == DESC_TYPE_SKB) {
+               if (desc_cb->type & DESC_TYPE_SKB) {
                        (*pkts)++;
                        (*bytes) += desc_cb->send_bytes;
                }
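
In the reclaim fragment above, only the BD holding the skb head is tagged DESC_TYPE_SKB; its frag BDs carry DESC_TYPE_PAGE, and fraglist segments use the separate DESC_TYPE_FRAGLIST_SKB bit, so gating the accounting on the SKB bit counts each transmitted packet exactly once no matter how many BDs it spans. A toy walk over a modeled ring (layout and numbers are illustrative, not the driver's):

#include <stdio.h>

enum { TYPE_SKB = 1 << 0, TYPE_PAGE = 1 << 2 };

struct cb_model {
	unsigned int type;
	unsigned int send_bytes;	/* only valid on the head BD */
};

int main(void)
{
	/* one 3-BD packet (head + two frags), then a 1-BD packet */
	struct cb_model ring[] = {
		{ TYPE_SKB, 1500 }, { TYPE_PAGE, 0 }, { TYPE_PAGE, 0 },
		{ TYPE_SKB, 64 },
	};
	int pkts = 0, bytes = 0;
	size_t i;

	for (i = 0; i < sizeof(ring) / sizeof(ring[0]); i++) {
		if (ring[i].type & TYPE_SKB) {
			pkts++;
			bytes += ring[i].send_bytes;
		}
	}
	printf("pkts=%d bytes=%d\n", pkts, bytes);	/* pkts=2 bytes=1564 */
	return 0;
}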