static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
                               u16 *allocated_size, bool is_alloc);
+static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
+static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
 
 static struct hnae3_ae_algo ae_algo;
 
 
        mod_timer(&hdev->service_timer, jiffies + HZ);
        hdev->hw_stats.stats_timer++;
+       hdev->fd_arfs_expire_timer++;
        hclge_task_schedule(hdev);
 }
 
        hclge_update_port_info(hdev);
        hclge_update_link_status(hdev);
        hclge_update_vport_alive(hdev);
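+       /* the service timer fires once per second, so expired arfs rules
+        * are reclaimed roughly every HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL
+        * seconds
+        */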
+       if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
+               hclge_rfs_filter_expire(hdev);
+               hdev->fd_arfs_expire_timer = 0;
+       }
        hclge_service_complete(hdev);
 }
 
        rule->action = action;
        rule->rule_type = HCLGE_FD_EP_ACTIVE;
 
+       /* when the user configures a rule via ethtool, clear all arfs
+        * rules first to avoid rule conflicts
+        */
+       hclge_clear_arfs_rules(handle);
+
        spin_lock_bh(&hdev->fd_rule_lock);
        ret = hclge_fd_config_rule(hdev, rule);
 
        return 0;
 }
 
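+/* extract the tuples the flow director matches on from the flow_keys
+ * dissected by the stack
+ */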
+static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
+                                    struct hclge_fd_rule_tuples *tuples)
+{
+       tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
+       tuples->ip_proto = fkeys->basic.ip_proto;
+       tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
+
+       if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
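+               /* an IPv4 address occupies only the last u32 of the
+                * IPv6-sized address array; the caller zeroes the rest
+                */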
+               tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
+               tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
+       } else {
+               memcpy(tuples->src_ip,
+                      fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
+                      sizeof(tuples->src_ip));
+               memcpy(tuples->dst_ip,
+                      fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
+                      sizeof(tuples->dst_ip));
+       }
+}
+
+/* traverse all rules, check whether an existing rule has the same tuples */
+static struct hclge_fd_rule *
+hclge_fd_search_flow_keys(struct hclge_dev *hdev,
+                         const struct hclge_fd_rule_tuples *tuples)
+{
+       struct hclge_fd_rule *rule = NULL;
+       struct hlist_node *node;
+
+       hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+               if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
+                       return rule;
+       }
+
+       return NULL;
+}
+
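+/* build an arfs rule from the dissected tuples: fields arfs does not
+ * match on (MAC, VLAN tag, IP TOS, source port) are marked unused, and
+ * each used tuple is matched exactly via an all-ones mask
+ */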
+static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
+                                    struct hclge_fd_rule *rule)
+{
+       rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+                            BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
+                            BIT(INNER_SRC_PORT);
+       rule->action = 0;
+       rule->vf_id = 0;
+       rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
+       if (tuples->ether_proto == ETH_P_IP) {
+               if (tuples->ip_proto == IPPROTO_TCP)
+                       rule->flow_type = TCP_V4_FLOW;
+               else
+                       rule->flow_type = UDP_V4_FLOW;
+       } else {
+               if (tuples->ip_proto == IPPROTO_TCP)
+                       rule->flow_type = TCP_V6_FLOW;
+               else
+                       rule->flow_type = UDP_V6_FLOW;
+       }
+       memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
+       memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
+}
+
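+/* called through the .add_arfs_entry op when arfs wants to steer a flow:
+ * create a flow director rule for @fkeys directed at @queue_id, or move
+ * an existing rule to the new queue; returns the rule location, which
+ * serves as the filter id reported back to the rps core, on success
+ */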
+static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
+                                     u16 flow_id, struct flow_keys *fkeys)
+{
+#ifdef CONFIG_RFS_ACCEL
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_fd_rule_tuples new_tuples;
+       struct hclge_dev *hdev = vport->back;
+       struct hclge_fd_rule *rule;
+       u16 tmp_queue_id;
+       u16 bit_id;
+       int ret;
+
+       if (!hnae3_dev_fd_supported(hdev))
+               return -EOPNOTSUPP;
+
+       memset(&new_tuples, 0, sizeof(new_tuples));
+       hclge_fd_get_flow_tuples(fkeys, &new_tuples);
+
+       spin_lock_bh(&hdev->fd_rule_lock);
+
+       /* when an fd rule added by the user already exists,
+        * arfs should not work
+        */
+       if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
+               spin_unlock_bh(&hdev->fd_rule_lock);
+
+               return -EOPNOTSUPP;
+       }
+
+       /* check whether a flow director filter already exists for this
+        * flow: if not, create a new one; if one exists with a different
+        * queue id, modify it; if one exists with the same queue id, do
+        * nothing
+        */
+       rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
+       if (!rule) {
+               bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
+               if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
+                       spin_unlock_bh(&hdev->fd_rule_lock);
+
+                       return -ENOSPC;
+               }
+
+               /* fd_rule_lock is held, so the allocation must not sleep */
+               rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
+               if (!rule) {
+                       spin_unlock_bh(&hdev->fd_rule_lock);
+
+                       return -ENOMEM;
+               }
+
+               set_bit(bit_id, hdev->fd_bmap);
+               rule->location = bit_id;
+               rule->flow_id = flow_id;
+               rule->queue_id = queue_id;
+               hclge_fd_build_arfs_rule(&new_tuples, rule);
+               ret = hclge_fd_config_rule(hdev, rule);
+
+               spin_unlock_bh(&hdev->fd_rule_lock);
+
+               if (ret)
+                       return ret;
+
+               return rule->location;
+       }
+
+       spin_unlock_bh(&hdev->fd_rule_lock);
+
+       if (rule->queue_id == queue_id)
+               return rule->location;
+
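+       /* the flow has moved to another queue: rewrite the rule's action
+        * in hardware, restoring the old queue id if the write fails
+        */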
+       tmp_queue_id = rule->queue_id;
+       rule->queue_id = queue_id;
+       ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+       if (ret) {
+               rule->queue_id = tmp_queue_id;
+               return ret;
+       }
+
+       return rule->location;
+#else
+       /* keep a well-defined return value when CONFIG_RFS_ACCEL is off */
+       return -EOPNOTSUPP;
+#endif
+}
+
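+/* called periodically from the service task: remove arfs rules that the
+ * rps core says may have expired
+ */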
+static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
+{
+#ifdef CONFIG_RFS_ACCEL
+       struct hnae3_handle *handle = &hdev->vport[0].nic;
+       struct hclge_fd_rule *rule;
+       struct hlist_node *node;
+       HLIST_HEAD(del_list);
+
+       spin_lock_bh(&hdev->fd_rule_lock);
+       if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
+               spin_unlock_bh(&hdev->fd_rule_lock);
+               return;
+       }
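+       /* move expired rules to a private list under the lock; the
+        * hardware entries are removed after the lock is released
+        */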
+       hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+               if (rps_may_expire_flow(handle->netdev, rule->queue_id,
+                                       rule->flow_id, rule->location)) {
+                       hlist_del_init(&rule->rule_node);
+                       hlist_add_head(&rule->rule_node, &del_list);
+                       hdev->hclge_fd_rule_num--;
+                       clear_bit(rule->location, hdev->fd_bmap);
+               }
+       }
+       spin_unlock_bh(&hdev->fd_rule_lock);
+
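+       /* delete the collected rules from the TCAM outside the lock to
+        * keep the critical section short
+        */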
+       hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
+               hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+                                    rule->location, NULL, false);
+               kfree(rule);
+       }
+#endif
+}
+
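+/* flush the whole rule table when it is owned by arfs; a no-op while
+ * user (ethtool) rules are active
+ */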
+static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
+{
+#ifdef CONFIG_RFS_ACCEL
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_dev *hdev = vport->back;
+
+       if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
+               hclge_del_all_fd_entries(handle, true);
+#endif
+}
+
 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
 {
        struct hclge_vport *vport = hclge_get_vport(handle);
 
        set_bit(HCLGE_STATE_DOWN, &hdev->state);
 
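+       /* flush any arfs rules built for this port before stopping it */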
+       hclge_clear_arfs_rules(handle);
+
        /* If it is not PF reset, the firmware will disable the MAC,
         * so it only need to stop phy here.
         */
        .get_fd_all_rules = hclge_get_all_rules,
        .restore_fd_rules = hclge_restore_fd_entries,
        .enable_fd = hclge_enable_fd,
+       .add_arfs_entry = hclge_add_fd_entry_by_arfs,
        .dbg_run_cmd = hclge_dbg_run_cmd,
        .handle_hw_ras_error = hclge_handle_hw_ras_error,
        .get_hw_reset_stat = hclge_get_hw_reset_stat,