}
 EXPORT_SYMBOL_GPL(flow_offload_dead);
 
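+/*
+ * Hash and compare only the lookup key of a tuple, i.e. every field up
+ * to (but not including) 'dir', so the flow direction and the per-flow
+ * state that follows it never influence hashing or lookup.
+ */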
+static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
+{
+       const struct flow_offload_tuple *tuple = data;
+
+       return jhash(tuple, offsetof(struct flow_offload_tuple, dir), seed);
+}
+
+static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
+{
+       const struct flow_offload_tuple_rhash *tuplehash = data;
+
+       return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, dir), seed);
+}
+
+static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
+                                       const void *ptr)
+{
+       const struct flow_offload_tuple *tuple = arg->key;
+       const struct flow_offload_tuple_rhash *x = ptr;
+
+       if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, dir)))
+               return 1;
+
+       return 0;
+}
+
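+/*
+ * hashfn, obj_hashfn and obj_cmpfn are all supplied, so rhashtable needs
+ * neither key_len nor key_offset; head_offset locates the rhash_head
+ * embedded in each flow_offload_tuple_rhash.
+ */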
+static const struct rhashtable_params nf_flow_offload_rhash_params = {
+       .head_offset            = offsetof(struct flow_offload_tuple_rhash, node),
+       .hashfn                 = flow_offload_hash,
+       .obj_hashfn             = flow_offload_hash_obj,
+       .obj_cmpfn              = flow_offload_hash_cmp,
+       .automatic_shrinking    = true,
+};
+
 int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
 {
        flow->timeout = (u32)jiffies;
 
        rhashtable_insert_fast(&flow_table->rhashtable,
                               &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
-                              *flow_table->type->params);
+                              nf_flow_offload_rhash_params);
        rhashtable_insert_fast(&flow_table->rhashtable,
                               &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
-                              *flow_table->type->params);
+                              nf_flow_offload_rhash_params);
        return 0;
 }
 EXPORT_SYMBOL_GPL(flow_offload_add);
 
 static void flow_offload_del(struct nf_flowtable *flow_table,
                              struct flow_offload *flow)
 {
        rhashtable_remove_fast(&flow_table->rhashtable,
                               &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
-                              *flow_table->type->params);
+                              nf_flow_offload_rhash_params);
        rhashtable_remove_fast(&flow_table->rhashtable,
                               &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
-                              *flow_table->type->params);
+                              nf_flow_offload_rhash_params);
 
        flow_offload_free(flow);
 }
 
 struct flow_offload_tuple_rhash *
 flow_offload_lookup(struct nf_flowtable *flow_table,
                     struct flow_offload_tuple *tuple)
 {
        return rhashtable_lookup_fast(&flow_table->rhashtable, tuple,
-                                     *flow_table->type->params);
+                                     nf_flow_offload_rhash_params);
 }
 EXPORT_SYMBOL_GPL(flow_offload_lookup);
 
        return 1;
 }
 
-void nf_flow_offload_work_gc(struct work_struct *work)
+static void nf_flow_offload_work_gc(struct work_struct *work)
 {
        struct nf_flowtable *flow_table;
 
        flow_table = container_of(work, struct nf_flowtable, gc_work);
        nf_flow_offload_gc_step(flow_table);
        queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
 }
-EXPORT_SYMBOL_GPL(nf_flow_offload_work_gc);
-
-static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
-{
-       const struct flow_offload_tuple *tuple = data;
-
-       return jhash(tuple, offsetof(struct flow_offload_tuple, dir), seed);
-}
-
-static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
-{
-       const struct flow_offload_tuple_rhash *tuplehash = data;
-
-       return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, dir), seed);
-}
-
-static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
-                                       const void *ptr)
-{
-       const struct flow_offload_tuple *tuple = arg->key;
-       const struct flow_offload_tuple_rhash *x = ptr;
-
-       if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, dir)))
-               return 1;
-
-       return 0;
-}
-
-const struct rhashtable_params nf_flow_offload_rhash_params = {
-       .head_offset            = offsetof(struct flow_offload_tuple_rhash, node),
-       .hashfn                 = flow_offload_hash,
-       .obj_hashfn             = flow_offload_hash_obj,
-       .obj_cmpfn              = flow_offload_hash_cmp,
-       .automatic_shrinking    = true,
-};
-EXPORT_SYMBOL_GPL(nf_flow_offload_rhash_params);
 
 static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
                                __be16 port, __be16 new_port)
 }
 EXPORT_SYMBOL_GPL(nf_flow_dnat_port);
 
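+/*
+ * Common flowtable setup: initialise the tuple rhashtable and schedule
+ * the periodic garbage collection work.  Exported so flowtable types can
+ * hook it up as their ->init callback, invoked via type->init() in the
+ * nf_tables flowtable creation path below.
+ */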
+int nf_flow_table_init(struct nf_flowtable *flowtable)
+{
+       int err;
+
+       INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
+
+       err = rhashtable_init(&flowtable->rhashtable,
+                             &nf_flow_offload_rhash_params);
+       if (err < 0)
+               return err;
+
+       queue_delayed_work(system_power_efficient_wq,
+                          &flowtable->gc_work, HZ);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nf_flow_table_init);
+
 static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
 {
        struct net_device *dev = data;
 
 void nf_flow_table_free(struct nf_flowtable *flow_table)
 {
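+       /*
+        * Stop the periodic GC before the final cleanup pass; destroy the
+        * rhashtable only after the last gc step has drained every entry.
+        */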
+       cancel_delayed_work_sync(&flow_table->gc_work);
        nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
        WARN_ON(!nf_flow_offload_gc_step(flow_table));
+       rhashtable_destroy(&flow_table->rhashtable);
 }
 EXPORT_SYMBOL_GPL(nf_flow_table_free);
 
 
        }
 
        flowtable->data.type = type;
-       err = rhashtable_init(&flowtable->data.rhashtable, type->params);
+       err = type->init(&flowtable->data);
        if (err < 0)
                goto err3;
 
        err = nf_tables_flowtable_parse_hook(&ctx, nla[NFTA_FLOWTABLE_HOOK],
                                             flowtable);
        if (err < 0)
-               goto err3;
+               goto err4;
 
        for (i = 0; i < flowtable->ops_len; i++) {
                if (!flowtable->ops[i].dev)
                                if (flowtable->ops[i].dev == ft->ops[k].dev &&
                                    flowtable->ops[i].pf == ft->ops[k].pf) {
                                        err = -EBUSY;
-                                       goto err4;
+                                       goto err5;
                                }
                        }
                }
 
                err = nf_register_net_hook(net, &flowtable->ops[i]);
                if (err < 0)
-                       goto err4;
+                       goto err5;
        }
 
        err = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
        if (err < 0)
-               goto err5;
-
-       INIT_DEFERRABLE_WORK(&flowtable->data.gc_work, type->gc);
-       queue_delayed_work(system_power_efficient_wq,
-                          &flowtable->data.gc_work, HZ);
+               goto err6;
 
        list_add_tail_rcu(&flowtable->list, &table->flowtables);
        table->use++;
 
        return 0;
-err5:
+err6:
        i = flowtable->ops_len;
-err4:
+err5:
        for (k = i - 1; k >= 0; k--) {
                kfree(flowtable->dev_name[k]);
                nf_unregister_net_hook(net, &flowtable->ops[k]);
        }
 
        kfree(flowtable->ops);
+err4:
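+       /* release the backend state that type->init() set up above */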
+       flowtable->data.type->free(&flowtable->data);
 err3:
        module_put(type->owner);
 err2:
 
 static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
 {
-       cancel_delayed_work_sync(&flowtable->data.gc_work);
        kfree(flowtable->ops);
        kfree(flowtable->name);
        flowtable->data.type->free(&flowtable->data);
-       rhashtable_destroy(&flowtable->data.rhashtable);
        module_put(flowtable->data.type->owner);
 }