 static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                           struct vxlan_rdst *rdst, bool did_rsc)
 {
-       struct ip_tunnel_info *info = skb_tunnel_info(skb);
+       struct ip_tunnel_info *info;
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct sock *sk = vxlan->vn_sock->sock->sk;
        struct rtable *rt = NULL;
        int err;
        u32 flags = vxlan->flags;
 
+       /* FIXME: Support IPv6 */
+       info = skb_tunnel_info(skb, AF_INET);
+
        if (rdst) {
                dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
                vni = rdst->remote_vni;
 static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
-       const struct ip_tunnel_info *info = skb_tunnel_info(skb);
+       const struct ip_tunnel_info *info;
        struct ethhdr *eth;
        bool did_rsc = false;
        struct vxlan_rdst *rdst, *fdst = NULL;
        struct vxlan_fdb *f;
 
+       /* FIXME: Support IPv6 */
+       info = skb_tunnel_info(skb, AF_INET);
+
        skb_reset_mac_header(skb);
        eth = eth_hdr(skb);
 
 
        return tot;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
+
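+/* Netlink policy for the per-route IP tunnel encap attributes. */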
+static const struct nla_policy ip_tun_policy[IP_TUN_MAX + 1] = {
+       [IP_TUN_ID]             = { .type = NLA_U64 },
+       [IP_TUN_DST]            = { .type = NLA_U32 },
+       [IP_TUN_SRC]            = { .type = NLA_U32 },
+       [IP_TUN_TTL]            = { .type = NLA_U8 },
+       [IP_TUN_TOS]            = { .type = NLA_U8 },
+       [IP_TUN_SPORT]          = { .type = NLA_U16 },
+       [IP_TUN_DPORT]          = { .type = NLA_U16 },
+       [IP_TUN_FLAGS]          = { .type = NLA_U16 },
+};
+
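+/* Parse a route's nested IP_TUN_* encap attributes into an ip_tunnel_info
+ * carried by a newly allocated lwtunnel_state.
+ */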
+static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
+                             struct lwtunnel_state **ts)
+{
+       struct ip_tunnel_info *tun_info;
+       struct lwtunnel_state *new_state;
+       struct nlattr *tb[IP_TUN_MAX + 1];
+       int err;
+
+       err = nla_parse_nested(tb, IP_TUN_MAX, attr, ip_tun_policy);
+       if (err < 0)
+               return err;
+
+       new_state = lwtunnel_state_alloc(sizeof(*tun_info));
+       if (!new_state)
+               return -ENOMEM;
+
+       new_state->type = LWTUNNEL_ENCAP_IP;
+
+       tun_info = lwt_tun_info(new_state);
+
+       if (tb[IP_TUN_ID])
+               tun_info->key.tun_id = nla_get_u64(tb[IP_TUN_ID]);
+
+       if (tb[IP_TUN_DST])
+               tun_info->key.ipv4_dst = nla_get_be32(tb[IP_TUN_DST]);
+
+       if (tb[IP_TUN_SRC])
+               tun_info->key.ipv4_src = nla_get_be32(tb[IP_TUN_SRC]);
+
+       if (tb[IP_TUN_TTL])
+               tun_info->key.ipv4_ttl = nla_get_u8(tb[IP_TUN_TTL]);
+
+       if (tb[IP_TUN_TOS])
+               tun_info->key.ipv4_tos = nla_get_u8(tb[IP_TUN_TOS]);
+
+       if (tb[IP_TUN_SPORT])
+               tun_info->key.tp_src = nla_get_be16(tb[IP_TUN_SPORT]);
+
+       if (tb[IP_TUN_DPORT])
+               tun_info->key.tp_dst = nla_get_be16(tb[IP_TUN_DPORT]);
+
+       if (tb[IP_TUN_FLAGS])
+               tun_info->key.tun_flags = nla_get_u16(tb[IP_TUN_FLAGS]);
+
+       tun_info->mode = IP_TUNNEL_INFO_TX;
+       tun_info->options = NULL;
+       tun_info->options_len = 0;
+
+       *ts = new_state;
+
+       return 0;
+}
+
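+/* Dump the ip_tunnel_info held in the lwtunnel_state back out as nested
+ * IP_TUN_* attributes.
+ */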
+static int ip_tun_fill_encap_info(struct sk_buff *skb,
+                                 struct lwtunnel_state *lwtstate)
+{
+       struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
+
+       if (nla_put_u64(skb, IP_TUN_ID, tun_info->key.tun_id) ||
+           nla_put_be32(skb, IP_TUN_DST, tun_info->key.ipv4_dst) ||
+           nla_put_be32(skb, IP_TUN_SRC, tun_info->key.ipv4_src) ||
+           nla_put_u8(skb, IP_TUN_TOS, tun_info->key.ipv4_tos) ||
+           nla_put_u8(skb, IP_TUN_TTL, tun_info->key.ipv4_ttl) ||
+           nla_put_u16(skb, IP_TUN_SPORT, tun_info->key.tp_src) ||
+           nla_put_u16(skb, IP_TUN_DPORT, tun_info->key.tp_dst) ||
+           nla_put_u16(skb, IP_TUN_FLAGS, tun_info->key.tun_flags))
+               return -ENOMEM;
+
+       return 0;
+}
+
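+/* Netlink attribute space needed by ip_tun_fill_encap_info(). */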
+static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+       return nla_total_size(8)        /* IP_TUN_ID */
+               + nla_total_size(4)     /* IP_TUN_DST */
+               + nla_total_size(4)     /* IP_TUN_SRC */
+               + nla_total_size(1)     /* IP_TUN_TOS */
+               + nla_total_size(1)     /* IP_TUN_TTL */
+               + nla_total_size(2)     /* IP_TUN_SPORT */
+               + nla_total_size(2)     /* IP_TUN_DPORT */
+               + nla_total_size(2);    /* IP_TUN_FLAGS */
+}
+
+static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
+       .build_state = ip_tun_build_state,
+       .fill_encap = ip_tun_fill_encap_info,
+       .get_encap_size = ip_tun_encap_nlsize,
+};
+
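+/* Register the LWTUNNEL_ENCAP_IP encap ops with the lwtunnel core. */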
+static int __init ip_tunnel_core_init(void)
+{
+       lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP);
+
+       return 0;
+}
+module_init(ip_tunnel_core_init);
+
+static void __exit ip_tunnel_core_exit(void)
+{
+       lwtunnel_encap_del_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP);
+}
+module_exit(ip_tunnel_core_exit);
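
For context, a minimal userspace sketch of how the nested attributes consumed by
ip_tun_build_state() might be constructed with libmnl. RTA_ENCAP and
RTA_ENCAP_TYPE are assumptions about the companion lwtunnel patches, and the
IP_TUN_* and LWTUNNEL_ENCAP_IP values are assumed to be exported through a uapi
header; this is not part of the patch above.

/*
 * Sketch: attach per-route IP tunnel metadata to an RTM_NEWROUTE request.
 * RTA_ENCAP, RTA_ENCAP_TYPE and the availability of the IP_TUN_* and
 * LWTUNNEL_ENCAP_IP definitions in userspace headers are assumptions.
 */
#include <stdint.h>
#include <arpa/inet.h>
#include <libmnl/libmnl.h>
#include <linux/rtnetlink.h>

static void put_ip_tun_encap(struct nlmsghdr *nlh, uint64_t tun_id,
			     const char *dst, uint8_t ttl)
{
	struct nlattr *nest;

	/* Nested IP_TUN_* attributes, mirroring ip_tun_policy[]. */
	nest = mnl_attr_nest_start(nlh, RTA_ENCAP);
	mnl_attr_put_u64(nlh, IP_TUN_ID, tun_id);
	mnl_attr_put_u32(nlh, IP_TUN_DST, inet_addr(dst)); /* network order */
	mnl_attr_put_u8(nlh, IP_TUN_TTL, ttl);
	mnl_attr_nest_end(nlh, nest);

	/* Tell the route code which encap ops interpret the nest. */
	mnl_attr_put_u16(nlh, RTA_ENCAP_TYPE, LWTUNNEL_ENCAP_IP);
}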