        [RTA_PRIORITY]          = { .type = NLA_U32 },
        [RTA_METRICS]           = { .type = NLA_NESTED },
        [RTA_MULTIPATH]         = { .len = sizeof(struct rtnexthop) },
+       [RTA_PREF]              = { .type = NLA_U8 },
 };
 
 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
 {
        struct rtmsg *rtm;
        struct nlattr *tb[RTA_MAX+1];
+       unsigned int pref;
        int err;
 
        err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
                cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
        }
 
+       if (tb[RTA_PREF]) {
+               pref = nla_get_u8(tb[RTA_PREF]);
+               if (pref != ICMPV6_ROUTER_PREF_LOW &&
+                   pref != ICMPV6_ROUTER_PREF_HIGH)
+                       pref = ICMPV6_ROUTER_PREF_MEDIUM;
+               cfg->fc_flags |= RTF_PREF(pref);
+       }
+
        err = 0;
 errout:
        return err;
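
Editorial note, not part of the patch: RTF_PREF() is assumed to shift the two-bit
RFC 4191 preference into the RTF_PREF_MASK bits of the route flags, which is why
anything other than low or high (including ICMPV6_ROUTER_PREF_INVALID) is demoted
to medium before being folded into cfg->fc_flags. A minimal userspace sketch of
that round trip, assuming the uapi definitions RTF_PREF(pref) == (pref) << 27 and
RTF_PREF_MASK == 0x18000000 from <linux/ipv6_route.h>:

#include <assert.h>
#include <linux/icmpv6.h>	/* ICMPV6_ROUTER_PREF_* */
#include <linux/ipv6_route.h>	/* RTF_PREF(), RTF_PREF_MASK (assumed values) */

int main(void)
{
	unsigned int flags = 0;

	/* what the parsing hunk above stores in cfg->fc_flags */
	flags |= RTF_PREF(ICMPV6_ROUTER_PREF_HIGH);

	/* what IPV6_EXTRACT_PREF() on the dump path is expected to recover */
	assert(((flags & RTF_PREF_MASK) >> 27) == ICMPV6_ROUTER_PREF_HIGH);
	return 0;
}
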
               + nla_total_size(4) /* RTA_PRIORITY */
               + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
               + nla_total_size(sizeof(struct rta_cacheinfo))
-              + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */
+              + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
+              + nla_total_size(1); /* RTA_PREF */
 }
 
 static int rt6_fill_node(struct net *net,
        if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
                goto nla_put_failure;
 
+       if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
+               goto nla_put_failure;
+
        nlmsg_end(skb, nlh);
        return 0;
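
To exercise the new attribute end to end, a userspace sketch along these lines
(illustrative only, needs CAP_NET_ADMIN; the prefix, table and route type are
arbitrary choices and error handling is minimal) sends an RTM_NEWROUTE request
carrying RTA_PREF, which the parsing hunk above turns into RTF_PREF() flags and
the dump hunk reports back:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/icmpv6.h>

/* Append one attribute to the request; assumes the buffer is big enough. */
static void add_attr(struct nlmsghdr *nlh, unsigned short type,
		     const void *data, unsigned short len)
{
	struct rtattr *rta;

	rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));
	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(len);
	memcpy(RTA_DATA(rta), data, len);
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
		char attrs[128];
	} req;
	struct in6_addr dst;
	unsigned char pref = ICMPV6_ROUTER_PREF_HIGH;
	int fd;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type = RTM_NEWROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK;
	req.rtm.rtm_family = AF_INET6;
	req.rtm.rtm_dst_len = 64;
	req.rtm.rtm_table = RT_TABLE_MAIN;
	req.rtm.rtm_protocol = RTPROT_STATIC;
	req.rtm.rtm_scope = RT_SCOPE_UNIVERSE;
	req.rtm.rtm_type = RTN_UNREACHABLE;	/* no nexthop needed for a demo */

	inet_pton(AF_INET6, "2001:db8::", &dst);	/* documentation prefix */
	add_attr(&req.nlh, RTA_DST, &dst, sizeof(dst));
	add_attr(&req.nlh, RTA_PREF, &pref, sizeof(pref));	/* the new attribute */

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0 || send(fd, &req, req.nlh.nlmsg_len, 0) < 0)
		perror("rtnetlink request");
	if (fd >= 0)
		close(fd);	/* the NLM_F_ACK reply is left unread here */
	return 0;
}

For command-line testing, recent iproute2 also accepts a pref {low|medium|high}
keyword on ip -6 route, which exercises the same attribute.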