}
 
 
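+/* Compute the feature set the hsr master device can offer, based on the
+ * features currently enabled on its slave ports.
+ */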
+static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
+                                               netdev_features_t features)
+{
+       netdev_features_t mask;
+       struct hsr_port *port;
+
+       mask = features;
+
+       /* Mask out all features that, if supported by one device, should be
+        * enabled for all devices (see NETIF_F_ONE_FOR_ALL).
+        *
+        * Anything that's off in mask will not be enabled - so only things
+        * that were in features originally, and are also in NETIF_F_ONE_FOR_ALL,
+        * may become enabled.
+        */
+       features &= ~NETIF_F_ONE_FOR_ALL;
+       hsr_for_each_port(hsr, port)
+               features = netdev_increment_features(features,
+                                                    port->dev->features,
+                                                    mask);
+
+       return features;
+}
+
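+/* .ndo_fix_features callback; called from netdev_update_features() whenever
+ * the master's feature set needs to be re-evaluated.
+ */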
+static netdev_features_t hsr_fix_features(struct net_device *dev,
+                                         netdev_features_t features)
+{
+       struct hsr_priv *hsr = netdev_priv(dev);
+
+       return hsr_features_recompute(hsr, features);
+}
+
+
 static void hsr_fill_tag(struct hsr_ethhdr *hsr_ethhdr, struct hsr_priv *hsr)
 {
        unsigned long irqflags;
        .ndo_open = hsr_dev_open,
        .ndo_stop = hsr_dev_close,
        .ndo_start_xmit = hsr_dev_xmit,
+       .ndo_fix_features = hsr_fix_features,
 };
 
 
        dev->tx_queue_len        = 0;
 
        dev->destructor = hsr_dev_destroy;
+
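+       /* Features that may be enabled on the hsr device; hsr_fix_features()
+        * narrows the active set to what the slave ports actually support.
+        */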
+       dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
+                          NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
+                          NETIF_F_HW_VLAN_CTAG_TX;
+
+       dev->features = dev->hw_features;
+
+       /* Prevent recursive tx locking */
+       dev->features |= NETIF_F_LLTX;
+       /* VLAN on top of HSR needs testing and probably some work on
+        * hsr_header_create() etc.
+        */
+       dev->features |= NETIF_F_VLAN_CHALLENGED;
 }
 
 
        if (res < 0)
                return res;
 
-       hsr_dev->features = slave[0]->features & slave[1]->features;
-       /* Prevent recursive tx locking */
-       hsr_dev->features |= NETIF_F_LLTX;
-       /* VLAN on top of HSR needs testing and probably some work on
-        * hsr_header_create() etc.
-        */
-       hsr_dev->features |= NETIF_F_VLAN_CHALLENGED;
-
        spin_lock_init(&hsr->seqnr_lock);
        /* Overflow soon to find bugs easier: */
        hsr->sequence_nr = USHRT_MAX - 1024;
 
        if (dev->hard_header_len + HSR_HLEN > master->dev->hard_header_len)
                master->dev->hard_header_len = dev->hard_header_len + HSR_HLEN;
 
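+       /* Recompute the master's features now that the set of ports changed */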
+       netdev_update_features(master->dev);
        dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
 
        return 0;
        list_del_rcu(&port->port_list);
 
        if (port != master) {
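+               /* A slave port is going away; recompute the master's features */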
+               netdev_update_features(master->dev);
                dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
                netdev_rx_handler_unregister(port->dev);
                dev_set_promiscuity(port->dev, -1);