        spin_unlock_bh(&port->backlog.lock);
 
        while ((skb = __skb_dequeue(&list)) != NULL) {
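+               /* The enqueue path holds a reference on skb->dev while the
+                * skb waits in the backlog; remember the device here and
+                * drop the reference once the skb has been handled.
+                */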
+               struct net_device *dev = skb->dev;
+               bool consumed = false;
+
                ethh = eth_hdr(skb);
                hlocal = ether_addr_equal(ethh->h_source, port->dev->dev_addr);
                mac_hash = ipvlan_mac_hash(ethh->h_dest);
                if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
                        pkt_type = PACKET_BROADCAST;
                else
                        pkt_type = PACKET_MULTICAST;

                dlocal = false;
                rcu_read_lock();
                list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
-                       if (hlocal && (ipvlan->dev == skb->dev)) {
+                       if (hlocal && (ipvlan->dev == dev)) {
                                dlocal = true;
                                continue;
                        }
                        if (!test_bit(mac_hash, ipvlan->mac_filters))
                                continue;
-
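+                       /* Do not deliver the skb to a slave that is down. */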
+                       if (!(ipvlan->dev->flags & IFF_UP))
+                               continue;
                        ret = NET_RX_DROP;
                        len = skb->len + ETH_HLEN;
                        nskb = skb_clone(skb, GFP_ATOMIC);
-                       if (!nskb)
-                               goto acct;
-
-                       nskb->pkt_type = pkt_type;
-                       nskb->dev = ipvlan->dev;
-                       if (hlocal)
-                               ret = dev_forward_skb(ipvlan->dev, nskb);
-                       else
-                               ret = netif_rx(nskb);
-acct:
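+                       /* This work item runs in process context, but
+                        * netif_rx(), dev_forward_skb() and the per-cpu
+                        * stats update in ipvlan_count_rx() expect BHs
+                        * to be disabled.
+                        */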
+                       local_bh_disable();
+                       if (nskb) {
+                               consumed = true;
+                               nskb->pkt_type = pkt_type;
+                               nskb->dev = ipvlan->dev;
+                               if (hlocal)
+                                       ret = dev_forward_skb(ipvlan->dev, nskb);
+                               else
+                                       ret = netif_rx(nskb);
+                       }
                        ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
+                       local_bh_enable();
                }
                rcu_read_unlock();
 
                if (dlocal) {
                        /* If the packet originated here, send it out. */
                        skb->dev = port->dev;
                        skb->pkt_type = pkt_type;
                        dev_queue_xmit(skb);
                } else {
-                       kfree_skb(skb);
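+                       /* At least one clone was delivered, so releasing
+                        * the original is not a drop: use consume_skb()
+                        * to keep drop accounting honest.
+                        */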
+                       if (consumed)
+                               consume_skb(skb);
+                       else
+                               kfree_skb(skb);
                }
+               if (dev)
+                       dev_put(dev);
        }
 }
 
 
        spin_lock(&port->backlog.lock);
        if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
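+               /* Pin the originating device: the backlog is drained from
+                * a work item, so the skb can outlive the RX path that
+                * queued it. ipvlan_process_multicast() drops the reference.
+                */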
+               if (skb->dev)
+                       dev_hold(skb->dev);
                __skb_queue_tail(&port->backlog, skb);
                spin_unlock(&port->backlog.lock);
                schedule_work(&port->wq);
 
 static void ipvlan_port_destroy(struct net_device *dev)
 {
        struct ipvl_port *port = ipvlan_port_get_rtnl(dev);
+       struct sk_buff *skb;
 
        dev->priv_flags &= ~IFF_IPVLAN_MASTER;
        if (port->mode == IPVLAN_MODE_L3S) {
                dev->priv_flags &= ~IFF_L3MDEV_MASTER;
                ipvlan_unregister_nf_hook();
                dev->l3mdev_ops = NULL;
        }
        netdev_rx_handler_unregister(dev);
        cancel_work_sync(&port->wq);
-       __skb_queue_purge(&port->backlog);
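+       /* Drain the backlog by hand instead of __skb_queue_purge():
+        * each queued skb may still hold the device reference taken
+        * at enqueue time, and it must be released here.
+        */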
+       while ((skb = __skb_dequeue(&port->backlog)) != NULL) {
+               if (skb->dev)
+                       dev_put(skb->dev);
+               kfree_skb(skb);
+       }
        kfree(port);
 }