mdp->rx_skbuff[entry] = NULL;
                        if (mdp->cd->rpadir)
                                skb_reserve(skb, NET_IP_ALIGN);
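+                       /* Make the device-written frame data visible to the
+                        * CPU before the skb is handed up the stack.
+                        */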
+                       dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
+                                               mdp->rx_buf_sz,
+                                               DMA_FROM_DEVICE);
                        skb_put(skb, pkt_len);
                        skb->protocol = eth_type_trans(skb, ndev);
-                       netif_rx(skb);
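+                       /* Delivered from NAPI poll context, so hand the skb
+                        * to the stack directly rather than via netif_rx().
+                        */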
+                       netif_receive_skb(skb);
                        ndev->stats.rx_packets++;
                        ndev->stats.rx_bytes += pkt_len;
                }
 
                plat->force_sf_dma_mode = 1;
        }
  
-       dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL);
-       if (!dma_cfg)
-               return -ENOMEM;
- 
-       plat->dma_cfg = dma_cfg;
-       of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
-       dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
-       dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
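+       /* Only allocate dma_cfg when the device tree actually provides DMA
+        * parameters; otherwise leave plat->dma_cfg NULL for the defaults.
+        */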
+       if (of_find_property(np, "snps,pbl", NULL)) {
+               dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
+                                      GFP_KERNEL);
+               if (!dma_cfg)
+                       return -ENOMEM;
+               plat->dma_cfg = dma_cfg;
+               of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
+               dma_cfg->fixed_burst =
+                       of_property_read_bool(np, "snps,fixed-burst");
+               dma_cfg->mixed_burst =
+                       of_property_read_bool(np, "snps,mixed-burst");
+       }
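+       /* Threshold and store-and-forward DMA modes are mutually exclusive;
+        * an explicit threshold request overrides store-and-forward.
+        */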
+       plat->force_thresh_dma_mode =
+               of_property_read_bool(np, "snps,force_thresh_dma_mode");
+       if (plat->force_thresh_dma_mode) {
+               plat->force_sf_dma_mode = 0;
+               pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n");
+       }
  
        return 0;
  }
 
                if (!mld2q->mld2q_nsrcs)
                        group = &mld2q->mld2q_mca;
  
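+               /* mldv2_mrc() decodes the Maximum Response Code, including
+                * its exponential form (RFC 3810, section 5.1.3).
+                */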
-               max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mld2q->mld2q_mrc))), 1UL);
+               max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
        }
  
-       br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr),
-                                   max_delay);
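+       /* Querier state is tracked per protocol now; pass the IPv6
+        * querier so it is updated independently of the IPv4 one.
+        */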
+       br_multicast_query_received(br, port, &br->ip6_querier,
+                                   !ipv6_addr_any(&ip6h->saddr), max_delay);
  
        if (!group)
                goto out;
                                         __u16 vid)
  {
        struct br_ip br_group;
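+       /* Use the per-port query state when a port is given, otherwise
+        * fall back to the bridge-wide state.
+        */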
+       struct bridge_mcast_query *query = port ? &port->ip6_query :
+                                                 &br->ip6_query;
 
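+       /* The link-local all-nodes address (ff02::1) always has listeners,
+        * so never process a leave for it.
+        */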
-       if (!ipv6_is_transient_multicast(group))
+       if (ipv6_addr_is_ll_all_nodes(group))
                return;
  
        br_group.u.ip6 = *group;
 
                ttl = iph6->hop_limit;
        tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
  
-       err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, fl4.daddr,
-                           IPPROTO_IPV6, tos, ttl, df);
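+       /* Record the inner (IPv6) header offsets before encapsulation so
+        * later offload processing can still find them.
+        */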
+       if (likely(!skb->encapsulation)) {
+               skb_reset_inner_headers(skb);
+               skb->encapsulation = 1;
+       }
+ 
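+       /* The final argument flags a cross-netns transmit, letting
+        * iptunnel_xmit() scrub skb state that must not leak between
+        * namespaces.
+        */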
+       err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos,
+                           ttl, df, !net_eq(tunnel->net, dev_net(dev)));
        iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
        return NETDEV_TX_OK;