        /* Is the affinity hint set for virtqueues? */
        bool affinity_hint_set;
 
-       /* Per-cpu variable to show the mapping from CPU to virtqueue */
-       int __percpu *vq_index;
-
        /* CPU hot plug notifier */
        struct notifier_block nb;
 };
@@ ... @@
 static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
 {
        int i;
-       int cpu;
 
        if (vi->affinity_hint_set) {
                for (i = 0; i < vi->max_queue_pairs; i++) {
                        virtqueue_set_affinity(vi->rq[i].vq, -1);
                        virtqueue_set_affinity(vi->sq[i].vq, -1);
                }

                vi->affinity_hint_set = false;
        }
-
-       i = 0;
-       for_each_online_cpu(cpu) {
-               if (cpu == hcpu) {
-                       *per_cpu_ptr(vi->vq_index, cpu) = -1;
-               } else {
-                       *per_cpu_ptr(vi->vq_index, cpu) =
-                               ++i % vi->curr_queue_pairs;
-               }
-       }
 }
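
With the per-cpu fallback map gone, clearing the affinity hint no longer rewrites any driver-private state; the CPU-to-queue mapping now lives in the core's XPS tables attached to struct net_device. As a hedged sketch (not part of the patch), a driver that also wanted to retract its XPS hints could publish an empty mask per queue, assuming this era's netif_set_xps_queue() drops a queue from the per-CPU maps when handed an empty mask:

    /* Illustrative only: retract XPS hints by publishing an empty mask.
     * Assumes netif_set_xps_queue() removes the queue from all per-CPU
     * maps when no CPU is set in the mask (net/core/dev.c behaviour of
     * this era).  example_clear_xps is a hypothetical helper, not a
     * kernel API.
     */
    static void example_clear_xps(struct net_device *dev, int num_txqs)
    {
            int txq;

            for (txq = 0; txq < num_txqs; txq++)
                    netif_set_xps_queue(dev, cpu_none_mask, txq);
    }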
 
@@ ... @@ static void virtnet_set_affinity(struct virtnet_info *vi)
        for_each_online_cpu(cpu) {
                virtqueue_set_affinity(vi->rq[i].vq, cpu);
                virtqueue_set_affinity(vi->sq[i].vq, cpu);
-               *per_cpu_ptr(vi->vq_index, cpu) = i;
+               netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
                i++;
        }
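
The one-line replacement above publishes the same 1:1 CPU-to-txq mapping to the networking core through netif_set_xps_queue(), so the stack's generic queue selection can use it directly. For configurations this patch leaves unoptimized (more online CPUs than queue pairs), XPS also accepts a many-to-one mapping; a hedged sketch under that assumption, with example_xps_spread and its round-robin policy purely illustrative:

    /* Illustrative only: give each txq an XPS mask covering several
     * CPUs, spreading the online CPUs round-robin over num_txqs queues
     * (assumes num_txqs >= 1).  Needs <linux/netdevice.h>,
     * <linux/cpumask.h> and <linux/gfp.h>.
     */
    static void example_xps_spread(struct net_device *dev, int num_txqs)
    {
            cpumask_var_t mask;
            int cpu, txq;

            if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                    return;

            for (txq = 0; txq < num_txqs; txq++) {
                    cpumask_clear(mask);
                    for_each_online_cpu(cpu)
                            if (cpu % num_txqs == txq)
                                    cpumask_set_cpu(cpu, mask);
                    netif_set_xps_queue(dev, mask, txq);
            }

            free_cpumask_var(mask);
    }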
 
@@ ... @@ static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
-/* To avoid contending a lock held by a vcpu that would exit to the host,
- * select the txq based on the processor id.
- */
-static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
-       int txq;
-       struct virtnet_info *vi = netdev_priv(dev);
-
-       if (skb_rx_queue_recorded(skb)) {
-               txq = skb_get_rx_queue(skb);
-       } else {
-               txq = *__this_cpu_ptr(vi->vq_index);
-               if (txq == -1)
-                       txq = 0;
-       }
-
-       while (unlikely(txq >= dev->real_num_tx_queues))
-               txq -= dev->real_num_tx_queues;
-
-       return txq;
-}
-
 static const struct net_device_ops virtnet_netdev = {
        .ndo_open            = virtnet_open,
        .ndo_stop            = virtnet_close,
@@ ... @@ static const struct net_device_ops virtnet_netdev = {
        .ndo_get_stats64     = virtnet_stats,
        .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
-       .ndo_select_queue     = virtnet_select_queue,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = virtnet_netpoll,
 #endif
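
With both virtnet_select_queue() and the .ndo_select_queue hook removed, transmit queue selection falls through to the core's __netdev_pick_tx() in net/core/dev.c, which consults the XPS map installed above and otherwise hashes the flow. A simplified, hedged sketch of that fallback order; xps_lookup() is a hypothetical stand-in for the core's static get_xps_queue() helper:

    /* Simplified sketch of the core's pick once .ndo_select_queue is
     * absent; the real logic is __netdev_pick_tx(), which also caches
     * the result per socket.
     */
    static u16 example_pick_tx(struct net_device *dev, struct sk_buff *skb)
    {
            int queue = xps_lookup(dev, skb);   /* hypothetical stand-in */

            if (queue >= 0 && queue < dev->real_num_tx_queues)
                    return queue;

            /* skb_tx_hash() reuses a recorded rx queue when one exists,
             * so the rx-to-tx affinity the removed code provided is
             * still honoured by the core.
             */
            return skb_tx_hash(dev, skb);
    }
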
@@ ... @@ static int virtnet_probe(struct virtio_device *vdev)
        if (vi->stats == NULL)
                goto free;
 
-       vi->vq_index = alloc_percpu(int);
-       if (vi->vq_index == NULL)
-               goto free_stats;
-
        mutex_init(&vi->config_lock);
        vi->config_enable = true;
        INIT_WORK(&vi->config_work, virtnet_config_changed_work);
@@ ... @@ static int virtnet_probe(struct virtio_device *vdev)
        /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
        err = init_vqs(vi);
        if (err)
-               goto free_index;
+               goto free_stats;
 
        netif_set_real_num_tx_queues(dev, 1);
        netif_set_real_num_rx_queues(dev, 1);
@@ ... @@ static int virtnet_probe(struct virtio_device *vdev)
        virtnet_del_vqs(vi);
        if (vi->alloc_frag.page)
                put_page(vi->alloc_frag.page);
-free_index:
-       free_percpu(vi->vq_index);
 free_stats:
        free_percpu(vi->stats);
 free:
 
@@ ... @@ static void virtnet_remove(struct virtio_device *vdev)
        flush_work(&vi->config_work);
 
-       free_percpu(vi->vq_index);
        free_percpu(vi->stats);
        free_netdev(vi->dev);
 }
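
For completeness: the mapping installed with netif_set_xps_queue() is also visible and adjustable from userspace as a hex CPU mask in /sys/class/net/<iface>/queues/tx-<n>/xps_cpus, so the hint this patch sets can be inspected or overridden at runtime without touching the driver.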