*/
 unsigned int rx_drain_timeout_msecs = 10000;
 module_param(rx_drain_timeout_msecs, uint, 0444);
-unsigned int rx_drain_timeout_jiffies;
 
 /* The length of time before the frontend is considered unresponsive
  * because it isn't providing Rx slots.
  */
-static unsigned int rx_stall_timeout_msecs = 60000;
+unsigned int rx_stall_timeout_msecs = 60000;
 module_param(rx_stall_timeout_msecs, uint, 0444);
-static unsigned int rx_stall_timeout_jiffies;
 
 unsigned int xenvif_max_queues;
 module_param_named(max_queues, xenvif_max_queues, uint, 0644);
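
With the global rx_drain_timeout_jiffies and rx_stall_timeout_jiffies gone, the
timeouts are evidently carried per vif, already converted to jiffies (the hunks
below use queue->vif->stall_timeout and, on the xenbus side, be->vif->drain_timeout
and be->vif->stall_timeout).  A minimal sketch of how those fields would be seeded
from the module parameters at vif setup time; the actual initialisation site is not
shown in this excerpt:

    /* Sketch only: convert the msec module parameters to jiffies once
     * per vif instead of once globally at module init.  "vif" stands
     * for the struct xenvif being set up.
     */
    vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
    vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);
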
        return !queue->stalled
                && prod - cons < XEN_NETBK_RX_SLOTS_MAX
                && time_after(jiffies,
-                             queue->last_rx_time + rx_stall_timeout_jiffies);
+                             queue->last_rx_time + queue->vif->stall_timeout);
 }
 
 static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)

 static bool xenvif_have_rx_work(struct xenvif_queue *queue)
 {
        return (!skb_queue_empty(&queue->rx_queue)
                && xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
-               || xenvif_rx_queue_stalled(queue)
-               || xenvif_rx_queue_ready(queue)
+               || (queue->vif->stall_timeout &&
+                   (xenvif_rx_queue_stalled(queue)
+                    || xenvif_rx_queue_ready(queue)))
                || kthread_should_stop()
                || queue->vif->disabled;
 }
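
xenvif_have_rx_work() now counts the stalled/ready transitions as work only while
stall detection is enabled (vif->stall_timeout != 0).  To show how such a predicate
pairs with the per-vif drain timeout, a sketch of a bounded wait; the helper name
and the queue->wq wait-queue field are assumptions, and this is not the driver's
actual wait loop:

    /* Illustrative only: sleep until there is rx work, but wake at
     * least every drain_timeout so queued skbs holding foreign pages
     * can still be dropped on time.
     */
    static void example_wait_for_rx_work(struct xenvif_queue *queue)
    {
            /* Returns when rx work appears or when drain_timeout
             * expires, whichever comes first.
             */
            wait_event_timeout(queue->wq, xenvif_have_rx_work(queue),
                               queue->vif->drain_timeout);
    }
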
 int xenvif_kthread_guest_rx(void *data)
 {
        struct xenvif_queue *queue = data;
        struct xenvif *vif = queue->vif;
 
+       if (!vif->stall_timeout)
+               xenvif_queue_carrier_on(queue);
+
        for (;;) {
                xenvif_wait_for_rx_work(queue);
 
                /* If the guest hasn't provided any Rx slots for a
                 * while it's probably not responsive, drop the
                 * carrier so packets are dropped earlier.
                 */
-               if (xenvif_rx_queue_stalled(queue))
-                       xenvif_queue_carrier_off(queue);
-               else if (xenvif_rx_queue_ready(queue))
-                       xenvif_queue_carrier_on(queue);
+               if (vif->stall_timeout) {
+                       if (xenvif_rx_queue_stalled(queue))
+                               xenvif_queue_carrier_off(queue);
+                       else if (xenvif_rx_queue_ready(queue))
+                               xenvif_queue_carrier_on(queue);
+               }
 
                /* Queued packets may have foreign pages from other
                 * domains.  These cannot be queued indefinitely as
        if (rc)
                goto failed_init;
 
-       rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
-       rx_stall_timeout_jiffies = msecs_to_jiffies(rx_stall_timeout_msecs);
-
 #ifdef CONFIG_DEBUG_FS
        xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
        if (IS_ERR_OR_NULL(xen_netback_dbg_root))
 
                return -EOPNOTSUPP;
 
        if (xenbus_scanf(XBT_NIL, dev->otherend,
-                        "feature-rx-notify", "%d", &val) < 0 || val == 0) {
-               xenbus_dev_fatal(dev, -EINVAL, "feature-rx-notify is mandatory");
-               return -EINVAL;
+                        "feature-rx-notify", "%d", &val) < 0)
+               val = 0;
+       if (!val) {
+               /* - Reduce drain timeout to poll more frequently for
+                *   Rx requests.
+                * - Disable Rx stall detection.
+                */
+               be->vif->drain_timeout = msecs_to_jiffies(30);
+               be->vif->stall_timeout = 0;
        }
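
For contrast, a frontend that does support the feature advertises it during its own
xenbus setup, which leaves the backend on the normal 10000 ms drain / 60000 ms stall
timeouts.  A sketch of that frontend half; the helper name and the xbt/dev arguments
are illustrative, only the key name and "%d" format come from this hunk:

    /* Illustrative frontend side: declare that rx interrupts will be
     * raised when requests are posted, so the backend need not poll.
     */
    static int example_advertise_rx_notify(struct xenbus_device *dev,
                                           struct xenbus_transaction xbt)
    {
            int err;

            err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify",
                                "%d", 1);
            if (err)
                    xenbus_dev_fatal(dev, err, "writing feature-rx-notify");
            return err;
    }
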
 
        if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",