#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
 static DECLARE_FAULT_ATTR(null_timeout_attr);
+static DECLARE_FAULT_ATTR(null_requeue_attr);
 #endif
 
 static inline u64 mb_per_tick(int mbps)
        wait_queue_head_t wait;
        unsigned int queue_depth;
        struct nullb_device *dev;
+       unsigned int requeue_selection;
 
        struct nullb_cmd *cmds;
 };
 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
 static char g_timeout_str[80];
 module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), S_IRUGO);
+
+static char g_requeue_str[80];
+module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), S_IRUGO);
 #endif
 
 static int g_queue_mode = NULL_Q_MQ;
        if (g_timeout_str[0])
                return should_fail(&null_timeout_attr, 1);
 #endif
+       return false;
+}
 
+static bool should_requeue_request(struct request *rq)
+{
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+       if (g_requeue_str[0])
+               return should_fail(&null_requeue_attr, 1);
+#endif
        return false;
 }
 
        while ((rq = blk_fetch_request(q)) != NULL) {
                struct nullb_cmd *cmd = rq->special;
 
-               if (!should_timeout_request(rq)) {
-                       spin_unlock_irq(q->queue_lock);
-                       null_handle_cmd(cmd);
-                       spin_lock_irq(q->queue_lock);
+               /* drop the request on the floor; it will hit the timeout handler */
+               if (should_timeout_request(rq))
+                       continue;
+               if (should_requeue_request(rq)) {
+                       blk_requeue_request(q, rq);
+                       continue;
                }
+
+               spin_unlock_irq(q->queue_lock);
+               null_handle_cmd(cmd);
+               spin_lock_irq(q->queue_lock);
        }
 }
 
 
        blk_mq_start_request(bd->rq);
 
-       if (!should_timeout_request(bd->rq))
-               return null_handle_cmd(cmd);
+       if (should_requeue_request(bd->rq)) {
+               /*
+                * Alternate between hitting the core BUSY path, and the
+                * driver driven requeue path
+                */
+               nq->requeue_selection++;
+               if (nq->requeue_selection & 1)
+                       return BLK_STS_RESOURCE;
+               else {
+                       blk_mq_requeue_request(bd->rq, true);
+                       return BLK_STS_OK;
+               }
+       }
+       if (should_timeout_request(bd->rq))
+               return BLK_STS_OK;
 
-       return BLK_STS_OK;
+       return null_handle_cmd(cmd);
 }
 
 static const struct blk_mq_ops null_mq_ops = {
                dev->mbps = 0;
 }
 
-static bool null_setup_fault(void)
-{
 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
-       if (!g_timeout_str[0])
+static bool __null_setup_fault(struct fault_attr *attr, char *str)
+{
+       if (!str[0])
                return true;
 
-       if (!setup_fault_attr(&null_timeout_attr, g_timeout_str))
+       if (!setup_fault_attr(attr, str))
                return false;
 
-       null_timeout_attr.verbose = 0;
+       attr->verbose = 0;
+       return true;
+}
+#endif
+
+static bool null_setup_fault(void)
+{
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+       if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
+               return false;
+       if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
+               return false;
 #endif
        return true;
 }