struct fuse_req *fuse_get_req(struct fuse_conn *fc)
 {
-       struct fuse_req *req = fuse_request_alloc();
+       struct fuse_req *req;
+       sigset_t oldset;
+       int err;
+
+       block_sigs(&oldset);
+       err = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
+       restore_sigs(&oldset);
+       if (err)
+               return ERR_PTR(-EINTR);
+
+       req = fuse_request_alloc();
        if (!req)
                return ERR_PTR(-ENOMEM);
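
The helpers used above mask every signal except SIGKILL around the
interruptible sleep, so only a fatal signal can abort the wait.  A
minimal sketch of what block_sigs()/restore_sigs() look like, assuming
the usual kernel signal primitives (shown here for context, not part of
this patch):

	static void block_sigs(sigset_t *oldset)
	{
		sigset_t mask;

		/* Mask everything except SIGKILL; save the old mask */
		siginitsetinv(&mask, sigmask(SIGKILL));
		sigprocmask(SIG_BLOCK, &mask, oldset);
	}

	static void restore_sigs(sigset_t *oldset)
	{
		/* Restore the mask saved by block_sigs() */
		sigprocmask(SIG_SETMASK, oldset, NULL);
	}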
 
        if (req->file)
                fput(req->file);
        spin_lock(&fc->lock);
        list_del(&req->bg_entry);
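+       /* fc->blocked was set when the counter reached the limit, so
+          check it before the decrement */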
+       if (fc->num_background == FUSE_MAX_BACKGROUND) {
+               fc->blocked = 0;
+               wake_up_all(&fc->blocked_waitq);
+       }
+       fc->num_background--;
        spin_unlock(&fc->lock);
 }
 
 static void background_request(struct fuse_conn *fc, struct fuse_req *req)
 {
        req->background = 1;
        list_add(&req->bg_entry, &fc->background);
+       fc->num_background++;
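+       /* Too many queued up: block allocation of new requests until
+          some complete */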
+       if (fc->num_background == FUSE_MAX_BACKGROUND)
+               fc->blocked = 1;
        if (req->inode)
                req->inode = igrab(req->inode);
        if (req->inode2)
                req->inode2 = igrab(req->inode2);

 static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
 {
        spin_lock(&fc->lock);
+       background_request(fc, req);
        if (fc->connected) {
                queue_request(fc, req);
                spin_unlock(&fc->lock);
 void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
 {
        req->isreply = 1;
-       spin_lock(&fc->lock);
-       background_request(fc, req);
-       spin_unlock(&fc->lock);
        request_send_nowait(fc, req);
 }
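
For callers, allocation can now sleep and fail with -EINTR in addition
to -ENOMEM.  A hypothetical submitter (example_send_async() is
illustrative, not part of the patch):

	static int example_send_async(struct fuse_conn *fc)
	{
		struct fuse_req *req = fuse_get_req(fc);

		/* fuse_get_req() may have slept on fc->blocked_waitq;
		   a fatal signal while waiting yields -EINTR */
		if (IS_ERR(req))
			return PTR_ERR(req);

		/* ... fill in req->in / req->out for the operation ... */

		/* Queue the request and return without waiting for the
		   reply; it is accounted in fc->num_background */
		request_send_background(fc, req);
		return 0;
	}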
 
 
 /** Max number of pages that can be used in a single read request */
 #define FUSE_MAX_PAGES_PER_REQ 32
 
+/** Maximum number of outstanding background requests */
+#define FUSE_MAX_BACKGROUND 10
+
 /** It could be as large as PATH_MAX, but would that have any uses? */
 #define FUSE_NAME_MAX 1024
 
        /** Requests put in the background (RELEASE or any other
            interrupted request) */
        struct list_head background;
 
+       /** Number of requests currently in the background */
+       unsigned num_background;
+
+       /** Flag indicating if connection is blocked.  This will be
+           the case before the INIT reply is received, and if there
+           are too many outstanding background requests */
+       int blocked;
+
+       /** waitq for blocked connection */
+       wait_queue_head_t blocked_waitq;
+
        /** RW semaphore for exclusion with fuse_put_super() */
        struct rw_semaphore sbput_sem;
 
 
        if (fc) {
                spin_lock_init(&fc->lock);
                init_waitqueue_head(&fc->waitq);
+               init_waitqueue_head(&fc->blocked_waitq);
                INIT_LIST_HEAD(&fc->pending);
                INIT_LIST_HEAD(&fc->processing);
                INIT_LIST_HEAD(&fc->io);
                fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
                fc->bdi.unplug_io_fn = default_unplug_io_fn;
                fc->reqctr = 0;
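+               /* Block requests until the INIT reply is received */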
+               fc->blocked = 1;
        }
        return fc;
 }
                fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
        }
        fuse_put_request(fc, req);
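+       /* INIT reply processed: let waiters in fuse_get_req() through */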
+       fc->blocked = 0;
+       wake_up_all(&fc->blocked_waitq);
 }
 
 static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)