if (__ceph_caps_dirty(ci)) {
                        struct ceph_mds_client *mdsc =
                                ceph_inode_to_fs_client(inode)->mdsc;
-                       __cap_delay_requeue_front(mdsc, ci);
+
+                       doutc(mdsc->fsc->client, "%p %llx.%llx\n", inode,
+                             ceph_vinop(inode));
+                       spin_lock(&mdsc->cap_unlink_delay_lock);
+                       ci->i_ceph_flags |= CEPH_I_FLUSH;
+                       if (!list_empty(&ci->i_cap_delay_list))
+                               list_del_init(&ci->i_cap_delay_list);
+                       list_add_tail(&ci->i_cap_delay_list,
+                                     &mdsc->cap_unlink_delay_list);
+                       spin_unlock(&mdsc->cap_unlink_delay_lock);
+
+                       /*
+                        * Fire the work immediately, because the MDS may be
+                        * waiting for the caps release.
+                        */
+                       ceph_queue_cap_unlink_work(mdsc);
                }
        }
        spin_unlock(&ci->i_ceph_lock);
 
        }
 }
 
+/*
+ * Queue the cap_unlink worker on the cap workqueue so that
+ * ceph_cap_unlink_work() flushes the caps of inodes parked on
+ * mdsc->cap_unlink_delay_list.  No-op once the mdsc is stopping.
+ */
+void ceph_queue_cap_unlink_work(struct ceph_mds_client *mdsc)
+{
+       struct ceph_client *cl = mdsc->fsc->client;
+
+       if (mdsc->stopping)
+               return;
+
+       /* queue_work() returns false if the work was already pending */
+       if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_unlink_work))
+               doutc(cl, "caps unlink work queued\n");
+       else
+               doutc(cl, "failed to queue caps unlink work\n");
+}
+
+/*
+ * Worker: flush caps for every inode queued on cap_unlink_delay_list,
+ * so the MDS (which may be blocked waiting for a caps release after an
+ * unlink) gets them back promptly.  Queued via
+ * ceph_queue_cap_unlink_work().
+ */
+static void ceph_cap_unlink_work(struct work_struct *work)
+{
+       struct ceph_mds_client *mdsc =
+               container_of(work, struct ceph_mds_client, cap_unlink_work);
+       struct ceph_client *cl = mdsc->fsc->client;
+
+       doutc(cl, "begin\n");
+       spin_lock(&mdsc->cap_unlink_delay_lock);
+       while (!list_empty(&mdsc->cap_unlink_delay_list)) {
+               struct ceph_inode_info *ci;
+               struct inode *inode;
+
+               /* always take the head; each entry is unlinked under the lock */
+               ci = list_first_entry(&mdsc->cap_unlink_delay_list,
+                                     struct ceph_inode_info,
+                                     i_cap_delay_list);
+               list_del_init(&ci->i_cap_delay_list);
+
+               /*
+                * igrab() can fail if the inode is being evicted; in that
+                * case the entry is simply dropped and eviction reclaims
+                * the caps.
+                */
+               inode = igrab(&ci->netfs.inode);
+               if (inode) {
+                       /*
+                        * Drop the spinlock across ceph_check_caps()/iput()
+                        * -- NOTE(review): presumably because they can block;
+                        * reacquire before re-testing the list head.
+                        */
+                       spin_unlock(&mdsc->cap_unlink_delay_lock);
+                       doutc(cl, "on %p %llx.%llx\n", inode,
+                             ceph_vinop(inode));
+                       ceph_check_caps(ci, CHECK_CAPS_FLUSH);
+                       iput(inode);
+                       spin_lock(&mdsc->cap_unlink_delay_lock);
+               }
+       }
+       spin_unlock(&mdsc->cap_unlink_delay_lock);
+       doutc(cl, "done\n");
+}
+
 /*
  * requests
  */
        INIT_LIST_HEAD(&mdsc->cap_delay_list);
        INIT_LIST_HEAD(&mdsc->cap_wait_list);
        spin_lock_init(&mdsc->cap_delay_lock);
+       INIT_LIST_HEAD(&mdsc->cap_unlink_delay_list);
+       spin_lock_init(&mdsc->cap_unlink_delay_lock);
        INIT_LIST_HEAD(&mdsc->snap_flush_list);
        spin_lock_init(&mdsc->snap_flush_lock);
        mdsc->last_cap_flush_tid = 1;
        spin_lock_init(&mdsc->cap_dirty_lock);
        init_waitqueue_head(&mdsc->cap_flushing_wq);
        INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
+       INIT_WORK(&mdsc->cap_unlink_work, ceph_cap_unlink_work);
        err = ceph_metric_init(&mdsc->metric);
        if (err)
                goto err_mdsmap;
        ceph_cleanup_global_and_empty_realms(mdsc);
 
        cancel_work_sync(&mdsc->cap_reclaim_work);
+       cancel_work_sync(&mdsc->cap_unlink_work);
        cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
 
        doutc(cl, "done\n");
 
        unsigned long    last_renew_caps;  /* last time we renewed our caps */
        struct list_head cap_delay_list;   /* caps with delayed release */
        spinlock_t       cap_delay_lock;   /* protects cap_delay_list */
+       struct list_head cap_unlink_delay_list;  /* caps with delayed release for unlink */
+       spinlock_t       cap_unlink_delay_lock;  /* protects cap_unlink_delay_list */
        struct list_head snap_flush_list;  /* cap_snaps ready to flush */
        spinlock_t       snap_flush_lock;
 
        struct work_struct cap_reclaim_work;
        atomic_t           cap_reclaim_pending;
 
+       struct work_struct cap_unlink_work;
+
        /*
         * Cap reservations
         *
                                    struct ceph_mds_session *session);
 extern void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc);
 extern void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr);
+extern void ceph_queue_cap_unlink_work(struct ceph_mds_client *mdsc);
 extern int ceph_iterate_session_caps(struct ceph_mds_session *session,
                                     int (*cb)(struct inode *, int mds, void *),
                                     void *arg);