/*
  * Dquot List Management:
- * The quota code uses four lists for dquot management: the inuse_list,
- * free_dquots, dqi_dirty_list, and dquot_hash[] array. A single dquot
- * structure may be on some of those lists, depending on its current state.
+ * The quota code uses five lists for dquot management: the inuse_list,
+ * releasing_dquots, free_dquots, dqi_dirty_list, and dquot_hash[] array.
+ * A single dquot structure may be on some of those lists, depending on
+ * its current state.
  *
  * All dquots are placed to the end of inuse_list when first created, and this
  * list is used for invalidate operation, which must look at every dquot.
  *
+ * When the last reference to a dquot is dropped, the dquot is added to
+ * releasing_dquots. We then queue a work item which calls
+ * synchronize_srcu() and after that performs the final cleanup of all the
+ * dquots on the list. Both releasing_dquots and free_dquots use the
+ * dq_free list_head in the dquot struct. When a dquot is removed from
+ * releasing_dquots, a reference count is always subtracted, and if
+ * dq_count == 0 at that point, the dquot will be added to the free_dquots.
+ *
  * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
  * and this list is searched whenever we need an available dquot.  Dquots are
  * removed from the list as soon as they are used again, and
 
 static LIST_HEAD(inuse_list);
 static LIST_HEAD(free_dquots);
+static LIST_HEAD(releasing_dquots);
 static unsigned int dq_hash_bits, dq_hash_mask;
 static struct hlist_head *dquot_hash;
 
 static qsize_t __inode_get_rsv_space(struct inode *inode);
 static int __dquot_initialize(struct inode *inode, int type);
 
+static void quota_release_workfn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(quota_release_work, quota_release_workfn);
+
 static inline unsigned int
 hashfn(const struct super_block *sb, struct kqid qid)
 {
        dqstats_inc(DQST_FREE_DQUOTS);
 }
 
+/*
+ * Queue a dquot for deferred final release: the caller's reference is
+ * handed over to releasing_dquots (which shares the dq_free list_head
+ * with free_dquots).  Caller must hold dq_list_lock.
+ */
+static inline void put_releasing_dquots(struct dquot *dquot)
+{
+       list_add_tail(&dquot->dq_free, &releasing_dquots);
+}
+
 static inline void remove_free_dquot(struct dquot *dquot)
 {
        if (list_empty(&dquot->dq_free))
                return;
        list_del_init(&dquot->dq_free);
-       dqstats_dec(DQST_FREE_DQUOTS);
+       /*
+        * dq_free is shared by free_dquots and releasing_dquots; only
+        * dquots on free_dquots (dq_count == 0) are accounted in the
+        * DQST_FREE_DQUOTS statistic, so skip the decrement for dquots
+        * coming off releasing_dquots.
+        */
+       if (!atomic_read(&dquot->dq_count))
+               dqstats_dec(DQST_FREE_DQUOTS);
 }
 
 static inline void put_inuse(struct dquot *dquot)
        struct dquot *dquot, *tmp;
 
 restart:
+       flush_delayed_work("a_release_work);
+
        spin_lock(&dq_list_lock);
        list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
                if (dquot->dq_sb != sb)
                        continue;
                /* Wait for dquot users */
                if (atomic_read(&dquot->dq_count)) {
+                       /* dquot in releasing_dquots, flush and retry */
+                       if (!list_empty(&dquot->dq_free)) {
+                               spin_unlock(&dq_list_lock);
+                               goto restart;
+                       }
+
                        atomic_inc(&dquot->dq_count);
                        spin_unlock(&dq_list_lock);
                        /*
        .seeks = DEFAULT_SEEKS,
 };
 
+/*
+ * Safely release dquot and put reference to dquot.
+ *
+ * Deferred-release work: each dquot queued on releasing_dquots by
+ * dqput() still carries one reference (ours).  synchronize_srcu()
+ * guarantees that all SRCU readers which could still be using these
+ * dquots have finished before we write back / release them.
+ */
+static void quota_release_workfn(struct work_struct *work)
+{
+       struct dquot *dquot;
+       struct list_head rls_head;
+
+       spin_lock(&dq_list_lock);
+       /* Exchange the list head to avoid livelock. */
+       list_replace_init(&releasing_dquots, &rls_head);
+       spin_unlock(&dq_list_lock);
+
+restart:
+       /*
+        * Re-run synchronize_srcu() after every unlock/relock cycle
+        * below, since dq_list_lock was dropped in the meantime.
+        */
+       synchronize_srcu(&dquot_srcu);
+       spin_lock(&dq_list_lock);
+       while (!list_empty(&rls_head)) {
+               dquot = list_first_entry(&rls_head, struct dquot, dq_free);
+               /* Dquot got used again? Just drop our reference. */
+               if (atomic_read(&dquot->dq_count) > 1) {
+                       remove_free_dquot(dquot);
+                       atomic_dec(&dquot->dq_count);
+                       continue;
+               }
+               if (dquot_dirty(dquot)) {
+                       spin_unlock(&dq_list_lock);
+                       /* Commit dquot before releasing */
+                       dquot_write_dquot(dquot);
+                       goto restart;
+               }
+               if (dquot_active(dquot)) {
+                       spin_unlock(&dq_list_lock);
+                       dquot->dq_sb->dq_op->release_dquot(dquot);
+                       goto restart;
+               }
+               /* Dquot is inactive and clean, now move it to free list */
+               remove_free_dquot(dquot);
+               atomic_dec(&dquot->dq_count);
+               put_dquot_last(dquot);
+       }
+       spin_unlock(&dq_list_lock);
+}
+
 /*
  * Put reference to dquot
  */
        }
 #endif
        dqstats_inc(DQST_DROPS);
-we_slept:
+
        spin_lock(&dq_list_lock);
        if (atomic_read(&dquot->dq_count) > 1) {
                /* We have more than one user... nothing to do */
                spin_unlock(&dq_list_lock);
                return;
        }
+
        /* Need to release dquot? */
-       if (dquot_dirty(dquot)) {
-               spin_unlock(&dq_list_lock);
-               /* Commit dquot before releasing */
-               dquot_write_dquot(dquot);
-               goto we_slept;
-       }
-       if (dquot_active(dquot)) {
-               spin_unlock(&dq_list_lock);
-               dquot->dq_sb->dq_op->release_dquot(dquot);
-               goto we_slept;
-       }
-       atomic_dec(&dquot->dq_count);
 #ifdef CONFIG_QUOTA_DEBUG
        /* sanity check */
        BUG_ON(!list_empty(&dquot->dq_free));
 #endif
-       put_dquot_last(dquot);
+       put_releasing_dquots(dquot);
        spin_unlock(&dq_list_lock);
+       queue_delayed_work(system_unbound_wq, "a_release_work, 1);
 }
 EXPORT_SYMBOL(dqput);