static void delete_work_func(struct work_struct *work)
 {
-       struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock,
+                                            gl_delete);
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct inode *inode;
        u64 no_addr = gl->gl_name.ln_number;
 
+       spin_lock(&gl->gl_lockref.lock);
+       clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+       spin_unlock(&gl->gl_lockref.lock);
+
        /* If someone's using this glock to create a new dinode, the block must
           have been freed by another node, then re-used, in which case our
           iopen callback is too late after the fact. Ignore it. */
        gl->gl_object = NULL;
        gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
        INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
-       INIT_WORK(&gl->gl_delete, delete_work_func);
+       INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);
 
        mapping = gfs2_glock2aspace(gl);
        if (mapping) {
        rhashtable_walk_exit(&iter);
 }
 
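+/**
+ * gfs2_queue_delete_work - queue the gl_delete work of an iopen glock
+ * @gl: the glock
+ * @delay: number of jiffies to wait before running the work
+ *
+ * Sets GLF_PENDING_DELETE under gl->gl_lockref.lock when the work is queued;
+ * the flag is cleared again when the work runs or is cancelled.
+ *
+ * Returns: true if the work was queued, false if it was already pending.
+ */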
+bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay)
+{
+       bool queued;
+
+       spin_lock(&gl->gl_lockref.lock);
+       queued = queue_delayed_work(gfs2_delete_workqueue,
+                                   &gl->gl_delete, delay);
+       if (queued)
+               set_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+       spin_unlock(&gl->gl_lockref.lock);
+       return queued;
+}
+
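+/**
+ * gfs2_cancel_delete_work - cancel queued gl_delete work
+ * @gl: the glock
+ *
+ * If the work hadn't run yet, clear GLF_PENDING_DELETE and drop the glock
+ * reference that was taken when the work was queued.
+ */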
+void gfs2_cancel_delete_work(struct gfs2_glock *gl)
+{
+       if (cancel_delayed_work_sync(&gl->gl_delete)) {
+               clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+               gfs2_glock_put(gl);
+       }
+}
+
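+/* Check whether queued gl_delete work is still pending. */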
+bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
+{
+       return test_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+}
+
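+/* Flush queued gl_delete work, then queue the regular glock work. */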
+static void flush_delete_work(struct gfs2_glock *gl)
+{
+       if (gl->gl_name.ln_type == LM_TYPE_IOPEN)
+               flush_delayed_work(&gl->gl_delete);
+       gfs2_glock_queue_work(gl, 0);
+}
+
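+/**
+ * gfs2_flush_delete_work - wait for pending gl_delete work to complete
+ * @sdp: the filesystem
+ */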
+void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
+{
+       glock_hash_walk(flush_delete_work, sdp);
+       flush_workqueue(gfs2_delete_workqueue);
+}
+
 /**
  * thaw_glock - thaw out a glock which has an unprocessed reply waiting
  * @gl: The glock to thaw
 
 
 extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
 extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
+extern bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay);
+extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
+extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
+extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
 extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
 extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
 extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
 
        if (gl->gl_demote_state == LM_ST_UNLOCKED &&
            gl->gl_state == LM_ST_SHARED && ip) {
                gl->gl_lockref.count++;
-               if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+               if (!queue_delayed_work(gfs2_delete_workqueue,
+                                       &gl->gl_delete, 0))
                        gl->gl_lockref.count--;
        }
 }
 
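+/* Don't demote an iopen glock while delete work is queued against it. */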
+static int iopen_go_demote_ok(const struct gfs2_glock *gl)
+{
+       return !gfs2_delete_work_queued(gl);
+}
+
 /**
  * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
  * @gl: glock being freed
 const struct gfs2_glock_operations gfs2_iopen_glops = {
        .go_type = LM_TYPE_IOPEN,
        .go_callback = iopen_go_callback,
+       .go_demote_ok = iopen_go_demote_ok,
        .go_flags = GLOF_LRU | GLOF_NONDISK,
 };
 
 
        GLF_OBJECT                      = 14, /* Used only for tracing */
        GLF_BLOCKING                    = 15,
        GLF_INODE_CREATING              = 16, /* Inode creation occurring */
+       GLF_PENDING_DELETE              = 17, /* Delete work is pending */
        GLF_FREEING                     = 18, /* Wait for glock to be freed */
 };
 
        atomic_t gl_revokes;
        struct delayed_work gl_work;
        union {
-               /* For inode and iopen glocks only */
-               struct work_struct gl_delete;
+               /* For iopen glocks only */
+               struct delayed_work gl_delete;
                /* For rgrp glocks only */
                struct {
                        loff_t start;
 
                error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
                if (unlikely(error))
                        goto fail;
+               gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl);
                glock_set_object(ip->i_iopen_gh.gh_gl, ip);
                gfs2_glock_put(io_gl);
                io_gl = NULL;
        if (error)
                goto fail_gunlock2;
 
+       gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl);
        glock_set_object(ip->i_iopen_gh.gh_gl, ip);
        gfs2_set_iop(inode);
        insert_inode_hash(inode);
 
                 */
                ip = gl->gl_object;
 
-               if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+               if (ip || !gfs2_queue_delete_work(gl, 0))
                        gfs2_glock_put(gl);
                else
                        found++;
 
                }
        }
 
-       flush_workqueue(gfs2_delete_workqueue);
+       gfs2_flush_delete_work(sdp);
        if (!log_write_allowed && current == sdp->sd_quotad_process)
                fs_warn(sdp, "The quotad daemon is withdrawing.\n");
        else if (sdp->sd_quotad_process)
                struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
 
                gfs2_glock_hold(gl);
-               if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+               if (!gfs2_queue_delete_work(gl, 0))
                        gfs2_glock_queue_put(gl);
                return false;
        }