        rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
 }
 
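+/*
+ * gfs2_glock2inode - look up the inode a glock is attached to
+ *
+ * Read gl_object under gl->gl_lockref.lock and, if an inode is attached,
+ * set GIF_GLOP_PENDING on it so that gfs2_evict_inode waits for the glock
+ * operation to finish before tearing the inode down.  Callers pair this
+ * with gfs2_clear_glop_pending().
+ */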
+static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
+{
+       struct gfs2_inode *ip;
+
+       spin_lock(&gl->gl_lockref.lock);
+       ip = gl->gl_object;
+       if (ip)
+               set_bit(GIF_GLOP_PENDING, &ip->i_flags);
+       spin_unlock(&gl->gl_lockref.lock);
+       return ip;
+}
+
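+/*
+ * gfs2_clear_glop_pending - glock operation is done with the inode
+ *
+ * Clear GIF_GLOP_PENDING and wake up anyone (such as gfs2_evict_inode)
+ * waiting for the bit to be released.
+ */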
+static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
+{
+       if (!ip)
+               return;
+
+       clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
+       wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
+}
+
 /**
  * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
  * @gl: the glock protecting the inode
 
 static void inode_go_sync(struct gfs2_glock *gl)
 {
-       struct gfs2_inode *ip = gl->gl_object;
+       struct gfs2_inode *ip = gfs2_glock2inode(gl);
+       int isreg = ip && S_ISREG(ip->i_inode.i_mode);
        struct address_space *metamapping = gfs2_glock2aspace(gl);
        int error;
 
-       if (ip && !S_ISREG(ip->i_inode.i_mode))
-               ip = NULL;
-       if (ip) {
+       if (isreg) {
                if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
                        unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
                inode_dio_wait(&ip->i_inode);
        }
        if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
-               return;
+               goto out;
 
        GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
 
        gfs2_log_flush(gl->gl_name.ln_sbd, gl, NORMAL_FLUSH);
        filemap_fdatawrite(metamapping);
-       if (ip) {
+       if (isreg) {
                struct address_space *mapping = ip->i_inode.i_mapping;
                filemap_fdatawrite(mapping);
                error = filemap_fdatawait(mapping);
         */
        smp_mb__before_atomic();
        clear_bit(GLF_DIRTY, &gl->gl_flags);
+
+out:
+       gfs2_clear_glop_pending(ip);
 }
 
 /**
 
 static void inode_go_inval(struct gfs2_glock *gl, int flags)
 {
-       struct gfs2_inode *ip = gl->gl_object;
+       struct gfs2_inode *ip = gfs2_glock2inode(gl);
 
        gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));
 
        }
        if (ip && S_ISREG(ip->i_inode.i_mode))
                truncate_inode_pages(ip->i_inode.i_mapping, 0);
+
+       gfs2_clear_glop_pending(ip);
 }
 
 /**
 
                error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
                if (unlikely(error))
                        goto fail;
-               ip->i_gl->gl_object = ip;
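+               /*
+                * Make sure any delayed work still queued for this glock has
+                * finished before attaching the new inode to it.
+                */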
+               flush_delayed_work(&ip->i_gl->gl_work);
+               glock_set_object(ip->i_gl, ip);
 
                error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
                if (unlikely(error))
                error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
                if (unlikely(error))
                        goto fail_put;
-
-               ip->i_iopen_gh.gh_gl->gl_object = ip;
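+               /* Likewise for the iopen glock before it is pointed at the inode. */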
+               flush_delayed_work(&ip->i_iopen_gh.gh_gl->gl_work);
+               glock_set_object(ip->i_iopen_gh.gh_gl, ip);
                gfs2_glock_put(io_gl);
                io_gl = NULL;
 
 
        gfs2_ordered_del_inode(ip);
        clear_inode(inode);
        gfs2_dir_hash_inval(ip);
-       ip->i_gl->gl_object = NULL;
-       flush_delayed_work(&ip->i_gl->gl_work);
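+       /*
+        * Detach the inode from its glock, then wait for any glock operation
+        * still using the inode (GIF_GLOP_PENDING) to finish, rather than
+        * flushing the whole glock work queue.
+        */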
+       glock_set_object(ip->i_gl, NULL);
+       wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
        gfs2_glock_add_to_lru(ip->i_gl);
        gfs2_glock_put(ip->i_gl);
        ip->i_gl = NULL;