        /* io prio of this group */
        unsigned short ioprio, org_ioprio;
-       unsigned short ioprio_class, org_ioprio_class;
+       unsigned short ioprio_class;
 
        pid_t pid;
 
         * elevate the priority of this queue
         */
        cfqq->org_ioprio = cfqq->ioprio;
-       cfqq->org_ioprio_class = cfqq->ioprio_class;
        cfq_clear_cfqq_prio_changed(cfqq);
 }
 
                cfq_schedule_dispatch(cfqd);
 }
 
-/*
- * we temporarily boost lower priority queues if they are holding fs exclusive
- * resources. they are boosted to normal prio (CLASS_BE/4)
- */
-static void cfq_prio_boost(struct cfq_queue *cfqq)
-{
-       if (has_fs_excl()) {
-               /*
-                * boost idle prio on transactions that would lock out other
-                * users of the filesystem
-                */
-               if (cfq_class_idle(cfqq))
-                       cfqq->ioprio_class = IOPRIO_CLASS_BE;
-               if (cfqq->ioprio > IOPRIO_NORM)
-                       cfqq->ioprio = IOPRIO_NORM;
-       } else {
-               /*
-                * unboost the queue (if needed)
-                */
-               cfqq->ioprio_class = cfqq->org_ioprio_class;
-               cfqq->ioprio = cfqq->org_ioprio;
-       }
-}
-
 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
 {
        if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
        cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
        if (cfqq) {
                cfq_init_prio_data(cfqq, cic->ioc);
-               cfq_prio_boost(cfqq);
 
                return __cfq_may_queue(cfqq);
        }
 
 static void write_chunk(struct buffer_chunk *chunk)
 {
        int i;
-       get_fs_excl();
        for (i = 0; i < chunk->nr; i++) {
                submit_logged_buffer(chunk->bh[i]);
        }
        chunk->nr = 0;
-       put_fs_excl();
 }
 
 static void write_ordered_chunk(struct buffer_chunk *chunk)
 {
        int i;
-       get_fs_excl();
        for (i = 0; i < chunk->nr; i++) {
                submit_ordered_buffer(chunk->bh[i]);
        }
        chunk->nr = 0;
-       put_fs_excl();
 }
 
 static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
                return 0;
        }
 
-       get_fs_excl();
-
        /* before we can put our commit blocks on disk, we have to make sure everyone older than
         ** us is on disk too
         */
        if (retval)
                reiserfs_abort(s, retval, "Journal write error in %s",
                               __func__);
-       put_fs_excl();
        return retval;
 }
 
                return 0;
        }
 
-       get_fs_excl();
-
        /* if all the work is already done, get out of here */
        if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
            atomic_read(&(jl->j_commit_left)) <= 0) {
        put_journal_list(s, jl);
        if (flushall)
                mutex_unlock(&journal->j_flush_mutex);
-       put_fs_excl();
        return err;
 }
 
        th->t_trans_id = journal->j_trans_id;
        unlock_journal(sb);
        INIT_LIST_HEAD(&th->t_list);
-       get_fs_excl();
        return 0;
 
       out_fail:
        flush = flags & FLUSH_ALL;
        wait_on_commit = flags & WAIT;
 
-       put_fs_excl();
        current->journal_info = th->t_handle_save;
        reiserfs_check_lock_depth(sb, "journal end");
        if (journal->j_len == 0) {
        dump_stack();
 #endif
 }
-
 
  */
 void lock_super(struct super_block * sb)
 {
-       get_fs_excl();
        mutex_lock(&sb->s_lock);
 }
 
 void unlock_super(struct super_block * sb)
 {
-       put_fs_excl();
        mutex_unlock(&sb->s_lock);
 }
 
        if (sb->s_root) {
                shrink_dcache_for_umount(sb);
                sync_filesystem(sb);
-               get_fs_excl();
                sb->s_flags &= ~MS_ACTIVE;
 
                fsnotify_unmount_inodes(&sb->s_inodes);
                           "Self-destruct in 5 seconds.  Have a nice day...\n",
                           sb->s_id);
                }
-               put_fs_excl();
        }
        spin_lock(&sb_lock);
        /* should be initialized for __put_super_and_need_restart() */
 
 #define vfs_check_frozen(sb, level) \
        wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level)))
 
-#define get_fs_excl() atomic_inc(&current->fs_excl)
-#define put_fs_excl() atomic_dec(&current->fs_excl)
-#define has_fs_excl() atomic_read(&current->fs_excl)
-
 /*
  * until VFS tracks user namespaces for inodes, just make all files
  * belong to init_user_ns
 
        .alloc_lock     = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock),         \
        .journal_info   = NULL,                                         \
        .cpu_timers     = INIT_CPU_TIMERS(tsk.cpu_timers),              \
-       .fs_excl        = ATOMIC_INIT(0),                               \
        .pi_lock        = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock),        \
        .timer_slack_ns = 50000, /* 50 usec default slack */            \
        .pids = {                                                       \
 
        short il_next;
        short pref_node_fork;
 #endif
-       atomic_t fs_excl;       /* holding fs exclusive resources */
        struct rcu_head rcu;
 
        /*
 
 
        profile_task_exit(tsk);
 
-       WARN_ON(atomic_read(&tsk->fs_excl));
        WARN_ON(blk_needs_flush_plug(tsk));
 
        if (unlikely(in_interrupt()))
 
 
        /* One for us, one for whoever does the "release_task()" (usually parent) */
        atomic_set(&tsk->usage,2);
-       atomic_set(&tsk->fs_excl, 0);
 #ifdef CONFIG_BLK_DEV_IO_TRACE
        tsk->btrace_seq = 0;
 #endif