 static LIST_HEAD(qd_lru_list);
 static atomic_t qd_lru_count = ATOMIC_INIT(0);
-static DEFINE_SPINLOCK(qd_lru_lock);
+static DEFINE_SPINLOCK(qd_lock);
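
Every hunk in this patch is the same mechanical rename of the global spinlock
from qd_lru_lock to qd_lock. The hunks below make the motivation visible: the
lock serializes far more than the LRU list, covering the per-superblock
sd_quota_list walks, the quota slot bitmap, and the qd_change counters, so the
old name undersold its scope.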
 
 unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
                                  struct shrink_control *sc)
        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;
 
-       spin_lock(&qd_lru_lock);
+       spin_lock(&qd_lock);
        while (nr_to_scan && !list_empty(&qd_lru_list)) {
                qd = list_entry(qd_lru_list.next,
                                struct gfs2_quota_data, qd_reclaim);
                /* Delete it from the common reclaim list */
                list_del_init(&qd->qd_reclaim);
                atomic_dec(&qd_lru_count);
-               spin_unlock(&qd_lru_lock);
+               spin_unlock(&qd_lock);
                kmem_cache_free(gfs2_quotad_cachep, qd);
-               spin_lock(&qd_lru_lock);
+               spin_lock(&qd_lock);
                nr_to_scan--;
                freed++;
        }
-       spin_unlock(&qd_lru_lock);
+       spin_unlock(&qd_lock);
        return freed;
 }
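
In the shrinker, qd_lock is dropped around kmem_cache_free() so the free does
not run under a spinlock, then retaken before the list head is tested again.
A minimal sketch of that shape, with hypothetical names (lock, lru, struct
item and free_item() are illustrative, not from the patch):

        spin_lock(&lock);
        while (!list_empty(&lru)) {
                struct item *it = list_first_entry(&lru, struct item, node);

                list_del_init(&it->node);       /* unlink while locked */
                spin_unlock(&lock);
                free_item(it);                  /* heavy work runs unlocked */
                spin_lock(&lock);               /* retake, then re-test list */
        }
        spin_unlock(&lock);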
 
 
        for (;;) {
                found = 0;
-               spin_lock(&qd_lru_lock);
+               spin_lock(&qd_lock);
                list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                        if (qid_eq(qd->qd_id, qid)) {
                                lockref_get(&qd->qd_lockref);
                        new_qd = NULL;
                }
 
-               spin_unlock(&qd_lru_lock);
+               spin_unlock(&qd_lock);
 
                if (qd) {
                        if (new_qd) {
 
 static void qd_put(struct gfs2_quota_data *qd)
 {
-       spin_lock(&qd_lru_lock);
+       spin_lock(&qd_lock);
 
        if (!lockref_put_or_lock(&qd->qd_lockref)) {
 
                spin_unlock(&qd->qd_lockref.lock);
        }
 
-       spin_unlock(&qd_lru_lock);
+       spin_unlock(&qd_lock);
 }
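
qd_put() nests the per-object lockref inside the global qd_lock.
lockref_put_or_lock() drops the count unless this put is the last one; when
it cannot, it returns false with qd_lockref.lock held, so the elided branch
body runs under that lock (and releases it with the spin_unlock shown above),
presumably parking the qd on the LRU list for the shrinker.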
 
 static int slot_get(struct gfs2_quota_data *qd)
        unsigned int c, o = 0, b;
        unsigned char byte = 0;
 
-       spin_lock(&qd_lru_lock);
+       spin_lock(&qd_lock);
 
        if (qd->qd_slot_count++) {
-               spin_unlock(&qd_lru_lock);
+               spin_unlock(&qd_lock);
                return 0;
        }
 
 
        sdp->sd_quota_bitmap[c][o] |= 1 << b;
 
-       spin_unlock(&qd_lru_lock);
+       spin_unlock(&qd_lock);
 
        return 0;
 
 fail:
        qd->qd_slot_count--;
-       spin_unlock(&qd_lru_lock);
+       spin_unlock(&qd_lock);
        return -ENOSPC;
 }
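
slot_get() returns early when qd_slot_count was already nonzero, so only the
first holder pays for the bitmap scan and sets the bit shown at the end of
the hunk. A sketch of what the elided scan presumably looks like, where c, o
and b are the chunk index, byte offset and bit number (the loop itself and
the sd_quota_chunks bound are assumptions, not part of this patch):

        for (c = 0; c < sdp->sd_quota_chunks; c++)
                for (o = 0; o < PAGE_SIZE; o++) {
                        byte = sdp->sd_quota_bitmap[c][o];
                        if (byte != 0xFF)       /* byte has a clear bit */
                                goto found;
                }
        goto fail;                              /* bitmap full: -ENOSPC */
found:
        for (b = 0; b < 8; b++)
                if (!(byte & (1 << b)))
                        break;
        qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;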
 
 {
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 
-       spin_lock(&qd_lru_lock);
+       spin_lock(&qd_lock);
        gfs2_assert(sdp, qd->qd_slot_count);
        qd->qd_slot_count++;
-       spin_unlock(&qd_lru_lock);
+       spin_unlock(&qd_lock);
 }
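
This hunk is presumably slot_hold(): it asserts that a slot reference already
exists and bumps the count, under the same qd_lock that guards the bitmap in
slot_get() and slot_put().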
 
 {
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 
-       spin_lock(&qd_lru_lock);
+       spin_lock(&qd_lock);
        gfs2_assert(sdp, qd->qd_slot_count);
        if (!--qd->qd_slot_count) {
                gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
                qd->qd_slot = -1;
        }
-       spin_unlock(&qd_lru_lock);
+       spin_unlock(&qd_lock);
 }
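
The matching put: when the last slot reference drops, gfs2_icbit_munge()
clears the slot's bit and qd_slot is reset to -1, all inside one qd_lock
section so the bitmap and the refcount cannot go out of step.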
 
 static int bh_get(struct gfs2_quota_data *qd)
        if (sdp->sd_vfs->s_flags & MS_RDONLY)
                return 0;
 
-       spin_lock(&qd_lru_lock);
+       spin_lock(&qd_lock);
 
        list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
        if (!found)
                qd = NULL;
 
-       spin_unlock(&qd_lru_lock);
+       spin_unlock(&qd_lock);
 
        if (qd) {
                gfs2_assert_warn(sdp, qd->qd_change_sync);
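
Here (presumably qd_fish(), which picks work for the quota daemon) the
sd_quota_list walk and the qd_check_sync() test against sd_quota_sync_gen sit
in one qd_lock section, so testing and claiming an entry for sync is atomic
with respect to concurrent walkers.
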
        x = be64_to_cpu(qc->qc_change) + change;
        qc->qc_change = cpu_to_be64(x);
 
-       spin_lock(&qd_lru_lock);
+       spin_lock(&qd_lock);
        qd->qd_change = x;
-       spin_unlock(&qd_lru_lock);
+       spin_unlock(&qd_lock);
 
        if (!x) {
                gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
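
In the change accounting (do_qc, by the look of the qc_change update), the
on-disk big-endian counter and its in-core mirror qd->qd_change are updated
together, with the mirror written under qd_lock; every reader of qd_change in
the hunks below takes the same lock. The assert says a zero delta must only
occur while QDF_CHANGE is set, which the elided lines presumably clear.
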
        if (!qd->qd_qb.qb_limit)
                return 0;
 
-       spin_lock(&qd_lru_lock);
+       spin_lock(&qd_lock);
        value = qd->qd_change;
-       spin_unlock(&qd_lru_lock);
+       spin_unlock(&qd_lock);
 
        spin_lock(&gt->gt_spin);
        num = gt->gt_quota_scale_num;
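
The check path samples qd_change under qd_lock, pairing with the writer
above, and then switches to the separate gt->gt_spin tunables lock to read
the quota scale factors; the two locks protect disjoint data.
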
                if (!sync)
                        continue;
 
-               spin_lock(&qd_lru_lock);
+               spin_lock(&qd_lock);
                found = qd_check_sync(sdp, qd, NULL);
-               spin_unlock(&qd_lru_lock);
+               spin_unlock(&qd_lock);
 
                if (!found)
                        continue;
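
On the sync side, qd_check_sync() is re-run under qd_lock with a NULL
generation pointer, presumably so the sd_quota_sync_gen filter used in the
earlier hunk is skipped and only the "does this qd still need syncing" test
remains.
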
                        continue;
 
                value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
-               spin_lock(&qd_lru_lock);
+               spin_lock(&qd_lock);
                value += qd->qd_change;
-               spin_unlock(&qd_lru_lock);
+               spin_unlock(&qd_lock);
 
                if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
                        print_message(qd, "exceeded");
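
The enforcement test adds the pending in-core delta (qd_change, again sampled
under qd_lock) to the committed qb_value from the quota block and compares
the sum against qb_limit before printing the "exceeded" message.
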
                        qd->qd_slot = slot;
                        qd->qd_slot_count = 1;
 
-                       spin_lock(&qd_lru_lock);
+                       spin_lock(&qd_lock);
                        gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
                        list_add(&qd->qd_list, &sdp->sd_quota_list);
                        atomic_inc(&sdp->sd_quota_count);
-                       spin_unlock(&qd_lru_lock);
+                       spin_unlock(&qd_lock);
 
                        found++;
                }
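
At mount, each pending quota-change record gets a preallocated slot
(qd_slot_count starts at 1) and is inserted into sd_quota_list; the bitmap
munge, the list insertion and the sd_quota_count bump form a single qd_lock
critical section.
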
        struct gfs2_quota_data *qd;
        unsigned int x;
 
-       spin_lock(&qd_lru_lock);
+       spin_lock(&qd_lock);
        while (!list_empty(head)) {
                qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
 
                    (qd->qd_lockref.count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
                        spin_unlock(&qd->qd_lockref.lock);
                        list_move(&qd->qd_list, head);
-                       spin_unlock(&qd_lru_lock);
+                       spin_unlock(&qd_lock);
                        schedule();
-                       spin_lock(&qd_lru_lock);
+                       spin_lock(&qd_lock);
                        continue;
                }
                spin_unlock(&qd->qd_lockref.lock);
                        atomic_dec(&qd_lru_count);
                }
                atomic_dec(&sdp->sd_quota_count);
-               spin_unlock(&qd_lru_lock);
+               spin_unlock(&qd_lock);
 
                if (!qd->qd_lockref.count) {
                        gfs2_assert_warn(sdp, !qd->qd_change);
                gfs2_glock_put(qd->qd_gl);
                kmem_cache_free(gfs2_quotad_cachep, qd);
 
-               spin_lock(&qd_lru_lock);
+               spin_lock(&qd_lock);
        }
-       spin_unlock(&qd_lru_lock);
+       spin_unlock(&qd_lock);
 
        gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
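
Teardown walks sd_quota_list under qd_lock and spins politely on busy
entries: a qd that is still referenced (beyond a pending QDF_CHANGE) is moved
to the far end of the list so it is retried last, and the lock is dropped
around schedule() before continuing. Idle entries are taken off the LRU
accounting, freed outside the lock, and the closing assert checks that
sd_quota_count reached zero.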