*/
 DEFINE_STATIC_LGLOCK(file_lock_lglock);
 static DEFINE_PER_CPU(struct hlist_head, file_lock_list);
+DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
 
 /*
  * The blocked_hash is used to find POSIX lock loops for deadlock detection.
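The division of labour this patch sets up: every path that adds or removes an entry on the per-cpu file_lock_list takes file_rwsem for *reading*, which on a percpu rwsem is cheap and fully concurrent, while the rare /proc/locks traversal at the bottom takes it for *writing* and thereby excludes all mutators at once. A minimal sketch of the pattern, assuming only <linux/percpu-rwsem.h> (the demo_* names are illustrative, not part of the patch):

#include <linux/percpu-rwsem.h>

DEFINE_STATIC_PERCPU_RWSEM(demo_rwsem);

/* Hot path: many concurrent mutators, each a percpu-rwsem reader. */
static void demo_mutate_local(void)
{
	percpu_down_read(&demo_rwsem);
	/* ... add/remove an entry on this CPU's private list ... */
	percpu_up_read(&demo_rwsem);
}

/* Cold path: the global walker is the writer and excludes every mutator. */
static void demo_walk_all(void)
{
	percpu_down_write(&demo_rwsem);
	/* ... safely iterate every CPU's list ... */
	percpu_up_write(&demo_rwsem);
}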
 /* Must be called with the flc_lock held! */
 static void locks_insert_global_locks(struct file_lock *fl)
 {
+       percpu_rwsem_assert_held(&file_rwsem);
+
        lg_local_lock(&file_lock_lglock);
        fl->fl_link_cpu = smp_processor_id();
        hlist_add_head(&fl->fl_link, this_cpu_ptr(&file_lock_list));
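percpu_rwsem_assert_held() is a lockdep-backed annotation: with lockdep enabled it complains if locks_insert_global_locks() is reached without file_rwsem held in either mode, and it compiles to nothing otherwise, so the global-list helpers document (and enforce in debug builds) that their callers take the rwsem first.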
 /* Must be called with the flc_lock held! */
 static void locks_delete_global_locks(struct file_lock *fl)
 {
+       percpu_rwsem_assert_held(&file_rwsem);
+
        /*
         * Avoid taking lock if already unhashed. This is safe since this check
         * is done while holding the flc_lock, and new insertions into the list
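Note the ordering in locks_delete_global_locks(): the assertion runs before the "already unhashed" early return, so even the fast path that never touches the lglock still verifies, in debug builds, that the caller holds file_rwsem.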
                        return -ENOMEM;
        }
 
+       percpu_down_read(&file_rwsem);
        spin_lock(&ctx->flc_lock);
        if (request->fl_flags & FL_ACCESS)
                goto find_conflict;
 
 out:
        spin_unlock(&ctx->flc_lock);
+       percpu_up_read(&file_rwsem);
        if (new_fl)
                locks_free_lock(new_fl);
        locks_dispose_list(&dispose);
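This hunk in flock_lock_inode() establishes the acquisition order the rest of the patch repeats: file_rwsem first, then the inode's flc_lock, and only then (inside locks_insert_global_locks()) the per-cpu lglock. Condensed, with ctx as in the surrounding function:

	percpu_down_read(&file_rwsem);		/* 1: may sleep, taken first */
	spin_lock(&ctx->flc_lock);		/* 2: per-inode spinlock */
	lg_local_lock(&file_lock_lglock);	/* 3: per-cpu list lock */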
                new_fl2 = locks_alloc_lock();
        }
 
+       percpu_down_read(&file_rwsem);
        spin_lock(&ctx->flc_lock);
        /*
         * New lock request. Walk all POSIX locks and look for conflicts. If
        }
  out:
        spin_unlock(&ctx->flc_lock);
+       percpu_up_read(&file_rwsem);
        /*
         * Free any unused locks.
         */
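The POSIX path gets the same bracketing, and the placement matters: percpu_down_read() may sleep, so it has to sit outside spin_lock(&ctx->flc_lock), and the releases mirror the acquisitions in reverse order.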
                return error;
        }
 
+       percpu_down_read(&file_rwsem);
        spin_lock(&ctx->flc_lock);
 
        time_out_leases(inode, &dispose);
        locks_insert_block(fl, new_fl);
        trace_break_lease_block(inode, new_fl);
        spin_unlock(&ctx->flc_lock);
+       percpu_up_read(&file_rwsem);
+
        locks_dispose_list(&dispose);
        error = wait_event_interruptible_timeout(new_fl->fl_wait,
                                                !new_fl->fl_next, break_time);
+
+       percpu_down_read(&file_rwsem);
        spin_lock(&ctx->flc_lock);
        trace_break_lease_unblock(inode, new_fl);
        locks_delete_block(new_fl);
        }
 out:
        spin_unlock(&ctx->flc_lock);
+       percpu_up_read(&file_rwsem);
        locks_dispose_list(&dispose);
        locks_free_lock(new_fl);
        return error;
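The interesting part of __break_lease() is the middle: the rwsem is dropped before wait_event_interruptible_timeout() and re-taken afterwards, so a task blocked waiting for a lease break neither sleeps holding file_rwsem nor stalls the /proc/locks writer for the whole break_time. Condensed (ctx, new_fl and break_time as in the function above):

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	/* ... queue new_fl behind the lease being broken ... */
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);		/* drop before sleeping */

	error = wait_event_interruptible_timeout(new_fl->fl_wait,
						 !new_fl->fl_next, break_time);

	percpu_down_read(&file_rwsem);		/* re-take to unlink safely */
	spin_lock(&ctx->flc_lock);
	/* ... locks_delete_block(new_fl) ... */
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);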
                return -EINVAL;
        }
 
+       percpu_down_read(&file_rwsem);
        spin_lock(&ctx->flc_lock);
        time_out_leases(inode, &dispose);
        error = check_conflicting_open(dentry, arg, lease->fl_flags);
                lease->fl_lmops->lm_setup(lease, priv);
 out:
        spin_unlock(&ctx->flc_lock);
+       percpu_up_read(&file_rwsem);
        locks_dispose_list(&dispose);
        if (is_deleg)
                inode_unlock(inode);
                return error;
        }
 
+       percpu_down_read(&file_rwsem);
        spin_lock(&ctx->flc_lock);
        list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
                if (fl->fl_file == filp &&
        if (victim)
                error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
        spin_unlock(&ctx->flc_lock);
+       percpu_up_read(&file_rwsem);
        locks_dispose_list(&dispose);
        return error;
 }
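The lease paths, generic_add_lease() above and generic_delete_lease() here, get the identical read-side bracketing; that is what lets the write-side holder below assume no file_lock anywhere is being hashed or unhashed while it runs.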
        struct locks_iterator *iter = f->private;
 
        iter->li_pos = *pos + 1;
+       percpu_down_write(&file_rwsem);
        lg_global_lock(&file_lock_lglock);
        spin_lock(&blocked_lock_lock);
        return seq_hlist_start_percpu(&file_lock_list, &iter->li_cpu, *pos);
 {
        spin_unlock(&blocked_lock_lock);
        lg_global_unlock(&file_lock_lglock);
+       percpu_up_write(&file_rwsem);
 }
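On the /proc/locks side, locks_start() now takes file_rwsem for writing before lg_global_lock() and blocked_lock_lock, and locks_stop() releases the three in reverse order. While the write half is held, every percpu_down_read() above blocks, so the per-cpu lists stay stable for the whole seq_file walk; the cost lands on this rare path instead of on every lock operation.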
 
 static const struct seq_operations locks_seq_operations = {