/* All arch specific implementations share the same struct */
struct rw_semaphore {
atomic_long_t count;
- struct list_head wait_list;
+ struct wlist_head waiters;
raw_spinlock_t wait_lock;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
struct optimistic_spin_queue osq; /* spinner MCS lock */
#define __RWSEM_INITIALIZER(name) \
{ __RWSEM_INIT_COUNT(name), \
- .wait_list = LIST_HEAD_INIT((name).wait_list), \
+ .waiters = WLIST_HEAD_INIT, \
.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
__RWSEM_OPT_INIT(name) \
__RWSEM_DEP_MAP_INIT(name) }
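The hunks above (and everything below) lean on a wlist ("waiter list") API that is not part of this excerpt. Purely as a reading aid, here is a minimal sketch of the shape the call sites seem to assume: a NULL-terminated list with head and tail pointers so that appending stays O(1). The field names and layout are guesses, not the patch's actual definition; later sketches build on this one.

/* Hypothetical sketch only; the real wlist types are not shown in this diff. */
#include <linux/types.h>
#include <linux/kernel.h>	/* container_of() */

struct wlist_node {
	struct wlist_node *next;
	struct wlist_node *prev;
};

struct wlist_head {
	struct wlist_node *first;
	struct wlist_node *last;
};

#define WLIST_HEAD_INIT		{ .first = NULL, .last = NULL }

static inline void INIT_WLIST_HEAD(struct wlist_head *h)
{
	h->first = NULL;
	h->last = NULL;
}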
/*
* This is the same regardless of which rwsem implementation that is being used.
- * It is just a heuristic meant to be called by somebody alreadying holding the
+ * It is just a heuristic meant to be called by somebody already holding the
* rwsem to see if somebody from an incompatible type is wanting access to the
* lock.
*/
-static inline int rwsem_is_contended(struct rw_semaphore *sem)
+static inline bool rwsem_is_contended(struct rw_semaphore *sem)
{
- return !list_empty(&sem->wait_list);
+ return !wlist_empty(&sem->waiters);
}
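The conversion does not change what this helper means: a task already holding the rwsem can poll it and voluntarily drop the lock when someone else has queued up. A sketch of that pattern follows; the scan helper is invented for the example, everything else is the stock rwsem API.

#include <linux/rwsem.h>
#include <linux/sched.h>

/* Illustrative caller only: scan_one_item() is made up for this example. */
static bool scan_one_item(void);

static void scan_under_read_lock(struct rw_semaphore *sem)
{
	down_read(sem);
	while (scan_one_item()) {
		if (rwsem_is_contended(sem)) {
			/* A waiter (possibly a writer) is queued: back off briefly. */
			up_read(sem);
			cond_resched();
			down_read(sem);
		}
	}
	up_read(sem);
}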
/*
};
struct rwsem_waiter {
- struct list_head list;
+ struct wlist_node list;
struct task_struct *task;
enum rwsem_waiter_type type;
};
#endif
sem->count = 0;
raw_spin_lock_init(&sem->wait_lock);
- INIT_LIST_HEAD(&sem->wait_list);
+ INIT_WLIST_HEAD(&sem->waiters);
}
EXPORT_SYMBOL(__init_rwsem);
+static inline struct rwsem_waiter *first_waiter(struct rw_semaphore *sem)
+{
+ return wlist_first_entry(&sem->waiters, struct rwsem_waiter, list);
+}
+
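first_waiter() is only a convenience wrapper; under the layout sketched earlier, the two accessors it relies on would reduce to something like this (again an assumption, not the patch's code):

/* Hypothetical helpers matching the earlier sketch. */
static inline bool wlist_empty(const struct wlist_head *h)
{
	return h->first == NULL;
}

#define wlist_first_entry(head, type, member) \
	container_of((head)->first, type, member)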
/*
* handle the lock release when processes blocked on it that can now run
* - if we come here, then:
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
struct rwsem_waiter *waiter;
+ struct wlist_node *next;
struct task_struct *tsk;
int woken;
- waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
+ waiter = first_waiter(sem);
if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
if (wakewrite)
/* grant an infinite number of read locks to the front of the queue */
woken = 0;
- do {
- struct list_head *next = waiter->list.next;
-
- list_del(&waiter->list);
+ wlist_for_each_entry_from_safe(waiter, next, list) {
+ if (waiter->type == RWSEM_WAITING_FOR_WRITE)
+ break;
+ wlist_del(&sem->waiters, &waiter->list);
tsk = waiter->task;
/*
* Make sure we do not wakeup the next reader before
wake_up_process(tsk);
put_task_struct(tsk);
woken++;
- if (next == &sem->wait_list)
- break;
- waiter = list_entry(next, struct rwsem_waiter, list);
- } while (waiter->type != RWSEM_WAITING_FOR_WRITE);
+ }
sem->count += woken;
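The reader-grant loop now starts from the entry already held in waiter and must survive deletion of the current node, so the iterator presumably caches the next pointer up front and stops at the NULL terminator. A guess at its shape, based only on how it is invoked here and in __rwsem_mark_wake() below:

/*
 * Hypothetical iterator: walk from 'pos' to the end of a NULL-terminated
 * list, caching the next node so 'pos' may be deleted inside the body.
 */
#define wlist_for_each_entry_from_safe(pos, n, member)				\
	for (n = (pos)->member.next;						\
	     (pos);								\
	     pos = (n) ? container_of(n, typeof(*(pos)), member) : NULL,	\
	     n = (n) ? (n)->next : NULL)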
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
- struct rwsem_waiter *waiter;
+ struct rwsem_waiter *waiter = first_waiter(sem);
- waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
wake_up_process(waiter->task);
return sem;
raw_spin_lock_irqsave(&sem->wait_lock, flags);
- if (sem->count >= 0 && list_empty(&sem->wait_list)) {
+ if (sem->count >= 0 && wlist_empty(&sem->waiters)) {
/* granted */
sem->count++;
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
waiter.task = tsk;
waiter.type = RWSEM_WAITING_FOR_READ;
get_task_struct(tsk);
-
- list_add_tail(&waiter.list, &sem->wait_list);
+ wlist_add(&sem->waiters, &waiter.list);
/* we don't need to touch the semaphore struct anymore */
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
unsigned long flags;
int ret = 0;
-
raw_spin_lock_irqsave(&sem->wait_lock, flags);
- if (sem->count >= 0 && list_empty(&sem->wait_list)) {
+ if (sem->count >= 0 && wlist_empty(&sem->waiters)) {
/* granted */
sem->count++;
ret = 1;
tsk = current;
waiter.task = tsk;
waiter.type = RWSEM_WAITING_FOR_WRITE;
- list_add_tail(&waiter.list, &sem->wait_list);
+ wlist_add(&sem->waiters, &waiter.list);
/* wait for someone to release the lock */
for (;;) {
/* got the lock */
sem->count = -1;
out:
- list_del(&waiter.list);
+ wlist_del(&sem->waiters, &waiter.list);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
raw_spin_lock_irqsave(&sem->wait_lock, flags);
- if (--sem->count == 0 && !list_empty(&sem->wait_list))
+ if (--sem->count == 0 && !wlist_empty(&sem->waiters))
sem = __rwsem_wake_one_writer(sem);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
raw_spin_lock_irqsave(&sem->wait_lock, flags);
sem->count = 0;
- if (!list_empty(&sem->wait_list))
+ if (!wlist_empty(&sem->waiters))
sem = __rwsem_do_wake(sem, 1);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
raw_spin_lock_irqsave(&sem->wait_lock, flags);
sem->count = 1;
- if (!list_empty(&sem->wait_list))
+ if (!wlist_empty(&sem->waiters))
sem = __rwsem_do_wake(sem, 0);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
#endif
atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
raw_spin_lock_init(&sem->wait_lock);
- INIT_LIST_HEAD(&sem->wait_list);
+ INIT_WLIST_HEAD(&sem->waiters);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
sem->owner = NULL;
osq_lock_init(&sem->osq);
#endif
}
-
EXPORT_SYMBOL(__init_rwsem);
enum rwsem_waiter_type {
};
struct rwsem_waiter {
- struct list_head list;
+ struct wlist_node list;
struct task_struct *task;
enum rwsem_waiter_type type;
};
enum rwsem_wake_type wake_type,
struct wake_q_head *wake_q)
{
- struct rwsem_waiter *waiter, *tmp;
+ struct rwsem_waiter *waiter;
+ struct wlist_node *next;
long oldcount, woken = 0, adjustment = 0;
/*
* Take a peek at the queue head waiter such that we can determine
* the wakeup(s) to perform.
*/
- waiter = list_first_entry(&sem->wait_list, struct rwsem_waiter, list);
+ waiter = wlist_first_entry(&sem->waiters, struct rwsem_waiter, list);
if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
if (wake_type == RWSEM_WAKE_ANY) {
* for above. Note we increment the 'active part' of the count by the
* number of readers before waking any processes up.
*/
- list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
+ wlist_for_each_entry_from_safe(waiter, next, list) {
struct task_struct *tsk;
if (waiter->type == RWSEM_WAITING_FOR_WRITE)
tsk = waiter->task;
wake_q_add(wake_q, tsk);
- list_del(&waiter->list);
+ wlist_del(&sem->waiters, &waiter->list);
/*
* Ensure that the last operation is setting the reader
* waiter to nil such that rwsem_down_read_failed() cannot
}
adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
- if (list_empty(&sem->wait_list)) {
+ if (wlist_empty(&sem->waiters)) {
/* hit end of list above */
adjustment -= RWSEM_WAITING_BIAS;
}
waiter.type = RWSEM_WAITING_FOR_READ;
raw_spin_lock_irq(&sem->wait_lock);
- if (list_empty(&sem->wait_list))
+ if (!wlist_add(&sem->waiters, &waiter.list))
adjustment += RWSEM_WAITING_BIAS;
- list_add_tail(&waiter.list, &sem->wait_list);
/* we're now waiting on the lock, but no longer actively locking */
count = atomic_long_add_return(adjustment, &sem->count);
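Here wlist_add() folds the old list_empty() check and list_add_tail() into one call: RWSEM_WAITING_BIAS is applied exactly where the old code saw an empty list, which suggests the return value reports whether waiters were already queued before the append. A sketch consistent with that reading and with the layout guessed above:

/* Hypothetical: append at the tail, report whether anyone was already queued. */
static inline bool wlist_add(struct wlist_head *h, struct wlist_node *node)
{
	bool had_waiters = (h->first != NULL);

	node->next = NULL;
	node->prev = h->last;
	if (had_waiters)
		h->last->next = node;
	else
		h->first = node;
	h->last = node;

	return had_waiters;
}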
* race conditions between checking the rwsem wait list and setting the
* sem->count accordingly.
*/
-static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
+static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem,
+ bool only_me)
{
/*
* Avoid trying to acquire write lock if count isn't RWSEM_WAITING_BIAS.
* Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there
* are other tasks on the wait list, we need to add on WAITING_BIAS.
*/
- count = list_is_singular(&sem->wait_list) ?
- RWSEM_ACTIVE_WRITE_BIAS :
+ count = only_me ? RWSEM_ACTIVE_WRITE_BIAS :
RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;
if (atomic_long_cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count)
__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
{
long count;
- bool waiting = true; /* any queued threads before us */
struct rwsem_waiter waiter;
struct rw_semaphore *ret = sem;
DEFINE_WAKE_Q(wake_q);
raw_spin_lock_irq(&sem->wait_lock);
- /* account for this before adding a new element to the list */
- if (list_empty(&sem->wait_list))
- waiting = false;
-
- list_add_tail(&waiter.list, &sem->wait_list);
-
/* we're now waiting on the lock, but no longer actively locking */
- if (waiting) {
+ if (!wlist_add(&sem->waiters, &waiter.list)) {
count = atomic_long_read(&sem->count);
/*
/* wait until we successfully acquire the lock */
set_current_state(state);
while (true) {
- if (rwsem_try_write_lock(count, sem))
+ if (rwsem_try_write_lock(count, sem,
+ wlist_deleted(&waiter.list)))
break;
raw_spin_unlock_irq(&sem->wait_lock);
raw_spin_lock_irq(&sem->wait_lock);
}
__set_current_state(TASK_RUNNING);
- list_del(&waiter.list);
+ wlist_del(&sem->waiters, &waiter.list);
raw_spin_unlock_irq(&sem->wait_lock);
return ret;
out_nolock:
__set_current_state(TASK_RUNNING);
raw_spin_lock_irq(&sem->wait_lock);
- list_del(&waiter.list);
- if (list_empty(&sem->wait_list))
+ if (wlist_del(&sem->waiters, &waiter.list))
atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
else
__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
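Two more helpers carry the remaining list_head idioms: wlist_deleted() replaces list_is_singular() as the hint passed to rwsem_try_write_lock(), apparently telling the writer whether a waker already unlinked it, and in the error path wlist_del() collapses the old list_del() + list_empty() pair, returning whether the list became empty. Continuing the same guessed layout (the poison marker and the double-delete guard are assumptions):

/* Hypothetical: mark removed nodes so their owner can detect it later. */
#define WLIST_POISON	((struct wlist_node *)0x100)

static inline bool wlist_deleted(const struct wlist_node *node)
{
	return node->next == WLIST_POISON;
}

/* Hypothetical: unlink 'node', poison it, report whether the list is now empty. */
static inline bool wlist_del(struct wlist_head *h, struct wlist_node *node)
{
	if (!wlist_deleted(node)) {
		if (node->prev)
			node->prev->next = node->next;
		else
			h->first = node->next;
		if (node->next)
			node->next->prev = node->prev;
		else
			h->last = node->prev;

		node->next = WLIST_POISON;
		node->prev = WLIST_POISON;
	}
	return h->first == NULL;
}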
raw_spin_lock_irqsave(&sem->wait_lock, flags);
locked:
- if (!list_empty(&sem->wait_list))
+ if (!wlist_empty(&sem->waiters))
__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
raw_spin_lock_irqsave(&sem->wait_lock, flags);
- if (!list_empty(&sem->wait_list))
+ if (!wlist_empty(&sem->waiters))
__rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);