#c, atomic_long_read(&(sem)->count), \
(unsigned long) sem->magic, \
atomic_long_read(&(sem)->owner), (long)current, \
- list_empty(&(sem)->wait_list) ? "" : "not ")) \
+ wlist_empty(&(sem)->waiters) ? "" : "not ")) \
debug_locks_off(); \
} while (0)
#else
* 3) Error path of rwsem_down_write_slowpath().
*
* For all the above cases, wait_lock will be held. A writer must also
- * be the first one in the wait_list to be eligible for setting the handoff
+ * be the first one in the waiters list to be eligible for setting the handoff
* bit. So concurrent setting/clearing of handoff bit is not possible.
*/
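For reference, the "first waiter" test that gates the handoff bit is still expressed through the rwsem_first_waiter() helper after this conversion; an illustrative check taken under wait_lock (not part of this hunk) might look like:

	/* Illustrative only: promote ourselves once we reach the head of the queue. */
	if (wstate == WRITER_NOT_FIRST && rwsem_first_waiter(sem) == &waiter)
		wstate = WRITER_FIRST;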
#define RWSEM_WRITER_LOCKED (1UL << 0)
#endif
atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
raw_spin_lock_init(&sem->wait_lock);
- INIT_LIST_HEAD(&sem->wait_list);
+ INIT_WLIST_HEAD(&sem->waiters);
atomic_long_set(&sem->owner, 0L);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
osq_lock_init(&sem->osq);
};
struct rwsem_waiter {
- struct list_head list;
+ struct wlist_node list;
struct task_struct *task;
enum rwsem_waiter_type type;
unsigned long timeout;
};
#define rwsem_first_waiter(sem) \
- list_first_entry(&sem->wait_list, struct rwsem_waiter, list)
+ wlist_first_entry(&sem->waiters, struct rwsem_waiter, list)
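The wlist_*() helpers themselves are introduced by an earlier patch in this series and are not shown here. The semantics this conversion relies on, inferred from the call sites in this patch, are roughly the following (a sketch with assumed signatures, not the actual implementation):

	/* Queue head and the per-waiter node it links together, FIFO ordered. */
	struct wlist_head;
	struct wlist_node;

	void INIT_WLIST_HEAD(struct wlist_head *head);		/* presumably a macro */
	bool wlist_empty(struct wlist_head *head);
	bool wlist_is_singular(struct wlist_head *head);	/* exactly one waiter */

	/* Append @node at the tail; true if @node is now the first (only) waiter. */
	bool wlist_add(struct wlist_head *head, struct wlist_node *node);

	/* Unlink @node; true if @head is empty afterwards. */
	bool wlist_del(struct wlist_head *head, struct wlist_node *node);

	/* Unlink @node from @head and append it to the tail of @to. */
	void wlist_move(struct wlist_head *head, struct wlist_node *node,
			struct wlist_head *to);

wlist_first_entry(head, type, member) and wlist_for_each_entry_safe(entry, next, head, member) are assumed to mirror their list.h counterparts, except that the "safe" cursor (next) is a struct wlist_node * rather than a second entry pointer, which is why rwsem_mark_wake() below switches its loop cursor from a *tmp entry pointer to a *next node pointer.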
enum rwsem_wake_type {
RWSEM_WAKE_ANY, /* Wake whatever's at head of wait list */
enum rwsem_wake_type wake_type,
struct wake_q_head *wake_q)
{
- struct rwsem_waiter *waiter, *tmp;
+ struct rwsem_waiter *waiter;
long oldcount, woken = 0, adjustment = 0;
- struct list_head wlist;
+ struct wlist_node *next;
+ struct wlist_head wlist;
lockdep_assert_held(&sem->wait_lock);
* 2) For each waiter in the new list, clear waiter->task and
* put them into wake_q to be woken up later.
*/
- INIT_LIST_HEAD(&wlist);
- list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
+ INIT_WLIST_HEAD(&wlist);
+ wlist_for_each_entry_safe(waiter, next, &sem->waiters, list) {
if (waiter->type == RWSEM_WAITING_FOR_WRITE)
continue;
woken++;
- list_move_tail(&waiter->list, &wlist);
+ wlist_move(&sem->waiters, &waiter->list, &wlist);
/*
* Limit # of readers that can be woken up per wakeup call.
adjustment = woken * RWSEM_READER_BIAS - adjustment;
lockevent_cond_inc(rwsem_wake_reader, woken);
- if (list_empty(&sem->wait_list)) {
+ if (wlist_empty(&sem->waiters)) {
/* hit end of list above */
adjustment -= RWSEM_FLAG_WAITERS;
}
atomic_long_add(adjustment, &sem->count);
/* 2nd pass */
- list_for_each_entry_safe(waiter, tmp, &wlist, list) {
+ wlist_for_each_entry_safe(waiter, next, &wlist, list) {
struct task_struct *tsk;
tsk = waiter->task;
new |= RWSEM_WRITER_LOCKED;
new &= ~RWSEM_FLAG_HANDOFF;
- if (list_is_singular(&sem->wait_list))
+ if (wlist_is_singular(&sem->waiters))
new &= ~RWSEM_FLAG_WAITERS;
}
} while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
*/
if ((rcnt == 1) && (count & RWSEM_FLAG_WAITERS)) {
raw_spin_lock_irq(&sem->wait_lock);
- if (!list_empty(&sem->wait_list))
+ if (!wlist_empty(&sem->waiters))
rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
&wake_q);
raw_spin_unlock_irq(&sem->wait_lock);
waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
raw_spin_lock_irq(&sem->wait_lock);
- if (list_empty(&sem->wait_list)) {
+ if (wlist_empty(&sem->waiters)) {
/*
* In case the wait queue is empty and the lock isn't owned
* by a writer or has the handoff bit set, this reader can
}
adjustment += RWSEM_FLAG_WAITERS;
}
- list_add_tail(&waiter.list, &sem->wait_list);
+ wlist_add(&sem->waiters, &waiter.list);
/* we're now waiting on the lock, but no longer actively locking */
count = atomic_long_add_return(adjustment, &sem->count);
return sem;
out_nolock:
- list_del(&waiter.list);
- if (list_empty(&sem->wait_list)) {
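+ /* wlist_del() returns true when the wait queue becomes empty */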
+ if (wlist_del(&sem->waiters, &waiter.list)) {
atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF,
&sem->count);
}
raw_spin_lock_irq(&sem->wait_lock);
- /* account for this before adding a new element to the list */
- wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST;
-
- list_add_tail(&waiter.list, &sem->wait_list);
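+ /* wlist_add() returns true when @waiter becomes the first waiter in the queue */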
+ if (wlist_add(&sem->waiters, &waiter.list))
+ wstate = WRITER_FIRST;
+ else
+ wstate = WRITER_NOT_FIRST;
/* we're now waiting on the lock */
if (wstate == WRITER_NOT_FIRST) {
raw_spin_lock_irq(&sem->wait_lock);
}
__set_current_state(TASK_RUNNING);
- list_del(&waiter.list);
+ wlist_del(&sem->waiters, &waiter.list);
raw_spin_unlock_irq(&sem->wait_lock);
lockevent_inc(rwsem_wlock);
out_nolock:
__set_current_state(TASK_RUNNING);
raw_spin_lock_irq(&sem->wait_lock);
- list_del(&waiter.list);
if (unlikely(wstate == WRITER_HANDOFF))
atomic_long_add(-RWSEM_FLAG_HANDOFF, &sem->count);
- if (list_empty(&sem->wait_list))
+ if (wlist_del(&sem->waiters, &waiter.list))
atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
else
rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
raw_spin_lock_irqsave(&sem->wait_lock, flags);
- if (!list_empty(&sem->wait_list))
+ if (!wlist_empty(&sem->waiters))
rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
raw_spin_lock_irqsave(&sem->wait_lock, flags);
- if (!list_empty(&sem->wait_list))
+ if (!wlist_empty(&sem->waiters))
rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);