struct ib_fmr_pool {
spinlock_t pool_lock;
- spinlock_t used_pool_lock;
-
int pool_size;
int max_pages;
int max_remaps;
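For orientation, here is a minimal sketch of how the pool is laid out once used_pool_lock is gone. The field names are taken from the hunks in this patch (pool_lock, free_list, dirty_list, used_list, dirty_watermark, req_ser and so on); the example_* type names are hypothetical and this is not the driver source. The point is simply that a single pool_lock now serializes every list in the pool.

	/* Illustrative sketch only, not drivers/infiniband/core/fmr_pool.c. */
	#include <linux/spinlock.h>
	#include <linux/list.h>
	#include <linux/atomic.h>
	#include <linux/wait.h>
	#include <linux/sched.h>

	struct example_fmr_pool {
		spinlock_t		pool_lock;	/* guards all three lists below */

		int			pool_size;
		int			max_pages;
		int			max_remaps;
		int			dirty_watermark;
		int			dirty_len;

		struct list_head	free_list;	/* unmapped, ready for reuse */
		struct list_head	dirty_list;	/* queued for the cleanup thread */
		struct list_head	used_list;	/* mapped, cached, unreferenced */

		struct task_struct	*thread;	/* cleanup thread */
		atomic_t		req_ser;
		atomic_t		flush_ser;
		wait_queue_head_t	force_wait;
	};

	struct example_pool_fmr {
		struct example_fmr_pool	*pool;
		struct list_head	list;
		struct hlist_node	cache_node;
		int			ref_count;
		int			remap_count;
	};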
int count = 0;
LIST_HEAD(temp_list);
- spin_lock_irq(&pool->used_pool_lock);
+ /* The path that allocates from the cache has a bad race with this
+ * path removing fmrs from used_list (this path does a list splice
+ * against the same list). Take pool_lock here to make the race
+ * safe.
+ */
+ spin_lock_irq(&pool->pool_lock);
list_splice_init(&pool->used_list, &temp_list);
- spin_unlock_irq(&pool->used_pool_lock);
list_for_each_entry(fmr, &temp_list, list) {
/* find first fmr that is not mapped yet */
if (fmr->remap_count == 0 ||
/* split the list in two */
list_cut_position(&unmap_list, &temp_list,
&fmr->list);
- spin_lock_irq(&pool->used_pool_lock);
list_splice(&temp_list, &pool->used_list);
- spin_unlock_irq(&pool->used_pool_lock);
already_split = 1;
break;
} else {
}
}
+ /* The fmrs to unmap are now detached from the cache, so it is
+ * safe to drop pool_lock.
+ */
+ spin_unlock_irq(&pool->pool_lock);
+
if (!already_split) {
/* All are mapped once */
list_splice_tail(&temp_list, &unmap_list);
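The comment above describes the race this patch closes: the allocator can pull an fmr off used_list while this path is splicing the same list. Below is a rough sketch of the resulting pattern, reusing the hypothetical example_* types from the sketch above; needs_unmap() is a placeholder for the remap_count test that is truncated in the hunk, and this is not the driver function.

	/* Sketch only: manipulate used_list entirely under pool_lock, then
	 * finish on private lists once the entries are detached.
	 */
	static bool needs_unmap(struct example_pool_fmr *fmr)
	{
		return fmr->remap_count == 0;	/* placeholder for the real test */
	}

	static void example_batch_release(struct example_fmr_pool *pool,
					  struct list_head *unmap_list)
	{
		struct example_pool_fmr *fmr;
		LIST_HEAD(temp_list);
		int already_split = 0;

		spin_lock_irq(&pool->pool_lock);
		list_splice_init(&pool->used_list, &temp_list);

		list_for_each_entry(fmr, &temp_list, list) {
			if (needs_unmap(fmr)) {
				/* entries up to and including fmr go to unmap_list */
				list_cut_position(unmap_list, &temp_list, &fmr->list);
				/* the rest stays cached on used_list */
				list_splice(&temp_list, &pool->used_list);
				already_split = 1;
				break;
			}
		}

		/* whatever remains on temp_list/unmap_list is private now */
		spin_unlock_irq(&pool->pool_lock);

		if (!already_split)
			list_splice_tail(&temp_list, unmap_list);
	}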
pool->dirty_watermark = params->dirty_watermark;
pool->dirty_len = 0;
spin_lock_init(&pool->pool_lock);
- spin_lock_init(&pool->used_pool_lock);
atomic_set(&pool->req_ser, 0);
atomic_set(&pool->flush_ser, 0);
init_waitqueue_head(&pool->force_wait);
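For completeness, a sketch of the corresponding pool setup with only one lock left to initialise. The helper name is hypothetical, and the list initialisation plus the omitted cache-bucket and cleanup-thread setup are assumed from the upstream ib_create_fmr_pool().

	static void example_init_pool(struct example_fmr_pool *pool,
				      int dirty_watermark)
	{
		/* one lock now covers free_list, dirty_list and used_list */
		spin_lock_init(&pool->pool_lock);

		INIT_LIST_HEAD(&pool->free_list);
		INIT_LIST_HEAD(&pool->dirty_list);
		INIT_LIST_HEAD(&pool->used_list);

		pool->dirty_watermark = dirty_watermark;
		pool->dirty_len = 0;

		atomic_set(&pool->req_ser, 0);
		atomic_set(&pool->flush_ser, 0);
		init_waitqueue_head(&pool->force_wait);
	}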
* Put them on the dirty list now so that the cleanup
* thread will reap them too.
*/
- spin_lock_irq(&pool->used_pool_lock);
+ /* pool_lock must be held when accessing dirty_list */
+ spin_lock_irq(&pool->pool_lock);
list_splice_init(&pool->used_list, &pool->dirty_list);
- spin_unlock_irq(&pool->used_pool_lock);
+ spin_unlock_irq(&pool->pool_lock);
serial = atomic_inc_return(&pool->req_ser);
wake_up_process(pool->thread);
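The force-flush step above hands the cached entries to the cleanup thread. A sketch of the whole sequence, again with the example_* types; the wait on force_wait mirrors the upstream ib_flush_fmr_pool() logic and is assumed rather than shown in this hunk.

	static int example_flush_pool(struct example_fmr_pool *pool)
	{
		int serial;

		/* move cached-but-unused entries where the cleanup thread reaps them */
		spin_lock_irq(&pool->pool_lock);
		list_splice_init(&pool->used_list, &pool->dirty_list);
		spin_unlock_irq(&pool->pool_lock);

		serial = atomic_inc_return(&pool->req_ser);
		wake_up_process(pool->thread);

		/* wait until the cleanup thread has completed this request */
		if (wait_event_interruptible(pool->force_wait,
					     atomic_read(&pool->flush_ser) - serial >= 0))
			return -EINTR;

		return 0;
	}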
if (fmr) {
/* found in cache */
++fmr->ref_count;
- if (fmr->ref_count == 1) {
+ if (fmr->ref_count == 1)
list_del(&fmr->list);
- }
spin_unlock_irqrestore(&pool->pool_lock, flags);
}
if (list_empty(&pool->free_list)) {
- spin_unlock_irqrestore(&pool->pool_lock, flags);
- spin_lock_irqsave(&pool->used_pool_lock, flags);
if (list_empty(&pool->used_list)) {
- spin_unlock_irqrestore(&pool->used_pool_lock, flags);
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
return ERR_PTR(-EAGAIN);
}
fmr = list_entry(pool->used_list.next, struct ib_pool_fmr,
list);
list_del(&fmr->list);
hlist_del_init(&fmr->cache_node);
- spin_unlock_irqrestore(&pool->used_pool_lock, flags);
} else {
fmr = list_entry(pool->free_list.next, struct ib_pool_fmr,
list);
list_del(&fmr->list);
- hlist_del_init(&fmr->cache_node);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
}
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
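This is the core of the fix: with a single pool_lock, the free_list check and the used_list fallback form one critical section, so there is no longer a window where the lock is dropped between the two checks. A sketch of that allocation step (hypothetical helper, example_* types from above, ERR_PTR from <linux/err.h>):

	static struct example_pool_fmr *example_get_fmr(struct example_fmr_pool *pool)
	{
		struct example_pool_fmr *fmr;
		unsigned long flags;

		spin_lock_irqsave(&pool->pool_lock, flags);

		if (!list_empty(&pool->free_list)) {
			/* fast path: a fully unmapped fmr is available */
			fmr = list_first_entry(&pool->free_list,
					       struct example_pool_fmr, list);
			list_del(&fmr->list);
		} else if (!list_empty(&pool->used_list)) {
			/* reclaim a cached mapping; drop it from the cache too */
			fmr = list_first_entry(&pool->used_list,
					       struct example_pool_fmr, list);
			list_del(&fmr->list);
			hlist_del_init(&fmr->cache_node);
		} else {
			fmr = ERR_PTR(-EAGAIN);
		}

		spin_unlock_irqrestore(&pool->pool_lock, flags);
		return fmr;
	}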
if (pool->relaxed && fmr->pd != rargs->pd) {
result = ib_set_fmr_pd(fmr->fmr, rargs->pd);
if (result) {
- spin_lock_irqsave(&pool->used_pool_lock, flags);
+ spin_lock_irqsave(&pool->pool_lock, flags);
list_add(&fmr->list, &pool->used_list);
- spin_unlock_irqrestore(&pool->used_pool_lock, flags);
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
pr_warn(PFX "set_fmr_pd returns %d\n", result);
io_virtual_address);
if (result) {
- spin_lock_irqsave(&pool->used_pool_lock, flags);
+ spin_lock_irqsave(&pool->pool_lock, flags);
list_add(&fmr->list, &pool->used_list);
- spin_unlock_irqrestore(&pool->used_pool_lock, flags);
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
pr_warn(PFX "fmr_map returns %d\n", result);
pool = fmr->pool;
- spin_lock_irqsave(&pool->used_pool_lock, flags);
+ spin_lock_irqsave(&pool->pool_lock, flags);
--fmr->ref_count;
if (!fmr->ref_count) {
fmr, fmr->ref_count);
#endif
- spin_unlock_irqrestore(&pool->used_pool_lock, flags);
-
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
return 0;
}
EXPORT_SYMBOL(ib_fmr_pool_unmap);
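Finally, the unmap path: the reference drop and the requeue are one critical section under pool_lock. A sketch with the example_* types; the free_list/dirty_list routing follows the upstream ib_fmr_pool_unmap() and is assumed here (the relaxed-FMR variant may route entries to used_list instead).

	static int example_unmap(struct example_pool_fmr *fmr)
	{
		struct example_fmr_pool *pool = fmr->pool;
		unsigned long flags;

		spin_lock_irqsave(&pool->pool_lock, flags);

		if (--fmr->ref_count == 0) {
			if (fmr->remap_count < pool->max_remaps) {
				/* still remappable: keep it ready for reuse */
				list_add_tail(&fmr->list, &pool->free_list);
			} else {
				/* exhausted: queue it for the cleanup thread */
				list_add_tail(&fmr->list, &pool->dirty_list);
				if (++pool->dirty_len >= pool->dirty_watermark) {
					atomic_inc(&pool->req_ser);
					wake_up_process(pool->thread);
				}
			}
		}

		spin_unlock_irqrestore(&pool->pool_lock, flags);
		return 0;
	}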