 	struct vm_area_struct *prev;
 	struct rb_node **rb_link, *rb_parent;
-	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
-			   &prev, &rb_link, &rb_parent))
-		BUG();
+	BUG_ON(find_vma_links(mm, vma->vm_start, vma->vm_end,
+			      &prev, &rb_link, &rb_parent));
 	__vma_link(mm, vma, prev, rb_link, rb_parent);
 	mm->map_count++;
 }
 		 * can't change from under us thanks to the
 		 * anon_vma->root->rwsem.
 		 */
-		if (__test_and_set_bit(0, (unsigned long *)
-				       &anon_vma->root->rb_root.rb_root.rb_node))
-			BUG();
+		BUG_ON(__test_and_set_bit(0, (unsigned long *)
+					  &anon_vma->root->rb_root.rb_root.rb_node));
 	}
 }
 		 * mm_all_locks_mutex, there may be other cpus
 		 * changing other bitflags in parallel to us.
 		 */
-		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
-			BUG();
+		BUG_ON(test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags));
 		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
 	}
 }
 		 * can't change from under us until we release the
 		 * anon_vma->root->rwsem.
 		 */
-		if (!__test_and_clear_bit(0, (unsigned long *)
-					  &anon_vma->root->rb_root.rb_root.rb_node))
-			BUG();
+		BUG_ON(!__test_and_clear_bit(0, (unsigned long *)
+					     &anon_vma->root->rb_root.rb_root.rb_node));
 		anon_vma_unlock_write(anon_vma);
 	}
 }
 		 * because we hold the mm_all_locks_mutex.
 		 */
 		i_mmap_unlock_write(mapping);
-		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
-					&mapping->flags))
-			BUG();
+		BUG_ON(!test_and_clear_bit(AS_MM_ALL_LOCKS, &mapping->flags));
 	}
 }
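
Every hunk above is the same mechanical conversion: an open-coded "if (cond) BUG();" becomes "BUG_ON(cond);". A minimal stand-alone sketch of the pattern (hypothetical helper, not part of the patch):

	#include <linux/bitops.h>
	#include <linux/bug.h>

	/* Illustrative only: both forms trap if the bit was already set. */
	static void sketch(unsigned long *flags)
	{
		if (test_and_set_bit(0, flags))		/* form being removed */
			BUG();

		BUG_ON(test_and_set_bit(1, flags));	/* form being introduced */
	}

The conditions in these hunks have side effects (the bit operations must still execute); that is preserved because BUG_ON(), as defined in include/asm-generic/bug.h, evaluates its argument, so only the error path is folded into the macro.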