check_seq(mt, limit, false);
rcu_read_lock();
+ // Check the first entry and move the ma_state to the correct position.
+ MT_BUG_ON(mt, mas_walk(&mas) != xa_mk_value(i++));
for ( ; i <= limit + 1; i++) {
entry = mas_next(&mas, limit);
if (i > limit)
unsigned int loop_sleep;
unsigned int val_sleep;
+ unsigned int failed; /* Failed detection count for other threads */
unsigned int seen_entry2; /* Number of threads that have seen the new value */
unsigned int seen_entry3; /* Number of threads that have seen the new value */
unsigned int seen_both; /* Number of threads that have seen both new values */
+ unsigned int seen_toggle; /* Number of threads that have seen the toggled entry */
+ unsigned int seen_added; /* Number of threads that have seen the added entry */
+ unsigned int seen_removed; /* Number of threads that have seen the removal */
unsigned long last; /* The end of the range to write. */
+
+ unsigned long removed; /* The index of the removed entry */
+ unsigned long added; /* The index of the added entry */
+ unsigned long toggle; /* The index of the entry being toggled */
};
static inline
/* Out of the interesting range */
if (mas.index < test->index || mas.index > test->last) {
+ if (entry != expected) {
+ printk("%lx - %lx = %p not %p\n",
+ mas.index, mas.last, entry, expected);
+ }
MT_BUG_ON(test->mt, entry != expected);
continue;
}
return NULL;
}
+/*
+ * Ensure the expected empty ranges remain empty.
+ * These are 6-9, 16-19, etc., skipping the range the writer modifies.
+ */
+static void *rcu_empty(void *ptr)
+{
+ struct rcu_test_struct *test = (struct rcu_test_struct*)ptr;
+ unsigned long count = test->count;
+ void *entry;
+ unsigned long i, nr_entries = 1000;
+ MA_STATE(mas, test->mt, 6, 6);
+
+ rcu_register_thread();
+
+ /* Walk the tree's empty ranges test->count times. */
+ while (count--) {
+ usleep(test->loop_sleep);
+ rcu_read_lock();
+ for (i = 0; i <= nr_entries; i++) {
+ /* Skip the range the writer is modifying. */
+ if (mas.index >= test->index && mas.index <= test->last) {
+ mas.index += 10;
+ continue;
+ }
+ entry = mas_walk(&mas);
+ MT_BUG_ON(test->mt, entry != NULL);
+ mas.index += 10;
+ }
+ rcu_read_unlock();
+ mas_set(&mas, 6);
+ }
+
+ rcu_unregister_thread();
+ return NULL;
+}
+
+static void *rcu_present(void *ptr)
+{
+ struct rcu_test_struct *test = (struct rcu_test_struct*)ptr;
+ unsigned long i, count = test->count, nr_entries = 1000;
+ void *entry, *expected;
+ MA_STATE(mas, test->mt, 0, 0);
+
+ rcu_register_thread();
+
+ while (count--) {
+ usleep(test->loop_sleep);
+ for (i = 0; i <= nr_entries; i++) {
+ rcu_read_lock();
+ entry = mas_walk(&mas);
+ /* The expected value is based on the start range. */
+ expected = xa_mk_value(mas.index ? mas.index / 10 : 0);
+ if (mas.index <= test->removed &&
+ mas.last >= test->removed) {
+ MT_BUG_ON(test->mt, entry && entry != expected);
+ } else if (mas.index < test->index ||
+ mas.index > test->last) {
+ /* Out of the interesting range */
+ MT_BUG_ON(test->mt, entry != expected);
+ } else {
+ if (entry == test->entry2)
+ uatomic_inc(&test->seen_entry2);
+ else if (entry == test->entry3)
+ uatomic_inc(&test->seen_entry3);
+ else
+ MT_BUG_ON(test->mt, entry != expected);
+ }
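+ /* Jump to the start of the next stored range. */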
+ mas_set(&mas, (i + 1) * 10);
+ rcu_read_unlock();
+ }
+ mas_set(&mas, 0);
+ }
+
+ rcu_unregister_thread();
+ return NULL;
+}
+
+static void *rcu_added(void *ptr)
+{
+ struct rcu_test_struct *test = (struct rcu_test_struct*)ptr;
+ unsigned long count = test->count;
+ void *entry;
+ bool update = false;
+ MA_STATE(mas, test->mt, test->added, test->added);
+
+ rcu_register_thread();
+ while (count--) {
+ usleep(test->loop_sleep);
+ rcu_read_lock();
+ entry = mas_walk(&mas);
+ rcu_read_unlock();
+
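+ /* Once the addition has been seen, one of the new values must remain visible. */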
+ MT_BUG_ON(test->mt, update && (entry != test->entry2 &&
+ entry != test->entry3));
+ if (!update && entry) {
+ update = true;
+ uatomic_inc(&test->seen_added);
+ }
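+ /* Reset so the next mas_walk() re-walks from the root. */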
+ mas_reset(&mas);
+ }
+
+ rcu_unregister_thread();
+ return NULL;
+}
+
+static void *rcu_removed(void *ptr)
+{
+ struct rcu_test_struct *test = (struct rcu_test_struct*)ptr;
+ unsigned long count = test->count;
+ void *entry, *expected;
+ bool update = false;
+ MA_STATE(mas, test->mt, test->removed, test->removed);
+
+ rcu_register_thread();
+ expected = xa_mk_value(mas.index ? mas.index / 10 : 0);
+ while (count--) {
+ usleep(test->loop_sleep);
+ rcu_read_lock();
+ entry = mas_walk(&mas);
+ rcu_read_unlock();
+
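+ /* Once the removal has been seen, the entry must stay removed. */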
+ MT_BUG_ON(test->mt, update && entry);
+ if (!update && !entry) {
+ update = true;
+ uatomic_inc(&test->seen_removed);
+ }
+
+ MT_BUG_ON(test->mt, !update && entry != expected);
+ mas_reset(&mas);
+ }
+
+ rcu_unregister_thread();
+ return NULL;
+}
+
+static void *rcu_toggle(void *ptr)
+{
+ struct rcu_test_struct *test = (struct rcu_test_struct*)ptr;
+ unsigned long count = test->count;
+ void *entry, *expected;
+ bool exists = false;
+ MA_STATE(mas, test->mt, test->toggle, test->toggle);
+
+ rcu_register_thread();
+ expected = xa_mk_value(mas.index ? mas.index / 10 : 0);
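+ /* The writer toggles this index between expected and NULL. */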
+ while (count--) {
+ usleep(test->loop_sleep);
+ rcu_read_lock();
+ entry = mas_walk(&mas);
+ if (entry) {
+ if (!exists)
+ uatomic_inc(&test->seen_toggle);
+ exists = true;
+ MT_BUG_ON(test->mt, entry != expected);
+ } else {
+ exists = false;
+ }
+
+ rcu_read_unlock();
+ mas_reset(&mas);
+ }
+
+ rcu_unregister_thread();
+ return NULL;
+}
+
+/*
+ * This is to check:
+ * 1. A range that is never present.
+ * 2. A range that is always present.
+ * 3. Things being added but not removed.
+ * 4. Things being removed but not added.
+ * 5. Things being both added and removed; searches may succeed or fail.
+ */
+static noinline
+void run_rcu_stress(struct maple_tree *mt, struct rcu_test_struct *vals)
+{
+ int i, count, max = 15000;
+ void *(*function)(void*);
+ pthread_t readers[50];
+ bool toggle = true;
+ void *expected = xa_mk_value(vals->toggle/10);
+
+ mt_set_in_rcu(mt);
+ MT_BUG_ON(mt, !mt_in_rcu(mt));
+
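+ /* Make sure the entry to be removed is present before the readers start. */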
+ mtree_store_range(mt, vals->removed, vals->removed,
+ xa_mk_value(vals->removed/10), GFP_KERNEL);
+
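+ /* Reader mix: 9 empty, 10 present, 10 added, 10 removed, 11 toggle. */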
+ for (i = 0; i < ARRAY_SIZE(readers); i++) {
+ if (i < 9)
+ function = rcu_empty;
+ else if (i < 19)
+ function = rcu_present;
+ else if (i < 29)
+ function = rcu_added;
+ else if (i < 39)
+ function = rcu_removed;
+ else
+ function = rcu_toggle;
+
+ if (pthread_create(&readers[i], NULL, *function, vals)) {
+ perror("creating reader thread");
+ exit(1);
+ }
+ }
+
+ usleep(5); /* Small yield to ensure all threads have started. */
+ for (count = 0; count < max; count++) {
+ /* Add and modify */
+ mtree_store_range(mt, vals->index, vals->last,
+ count % 2 ? vals->entry2 : vals->entry3,
+ GFP_KERNEL);
+ /* Remove */
+ mtree_store_range(mt, vals->removed, vals->removed, NULL,
+ GFP_KERNEL);
+
+ /* Toggle */
+ if (toggle) {
+ toggle = false;
+ mtree_store_range(mt, vals->toggle, vals->toggle,
+ expected, GFP_KERNEL);
+ } else {
+ toggle = true;
+ mtree_store_range(mt, vals->toggle, vals->toggle, NULL,
+ GFP_KERNEL);
+ }
+ usleep(5);
+ }
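+ /* i was left at ARRAY_SIZE(readers) by the create loop above. */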
+ while (i--)
+ pthread_join(readers[i], NULL);
+
+ /* Make sure the test caught at least one update. */
+ MT_BUG_ON(mt, !vals->seen_entry2);
+ MT_BUG_ON(mt, !vals->seen_entry3);
+ MT_BUG_ON(mt, !vals->seen_toggle);
+ MT_BUG_ON(mt, !vals->seen_added);
+ MT_BUG_ON(mt, !vals->seen_removed);
+}
+
static noinline
void run_check_rcu(struct maple_tree *mt, struct rcu_test_struct *vals)
{
mas_reset(&mas_reader);
mas_set_range(&mas_writer, target, target + 5);
mas_set_range(&mas_reader, target, target);
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10));
rcu_read_lock();
mas_prev(&mas_reader, 0);
mas_lock(&mas_writer);
mas_unlock(&mas_writer);
MT_BUG_ON(mt, mas_prev(&mas_reader, 0) != xa_mk_value(val));
rcu_read_unlock();
- mt_dump(mt);
}
static noinline void check_rcu_threaded(struct maple_tree *mt)
struct rcu_test_struct vals;
vals.val_sleep = 200;
- vals.loop_sleep = 100;
+ vals.loop_sleep = 110;
rcu_register_thread();
for (i = 0; i <= nr_entries; i++)
vals.range_start = 4316;
vals.range_end = 5035;
run_check_rcu(mt, &vals);
+ mtree_destroy(mt);
+ mtree_init(mt, MAPLE_ALLOC_RANGE);
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+ /* 4390-4395 holds value 439 (0x1b7), i.e. xa_mk_value 0x36f. */
+ /* Spanning store across several slots. */
+ vals.loop_sleep = 3;
+ vals.count = 10000;
+ vals.mt = mt;
+ vals.index = 4390;
+ vals.last = 4398;
+ vals.entry1 = xa_mk_value(4390);
+ vals.entry2 = xa_mk_value(439);
+ vals.entry3 = xa_mk_value(4391);
+ vals.seen_entry2 = 0;
+ vals.seen_entry3 = 0;
+ vals.seen_toggle = 0;
+ vals.seen_added = 0;
+ vals.seen_removed = 0;
+ vals.range_start = 4316;
+ vals.range_end = 5035;
+ vals.removed = 4360;
+ vals.added = 4396;
+ vals.toggle = 4347;
+ run_rcu_stress(mt, &vals);
mtree_destroy(mt);
+ /* Slow reader test */
mtree_init(mt, MAPLE_ALLOC_RANGE);
-
for (i = 0; i <= nr_entries; i++)
mtree_store_range(mt, i*10, i*10 + 5,
xa_mk_value(i), GFP_KERNEL);
+
vals.val_sleep = 400;
vals.loop_sleep = 200;
vals.seen_entry2 = 0;