This reorder actually improves performance by 20% (from 39.1s to 32.8s)
on x86-64 quad core Opteron.
I have no explanation for this; possibly it makes some other entries
better cache-aligned.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
        atomic_t holders;
        atomic_t open_count;
 
+       /*
+        * The current mapping.
+        * Use dm_get_live_table{_fast} or take suspend_lock for
+        * dereference.
+        */
+       struct dm_table *map;
+
        unsigned long flags;
 
        struct request_queue *queue;
         */
        struct workqueue_struct *wq;
 
-       /*
-        * The current mapping.
-        * Use dm_get_live_table{_fast} or take suspend_lock for
-        * dereference.
-        */
-       struct dm_table *map;
-
        /*
         * io objects are allocated from here.
         */