unmap_mapping_pages(vnode->netfs.inode.i_mapping, 0, 0, false);
 }
 
-void afs_server_init_callback_work(struct work_struct *work)
+static void afs_server_init_callback(struct afs_server *server)
 {
-       struct afs_server *server = container_of(work, struct afs_server, initcb_work);
        struct afs_vnode *vnode;
        struct afs_cell *cell = server->cell;
 
  */
 void afs_init_callback_state(struct afs_server *server)
 {
-       rcu_read_lock();
+       struct afs_cell *cell = server->cell;
+
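+       /* Hold vs_lock shared rather than the RCU read lock: the
+        * init-callback work is now run synchronously and may sleep.
+        */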
+       down_read(&cell->vs_lock);
+
        do {
                server->cb_s_break++;
                atomic_inc(&server->cell->fs_s_break);
                if (!list_empty(&server->cell->fs_open_mmaps))
-                       queue_work(system_unbound_wq, &server->initcb_work);
+                       afs_server_init_callback(server);
 
        } while ((server = rcu_dereference(server->uuid_next)));
-       rcu_read_unlock();
+
+       up_read(&cell->vs_lock);
 }
 
 /*
        struct rb_node *p;
        int seq = 1;
 
-       do {
+       for (;;) {
                /* Unfortunately, rbtree walking doesn't give reliable results
                 * under just the RCU read lock, so we have to check for
                 * changes.
                        volume = NULL;
                }
 
-       } while (need_seqretry(&cell->volume_lock, seq));
+               if (volume && afs_try_get_volume(volume, afs_volume_trace_get_callback))
+                       break;
+               volume = NULL; /* Not found, or couldn't pin it (being destroyed) */
+               if (!need_seqretry(&cell->volume_lock, seq))
+                       break;
+               seq |= 1; /* Want a lock next time */
+       }
 
        done_seqretry(&cell->volume_lock, seq);
        return volume;
        afs_volid_t vid = cbb->fid.vid;
        size_t i;
 
+       rcu_read_lock();
        volume = afs_lookup_volume_rcu(server->cell, vid);
-
        /* TODO: If we couldn't match the volume to this server, find all
         * matching volumes and break them anyway.
         */
-
        for (i = *_count; i > 0; cbb++, i--) {
                if (cbb->fid.vid == vid) {
                        _debug("- Fid { vl=%08llx n=%llu u=%u }",
                        *residue++ = *cbb;
                }
        }
+
+       rcu_read_unlock();
+       afs_put_volume(volume, afs_volume_trace_put_callback);
 }
 
 /*
 
        ASSERT(server != NULL);
 
-       rcu_read_lock();
-
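+       /* Each pass breaks the callbacks for one volume ID and compacts
+        * the remaining break entries.
+        */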
        while (count > 0)
                afs_break_some_callbacks(server, callbacks, &count);
-
-       rcu_read_unlock();
-       return;
 }
 
        refcount_set(&cell->ref, 1);
        atomic_set(&cell->active, 0);
        INIT_WORK(&cell->manager, afs_manage_cell_work);
-       spin_lock_init(&cell->vs_lock);
+       init_rwsem(&cell->vs_lock);
        cell->volumes = RB_ROOT;
        INIT_HLIST_HEAD(&cell->proc_volumes);
        seqlock_init(&cell->volume_lock);
 
        unsigned int            debug_id;
 
        /* The volumes belonging to this cell */
-       spinlock_t              vs_lock;        /* Lock for server->volumes */
+       struct rw_semaphore     vs_lock;        /* Lock for server->volumes */
        struct rb_root          volumes;        /* Tree of volumes on this server */
        struct hlist_head       proc_volumes;   /* procfs volume list */
        seqlock_t               volume_lock;    /* For volumes */
        struct hlist_node       addr6_link;     /* Link in net->fs_addresses6 */
        struct hlist_node       proc_link;      /* Link in net->fs_proc */
-       struct list_head        volumes;        /* RCU list of afs_server_entry objects */
+       struct list_head        volumes;        /* List of afs_server_entry objects (see cell->vs_lock) */
-       struct work_struct      initcb_work;    /* Work for CB.InitCallBackState* */
        struct afs_server       *gc_next;       /* Next server in manager's list */
        time64_t                unuse_time;     /* Time at which last unused */
        unsigned long           flags;
  * callback.c
  */
 extern void afs_invalidate_mmap_work(struct work_struct *);
-extern void afs_server_init_callback_work(struct work_struct *work);
 extern void afs_init_callback_state(struct afs_server *);
 extern void __afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
 extern void afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
 
        server->uuid = *uuid;
        rwlock_init(&server->fs_lock);
        INIT_LIST_HEAD(&server->volumes);
-       INIT_WORK(&server->initcb_work, afs_server_init_callback_work);
        init_waitqueue_head(&server->probe_wq);
        INIT_LIST_HEAD(&server->probe_link);
        spin_lock_init(&server->probe_lock);
        if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
                afs_give_up_callbacks(net, server);
 
-       flush_work(&server->initcb_work);
        afs_put_server(net, server, afs_server_trace_destroy);
 }
 
 
        struct list_head *p;
        unsigned int i;
 
-       spin_lock(&volume->cell->vs_lock);
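+       /* vs_lock is held exclusively while the server lists are changed,
+        * so plain (non-RCU) list primitives are sufficient.
+        */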
+       down_write(&volume->cell->vs_lock);
 
        for (i = 0; i < slist->nr_servers; i++) {
                se = &slist->servers[i];
                        if (volume->vid <= pe->volume->vid)
                                break;
                }
-               list_add_tail_rcu(&se->slink, p);
+               list_add_tail(&se->slink, p);
        }
 
        slist->attached = true;
-       spin_unlock(&volume->cell->vs_lock);
+       up_write(&volume->cell->vs_lock);
 }
 
 /*
 {
        unsigned int n = 0, o = 0;
 
-       spin_lock(&volume->cell->vs_lock);
+       down_write(&volume->cell->vs_lock);
 
        while (n < new->nr_servers || o < old->nr_servers) {
                struct afs_server_entry *pn = n < new->nr_servers ? &new->servers[n] : NULL;
                int diff;
 
                if (pn && po && pn->server == po->server) {
-                       list_replace_rcu(&po->slink, &pn->slink);
+                       list_replace(&po->slink, &pn->slink);
                        n++;
                        o++;
                        continue;
                                if (volume->vid <= s->volume->vid)
                                        break;
                        }
-                       list_add_tail_rcu(&pn->slink, p);
+                       list_add_tail(&pn->slink, p);
                        n++;
                } else {
-                       list_del_rcu(&po->slink);
+                       list_del(&po->slink);
                        o++;
                }
        }
 
-       spin_unlock(&volume->cell->vs_lock);
+       up_write(&volume->cell->vs_lock);
 }
 
 /*
        if (!slist->attached)
                return;
 
-       spin_lock(&volume->cell->vs_lock);
+       down_write(&volume->cell->vs_lock);
 
        for (i = 0; i < slist->nr_servers; i++)
-               list_del_rcu(&slist->servers[i].slink);
+               list_del(&slist->servers[i].slink);
 
        slist->attached = false;
-       spin_unlock(&volume->cell->vs_lock);
+       up_write(&volume->cell->vs_lock);
 }
 
        EM(afs_volume_trace_alloc,              "ALLOC         ") \
        EM(afs_volume_trace_free,               "FREE          ") \
        EM(afs_volume_trace_get_alloc_sbi,      "GET sbi-alloc ") \
+       EM(afs_volume_trace_get_callback,       "GET callback  ") \
        EM(afs_volume_trace_get_cell_insert,    "GET cell-insrt") \
        EM(afs_volume_trace_get_new_op,         "GET op-new    ") \
        EM(afs_volume_trace_get_query_alias,    "GET cell-alias") \
+       EM(afs_volume_trace_put_callback,       "PUT callback  ") \
        EM(afs_volume_trace_put_cell_dup,       "PUT cell-dup  ") \
        EM(afs_volume_trace_put_cell_root,      "PUT cell-root ") \
        EM(afs_volume_trace_put_destroy_sbi,    "PUT sbi-destry") \