fc->lock protects several other members along with the sequence counter. Since
the changes introduced by the next few patches increase parallelism, they also
increase contention on fc->lock. Adding a separate seq_lock spinlock to protect
the unique sequence counter reduces that contention and also keeps the code
simpler.
Signed-off-by: Ashish Samant <ashish.samant@oracle.com>
Reviewed-by: Srinivas Eeda <srinivas.eeda@oracle.com>
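
For reference, below is a minimal userspace sketch of the pattern this patch
applies: the connection keeps its broad lock for most state, while the
unique-id counter sits behind its own small lock. The struct and function
names (struct conn, conn_get_unique) and the use of pthread mutexes in place
of kernel spinlocks are illustrative assumptions, not FUSE code.

    /*
     * Sketch of a hot counter split out from a coarse per-connection lock.
     * Names here are illustrative, not the kernel's.
     */
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    struct conn {
            pthread_mutex_t lock;     /* protects most connection state */
            pthread_mutex_t seq_lock; /* protects only reqctr */
            uint64_t reqctr;          /* last allocated request id */
    };

    static uint64_t conn_get_unique(struct conn *c)
    {
            uint64_t ctr;

            pthread_mutex_lock(&c->seq_lock);
            if (++c->reqctr == 0)     /* zero is reserved; skip it on wrap */
                    c->reqctr = 1;
            ctr = c->reqctr;
            pthread_mutex_unlock(&c->seq_lock);
            return ctr;
    }

    int main(void)
    {
            static struct conn c = {
                    .lock = PTHREAD_MUTEX_INITIALIZER,
                    .seq_lock = PTHREAD_MUTEX_INITIALIZER,
                    .reqctr = 0,
            };

            for (int i = 0; i < 3; i++)
                    printf("unique id: %llu\n",
                           (unsigned long long)conn_get_unique(&c));
            return 0;
    }

Because the seq_lock critical section is only the increment and the
wrap-to-one check, id allocation no longer serializes behind unrelated work
done under fc->lock.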
static u64 fuse_get_unique(struct fuse_conn *fc)
{
+ u64 ctr;
+
+ /*
+  * Using a separate seq_lock reduces contention on fc->lock
+  * and makes the code cleaner.
+  */
+ spin_lock(&fc->seq_lock);
fc->reqctr++;
/* zero is special */
if (fc->reqctr == 0)
fc->reqctr = 1;
+ ctr = fc->reqctr;
+ spin_unlock(&fc->seq_lock);
- return fc->reqctr;
+ return ctr;
}
static void queue_request(struct fuse_node *fn, struct fuse_req *req)
/** The next unique request id */
u64 reqctr;
+ /** Lock for protecting access to the reqctr */
+ spinlock_t seq_lock;
+
/** Connection failed (version mismatch). Cannot race with
setting other bitfields since it is only set once in INIT
reply, before any other request, and never cleared */
memset(fc, 0, sizeof(*fc));
spin_lock_init(&fc->lock);
+ spin_lock_init(&fc->seq_lock);
init_rwsem(&fc->killsb);
atomic_set(&fc->count, 1);
init_waitqueue_head(&fc->reserved_req_waitq);