struct page             **rq_enc_pages; /* scratch pages for use by
                                           gss privacy code */
        void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
-       struct list_head        rq_list;
+
+       union {
+               struct list_head        rq_list;        /* Slot allocation list */
+               struct list_head        rq_recv;        /* Receive queue */
+       };
 
        void                    *rq_buffer;     /* Call XDR encode buffer */
        size_t                  rq_callsize;
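
The union is safe because the two memberships are mutually exclusive: rq_list links the request on the transport's free-slot list only while it is unallocated, and rq_recv links it on the receive queue only while it waits for a reply, so the same storage is never needed for both at once. A minimal userspace sketch of the idea (the type and field names below are illustrative, not the kernel's):

	#include <stdio.h>

	struct list_node { struct list_node *next, *prev; };

	struct demo_rqst {
		union {				/* mutually exclusive list memberships */
			struct list_node alloc_link;	/* on the free-slot list */
			struct list_node recv_link;	/* on the receive queue */
		};
	};

	int main(void)
	{
		/* The union costs one list head instead of two. */
		printf("separate: %zu bytes, union: %zu bytes\n",
		       2 * sizeof(struct list_node), sizeof(struct demo_rqst));
		return 0;
	}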
        struct list_head        bc_pa_list;     /* List of preallocated
                                                 * backchannel rpc_rqst's */
 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
-       struct list_head        recv;
+
+       struct list_head        recv_queue;     /* Receive queue */
 
        struct {
                unsigned long           bind_count,     /* total number of binds */
 
 xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
        __must_hold(&xprt->transport_lock)
 {
-       if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
+       if (list_empty(&xprt->recv_queue) && xprt_has_timer(xprt))
                mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
 }
 
        struct rpc_xprt *xprt = from_timer(xprt, t, timer);
 
        spin_lock(&xprt->transport_lock);
-       if (!list_empty(&xprt->recv))
+       if (!list_empty(&xprt->recv_queue))
                goto out_abort;
        /* Reset xprt->last_used to avoid connect/autodisconnect cycling */
        xprt->last_used = jiffies;
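
Together these two hunks keep the autodisconnect logic intact across the rename: the idle timer is armed only while recv_queue is empty, and because a request may be queued between arming and expiry, the timer callback re-checks the queue under transport_lock and aborts rather than disconnect while a reply is pending. A condensed sketch of that arm/re-check pattern (simplified; locking on the arm side and the disconnect work itself are elided):

	/* Arm side: only idle transports schedule a disconnect. */
	if (list_empty(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);

	/* Expiry side: the queue may have gained entries since arming. */
	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv_queue)) {
		spin_unlock(&xprt->transport_lock);
		return;			/* replies pending: do not disconnect */
	}
	xprt->last_used = jiffies;	/* avoid connect/autodisconnect cycling */
	/* ... trigger the actual disconnect ... */
	spin_unlock(&xprt->transport_lock);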
 {
        struct rpc_rqst *entry;
 
-       list_for_each_entry(entry, &xprt->recv, rq_list)
+       list_for_each_entry(entry, &xprt->recv_queue, rq_recv)
                if (entry->rq_xid == xid) {
                        trace_xprt_lookup_rqst(xprt, xid, 0);
                        entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
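
xprt_lookup_rqst() is the reply-matching path: a transport that has read an XID off the wire uses it to find the request still waiting on recv_queue. A hypothetical caller sketch, assuming xprt->queue_lock is held across lookup and completion, that `copied` (the number of reply bytes) comes from the transport read, and eliding the pinning and data copy real transports do in between:

	struct rpc_rqst *req;

	spin_lock(&xprt->queue_lock);
	req = xprt_lookup_rqst(xprt, xid);	/* walks recv_queue via rq_recv */
	if (req != NULL)
		xprt_complete_rqst(req->rq_task, copied);	/* wake the task */
	spin_unlock(&xprt->queue_lock);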
                        sizeof(req->rq_private_buf));
 
        /* Add request to the receive list */
-       list_add_tail(&req->rq_list, &xprt->recv);
+       list_add_tail(&req->rq_recv, &xprt->recv_queue);
        set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
        spin_unlock(&xprt->queue_lock);
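
Note the ordering on the enqueue side: rq_private_buf is refreshed from rq_rcv_buf before the request is published on recv_queue, since the receive path works on the private copy once the request is visible; only then is RPC_TASK_NEED_RECV set and the lock dropped. Condensed view (assumes queue_lock was taken earlier in the function):

	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
	       sizeof(req->rq_private_buf));		 /* snapshot first */
	list_add_tail(&req->rq_recv, &xprt->recv_queue); /* then publish */
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);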
 
 xprt_request_dequeue_receive_locked(struct rpc_task *task)
 {
        if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
-               list_del(&task->tk_rqstp->rq_list);
+               list_del(&task->tk_rqstp->rq_recv);
 }
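
Dequeue is idempotent by construction: RPC_TASK_NEED_RECV is set exactly when the request is on recv_queue, and test_and_clear_bit() ensures the list_del() runs at most once even if the locked dequeue helper is reached twice for the same task. Illustrative sequence:

	/* Hypothetical double call: the second is a safe no-op. */
	xprt_request_dequeue_receive_locked(task);	/* clears bit, unlinks */
	xprt_request_dequeue_receive_locked(task);	/* bit clear: skips list_del */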
 
 /**
        spin_lock_init(&xprt->queue_lock);
 
        INIT_LIST_HEAD(&xprt->free);
-       INIT_LIST_HEAD(&xprt->recv);
+       INIT_LIST_HEAD(&xprt->recv_queue);
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
        spin_lock_init(&xprt->bc_pa_lock);
        INIT_LIST_HEAD(&xprt->bc_pa_list);
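
Finally, xprt_init() must give recv_queue a valid empty state before the transport is visible, since both the autodisconnect and lookup paths test it with list_empty() and list_for_each_entry(). INIT_LIST_HEAD() points the head at itself, which is exactly the condition list_empty() checks; a userspace approximation (illustrative names, mirroring the include/linux/list.h idiom):

	struct list_node { struct list_node *next, *prev; };

	static void init_list_head(struct list_node *head)
	{
		head->next = head;	/* an empty list is a self-loop */
		head->prev = head;
	}

	static int list_is_empty(const struct list_node *head)
	{
		return head->next == head;
	}

	int main(void)
	{
		struct list_node recv_queue;

		init_list_head(&recv_queue);
		return list_is_empty(&recv_queue) ? 0 : 1;	/* 0: empty */
	}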