struct rds_ib_refill_cache *cache)
 {
        unsigned long flags;
-       struct rds_ib_cache_head *chp;
        struct list_head *old;
+       struct list_head *chpfirst;
 
        local_irq_save(flags);
 
-       chp = per_cpu_ptr(cache->percpu, smp_processor_id());
-       if (!chp->first)
+       chpfirst = __this_cpu_read(cache->percpu->first);
+       if (!chpfirst)
                INIT_LIST_HEAD(new_item);
        else /* put on front */
-               list_add_tail(new_item, chp->first);
-       chp->first = new_item;
-       chp->count++;
+               list_add_tail(new_item, chpfirst);
 
-       if (chp->count < RDS_IB_RECYCLE_BATCH_COUNT)
+       __this_cpu_write(cache->percpu->first, new_item);
+       __this_cpu_inc(cache->percpu->count);
+
+       if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
                goto end;
 
        /*
         * Hand our per-cpu batch to cache->xfer atomically: grab any list
         * already sitting on xfer, splice it onto our batch, then install
         * the combined list on xfer as long as xfer is still empty.
         */
        do {
                old = xchg(&cache->xfer, NULL);
                if (old)
-                       list_splice_entire_tail(old, chp->first);
-               old = cmpxchg(&cache->xfer, NULL, chp->first);
+                       list_splice_entire_tail(old, chpfirst);
+               old = cmpxchg(&cache->xfer, NULL, chpfirst);
        } while (old);
 
-       chp->first = NULL;
-       chp->count = 0;
+
+       __this_cpu_write(cache->percpu->first, NULL);
+       __this_cpu_write(cache->percpu->count, 0);
 end:
        local_irq_restore(flags);
 }
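
For context, here is a minimal standalone sketch of the accessor pattern this hunk converts to, under a simplified per-cpu structure; the names below (cache_head, percpu_cache, cache_put_sketch) are illustrative only and are not part of the RDS code. The point it shows is that __this_cpu_read()/__this_cpu_write() take the per-cpu lvalue itself (cache->percpu->first in the patch), while the value read out is held in an ordinary local pointer.

#include <linux/list.h>
#include <linux/percpu.h>

/* Simplified stand-in for struct rds_ib_cache_head (illustrative only). */
struct cache_head {
        struct list_head *first;
        unsigned long count;
};

/* Assumed to be set up elsewhere with alloc_percpu(struct cache_head). */
static struct cache_head __percpu *percpu_cache;

/*
 * The caller is assumed to have disabled interrupts, as the RDS code does
 * with local_irq_save(), so the non-preempt-safe __this_cpu_* forms are
 * usable here.
 */
static void cache_put_sketch(struct list_head *new_item)
{
        /* Plain pointer: holds the value read from this CPU's slot. */
        struct list_head *first = __this_cpu_read(percpu_cache->first);

        if (!first)
                INIT_LIST_HEAD(new_item);
        else    /* put on front */
                list_add_tail(new_item, first);

        /* Write back through the per-cpu lvalue, not the local copy. */
        __this_cpu_write(percpu_cache->first, new_item);
        __this_cpu_inc(percpu_cache->count);
}

Passing the local variable itself to __this_cpu_write(), as in __this_cpu_write(chpfirst, ...), would apply the per-cpu offset to the address of the stack variable rather than updating the per-cpu field, which is why the writes above go through cache->percpu->first.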