        nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
        dd->cspec->numctxts = nchipctxts;
        if (qib_n_krcv_queues > 1 && dd->num_pports) {
-               /*
-                * Set the mask for which bits from the QPN are used
-                * to select a context number.
-                */
-               dd->qpn_mask = 0x3f;
                dd->first_user_ctxt = NUM_IB_PORTS +
                        (qib_n_krcv_queues - 1) * dd->num_pports;
                if (dd->first_user_ctxt > nchipctxts)
                unsigned n, regno;
                unsigned long flags;
 
-               if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported)
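+               /*
+                * Nothing to do for this port with only a single
+                * kernel receive queue, or if the port is unused.
+                */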
+               if (dd->n_krcv_queues < 2 ||
+                       !dd->pport[pidx].link_speed_supported)
                        continue;
 
                ppd = &dd->pport[pidx];
 
 
 static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
                                        struct qpn_map *map, unsigned off,
-                                       unsigned r)
+                                       unsigned n)
 {
        if (qpt->mask) {
                off++;
-               if ((off & qpt->mask) >> 1 != r)
-                       off = ((off & qpt->mask) ?
-                               (off | qpt->mask) + 1 : off) | (r << 1);
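+               /*
+                * The QPN bits under qpt->mask (shifted right by one)
+                * index a kernel receive context.  If they point past
+                * the last kernel receive queue, advance to the next
+                * QPN whose masked bits wrap back to context 0.
+                */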
+               if (((off & qpt->mask) >> 1) >= n)
+                       off = (off | qpt->mask) + 2;
        } else
                off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
        return off;
        u32 i, offset, max_scan, qpn;
        struct qpn_map *map;
        u32 ret;
-       int r;
 
        if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
                unsigned n;
                goto bail;
        }
 
-       r = smp_processor_id();
-       if (r >= dd->n_krcv_queues)
-               r %= dd->n_krcv_queues;
        qpn = qpt->last + 1;
        if (qpn >= QPN_MAX)
                qpn = 2;
-       if (qpt->mask && ((qpn & qpt->mask) >> 1) != r)
-               qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) |
-                       (r << 1);
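+       /* make sure the starting QPN maps to a kernel receive context */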
+       if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
+               qpn = (qpn | qpt->mask) + 2;
        offset = qpn & BITS_PER_PAGE_MASK;
        map = &qpt->map[qpn / BITS_PER_PAGE];
        max_scan = qpt->nmaps - !offset;
                                ret = qpn;
                                goto bail;
                        }
-                       offset = find_next_offset(qpt, map, offset, r);
+                       offset = find_next_offset(qpt, map, offset,
+                               dd->n_krcv_queues);
                        qpn = mk_qpn(qpt, map, offset);
                        /*
                         * This test differs from alloc_pidmap().
                        if (qpt->nmaps == QPNMAP_ENTRIES)
                                break;
                        map = &qpt->map[qpt->nmaps++];
-                       offset = qpt->mask ? (r << 1) : 0;
+                       offset = 0;
                } else if (map < &qpt->map[qpt->nmaps]) {
                        ++map;
-                       offset = qpt->mask ? (r << 1) : 0;
+                       offset = 0;
                } else {
                        map = &qpt->map[0];
-                       offset = qpt->mask ? (r << 1) : 2;
+                       offset = 2;
                }
                qpn = mk_qpn(qpt, map, offset);
        }
                }
                qp->ibqp.qp_num = err;
                qp->port_num = init_attr->port_num;
-               qp->processor_id = smp_processor_id();
                qib_reset_qp(qp, init_attr->qp_type);
                break;
 
 
        spinlock_t r_lock;      /* used for APM */
        spinlock_t s_lock;
        atomic_t s_dma_busy;
-       unsigned processor_id;  /* Processor ID QP is bound to */
        u32 s_flags;
        u32 s_cur_size;         /* size of send packet in bytes */
        u32 s_len;              /* total length of s_sge */
  */
 static inline void qib_schedule_send(struct qib_qp *qp)
 {
-       if (qib_send_ok(qp)) {
-               if (qp->processor_id == smp_processor_id())
-                       queue_work(qib_wq, &qp->s_work);
-               else
-                       queue_work_on(qp->processor_id,
-                                     qib_wq, &qp->s_work);
-       }
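+       /* the send work is no longer bound to the QP's original CPU */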
+       if (qib_send_ok(qp))
+               queue_work(qib_wq, &qp->s_work);
 }
 
 static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)