* See ipath_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
-               struct ipath_mmap_info *ip;
-               __u64 offset = (__u64) wc;
                int err;
+               u32 s = sizeof *wc + sizeof(struct ib_wc) * entries;
 
-               err = ib_copy_to_udata(udata, &offset, sizeof(offset));
-               if (err) {
-                       ret = ERR_PTR(err);
+               cq->ip = ipath_create_mmap_info(dev, s, context, wc);
+               if (!cq->ip) {
+                       ret = ERR_PTR(-ENOMEM);
                        goto bail_wc;
                }
 
-               /* Allocate info for ipath_mmap(). */
-               ip = kmalloc(sizeof(*ip), GFP_KERNEL);
-               if (!ip) {
-                       ret = ERR_PTR(-ENOMEM);
-                       goto bail_wc;
+               err = ib_copy_to_udata(udata, &cq->ip->offset,
+                                      sizeof(cq->ip->offset));
+               if (err) {
+                       ret = ERR_PTR(err);
+                       goto bail_ip;
                }
-               cq->ip = ip;
-               ip->context = context;
-               ip->obj = wc;
-               kref_init(&ip->ref);
-               ip->mmap_cnt = 0;
-               ip->size = PAGE_ALIGN(sizeof(*wc) +
-                                     sizeof(struct ib_wc) * entries);
-               spin_lock_irq(&dev->pending_lock);
-               ip->next = dev->pending_mmaps;
-               dev->pending_mmaps = ip;
-               spin_unlock_irq(&dev->pending_lock);
        } else
                cq->ip = NULL;
 
        if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
                spin_unlock(&dev->n_cqs_lock);
                ret = ERR_PTR(-ENOMEM);
-               goto bail_wc;
+               goto bail_ip;
        }
 
        dev->n_cqs_allocated++;
        spin_unlock(&dev->n_cqs_lock);
 
+       if (cq->ip) {
+               spin_lock_irq(&dev->pending_lock);
+               list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
+               spin_unlock_irq(&dev->pending_lock);
+       }
+
        /*
         * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
         * The number of entries should be >= the number requested or return
 
        goto done;
 
+bail_ip:
+       kfree(cq->ip);
 bail_wc:
        vfree(wc);
-
 bail_cq:
        kfree(cq);
-
 done:
        return ret;
 }
        if (cq->ip) {
                struct ipath_ibdev *dev = to_idev(ibcq->device);
                struct ipath_mmap_info *ip = cq->ip;
+               u32 s = sizeof *wc + sizeof(struct ib_wc) * cqe;
 
-               ip->obj = wc;
-               ip->size = PAGE_ALIGN(sizeof(*wc) +
-                                     sizeof(struct ib_wc) * cqe);
+               ipath_update_mmap_info(dev, ip, s, wc);
                spin_lock_irq(&dev->pending_lock);
-               ip->next = dev->pending_mmaps;
-               dev->pending_mmaps = ip;
+               if (list_empty(&ip->pending_mmaps))
+                       list_add(&ip->pending_mmaps, &dev->pending_mmaps);
                spin_unlock_irq(&dev->pending_lock);
        }
 
 
 {
        struct ipath_mmap_info *ip =
                container_of(ref, struct ipath_mmap_info, ref);
+       struct ipath_ibdev *dev = to_idev(ip->context->device);
+
+       spin_lock_irq(&dev->pending_lock);
+       list_del(&ip->pending_mmaps);
+       spin_unlock_irq(&dev->pending_lock);
 
        vfree(ip->obj);
        kfree(ip);
        struct ipath_mmap_info *ip = vma->vm_private_data;
 
        kref_get(&ip->ref);
-       ip->mmap_cnt++;
 }
 
 static void ipath_vma_close(struct vm_area_struct *vma)
 {
        struct ipath_mmap_info *ip = vma->vm_private_data;
 
-       ip->mmap_cnt--;
        kref_put(&ip->ref, ipath_release_mmap_info);
 }
 
        struct ipath_ibdev *dev = to_idev(context->device);
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        unsigned long size = vma->vm_end - vma->vm_start;
-       struct ipath_mmap_info *ip, **pp;
+       struct ipath_mmap_info *ip, *pp;
        int ret = -EINVAL;
 
        /*
         * CQ, QP, or SRQ is soon followed by a call to mmap().
         */
        spin_lock_irq(&dev->pending_lock);
-       for (pp = &dev->pending_mmaps; (ip = *pp); pp = &ip->next) {
+       list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
+                                pending_mmaps) {
                /* Only the creator is allowed to mmap the object */
-               if (context != ip->context || (void *) offset != ip->obj)
+               if (context != ip->context || (__u64) offset != ip->offset)
                        continue;
                /* Don't allow a mmap larger than the object. */
                if (size > ip->size)
                        break;
 
-               *pp = ip->next;
+               list_del_init(&ip->pending_mmaps);
                spin_unlock_irq(&dev->pending_lock);
 
                ret = remap_vmalloc_range(vma, ip->obj, 0);
 done:
        return ret;
 }
+
+/*
+ * Allocate information for ipath_mmap()
+ */
+struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
+                                              u32 size,
+                                              struct ib_ucontext *context,
+                                              void *obj)
+{
+       struct ipath_mmap_info *ip;
+
+       ip = kmalloc(sizeof *ip, GFP_KERNEL);
+       if (!ip)
+               goto bail;
+
+       size = PAGE_ALIGN(size);
+
+       spin_lock_irq(&dev->mmap_offset_lock);
+       if (dev->mmap_offset == 0)
+               dev->mmap_offset = PAGE_SIZE;
+       ip->offset = dev->mmap_offset;
+       dev->mmap_offset += size;
+       spin_unlock_irq(&dev->mmap_offset_lock);
+
+       INIT_LIST_HEAD(&ip->pending_mmaps);
+       ip->size = size;
+       ip->context = context;
+       ip->obj = obj;
+       kref_init(&ip->ref);
+
+bail:
+       return ip;
+}
+
+void ipath_update_mmap_info(struct ipath_ibdev *dev,
+                           struct ipath_mmap_info *ip,
+                           u32 size, void *obj)
+{
+       size = PAGE_ALIGN(size);
+
+       spin_lock_irq(&dev->mmap_offset_lock);
+       if (dev->mmap_offset == 0)
+               dev->mmap_offset = PAGE_SIZE;
+       ip->offset = dev->mmap_offset;
+       dev->mmap_offset += size;
+       spin_unlock_irq(&dev->mmap_offset_lock);
+
+       ip->size = size;
+       ip->obj = obj;
+}
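
For context on how the new offset scheme is consumed from the other side: the kernel copies cq->ip->offset (and likewise for QPs and SRQs) back to the user through ib_copy_to_udata(), and ipath_mmap() later matches vma->vm_pgoff << PAGE_SHIFT against ip->offset. A minimal userspace-side sketch of that handshake follows; the response struct layout, helper name, and file descriptor handling here are assumptions for illustration only, not part of this patch.

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/* Assumed layout: a driver-specific create-CQ response whose only field
 * of interest is the __u64 offset filled in by ib_copy_to_udata() above. */
struct ipath_create_cq_resp {
	uint64_t offset;
};

/* Hypothetical helper: map the kernel-allocated CQ ring into userspace.
 * The offset handed back by the kernel is passed unchanged to mmap(), so
 * ipath_mmap() can find the matching entry on dev->pending_mmaps. */
static void *map_cq_queue(int uverbs_fd,
			  const struct ipath_create_cq_resp *resp,
			  size_t size)
{
	/* mappings larger than ip->size are rejected by ipath_mmap() */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    uverbs_fd, (off_t) resp->offset);
}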
 
         * See ipath_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
-               struct ipath_mmap_info *ip;
-               __u64 offset = (__u64) qp->r_rq.wq;
                int err;
 
-               err = ib_copy_to_udata(udata, &offset, sizeof(offset));
-               if (err) {
-                       ret = ERR_PTR(err);
-                       goto bail_rwq;
-               }
+               if (!qp->r_rq.wq) {
+                       __u64 offset = 0;
 
-               if (qp->r_rq.wq) {
-                       /* Allocate info for ipath_mmap(). */
-                       ip = kmalloc(sizeof(*ip), GFP_KERNEL);
-                       if (!ip) {
+                       err = ib_copy_to_udata(udata, &offset,
+                                              sizeof(offset));
+                       if (err) {
+                               ret = ERR_PTR(err);
+                               goto bail_rwq;
+                       }
+               } else {
+                       u32 s = sizeof(struct ipath_rwq) +
+                               qp->r_rq.size * sz;
+
+                       qp->ip =
+                           ipath_create_mmap_info(dev, s,
+                                                  ibpd->uobject->context,
+                                                  qp->r_rq.wq);
+                       if (!qp->ip) {
                                ret = ERR_PTR(-ENOMEM);
                                goto bail_rwq;
                        }
-                       qp->ip = ip;
-                       ip->context = ibpd->uobject->context;
-                       ip->obj = qp->r_rq.wq;
-                       kref_init(&ip->ref);
-                       ip->mmap_cnt = 0;
-                       ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
-                                             qp->r_rq.size * sz);
-                       spin_lock_irq(&dev->pending_lock);
-                       ip->next = dev->pending_mmaps;
-                       dev->pending_mmaps = ip;
-                       spin_unlock_irq(&dev->pending_lock);
+
+                       err = ib_copy_to_udata(udata, &(qp->ip->offset),
+                                              sizeof(qp->ip->offset));
+                       if (err) {
+                               ret = ERR_PTR(err);
+                               goto bail_ip;
+                       }
                }
        }
 
        dev->n_qps_allocated++;
        spin_unlock(&dev->n_qps_lock);
 
+       if (qp->ip) {
+               spin_lock_irq(&dev->pending_lock);
+               list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
+               spin_unlock_irq(&dev->pending_lock);
+       }
+
        ret = &qp->ibqp;
        goto bail;
 
 
         * See ipath_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
-               struct ipath_mmap_info *ip;
-               __u64 offset = (__u64) srq->rq.wq;
                int err;
+               u32 s = sizeof(struct ipath_rwq) + srq->rq.size * sz;
 
-               err = ib_copy_to_udata(udata, &offset, sizeof(offset));
-               if (err) {
-                       ret = ERR_PTR(err);
+               srq->ip =
+                   ipath_create_mmap_info(dev, s,
+                                          ibpd->uobject->context,
+                                          srq->rq.wq);
+               if (!srq->ip) {
+                       ret = ERR_PTR(-ENOMEM);
                        goto bail_wq;
                }
 
-               /* Allocate info for ipath_mmap(). */
-               ip = kmalloc(sizeof(*ip), GFP_KERNEL);
-               if (!ip) {
-                       ret = ERR_PTR(-ENOMEM);
-                       goto bail_wq;
+               err = ib_copy_to_udata(udata, &srq->ip->offset,
+                                      sizeof(srq->ip->offset));
+               if (err) {
+                       ret = ERR_PTR(err);
+                       goto bail_ip;
                }
-               srq->ip = ip;
-               ip->context = ibpd->uobject->context;
-               ip->obj = srq->rq.wq;
-               kref_init(&ip->ref);
-               ip->mmap_cnt = 0;
-               ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
-                                     srq->rq.size * sz);
-               spin_lock_irq(&dev->pending_lock);
-               ip->next = dev->pending_mmaps;
-               dev->pending_mmaps = ip;
-               spin_unlock_irq(&dev->pending_lock);
        } else
                srq->ip = NULL;
 
        if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
                spin_unlock(&dev->n_srqs_lock);
                ret = ERR_PTR(-ENOMEM);
-               goto bail_wq;
+               goto bail_ip;
        }
 
        dev->n_srqs_allocated++;
        spin_unlock(&dev->n_srqs_lock);
 
+       if (srq->ip) {
+               spin_lock_irq(&dev->pending_lock);
+               list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
+               spin_unlock_irq(&dev->pending_lock);
+       }
+
        ret = &srq->ibsrq;
        goto done;
 
+bail_ip:
+       kfree(srq->ip);
 bail_wq:
        vfree(srq->rq.wq);
-
 bail_srq:
        kfree(srq);
-
 done:
        return ret;
 }
                if (srq->ip) {
                        struct ipath_mmap_info *ip = srq->ip;
                        struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);
+                       u32 s = sizeof(struct ipath_rwq) + size * sz;
 
-                       ip->obj = wq;
-                       ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
-                                             size * sz);
+                       ipath_update_mmap_info(dev, ip, s, wq);
                        spin_lock_irq(&dev->pending_lock);
-                       ip->next = dev->pending_mmaps;
-                       dev->pending_mmaps = ip;
+                       if (list_empty(&ip->pending_mmaps))
+                               list_add(&ip->pending_mmaps,
+                                        &dev->pending_mmaps);
                        spin_unlock_irq(&dev->pending_lock);
                }
        } else if (attr_mask & IB_SRQ_LIMIT) {
 
                ret = -ENOMEM;
                goto err_lk;
        }
+       INIT_LIST_HEAD(&idev->pending_mmaps);
        spin_lock_init(&idev->pending_lock);
+       idev->mmap_offset = PAGE_SIZE;
+       spin_lock_init(&idev->mmap_offset_lock);
        INIT_LIST_HEAD(&idev->pending[0]);
        INIT_LIST_HEAD(&idev->pending[1]);
        INIT_LIST_HEAD(&idev->pending[2]);
 
  * this as its vm_private_data.
  */
 struct ipath_mmap_info {
-       struct ipath_mmap_info *next;
+       struct list_head pending_mmaps;
        struct ib_ucontext *context;
        void *obj;
+       __u64 offset;
        struct kref ref;
        unsigned size;
-       unsigned mmap_cnt;
 };
 
 /*
 
 struct ipath_ibdev {
        struct ib_device ibdev;
-       struct list_head dev_list;
        struct ipath_devdata *dd;
-       struct ipath_mmap_info *pending_mmaps;
+       struct list_head pending_mmaps;
+       spinlock_t mmap_offset_lock;
+       u32 mmap_offset;
        int ib_unit;            /* This is the device number */
        u16 sm_lid;             /* in host order */
        u8 sm_sl;
 
 void ipath_release_mmap_info(struct kref *ref);
 
+struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
+                                              u32 size,
+                                              struct ib_ucontext *context,
+                                              void *obj);
+
+void ipath_update_mmap_info(struct ipath_ibdev *dev,
+                           struct ipath_mmap_info *ip,
+                           u32 size, void *obj);
+
 int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 
 void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev);