IB_DEVICE_MEM_WINDOW);
 
        /* Allocate the qptr_array */
-       c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
+       c2dev->qptr_array = vzalloc(C2_MAX_CQS * sizeof(void *));
        if (!c2dev->qptr_array) {
                return -ENOMEM;
        }
 
-       /* Inialize the qptr_array */
-       memset(c2dev->qptr_array, 0, C2_MAX_CQS * sizeof(void *));
+       /* Initialize the qptr_array */
        c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
        c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
        c2dev->qptr_array[2] = (void *) &c2dev->aeq;
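
The c2 hunk above shows the basic shape of every change in this patch: a vmalloc() (or kmalloc()) followed by memset(..., 0, ...) collapses into a single vzalloc() (or kzalloc()) call that hands back already-zeroed memory. A minimal sketch of the pattern, using made-up names (demo_dev, demo_alloc_qptrs) rather than anything from the c2 driver:

#include <linux/errno.h>
#include <linux/vmalloc.h>

struct demo_dev {
	void **qptr_array;
};

static int demo_alloc_qptrs(struct demo_dev *dev, unsigned int nr)
{
	/*
	 * Before: vmalloc() followed by memset(ptr, 0, size).
	 * After:  one vzalloc() call; the memory comes back zero-filled,
	 *         so the explicit memset() is redundant and is dropped.
	 */
	dev->qptr_array = vzalloc(nr * sizeof(void *));
	if (!dev->qptr_array)
		return -ENOMEM;
	return 0;
}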
 
        queue->small_page = NULL;
 
        /* allocate queue page pointers */
-       queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
+       queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
        if (!queue->queue_pages) {
-               queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
+               queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
                if (!queue->queue_pages) {
                        ehca_gen_err("Couldn't allocate queue page list");
                        return 0;
                }
        }
-       memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
 
        /* allocate actual queue pages */
        if (is_small) {
 
                goto bail;
        }
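
The ehca hunk above tries the slab allocator first and falls back to vmalloc space for page lists too large for kmalloc(); with both calls switched to their zeroing variants, the trailing memset() is redundant in either branch. A sketch of that fallback shape, with an illustrative name only (demo_alloc_page_list is not an ehca function):

#include <linux/slab.h>
#include <linux/vmalloc.h>

static void **demo_alloc_page_list(unsigned long nr_of_pages)
{
	void **pages;

	/* Prefer physically contiguous memory, fall back to vmalloc space. */
	pages = kzalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
	if (!pages)
		pages = vzalloc(nr_of_pages * sizeof(void *));

	/* Both branches return zeroed memory, so no memset() is needed. */
	return pages;
}

The matching free path then has to tell the two cases apart, typically by checking is_vmalloc_addr() before choosing kfree() or vfree().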
 
-       dd = vmalloc(sizeof(*dd));
+       dd = vzalloc(sizeof(*dd));
        if (!dd) {
                dd = ERR_PTR(-ENOMEM);
                goto bail;
        }
-       memset(dd, 0, sizeof(*dd));
        dd->ipath_unit = -1;
 
        spin_lock_irqsave(&ipath_devs_lock, flags);
 
        }
 
        num_subports = uinfo->spu_subport_cnt;
-       pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports);
+       pd->subport_uregbase = vzalloc(PAGE_SIZE * num_subports);
        if (!pd->subport_uregbase) {
                ret = -ENOMEM;
                goto bail;
        /* Note: pd->port_rcvhdrq_size isn't initialized yet. */
        size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
                     sizeof(u32), PAGE_SIZE) * num_subports;
-       pd->subport_rcvhdr_base = vmalloc(size);
+       pd->subport_rcvhdr_base = vzalloc(size);
        if (!pd->subport_rcvhdr_base) {
                ret = -ENOMEM;
                goto bail_ureg;
        }
 
-       pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks *
+       pd->subport_rcvegrbuf = vzalloc(pd->port_rcvegrbuf_chunks *
                                        pd->port_rcvegrbuf_size *
                                        num_subports);
        if (!pd->subport_rcvegrbuf) {
        pd->port_subport_id = uinfo->spu_subport_id;
        pd->active_slaves = 1;
        set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
-       memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports);
-       memset(pd->subport_rcvhdr_base, 0, size);
-       memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks *
-                                        pd->port_rcvegrbuf_size *
-                                        num_subports);
        goto bail;
 
 bail_rhdr:
 
        struct page **pages;
        dma_addr_t *addrs;
 
-       pages = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
+       pages = vzalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
                        sizeof(struct page *));
        if (!pages) {
                ipath_dev_err(dd, "failed to allocate shadow page * "
                return;
        }
 
-       memset(pages, 0, dd->ipath_cfgports * dd->ipath_rcvtidcnt *
-              sizeof(struct page *));
-
        dd->ipath_pageshadow = pages;
        dd->ipath_physshadow = addrs;
 }
 
        struct page **pages;
        dma_addr_t *addrs;
 
-       pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+       pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
        if (!pages) {
                qib_dev_err(dd, "failed to allocate shadow page * "
                            "array, no expected sends!\n");
                goto bail;
        }
 
-       addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+       addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
        if (!addrs) {
                qib_dev_err(dd, "failed to allocate shadow dma handle "
                            "array, no expected sends!\n");
                goto bail_free;
        }
 
-       memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
-       memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
-
        dd->pageshadow = pages;
        dd->physshadow = addrs;
        return;
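
The qib hunk above pairs two zeroed allocations and unwinds the first if the second fails (the bail_free label). A rough sketch of that shape under assumed names (demo_shadow, demo_init_shadow), not the actual qib code:

#include <linux/types.h>
#include <linux/vmalloc.h>

struct demo_shadow {
	struct page **pageshadow;
	dma_addr_t *physshadow;
};

static void demo_init_shadow(struct demo_shadow *s, unsigned int nslots)
{
	struct page **pages;
	dma_addr_t *addrs;

	pages = vzalloc(nslots * sizeof(struct page *));
	if (!pages)
		goto bail;

	addrs = vzalloc(nslots * sizeof(dma_addr_t));
	if (!addrs)
		goto bail_free;	/* undo the first allocation */

	s->pageshadow = pages;
	s->physshadow = addrs;
	return;

bail_free:
	vfree(pages);
bail:
	s->pageshadow = NULL;
	s->physshadow = NULL;
}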
 
        int ret;
        int i;
 
-       rx->rx_ring = vmalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
+       rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
        if (!rx->rx_ring) {
                printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
                       priv->ca->name, ipoib_recvq_size);
                return -ENOMEM;
        }
 
-       memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);
-
        t = kmalloc(sizeof *t, GFP_KERNEL);
        if (!t) {
                ret = -ENOMEM;
        struct ipoib_dev_priv *priv = netdev_priv(p->dev);
        int ret;
 
-       p->tx_ring = vmalloc(ipoib_sendq_size * sizeof *p->tx_ring);
+       p->tx_ring = vzalloc(ipoib_sendq_size * sizeof *p->tx_ring);
        if (!p->tx_ring) {
                ipoib_warn(priv, "failed to allocate tx ring\n");
                ret = -ENOMEM;
                goto err_tx;
        }
-       memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
 
        p->qp = ipoib_cm_create_tx_qp(p->dev, p);
        if (IS_ERR(p->qp)) {
                return;
        }
 
-       priv->cm.srq_ring = vmalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
+       priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
        if (!priv->cm.srq_ring) {
                printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
                       priv->ca->name, ipoib_recvq_size);
                return;
        }
 
-       memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
 }
 
 int ipoib_cm_dev_init(struct net_device *dev)
 
                goto out;
        }
 
-       priv->tx_ring = vmalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
+       priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
        if (!priv->tx_ring) {
                printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
                       ca->name, ipoib_sendq_size);
                goto out_rx_ring_cleanup;
        }
-       memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);
 
        /* priv->tx_head, tx_tail & tx_outstanding are already 0 */
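
As in the other ipoib hunks, the send and receive rings come back zero-filled from vzalloc() and still have to be released with vfree() on teardown. A small sketch of that pairing, with invented names (demo_priv, demo_ring_init) rather than the real ipoib structures:

#include <linux/errno.h>
#include <linux/vmalloc.h>

struct demo_tx_buf {
	struct sk_buff *skb;
};

struct demo_priv {
	struct demo_tx_buf *tx_ring;
};

static int demo_ring_init(struct demo_priv *priv, int sendq_size)
{
	priv->tx_ring = vzalloc(sendq_size * sizeof(*priv->tx_ring));
	if (!priv->tx_ring)
		return -ENOMEM;
	return 0;
}

static void demo_ring_cleanup(struct demo_priv *priv)
{
	vfree(priv->tx_ring);	/* vfree(NULL) is a harmless no-op */
	priv->tx_ring = NULL;
}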