/* initialize slab caches for managed objects */
        err = rxe_cache_init();
        if (err) {
-               pr_err("rxe: unable to init object pools\n");
+               pr_err("unable to init object pools\n");
                return err;
        }
 
-       err = rxe_net_ipv4_init();
-       if (err) {
-               pr_err("rxe: unable to init ipv4 tunnel\n");
-               rxe_cache_exit();
-               goto exit;
-       }
-
-       err = rxe_net_ipv6_init();
-       if (err) {
-               pr_err("rxe: unable to init ipv6 tunnel\n");
-               rxe_cache_exit();
-               goto exit;
-       }
-
-       err = register_netdevice_notifier(&rxe_net_notifier);
-       if (err) {
-               pr_err("rxe: Failed to rigister netdev notifier\n");
-               goto exit;
-       }
-
-       pr_info("rxe: loaded\n");
+       err = rxe_net_init();
+       if (err)
+               return err;
 
+       pr_info("loaded\n");
        return 0;
-
-exit:
-       rxe_release_udp_tunnel(recv_sockets.sk4);
-       rxe_release_udp_tunnel(recv_sockets.sk6);
-       return err;
 }
 
static void __exit rxe_module_exit(void)
 {
        rxe_net_exit();
        rxe_cache_exit();
 
-       pr_info("rxe: unloaded\n");
+       pr_info("unloaded\n");
 }
 
 late_initcall(rxe_module_init);
 
 #ifndef RXE_H
 #define RXE_H
 
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/crc32.h>
 
        struct rxe_port *port;
 
        if (attr->port_num != 1) {
-               pr_info("rxe: invalid port_num = %d\n", attr->port_num);
+               pr_info("invalid port_num = %d\n", attr->port_num);
                return -EINVAL;
        }
 
 
        if (attr->ah_flags & IB_AH_GRH) {
                if (attr->grh.sgid_index > port->attr.gid_tbl_len) {
-                       pr_info("rxe: invalid sgid index = %d\n",
+                       pr_info("invalid sgid index = %d\n",
                                attr->grh.sgid_index);
                        return -EINVAL;
                }
 
        state = COMPST_GET_ACK;
 
        while (1) {
-               pr_debug("state = %s\n", comp_state_name[state]);
+               pr_debug("qp#%d state = %s\n", qp_num(qp),
+                        comp_state_name[state]);
                switch (state) {
                case COMPST_GET_ACK:
                        skb = skb_dequeue(&qp->resp_pkts);
                                        qp->comp.rnr_retry--;
 
                                qp->req.need_retry = 1;
-                               pr_debug("set rnr nak timer\n");
+                               pr_debug("qp#%d set rnr nak timer\n",
+                                        qp_num(qp));
                                mod_timer(&qp->rnr_nak_timer,
                                          jiffies + rnrnak_jiffies(aeth_syn(pkt)
                                                & ~AETH_TYPE_MASK));
 
 
        ret = remap_vmalloc_range(vma, ip->obj, 0);
        if (ret) {
-               pr_err("rxe: err %d from remap_vmalloc_range\n", ret);
+               pr_err("err %d from remap_vmalloc_range\n", ret);
                goto done;
        }
 
 
  */
 static u8 rxe_get_key(void)
 {
-       static unsigned key = 1;
+       static u32 key = 1;
 
        key = key << 1;
 
 
        return found;
 }
 
-struct rxe_dev *get_rxe_by_name(const char* name)
+struct rxe_dev *get_rxe_by_name(const char *name)
 {
        struct rxe_dev *rxe;
        struct rxe_dev *found = NULL;
        port->attr.phys_state = IB_PHYS_STATE_LINK_UP;
 
        rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
-       pr_info("rxe: set %s active\n", rxe->ib_dev.name);
-       return;
+       pr_info("set %s active\n", rxe->ib_dev.name);
 }
 
 /* Caller must hold net_info_lock */
        port->attr.phys_state = IB_PHYS_STATE_LINK_DOWN;
 
        rxe_port_event(rxe, IB_EVENT_PORT_ERR);
-       pr_info("rxe: set %s down\n", rxe->ib_dev.name);
-       return;
+       pr_info("set %s down\n", rxe->ib_dev.name);
 }
 
 static int rxe_notify(struct notifier_block *not_blk,
                rxe_port_down(rxe);
                break;
        case NETDEV_CHANGEMTU:
-               pr_info("rxe: %s changed mtu to %d\n", ndev->name, ndev->mtu);
+               pr_info("%s changed mtu to %d\n", ndev->name, ndev->mtu);
                rxe_set_mtu(rxe, ndev->mtu);
                break;
        case NETDEV_REBOOT:
        case NETDEV_CHANGENAME:
        case NETDEV_FEAT_CHANGE:
        default:
-               pr_info("rxe: ignoring netdev event = %ld for %s\n",
+               pr_info("ignoring netdev event = %ld for %s\n",
                        event, ndev->name);
                break;
        }
                                htons(ROCE_V2_UDP_DPORT), false);
        if (IS_ERR(recv_sockets.sk4)) {
                recv_sockets.sk4 = NULL;
-               pr_err("rxe: Failed to create IPv4 UDP tunnel\n");
+               pr_err("Failed to create IPv4 UDP tunnel\n");
                return -1;
        }
 
                                                htons(ROCE_V2_UDP_DPORT), true);
        if (IS_ERR(recv_sockets.sk6)) {
                recv_sockets.sk6 = NULL;
-               pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
+               pr_err("Failed to create IPv6 UDP tunnel\n");
                return -1;
        }
 #endif
        rxe_release_udp_tunnel(recv_sockets.sk4);
        unregister_netdevice_notifier(&rxe_net_notifier);
 }
+
+int rxe_net_init(void)
+{
+       int err;
+
+       recv_sockets.sk6 = NULL;
+
+       err = rxe_net_ipv4_init();
+       if (err)
+               return err;
+       err = rxe_net_ipv6_init();
+       if (err)
+               goto err_out;
+       err = register_netdevice_notifier(&rxe_net_notifier);
+       if (err) {
+               pr_err("Failed to register netdev notifier\n");
+               goto err_out;
+       }
+       return 0;
+err_out:
+       rxe_net_exit();
+       return err;
+}
 
 
 struct rxe_dev *rxe_net_add(struct net_device *ndev);
 
-int rxe_net_ipv4_init(void);
-int rxe_net_ipv6_init(void);
+int rxe_net_init(void);
 void rxe_net_exit(void);
 
 #endif /* RXE_NET_H */
 
 
                wqe_size = rcv_wqe_size(qp->rq.max_sge);
 
-               pr_debug("max_wr = %d, max_sge = %d, wqe_size = %d\n",
-                        qp->rq.max_wr, qp->rq.max_sge, wqe_size);
+               pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
+                        qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);
 
                qp->rq.queue = rxe_queue_init(rxe,
                                              &qp->rq.max_wr,
        if (mask & IB_QP_RETRY_CNT) {
                qp->attr.retry_cnt = attr->retry_cnt;
                qp->comp.retry_cnt = attr->retry_cnt;
-               pr_debug("set retry count = %d\n", attr->retry_cnt);
+               pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
+                        attr->retry_cnt);
        }
 
        if (mask & IB_QP_RNR_RETRY) {
                qp->attr.rnr_retry = attr->rnr_retry;
                qp->comp.rnr_retry = attr->rnr_retry;
-               pr_debug("set rnr retry count = %d\n", attr->rnr_retry);
+               pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
+                        attr->rnr_retry);
        }
 
        if (mask & IB_QP_RQ_PSN) {
                qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
                qp->resp.psn = qp->attr.rq_psn;
-               pr_debug("set resp psn = 0x%x\n", qp->resp.psn);
+               pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
+                        qp->resp.psn);
        }
 
        if (mask & IB_QP_MIN_RNR_TIMER) {
                qp->attr.min_rnr_timer = attr->min_rnr_timer;
-               pr_debug("set min rnr timer = 0x%x\n",
+               pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
                         attr->min_rnr_timer);
        }
 
                qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
                qp->req.psn = qp->attr.sq_psn;
                qp->comp.psn = qp->attr.sq_psn;
-               pr_debug("set req psn = 0x%x\n", qp->req.psn);
+               pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
        }
 
        if (mask & IB_QP_PATH_MIG_STATE)
 
                switch (attr->qp_state) {
                case IB_QPS_RESET:
-                       pr_debug("qp state -> RESET\n");
+                       pr_debug("qp#%d state -> RESET\n", qp_num(qp));
                        rxe_qp_reset(qp);
                        break;
 
                case IB_QPS_INIT:
-                       pr_debug("qp state -> INIT\n");
+                       pr_debug("qp#%d state -> INIT\n", qp_num(qp));
                        qp->req.state = QP_STATE_INIT;
                        qp->resp.state = QP_STATE_INIT;
                        break;
 
                case IB_QPS_RTR:
-                       pr_debug("qp state -> RTR\n");
+                       pr_debug("qp#%d state -> RTR\n", qp_num(qp));
                        qp->resp.state = QP_STATE_READY;
                        break;
 
                case IB_QPS_RTS:
-                       pr_debug("qp state -> RTS\n");
+                       pr_debug("qp#%d state -> RTS\n", qp_num(qp));
                        qp->req.state = QP_STATE_READY;
                        break;
 
                case IB_QPS_SQD:
-                       pr_debug("qp state -> SQD\n");
+                       pr_debug("qp#%d state -> SQD\n", qp_num(qp));
                        rxe_qp_drain(qp);
                        break;
 
                case IB_QPS_SQE:
-                       pr_warn("qp state -> SQE !!?\n");
+                       pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
                        /* Not possible from modify_qp. */
                        break;
 
                case IB_QPS_ERR:
-                       pr_debug("qp state -> ERR\n");
+                       pr_debug("qp#%d state -> ERR\n", qp_num(qp));
                        rxe_qp_error(qp);
                        break;
                }
 
        pack_icrc = be32_to_cpu(*icrcp);
 
        calc_icrc = rxe_icrc_hdr(pkt, skb);
-       calc_icrc = crc32_le(calc_icrc, (u8 *)payload_addr(pkt), payload_size(pkt));
+       calc_icrc = crc32_le(calc_icrc, (u8 *)payload_addr(pkt),
+                            payload_size(pkt));
        calc_icrc = cpu_to_be32(~calc_icrc);
        if (unlikely(calc_icrc != pack_icrc)) {
                char saddr[sizeof(struct in6_addr)];
 
 #include "rxe_queue.h"
 
 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
-                      unsigned opcode);
+                      u32 opcode);
 
 static inline void retry_first_write_send(struct rxe_qp *qp,
                                          struct rxe_send_wqe *wqe,
 {
        struct rxe_qp *qp = (struct rxe_qp *)data;
 
-       pr_debug("rnr nak timer fired\n");
+       pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
        rxe_run_task(&qp->req.task, 1);
 }
 
        return wqe;
 }
 
-static int next_opcode_rc(struct rxe_qp *qp, unsigned opcode, int fits)
+static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
 {
        switch (opcode) {
        case IB_WR_RDMA_WRITE:
        return -EINVAL;
 }
 
-static int next_opcode_uc(struct rxe_qp *qp, unsigned opcode, int fits)
+static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
 {
        switch (opcode) {
        case IB_WR_RDMA_WRITE:
 }
 
 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
-                      unsigned opcode)
+                      u32 opcode)
 {
        int fits = (wqe->dma.resid <= qp->mtu);
 
        struct rxe_pkt_info pkt;
        struct sk_buff *skb;
        struct rxe_send_wqe *wqe;
-       unsigned mask;
+       enum rxe_hdr_mask mask;
        int payload;
        int mtu;
        int opcode;
                        rmr = rxe_pool_get_index(&rxe->mr_pool,
                                                 wqe->wr.ex.invalidate_rkey >> 8);
                        if (!rmr) {
-                               pr_err("No mr for key %#x\n", wqe->wr.ex.invalidate_rkey);
+                               pr_err("No mr for key %#x\n",
+                                      wqe->wr.ex.invalidate_rkey);
                                wqe->state = wqe_state_error;
                                wqe->status = IB_WC_MW_BIND_ERR;
                                goto exit;
 
        skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
        if (unlikely(!skb)) {
-               pr_err("Failed allocating skb\n");
+               pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
                goto err;
        }
 
        if (fill_packet(qp, wqe, &pkt, skb, payload)) {
-               pr_debug("Error during fill packet\n");
+               pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
                goto err;
        }
 
 
        return state;
 }
 
+static void build_rdma_network_hdr(union rdma_network_hdr *hdr,
+                                  struct rxe_pkt_info *pkt)
+{
+       struct sk_buff *skb = PKT_TO_SKB(pkt);
+
+       memset(hdr, 0, sizeof(*hdr));
+       if (skb->protocol == htons(ETH_P_IP))
+               memcpy(&hdr->roce4grh, ip_hdr(skb), sizeof(hdr->roce4grh));
+       else if (skb->protocol == htons(ETH_P_IPV6))
+               memcpy(&hdr->ibgrh, ipv6_hdr(skb), sizeof(hdr->ibgrh));
+}
+
 /* Executes a new request. A retried request never reach that function (send
  * and writes are discarded, and reads and atomics are retried elsewhere.
  */
                    qp_type(qp) == IB_QPT_SMI ||
                    qp_type(qp) == IB_QPT_GSI) {
                        union rdma_network_hdr hdr;
-                       struct sk_buff *skb = PKT_TO_SKB(pkt);
 
-                       memset(&hdr, 0, sizeof(hdr));
-                       if (skb->protocol == htons(ETH_P_IP))
-                               memcpy(&hdr.roce4grh, ip_hdr(skb), sizeof(hdr.roce4grh));
-                       else if (skb->protocol == htons(ETH_P_IPV6))
-                               memcpy(&hdr.ibgrh, ipv6_hdr(skb), sizeof(hdr.ibgrh));
+                       build_rdma_network_hdr(&hdr, pkt);
 
                        err = send_data_in(qp, &hdr, sizeof(hdr));
                        if (err)
                                rmr = rxe_pool_get_index(&rxe->mr_pool,
                                                         wc->ex.invalidate_rkey >> 8);
                                if (unlikely(!rmr)) {
-                                       pr_err("Bad rkey %#x invalidation\n", wc->ex.invalidate_rkey);
+                                       pr_err("Bad rkey %#x invalidation\n",
+                                              wc->ex.invalidate_rkey);
                                        return RESPST_ERROR;
                                }
                                rmr->state = RXE_MEM_STATE_FREE;
        }
 
        while (1) {
-               pr_debug("state = %s\n", resp_state_name[state]);
+               pr_debug("qp#%d state = %s\n", qp_num(qp),
+                        resp_state_name[state]);
                switch (state) {
                case RESPST_GET_REQ:
                        state = get_req(qp, &pkt);
 
 
        len = sanitize_arg(val, intf, sizeof(intf));
        if (!len) {
-               pr_err("rxe: add: invalid interface name\n");
+               pr_err("add: invalid interface name\n");
                err = -EINVAL;
                goto err;
        }
        }
 
        if (net_to_rxe(ndev)) {
-               pr_err("rxe: already configured on %s\n", intf);
+               pr_err("already configured on %s\n", intf);
                err = -EINVAL;
                goto err;
        }
 
        rxe = rxe_net_add(ndev);
        if (!rxe) {
-               pr_err("rxe: failed to add %s\n", intf);
+               pr_err("failed to add %s\n", intf);
                err = -EINVAL;
                goto err;
        }
 
        rxe_set_port_state(ndev);
-       pr_info("rxe: added %s to %s\n", rxe->ib_dev.name, intf);
+       pr_info("added %s to %s\n", rxe->ib_dev.name, intf);
 err:
        if (ndev)
                dev_put(ndev);
 
        len = sanitize_arg(val, intf, sizeof(intf));
        if (!len) {
-               pr_err("rxe: add: invalid interface name\n");
+               pr_err("add: invalid interface name\n");
                return -EINVAL;
        }
 
        rxe = get_rxe_by_name(intf);
 
        if (!rxe) {
-               pr_err("rxe: not configured on %s\n", intf);
+               pr_err("not configured on %s\n", intf);
                return -EINVAL;
        }
 
 
                rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
                speed = cmd.speed;
        } else {
-               pr_warn("%s speed is unknown, defaulting to 1000\n", rxe->ndev->name);
+               pr_warn("%s speed is unknown, defaulting to 1000\n",
+                       rxe->ndev->name);
                speed = 1000;
        }
-       rxe_eth_speed_to_ib_speed(speed, &attr->active_speed, &attr->active_width);
+       rxe_eth_speed_to_ib_speed(speed, &attr->active_speed,
+                                 &attr->active_width);
        mutex_unlock(&rxe->usdev_lock);
 
        return 0;
 }
 
 static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
-                        unsigned mask, u32 length)
+                        unsigned int mask, u32 length)
 {
        int err;
        struct rxe_sq *sq = &qp->sq;
        return 0;
 }
 
-static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
-                        unsigned int *sg_offset)
+static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+                        int sg_nents, unsigned int *sg_offset)
 {
        struct rxe_mem *mr = to_rmr(ibmr);
        int n;