static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk,
                                     struct pollfd *fds)
 {
+       struct xsk_umem_info *umem = xsk->umem;
        u32 idx_cq = 0, idx_fq = 0;
        unsigned int rcvd;
        size_t ndescs;
        ndescs = (xsk->outstanding_tx > BATCH_SIZE) ? BATCH_SIZE :
                xsk->outstanding_tx;
 
        /* re-add completed Tx buffers */
-       rcvd = xsk_ring_cons__peek(&xsk->umem->cq, ndescs, &idx_cq);
+       rcvd = xsk_ring_cons__peek(&umem->cq, ndescs, &idx_cq);
        if (rcvd > 0) {
                unsigned int i;
                int ret;
 
-               ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
+               ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
                while (ret != rcvd) {
                        if (ret < 0)
                                exit_with_error(-ret);
-                       if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
+                       if (xsk_ring_prod__needs_wakeup(&umem->fq))
                                ret = poll(fds, num_socks, opt_timeout);
-                       ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd,
-                                                    &idx_fq);
+                       ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
                }
+
                for (i = 0; i < rcvd; i++)
-                       *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) =
-                               *xsk_ring_cons__comp_addr(&xsk->umem->cq,
-                                                         idx_cq++);
+                       *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) =
+                               *xsk_ring_cons__comp_addr(&umem->cq, idx_cq++);
 
                xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
                xsk_ring_cons__release(&xsk->umem->cq, rcvd);
                xsk->outstanding_tx -= rcvd;
                xsk->tx_npkts += rcvd;
        }
 }
@@ static void rx_drop(struct xsk_socket_info *xsk, struct pollfd *fds) @@
        for (i = 0; i < rcvd; i++) {
                u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr;
                u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len;
+               u64 orig = xsk_umem__extract_addr(addr);
+
+               addr = xsk_umem__add_offset_to_addr(addr);
                char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);
 
                hex_dump(pkt, len, addr);
-               *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = addr;
+               *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = orig;
        }
 
        xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
        xsk_ring_cons__release(&xsk->rx, rcvd);
        xsk->rx_npkts += rcvd;
 }
@@ static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds) @@
        for (i = 0; i < rcvd; i++) {
                u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr;
                u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len;
+               u64 orig = xsk_umem__extract_addr(addr);
+
+               addr = xsk_umem__add_offset_to_addr(addr);
                char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);
 
                swap_mac_addresses(pkt);
 
                hex_dump(pkt, len, addr);
-               xsk_ring_prod__tx_desc(&xsk->tx, idx_tx)->addr = addr;
+               xsk_ring_prod__tx_desc(&xsk->tx, idx_tx)->addr = orig;
                xsk_ring_prod__tx_desc(&xsk->tx, idx_tx++)->len = len;
        }