((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
        (R)->next_to_clean - (R)->next_to_use - 1)
 
-#define IXGBE_RX_DESC_ADV(R, i)            \
-       (&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
-#define IXGBE_TX_DESC_ADV(R, i)            \
-       (&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
-#define IXGBE_TX_CTXTDESC_ADV(R, i)        \
-       (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
+#define IXGBEVF_RX_DESC(R, i)      \
+       (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
+#define IXGBEVF_TX_DESC(R, i)      \
+       (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
+#define IXGBEVF_TX_CTXTDESC(R, i)          \
+       (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
 
 #define IXGBE_MAX_JUMBO_FRAME_SIZE        16128
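A minimal, self-contained sketch (not part of the patch) of what the macro rename above changes: the old _ADV macros took the ring by value and used (R).desc, so call sites had to write IXGBE_TX_DESC_ADV(*tx_ring, i); the renamed IXGBEVF_* macros take a ring pointer and use (R)->desc, so call sites pass tx_ring directly. The types below are simplified stand-ins, not the real ixgbevf definitions.

#include <stdio.h>

/* stand-in for union ixgbe_adv_tx_desc */
union fake_adv_tx_desc {
	struct { unsigned long long buffer_addr; } read;
};

/* stand-in for struct ixgbevf_ring */
struct fake_ring {
	void *desc;		/* descriptor ring memory */
	unsigned int count;
};

/* old style: ring passed by value, members reached via (R).desc */
#define OLD_TX_DESC_ADV(R, i) \
	(&(((union fake_adv_tx_desc *)((R).desc))[i]))

/* new style: ring passed by pointer, members reached via (R)->desc */
#define NEW_TX_DESC(R, i) \
	(&(((union fake_adv_tx_desc *)((R)->desc))[i]))

int main(void)
{
	union fake_adv_tx_desc descs[4];
	struct fake_ring ring = { .desc = descs, .count = 4 };
	struct fake_ring *tx_ring = &ring;

	/* old call site had to dereference the ring pointer */
	union fake_adv_tx_desc *a = OLD_TX_DESC_ADV(*tx_ring, 2);
	/* new call site passes the pointer straight through */
	union fake_adv_tx_desc *b = NEW_TX_DESC(tx_ring, 2);

	printf("same descriptor: %d\n", a == b);	/* prints 1 */
	return 0;
}

Both macros resolve to the same descriptor address; the pointer form simply drops the dereference at every call site, which is what the hunks below do for the TX clean, RX allocation, RX clean, context-descriptor, and TX map paths.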
 
 
 
        i = tx_ring->next_to_clean;
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
-       eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+       eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
 
        while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
               (count < tx_ring->count)) {
                        goto cont_loop;
                for ( ; !cleaned; count++) {
                        struct sk_buff *skb;
-                       tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+                       tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
                        cleaned = (i == eop);
                        skb = tx_buffer_info->skb;
 
 cont_loop:
                eop = tx_ring->tx_buffer_info[i].next_to_watch;
-               eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+               eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
        }
 
        tx_ring->next_to_clean = i;
        bi = &rx_ring->rx_buffer_info[i];
 
        while (cleaned_count--) {
-               rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+               rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
                skb = bi->skb;
                if (!skb) {
                        skb = netdev_alloc_skb(adapter->netdev,
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 
        i = rx_ring->next_to_clean;
-       rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+       rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        rx_buffer_info = &rx_ring->rx_buffer_info[i];
 
                if (i == rx_ring->count)
                        i = 0;
 
-               next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
+               next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
                prefetch(next_rxd);
                cleaned_count++;
 
                i = tx_ring->next_to_use;
 
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+               context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
 
                /* VLAN MACLEN IPLEN */
                if (tx_flags & IXGBE_TX_FLAGS_VLAN)
            (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
                i = tx_ring->next_to_use;
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+               context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
 
                if (tx_flags & IXGBE_TX_FLAGS_VLAN)
                        vlan_macip_lens |= (tx_flags &
        i = tx_ring->next_to_use;
        while (count--) {
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+               tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
                tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
                tx_desc->read.cmd_type_len =
                        cpu_to_le32(cmd_type_len | tx_buffer_info->length);