switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
        /* return 0 to bypass going to ULD for DDPed data */
-       case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
+       case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
                /* update length of DDPed data */
                ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
                rc = 0;
                break;
        /* unmap the sg list when FCPRSP is received */
-       case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
+       case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
                dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
                             ddp->sgc, DMA_FROM_DEVICE);
                ddp->err = ddp_err;
                ddp->sgc = 0;
                /* fall through */
        /* if DDP length is present pass it through to ULD */
-       case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
+       case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
                /* update length of DDPed data */
                ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
                if (ddp->len)
                        rc = ddp->len;
                break;
        /* no match will return as an error */
-       case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
+       case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
        default:
                break;
        }
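
These case labels are why the conversion is safe: with a constant argument, the kernel's cpu_to_le32()/htons() already expand to integer constant expressions (the byteorder macros special-case constants via __builtin_constant_p), so the __constant_* spellings add nothing. A minimal userspace sketch of the same idea; SWAP32_CONST and the status values are invented for the demo and mimic what cpu_to_le32() does on a big-endian host (on a little-endian host it is the identity):

#include <stdint.h>
#include <stdio.h>

/* Pure shift-and-mask swap: an ISO C integer constant expression when
 * its argument is one, hence legal in a case label. */
#define SWAP32_CONST(x) ((uint32_t)(                 \
        (((uint32_t)(x) & 0x000000ffu) << 24) |      \
        (((uint32_t)(x) & 0x0000ff00u) <<  8) |      \
        (((uint32_t)(x) & 0x00ff0000u) >>  8) |      \
        (((uint32_t)(x) & 0xff000000u) >> 24)))

#define STAT_DDP    0x00000010u   /* made-up status codes for the demo */
#define STAT_FCPRSP 0x00000020u

int main(void)
{
        uint32_t wire = SWAP32_CONST(STAT_DDP);   /* field as the device wrote it */

        switch (wire) {                           /* no per-packet byte swap needed */
        case SWAP32_CONST(STAT_DDP):              /* folded at compile time */
                puts("DDP completion");
                break;
        case SWAP32_CONST(STAT_FCPRSP):
                puts("FCP response");
                break;
        default:
                puts("no match");
        }
        return 0;
}

Converting the constants instead of the descriptor field keeps the per-packet path free of byte swapping, which is the same trade the driver code above makes.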
 
        hdr.network += ETH_HLEN;
 
        /* handle any vlan tag if present */
-       if (protocol == __constant_htons(ETH_P_8021Q)) {
+       if (protocol == htons(ETH_P_8021Q)) {
                if ((hdr.network - data) > (max_len - VLAN_HLEN))
                        return max_len;
 
        }
 
        /* handle L3 protocols */
-       if (protocol == __constant_htons(ETH_P_IP)) {
+       if (protocol == htons(ETH_P_IP)) {
                if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
                        return max_len;
 
                /* record next protocol if header is present */
                if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
                        nexthdr = hdr.ipv4->protocol;
-       } else if (protocol == __constant_htons(ETH_P_IPV6)) {
+       } else if (protocol == htons(ETH_P_IPV6)) {
                if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
                        return max_len;
 
                nexthdr = hdr.ipv6->nexthdr;
                hlen = sizeof(struct ipv6hdr);
 #ifdef IXGBE_FCOE
-       } else if (protocol == __constant_htons(ETH_P_FCOE)) {
+       } else if (protocol == htons(ETH_P_FCOE)) {
                if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
                        return max_len;
                hlen = FCOE_HEADER_LEN;
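
For context, the hunks above sit in the header-length probe: each layer is bounds-checked against max_len before its length is counted. A rough userspace sketch of that walk, assuming a contiguous buffer, at most one 802.1Q tag, no IPv6 extension headers and no FCoE; constants and layout are simplified for illustration:

#include <stdint.h>

#define ETH_HLEN   14
#define VLAN_HLEN   4
#define IPV6_HLEN  40

/* Return an estimated header length, never more than max_len. */
unsigned int est_headlen(const uint8_t *data, unsigned int max_len)
{
        const uint8_t *p = data;
        unsigned int ethertype;

        if (max_len < ETH_HLEN)
                return max_len;
        ethertype = (p[12] << 8) | p[13];        /* big-endian on the wire */
        p += ETH_HLEN;

        if (ethertype == 0x8100) {               /* 802.1Q tag present */
                if ((unsigned int)(p - data) > max_len - VLAN_HLEN)
                        return max_len;
                ethertype = (p[2] << 8) | p[3];  /* encapsulated protocol */
                p += VLAN_HLEN;
        }

        if (ethertype == 0x0800) {               /* IPv4: length from IHL */
                if ((unsigned int)(p - data) > max_len - 20)
                        return max_len;
                p += (p[0] & 0x0f) * 4;
        } else if (ethertype == 0x86dd) {        /* IPv6: fixed 40 bytes */
                if ((unsigned int)(p - data) > max_len - IPV6_HLEN)
                        return max_len;
                p += IPV6_HLEN;
        }

        return (unsigned int)(p - data) < max_len ?
               (unsigned int)(p - data) : max_len;
}

As in the driver, running out of room simply returns max_len so the caller pulls a full header block.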
        /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
        type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
 
-       if (first->protocol == __constant_htons(ETH_P_IP)) {
+       if (first->protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
        } else {
                u8 l4_hdr = 0;
                switch (first->protocol) {
-               case __constant_htons(ETH_P_IP):
+               case htons(ETH_P_IP):
                        vlan_macip_lens |= skb_network_header_len(skb);
                        type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
                        l4_hdr = ip_hdr(skb)->protocol;
                        break;
-               case __constant_htons(ETH_P_IPV6):
+               case htons(ETH_P_IPV6):
                        vlan_macip_lens |= skb_network_header_len(skb);
                        l4_hdr = ipv6_hdr(skb)->nexthdr;
                        break;
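
The checksum-offload path above only needs to learn which L4 protocol follows so the right TUCMD bits can be programmed; a hedged sketch of that dispatch over a plain buffer (fixed offsets, IPv4 options and IPv6 extension headers ignored; the helper name is made up):

#include <stdint.h>

#define ETH_P_IP   0x0800
#define ETH_P_IPV6 0x86dd

/* Return the L4 protocol number (e.g. 6 for TCP, 17 for UDP), or 0 if
 * the EtherType is not one we can offload.  'l3' points at the start
 * of the network header. */
uint8_t l4_protocol(uint16_t ethertype, const uint8_t *l3)
{
        switch (ethertype) {
        case ETH_P_IP:
                return l3[9];          /* struct iphdr::protocol */
        case ETH_P_IPV6:
                return l3[6];          /* struct ipv6hdr::nexthdr */
        default:
                return 0;              /* leave checksumming to software */
        }
}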
        hdr.network = skb_network_header(first->skb);
 
        /* Currently only IPv4/IPv6 with TCP is supported */
-       if ((first->protocol != __constant_htons(ETH_P_IPV6) ||
+       if ((first->protocol != htons(ETH_P_IPV6) ||
             hdr.ipv6->nexthdr != IPPROTO_TCP) &&
-           (first->protocol != __constant_htons(ETH_P_IP) ||
+           (first->protocol != htons(ETH_P_IP) ||
             hdr.ipv4->protocol != IPPROTO_TCP))
                return;
 
         * and write the value to source port portion of compressed dword
         */
        if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
-               common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
+               common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
        else
                common.port.src ^= th->dest ^ first->protocol;
        common.port.dst ^= th->source;
 
-       if (first->protocol == __constant_htons(ETH_P_IP)) {
+       if (first->protocol == htons(ETH_P_IP)) {
                input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
                common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
        } else {
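
The ATR sample keeps everything in network byte order and XOR-folds the flow identity (addresses, ports, and the protocol or VLAN EtherType) into a compact signature for the hardware. A rough standalone illustration of that folding; the struct and function here are invented and this is not the device's actual hash:

#include <stdint.h>

/* Simplified flow signature, kept in network byte order so no per-packet
 * byte swapping is required before folding. */
struct flow_sig {
        uint32_t ip;     /* saddr ^ daddr (IPv4 case; IPv6 would fold four words) */
        uint16_t port;   /* sport ^ dport, perturbed by the EtherType */
};

struct flow_sig fold_tcpv4(uint32_t saddr_be, uint32_t daddr_be,
                           uint16_t sport_be, uint16_t dport_be,
                           uint16_t ethertype_be)
{
        struct flow_sig sig;

        /* Mixing in the EtherType mirrors how the driver substitutes
         * ETH_P_8021Q for VLAN-tagged frames. */
        sig.port = (uint16_t)(sport_be ^ dport_be ^ ethertype_be);
        sig.ip   = saddr_be ^ daddr_be;
        return sig;
}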
         * or FIP and we have FCoE enabled on the adapter
         */
        switch (vlan_get_protocol(skb)) {
-       case __constant_htons(ETH_P_FCOE):
-       case __constant_htons(ETH_P_FIP):
+       case htons(ETH_P_FCOE):
+       case htons(ETH_P_FIP):
                adapter = netdev_priv(dev);
 
                if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
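
Queue selection looks through any VLAN tag first (vlan_get_protocol()) and only redirects frames whose wire protocol is FCoE or FIP, and only when the offload is actually enabled. A small sketch of that gating; the queue layout, helper name and parameters are invented:

#include <stdbool.h>
#include <stdint.h>

#define ETH_P_FCOE 0x8906
#define ETH_P_FIP  0x8914

/* Pick a TX queue index: FCoE/FIP frames go to a dedicated range when
 * the offload is enabled, everything else uses the normal selector. */
unsigned int pick_tx_queue(uint16_t wire_proto, bool fcoe_enabled,
                           unsigned int fcoe_base, unsigned int fcoe_count,
                           unsigned int fallback_queue)
{
        switch (wire_proto) {
        case ETH_P_FCOE:
        case ETH_P_FIP:
                if (fcoe_enabled && fcoe_count)
                        return fcoe_base + (fallback_queue % fcoe_count);
                /* fall through: treat as ordinary traffic */
        default:
                return fallback_queue;
        }
}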
                tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
        /* else if it is a SW VLAN check the next protocol and store the tag */
-       } else if (protocol == __constant_htons(ETH_P_8021Q)) {
+       } else if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_hdr *vhdr, _vhdr;
                vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
                if (!vhdr)
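
skb_header_pointer() is used here because the 802.1Q header may not sit in the linear part of the skb; it hands back either a direct pointer or the caller's stack copy (_vhdr), and NULL if the frame is too short. A plain-buffer sketch of the extraction that follows once the header is available; the function name and the flat-buffer assumption are mine:

#include <stdbool.h>
#include <stdint.h>

#define ETH_HLEN 14

/* Pull the 802.1Q TCI and encapsulated EtherType out of a frame whose
 * outer EtherType was already seen to be 0x8100.  Returns false if the
 * frame is too short to contain the tag. */
bool read_sw_vlan(const uint8_t *frame, unsigned int len,
                  uint16_t *tci, uint16_t *inner_proto)
{
        if (len < ETH_HLEN + 4)
                return false;
        *tci         = (uint16_t)((frame[ETH_HLEN]     << 8) | frame[ETH_HLEN + 1]);
        *inner_proto = (uint16_t)((frame[ETH_HLEN + 2] << 8) | frame[ETH_HLEN + 3]);
        return true;
}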
 
 #ifdef IXGBE_FCOE
        /* setup tx offload for FCoE */
-       if ((protocol == __constant_htons(ETH_P_FCOE)) &&
+       if ((protocol == htons(ETH_P_FCOE)) &&
            (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
                tso = ixgbe_fso(tx_ring, first, &hdr_len);
                if (tso < 0)
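
FCoE segmentation offload is only attempted when the netdev advertises NETIF_F_FSO or NETIF_F_FCOE_CRC, and a negative return from the setup routine tells the caller to drop the frame. A minimal sketch of that gate; the feature bits and callback are hypothetical stand-ins for the driver's own:

#include <stdint.h>

#define FEAT_FSO       (1u << 0)   /* invented bit positions for the demo */
#define FEAT_FCOE_CRC  (1u << 1)

/* Returns <0 on setup failure (caller drops the frame), 0 if no offload
 * was applied, >0 if segmentation offload was programmed. */
int maybe_setup_fso(uint32_t features, int (*setup_fso)(void))
{
        if ((features & (FEAT_FSO | FEAT_FCOE_CRC)) == 0)
                return 0;               /* hardware can't help: plain xmit */
        return setup_fso();             /* <0 propagates as "drop" */
}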