  * License as published by the Free Software Foundation.
  */
 
+#include <endian.h>
 #include <asm/types.h>
 #include <linux/types.h>
 #include <stdint.h>
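
The #ifdef form is unreliable in userspace because glibc's <endian.h> (now
included above) defines both __LITTLE_ENDIAN and __BIG_ENDIAN as numeric
constants on every host, so a bare #ifdef __LITTLE_ENDIAN is also taken on
big-endian machines; only comparing __BYTE_ORDER against them reflects the
real host byte order. A minimal standalone sketch of the difference, not part
of the patch and assuming a glibc-style <endian.h>:

    /* Illustrative only: shows that the #ifdef test is always true once
     * <endian.h> is in the include chain, while the __BYTE_ORDER
     * comparison distinguishes the actual host endianness.
     */
    #include <endian.h>
    #include <stdio.h>

    int main(void)
    {
    #ifdef __LITTLE_ENDIAN
    	puts("#ifdef __LITTLE_ENDIAN: taken even on big-endian hosts");
    #endif
    #if __BYTE_ORDER == __LITTLE_ENDIAN
    	puts("__BYTE_ORDER: host is little endian");
    #else
    	puts("__BYTE_ORDER: host is big endian");
    #endif
    	return 0;
    }
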
                "check skb->hash byte load permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, hash)),
 #else
                "check skb->hash byte load not permitted 3",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, hash) + 3),
 #else
                "check skb->hash half load permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, hash)),
 #else
                "check skb->hash half load not permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, hash) + 2),
 #else
                "check bpf_perf_event_data->sample_period byte load permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
                "check bpf_perf_event_data->sample_period half load permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
                "check bpf_perf_event_data->sample_period word load permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
                "check skb->data half load not permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, data)),
 #else
                "check skb->tc_classid half load not permitted for lwt prog",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, tc_classid)),
 #else