goto next_insn;
                }
 
+#ifdef CONFIG_X86_64
+               /* Implement bpf_get_smp_processor_id() inline. */
+               if (insn->imm == BPF_FUNC_get_smp_processor_id &&
+                   prog->jit_requested && bpf_jit_supports_percpu_insn()) {
+                       /* BPF_FUNC_get_smp_processor_id inlining is an
+                        * optimization, so if pcpu_hot.cpu_number is ever
+                        * changed in some incompatible and hard-to-support
+                        * way, it's fine to back out this inlining logic.
+                        */
+                       insn_buf[0] = BPF_MOV32_IMM(BPF_REG_0, (u32)(unsigned long)&pcpu_hot.cpu_number);
+                       insn_buf[1] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
+                       insn_buf[2] = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0);
+                       cnt = 3;
+
+                       new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+                       if (!new_prog)
+                               return -ENOMEM;
+
+                       delta    += cnt - 1;
+                       env->prog = prog = new_prog;
+                       insn      = new_prog->insnsi + i + delta;
+                       goto next_insn;
+               }
+#endif
                /* Implement bpf_get_func_arg inline. */
                if (prog_type == BPF_PROG_TYPE_TRACING &&
                    insn->imm == BPF_FUNC_get_func_arg) {