EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
                        break;
 
+               case BPF_ALU | BPF_LSH | BPF_X:
+               case BPF_ALU | BPF_RSH | BPF_X:
+               case BPF_ALU | BPF_ARSH | BPF_X:
+               case BPF_ALU64 | BPF_LSH | BPF_X:
+               case BPF_ALU64 | BPF_RSH | BPF_X:
+               case BPF_ALU64 | BPF_ARSH | BPF_X:
+
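+                       /* x86-64 variable-count shifts take the count only
+                        * in %cl, so rcx needs special treatment here
+                        */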
+                       /* check for bad case when dst_reg == rcx */
+                       if (dst_reg == BPF_REG_4) {
+                               /* mov r11, dst_reg */
+                               EMIT_mov(AUX_REG, dst_reg);
+                               dst_reg = AUX_REG;
+                       }
+
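+                       /* if the count is not already in rcx, save rcx and
+                        * copy the count into it; rcx is restored below
+                        */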
+                       if (src_reg != BPF_REG_4) { /* common case */
+                               EMIT1(0x51); /* push rcx */
+
+                               /* mov rcx, src_reg */
+                               EMIT_mov(BPF_REG_4, src_reg);
+                       }
+
+                       /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
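+                       /* REX.W (0x48) selects the 64-bit operation; a bare
+                        * REX (0x40) is only needed when dst_reg maps to
+                        * one of r8..r15
+                        */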
+                       if (BPF_CLASS(insn->code) == BPF_ALU64)
+                               EMIT1(add_1mod(0x48, dst_reg));
+                       else if (is_ereg(dst_reg))
+                               EMIT1(add_1mod(0x40, dst_reg));
+
+                       switch (BPF_OP(insn->code)) {
+                       case BPF_LSH: b3 = 0xE0; break;
+                       case BPF_RSH: b3 = 0xE8; break;
+                       case BPF_ARSH: b3 = 0xF8; break;
+                       }
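+                       /* 0xD3 is the "shift r/m by %cl" opcode group; b3
+                        * selects the operation via the ModRM reg field
+                        * (/4 shl, /5 shr, /7 sar)
+                        */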
+                       EMIT2(0xD3, add_1reg(b3, dst_reg));
+
+                       if (src_reg != BPF_REG_4)
+                               EMIT1(0x59); /* pop rcx */
+
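+                       /* dst_reg may have been redirected to r11 above, so
+                        * test the original destination and copy the result
+                        * back into rcx if needed
+                        */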
+                       if (insn->dst_reg == BPF_REG_4)
+                               /* mov dst_reg, r11 */
+                               EMIT_mov(insn->dst_reg, AUX_REG);
+                       break;
+
                case BPF_ALU | BPF_END | BPF_FROM_BE:
                        switch (imm32) {
                        case 16:
 
                { },
                { { 0, -1 } }
        },
+       {
+               "INT: shifts by register",
+               .u.insns_int = {
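+                       /* exercise 32- and 64-bit LSH/RSH/ARSH with the
+                        * count taken from a register, including shifting
+                        * a register by itself
+                        */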
+                       BPF_MOV64_IMM(R0, -1234),
+                       BPF_MOV64_IMM(R1, 1),
+                       BPF_ALU32_REG(BPF_RSH, R0, R1),
+                       BPF_JMP_IMM(BPF_JEQ, R0, 0x7ffffd97, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(R2, 1),
+                       BPF_ALU64_REG(BPF_LSH, R0, R2),
+                       BPF_MOV32_IMM(R4, -1234),
+                       BPF_JMP_REG(BPF_JEQ, R0, R4, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_AND, R4, 63),
+                       BPF_ALU64_REG(BPF_LSH, R0, R4), /* R0 <<= 46 */
+                       BPF_MOV64_IMM(R3, 47),
+                       BPF_ALU64_REG(BPF_ARSH, R0, R3),
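+                       /* R0 is now (-1234 << 46) >> 47 = -617 (arithmetic) */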
+                       BPF_JMP_IMM(BPF_JEQ, R0, -617, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(R2, 1),
+                       BPF_ALU64_REG(BPF_LSH, R4, R2), /* R4 = 46 << 1 */
+                       BPF_JMP_IMM(BPF_JEQ, R4, 92, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(R4, 4),
+                       BPF_ALU64_REG(BPF_LSH, R4, R4), /* R4 = 4 << 4 */
+                       BPF_JMP_IMM(BPF_JEQ, R4, 64, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(R4, 5),
+                       BPF_ALU32_REG(BPF_LSH, R4, R4), /* R4 = 5 << 5 */
+                       BPF_JMP_IMM(BPF_JEQ, R4, 160, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(R0, -1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, -1 } }
+       },
        {
                "INT: DIV + ABS",
                .u.insns_int = {