#include <linux/kcov.h>
 #include <asm/setup.h>
 
+/* Number of 64-bit words written per one comparison: */
+#define KCOV_WORDS_PER_CMP 4
+
 /*
  * kcov descriptor (one per opened debugfs file).
  * State transitions of the descriptor:
  *  - initial state after open()
  *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
  *  - then, mmap() call (several calls are allowed but not useful)
- *  - then, repeated enable/disable for a task (only one task a time allowed)
+ *  - then, ioctl(KCOV_ENABLE, arg), where arg is
+ *     KCOV_TRACE_PC - to trace only the PCs
+ *     or
+ *     KCOV_TRACE_CMP - to trace only the comparison operands
+ *  - then, ioctl(KCOV_DISABLE) to disable the task.
+ * Enabling/disabling ioctls can be repeated (only one task at a time is
+ * allowed).
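+ *
+ * A minimal userspace sketch of this sequence (illustration only, not
+ * part of this patch; error handling omitted, COVER_SIZE is an
+ * arbitrary example value):
+ *
+ *     int fd = open("/sys/kernel/debug/kcov", O_RDWR);
+ *     unsigned long *cover, n, i;
+ *     ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
+ *     cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
+ *                  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *     ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
+ *     ... run the code under test ...
+ *     n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
+ *     for (i = 0; i < n; i++)
+ *             printf("0x%lx\n", cover[i + 1]);
+ *     ioctl(fd, KCOV_DISABLE, 0);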
  */
 struct kcov {
        /*
        struct task_struct      *t;
 };
 
-/*
- * Entry point from instrumented code.
- * This is called once per basic-block/edge.
- */
-void notrace __sanitizer_cov_trace_pc(void)
+static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
 {
-       struct task_struct *t;
        enum kcov_mode mode;
 
-       t = current;
        /*
         * We are interested in code coverage as a function of syscall inputs,
         * so we ignore code executed in interrupts.
         */
        if (!in_task())
-               return;
+               return false;
        mode = READ_ONCE(t->kcov_mode);
-       if (mode == KCOV_MODE_TRACE) {
-               unsigned long *area;
-               unsigned long pos;
-               unsigned long ip = _RET_IP_;
+       /*
+        * There is some code that runs in interrupts but for which
+        * in_interrupt() returns false (e.g. preempt_schedule_irq()).
+        * READ_ONCE()/barrier() effectively provides load-acquire wrt
+        * interrupts, there are paired barrier()/WRITE_ONCE() in
+        * kcov_ioctl_locked().
+        */
+       barrier();
+       return mode == needed_mode;
+}
 
+static unsigned long canonicalize_ip(unsigned long ip)
+{
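+       /* Drop the KASLR offset so reported PCs are stable across boots. */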
 #ifdef CONFIG_RANDOMIZE_BASE
-               ip -= kaslr_offset();
+       ip -= kaslr_offset();
 #endif
+       return ip;
+}
 
-               /*
-                * There is some code that runs in interrupts but for which
-                * in_interrupt() returns false (e.g. preempt_schedule_irq()).
-                * READ_ONCE()/barrier() effectively provides load-acquire wrt
-                * interrupts, there are paired barrier()/WRITE_ONCE() in
-                * kcov_ioctl_locked().
-                */
-               barrier();
-               area = t->kcov_area;
-               /* The first word is number of subsequent PCs. */
-               pos = READ_ONCE(area[0]) + 1;
-               if (likely(pos < t->kcov_size)) {
-                       area[pos] = ip;
-                       WRITE_ONCE(area[0], pos);
-               }
+/*
+ * Entry point from instrumented code.
+ * This is called once per basic-block/edge.
+ */
+void notrace __sanitizer_cov_trace_pc(void)
+{
+       struct task_struct *t;
+       unsigned long *area;
+       unsigned long ip = canonicalize_ip(_RET_IP_);
+       unsigned long pos;
+
+       t = current;
+       if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
+               return;
+
+       area = t->kcov_area;
+       /* The first 64-bit word is the number of subsequent PCs. */
+       pos = READ_ONCE(area[0]) + 1;
+       if (likely(pos < t->kcov_size)) {
+               area[pos] = ip;
+               WRITE_ONCE(area[0], pos);
        }
 }
 EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
 
+#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
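+/*
+ * Append one comparison record to the coverage buffer.  In
+ * KCOV_MODE_TRACE_CMP the first 64-bit word of the buffer holds the
+ * number of records; each record is KCOV_WORDS_PER_CMP 64-bit words of
+ * {type, arg1, arg2, PC}, so record i starts at word
+ * 1 + i * KCOV_WORDS_PER_CMP.
+ */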
+static void write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
+{
+       struct task_struct *t;
+       u64 *area;
+       u64 count, start_index, end_pos, max_pos;
+
+       t = current;
+       if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
+               return;
+
+       ip = canonicalize_ip(ip);
+
+       /*
+        * We write all comparison arguments and types as u64.
+        * The buffer was allocated for t->kcov_size unsigned longs.
+        */
+       area = (u64 *)t->kcov_area;
+       max_pos = t->kcov_size * sizeof(unsigned long);
+
+       count = READ_ONCE(area[0]);
+
+       /* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
+       start_index = 1 + count * KCOV_WORDS_PER_CMP;
+       end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
+       if (likely(end_pos <= max_pos)) {
+               area[start_index] = type;
+               area[start_index + 1] = arg1;
+               area[start_index + 2] = arg2;
+               area[start_index + 3] = ip;
+               WRITE_ONCE(area[0], count + 1);
+       }
+}
+
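+/*
+ * The type argument encodes the operand width as
+ * KCOV_CMP_SIZE(log2(width in bytes)); KCOV_CMP_CONST is or'ed in when
+ * one operand is a compile-time constant (the const_cmp variants below).
+ */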
+void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
+{
+       write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);
+
+void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
+{
+       write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);
+
+void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
+{
+       write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);
+
+void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
+{
+       write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);
+
+void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
+{
+       write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
+                       _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);
+
+void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
+{
+       write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
+                       _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);
+
+void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
+{
+       write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
+                       _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);
+
+void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
+{
+       write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
+                       _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);
+
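+/*
+ * Called for switch statements.  The compiler passes a cases[] array in
+ * which cases[0] is the number of case values, cases[1] is the operand
+ * width in bits, and the case values themselves start at cases[2].
+ */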
+void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
+{
+       u64 i;
+       u64 count = cases[0];
+       u64 size = cases[1];
+       u64 type = KCOV_CMP_CONST;
+
+       switch (size) {
+       case 8:
+               type |= KCOV_CMP_SIZE(0);
+               break;
+       case 16:
+               type |= KCOV_CMP_SIZE(1);
+               break;
+       case 32:
+               type |= KCOV_CMP_SIZE(2);
+               break;
+       case 64:
+               type |= KCOV_CMP_SIZE(3);
+               break;
+       default:
+               return;
+       }
+       for (i = 0; i < count; i++)
+               write_comp_data(type, cases[i + 2], val, _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
+#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */
+
 static void kcov_get(struct kcov *kcov)
 {
        atomic_inc(&kcov->refcount);
        /* Just to not leave dangling references behind. */
        kcov_task_init(t);
        kcov->t = NULL;
+       kcov->mode = KCOV_MODE_INIT;
        spin_unlock(&kcov->lock);
        kcov_put(kcov);
 }
 
        spin_lock(&kcov->lock);
        size = kcov->size * sizeof(unsigned long);
-       if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
+       if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
            vma->vm_end - vma->vm_start != size) {
                res = -EINVAL;
                goto exit;
        kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
        if (!kcov)
                return -ENOMEM;
+       kcov->mode = KCOV_MODE_DISABLED;
        atomic_set(&kcov->refcount, 1);
        spin_lock_init(&kcov->lock);
        filep->private_data = kcov;
                if (size < 2 || size > INT_MAX / sizeof(unsigned long))
                        return -EINVAL;
                kcov->size = size;
-               kcov->mode = KCOV_MODE_TRACE;
+               kcov->mode = KCOV_MODE_INIT;
                return 0;
        case KCOV_ENABLE:
                /*
                 * at task exit or voluntary by KCOV_DISABLE. After that it can
                 * be enabled for another task.
                 */
-               unused = arg;
-               if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
-                   kcov->area == NULL)
+               if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
                        return -EINVAL;
                if (kcov->t != NULL)
                        return -EBUSY;
+               if (arg == KCOV_TRACE_PC)
+                       kcov->mode = KCOV_MODE_TRACE_PC;
+               else if (arg == KCOV_TRACE_CMP)
+#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
+                       kcov->mode = KCOV_MODE_TRACE_CMP;
+#else
+               return -ENOTSUPP;
+#endif
+               else
+                       return -EINVAL;
                t = current;
                /* Cache in task struct for performance. */
                t->kcov_size = kcov->size;
                t->kcov_area = kcov->area;
-               /* See comment in __sanitizer_cov_trace_pc(). */
+               /* See comment in check_kcov_mode(). */
                barrier();
                WRITE_ONCE(t->kcov_mode, kcov->mode);
                t->kcov = kcov;
                        return -EINVAL;
                kcov_task_init(t);
                kcov->t = NULL;
+               kcov->mode = KCOV_MODE_INIT;
                kcov_put(kcov);
                return 0;
        default: