#include <linux/bpf.h>
 #include <linux/filter.h>
 #include <linux/uaccess.h>
+#include <linux/ctype.h>
 #include "trace.h"
 
 static DEFINE_PER_CPU(int, bpf_prog_active);
        .ret_type       = RET_INTEGER,
 };
 
+/*
+ * limited trace_printk()
+ * only %d %u %x %ld %lu %lx %lld %llu %llx %p conversion specifiers allowed
+ *
+ * r1/r2: verifier-checked pointer to (and size of) the format string on the
+ * bpf program stack; r3-r5: up to three values to print.
+ * Returns what __trace_printk() returns, or -EINVAL on a bad format string.
+ */
+static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
+{
+       char *fmt = (char *) (long) r1;
+       int mod[3] = {};        /* per-arg width: 0 = u32, 1 = long, 2 = u64 */
+       int fmt_cnt = 0;
+       int i;
+
+       /*
+        * bpf_check()->check_func_arg()->check_stack_boundary()
+        * guarantees that fmt points to bpf program stack,
+        * fmt_size bytes of it were initialized and fmt_size > 0
+        */
+       if (fmt[--fmt_size] != 0)
+               return -EINVAL;
+
+       /* check format string for allowed specifiers */
+       for (i = 0; i < fmt_size; i++) {
+               if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
+                       return -EINVAL;
+
+               if (fmt[i] != '%')
+                       continue;
+
+               /* at most three conversions, one per available argument */
+               if (fmt_cnt >= 3)
+                       return -EINVAL;
+
+               /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
+               i++;
+               if (fmt[i] == 'l') {
+                       mod[fmt_cnt]++;
+                       i++;
+               } else if (fmt[i] == 'p') {
+                       mod[fmt_cnt]++;
+                       /*
+                        * Peek at the terminator without consuming it: the
+                        * main loop must still validate fmt[i + 1].  If we
+                        * skipped it, a '%' right after "%p" (ispunct()
+                        * accepts it) would smuggle an unchecked conversion
+                        * such as "%p%s" past this filter, and __trace_printk
+                        * would then treat a raw argument as a pointer.
+                        */
+                       if (!isspace(fmt[i + 1]) && !ispunct(fmt[i + 1]) &&
+                           fmt[i + 1] != 0)
+                               return -EINVAL;
+                       fmt_cnt++;
+                       continue;
+               }
+
+               /* second 'l' of "%ll[dux]" */
+               if (fmt[i] == 'l') {
+                       mod[fmt_cnt]++;
+                       i++;
+               }
+
+               if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
+                       return -EINVAL;
+               fmt_cnt++;
+       }
+
+       /* always pass three args; unused ones are never consumed by fmt */
+       return __trace_printk(1/* fake ip will not be printed */, fmt,
+                             mod[0] == 2 ? r3 : mod[0] == 1 ? (long) r3 : (u32) r3,
+                             mod[1] == 2 ? r4 : mod[1] == 1 ? (long) r4 : (u32) r4,
+                             mod[2] == 2 ? r5 : mod[2] == 1 ? (long) r5 : (u32) r5);
+}
+
+/*
+ * Proto for the helper above: GPL-only; the verifier enforces that arg1
+ * points into the program stack and arg2 is that buffer's constant size.
+ */
+static const struct bpf_func_proto bpf_trace_printk_proto = {
+       .func           = bpf_trace_printk,
+       .gpl_only       = true,
+       .ret_type       = RET_INTEGER,
+       .arg2_type      = ARG_CONST_STACK_SIZE,
+       .arg1_type      = ARG_PTR_TO_STACK,
+};
+
 static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
 {
        switch (func_id) {
                return &bpf_probe_read_proto;
        case BPF_FUNC_ktime_get_ns:
                return &bpf_ktime_get_ns_proto;
+
+       case BPF_FUNC_trace_printk:
+               /*
+                * this program might be calling bpf_trace_printk,
+                * so allocate per-cpu printk buffers
+                */
+               trace_printk_init_buffers();
+
+               return &bpf_trace_printk_proto;
        default:
                return NULL;
        }