                bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
 
        fp->bpf_func = (void *)ctx.target;
+       fp->jited = 1;
 out:
        kfree(ctx.offsets);
        return;
 
 void bpf_jit_free(struct sk_filter *fp)
 {
-       if (fp->bpf_func != sk_run_filter)
+       if (fp->jited)
                module_free(NULL, fp->bpf_func);
        kfree(fp);
 }
 
                ((u64 *)image)[0] = (u64)code_base;
                ((u64 *)image)[1] = local_paca->kernel_toc;
                fp->bpf_func = (void *)image;
+               fp->jited = 1;
        }
 out:
        kfree(addrs);
 
 void bpf_jit_free(struct sk_filter *fp)
 {
-       if (fp->bpf_func != sk_run_filter)
+       if (fp->jited)
                module_free(NULL, fp->bpf_func);
        kfree(fp);
 }
 
        if (jit.start) {
                set_memory_ro((unsigned long)header, header->pages);
                fp->bpf_func = (void *) jit.start;
+               fp->jited = 1;
        }
 out:
        kfree(addrs);
        unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
        struct bpf_binary_header *header = (void *)addr;
 
-       if (fp->bpf_func == sk_run_filter)
+       if (!fp->jited)
                goto free_filter;
+
        set_memory_rw(addr, header->pages);
        module_free(NULL, header);
+
 free_filter:
        kfree(fp);
 }
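
The teardown hunk above (this appears to be the s390 JIT, which keeps its image behind a struct bpf_binary_header) relies on the image being allocated with the header at the start of a page, so masking the stored function pointer with PAGE_MASK walks back to the header. Below is a minimal userspace sketch of that recovery trick; the constants and the struct are illustrative stand-ins, not the kernel's own definitions.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

struct binary_header {
	unsigned int pages;	/* number of pages backing the image */
	uint8_t image[];	/* JITed code follows the header */
};

int main(void)
{
	/* Page-aligned allocation holding header + code, as the JIT does. */
	struct binary_header *hdr = aligned_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!hdr)
		return 1;
	hdr->pages = 1;

	/* The stored function pointer lands somewhere inside that page... */
	uint8_t *func = hdr->image + 64;

	/* ...so masking it with PAGE_MASK recovers the header. */
	struct binary_header *recovered =
		(struct binary_header *)((uintptr_t)func & PAGE_MASK);

	printf("recovered == hdr? %d, pages = %u\n",
	       recovered == hdr, recovered->pages);
	free(hdr);
	return 0;
}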
 
        if (image) {
                bpf_flush_icache(image, image + proglen);
                fp->bpf_func = (void *)image;
+               fp->jited = 1;
        }
 out:
        kfree(addrs);
 
 void bpf_jit_free(struct sk_filter *fp)
 {
-       if (fp->bpf_func != sk_run_filter)
+       if (fp->jited)
                module_free(NULL, fp->bpf_func);
        kfree(fp);
 }
 
                bpf_flush_icache(header, image + proglen);
                set_memory_ro((unsigned long)header, header->pages);
                fp->bpf_func = (void *)image;
+               fp->jited = 1;
        }
 out:
        kfree(addrs);
 
 void bpf_jit_free(struct sk_filter *fp)
 {
-       if (fp->bpf_func != sk_run_filter) {
+       if (fp->jited) {
                INIT_WORK(&fp->work, bpf_jit_free_deferred);
                schedule_work(&fp->work);
        } else {
 
 struct sk_filter
 {
        atomic_t                refcnt;
-       unsigned int            len;    /* Number of filter blocks */
+       u32                     jited:1,        /* Is our filter JIT'ed? */
+                               len:31;         /* Number of filter blocks */
        struct rcu_head         rcu;
        unsigned int            (*bpf_func)(const struct sk_buff *skb,
                                            const struct sock_filter *filter);
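
For the filter.h hunk above: packing jited into a one-bit field next to len keeps struct sk_filter the same size as before, since both fields now share the single 32-bit word that previously held unsigned int len alone, and 31 bits still cover far more than BPF_MAXINSNS (4096). A standalone sketch, using stand-in struct names rather than the kernel's, that checks the size claim:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins mirroring just the changed word, not the real sk_filter. */
struct before {
	unsigned int len;		/* Number of filter blocks */
};

struct after {
	uint32_t jited:1,		/* Is our filter JIT'ed? */
		 len:31;		/* Number of filter blocks */
};

int main(void)
{
	/* Both layouts occupy one 32-bit word, so the flag costs no space. */
	static_assert(sizeof(struct before) == sizeof(struct after),
		      "jited bit must not grow the structure");

	/* 31 bits still leave ample room above BPF_MAXINSNS (4096). */
	printf("max len in 31 bits: %u\n", (1u << 31) - 1);
	return 0;
}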
 
        int err;
 
        fp->bpf_func = sk_run_filter;
+       fp->jited = 0;
 
        err = sk_chk_filter(fp->insns, fp->len);
        if (err)
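
Taken together, the pattern across these hunks is: the attach path in net/core/filter.c clears jited whenever it points bpf_func at the interpreter, each architecture JIT sets jited = 1 only after it has successfully installed an image, and every bpf_jit_free now keys off that flag instead of comparing bpf_func against sk_run_filter. A small standalone sketch of that lifecycle, with stand-in types and helpers rather than kernel APIs:

#include <stdint.h>
#include <stdio.h>

struct filter {
	uint32_t jited:1, len:31;
	unsigned int (*func)(const void *pkt);
};

static unsigned int interpret(const void *pkt) { (void)pkt; return 0; }
static unsigned int jit_image(const void *pkt) { (void)pkt; return 1; }

/* Attach path: default to the interpreter and clear the flag together. */
static void attach(struct filter *fp)
{
	fp->func = interpret;
	fp->jited = 0;
}

/* JIT path: only a successful compile flips both fields. */
static void jit_compile(struct filter *fp, int jit_ok)
{
	if (jit_ok) {
		fp->func = jit_image;
		fp->jited = 1;
	}
}

/* Teardown keys off the flag, so it needs no reference to the
 * interpreter symbol when deciding whether an image must be freed. */
static void release(struct filter *fp)
{
	if (fp->jited)
		printf("free JIT image\n");
	else
		printf("interpreter only, nothing extra to free\n");
}

int main(void)
{
	struct filter fp = { 0 };

	attach(&fp);
	release(&fp);		/* interpreter only */

	attach(&fp);
	jit_compile(&fp, 1);
	release(&fp);		/* free JIT image */
	return 0;
}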