 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
 {
-       atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
-       sk_filter_release(fp);
-}
+       u32 filter_size = sk_filter_size(fp->len);
 
-void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
-{
-       atomic_inc(&fp->refcnt);
-       atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
+       atomic_sub(filter_size, &sk->sk_omem_alloc);
+       sk_filter_release(fp);
 }
 
-static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
-                                             struct sock *sk,
-                                             unsigned int len)
+/* Try to charge the socket memory if there is space available.
+ * Return true on success.
+ */
+bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 {
-       struct sk_filter *fp_new;
-
-       if (sk == NULL)
-               return krealloc(fp, len, GFP_KERNEL);
-
-       fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
-       if (fp_new) {
-               *fp_new = *fp;
-               /* As we're keeping orig_prog in fp_new along,
-                * we need to make sure we're not evicting it
-                * from the old fp.
-                */
-               fp->orig_prog = NULL;
-               sk_filter_uncharge(sk, fp);
+       u32 filter_size = sk_filter_size(fp->len);
+
+       /* same check as in sock_kmalloc() */
+       if (filter_size <= sysctl_optmem_max &&
+           atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
+               atomic_inc(&fp->refcnt);
+               atomic_add(filter_size, &sk->sk_omem_alloc);
+               return true;
        }
-
-       return fp_new;
+       return false;
 }
 
-static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
-                                            struct sock *sk)
+static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp)
 {
        struct sock_filter *old_prog;
        struct sk_filter *old_fp;
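The "same check as in sock_kmalloc()" comment above refers to the optmem accounting that sock_kmalloc() performs before allocating option memory on behalf of a socket. For comparison, its logic looks roughly like this (a simplified sketch of this era's net/core/sock.c, not part of the patch):

    void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
    {
            if ((unsigned int)size <= sysctl_optmem_max &&
                atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
                    void *mem;

                    /* Charge first, so a sleeping kmalloc() cannot race
                     * other chargers past the limit.
                     */
                    atomic_add(size, &sk->sk_omem_alloc);
                    mem = kmalloc(size, priority);
                    if (mem)
                            return mem;
                    atomic_sub(size, &sk->sk_omem_alloc);
            }
            return NULL;
    }

sk_filter_charge() only needs to duplicate the limit test, because the filter memory itself is now allocated with plain kmalloc() before any socket is involved.

@@ ... @@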
 
        /* Expand fp for appending the new filter representation. */
        old_fp = fp;
-       fp = __sk_migrate_realloc(old_fp, sk, sk_filter_size(new_len));
+       fp = krealloc(old_fp, sk_filter_size(new_len), GFP_KERNEL);
        if (!fp) {
                /* The old_fp is still around in case we couldn't
                 * allocate new memory, so uncharge on that one.
@@ ... @@
                /* 2nd sk_convert_filter() can fail only if it fails
                 * to allocate memory, remapping must succeed. Note,
                 * that at this time old_fp has already been released
-                * by __sk_migrate_realloc().
+                * by krealloc().
                 */
                goto out_err_free;
 
@@ ... @@
 out_err_free:
        kfree(old_prog);
 out_err:
-       /* Rollback filter setup. */
-       if (sk != NULL)
-               sk_filter_uncharge(sk, fp);
-       else
-               kfree(fp);
+       __sk_filter_release(fp);
        return ERR_PTR(err);
 }
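Both rollback paths above now end in __sk_filter_release(), which frees the filter outright; no socket accounting is involved because nothing has been charged at this point. Once a filter has been attached and charged, teardown instead goes through the refcounted path, which looks roughly like this (sketch of the era's sk_filter_release() helper, shown for contrast):

    static inline void sk_filter_release(struct sk_filter *fp)
    {
            if (atomic_dec_and_test(&fp->refcnt))
                    call_rcu(&fp->rcu, sk_filter_release_rcu);
    }

sk_filter_uncharge() (top of this patch) wraps that refcount drop together with the atomic_sub() on sk->sk_omem_alloc.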
 
-static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
-                                            struct sock *sk)
+static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp)
 {
        int err;
 
@@ ... @@
 
        err = sk_chk_filter(fp->insns, fp->len);
        if (err) {
-               if (sk != NULL)
-                       sk_filter_uncharge(sk, fp);
-               else
-                       kfree(fp);
+               __sk_filter_release(fp);
                return ERR_PTR(err);
        }
 
@@ ... @@
         * internal BPF translation for the optimized interpreter.
         */
        if (!fp->jited)
-               fp = __sk_migrate_filter(fp, sk);
+               fp = __sk_migrate_filter(fp);
 
        return fp;
 }
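Putting the hunks together, __sk_prepare_filter() is now entirely socket-agnostic. Reconstructed in full, it reads approximately as follows (the JIT-probe lines come from the elided context and may differ in detail):

    static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp)
    {
            int err;

            err = sk_chk_filter(fp->insns, fp->len);
            if (err) {
                    __sk_filter_release(fp);
                    return ERR_PTR(err);
            }

            /* Probe whether we can JIT compile the filter. */
            bpf_jit_compile(fp);

            /* JIT compiler couldn't process this filter, so do the
             * internal BPF translation for the optimized interpreter.
             */
            if (!fp->jited)
                    fp = __sk_migrate_filter(fp);

            return fp;
    }

@@ ... @@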
         */
        fp->orig_prog = NULL;
 
-       /* __sk_prepare_filter() already takes care of uncharging
+       /* __sk_prepare_filter() already takes care of freeing
         * memory in case something goes wrong.
         */
-       fp = __sk_prepare_filter(fp, NULL);
+       fp = __sk_prepare_filter(fp);
        if (IS_ERR(fp))
                return PTR_ERR(fp);
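For unattached filters there is no socket at all, so allocation and cleanup are now plain kmalloc()/kfree() with no charging anywhere. A hypothetical in-kernel user would drive the API like this (demo_unattached() and its one-instruction program are illustrative only; depending on the exact tree, the program type may be struct sock_fprog rather than struct sock_fprog_kern):

    static int demo_unattached(void)
    {
            struct sock_filter insns[] = {
                    BPF_STMT(BPF_RET | BPF_K, 0xffff),  /* accept packet */
            };
            struct sock_fprog_kern fprog = {
                    .len    = ARRAY_SIZE(insns),
                    .filter = insns,
            };
            struct sk_filter *fp;
            int err;

            err = sk_unattached_filter_create(&fp, &fprog);
            if (err)
                    return err;

            /* ... run it with SK_RUN_FILTER(fp, skb) ... */

            sk_unattached_filter_destroy(fp);
            return 0;
    }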
 
@@ ... @@
        if (fprog->filter == NULL)
                return -EINVAL;
 
-       fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
+       fp = kmalloc(sk_fsize, GFP_KERNEL);
        if (!fp)
                return -ENOMEM;
 
        if (copy_from_user(fp->insns, fprog->filter, fsize)) {
-               sock_kfree_s(sk, fp, sk_fsize);
+               kfree(fp);
                return -EFAULT;
        }
 
-       atomic_set(&fp->refcnt, 1);
        fp->len = fprog->len;
 
        err = sk_store_orig_filter(fp, fprog);
        if (err) {
-               sk_filter_uncharge(sk, fp);
+               kfree(fp);
                return -ENOMEM;
        }
 
-       /* __sk_prepare_filter() already takes care of uncharging
+       /* __sk_prepare_filter() already takes care of freeing
         * memory in case something goes wrong.
         */
-       fp = __sk_prepare_filter(fp, sk);
+       fp = __sk_prepare_filter(fp);
        if (IS_ERR(fp))
                return PTR_ERR(fp);
 
+       atomic_set(&fp->refcnt, 0);
+
+       if (!sk_filter_charge(sk, fp)) {
+               __sk_filter_release(fp);
+               return -ENOMEM;
+       }
+
        old_fp = rcu_dereference_protected(sk->sk_filter,
                                           sock_owned_by_user(sk));
        rcu_assign_pointer(sk->sk_filter, fp);
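The ordering here is the heart of the patch: the refcount starts at zero, so a filter that fails charging can be freed directly, while a successful sk_filter_charge() accounts the memory and takes the first reference in one step. Annotated restatement of the lines above (the comments are editorial, not in the patch):

    atomic_set(&fp->refcnt, 0);            /* prepared, but not owned yet */

    if (!sk_filter_charge(sk, fp)) {       /* on success: refcnt 0 -> 1 and
                                            * sk_omem_alloc += filter size */
            __sk_filter_release(fp);       /* refcnt still 0: free directly */
            return -ENOMEM;
    }

    old_fp = rcu_dereference_protected(sk->sk_filter,
                                       sock_owned_by_user(sk));
    rcu_assign_pointer(sk->sk_filter, fp); /* publish the charged filter */

The displaced old_fp is then dropped with sk_filter_uncharge() in the context following this hunk, as before.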
 
diff --git a/net/core/sock.c b/net/core/sock.c
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ ... @@
 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 {
        struct sock *newsk;
+       bool is_charged = true;
 
        newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
        if (newsk != NULL) {
@@ ... @@
 
                filter = rcu_dereference_protected(newsk->sk_filter, 1);
                if (filter != NULL)
-                       sk_filter_charge(newsk, filter);
+                       /* Though it's an empty new sock, the charging may fail
+                        * if sysctl_optmem_max was changed between the creation
+                        * of the original socket and this clone.
+                        */
+                       is_charged = sk_filter_charge(newsk, filter);
 
-               if (unlikely(xfrm_sk_clone_policy(newsk))) {
+               if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) {
                        /* It is still raw copy of parent, so invalidate
                         * destructor and make plain sk_free() */
                        newsk->sk_destruct = NULL;
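When is_charged comes back false, the child is still a raw copy of the parent, so the existing xfrm error path is reused: the destructor is invalidated and the half-built socket is torn down with a plain sk_free(). For context, the code following this hunk continues roughly like this (reconstructed, not part of the diff):

                        bh_unlock_sock(newsk);
                        sk_free(newsk);
                        newsk = NULL;
                        goto out;
                }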