 #ifdef CONFIG_PAGE_POOL
 
-/* This is the number of tokens that the user can SO_DEVMEM_DONTNEED in
- * 1 syscall. The limit exists to limit the amount of memory the kernel
- * allocates to copy these tokens.
+/* This is the number of tokens and frags that the user can SO_DEVMEM_DONTNEED
+ * in 1 syscall. These limits exist to bound the amount of memory the kernel
+ * allocates to copy these tokens, and to prevent looping over the frags for
+ * too long.
  */
 #define MAX_DONTNEED_TOKENS 128
+#define MAX_DONTNEED_FRAGS 1024
 
 static noinline_for_stack int
 sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen)
 {
        unsigned int num_tokens, i, j, k, netmem_num = 0;
        struct dmabuf_token *tokens;
+       int ret = 0, num_frags = 0;
        netmem_ref netmems[16];
-       int ret = 0;
 
        if (!sk_is_tcp(sk))
                return -EBADF;
 
-       if (optlen % sizeof(struct dmabuf_token) ||
+       if (optlen % sizeof(*tokens) ||
            optlen > sizeof(*tokens) * MAX_DONTNEED_TOKENS)
                return -EINVAL;
 
-       tokens = kvmalloc_array(optlen, sizeof(*tokens), GFP_KERNEL);
+       num_tokens = optlen / sizeof(*tokens);
+       tokens = kvmalloc_array(num_tokens, sizeof(*tokens), GFP_KERNEL);
        if (!tokens)
                return -ENOMEM;
 
-       num_tokens = optlen / sizeof(struct dmabuf_token);
        if (copy_from_sockptr(tokens, optval, optlen)) {
                kvfree(tokens);
                return -EFAULT;
        }
 
        xa_lock_bh(&sk->sk_user_frags);
        for (i = 0; i < num_tokens; i++) {
                for (j = 0; j < tokens[i].token_count; j++) {
+                       if (++num_frags > MAX_DONTNEED_FRAGS)
+                               goto frag_limit_reached;
+
                        netmem_ref netmem = (__force netmem_ref)__xa_erase(
                                &sk->sk_user_frags, tokens[i].token_start + j);
 
-                       if (netmem &&
-                           !WARN_ON_ONCE(!netmem_is_net_iov(netmem))) {
-                               netmems[netmem_num++] = netmem;
-                               if (netmem_num == ARRAY_SIZE(netmems)) {
-                                       xa_unlock_bh(&sk->sk_user_frags);
-                                       for (k = 0; k < netmem_num; k++)
-                                               WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));
-                                       netmem_num = 0;
-                                       xa_lock_bh(&sk->sk_user_frags);
-                               }
-                               ret++;
+                       if (!netmem || WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
+                               continue;
+
+                       netmems[netmem_num++] = netmem;
+                       if (netmem_num == ARRAY_SIZE(netmems)) {
+                               xa_unlock_bh(&sk->sk_user_frags);
+                               for (k = 0; k < netmem_num; k++)
+                                       WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));
+                               netmem_num = 0;
+                               xa_lock_bh(&sk->sk_user_frags);
                        }
+                       ret++;
                }
        }
 
+frag_limit_reached:
        xa_unlock_bh(&sk->sk_user_frags);
        for (k = 0; k < netmem_num; k++)
                WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));
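
Since the diff above bounds a userspace-visible operation, a minimal sketch of
the calling side may help; it is an assumption-laden example, not part of the
patch. The helper name return_tokens() is hypothetical, struct dmabuf_token
comes from the uapi header <linux/uio.h>, the fallback value 80 for
SO_DEVMEM_DONTNEED matches asm-generic/socket.h, and fd is assumed to be a TCP
socket that has received devmem frags (token values taken from struct
dmabuf_cmsg::frag_token):

	#include <stdio.h>
	#include <sys/socket.h>
	#include <linux/uio.h>		/* struct dmabuf_token */

	#ifndef SO_DEVMEM_DONTNEED
	#define SO_DEVMEM_DONTNEED 80	/* asm-generic/socket.h */
	#endif

	/* Hypothetical helper: hand a batch of RX frag tokens back to the
	 * kernel. num_tokens must not exceed MAX_DONTNEED_TOKENS (128), or
	 * the kernel rejects the whole call with -EINVAL.
	 */
	static int return_tokens(int fd, struct dmabuf_token *tokens,
				 unsigned int num_tokens)
	{
		int ret = setsockopt(fd, SOL_SOCKET, SO_DEVMEM_DONTNEED,
				     tokens, num_tokens * sizeof(*tokens));

		if (ret < 0)
			perror("SO_DEVMEM_DONTNEED");
		return ret;	/* per sock_devmem_dontneed(): frags freed */
	}

With MAX_DONTNEED_FRAGS in place, a single call may also release fewer frags
than the tokens describe: after 1024 frags the loop bails out through
frag_limit_reached and the remaining tokens are left untouched in
sk_user_frags. Because frags are consumed in token order, the cut-off is
deterministic (the first 1024 frags described by the token array are erased,
the rest are not), so callers submitting large token_count ranges can
resubmit the unprocessed tail; the return value counts only the frags that
were actually freed.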