                goto bad_fork_cleanup_io;
 
        if (pid != &init_struct_pid) {
-               retval = -ENOMEM;
                pid = alloc_pid(p->nsproxy->pid_ns_for_children);
-               if (!pid)
+               if (IS_ERR(pid)) {
+                       retval = PTR_ERR(pid);
                        goto bad_fork_cleanup_io;
+               }
        }
 
        p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
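With this caller-side change, copy_process() no longer reports every alloc_pid() failure as -ENOMEM: the error is carried inside the returned pointer and recovered with PTR_ERR(). The short user-space sketch below illustrates that pointer-encoded-error convention and why a plain NULL check would now miss a failure; err_ptr(), is_err(), ptr_err() and alloc_thing() are simplified stand-ins invented for the example, not the kernel's <linux/err.h> definitions.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified user-space stand-ins for the kernel's ERR_PTR()/IS_ERR()/
 * PTR_ERR() helpers: a small negative errno is stored in the top page of
 * the address range, where no valid object can live. */
#define MAX_ERRNO 4095

static void *err_ptr(long error) { return (void *)error; }
static long ptr_err(const void *ptr) { return (long)ptr; }

static int is_err(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Invented allocator in the style of the patched alloc_pid(): on failure
 * it returns an encoded errno instead of NULL. */
static void *alloc_thing(int fail_with)
{
        if (fail_with)
                return err_ptr(-(long)fail_with);
        return malloc(16);
}

int main(void)
{
        void *p = alloc_thing(EAGAIN);

        /* The old "if (!p)" check would treat this result as success;
         * pointer-encoded errors need the is_err()/ptr_err() pair. */
        if (is_err(p)) {
                printf("allocation failed with errno %ld\n", -ptr_err(p));
                return 1;
        }
        free(p);
        return 0;
}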
 
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ ... @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
                        spin_unlock_irq(&pidmap_lock);
                        kfree(page);
                        if (unlikely(!map->page))
-                               break;
+                               return -ENOMEM;
                }
                if (likely(atomic_read(&map->nr_free))) {
                        for ( ; ; ) {
@@ ... @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
                }
                pid = mk_pid(pid_ns, map, offset);
        }
-       return -1;
+       return -EAGAIN;
 }
 
 int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
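The alloc_pidmap() hunks above separate the two failure modes: failing to allocate the bitmap page now returns -ENOMEM, while exhausting the pid range returns -EAGAIN instead of a bare -1, so callers receive a real errno. A minimal user-space sketch of that idea follows; alloc_id() and its parameters are invented for the illustration and only mimic the shape of the kernel function.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for alloc_pidmap(), invented for this sketch:
 * non-negative id on success, -ENOMEM when the bitmap page cannot be
 * allocated, -EAGAIN when every id in the range is already taken. */
static int alloc_id(int have_memory, int free_slots)
{
        if (!have_memory)
                return -ENOMEM;         /* mirrors the !map->page case */
        if (!free_slots)
                return -EAGAIN;         /* mirrors the old bare "return -1" */
        return 42;                      /* some free id */
}

int main(void)
{
        int scenarios[][2] = { { 1, 1 }, { 0, 1 }, { 1, 0 } };

        for (unsigned int i = 0; i < sizeof(scenarios) / sizeof(scenarios[0]); i++) {
                int nr = alloc_id(scenarios[i][0], scenarios[i][1]);

                /* Distinct errno values let the caller tell the two
                 * failures apart instead of seeing an opaque -1. */
                if (nr < 0)
                        printf("allocation failed: %s\n", strerror(-nr));
                else
                        printf("allocated id %d\n", nr);
        }
        return 0;
}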
@@ ... @@ struct pid *alloc_pid(struct pid_namespace *ns)
        int i, nr;
        struct pid_namespace *tmp;
        struct upid *upid;
+       int retval = -ENOMEM;
 
        pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
        if (!pid)
-               goto out;
+               return ERR_PTR(retval);
 
        tmp = ns;
        pid->level = ns->level;
        for (i = ns->level; i >= 0; i--) {
                nr = alloc_pidmap(tmp);
-               if (nr < 0)
+               if (IS_ERR_VALUE(nr)) {
+                       retval = nr;
                        goto out_free;
+               }
 
                pid->numbers[i].nr = nr;
                pid->numbers[i].ns = tmp;
        }
@@ ... @@ struct pid *alloc_pid(struct pid_namespace *ns)
        spin_unlock_irq(&pidmap_lock);
 
-out:
        return pid;
 
 out_unlock:
                free_pidmap(pid->numbers + i);
 
        kmem_cache_free(ns->pid_cachep, pid);
-       pid = NULL;
-       goto out;
+       return ERR_PTR(retval);
 }
 
 void disable_pid_allocation(struct pid_namespace *ns)
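Taken together, the patch threads the exact error code from the pid bitmap back to the fork() caller: alloc_pid() records the errno from alloc_pidmap() in retval and returns it via ERR_PTR(), and copy_process() converts it back with PTR_ERR(), so an exhausted pid space is reported as -EAGAIN rather than being folded into -ENOMEM. The self-contained user-space sketch below walks that round trip; all fake_* names are invented for the example, and the ERR_PTR()/IS_ERR()/PTR_ERR() macros are minimal re-implementations, not the kernel's.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Minimal user-space re-implementations of the kernel's err.h helpers. */
#define MAX_ERRNO 4095
#define ERR_PTR(e) ((void *)(long)(e))
#define PTR_ERR(p) ((long)(p))
#define IS_ERR(p)  ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)

/* Invented low-level allocator in the role of alloc_pidmap():
 * returns an id, -ENOMEM, or -EAGAIN. */
static int fake_alloc_pidmap(int exhausted)
{
        return exhausted ? -EAGAIN : 123;
}

/* Invented mid-level allocator in the role of alloc_pid(): it keeps the
 * specific errno in retval and returns it via ERR_PTR() instead of NULL. */
static void *fake_alloc_pid(int exhausted)
{
        int retval = -ENOMEM;
        int *pid = malloc(sizeof(*pid));

        if (!pid)
                return ERR_PTR(retval);

        int nr = fake_alloc_pidmap(exhausted);
        if (nr < 0) {
                retval = nr;            /* preserve -EAGAIN vs -ENOMEM */
                goto out_free;
        }
        *pid = nr;
        return pid;

out_free:
        free(pid);
        return ERR_PTR(retval);
}

/* Caller in the style of the patched copy_process(). */
static int fake_copy_process(int exhausted)
{
        void *pid = fake_alloc_pid(exhausted);

        if (IS_ERR(pid))
                return (int)PTR_ERR(pid);       /* no longer always -ENOMEM */
        free(pid);
        return 0;
}

int main(void)
{
        printf("pid space free:      %s\n", strerror(-fake_copy_process(0)));
        printf("pid space exhausted: %s\n", strerror(-fake_copy_process(1)));
        return 0;
}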