static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info,
                                int nonblock)
 {
+       enum pid_type type;
        ssize_t ret;
        DECLARE_WAITQUEUE(wait, current);
 
        spin_lock_irq(&current->sighand->siglock);
-       ret = dequeue_signal(current, &ctx->sigmask, info);
+       ret = dequeue_signal(current, &ctx->sigmask, info, &type);
        switch (ret) {
        case 0:
                if (!nonblock)
        add_wait_queue(&current->sighand->signalfd_wqh, &wait);
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
-               ret = dequeue_signal(current, &ctx->sigmask, info);
+               ret = dequeue_signal(current, &ctx->sigmask, info, &type);
                if (ret != 0)
                        break;
                if (signal_pending(current)) {
 
 extern void flush_signals(struct task_struct *);
 extern void ignore_signals(struct task_struct *);
 extern void flush_signal_handlers(struct task_struct *, int force_default);
-extern int dequeue_signal(struct task_struct *task,
-                         sigset_t *mask, kernel_siginfo_t *info);
+extern int dequeue_signal(struct task_struct *task, sigset_t *mask,
+                         kernel_siginfo_t *info, enum pid_type *type);
 
 static inline int kernel_dequeue_signal(void)
 {
        struct task_struct *task = current;
        kernel_siginfo_t __info;
+       enum pid_type __type;
        int ret;
 
        spin_lock_irq(&task->sighand->siglock);
-       ret = dequeue_signal(task, &task->blocked, &__info);
+       ret = dequeue_signal(task, &task->blocked, &__info, &__type);
        spin_unlock_irq(&task->sighand->siglock);
 
        return ret;
 
  *
  * All callers have to hold the siglock.
  */
-int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
+int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
+                  kernel_siginfo_t *info, enum pid_type *type)
 {
        bool resched_timer = false;
        int signr;
        /* We only dequeue private signals from ourselves, we don't let
         * signalfd steal them
         */
+       *type = PIDTYPE_PID;
        signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
        if (!signr) {
+               *type = PIDTYPE_TGID;
                signr = __dequeue_signal(&tsk->signal->shared_pending,
                                         mask, info, &resched_timer);
 #ifdef CONFIG_POSIX_TIMERS
        freezable_schedule();
 }
 
-static int ptrace_signal(int signr, kernel_siginfo_t *info)
+static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
 {
        /*
         * We do not check sig_kernel_stop(signr) but set this marker
 
        /* If the (new) signal is now blocked, requeue it.  */
        if (sigismember(&current->blocked, signr)) {
-               send_signal(signr, info, current, PIDTYPE_PID);
+               send_signal(signr, info, current, type);
                signr = 0;
        }
 
 
        for (;;) {
                struct k_sigaction *ka;
+               enum pid_type type;
 
                /* Has this task already been marked for death? */
                if (signal_group_exit(signal)) {
                 * so that the instruction pointer in the signal stack
                 * frame points to the faulting instruction.
                 */
+               type = PIDTYPE_PID;
                signr = dequeue_synchronous_signal(&ksig->info);
                if (!signr)
-                       signr = dequeue_signal(current, &current->blocked, &ksig->info);
+                       signr = dequeue_signal(current, &current->blocked,
+                                              &ksig->info, &type);
 
                if (!signr)
                        break; /* will return 0 */
 
                if (unlikely(current->ptrace) && (signr != SIGKILL) &&
                    !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) {
-                       signr = ptrace_signal(signr, &ksig->info);
+                       signr = ptrace_signal(signr, &ksig->info, type);
                        if (!signr)
                                continue;
                }
        ktime_t *to = NULL, timeout = KTIME_MAX;
        struct task_struct *tsk = current;
        sigset_t mask = *which;
+       enum pid_type type;
        int sig, ret = 0;
 
        if (ts) {
        signotset(&mask);
 
        spin_lock_irq(&tsk->sighand->siglock);
-       sig = dequeue_signal(tsk, &mask, info);
+       sig = dequeue_signal(tsk, &mask, info, &type);
        if (!sig && timeout) {
                /*
                 * None ready, temporarily unblock those we're interested
                spin_lock_irq(&tsk->sighand->siglock);
                __set_task_blocked(tsk, &tsk->real_blocked);
                sigemptyset(&tsk->real_blocked);
-               sig = dequeue_signal(tsk, &mask, info);
+               sig = dequeue_signal(tsk, &mask, info, &type);
        }
        spin_unlock_irq(&tsk->sighand->siglock);