* ticking in case the signal is deliverable next time.
                 */
                posix_cpu_timer_schedule(timer);
+               ++timer->it_requeue_pending;
        }
 }
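
After this change the ignored-signal branch of cpu_timer_fire() bumps it_requeue_pending at the call site, because the common exit path in posix_cpu_timer_schedule() that used to do it is removed below. A minimal sketch of that branch; only the two statements inside it come from this hunk, and the enclosing else-if is recalled from the surrounding function rather than shown here:

        } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
                /*
                 * The signal was not queued (it is ignored), so no dequeue
                 * callback will rearm the timer.  Keep it ticking in case
                 * the signal is deliverable next time.
                 */
                posix_cpu_timer_schedule(timer);
                /* Requeue accounting moved out of posix_cpu_timer_schedule(). */
                ++timer->it_requeue_pending;
        }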
 
                cpu_clock_sample(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                if (unlikely(p->exit_state))
-                       goto out;
+                       return;
 
                /* Protect timer list r/w in arm_timer() */
                sighand = lock_task_sighand(p, &flags);
                if (!sighand)
-                       goto out;
+                       return;
        } else {
                /*
                 * Protect arm_timer() and timer sampling in case of call to
                         * We can't even collect a sample any more.
                         */
                        timer->it.cpu.expires = 0;
-                       goto out;
+                       return;
                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
-                       unlock_task_sighand(p, &flags);
-                       /* Optimizations: if the process is dying, no need to rearm */
-                       goto out;
+                       /* If the process is dying, no need to rearm */
+                       goto unlock;
                }
                cpu_timer_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
         */
        WARN_ON_ONCE(!irqs_disabled());
        arm_timer(timer);
+unlock:
        unlock_task_sighand(p, &flags);
-
-out:
-       timer->it_overrun_last = timer->it_overrun;
-       timer->it_overrun = -1;
-       ++timer->it_requeue_pending;
 }
 
 /**
 
        timr->it_overrun += (unsigned int) hrtimer_forward(timer,
                                                timer->base->get_time(),
                                                timr->it.real.interval);
-
-       timr->it_overrun_last = timr->it_overrun;
-       timr->it_overrun = -1;
-       ++timr->it_requeue_pending;
        hrtimer_restart(timer);
 }
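
With the bookkeeping lines gone, the hrtimer-based schedule_next_timer() reduces to forwarding past now and restarting the timer. A sketch of the resulting function; the function head, the local hrtimer pointer, and the one-shot early return are assumed, as they sit outside this hunk:

static void schedule_next_timer(struct k_itimer *timr)
{
        struct hrtimer *timer = &timr->it.real.timer;

        /* One-shot timer: nothing to rearm. */
        if (timr->it.real.interval == 0)
                return;

        /* Account missed periods as overruns and push the expiry forward. */
        timr->it_overrun += (unsigned int) hrtimer_forward(timer,
                                                timer->base->get_time(),
                                                timr->it.real.interval);
        hrtimer_restart(timer);
}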
 
        unsigned long flags;
 
        timr = lock_timer(info->si_tid, &flags);
+       if (!timr)
+               return;
 
-       if (timr && timr->it_requeue_pending == info->si_sys_private) {
+       if (timr->it_requeue_pending == info->si_sys_private) {
                if (timr->it_clock < 0)
                        posix_cpu_timer_schedule(timr);
                else
                        schedule_next_timer(timr);
 
+               timr->it_overrun_last = timr->it_overrun;
+               timr->it_overrun = -1;
+               ++timr->it_requeue_pending;
+
                info->si_overrun += timr->it_overrun_last;
        }
 
-       if (timr)
-               unlock_timer(timr, flags);
+       unlock_timer(timr, flags);
 }
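
Assembled from the context and '+' lines above, do_schedule_next_timer() now carries the overrun/requeue_pending update for both clock types in one place. A sketch of the resulting function; the signature and the timr declaration are assumed, since they sit outside the hunk:

void do_schedule_next_timer(struct siginfo *info)
{
        struct k_itimer *timr;
        unsigned long flags;

        timr = lock_timer(info->si_tid, &flags);
        if (!timr)
                return;

        /* Rearm only if this signal matches the pending requeue token. */
        if (timr->it_requeue_pending == info->si_sys_private) {
                if (timr->it_clock < 0)
                        posix_cpu_timer_schedule(timr);
                else
                        schedule_next_timer(timr);

                /* Common overrun/requeue bookkeeping for both clock types. */
                timr->it_overrun_last = timr->it_overrun;
                timr->it_overrun = -1;
                ++timr->it_requeue_pending;

                info->si_overrun += timr->it_overrun_last;
        }

        unlock_timer(timr, flags);
}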
 
 int posix_timer_event(struct k_itimer *timr, int si_private)