/* NOTE: Need to enable interrupts in case we schedule. */
        ssm     PSW_SM_I, %r0
 
-       /* Check for software interrupts */
-
-       .import irq_stat,data
-
-       load32  irq_stat,%r19
-#ifdef CONFIG_SMP
-       mfctl   %cr30,%r1
-       ldw     TI_CPU(%r1),%r1 /* get cpu # - int */
-       /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
-       ** irq_stat[] is defined using ____cacheline_aligned.
-       */
-       SHLREG  %r1,L1_CACHE_SHIFT,%r20
-       add     %r19,%r20,%r19  /* now have &irq_stat[smp_processor_id()] */
-#endif /* CONFIG_SMP */
-
 intr_check_resched:
 
        /* check for reschedule */
         */
        loadgp
 
-syscall_check_bh:
-
-       /* Check for software interrupts */
-
-       .import irq_stat,data
-
-       load32  irq_stat,%r19
-
-#ifdef CONFIG_SMP
-       /* sched.h: int processor */
-       /* %r26 is used as scratch register to index into irq_stat[] */
-       ldw     TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */
-
-       /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
-       SHLREG  %r26,L1_CACHE_SHIFT,%r20
-       add     %r19,%r20,%r19  /* now have &irq_stat[smp_processor_id()] */
-#endif /* CONFIG_SMP */
-
 syscall_check_resched:
 
        /* check for reschedule */
 #else
        nop
 #endif
-       b       syscall_check_bh  /* if resched, we start over again */
+       b       syscall_check_resched   /* if resched, we start over again */
        nop
 ENDPROC(syscall_exit)