/* Actual code that puts the SoC in different idle states */
 static int at91_enter_idle(struct cpuidle_device *dev,
-                              struct cpuidle_state *state)
+                              int index)
 {
        struct timeval before, after;
        int idle_time;
 
        local_irq_disable();
        do_gettimeofday(&before);
-       if (state == &dev->states[0])
+       if (index == 0)
                /* Wait for interrupt state */
                cpu_do_idle();
-       else if (state == &dev->states[1]) {
+       else if (index == 1) {
                asm("b 1f; .align 5; 1:");
                asm("mcr p15, 0, r0, c7, c10, 4");      /* drain write buffer */
                saved_lpr = sdram_selfrefresh_enable();
        local_irq_enable();
        idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
                        (after.tv_usec - before.tv_usec);
-       return idle_time;
+
+       dev->last_residency = idle_time;
+       return index;
 }
 
 /* Initialize CPU idle by registering the idle states */
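
Every driver converted by this series follows the pattern above: the
->enter() callback measures its own residency, stores it in
dev->last_residency, and returns the index of the state it actually
entered (or a negative errno on failure) instead of returning the
residency itself. A minimal, compilable model of the new contract
(my_enter() and struct idle_dev are illustrative stand-ins, not kernel
code):

	#include <errno.h>

	struct idle_dev {
		int last_residency;	/* microseconds, set by enter() */
	};

	static int my_enter(struct idle_dev *dev, int index)
	{
		int idle_time_us;

		if (index < 0)
			return -EINVAL;	/* failed entry: residency invalid */

		idle_time_us = 0;	/* stand-in for the measured interval */
		dev->last_residency = idle_time_us;
		return index;		/* success: report the state entered */
	}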
 
 
 /* Actual code that puts the SoC in different idle states */
 static int davinci_enter_idle(struct cpuidle_device *dev,
-                                               struct cpuidle_state *state)
+                                               int index)
 {
-       struct davinci_ops *ops = cpuidle_get_statedata(state);
+       struct davinci_ops *ops = cpuidle_get_statedata(&dev->states[index]);
        struct timeval before, after;
        int idle_time;
 
        local_irq_enable();
        idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
                        (after.tv_usec - before.tv_usec);
-       return idle_time;
+
+       dev->last_residency = idle_time;
+
+       return index;
 }
 
 static int __init davinci_cpuidle_probe(struct platform_device *pdev)
 
 #include <asm/proc-fns.h>
 
 static int exynos4_enter_idle(struct cpuidle_device *dev,
-                             struct cpuidle_state *state);
+                             int index);
 
 static struct cpuidle_state exynos4_cpuidle_set[] = {
        [0] = {
 };
 
 static int exynos4_enter_idle(struct cpuidle_device *dev,
-                             struct cpuidle_state *state)
+                             int index)
 {
        struct timeval before, after;
        int idle_time;
        idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
                    (after.tv_usec - before.tv_usec);
 
-       return idle_time;
+       dev->last_residency = idle_time;
+       return index;
 }
 
 static int __init exynos4_init_cpuidle(void)
 
 
 /* Actual code that puts the SoC in different idle states */
 static int kirkwood_enter_idle(struct cpuidle_device *dev,
-                              struct cpuidle_state *state)
+                              int index)
 {
        struct timeval before, after;
        int idle_time;
 
        local_irq_disable();
        do_gettimeofday(&before);
-       if (state == &dev->states[0])
+       if (index == 0)
                /* Wait for interrupt state */
                cpu_do_idle();
-       else if (state == &dev->states[1]) {
+       else if (index == 1) {
                /*
                 * Following write will put DDR in self refresh.
                 * Note that we have 256 cycles before DDR puts it
        local_irq_enable();
        idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
                        (after.tv_usec - before.tv_usec);
-       return idle_time;
+
+       /* Update last residency */
+       dev->last_residency = idle_time;
+
+       return index;
 }
 
 /* Initialize CPU idle by registering the idle states */
 
 /**
  * omap3_enter_idle - Programs OMAP3 to enter the specified state
  * @dev: cpuidle device
- * @state: The target state to be programmed
+ * @index: the index of the target state to be entered
  *
  * Called from the CPUidle framework to program the device to the
  * specified target state selected by the governor.
  */
 static int omap3_enter_idle(struct cpuidle_device *dev,
-                       struct cpuidle_state *state)
+                               int index)
 {
-       struct omap3_idle_statedata *cx = cpuidle_get_statedata(state);
+       struct omap3_idle_statedata *cx =
+                       cpuidle_get_statedata(&dev->states[index]);
        struct timespec ts_preidle, ts_postidle, ts_idle;
        u32 mpu_state = cx->mpu_state, core_state = cx->core_state;
+       int idle_time;
 
        /* Used to keep track of the total time in idle */
        getnstimeofday(&ts_preidle);
                goto return_sleep_time;
 
        /* Deny idle for C1 */
-       if (state == &dev->states[0]) {
+       if (index == 0) {
                pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
                pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
        }
        omap_sram_idle();
 
        /* Re-allow idle for C1 */
-       if (state == &dev->states[0]) {
+       if (index == 0) {
                pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
                pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
        }
        local_irq_enable();
        local_fiq_enable();
 
-       return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC;
+       idle_time = ts_idle.tv_nsec / NSEC_PER_USEC +
+                       ts_idle.tv_sec * USEC_PER_SEC;
+
+       /* Update cpuidle counters */
+       dev->last_residency = idle_time;
+
+       return index;
 }
 
 /**
  * next_valid_state - Find next valid C-state
  * @dev: cpuidle device
- * @state: Currently selected C-state
+ * @index: index of the currently selected C-state
  *
- * If the current state is valid, it is returned back to the caller.
- * Else, this function searches for a lower c-state which is still
- * valid.
+ * If the state corresponding to index is valid, index is returned
+ * to the caller. Otherwise, this function searches for a lower C-state
+ * which is still valid (as defined in omap3_power_states[]) and
+ * returns its index.
  *
  * A state is valid if the 'valid' field is enabled and
  * if it satisfies the enable_off_mode condition.
  */
-static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
-                                             struct cpuidle_state *curr)
+static int next_valid_state(struct cpuidle_device *dev,
+                               int index)
 {
-       struct cpuidle_state *next = NULL;
+       struct cpuidle_state *curr = &dev->states[index];
        struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr);
        u32 mpu_deepest_state = PWRDM_POWER_RET;
        u32 core_deepest_state = PWRDM_POWER_RET;
+       int next_index = -1;
 
        if (enable_off_mode) {
                mpu_deepest_state = PWRDM_POWER_OFF;
        if ((cx->valid) &&
            (cx->mpu_state >= mpu_deepest_state) &&
            (cx->core_state >= core_deepest_state)) {
-               return curr;
+               return index;
        } else {
                int idx = OMAP3_NUM_STATES - 1;
 
                /* Reach the current state starting at highest C-state */
                for (; idx >= 0; idx--) {
                        if (&dev->states[idx] == curr) {
-                               next = &dev->states[idx];
+                               next_index = idx;
                                break;
                        }
                }
 
                /* Should never hit this condition */
-               WARN_ON(next == NULL);
+               WARN_ON(next_index == -1);
 
                /*
                 * Drop to next valid state.
                        if ((cx->valid) &&
                            (cx->mpu_state >= mpu_deepest_state) &&
                            (cx->core_state >= core_deepest_state)) {
-                               next = &dev->states[idx];
+                               next_index = idx;
                                break;
                        }
                }
                /*
                 * C1 is always valid.
-                * So, no need to check for 'next==NULL' outside this loop.
+                * So, no need to check for 'next_index == -1' outside
+                * this loop.
                 */
        }
 
-       return next;
+       return next_index;
 }
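
Stripped of the powerdomain checks, the search above reduces to a
downward scan from the requested index. A standalone model (is_valid[]
is a hypothetical validity table standing in for the
omap3_idle_statedata checks; since C1 at index 0 is always valid, the
scan cannot fail in practice):

	static int model_next_valid_state(const int *is_valid, int index)
	{
		int idx;

		/* drop toward shallower states until one is usable */
		for (idx = index; idx >= 0; idx--)
			if (is_valid[idx])
				return idx;

		return -1;	/* unreachable while is_valid[0] is set */
	}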
 
 /**
  * omap3_enter_idle_bm - Checks for any bus activity
  * @dev: cpuidle device
- * @state: The target state to be programmed
+ * @index: array index of target state to be programmed
  *
  * This function checks for any pending activity and then programs
  * the device to the specified or a safer state.
  */
 static int omap3_enter_idle_bm(struct cpuidle_device *dev,
-                              struct cpuidle_state *state)
+                              int index)
 {
-       struct cpuidle_state *new_state;
+       struct cpuidle_state *state = &dev->states[index];
+       int new_state_idx;
        u32 core_next_state, per_next_state = 0, per_saved_state = 0, cam_state;
        struct omap3_idle_statedata *cx;
        int ret;
 
        if (!omap3_can_sleep()) {
-               new_state = dev->safe_state;
+               new_state_idx = dev->safe_state_index;
                goto select_state;
        }
 
         */
        cam_state = pwrdm_read_pwrst(cam_pd);
        if (cam_state == PWRDM_POWER_ON) {
-               new_state = dev->safe_state;
+               new_state_idx = dev->safe_state_index;
                goto select_state;
        }
 
        if (per_next_state != per_saved_state)
                pwrdm_set_next_pwrst(per_pd, per_next_state);
 
-       new_state = next_valid_state(dev, state);
+       new_state_idx = next_valid_state(dev, index);
 
 select_state:
-       dev->last_state = new_state;
-       ret = omap3_enter_idle(dev, new_state);
+       ret = omap3_enter_idle(dev, new_state_idx);
 
        /* Restore original PER state if it was modified */
        if (per_next_state != per_saved_state)
 
        cpuidle_register_driver(&omap3_idle_driver);
        dev = &per_cpu(omap3_idle_dev, smp_processor_id());
+       dev->safe_state_index = -1;
 
        /* C1 . MPU WFI + Core active */
        cx = _fill_cstate(dev, 0, "MPU ON + CORE ON");
        (&dev->states[0])->enter = omap3_enter_idle;
-       dev->safe_state = &dev->states[0];
+       dev->safe_state_index = 0;
        cx->valid = 1;  /* C1 is always valid */
        cx->mpu_state = PWRDM_POWER_ON;
        cx->core_state = PWRDM_POWER_ON;
 
 };
 
 static int cpuidle_sleep_enter(struct cpuidle_device *dev,
-                              struct cpuidle_state *state)
+                               int index)
 {
        unsigned long allowed_mode = arch_hwblk_sleep_mode();
        ktime_t before, after;
-       int requested_state = state - &dev->states[0];
+       int requested_state = index;
        int allowed_state;
        int k;
 
         */
        k = min_t(int, allowed_state, requested_state);
 
-       dev->last_state = &dev->states[k];
        before = ktime_get();
        sh_mobile_call_standby(cpuidle_mode[k]);
        after = ktime_get();
-       return ktime_to_ns(ktime_sub(after, before)) >> 10;
+
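+       /* ns >> 10 divides by 1024: a cheap approximation of ns to us */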
+       dev->last_residency = (int)(ktime_to_ns(ktime_sub(after, before)) >> 10);
+
+       return k;
 }
 
 static struct cpuidle_device cpuidle_dev;
        state->flags |= CPUIDLE_FLAG_TIME_VALID;
        state->enter = cpuidle_sleep_enter;
 
-       dev->safe_state = state;
+       dev->safe_state_index = i - 1;
 
        if (sh_mobile_sleep_supported & SUSP_SH_SF) {
                state = &dev->states[i++];
 
 /**
  * acpi_idle_enter_c1 - enters an ACPI C1 state-type
  * @dev: the target CPU
- * @state: the state data
+ * @index: index of target state
  *
  * This is equivalent to the HALT instruction.
  */
 static int acpi_idle_enter_c1(struct cpuidle_device *dev,
-                             struct cpuidle_state *state)
+                               int index)
 {
        ktime_t  kt1, kt2;
        s64 idle_time;
        struct acpi_processor *pr;
+       struct cpuidle_state *state = &dev->states[index];
        struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
 
        pr = __this_cpu_read(processors);
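+       /* zero the residency up front so early failure paths report 0 */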
+       dev->last_residency = 0;
 
        if (unlikely(!pr))
-               return 0;
+               return -EINVAL;
 
        local_irq_disable();
 
        if (acpi_idle_suspend) {
                local_irq_enable();
                cpu_relax();
-               return 0;
+               return -EINVAL;
        }
 
        lapic_timer_state_broadcast(pr, cx, 1);
        kt2 = ktime_get_real();
        idle_time =  ktime_to_us(ktime_sub(kt2, kt1));
 
+       /* Update device last_residency */
+       dev->last_residency = (int)idle_time;
+
        local_irq_enable();
        cx->usage++;
        lapic_timer_state_broadcast(pr, cx, 0);
 
-       return idle_time;
+       return index;
 }
 
 /**
  * acpi_idle_enter_simple - enters an ACPI state without BM handling
  * @dev: the target CPU
- * @state: the state data
+ * @index: the index of the suggested state
  */
 static int acpi_idle_enter_simple(struct cpuidle_device *dev,
-                                 struct cpuidle_state *state)
+                               int index)
 {
        struct acpi_processor *pr;
+       struct cpuidle_state *state = &dev->states[index];
        struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
        ktime_t  kt1, kt2;
        s64 idle_time_ns;
        s64 idle_time;
 
        pr = __this_cpu_read(processors);
+       dev->last_residency = 0;
 
        if (unlikely(!pr))
-               return 0;
-
-       if (acpi_idle_suspend)
-               return(acpi_idle_enter_c1(dev, state));
+               return -EINVAL;
 
        local_irq_disable();
 
+       if (acpi_idle_suspend) {
+               local_irq_enable();
+               cpu_relax();
+               return -EINVAL;
+       }
+
        if (cx->entry_method != ACPI_CSTATE_FFH) {
                current_thread_info()->status &= ~TS_POLLING;
                /*
                if (unlikely(need_resched())) {
                        current_thread_info()->status |= TS_POLLING;
                        local_irq_enable();
-                       return 0;
+                       return -EINVAL;
                }
        }
 
        idle_time = idle_time_ns;
        do_div(idle_time, NSEC_PER_USEC);
 
+       /* Update device last_residency */
+       dev->last_residency = (int)idle_time;
+
        /* Tell the scheduler how much we idled: */
        sched_clock_idle_wakeup_event(idle_time_ns);
 
 
        lapic_timer_state_broadcast(pr, cx, 0);
        cx->time += idle_time;
-       return idle_time;
+       return index;
 }
 
 static int c3_cpu_count;
 /**
  * acpi_idle_enter_bm - enters C3 with proper BM handling
  * @dev: the target CPU
- * @state: the state data
+ * @index: the index of the suggested state
  *
  * If BM is detected, the deepest non-C3 idle state is entered instead.
  */
 static int acpi_idle_enter_bm(struct cpuidle_device *dev,
-                             struct cpuidle_state *state)
+                               int index)
 {
        struct acpi_processor *pr;
+       struct cpuidle_state *state = &dev->states[index];
        struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
        ktime_t  kt1, kt2;
        s64 idle_time_ns;
 
 
        pr = __this_cpu_read(processors);
+       dev->last_residency = 0;
 
        if (unlikely(!pr))
-               return 0;
+               return -EINVAL;
 
-       if (acpi_idle_suspend)
-               return(acpi_idle_enter_c1(dev, state));
+       if (acpi_idle_suspend) {
+               cpu_relax();
+               return -EINVAL;
+       }
 
        if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
-               if (dev->safe_state) {
-                       dev->last_state = dev->safe_state;
-                       return dev->safe_state->enter(dev, dev->safe_state);
+               if (dev->safe_state_index >= 0) {
+                       return dev->states[dev->safe_state_index].enter(dev,
+                                               dev->safe_state_index);
                } else {
                        local_irq_disable();
                        acpi_safe_halt();
                        local_irq_enable();
-                       return 0;
+                       return -EINVAL;
                }
        }
 
                if (unlikely(need_resched())) {
                        current_thread_info()->status |= TS_POLLING;
                        local_irq_enable();
-                       return 0;
+                       return -EINVAL;
                }
        }
 
        idle_time = idle_time_ns;
        do_div(idle_time, NSEC_PER_USEC);
 
+       /* Update device last_residency */
+       dev->last_residency = (int)idle_time;
+
        /* Tell the scheduler how much we idled: */
        sched_clock_idle_wakeup_event(idle_time_ns);
 
 
        lapic_timer_state_broadcast(pr, cx, 0);
        cx->time += idle_time;
-       return idle_time;
+       return index;
 }
 
 struct cpuidle_driver acpi_idle_driver = {
        }
 
        dev->cpu = pr->id;
+       dev->safe_state_index = -1;
        for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
                dev->states[i].name[0] = '\0';
                dev->states[i].desc[0] = '\0';
                                state->flags |= CPUIDLE_FLAG_TIME_VALID;
 
                        state->enter = acpi_idle_enter_c1;
-                       dev->safe_state = state;
+                       dev->safe_state_index = count;
                        break;
 
                        case ACPI_STATE_C2:
                        state->flags |= CPUIDLE_FLAG_TIME_VALID;
                        state->enter = acpi_idle_enter_simple;
-                       dev->safe_state = state;
+                       dev->safe_state_index = count;
                        break;
 
                        case ACPI_STATE_C3:
 
 {
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_state *target_state;
-       int next_state;
+       int next_state, entered_state;
 
        if (off)
                return -ENODEV;
 
        target_state = &dev->states[next_state];
 
-       /* enter the state and update stats */
-       dev->last_state = target_state;
-
        trace_power_start(POWER_CSTATE, next_state, dev->cpu);
        trace_cpu_idle(next_state, dev->cpu);
 
-       dev->last_residency = target_state->enter(dev, target_state);
+       entered_state = target_state->enter(dev, next_state);
 
        trace_power_end(dev->cpu);
        trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
 
-       if (dev->last_state)
-               target_state = dev->last_state;
-
-       target_state->time += (unsigned long long)dev->last_residency;
-       target_state->usage++;
+       if (entered_state >= 0) {
+               /*
+                * Update cpuidle counters. This could be moved into each
+                * driver's enter routine, but that would duplicate the
+                * same code across drivers.
+                */
+               dev->states[entered_state].time +=
+                               (unsigned long long)dev->last_residency;
+               dev->states[entered_state].usage++;
+       }
 
        /* give the governor an opportunity to reflect on the outcome */
        if (cpuidle_curr_governor->reflect)
-               cpuidle_curr_governor->reflect(dev);
+               cpuidle_curr_governor->reflect(dev, entered_state);
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
 
 #ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
+static int poll_idle(struct cpuidle_device *dev, int index)
 {
        ktime_t t1, t2;
        s64 diff;
-       int ret;
 
        t1 = ktime_get();
        local_irq_enable();
        if (diff > INT_MAX)
                diff = INT_MAX;
 
-       ret = (int) diff;
-       return ret;
+       dev->last_residency = (int) diff;
+
+       return index;
 }
 
 static void poll_idle_init(struct cpuidle_device *dev)
                dev->states[i].time = 0;
        }
        dev->last_residency = 0;
-       dev->last_state = NULL;
 
        smp_wmb();
 
 
        return 0;
 }
 
+/**
+ * ladder_reflect - update last_state_idx with the state actually entered
+ * @dev: the CPU
+ * @index: the index of the state actually entered
+ */
+static void ladder_reflect(struct cpuidle_device *dev, int index)
+{
+       struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
+       if (index > 0)
+               ldev->last_state_idx = index;
+}
+
 static struct cpuidle_governor ladder_governor = {
        .name =         "ladder",
        .rating =       10,
        .enable =       ladder_enable_device,
        .select =       ladder_select_state,
+       .reflect =      ladder_reflect,
        .owner =        THIS_MODULE,
 };
 
 
 /**
  * menu_reflect - records that data structures need update
  * @dev: the CPU
+ * @index: the index of the state actually entered
  *
  * NOTE: it's important to be fast here because this operation will add to
  *       the overall exit latency.
  */
-static void menu_reflect(struct cpuidle_device *dev)
+static void menu_reflect(struct cpuidle_device *dev, int index)
 {
        struct menu_device *data = &__get_cpu_var(menu_devices);
-       data->needs_update = 1;
+       data->last_state_idx = index;
+       if (index >= 0)
+               data->needs_update = 1;
 }
 
 /**
 
 static unsigned int lapic_timer_reliable_states = (1 << 1);     /* Default to only C1 */
 
 static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
-static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
+static int intel_idle(struct cpuidle_device *dev, int index);
 
 static struct cpuidle_state *cpuidle_state_table;
 
 /**
  * intel_idle
  * @dev: cpuidle_device
- * @state: cpuidle state
+ * @index: index of cpuidle state
  *
  */
-static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
+static int intel_idle(struct cpuidle_device *dev, int index)
 {
        unsigned long ecx = 1; /* break on interrupt flag */
+       struct cpuidle_state *state = &dev->states[index];
        unsigned long eax = (unsigned long)cpuidle_get_statedata(state);
        unsigned int cstate;
        ktime_t kt_before, kt_after;
        if (!(lapic_timer_reliable_states & (1 << (cstate))))
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
 
-       return usec_delta;
+       /* Update cpuidle counters */
+       dev->last_residency = (int)usec_delta;
+
+       return index;
 }
 
 static void __setup_broadcast_timer(void *arg)
 
        unsigned long long      time; /* in US */
 
        int (*enter)    (struct cpuidle_device *dev,
-                        struct cpuidle_state *state);
+                       int index);
 };
 
 /* Idle State Flags */
        int                     state_count;
        struct cpuidle_state    states[CPUIDLE_STATE_MAX];
        struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
-       struct cpuidle_state    *last_state;
 
        struct list_head        device_list;
        struct kobject          kobj;
        struct completion       kobj_unregister;
        void                    *governor_data;
-       struct cpuidle_state    *safe_state;
+       int                     safe_state_index;
 
        int (*prepare)          (struct cpuidle_device *dev);
 };
        void (*disable)         (struct cpuidle_device *dev);
 
        int  (*select)          (struct cpuidle_device *dev);
-       void (*reflect)         (struct cpuidle_device *dev);
+       void (*reflect)         (struct cpuidle_device *dev, int index);
 
        struct module           *owner;
 };
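
With the reflect() signature change, a governor is told which state the
driver actually entered instead of assuming its own selection took
effect. A sketch of a handler under the new signature (example_reflect()
is illustrative, modeled on the menu and ladder changes above; it
assumes the kernel's struct cpuidle_device from this header):

	static void example_reflect(struct cpuidle_device *dev, int index)
	{
		/* a negative index means ->enter() failed; learn nothing */
		if (index < 0)
			return;

		/* fold index and dev->last_residency into the stats here */
	}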