#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
 ACPI_MODULE_NAME("processor_idle");
 
+#define ACPI_IDLE_STATE_START  (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)
+
 static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
 module_param(max_cstate, uint, 0000);
 static unsigned int nocst __read_mostly;
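ACPI_IDLE_STATE_START is a driver-local stand-in for the global CPUIDLE_DRIVER_STATE_START constant that the final hunk of this patch deletes: on architectures with CONFIG_ARCH_HAS_CPU_RELAX the ACPI idle driver installs a polling state at index 0, so its real C-states start at index 1; everywhere else they start at index 0.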
 
        if (cx->type != ACPI_STATE_C1) {
                if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
-                       index = CPUIDLE_DRIVER_STATE_START;
+                       index = ACPI_IDLE_STATE_START;
                        cx = per_cpu(acpi_cstate[index], dev->cpu);
                } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
                        if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
 static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
                                           struct cpuidle_device *dev)
 {
-       int i, count = CPUIDLE_DRIVER_STATE_START;
+       int i, count = ACPI_IDLE_STATE_START;
        struct acpi_processor_cx *cx;
 
        if (max_cstate == 0)
 
 static int acpi_processor_setup_cstates(struct acpi_processor *pr)
 {
-       int i, count = CPUIDLE_DRIVER_STATE_START;
+       int i, count = ACPI_IDLE_STATE_START;
        struct acpi_processor_cx *cx;
        struct cpuidle_state *state;
        struct cpuidle_driver *drv = &acpi_idle_driver;
                return -EINVAL;
 
        drv->safe_state_index = -1;
-       for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
+       for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
                drv->states[i].name[0] = '\0';
                drv->states[i].desc[0] = '\0';
        }
 
        state->power_usage = -1;
        state->enter = poll_idle;
        state->disabled = false;
+       state->flags = CPUIDLE_FLAG_POLLING;
 }
 #else
 static void poll_idle_init(struct cpuidle_driver *drv) {}
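Tagging the poll state with CPUIDLE_FLAG_POLLING in poll_idle_init() is what lets the governors below drop the compile-time constant: whether index 0 busy-polls can now be read from the driver's state table at run time. A minimal userspace sketch of that detection; the structs are cut-down stand-ins for the kernel's, and first_real_idx() is a hypothetical helper, not a kernel function:

#include <stdio.h>

#define CPUIDLE_FLAG_POLLING (0x01)

/* Simplified stand-ins for the kernel structures. */
struct cpuidle_state { unsigned int flags; };
struct cpuidle_driver { struct cpuidle_state states[8]; int state_count; };

/* Index of the first non-polling state: 1 if state 0 busy-polls, else 0. */
static int first_real_idx(const struct cpuidle_driver *drv)
{
        return (drv->states[0].flags & CPUIDLE_FLAG_POLLING) ? 1 : 0;
}

int main(void)
{
        struct cpuidle_driver with_poll = {
                .states = { { .flags = CPUIDLE_FLAG_POLLING } },
                .state_count = 2,
        };
        struct cpuidle_driver no_poll = { .state_count = 1 };

        printf("polling driver: first_idx = %d\n", first_real_idx(&with_poll)); /* 1 */
        printf("no poll state:  first_idx = %d\n", first_real_idx(&no_poll));   /* 0 */
        return 0;
}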
 
        struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
        struct ladder_device_state *last_state;
        int last_residency, last_idx = ldev->last_state_idx;
+       int first_idx = (drv->states[0].flags & CPUIDLE_FLAG_POLLING) ? 1 : 0;
        int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
 
        /* Special case when user has set very strict latency requirement */
        }
 
        /* consider demotion */
-       if (last_idx > CPUIDLE_DRIVER_STATE_START &&
+       if (last_idx > first_idx &&
            (drv->states[last_idx].disabled ||
            dev->states_usage[last_idx].disable ||
            drv->states[last_idx].exit_latency > latency_req)) {
                int i;
 
-               for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
+               for (i = last_idx - 1; i > first_idx; i--) {
                        if (drv->states[i].exit_latency <= latency_req)
                                break;
                }
                return i;
        }
 
-       if (last_idx > CPUIDLE_DRIVER_STATE_START &&
+       if (last_idx > first_idx &&
            last_residency < last_state->threshold.demotion_time) {
                last_state->stats.demotion_count++;
                last_state->stats.promotion_count = 0;
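Both demotion checks above now bottom out at first_idx instead of the old global constant, so the ladder governor still never demotes into a polling state when the driver has one, while drivers without one keep index 0 available as a demotion target.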
                                struct cpuidle_device *dev)
 {
        int i;
+       int first_idx = (drv->states[0].flags & CPUIDLE_FLAG_POLLING) ? 1 : 0;
        struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
        struct ladder_device_state *lstate;
        struct cpuidle_state *state;
 
-       ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
+       ldev->last_state_idx = first_idx;
 
-       for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
+       for (i = first_idx; i < drv->state_count; i++) {
                state = &drv->states[i];
                lstate = &ldev->states[i];
 
 
                if (i < drv->state_count - 1)
                        lstate->threshold.promotion_time = state->exit_latency;
-               if (i > CPUIDLE_DRIVER_STATE_START)
+               if (i > first_idx)
                        lstate->threshold.demotion_time = state->exit_latency;
        }
 
 
        expected_interval = get_typical_interval(data);
        expected_interval = min(expected_interval, data->next_timer_us);
 
-       if (CPUIDLE_DRIVER_STATE_START > 0) {
-               struct cpuidle_state *s = &drv->states[CPUIDLE_DRIVER_STATE_START];
+       first_idx = 0;
+       if (drv->states[0].flags & CPUIDLE_FLAG_POLLING) {
+               struct cpuidle_state *s = &drv->states[1];
                unsigned int polling_threshold;
 
                /*
                 * We want to default to C1 (hlt), not to busy polling
                 * unless the timer is happening really really soon.
                 */
                polling_threshold = max_t(unsigned int, 20, s->target_residency);
                if (data->next_timer_us > polling_threshold &&
                    latency_req > s->exit_latency && !s->disabled &&
-                   !dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable)
-                       first_idx = CPUIDLE_DRIVER_STATE_START;
-               else
-                       first_idx = CPUIDLE_DRIVER_STATE_START - 1;
-       } else {
-               first_idx = 0;
+                   !dev->states_usage[1].disable)
+                       first_idx = 1;
        }
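The restructured block keeps the existing menu policy: default to C1 rather than busy polling unless the next timer fires within max(20 us, the first real state's target residency). A self-contained sketch of that decision with made-up state parameters; menu_first_idx() is hypothetical, and the per-device dev->states_usage[1].disable check is omitted for brevity:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the first real idle state (index 1). */
struct state { unsigned int exit_latency, target_residency; bool disabled; };

/* Return the first state index the governor should consider. */
static int menu_first_idx(const struct state *c1, unsigned int next_timer_us,
                          unsigned int latency_req)
{
        /* Poll only if the next timer fires within max(20us, C1 residency). */
        unsigned int polling_threshold =
                c1->target_residency > 20 ? c1->target_residency : 20;

        if (next_timer_us > polling_threshold &&
            latency_req > c1->exit_latency && !c1->disabled)
                return 1;       /* skip the poll state, start at C1 */
        return 0;               /* timer is imminent: polling is allowed */
}

int main(void)
{
        struct state c1 = { .exit_latency = 2, .target_residency = 2 };

        printf("%d\n", menu_first_idx(&c1, 500, 100)); /* 1: sleep in C1 */
        printf("%d\n", menu_first_idx(&c1, 10, 100));  /* 0: may busy-poll */
        return 0;
}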
 
        /*
 
 
 /* Idle State Flags */
 #define CPUIDLE_FLAG_NONE       (0x00)
+#define CPUIDLE_FLAG_POLLING   (0x01) /* polling state */
 #define CPUIDLE_FLAG_COUPLED   (0x02) /* state applies to multiple cpus */
 #define CPUIDLE_FLAG_TIMER_STOP (0x04)  /* timer is stopped on this state */
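The new flag claims bit 0, which was previously unused (CPUIDLE_FLAG_NONE is just the empty value); COUPLED and TIMER_STOP keep bits 1 and 2.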
 
 {return 0;}
 #endif
 
-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
-#define CPUIDLE_DRIVER_STATE_START     1
-#else
-#define CPUIDLE_DRIVER_STATE_START     0
-#endif
-
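With every user converted to ACPI_IDLE_STATE_START or a run-time CPUIDLE_FLAG_POLLING check, the conditional definition can be removed outright; code that needs this information should now test drv->states[0].flags instead.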
 #define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx)       \
 ({                                                             \
        int __ret;                                              \