}
 }
 
-static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
+static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
+                                  struct intel_power_domain_mask *mask)
 {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        struct drm_encoder *encoder;
        enum pipe pipe = crtc->pipe;
-       u64 mask;
+
+       bitmap_zero(mask->bits, POWER_DOMAIN_NUM);
 
        if (!crtc_state->hw.active)
-               return 0;
+               return;
 
-       mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
-       mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(cpu_transcoder));
+       set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits);
+       set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits);
        if (crtc_state->pch_pfit.enabled ||
            crtc_state->pch_pfit.force_thru)
-               mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
+               set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);
 
        drm_for_each_encoder_mask(encoder, &dev_priv->drm,
                                  crtc_state->uapi.encoder_mask) {
                struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
 
-               mask |= BIT_ULL(intel_encoder->power_domain);
+               set_bit(intel_encoder->power_domain, mask->bits);
        }
 
        if (HAS_DDI(dev_priv) && crtc_state->has_audio)
-               mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);
+               set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);
 
        if (crtc_state->shared_dpll)
-               mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
+               set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);
 
        if (crtc_state->dsc.compression_enable)
-               mask |= BIT_ULL(intel_dsc_power_domain(crtc, cpu_transcoder));
-
-       return mask;
+               set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
 }
 
-static u64
-modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
+static void
+modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
+                              struct intel_power_domain_mask *old_domains)
 {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain domain;
-       u64 domains, new_domains, old_domains;
+       struct intel_power_domain_mask domains, new_domains;
 
-       domains = get_crtc_power_domains(crtc_state);
+       get_crtc_power_domains(crtc_state, &domains);
 
-       new_domains = domains & ~crtc->enabled_power_domains.mask;
-       old_domains = crtc->enabled_power_domains.mask & ~domains;
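+       /* Grab new_domains now; the caller puts *old_domains after the update. */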
+       bitmap_andnot(new_domains.bits,
+                     domains.bits,
+                     crtc->enabled_power_domains.mask.bits,
+                     POWER_DOMAIN_NUM);
+       bitmap_andnot(old_domains->bits,
+                     crtc->enabled_power_domains.mask.bits,
+                     domains.bits,
+                     POWER_DOMAIN_NUM);
 
-       for_each_power_domain(domain, new_domains)
+       for_each_power_domain(domain, &new_domains)
                intel_display_power_get_in_set(dev_priv,
                                               &crtc->enabled_power_domains,
                                               domain);
-
-       return old_domains;
 }
 
 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
-                                          u64 domains)
+                                          struct intel_power_domain_mask *domains)
 {
        intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
                                            &crtc->enabled_power_domains,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *new_crtc_state, *old_crtc_state;
        struct intel_crtc *crtc;
-       u64 put_domains[I915_MAX_PIPES] = {};
+       struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
        intel_wakeref_t wakeref = 0;
        int i;
 
                                            new_crtc_state, i) {
                if (intel_crtc_needs_modeset(new_crtc_state) ||
                    new_crtc_state->update_pipe) {
-
-                       put_domains[crtc->pipe] =
-                               modeset_get_crtc_power_domains(new_crtc_state);
+                       modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
                }
        }
 
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                intel_post_plane_update(state, crtc);
 
-               modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
+               modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);
 
                intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
 
        for_each_intel_crtc(dev, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);
-               u64 put_domains;
+               struct intel_power_domain_mask put_domains;
 
-               put_domains = modeset_get_crtc_power_domains(crtc_state);
-               if (drm_WARN_ON(dev, put_domains))
-                       modeset_put_crtc_power_domains(crtc, put_domains);
+               modeset_get_crtc_power_domains(crtc_state, &put_domains);
+               if (drm_WARN_ON(dev, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM)))
+                       modeset_put_crtc_power_domains(crtc, &put_domains);
        }
 
        intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
 
 #include "intel_snps_phy.h"
 #include "vlv_sideband.h"
 
-#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask)    \
+#define for_each_power_domain_well(__dev_priv, __power_well, __domain) \
        for_each_power_well(__dev_priv, __power_well)                           \
-               for_each_if((__power_well)->domains & (__domain_mask))
+               for_each_if(test_bit((__domain), (__power_well)->domains.bits))
 
-#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \
+#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \
        for_each_power_well_reverse(__dev_priv, __power_well)                   \
-               for_each_if((__power_well)->domains & (__domain_mask))
+               for_each_if(test_bit((__domain), (__power_well)->domains.bits))
 
 const char *
 intel_display_power_domain_str(enum intel_display_power_domain domain)
 
        is_enabled = true;
 
-       for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
+       for_each_power_domain_well_reverse(dev_priv, power_well, domain) {
                if (intel_power_well_is_always_on(power_well))
                        continue;
 
 
 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
 
-static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
+static void __async_put_domains_mask(struct i915_power_domains *power_domains,
+                                    struct intel_power_domain_mask *mask)
 {
-       return power_domains->async_put_domains[0] |
-              power_domains->async_put_domains[1];
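+       /* Return the union of the two pending async-put domain sets via @mask. */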
+       bitmap_or(mask->bits,
+                 power_domains->async_put_domains[0].bits,
+                 power_domains->async_put_domains[1].bits,
+                 POWER_DOMAIN_NUM);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
        struct drm_i915_private *i915 = container_of(power_domains,
                                                     struct drm_i915_private,
                                                     power_domains);
-       return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] &
-                           power_domains->async_put_domains[1]);
+
+       return !drm_WARN_ON(&i915->drm,
+                           bitmap_intersects(power_domains->async_put_domains[0].bits,
+                                             power_domains->async_put_domains[1].bits,
+                                             POWER_DOMAIN_NUM));
 }
 
 static bool
        struct drm_i915_private *i915 = container_of(power_domains,
                                                     struct drm_i915_private,
                                                     power_domains);
+       struct intel_power_domain_mask async_put_mask;
        enum intel_display_power_domain domain;
        bool err = false;
 
        err |= !assert_async_put_domain_masks_disjoint(power_domains);
-       err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref !=
-                          !!__async_put_domains_mask(power_domains));
+       __async_put_domains_mask(power_domains, &async_put_mask);
+       err |= drm_WARN_ON(&i915->drm,
+                          !!power_domains->async_put_wakeref !=
+                          !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));
 
-       for_each_power_domain(domain, __async_put_domains_mask(power_domains))
+       for_each_power_domain(domain, &async_put_mask)
                err |= drm_WARN_ON(&i915->drm,
                                   power_domains->domain_use_count[domain] != 1);
 
 }
 
 static void print_power_domains(struct i915_power_domains *power_domains,
-                               const char *prefix, u64 mask)
+                               const char *prefix, struct intel_power_domain_mask *mask)
 {
        struct drm_i915_private *i915 = container_of(power_domains,
                                                     struct drm_i915_private,
                                                     power_domains);
        enum intel_display_power_domain domain;
 
-       drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
+       drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
        for_each_power_domain(domain, mask)
                drm_dbg(&i915->drm, "%s use_count %d\n",
                        intel_display_power_domain_str(domain),
                power_domains->async_put_wakeref);
 
        print_power_domains(power_domains, "async_put_domains[0]",
-                           power_domains->async_put_domains[0]);
+                           &power_domains->async_put_domains[0]);
        print_power_domains(power_domains, "async_put_domains[1]",
-                           power_domains->async_put_domains[1]);
+                           &power_domains->async_put_domains[1]);
 }
 
 static void
 
 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
 
-static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
+static void async_put_domains_mask(struct i915_power_domains *power_domains,
+                                  struct intel_power_domain_mask *mask)
 {
        assert_async_put_domain_masks_disjoint(power_domains);
 
-       return __async_put_domains_mask(power_domains);
+       __async_put_domains_mask(power_domains, mask);
 }
 
 static void
 {
        assert_async_put_domain_masks_disjoint(power_domains);
 
-       power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
-       power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
+       clear_bit(domain, power_domains->async_put_domains[0].bits);
+       clear_bit(domain, power_domains->async_put_domains[1].bits);
 }
 
 static bool
                                       enum intel_display_power_domain domain)
 {
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct intel_power_domain_mask async_put_mask;
        bool ret = false;
 
-       if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
+       async_put_domains_mask(power_domains, &async_put_mask);
+       if (!test_bit(domain, async_put_mask.bits))
                goto out_verify;
 
        async_put_domains_clear_domain(power_domains, domain);
 
        ret = true;
 
-       if (async_put_domains_mask(power_domains))
+       async_put_domains_mask(power_domains, &async_put_mask);
+       if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
                goto out_verify;
 
        cancel_delayed_work(&power_domains->async_put_work);
        if (intel_display_power_grab_async_put_ref(dev_priv, domain))
                return;
 
-       for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
+       for_each_power_domain_well(dev_priv, power_well, domain)
                intel_power_well_get(dev_priv, power_well);
 
        power_domains->domain_use_count[domain]++;
        struct i915_power_domains *power_domains;
        struct i915_power_well *power_well;
        const char *name = intel_display_power_domain_str(domain);
+       struct intel_power_domain_mask async_put_mask;
 
        power_domains = &dev_priv->power_domains;
 
        drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
                 "Use count on domain %s is already zero\n",
                 name);
+       async_put_domains_mask(power_domains, &async_put_mask);
        drm_WARN(&dev_priv->drm,
-                async_put_domains_mask(power_domains) & BIT_ULL(domain),
+                test_bit(domain, async_put_mask.bits),
                 "Async disabling of domain %s is pending\n",
                 name);
 
        power_domains->domain_use_count[domain]--;
 
-       for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
+       for_each_power_domain_well_reverse(dev_priv, power_well, domain)
                intel_power_well_put(dev_priv, power_well);
 }
 
 }
 
 static void
-release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
+release_async_put_domains(struct i915_power_domains *power_domains,
+                         struct intel_power_domain_mask *mask)
 {
        struct drm_i915_private *dev_priv =
                container_of(power_domains, struct drm_i915_private,
                goto out_verify;
 
        release_async_put_domains(power_domains,
-                                 power_domains->async_put_domains[0]);
+                                 &power_domains->async_put_domains[0]);
 
        /* Requeue the work if more domains were async put meanwhile. */
-       if (power_domains->async_put_domains[1]) {
-               power_domains->async_put_domains[0] =
-                       fetch_and_zero(&power_domains->async_put_domains[1]);
+       if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
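+               /* Equivalent of fetch_and_zero(): copy slot 1 into slot 0, then clear slot 1. */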
+               bitmap_copy(power_domains->async_put_domains[0].bits,
+                           power_domains->async_put_domains[1].bits,
+                           POWER_DOMAIN_NUM);
+               bitmap_zero(power_domains->async_put_domains[1].bits,
+                           POWER_DOMAIN_NUM);
                queue_async_put_domains_work(power_domains,
                                             fetch_and_zero(&new_work_wakeref));
        } else {
 
        /* Let a pending work requeue itself or queue a new one. */
        if (power_domains->async_put_wakeref) {
-               power_domains->async_put_domains[1] |= BIT_ULL(domain);
+               set_bit(domain, power_domains->async_put_domains[1].bits);
        } else {
-               power_domains->async_put_domains[0] |= BIT_ULL(domain);
+               set_bit(domain, power_domains->async_put_domains[0].bits);
                queue_async_put_domains_work(power_domains,
                                             fetch_and_zero(&work_wakeref));
        }
 void intel_display_power_flush_work(struct drm_i915_private *i915)
 {
        struct i915_power_domains *power_domains = &i915->power_domains;
+       struct intel_power_domain_mask async_put_mask;
        intel_wakeref_t work_wakeref;
 
        mutex_lock(&power_domains->lock);
        if (!work_wakeref)
                goto out_verify;
 
-       release_async_put_domains(power_domains,
-                                 async_put_domains_mask(power_domains));
+       async_put_domains_mask(power_domains, &async_put_mask);
+       release_async_put_domains(power_domains, &async_put_mask);
        cancel_delayed_work(&power_domains->async_put_work);
 
 out_verify:
 {
        intel_wakeref_t __maybe_unused wf;
 
-       drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
+       drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
 
        wf = intel_display_power_get(i915, domain);
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
        power_domain_set->wakerefs[domain] = wf;
 #endif
-       power_domain_set->mask |= BIT_ULL(domain);
+       set_bit(domain, power_domain_set->mask.bits);
 }
 
 bool
 {
        intel_wakeref_t wf;
 
-       drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
+       drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
 
        wf = intel_display_power_get_if_enabled(i915, domain);
        if (!wf)
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
        power_domain_set->wakerefs[domain] = wf;
 #endif
-       power_domain_set->mask |= BIT_ULL(domain);
+       set_bit(domain, power_domain_set->mask.bits);
 
        return true;
 }
 void
 intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
                                    struct intel_display_power_domain_set *power_domain_set,
-                                   u64 mask)
+                                   struct intel_power_domain_mask *mask)
 {
        enum intel_display_power_domain domain;
 
-       drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask);
+       drm_WARN_ON(&i915->drm,
+                   !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));
 
        for_each_power_domain(domain, mask) {
                intel_wakeref_t __maybe_unused wf = -1;
                wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
 #endif
                intel_display_power_put(i915, domain, wf);
-               power_domain_set->mask &= ~BIT_ULL(domain);
+               clear_bit(domain, power_domain_set->mask.bits);
        }
 }
 
        dev_priv->dmc.target_dc_state =
                sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 
-       BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
-
        mutex_init(&power_domains->lock);
 
        INIT_DELAYED_WORK(&power_domains->async_put_work,