};
 
 struct amdgpu_cu_info {
-       uint32_t number; /* total active CU number */
-       uint32_t ao_cu_mask;
        uint32_t max_waves_per_simd;
        uint32_t wave_front_size;
        uint32_t max_scratch_slots_per_cu;
        uint32_t lds_size;
+
+       /* total active CU number */
+       uint32_t number;
+       uint32_t ao_cu_mask;
+       uint32_t ao_cu_bitmap[4][4];
        uint32_t bitmap[4][4];
 };
 
 
  * - 3.15.0 - Export more gpu info for gfx9
  * - 3.16.0 - Add reserved vmid support
  * - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS.
+ * - 3.18.0 - Export gpu always on cu bitmap
  */
 #define KMS_DRIVER_MAJOR       3
-#define KMS_DRIVER_MINOR       17
+#define KMS_DRIVER_MINOR       18
 #define KMS_DRIVER_PATCHLEVEL  0
 
 int amdgpu_vram_limit = 0;
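
For illustration only, not part of the patch: cu_ao_bitmap is only filled in by kernels reporting KMS version 3.18 or newer, so userspace should gate on the version returned by drmGetVersion() before trusting the new field. A minimal sketch, assuming a libdrm environment; the helper name is made up:

#include <stdbool.h>
#include <xf86drm.h>

/* Hypothetical helper: true if the amdgpu KMS driver behind 'fd' is new
 * enough (>= 3.18) to populate drm_amdgpu_info_device.cu_ao_bitmap. */
static bool amdgpu_has_ao_cu_bitmap(int fd)
{
        drmVersionPtr ver = drmGetVersion(fd);
        bool ok;

        if (!ver)
                return false;
        ok = ver->version_major > 3 ||
             (ver->version_major == 3 && ver->version_minor >= 18);
        drmFreeVersion(ver);
        return ok;
}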
 
                dev_info.cu_active_number = adev->gfx.cu_info.number;
                dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
                dev_info.ce_ram_size = adev->gfx.ce_ram_size;
+               memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
+                      sizeof(adev->gfx.cu_info.ao_cu_bitmap));
                memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
                       sizeof(adev->gfx.cu_info.bitmap));
                dev_info.vram_type = adev->mc.vram_type;
 
                                mask <<= 1;
                        }
                        active_cu_number += counter;
-                       ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
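+                       /*
+                        * cu_ao_mask is a single 32-bit word, so it can only
+                        * hold the 8-bit AO bitmaps of the first 2 SEs x 2 SHs;
+                        * the full per-SE/per-SH layout goes into ao_cu_bitmap.
+                        */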
+                       if (i < 2 && j < 2)
+                               ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+                       cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
                }
        }
 
 
                                mask <<= 1;
                        }
                        active_cu_number += counter;
-                       ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+                       if (i < 2 && j < 2)
+                               ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+                       cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
                }
        }
        gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 
                                mask <<= 1;
                        }
                        active_cu_number += counter;
-                       ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+                       if (i < 2 && j < 2)
+                               ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+                       cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
                }
        }
        gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 
                                mask <<= 1;
                        }
                        active_cu_number += counter;
-                       ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+                       if (i < 2 && j < 2)
+                               ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+                       cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
                }
        }
        gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 
        __u64 max_memory_clock;
        /* cu information */
        __u32 cu_active_number;
+       /* NOTE: cu_ao_mask is INVALID, DON'T use it */
        __u32 cu_ao_mask;
        __u32 cu_bitmap[4][4];
        /** Render backend pipe mask. One render backend is CB+DB. */
        /* max gs wavefront per vgt*/
        __u32 max_gs_waves_per_vgt;
        __u32 _pad1;
+       /* always on cu bitmap */
+       __u32 cu_ao_bitmap[4][4];
 };
 
 struct drm_amdgpu_info_hw_ip {
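
For illustration only, not part of the patch: with the UAPI addition above, a client that has already verified KMS >= 3.18 (see the drmGetVersion() sketch earlier) can read the always-on CU layout through the existing AMDGPU_INFO_DEV_INFO query. A rough sketch, again with a made-up helper name:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

/* Hypothetical helper: fills 'bitmap' with the per-SE/per-SH always-on
 * CU masks reported by the kernel.  Returns 0 on success, otherwise the
 * error from the ioctl. */
static int amdgpu_query_ao_cu_bitmap(int fd, uint32_t bitmap[4][4])
{
        struct drm_amdgpu_info_device dev_info;
        struct drm_amdgpu_info request;
        int r;

        memset(&dev_info, 0, sizeof(dev_info));
        memset(&request, 0, sizeof(request));
        request.return_pointer = (uintptr_t)&dev_info;
        request.return_size = sizeof(dev_info);
        request.query = AMDGPU_INFO_DEV_INFO;

        r = drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request));
        if (r)
                return r;

        /* cu_ao_bitmap is the new field; cu_ao_mask remains only for old
         * userspace and cannot describe more than 2 SEs x 2 SHs. */
        memcpy(bitmap, dev_info.cu_ao_bitmap, sizeof(dev_info.cu_ao_bitmap));
        return 0;
}

The 4x4 shape mirrors the existing cu_bitmap field, so both arrays can be indexed the same way per shader engine / SH.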