/*
  * UVD
  */
-#define AMDGPU_MAX_UVD_HANDLES 10
-#define AMDGPU_UVD_STACK_SIZE  (1024*1024)
-#define AMDGPU_UVD_HEAP_SIZE   (1024*1024)
-#define AMDGPU_UVD_FIRMWARE_OFFSET 256
+#define AMDGPU_DEFAULT_UVD_HANDLES     10
+#define AMDGPU_MAX_UVD_HANDLES         40
+#define AMDGPU_UVD_STACK_SIZE          (200*1024)
+#define AMDGPU_UVD_HEAP_SIZE           (256*1024)
+#define AMDGPU_UVD_SESSION_SIZE                (50*1024)
+#define AMDGPU_UVD_FIRMWARE_OFFSET     256
 
 struct amdgpu_uvd {
        struct amdgpu_bo        *vcpu_bo;
        void                    *cpu_addr;
        uint64_t                gpu_addr;
        void                    *saved_bo;
+       unsigned                max_handles;
        atomic_t                handles[AMDGPU_MAX_UVD_HANDLES];
        struct drm_file         *filp[AMDGPU_MAX_UVD_HANDLES];
        struct delayed_work     idle_work;
 
                return r;
        }
 
+       /* Set the default number of UVD handles supported by the firmware */
+       adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;
+
        hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
        family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
        version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
        DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
                version_major, version_minor, family_id);
 
+       /*
+        * Limit the number of UVD handles depending on the microcode major
+        * and minor versions. Firmware version 1.80 is the first to support
+        * 40 UVD instances, and all subsequent versions retain that
+        * support.
+        */
+       if ((version_major > 0x01) ||
+           ((version_major == 0x01) && (version_minor >= 0x50)))
+               adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
+
        bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
-                +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
+                 +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
+                 +  AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
        r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
                             AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                return r;
        }
 
-       for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+       for (i = 0; i < adev->uvd.max_handles; ++i) {
                atomic_set(&adev->uvd.handles[i], 0);
                adev->uvd.filp[i] = NULL;
        }
        if (adev->uvd.vcpu_bo == NULL)
                return 0;
 
-       for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+       for (i = 0; i < adev->uvd.max_handles; ++i)
                if (atomic_read(&adev->uvd.handles[i]))
                        break;
 
        struct amdgpu_ring *ring = &adev->uvd.ring;
        int i, r;
 
-       for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+       for (i = 0; i < adev->uvd.max_handles; ++i) {
                uint32_t handle = atomic_read(&adev->uvd.handles[i]);
                if (handle != 0 && adev->uvd.filp[i] == filp) {
                        struct fence *fence;
                amdgpu_bo_kunmap(bo);
 
                /* try to alloc a new handle */
-               for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+               for (i = 0; i < adev->uvd.max_handles; ++i) {
                        if (atomic_read(&adev->uvd.handles[i]) == handle) {
                                DRM_ERROR("Handle 0x%x already in use!\n", handle);
                                return -EINVAL;
                        return r;
 
                /* validate the handle */
-               for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+               for (i = 0; i < adev->uvd.max_handles; ++i) {
                        if (atomic_read(&adev->uvd.handles[i]) == handle) {
                                if (adev->uvd.filp[i] != ctx->parser->filp) {
                                        DRM_ERROR("UVD handle collision detected!\n");
 
        case 2:
                /* it's a destroy msg, free the handle */
-               for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+               for (i = 0; i < adev->uvd.max_handles; ++i)
                        atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
                amdgpu_bo_kunmap(bo);
                return 0;
 
        fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
 
-       for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+       for (i = 0; i < adev->uvd.max_handles; ++i)
                if (atomic_read(&adev->uvd.handles[i]))
                        ++handles;
 
 
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
 
        addr += size;
-       size = AMDGPU_UVD_STACK_SIZE >> 3;
+       size = AMDGPU_UVD_HEAP_SIZE >> 3;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
 
        addr += size;
-       size = AMDGPU_UVD_HEAP_SIZE >> 3;
+       size = (AMDGPU_UVD_STACK_SIZE +
+              (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
 
 
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
 
        offset += size;
-       size = AMDGPU_UVD_STACK_SIZE;
+       size = AMDGPU_UVD_HEAP_SIZE;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
 
        offset += size;
-       size = AMDGPU_UVD_HEAP_SIZE;
+       size = AMDGPU_UVD_STACK_SIZE +
+              (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
 
 
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
 
        offset += size;
-       size = AMDGPU_UVD_STACK_SIZE;
+       size = AMDGPU_UVD_HEAP_SIZE;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
 
        offset += size;
-       size = AMDGPU_UVD_HEAP_SIZE;
+       size = AMDGPU_UVD_STACK_SIZE +
+              (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
 
        WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+
+       WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
 }
 
 #if 0