goto out;
        }
 
-       mutex_lock(&hdev->fpriv_list_lock);
+       mutex_lock(&hdev->fpriv_ctrl_list_lock);
        list_del(&hpriv->dev_node);
-       mutex_unlock(&hdev->fpriv_list_lock);
+       mutex_unlock(&hdev->fpriv_ctrl_list_lock);
 out:
        put_pid(hpriv->taskpid);
 
        INIT_LIST_HEAD(&hdev->cs_mirror_list);
        spin_lock_init(&hdev->cs_mirror_lock);
        INIT_LIST_HEAD(&hdev->fpriv_list);
+       INIT_LIST_HEAD(&hdev->fpriv_ctrl_list);
        mutex_init(&hdev->fpriv_list_lock);
+       mutex_init(&hdev->fpriv_ctrl_list_lock);
        atomic_set(&hdev->in_reset, 0);
        mutex_init(&hdev->clk_throttling.lock);
 
        mutex_destroy(&hdev->send_cpu_message_lock);
 
        mutex_destroy(&hdev->fpriv_list_lock);
+       mutex_destroy(&hdev->fpriv_ctrl_list_lock);
 
        mutex_destroy(&hdev->clk_throttling.lock);
 
        /* Flush anyone that is inside device open */
        mutex_lock(&hdev->fpriv_list_lock);
        mutex_unlock(&hdev->fpriv_list_lock);
+       mutex_lock(&hdev->fpriv_ctrl_list_lock);
+       mutex_unlock(&hdev->fpriv_ctrl_list_lock);
 }
 
 static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset)
        return rc;
 }
 
-static int device_kill_open_processes(struct hl_device *hdev, u32 timeout)
+static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool control_dev)
 {
-       struct hl_fpriv *hpriv;
        struct task_struct *task = NULL;
+       struct list_head *fd_list;
+       struct hl_fpriv *hpriv;
+       struct mutex *fd_lock;
        u32 pending_cnt;
 
+       fd_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
+       fd_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;
 
        /* Giving time for user to close FD, and for processes that are inside
         * hl_device_open to finish
         */
-       if (!list_empty(&hdev->fpriv_list))
+       if (!list_empty(fd_list))
                ssleep(1);
 
        if (timeout) {
                }
        }
 
-       mutex_lock(&hdev->fpriv_list_lock);
+       mutex_lock(fd_lock);
 
        /* This section must be protected because we are dereferencing
         * pointers that are freed if the process exits
         */
-       list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
+       list_for_each_entry(hpriv, fd_list, dev_node) {
                task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);
                if (task) {
                        dev_info(hdev->dev, "Killing user process pid=%d\n",
                } else {
                        dev_warn(hdev->dev,
                                "Can't get task struct for PID so giving up on killing process\n");
-                       mutex_unlock(&hdev->fpriv_list_lock);
+                       mutex_unlock(fd_lock);
                        return -ETIME;
                }
        }
 
-       mutex_unlock(&hdev->fpriv_list_lock);
+       mutex_unlock(fd_lock);
 
        /*
         * We killed the open users, but that doesn't mean they are closed.
         */
 
 wait_for_processes:
-       while ((!list_empty(&hdev->fpriv_list)) && (pending_cnt)) {
+       while ((!list_empty(fd_list)) && (pending_cnt)) {
                dev_dbg(hdev->dev,
                        "Waiting for all unmap operations to finish before hard reset\n");
 
        }
 
        /* All processes exited successfully */
-       if (list_empty(&hdev->fpriv_list))
+       if (list_empty(fd_list))
                return 0;
 
        /* Give up waiting for processes to exit */
        return -EBUSY;
 }
 
-static void device_disable_open_processes(struct hl_device *hdev)
+static void device_disable_open_processes(struct hl_device *hdev, bool control_dev)
 {
+       struct list_head *fd_list;
        struct hl_fpriv *hpriv;
+       struct mutex *fd_lock;
 
-       mutex_lock(&hdev->fpriv_list_lock);
-       list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node)
+       fd_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
+       fd_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;
+
+       mutex_lock(fd_lock);
+       list_for_each_entry(hpriv, fd_list, dev_node)
                hpriv->hdev = NULL;
-       mutex_unlock(&hdev->fpriv_list_lock);
+       mutex_unlock(fd_lock);
 }
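
Both helpers above resolve the same list/lock pair from the new control_dev flag. As an illustrative sketch only (a hypothetical helper, not part of this patch), that selection could be factored out like so:

	/*
	 * Illustrative sketch, not part of this patch: a hypothetical helper
	 * that device_kill_open_processes() and device_disable_open_processes()
	 * could share to resolve the list/lock pair from the control_dev flag.
	 */
	static void hl_get_fd_list(struct hl_device *hdev, bool control_dev,
					struct mutex **fd_lock,
					struct list_head **fd_list)
	{
		*fd_lock = control_dev ? &hdev->fpriv_ctrl_list_lock :
					&hdev->fpriv_list_lock;
		*fd_list = control_dev ? &hdev->fpriv_ctrl_list :
					&hdev->fpriv_list;
	}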
 
 static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
                 * process can't really exit until all its CSs are done, which
                 * is what we do in cs rollback
                 */
-               rc = device_kill_open_processes(hdev, 0);
+               rc = device_kill_open_processes(hdev, 0, false);
 
                if (rc == -EBUSY) {
                        if (hdev->device_fini_pending) {
                "Waiting for all processes to exit (timeout of %u seconds)",
                HL_PENDING_RESET_LONG_SEC);
 
-       rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC);
+       rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC, false);
        if (rc) {
                dev_crit(hdev->dev, "Failed to kill all open processes\n");
-               device_disable_open_processes(hdev);
+               device_disable_open_processes(hdev, false);
+       }
+
+       rc = device_kill_open_processes(hdev, 0, true);
+       if (rc) {
+               dev_crit(hdev->dev, "Failed to kill all control device open processes\n");
+               device_disable_open_processes(hdev, true);
        }
 
        hl_cb_pool_fini(hdev);
 
  * @dev_node: node in the device list of file private data
  * @refcount: number of related contexts.
  * @restore_phase_mutex: lock for context switch and restore phase.
- * @is_control: true for control device, false otherwise
  */
 struct hl_fpriv {
        struct hl_device        *hdev;
        struct list_head        dev_node;
        struct kref             refcount;
        struct mutex            restore_phase_mutex;
-       u8                      is_control;
 };
 
 
  * @internal_cb_va_base: internal cb pool mmu virtual address base
  * @fpriv_list: list of file private data structures. Each structure is created
  *              when a user opens the device
+ * @fpriv_ctrl_list: list of file private data structures. Each structure is created
+ *              when a user opens the control device
  * @fpriv_list_lock: protects the fpriv_list
+ * @fpriv_ctrl_list_lock: protects the fpriv_ctrl_list
  * @aggregated_cs_counters: aggregated cs counters among all contexts
  * @mmu_priv: device-specific MMU data.
  * @mmu_func: device-related MMU functions.
        u64                             internal_cb_va_base;
 
        struct list_head                fpriv_list;
+       struct list_head                fpriv_ctrl_list;
        struct mutex                    fpriv_list_lock;
+       struct mutex                    fpriv_ctrl_list_lock;
 
        struct hl_cs_counters_atomic    aggregated_cs_counters;
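
With is_control removed from struct hl_fpriv, a control-device file is now identified by which list its fpriv sits on rather than by a per-fpriv flag. The following is a hedged sketch of what the control-device open path might look like under that assumption; the helper name is hypothetical and the body only mirrors the list_del() shown in the release path above, it is not taken from this patch:

	/*
	 * Hypothetical sketch, not from this patch: with the is_control flag
	 * gone, the control-device open path would presumably register the new
	 * fpriv on the dedicated control list, under the control list lock,
	 * mirroring the list_del() done in the release path.
	 */
	static void hl_ctrl_fpriv_add(struct hl_device *hdev, struct hl_fpriv *hpriv)
	{
		hpriv->hdev = hdev;

		mutex_lock(&hdev->fpriv_ctrl_list_lock);
		list_add(&hpriv->dev_node, &hdev->fpriv_ctrl_list);
		mutex_unlock(&hdev->fpriv_ctrl_list_lock);
	}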