This removes the chrdev_lock from the counter subsystem. It was
intended to prevent opening the chrdev more than once. However, this
doesn't work in practice since userspace can duplicate file descriptors
and pass them to other processes. Since this protection can't be relied
on, it is best to simply remove it.
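
For illustration, here is a minimal userspace sketch of why such a check
cannot work (the /dev/counter0 path and the program are illustrative only,
not part of this change): dup() and fork() hand out additional references
to an already open file without ever going back through the chrdev's
open() handler, so a lock taken in open() and released in release() never
sees them.

  #include <stdio.h>
  #include <unistd.h>
  #include <fcntl.h>

  int main(void)
  {
          /* assumed example node; any counter chrdev behaves the same */
          int fd = open("/dev/counter0", O_RDWR);

          if (fd < 0) {
                  perror("open");
                  return 1;
          }

          /* second descriptor for the same open file, no open() call */
          int dup_fd = dup(fd);

          /* the child inherits both descriptors, so another process
           * now holds the "exclusively opened" chrdev as well */
          if (fork() == 0) {
                  printf("child holds fds %d and %d\n", fd, dup_fd);
                  _exit(0);
          }

          printf("parent holds fds %d and %d\n", fd, dup_fd);
          return 0;
  }
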
Suggested-by: Greg KH <gregkh@linuxfoundation.org>
Acked-by: William Breathitt Gray <vilhelm.gray@gmail.com>
Signed-off-by: David Lechner <david@lechnology.com>
Link: https://lore.kernel.org/r/20211017185521.3468640-1-david@lechnology.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 
                                                            typeof(*counter),
                                                            chrdev);
 
-       /* Ensure chrdev is not opened more than 1 at a time */
-       if (!atomic_add_unless(&counter->chrdev_lock, 1, 1))
-               return -EBUSY;
-
        get_device(&counter->dev);
        filp->private_data = counter;
 
        mutex_unlock(&counter->ops_exist_lock);
 
        put_device(&counter->dev);
-       atomic_dec(&counter->chrdev_lock);
 
        return ret;
 }
        mutex_init(&counter->events_lock);
 
        /* Initialize character device */
-       atomic_set(&counter->chrdev_lock, 0);
        cdev_init(&counter->chrdev, &counter_fops);
 
        /* Allocate Counter events queue */
 
                                           u64 val)
 {
        DECLARE_KFIFO_PTR(events, struct counter_event);
-       int err = 0;
-
-       /* Ensure chrdev is not opened more than 1 at a time */
-       if (!atomic_add_unless(&counter->chrdev_lock, 1, 1))
-               return -EBUSY;
+       int err;
 
        /* Allocate new events queue */
        err = kfifo_alloc(&events, val, GFP_KERNEL);
        if (err)
-               goto exit_early;
+               return err;
 
        /* Swap in new events queue */
        kfifo_free(&counter->events);
        counter->events.kfifo = events.kfifo;
 
-exit_early:
-       atomic_dec(&counter->chrdev_lock);
-
-       return err;
+       return 0;
 }
 
 static struct counter_comp counter_num_signals_comp =
 
  * @events:            queue of detected Counter events
  * @events_wait:       wait queue to allow blocking reads of Counter events
  * @events_lock:       lock to protect Counter events queue read operations
- * @chrdev_lock:       lock to limit chrdev to a single open at a time
  * @ops_exist_lock:    lock to prevent use during removal
  */
 struct counter_device {
        DECLARE_KFIFO_PTR(events, struct counter_event);
        wait_queue_head_t events_wait;
        struct mutex events_lock;
-       /*
-        * chrdev_lock is locked by counter_chrdev_open() and unlocked by
-        * counter_chrdev_release(), so a mutex is not possible here because
-        * chrdev_lock will invariably be held when returning to user space
-        */
-       atomic_t chrdev_lock;
        struct mutex ops_exist_lock;
 };