www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
staging: vchiq_core: Use killable wait completions for bulk transfers
authorUmang Jain <umang.jain@ideasonboard.com>
Wed, 18 Sep 2024 16:30:55 +0000 (22:00 +0530)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 9 Oct 2024 09:58:46 +0000 (11:58 +0200)
commit f27e47bc6b8b ("staging: vchiq: use completions instead of
semaphores") introduced completions for events in vchiq interface.
It introduced _interruptible() version of completions for waiting
on events. However, it missed a subtle down_interruptible() macro
override in vchiq_killable.h, which used to mask most of the signals
and only interrupt on fatal ones.

The above issue was fixed in commit a772f116702e ("staging: vchiq: switch
to wait_for_completion_killable"). Given the override logic of
down_interruptible() that existed in vchiq_killable.h, that commit
fixed the completions with the correct variation i.e. killable() family
of functions.

However, commit a772f116702e ("staging: vchiq: switch to
wait_for_completion_killable") later got reverted [1] due to high CPU
load noticed by various downstream and upstream distributions [2].
Reverting the commit solved this problem but the root cause was never
diagnosed, and the entire commit was reverted.

This patch brings back killable version of wait events but only for
bulk transfers and queue_message() transfer code paths.

The idea is to bring back killable versions for various event
completions in a phased manner, so that we do not regress again as
noticed in [2]. Hence, no other wait events are converted from
interruptible -> killable in this patch.

Since the bulk transfers are no longer interruptible (but killable),
drop the "_interruptible" suffix from all vchiq_bulk_xfer_* functions.

[1]: commit 086efbabdc04 ("staging: vchiq: revert "switch to wait_for_completion_killable"")
[2]: https://patchwork.kernel.org/project/linux-arm-kernel/cover/20190509143137.31254-1-nsaenzjulienne@suse.de/

Signed-off-by: Umang Jain <umang.jain@ideasonboard.com>
Link: https://lore.kernel.org/r/20240918163100.870596-2-umang.jain@ideasonboard.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c

index e09642a19243b8cdd41a330f3ea4663093ec9b29..3d469b88a1182771e6c8149f4464b3e9eea5d793 100644 (file)
@@ -857,10 +857,9 @@ vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const
                switch (mode) {
                case VCHIQ_BULK_MODE_NOCALLBACK:
                case VCHIQ_BULK_MODE_CALLBACK:
-                       ret = vchiq_bulk_xfer_callback_interruptible(instance, handle,
-                                                                    (void *)data, NULL,
-                                                                    size, mode, userdata,
-                                                                    VCHIQ_BULK_TRANSMIT);
+                       ret = vchiq_bulk_xfer_callback(instance, handle, (void *)data,
+                                                      NULL, size, mode, userdata,
+                                                      VCHIQ_BULK_TRANSMIT);
                        break;
                case VCHIQ_BULK_MODE_BLOCKING:
                        ret = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
@@ -895,10 +894,8 @@ int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle,
                switch (mode) {
                case VCHIQ_BULK_MODE_NOCALLBACK:
                case VCHIQ_BULK_MODE_CALLBACK:
-                       ret = vchiq_bulk_xfer_callback_interruptible(instance, handle,
-                                                                    (void *)data, NULL,
-                                                                    size, mode, userdata,
-                                                                    VCHIQ_BULK_RECEIVE);
+                       ret = vchiq_bulk_xfer_callback(instance, handle, (void *)data, NULL,
+                                                      size, mode, userdata, VCHIQ_BULK_RECEIVE);
                        break;
                case VCHIQ_BULK_MODE_BLOCKING:
                        ret = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
@@ -969,8 +966,8 @@ vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handl
                        return -ENOMEM;
        }
 
-       ret = vchiq_bulk_xfer_blocking_interruptible(instance, handle, data, NULL, size,
-                                                    &waiter->bulk_waiter, dir);
+       ret = vchiq_bulk_xfer_blocking(instance, handle, data, NULL, size,
+                                      &waiter->bulk_waiter, dir);
        if ((ret != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
                struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
 
index 1f94db6e0cd984847207a2b3668af1326ac77820..a381a633d3d56bb1f67a9398efe9b542f3cc31f1 100644 (file)
@@ -962,7 +962,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
                        spin_unlock(&state->quota_spinlock);
                        mutex_unlock(&state->slot_mutex);
 
-                       if (wait_for_completion_interruptible(&state->data_quota_event))
+                       if (wait_for_completion_killable(&state->data_quota_event))
                                return -EAGAIN;
 
                        mutex_lock(&state->slot_mutex);
@@ -986,7 +986,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
                                quota->message_use_count, quota->slot_use_count);
                        VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
                        mutex_unlock(&state->slot_mutex);
-                       if (wait_for_completion_interruptible(&quota->quota_event))
+                       if (wait_for_completion_killable(&quota->quota_event))
                                return -EAGAIN;
                        if (service->closing)
                                return -EHOSTDOWN;
@@ -2662,11 +2662,11 @@ close_service_complete(struct vchiq_service *service, int failstate)
  * returned to user context.
  */
 static int
-vchiq_bulk_xfer_queue_msg_interruptible(struct vchiq_service *service,
-                                       void *offset, void __user *uoffset,
-                                       int size, void *userdata,
-                                       enum vchiq_bulk_mode mode,
-                                       enum vchiq_bulk_dir dir)
+vchiq_bulk_xfer_queue_msg_killable(struct vchiq_service *service,
+                                  void *offset, void __user *uoffset,
+                                  int size, void *userdata,
+                                  enum vchiq_bulk_mode mode,
+                                  enum vchiq_bulk_dir dir)
 {
        struct vchiq_bulk_queue *queue;
        struct bulk_waiter *bulk_waiter = NULL;
@@ -2695,7 +2695,7 @@ vchiq_bulk_xfer_queue_msg_interruptible(struct vchiq_service *service,
                VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
                do {
                        mutex_unlock(&service->bulk_mutex);
-                       if (wait_for_completion_interruptible(&service->bulk_remove_event))
+                       if (wait_for_completion_killable(&service->bulk_remove_event))
                                return -EAGAIN;
                        if (mutex_lock_killable(&service->bulk_mutex))
                                return -EAGAIN;
@@ -2763,7 +2763,7 @@ vchiq_bulk_xfer_queue_msg_interruptible(struct vchiq_service *service,
 
        if (bulk_waiter) {
                bulk_waiter->bulk = bulk;
-               if (wait_for_completion_interruptible(&bulk_waiter->event))
+               if (wait_for_completion_killable(&bulk_waiter->event))
                        status = -EAGAIN;
                else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
                        status = -EINVAL;
@@ -3105,9 +3105,9 @@ vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle)
 }
 
 int
-vchiq_bulk_xfer_blocking_interruptible(struct vchiq_instance *instance, unsigned int handle,
-                                      void *offset, void __user *uoffset, int size,
-                                      void __user *userdata, enum vchiq_bulk_dir dir)
+vchiq_bulk_xfer_blocking(struct vchiq_instance *instance, unsigned int handle,
+                        void *offset, void __user *uoffset, int size,
+                        void __user *userdata, enum vchiq_bulk_dir dir)
 {
        struct vchiq_service *service = find_service_by_handle(instance, handle);
        enum vchiq_bulk_mode mode = VCHIQ_BULK_MODE_BLOCKING;
@@ -3126,8 +3126,8 @@ vchiq_bulk_xfer_blocking_interruptible(struct vchiq_instance *instance, unsigned
                goto error_exit;
 
 
-       status = vchiq_bulk_xfer_queue_msg_interruptible(service, offset, uoffset, size,
-                                                        userdata, mode, dir);
+       status = vchiq_bulk_xfer_queue_msg_killable(service, offset, uoffset, size,
+                                                   userdata, mode, dir);
 
 error_exit:
        vchiq_service_put(service);
@@ -3136,10 +3136,10 @@ error_exit:
 }
 
 int
-vchiq_bulk_xfer_callback_interruptible(struct vchiq_instance *instance, unsigned int handle,
-                                      void *offset, void __user *uoffset, int size,
-                                      enum vchiq_bulk_mode mode, void *userdata,
-                                      enum vchiq_bulk_dir dir)
+vchiq_bulk_xfer_callback(struct vchiq_instance *instance, unsigned int handle,
+                        void *offset, void __user *uoffset, int size,
+                        enum vchiq_bulk_mode mode, void *userdata,
+                        enum vchiq_bulk_dir dir)
 {
        struct vchiq_service *service = find_service_by_handle(instance, handle);
        int status = -EINVAL;
@@ -3160,8 +3160,8 @@ vchiq_bulk_xfer_callback_interruptible(struct vchiq_instance *instance, unsigned
        if (vchiq_check_service(service))
                goto error_exit;
 
-       status = vchiq_bulk_xfer_queue_msg_interruptible(service, offset, uoffset,
-                                                        size, userdata, mode, dir);
+       status = vchiq_bulk_xfer_queue_msg_killable(service, offset, uoffset,
+                                                   size, userdata, mode, dir);
 
 error_exit:
        vchiq_service_put(service);
@@ -3175,8 +3175,8 @@ error_exit:
  * and the call should be retried after being returned to user context.
  */
 int
-vchiq_bulk_xfer_waiting_interruptible(struct vchiq_instance *instance,
-                                     unsigned int handle, struct bulk_waiter *userdata)
+vchiq_bulk_xfer_waiting(struct vchiq_instance *instance,
+                       unsigned int handle, struct bulk_waiter *userdata)
 {
        struct vchiq_service *service = find_service_by_handle(instance, handle);
        struct bulk_waiter *bulk_waiter;
@@ -3200,7 +3200,7 @@ vchiq_bulk_xfer_waiting_interruptible(struct vchiq_instance *instance,
 
        status = 0;
 
-       if (wait_for_completion_interruptible(&bulk_waiter->event))
+       if (wait_for_completion_killable(&bulk_waiter->event))
                return -EAGAIN;
        else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
                return -EINVAL;
index 468463f318018ca4ecb6cf6121098ba67934c1aa..5bf543dfc9c7a1614ea328af450af02b8e4936e4 100644 (file)
@@ -471,19 +471,19 @@ extern void
 remote_event_pollall(struct vchiq_state *state);
 
 extern int
-vchiq_bulk_xfer_waiting_interruptible(struct vchiq_instance *instance,
-                                     unsigned int handle, struct bulk_waiter *userdata);
+vchiq_bulk_xfer_waiting(struct vchiq_instance *instance, unsigned int handle,
+                       struct bulk_waiter *userdata);
 
 extern int
-vchiq_bulk_xfer_blocking_interruptible(struct vchiq_instance *instance, unsigned int handle,
-                                      void *offset, void __user *uoffset, int size,
-                                      void __user *userdata, enum vchiq_bulk_dir dir);
+vchiq_bulk_xfer_blocking(struct vchiq_instance *instance, unsigned int handle,
+                        void *offset, void __user *uoffset, int size,
+                        void __user *userdata, enum vchiq_bulk_dir dir);
 
 extern int
-vchiq_bulk_xfer_callback_interruptible(struct vchiq_instance *instance, unsigned int handle,
-                                      void *offset, void __user *uoffset, int size,
-                                      enum vchiq_bulk_mode mode, void *userdata,
-                                      enum vchiq_bulk_dir dir);
+vchiq_bulk_xfer_callback(struct vchiq_instance *instance, unsigned int handle,
+                        void *offset, void __user *uoffset, int size,
+                        enum vchiq_bulk_mode mode, void *userdata,
+                        enum vchiq_bulk_dir dir);
 
 extern void
 vchiq_dump_state(struct seq_file *f, struct vchiq_state *state);
index d41a4624cc92c39eafa1bfdb229b15c3c21ff6aa..aca237919696217736b3768931a6c28cc1bf87d2 100644 (file)
@@ -305,9 +305,9 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
 
                userdata = &waiter->bulk_waiter;
 
-               status = vchiq_bulk_xfer_blocking_interruptible(instance, args->handle,
-                                                               NULL, args->data, args->size,
-                                                               userdata, dir);
+               status = vchiq_bulk_xfer_blocking(instance, args->handle,
+                                                 NULL, args->data, args->size,
+                                                 userdata, dir);
 
        } else if (args->mode == VCHIQ_BULK_MODE_WAITING) {
                mutex_lock(&instance->bulk_waiter_list_mutex);
@@ -330,13 +330,13 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
                        waiter, current->pid);
                userdata = &waiter->bulk_waiter;
 
-               status = vchiq_bulk_xfer_waiting_interruptible(instance, args->handle, userdata);
+               status = vchiq_bulk_xfer_waiting(instance, args->handle, userdata);
        } else {
                userdata = args->userdata;
 
-               status = vchiq_bulk_xfer_callback_interruptible(instance, args->handle, NULL,
-                                                               args->data, args->size,
-                                                               args->mode, userdata, dir);
+               status = vchiq_bulk_xfer_callback(instance, args->handle, NULL,
+                                                 args->data, args->size,
+                                                 args->mode, userdata, dir);
 
        }