]> www.infradead.org Git - users/hch/misc.git/commitdiff
xhci: dbgtty: Improve performance by handling received data immediately.
authorMathias Nyman <mathias.nyman@linux.intel.com>
Fri, 27 Dec 2024 12:01:39 +0000 (14:01 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 27 Dec 2024 12:06:10 +0000 (13:06 +0100)
Improve dbc transfer rate performance by copying the received data to
the tty buffer directly in the request complete callback function if
possible.
Only defer it in case there is already pending deferred work, tty is
throttled, or we fail to copy the data to the tty buffer.

The request complete callback is already called by a workqueue.

This is part 3/3 of a dbc performance improvement series that roughly
triples dbc performance when using adb push and pull over dbc.

Max/min push rate after patches is 210/118 MB/s, pull rate 171/133 MB/s,
tested with large files (300MB-9GB) by Łukasz Bartosik

Cc: Łukasz Bartosik <ukaszb@chromium.org>
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Link: https://lore.kernel.org/r/20241227120142.1035206-3-mathias.nyman@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/usb/host/xhci-dbgtty.c

index d719c16ea30b57f8499d2f481386f3471403d301..60ed753c85bbc1c417ffbe0b3447cada6a63c76f 100644 (file)
@@ -110,15 +110,74 @@ static void dbc_start_rx(struct dbc_port *port)
        }
 }
 
+/*
+ * Queue received data to tty buffer and push it.
+ *
+ * Returns nr of remaining bytes that didn't fit tty buffer, i.e. 0 if all
+ * bytes successfully moved. In case of error returns negative errno.
+ * Call with lock held
+ */
+static int dbc_rx_push_buffer(struct dbc_port *port, struct dbc_request *req)
+{
+       char            *packet = req->buf;
+       unsigned int    n, size = req->actual;
+       int             count;
+
+       if (!req->actual)
+               return 0;
+
+       /* if n_read is set then request was partially moved to tty buffer */
+       n = port->n_read;
+       if (n) {
+               packet += n;
+               size -= n;
+       }
+
+       count = tty_insert_flip_string(&port->port, packet, size);
+       if (count)
+               tty_flip_buffer_push(&port->port);
+       if (count != size) {
+               port->n_read += count;
+               return size - count;
+       }
+
+       port->n_read = 0;
+       return 0;
+}
+
 static void
 dbc_read_complete(struct xhci_dbc *dbc, struct dbc_request *req)
 {
        unsigned long           flags;
        struct dbc_port         *port = dbc_to_port(dbc);
+       struct tty_struct       *tty;
+       int                     untransferred;
+
+       tty = port->port.tty;
 
        spin_lock_irqsave(&port->port_lock, flags);
+
+       /*
+        * Only defer copying data to tty buffer in case:
+        * - !list_empty(&port->read_queue), there are older pending data
+        * - tty is throttled
+        * - failed to copy all data to buffer, defer remaining part
+        */
+
+       if (list_empty(&port->read_queue) && tty && !tty_throttled(tty)) {
+               untransferred = dbc_rx_push_buffer(port, req);
+               if (untransferred == 0) {
+                       list_add_tail(&req->list_pool, &port->read_pool);
+                       if (req->status != -ESHUTDOWN)
+                               dbc_start_rx(port);
+                       goto out;
+               }
+       }
+
+       /* defer moving data from req to tty buffer to a tasklet */
        list_add_tail(&req->list_pool, &port->read_queue);
        tasklet_schedule(&port->push);
+out:
        spin_unlock_irqrestore(&port->port_lock, flags);
 }
 
@@ -331,10 +390,10 @@ static void dbc_rx_push(struct tasklet_struct *t)
        struct dbc_request      *req;
        struct tty_struct       *tty;
        unsigned long           flags;
-       bool                    do_push = false;
        bool                    disconnect = false;
        struct dbc_port         *port = from_tasklet(port, t, push);
        struct list_head        *queue = &port->read_queue;
+       int                     untransferred;
 
        spin_lock_irqsave(&port->port_lock, flags);
        tty = port->port.tty;
@@ -356,42 +415,15 @@ static void dbc_rx_push(struct tasklet_struct *t)
                        break;
                }
 
-               if (req->actual) {
-                       char            *packet = req->buf;
-                       unsigned int    n, size = req->actual;
-                       int             count;
-
-                       n = port->n_read;
-                       if (n) {
-                               packet += n;
-                               size -= n;
-                       }
-
-                       count = tty_insert_flip_string(&port->port, packet,
-                                                      size);
-                       if (count)
-                               do_push = true;
-                       if (count != size) {
-                               port->n_read += count;
-                               break;
-                       }
-                       port->n_read = 0;
-               }
+               untransferred = dbc_rx_push_buffer(port, req);
+               if (untransferred > 0)
+                       break;
 
                list_move_tail(&req->list_pool, &port->read_pool);
        }
 
-       if (do_push)
-               tty_flip_buffer_push(&port->port);
-
-       if (!list_empty(queue) && tty) {
-               if (!tty_throttled(tty)) {
-                       if (do_push)
-                               tasklet_schedule(&port->push);
-                       else
-                               pr_warn("ttyDBC0: RX not scheduled?\n");
-               }
-       }
+       if (!list_empty(queue))
+               tasklet_schedule(&port->push);
 
        if (!disconnect)
                dbc_start_rx(port);