www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
block: loop: use kthread_work
authorMing Lei <ming.lei@canonical.com>
Sun, 28 Jun 2015 06:06:32 +0000 (23:06 -0700)
committerSantosh Shilimkar <santosh.shilimkar@oracle.com>
Mon, 29 Jun 2015 15:33:45 +0000 (08:33 -0700)
The following patch will use dio/aio to submit IO to the backing file,
so IO no longer needs to be scheduled concurrently from workqueue work
items; switch to kthread_work, which greatly decreases context-switch
cost.

For the non-AIO case, a single thread had been used for a very long
time; it was only converted to workqueue work in v4.0, which has already
caused a performance regression for Fedora live booting. As discussed
in [1], even though submitting I/O concurrently via work items can
improve random read IO throughput, it can at the same time hurt
sequential read IO performance, so it is better to restore the
single-thread behaviour.

For the upcoming AIO support, if loop turns out to have very high
performance requirements, using multiple hw-queues with a per-hwq
kthread would be a better approach than the current work-based one.

[1] http://marc.info/?t=143082678400002&r=1&w=2
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Santosh Shilimkar <ssantosh@kernel.org>
drivers/block/loop.c
drivers/block/loop.h

index e4708e81d2cd4200366bb0d8ca0d77ffd3a99956..3868e6950ed0b7915573fe5b0937a450b2fb04f3 100644 (file)
@@ -666,6 +666,23 @@ static void loop_config_discard(struct loop_device *lo)
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 }
 
+static void loop_unprepare_queue(struct loop_device *lo)
+{
+       flush_kthread_worker(&lo->worker);
+       kthread_stop(lo->worker_task);
+}
+
+static int loop_prepare_queue(struct loop_device *lo)
+{
+       init_kthread_worker(&lo->worker);
+       lo->worker_task = kthread_run(kthread_worker_fn,
+                       &lo->worker, "loop%d", lo->lo_number);
+       if (IS_ERR(lo->worker_task))
+               return -ENOMEM;
+       set_user_nice(lo->worker_task, MIN_NICE);
+       return 0;
+}
+
 static int loop_set_fd(struct loop_device *lo, fmode_t mode,
                       struct block_device *bdev, unsigned int arg)
 {
@@ -723,11 +740,8 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
        size = get_loop_size(lo, file);
        if ((loff_t)(sector_t)size != size)
                goto out_putf;
-       error = -ENOMEM;
-       lo->wq = alloc_workqueue("kloopd%d",
-                       WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 16,
-                       lo->lo_number);
-       if (!lo->wq)
+       error = loop_prepare_queue(lo);
+       if (error)
                goto out_putf;
 
        error = 0;
@@ -876,8 +890,7 @@ static int loop_clr_fd(struct loop_device *lo)
        lo->lo_flags = 0;
        if (!part_shift)
                lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
-       destroy_workqueue(lo->wq);
-       lo->wq = NULL;
+       loop_unprepare_queue(lo);
        mutex_unlock(&lo->lo_ctl_mutex);
        /*
         * Need not hold lo_ctl_mutex to fput backing file.
@@ -1438,23 +1451,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (lo->lo_state != Lo_bound)
                return -EIO;
 
-       if (cmd->rq->cmd_flags & REQ_WRITE) {
-               struct loop_device *lo = cmd->rq->q->queuedata;
-               bool need_sched = true;
-
-               spin_lock_irq(&lo->lo_lock);
-               if (lo->write_started)
-                       need_sched = false;
-               else
-                       lo->write_started = true;
-               list_add_tail(&cmd->list, &lo->write_cmd_head);
-               spin_unlock_irq(&lo->lo_lock);
-
-               if (need_sched)
-                       queue_work(lo->wq, &lo->write_work);
-       } else {
-               queue_work(lo->wq, &cmd->read_work);
-       }
+       queue_kthread_work(&lo->worker, &cmd->work);
 
        return BLK_MQ_RQ_QUEUE_OK;
 }
@@ -1476,35 +1473,10 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
        blk_mq_complete_request(cmd->rq);
 }
 
-static void loop_queue_write_work(struct work_struct *work)
-{
-       struct loop_device *lo =
-               container_of(work, struct loop_device, write_work);
-       LIST_HEAD(cmd_list);
-
-       spin_lock_irq(&lo->lo_lock);
- repeat:
-       list_splice_init(&lo->write_cmd_head, &cmd_list);
-       spin_unlock_irq(&lo->lo_lock);
-
-       while (!list_empty(&cmd_list)) {
-               struct loop_cmd *cmd = list_first_entry(&cmd_list,
-                               struct loop_cmd, list);
-               list_del_init(&cmd->list);
-               loop_handle_cmd(cmd);
-       }
-
-       spin_lock_irq(&lo->lo_lock);
-       if (!list_empty(&lo->write_cmd_head))
-               goto repeat;
-       lo->write_started = false;
-       spin_unlock_irq(&lo->lo_lock);
-}
-
-static void loop_queue_read_work(struct work_struct *work)
+static void loop_queue_work(struct kthread_work *work)
 {
        struct loop_cmd *cmd =
-               container_of(work, struct loop_cmd, read_work);
+               container_of(work, struct loop_cmd, work);
 
        loop_handle_cmd(cmd);
 }
@@ -1516,7 +1488,7 @@ static int loop_init_request(void *data, struct request *rq,
        struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
        cmd->rq = rq;
-       INIT_WORK(&cmd->read_work, loop_queue_read_work);
+       init_kthread_work(&cmd->work, loop_queue_work);
 
        return 0;
 }
@@ -1578,9 +1550,6 @@ static int loop_add(struct loop_device **l, int i)
         */
        queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 
-       INIT_LIST_HEAD(&lo->write_cmd_head);
-       INIT_WORK(&lo->write_work, loop_queue_write_work);
-
        disk = lo->lo_disk = alloc_disk(1 << part_shift);
        if (!disk)
                goto out_free_queue;
index 49564edf55816329de3330f742f1a74da53e33f2..54c6aa51813b87458d296c698a0fa8c5529d3d2f 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/blk-mq.h>
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>
+#include <linux/kthread.h>
 #include <uapi/linux/loop.h>
 
 /* Possible states of device */
@@ -54,12 +54,10 @@ struct loop_device {
        gfp_t           old_gfp_mask;
 
        spinlock_t              lo_lock;
-       struct workqueue_struct *wq;
-       struct list_head        write_cmd_head;
-       struct work_struct      write_work;
-       bool                    write_started;
        int                     lo_state;
        struct mutex            lo_ctl_mutex;
+       struct kthread_worker   worker;
+       struct task_struct      *worker_task;
 
        struct request_queue    *lo_queue;
        struct blk_mq_tag_set   tag_set;
@@ -67,7 +65,7 @@ struct loop_device {
 };
 
 struct loop_cmd {
-       struct work_struct read_work;
+       struct kthread_work work;
        struct request *rq;
        struct list_head list;
 };