block, bfq: fix uaf for accessing waker_bfqq after splitting
author Yu Kuai <yukuai3@huawei.com>
Mon, 9 Sep 2024 13:41:48 +0000 (21:41 +0800)
committer Jens Axboe <axboe@kernel.dk>
Tue, 10 Sep 2024 22:32:09 +0000 (16:32 -0600)
After commit 42c306ed7233 ("block, bfq: don't break merge chain in
bfq_split_bfqq()"), if the current process is the last holder of bfqq,
the bfqq can be freed after bfq_split_bfqq(). Hence recording the bfqq
beforehand and then accessing bfqq->waker_bfqq may trigger a UAF.
What's more, the waker_bfqq may be in the merge chain of bfqq, hence
just recording waker_bfqq is still not safe.

Fix the problem by adding a helper bfq_waker_bfqq() to check whether
bfqq->waker_bfqq is in the merge chain while the current process is the
only holder; in that case waker_bfqq may be freed by the split as well,
so NULL is returned instead.

Fixes: 42c306ed7233 ("block, bfq: don't break merge chain in bfq_split_bfqq()")
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Link: https://lore.kernel.org/r/20240909134154.954924-2-yukuai1@huaweicloud.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
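
The sketch below is a minimal userspace illustration of the idea behind the
fix, not kernel code: capture and validate the waker pointer *before* the
split drops the last reference that may free the queue. The struct queue
type and the safe_waker()/split() helpers are simplified stand-ins invented
for this sketch; only the ordering mirrors bfq_init_rq().

#include <stdio.h>
#include <stdlib.h>

struct queue {
	int process_refs;         /* stand-in for bfqq_process_refs() */
	struct queue *new_bfqq;   /* merge-chain link */
	struct queue *waker_bfqq; /* waker pointer we want to keep using */
};

/* Stand-in for bfq_waker_bfqq(): NULL if the waker would die with the split. */
static struct queue *safe_waker(struct queue *q)
{
	struct queue *link = q->new_bfqq;
	struct queue *waker = q->waker_bfqq;

	if (!waker)
		return NULL;

	for (; link; link = link->new_bfqq) {
		if (link == waker) {
			/* Waker sits in the merge chain; with a single
			 * process reference it is freed by the split too. */
			if (waker->process_refs == 1)
				return NULL;
			break;
		}
	}
	return waker;
}

/* Stand-in for bfq_split_bfqq(): frees the queue when we held the last ref. */
static struct queue *split(struct queue *q)
{
	if (--q->process_refs == 0) {
		free(q);
		return NULL; /* caller must not dereference q afterwards */
	}
	return q;
}

int main(void)
{
	struct queue *q = calloc(1, sizeof(*q));
	struct queue *w = calloc(1, sizeof(*w));

	q->process_refs = 1;
	w->process_refs = 2;
	q->waker_bfqq = w;

	/* Capture the waker *before* the split may free q ... */
	struct queue *waker = safe_waker(q);

	q = split(q); /* q may now be NULL and freed */

	/* ... so this never touches freed memory. */
	printf("waker survives the split: %s\n", waker ? "yes" : "no");

	free(w);
	return 0;
}

In bfq_init_rq() the same ordering applies: bfq_waker_bfqq() is called while
bfqq is still valid, and its result is used after bfq_split_bfqq() instead of
dereferencing the possibly freed old_bfqq.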
block/bfq-iosched.c

index d1bf2b8a35769f44cd56cae0347d39d32dd711eb..d5d39974c67486754a07d9492969cd41c5d55671 100644 (file)
@@ -6825,6 +6825,31 @@ static void bfq_prepare_request(struct request *rq)
        rq->elv.priv[0] = rq->elv.priv[1] = NULL;
 }
 
+static struct bfq_queue *bfq_waker_bfqq(struct bfq_queue *bfqq)
+{
+       struct bfq_queue *new_bfqq = bfqq->new_bfqq;
+       struct bfq_queue *waker_bfqq = bfqq->waker_bfqq;
+
+       if (!waker_bfqq)
+               return NULL;
+
+       while (new_bfqq) {
+               if (new_bfqq == waker_bfqq) {
+                       /*
+                        * If waker_bfqq is in the merge chain and the
+                        * current process is its only holder, waker_bfqq
+                        * may be freed by the split as well; don't return
+                        * a pointer that can dangle.
+                        */
+                       if (bfqq_process_refs(waker_bfqq) == 1)
+                               return NULL;
+                       break;
+               }
+
+               new_bfqq = new_bfqq->new_bfqq;
+       }
+
+       return waker_bfqq;
+}
+
 /*
  * If needed, init rq, allocate bfq data structures associated with
  * rq, and increment reference counters in the destination bfq_queue
@@ -6886,7 +6911,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
                /* If the queue was seeky for too long, break it apart. */
                if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq) &&
                        !bic->bfqq_data[a_idx].stably_merged) {
-                       struct bfq_queue *old_bfqq = bfqq;
+                       struct bfq_queue *waker_bfqq = bfq_waker_bfqq(bfqq);
 
                        /* Update bic before losing reference to bfqq */
                        if (bfq_bfqq_in_large_burst(bfqq))
@@ -6906,7 +6931,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
                                bfqq_already_existing = true;
 
                        if (!bfqq_already_existing) {
-                               bfqq->waker_bfqq = old_bfqq->waker_bfqq;
+                               bfqq->waker_bfqq = waker_bfqq;
                                bfqq->tentative_waker_bfqq = NULL;
 
                                /*
@@ -6916,7 +6941,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
                                 * woken_list of the waker. See
                                 * bfq_check_waker for details.
                                 */
-                               if (bfqq->waker_bfqq)
+                               if (waker_bfqq)
                                        hlist_add_head(&bfqq->woken_list_node,
                                                       &bfqq->waker_bfqq->woken_list);
                        }