]> www.infradead.org Git - users/hch/misc.git/commitdiff
ceph: cleanup in ceph_alloc_readdir_reply_buffer()
authorViacheslav Dubeyko <Slava.Dubeyko@ibm.com>
Tue, 2 Sep 2025 19:08:45 +0000 (12:08 -0700)
committerIlya Dryomov <idryomov@gmail.com>
Wed, 8 Oct 2025 21:30:47 +0000 (23:30 +0200)
The Coverity Scan service has reported a potential issue
in ceph_alloc_readdir_reply_buffer() [1]. If order could
become negative (e.g. -1), then an issue is expected in this logic:

num_entries = (PAGE_SIZE << order) / size;

Technically speaking, this logic [2] should prevent
the order variable from becoming negative:

if (!rinfo->dir_entries)
    return -ENOMEM;

However, the allocation logic requires some cleanup.
This patch makes sure that the calculated byte count
can never exceed ULONG_MAX before the get_order()
calculation. It also adds a check of the order
variable for a negative value, guaranteeing that the
second half of the function's code never operates on
a negative order, even if something goes wrong in —
or is later changed in — the first half of the
function's logic.

v2:
Alex Markuze suggested adding the unlikely() macro
to the introduced condition checks.

[1] https://scan5.scan.coverity.com/#/project-view/64304/10063?selectedIssue=1198252
[2] https://elixir.bootlin.com/linux/v6.17-rc3/source/fs/ceph/mds_client.c#L2553

Signed-off-by: Viacheslav Dubeyko <Slava.Dubeyko@ibm.com>
Reviewed-by: Alex Markuze <amarkuze@redhat.com>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
fs/ceph/mds_client.c

index aa2f74142cf4d59efca135f413dbc4d2ef7fe58f..8104350b155372b5a9de79dba1e8fcde3bf7b36d 100644 (file)
@@ -2533,6 +2533,7 @@ int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
        struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
        size_t size = sizeof(struct ceph_mds_reply_dir_entry);
        unsigned int num_entries;
+       u64 bytes_count;
        int order;
 
        spin_lock(&ci->i_ceph_lock);
@@ -2541,7 +2542,11 @@ int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
        num_entries = max(num_entries, 1U);
        num_entries = min(num_entries, opt->max_readdir);
 
-       order = get_order(size * num_entries);
+       bytes_count = (u64)size * num_entries;
+       if (unlikely(bytes_count > ULONG_MAX))
+               bytes_count = ULONG_MAX;
+
+       order = get_order((unsigned long)bytes_count);
        while (order >= 0) {
                rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
                                                             __GFP_NOWARN |
@@ -2551,7 +2556,7 @@ int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
                        break;
                order--;
        }
-       if (!rinfo->dir_entries)
+       if (!rinfo->dir_entries || unlikely(order < 0))
                return -ENOMEM;
 
        num_entries = (PAGE_SIZE << order) / size;