                spin_lock(&tree->lock);
                state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
-               if (state && state->start == start) {
+               if (likely(state && state->start == start)) {
                        /*
                         * take a reference on the state, unlock will drop
                         * the ref
                         */
                        cache_state(state, &cached);
                }
                spin_unlock(&tree->lock);
 
                mirror = io_bio->mirror_num;
-               if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
+               if (likely(uptodate && tree->ops &&
+                          tree->ops->readpage_end_io_hook)) {
                        ret = tree->ops->readpage_end_io_hook(page, start, end,
                                                              state, mirror);
                        if (ret)
                                clean_io_failure(start, page);
                }
 
-               if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
+               if (likely(uptodate))
+                       goto readpage_ok;
+
+               if (tree->ops && tree->ops->readpage_io_failed_hook) {
                        ret = tree->ops->readpage_io_failed_hook(page, mirror);
                        if (!ret && !err &&
                            test_bit(BIO_UPTODATE, &bio->bi_flags))
                                uptodate = 1;
-               } else if (!uptodate) {
+               } else {
                        /*
                         * The generic bio_readpage_error handles errors the
                         * following way: If possible, new read requests are
                         * created and submitted and will end up in
                         * end_bio_extent_readpage as well (if we're lucky, not
                         * in the !uptodate case). In that case it returns 0 and
                         * we just go on with the next page in our bio. If it
                         * can't handle the error it will return -EIO and we
                         * remain responsible for that page.
                         */
                        ret = bio_readpage_error(bio, page, start, end, mirror);
                        if (ret == 0) {
                                uptodate =
                                        test_bit(BIO_UPTODATE, &bio->bi_flags);
                                if (err)
                                        uptodate = 0;
                                uncache_state(&cached);
                                continue;
                        }
                }
-
+readpage_ok:
                if (uptodate && tree->track_uptodate) {
                        set_extent_uptodate(tree, start, end, &cached,
                                            GFP_ATOMIC);
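
For context: likely() and unlikely() come from include/linux/compiler.h and are
thin wrappers around GCC's __builtin_expect(), which tells the compiler which
way a branch almost always goes so it can keep the expected path as
straight-line fall-through code. Below is a minimal, stand-alone userspace
sketch of the pattern this hunk adopts; end_read() and io_failed_hook() are
hypothetical stand-ins for the btrfs completion path and
readpage_io_failed_hook, not the real functions.

#include <stdio.h>

/* Same definitions as include/linux/compiler.h: hint the compiler
 * about the almost-always outcome of a condition. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Hypothetical repair hook; returns 0 if the page could be fixed. */
static int io_failed_hook(int page)
{
	return -1;
}

static void end_read(int page, int uptodate)
{
	/* Hot path: a successful read jumps over all of the error
	 * handling, mirroring the goto readpage_ok above. */
	if (likely(uptodate))
		goto readpage_ok;

	/* Cold path: retry/repair logic stays out of line. */
	if (io_failed_hook(page) == 0)
		uptodate = 1;

readpage_ok:
	printf("page %d uptodate=%d\n", page, uptodate);
}

int main(void)
{
	end_read(1, 1);	/* common case: falls straight through */
	end_read(2, 0);	/* rare case: takes the error branch */
	return 0;
}

The change is purely about code layout: with the success case annotated as
likely, the compiler can move the error handling out of the hot instruction
stream, so the common completion path can run as straight-line code.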