We want to avoid bio_split for bios crossing activity log extent
boundaries, so we may need to activate two activity log extents
"atomically". For that, drbd_al_begin_io() needs to know more than just
the start sector: pass the whole struct drbd_interval, which also
carries the size.
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
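
With the full interval available, the activity log code can compute both
the first and the last extent a request touches. A minimal sketch of that
computation, under the definitions used in this tree (i->sector in
512-byte sectors, i->size in bytes, extents of 1 << AL_EXTENT_SHIFT
bytes); the helper name is hypothetical and not part of this patch:

	/* Illustration only: map an interval to the range of AL extent
	 * numbers it covers.  Assumes i->size > 0. */
	static void al_extent_range(struct drbd_interval *i,
				    unsigned int *first, unsigned int *last)
	{
		*first = i->sector >> (AL_EXTENT_SHIFT - 9);
		*last = (i->sector + (i->size >> 9) - 1)
			>> (AL_EXTENT_SHIFT - 9);
	}

A request crosses an extent boundary exactly when *first != *last; in
that case both extents have to become active before the bio may be
submitted, without splitting the bio at the boundary.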
        return al_ext;
 }
 
-void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
+void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i)
 {
-       unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
+       unsigned int enr = (i->sector >> (AL_EXTENT_SHIFT-9));
        struct lc_element *al_ext;
        struct update_al_work al_work;
 
        }
 }
 
-void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
+void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i)
 {
-       unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
+       unsigned int enr = (i->sector >> (AL_EXTENT_SHIFT-9));
        struct lc_element *extent;
        unsigned long flags;
 
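
On the completion side, the interval likewise allows dropping the
reference on every extent that was activated. A sketch of such a loop,
assuming the lru_cache API (lc_find()/lc_put()) already used by
drbd_actlog.c and the hypothetical al_extent_range() helper from above;
the real function would also hold mdev->al_lock around this:

	/* Illustration only: release each active extent in [first, last]. */
	unsigned int enr, first, last;

	al_extent_range(i, &first, &last);
	for (enr = first; enr <= last; enr++) {
		struct lc_element *extent = lc_find(mdev->act_log, enr);
		if (!extent) {
			dev_err(DEV, "al_complete_io() called on inactive extent %u\n",
				enr);
			continue;
		}
		lc_put(mdev->act_log, extent);
	}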
 
 extern const char *drbd_role_str(enum drbd_role s);
 
 /* drbd_actlog.c */
-extern void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector);
-extern void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector);
+extern void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i);
+extern void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i);
 extern void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector);
 extern int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
 extern int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
 
                drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
                peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
                peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
-               drbd_al_begin_io(mdev, peer_req->i.sector);
+               drbd_al_begin_io(mdev, &peer_req->i);
        }
 
        err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
        drbd_remove_epoch_entry_interval(mdev, peer_req);
        spin_unlock_irq(&mdev->tconn->req_lock);
        if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
-               drbd_al_complete_io(mdev, peer_req->i.sector);
+               drbd_al_complete_io(mdev, &peer_req->i);
 
 out_interrupted:
        drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + EV_CLEANUP);
 
                if (s & RQ_LOCAL_MASK) {
                        if (get_ldev_if_state(mdev, D_FAILED)) {
                                if (s & RQ_IN_ACT_LOG)
-                                       drbd_al_complete_io(mdev, req->i.sector);
+                                       drbd_al_complete_io(mdev, &req->i);
                                put_ldev(mdev);
                        } else if (__ratelimit(&drbd_ratelimit_state)) {
-                               dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu), "
-                                    "but my Disk seems to have failed :(\n",
-                                    (unsigned long long) req->i.sector);
+                               dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu, %u), "
+                                        "but my Disk seems to have failed :(\n",
+                                        (unsigned long long) req->i.sector, req->i.size);
                        }
                }
        }
         * of transactional on-disk meta data updates. */
        if (rw == WRITE && local && !test_bit(AL_SUSPENDED, &mdev->flags)) {
                req->rq_state |= RQ_IN_ACT_LOG;
-               drbd_al_begin_io(mdev, sector);
+               drbd_al_begin_io(mdev, &req->i);
        }
 
        remote = remote && drbd_should_do_remote(mdev->state);
 
 fail_free_complete:
        if (req->rq_state & RQ_IN_ACT_LOG)
-               drbd_al_complete_io(mdev, sector);
+               drbd_al_complete_io(mdev, &req->i);
 fail_and_free_req:
        if (local) {
                bio_put(req->private_bio);
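
Both the receiver path above and this request path rely on the same
balancing idiom: a flag (EE_CALL_AL_COMPLETE_IO, RQ_IN_ACT_LOG) records
that the request entered the activity log, and every exit path,
including the failure paths, checks that flag before calling
drbd_al_complete_io(). Condensed (a sketch, not the full request
submission logic):

	/* Sketch: AL entry and exit stay balanced via a state bit. */
	if (rw == WRITE && local && !test_bit(AL_SUSPENDED, &mdev->flags)) {
		req->rq_state |= RQ_IN_ACT_LOG;
		drbd_al_begin_io(mdev, &req->i);	/* may block on an AL transaction */
	}
	/* ... submission; on any failure: ... */
	if (req->rq_state & RQ_IN_ACT_LOG)
		drbd_al_complete_io(mdev, &req->i);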
 
 {
        unsigned long flags = 0;
        struct drbd_conf *mdev = peer_req->w.mdev;
-       sector_t e_sector;
+       struct drbd_interval i;
        int do_wake;
        u64 block_id;
        int do_al_complete_io;
         * we may no longer access it,
         * it may be freed/reused already!
         * (as soon as we release the req_lock) */
-       e_sector = peer_req->i.sector;
+       i = peer_req->i;
        do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
        block_id = peer_req->block_id;
 
        spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 
        if (block_id == ID_SYNCER)
-               drbd_rs_complete_io(mdev, e_sector);
+               drbd_rs_complete_io(mdev, i.sector);
 
        if (do_wake)
                wake_up(&mdev->ee_wait);
 
        if (do_al_complete_io)
-               drbd_al_complete_io(mdev, e_sector);
+               drbd_al_complete_io(mdev, &i);
 
        wake_asender(mdev->tconn);
        put_ldev(mdev);
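
Note the lifetime rule this hunk encodes: the interval is copied by
value while req_lock is still held, because peer_req may be freed or
reused as soon as the lock is dropped, so &peer_req->i would dangle.
The pattern, condensed (sketch):

	struct drbd_interval i;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	i = peer_req->i;	/* struct copy; a pointer would dangle */
	do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
	/* ... peer_req may be handed off for freeing here ... */
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	if (do_al_complete_io)
		drbd_al_complete_io(mdev, &i);	/* operates on the local copy */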
        struct drbd_conf *mdev = w->mdev;
 
        if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
-               drbd_al_begin_io(mdev, req->i.sector);
+               drbd_al_begin_io(mdev, &req->i);
        /* Calling drbd_al_begin_io() out of the worker might deadlock
           theoretically. In practice it cannot deadlock, since this is
           only used when unfreezing IOs. All the extents of the requests