drbd_free_net_ee(mdev, peer_req);
 
        /* possible callbacks here:
-        * e_end_block, and e_end_resync_block, e_send_discard_ack.
+        * e_end_block, and e_end_resync_block, e_send_discard_write.
         * all ignore the last argument.
         */
        list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
        return ok;
 }
 
+/*
+ * Worker callback: re-submit a local write request that was postponed
+ * (RQ_POSTPONED) because it conflicted with a peer request which won
+ * the conflict resolution.  Returns 1 on success, 0 if the request was
+ * not in the expected postponed state.
+ */
+static int w_restart_write(struct drbd_work *w, int cancel)
+{
+       struct drbd_request *req = container_of(w, struct drbd_request, w);
+       struct drbd_conf *mdev = w->mdev;
+       struct bio *bio;
+       unsigned long start_time;
+       unsigned long flags;
+
+       spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+       if (!expect(req->rq_state & RQ_POSTPONED)) {
+               spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+               return 0;
+       }
+       bio = req->master_bio;
+       start_time = req->start_time;
+       /* Postponed requests will not have their master_bio completed!  */
+       __req_mod(req, DISCARD_WRITE, NULL);
+       spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+
+       /* Loop until __drbd_make_request() accepts the bio.  */
+       while (__drbd_make_request(mdev, bio, start_time))
+               /* retry */ ;
+       return 1;
+}
+
+/*
+ * Queue a w_restart_write work item for every postponed local write
+ * request overlapping [sector, sector + size), so those requests are
+ * re-submitted once the completed peer request has made room.
+ * Requests still pending local I/O (RQ_LOCAL_PENDING) are skipped.
+ * Called with the req_lock held (see e_end_block()).
+ */
+static void restart_conflicting_writes(struct drbd_conf *mdev,
+                                      sector_t sector, int size)
+{
+       struct drbd_interval *i;
+       struct drbd_request *req;
+
+       drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
+               if (!i->local)
+                       continue;
+               req = container_of(i, struct drbd_request, i);
+               if (req->rq_state & RQ_LOCAL_PENDING ||
+                   !(req->rq_state & RQ_POSTPONED))
+                       continue;
+               if (expect(list_empty(&req->w.list))) {
+                       req->w.mdev = mdev;
+                       req->w.cb = w_restart_write;
+                       drbd_queue_work(&mdev->tconn->data.work, &req->w);
+               }
+       }
+}
+
 /* e_end_block() is called via drbd_process_done_ee().
  * this means this function only runs in the asender thread
  */
                spin_lock_irq(&mdev->tconn->req_lock);
                D_ASSERT(!drbd_interval_empty(&peer_req->i));
                drbd_remove_epoch_entry_interval(mdev, peer_req);
+               if (peer_req->flags & EE_RESTART_REQUESTS)
+                       restart_conflicting_writes(mdev, sector, peer_req->i.size);
                spin_unlock_irq(&mdev->tconn->req_lock);
        } else
                D_ASSERT(drbd_interval_empty(&peer_req->i));
        return ok;
 }
 
+/*
+ * Send the given ack packet for a peer request and drop the reference
+ * taken by an earlier inc_unacked() when the conflict was queued.
+ */
-static int e_send_discard_ack(struct drbd_work *w, int unused)
+static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
 {
+       struct drbd_conf *mdev = w->mdev;
        struct drbd_peer_request *peer_req =
                container_of(w, struct drbd_peer_request, w);
-       struct drbd_conf *mdev = w->mdev;
        int ok;
 
-       D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
-       ok = drbd_send_ack(mdev, P_DISCARD_ACK, peer_req);
+       ok = drbd_send_ack(mdev, ack, peer_req);
        dec_unacked(mdev);
 
        return ok;
 }
 
+/* Ack a conflicting peer write with P_DISCARD_WRITE: it lost the
+ * conflict resolution and our local write is taken instead. */
+static int e_send_discard_write(struct drbd_work *w, int unused)
+{
+       return e_send_ack(w, P_DISCARD_WRITE);
+}
+
+/*
+ * Ask the peer to retry its conflicting write.  Peers with an agreed
+ * protocol version below 100 do not know P_RETRY_WRITE, so fall back
+ * to P_DISCARD_WRITE for them.
+ */
+static int e_send_retry_write(struct drbd_work *w, int unused)
+{
+       struct drbd_tconn *tconn = w->mdev->tconn;
+
+       return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
+                            P_RETRY_WRITE : P_DISCARD_WRITE);
+}
+
 static bool seq_greater(u32 a, u32 b)
 {
        /*
        return seq_greater(a, b) ? a : b;
 }
 
+/* Do we need to track (and wait for) the peer's packet sequence numbers? */
+static bool need_peer_seq(struct drbd_conf *mdev)
+{
+       struct drbd_tconn *tconn = mdev->tconn;
+
+       /*
+        * We only need to keep track of the last packet_seq number of our peer
+        * if we are in dual-primary mode and we have the discard flag set; see
+        * handle_write_conflicts().
+        */
+       return tconn->net_conf->two_primaries &&
+              test_bit(DISCARD_CONCURRENT, &tconn->flags);
+}
+
+/*
+ * Record a newly seen peer sequence number and wake up waiters in
+ * wait_for_and_update_peer_seq(); a no-op unless need_peer_seq().
+ */
 static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
 {
        unsigned int old_peer_seq;
 
-       spin_lock(&mdev->peer_seq_lock);
-       old_peer_seq = mdev->peer_seq;
-       mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
-       spin_unlock(&mdev->peer_seq_lock);
-       if (old_peer_seq != peer_seq)
-               wake_up(&mdev->seq_wait);
+       if (need_peer_seq(mdev)) {
+               spin_lock(&mdev->peer_seq_lock);
+               old_peer_seq = mdev->peer_seq;
+               mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
+               spin_unlock(&mdev->peer_seq_lock);
+               if (old_peer_seq != peer_seq)
+                       wake_up(&mdev->seq_wait);
+       }
 }
 
/* Called from receive_Data.
 *
 * returns 0 if we may process the packet,
+ * -ETIMEDOUT if we timed out waiting for missing ack packets,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
-static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
+static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
 {
        DEFINE_WAIT(wait);
-       unsigned int p_seq;
        long timeout;
-       int ret = 0;
+       int ret;
+
+       /* Skip all sequence-number bookkeeping when not needed.  */
+       if (!need_peer_seq(mdev))
+               return 0;
+
        spin_lock(&mdev->peer_seq_lock);
        for (;;) {
-               prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
-               if (!seq_greater(packet_seq, mdev->peer_seq + 1))
+               if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
+                       mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
+                       ret = 0;
                        break;
+               }
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
-               p_seq = mdev->peer_seq;
+               prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
                spin_unlock(&mdev->peer_seq_lock);
                timeout = mdev->tconn->net_conf->ping_timeo*HZ/10;
                timeout = schedule_timeout(timeout);
                spin_lock(&mdev->peer_seq_lock);
-               if (timeout == 0 && p_seq == mdev->peer_seq) {
+               if (!timeout) {
                        ret = -ETIMEDOUT;
                        dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
                        break;
                }
        }
-       finish_wait(&mdev->seq_wait, &wait);
-       if (mdev->peer_seq+1 == packet_seq)
-               mdev->peer_seq++;
        spin_unlock(&mdev->peer_seq_lock);
+       finish_wait(&mdev->seq_wait, &wait);
        return ret;
 }
 
                (dpf & DP_DISCARD ? REQ_DISCARD : 0);
 }
 
+/*
+ * Fail (NEG_ACKED) every postponed local write overlapping the given
+ * range.  Called with req_lock held; the lock is temporarily dropped
+ * to complete each master bio, which is why the overlap scan restarts
+ * from the top after every completion.
+ */
+static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
+                                   unsigned int size)
+{
+       struct drbd_interval *i;
+
+    repeat:
+       drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
+               struct drbd_request *req;
+               struct bio_and_error m;
+
+               if (!i->local)
+                       continue;
+               req = container_of(i, struct drbd_request, i);
+               if (!(req->rq_state & RQ_POSTPONED))
+                       continue;
+               req->rq_state &= ~RQ_POSTPONED;
+               __req_mod(req, NEG_ACKED, &m);
+               spin_unlock_irq(&mdev->tconn->req_lock);
+               if (m.bio)
+                       complete_master_bio(mdev, &m);
+               spin_lock_irq(&mdev->tconn->req_lock);
+               goto repeat;
+       }
+}
+
+/*
+ * Insert the peer request's interval into the write_requests tree and
+ * resolve conflicts with any overlapping requests.
+ *
+ * Returns 0 if the peer request may be submitted, -ENOENT if it was
+ * queued (via done_ee) for a discard or retry ack instead, or a
+ * negative error from drbd_wait_misc().  On any error the interval is
+ * removed from the tree again.
+ *
+ * Called with req_lock held.
+ */
+static int handle_write_conflicts(struct drbd_conf *mdev,
+                                 struct drbd_peer_request *peer_req)
+{
+       struct drbd_tconn *tconn = mdev->tconn;
+       bool resolve_conflicts = test_bit(DISCARD_CONCURRENT, &tconn->flags);
+       sector_t sector = peer_req->i.sector;
+       const unsigned int size = peer_req->i.size;
+       struct drbd_interval *i;
+       bool equal;
+       int err;
+
+       /*
+        * Inserting the peer request into the write_requests tree will prevent
+        * new conflicting local requests from being added.
+        */
+       drbd_insert_interval(&mdev->write_requests, &peer_req->i);
+
+    repeat:
+       drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
+               if (i == &peer_req->i)
+                       continue;
+
+               if (!i->local) {
+                       /*
+                        * Our peer has sent a conflicting remote request; this
+                        * should not happen in a two-node setup.  Wait for the
+                        * earlier peer request to complete.
+                        */
+                       err = drbd_wait_misc(mdev, i);
+                       if (err)
+                               goto out;
+                       goto repeat;
+               }
+
+               equal = i->sector == sector && i->size == size;
+               if (resolve_conflicts) {
+                       /*
+                        * If the peer request is fully contained within the
+                        * overlapping request, it can be discarded; otherwise,
+                        * it will be retried once all overlapping requests
+                        * have completed.
+                        */
+                       bool discard = i->sector <= sector && i->sector +
+                                      (i->size >> 9) >= sector + (size >> 9);
+
+                       if (!equal)
+                               dev_alert(DEV, "Concurrent writes detected: "
+                                              "local=%llus +%u, remote=%llus +%u, "
+                                              "assuming %s came first\n",
+                                         (unsigned long long)i->sector, i->size,
+                                         (unsigned long long)sector, size,
+                                         discard ? "local" : "remote");
+
+                       /* Hand the ack off to the asender; dec_unacked()
+                        * happens in e_send_ack().  */
+                       inc_unacked(mdev);
+                       peer_req->w.cb = discard ? e_send_discard_write :
+                                                  e_send_retry_write;
+                       list_add_tail(&peer_req->w.list, &mdev->done_ee);
+                       wake_asender(mdev->tconn);
+
+                       err = -ENOENT;
+                       goto out;
+               } else {
+                       struct drbd_request *req =
+                               container_of(i, struct drbd_request, i);
+
+                       if (!equal)
+                               dev_alert(DEV, "Concurrent writes detected: "
+                                              "local=%llus +%u, remote=%llus +%u\n",
+                                         (unsigned long long)i->sector, i->size,
+                                         (unsigned long long)sector, size);
+
+                       if (req->rq_state & RQ_LOCAL_PENDING ||
+                           !(req->rq_state & RQ_POSTPONED)) {
+                               /*
+                                * Wait for the node with the discard flag to
+                                * decide if this request will be discarded or
+                                * retried.  Requests that are discarded will
+                                * disappear from the write_requests tree.
+                                *
+                                * In addition, wait for the conflicting
+                                * request to finish locally before submitting
+                                * the conflicting peer request.
+                                */
+                               err = drbd_wait_misc(mdev, &req->i);
+                               if (err) {
+                                       _conn_request_state(mdev->tconn,
+                                                           NS(conn, C_TIMEOUT),
+                                                           CS_HARD);
+                                       fail_postponed_requests(mdev, sector, size);
+                                       goto out;
+                               }
+                               goto repeat;
+                       }
+                       /*
+                        * Remember to restart the conflicting requests after
+                        * the new peer request has completed.
+                        */
+                       peer_req->flags |= EE_RESTART_REQUESTS;
+               }
+       }
+       err = 0;
+
+    out:
+       if (err)
+               drbd_remove_epoch_entry_interval(mdev, peer_req);
+       return err;
+}
+
 /* mirrored write */
 static int receive_Data(struct drbd_conf *mdev, enum drbd_packet cmd,
                        unsigned int data_size)
        sector_t sector;
        struct drbd_peer_request *peer_req;
        struct p_data *p = &mdev->tconn->data.rbuf.data;
+       u32 peer_seq = be32_to_cpu(p->seq_num);
        int rw = WRITE;
        u32 dp_flags;
+       int err;
 
-       if (!get_ldev(mdev)) {
-               spin_lock(&mdev->peer_seq_lock);
-               if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
-                       mdev->peer_seq++;
-               spin_unlock(&mdev->peer_seq_lock);
 
+       if (!get_ldev(mdev)) {
+               err = wait_for_and_update_peer_seq(mdev, peer_seq);
                drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
                atomic_inc(&mdev->current_epoch->epoch_size);
-               return drbd_drain_block(mdev, data_size);
+               return drbd_drain_block(mdev, data_size) && err == 0;
        }
 
        /*
        atomic_inc(&peer_req->epoch->active);
        spin_unlock(&mdev->epoch_lock);
 
-       /* I'm the receiver, I do hold a net_cnt reference. */
-       if (!mdev->tconn->net_conf->two_primaries) {
-               spin_lock_irq(&mdev->tconn->req_lock);
-       } else {
-               /* don't get the req_lock yet,
-                * we may sleep in drbd_wait_peer_seq */
-               const int size = peer_req->i.size;
-               const int discard = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
-               DEFINE_WAIT(wait);
-               int first;
-
-               D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
-
-               /* conflict detection and handling:
-                * 1. wait on the sequence number,
-                *    in case this data packet overtook ACK packets.
-                * 2. check for conflicting write requests.
-                *
-                * Note: for two_primaries, we are protocol C,
-                * so there cannot be any request that is DONE
-                * but still on the transfer log.
-                *
-                * if no conflicting request is found:
-                *    submit.
-                *
-                * if any conflicting request is found
-                * that has not yet been acked,
-                * AND I have the "discard concurrent writes" flag:
-                *       queue (via done_ee) the P_DISCARD_ACK; OUT.
-                *
-                * if any conflicting request is found:
-                *       block the receiver, waiting on misc_wait
-                *       until no more conflicting requests are there,
-                *       or we get interrupted (disconnect).
-                *
-                *       we do not just write after local io completion of those
-                *       requests, but only after req is done completely, i.e.
-                *       we wait for the P_DISCARD_ACK to arrive!
-                *
-                *       then proceed normally, i.e. submit.
-                */
-               if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
+       if (mdev->tconn->net_conf->two_primaries) {
+               err = wait_for_and_update_peer_seq(mdev, peer_seq);
+               if (err)
                        goto out_interrupted;
-
                spin_lock_irq(&mdev->tconn->req_lock);
-
-               /*
-                * Inserting the peer request into the write_requests tree will
-                * prevent new conflicting local requests from being added.
-                */
-               drbd_insert_interval(&mdev->write_requests, &peer_req->i);
-
-               first = 1;
-               for (;;) {
-                       struct drbd_interval *i;
-                       int have_unacked = 0;
-                       int have_conflict = 0;
-                       prepare_to_wait(&mdev->misc_wait, &wait,
-                               TASK_INTERRUPTIBLE);
-
-                       drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
-                               struct drbd_request *req2;
-
-                               if (i == &peer_req->i || !i->local)
-                                       continue;
-
-                               /* only ALERT on first iteration,
-                                * we may be woken up early... */
-                               if (first)
-                                       dev_alert(DEV, "%s[%u] Concurrent local write detected!"
-                                             " new: %llus +%u; pending: %llus +%u\n",
-                                             current->comm, current->pid,
-                                             (unsigned long long)sector, size,
-                                             (unsigned long long)i->sector, i->size);
-
-                               req2 = container_of(i, struct drbd_request, i);
-                               if (req2->rq_state & RQ_NET_PENDING)
-                                       ++have_unacked;
-                               ++have_conflict;
-                               break;
-                       }
-                       if (!have_conflict)
-                               break;
-
-                       /* Discard Ack only for the _first_ iteration */
-                       if (first && discard && have_unacked) {
-                               dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
-                                    (unsigned long long)sector);
-                               drbd_remove_epoch_entry_interval(mdev, peer_req);
-                               inc_unacked(mdev);
-                               peer_req->w.cb = e_send_discard_ack;
-                               list_add_tail(&peer_req->w.list, &mdev->done_ee);
-
-                               spin_unlock_irq(&mdev->tconn->req_lock);
-
-                               /* we could probably send that P_DISCARD_ACK ourselves,
-                                * but I don't like the receiver using the msock */
-
+               err = handle_write_conflicts(mdev, peer_req);
+               if (err) {
+                       spin_unlock_irq(&mdev->tconn->req_lock);
+                       if (err == -ENOENT) {
                                put_ldev(mdev);
-                               wake_asender(mdev->tconn);
-                               finish_wait(&mdev->misc_wait, &wait);
                                return true;
                        }
-
-                       if (signal_pending(current)) {
-                               drbd_remove_epoch_entry_interval(mdev, peer_req);
-                               spin_unlock_irq(&mdev->tconn->req_lock);
-                               finish_wait(&mdev->misc_wait, &wait);
-                               goto out_interrupted;
-                       }
-
-                       /* Indicate to wake up mdev->misc_wait upon completion.  */
-                       i->waiting = true;
-
-                       spin_unlock_irq(&mdev->tconn->req_lock);
-                       if (first) {
-                               first = 0;
-                               dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
-                                    "sec=%llus\n", (unsigned long long)sector);
-                       } else if (discard) {
-                               /* we had none on the first iteration.
-                                * there must be none now. */
-                               D_ASSERT(have_unacked == 0);
-                       }
-                       /* FIXME: Introduce a timeout here after which we disconnect.  */
-                       schedule();
-                       spin_lock_irq(&mdev->tconn->req_lock);
+                       goto out_interrupted;
                }
-               finish_wait(&mdev->misc_wait, &wait);
-       }
-
+       } else
+               spin_lock_irq(&mdev->tconn->req_lock);
        list_add(&peer_req->w.list, &mdev->active_ee);
        spin_unlock_irq(&mdev->tconn->req_lock);
 
                D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_B);
                what = RECV_ACKED_BY_PEER;
                break;
-       case P_DISCARD_ACK:
+       case P_DISCARD_WRITE:
                D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
-               what = CONFLICT_DISCARDED_BY_PEER;
+               what = DISCARD_WRITE;
+               break;
+       case P_RETRY_WRITE:
+               D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
+               what = POSTPONE_WRITE;
                break;
        default:
                D_ASSERT(0);
        sector_t sector = be64_to_cpu(p->sector);
 
        update_peer_seq(mdev, be32_to_cpu(p->seq_num));
+
        dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
            (unsigned long long)sector, be32_to_cpu(p->blksize));
 
        [P_RECV_ACK]        = { sizeof(struct p_block_ack), got_BlockAck },
        [P_WRITE_ACK]       = { sizeof(struct p_block_ack), got_BlockAck },
        [P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
-       [P_DISCARD_ACK]     = { sizeof(struct p_block_ack), got_BlockAck },
+       [P_DISCARD_WRITE]   = { sizeof(struct p_block_ack), got_BlockAck },
        [P_NEG_ACK]         = { sizeof(struct p_block_ack), got_NegAck },
        [P_NEG_DREPLY]      = { sizeof(struct p_block_ack), got_NegDReply },
        [P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply},
        [P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
        [P_RS_CANCEL]       = { sizeof(struct p_block_ack), got_NegRSDReply},
        [P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_RqSReply },
+       [P_RETRY_WRITE]     = { sizeof(struct p_block_ack), got_BlockAck },
        [P_MAX_CMD]         = { 0, NULL },
        };
        if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
 
         *      the receiver,
         *      the bio_endio completion callbacks.
         */
+       if (s & RQ_LOCAL_PENDING)
+               return;
+       if (req->i.waiting) {
+               /* Retry all conflicting peer requests.  */
+               wake_up(&mdev->misc_wait);
+       }
        if (s & RQ_NET_QUEUED)
                return;
        if (s & RQ_NET_PENDING)
                return;
-       if (s & RQ_LOCAL_PENDING)
-               return;
 
        if (req->master_bio) {
                /* this is DATA_RECEIVED (remote read)
                        else
                                root = &mdev->read_requests;
                        drbd_remove_request_interval(root, req);
-               } else
+               } else if (!(s & RQ_POSTPONED))
                        D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
 
                /* for writes we need to do some extra housekeeping */
                /* Update disk stats */
                _drbd_end_io_acct(mdev, req);
 
-               m->error = ok ? 0 : (error ?: -EIO);
-               m->bio = req->master_bio;
+               if (!(s & RQ_POSTPONED)) {
+                       m->error = ok ? 0 : (error ?: -EIO);
+                       m->bio = req->master_bio;
+               }
                req->master_bio = NULL;
        }
 
 {
        struct drbd_conf *mdev = req->w.mdev;
        int rv = 0;
-       m->bio = NULL;
+
+       if (m)
+               m->bio = NULL;
 
        switch (what) {
        default:
                */
 
        case TO_BE_SENT: /* via network */
-               /* reached via drbd_make_request_common
+               /* reached via __drbd_make_request
                 * and from w_read_retry_remote */
                D_ASSERT(!(req->rq_state & RQ_NET_MASK));
                req->rq_state |= RQ_NET_PENDING;
                break;
 
        case TO_BE_SUBMITTED: /* locally */
-               /* reached via drbd_make_request_common */
+               /* reached via __drbd_make_request */
                D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK));
                req->rq_state |= RQ_LOCAL_PENDING;
                break;
                 * no local disk,
                 * or target area marked as invalid,
                 * or just got an io-error. */
-               /* from drbd_make_request_common
+               /* from __drbd_make_request
                 * or from bio_endio during read io-error recovery */
 
                /* so we can verify the handle in the answer packet
 
        case QUEUE_FOR_NET_WRITE:
                /* assert something? */
-               /* from drbd_make_request_common only */
+               /* from __drbd_make_request only */
 
                /* corresponding hlist_del is in _req_may_be_done() */
                drbd_insert_interval(&mdev->write_requests, &req->i);
                 *
                 * _req_add_to_epoch(req); this has to be after the
                 * _maybe_start_new_epoch(req); which happened in
-                * drbd_make_request_common, because we now may set the bit
+                * __drbd_make_request, because we now may set the bit
                 * again ourselves to close the current epoch.
                 *
                 * Add req to the (now) current epoch (barrier). */
                 * hurting performance. */
                set_bit(UNPLUG_REMOTE, &mdev->flags);
 
-               /* see drbd_make_request_common,
+               /* see __drbd_make_request,
                 * just after it grabs the req_lock */
                D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0);
 
 
        case WRITE_ACKED_BY_PEER_AND_SIS:
                req->rq_state |= RQ_NET_SIS;
-       case CONFLICT_DISCARDED_BY_PEER:
+       case DISCARD_WRITE:
                /* for discarded conflicting writes of multiple primaries,
                 * there is no need to keep anything in the tl, potential
                 * node crashes are covered by the activity log. */
-               if (what == CONFLICT_DISCARDED_BY_PEER)
-                       dev_alert(DEV, "Got DiscardAck packet %llus +%u!"
-                             " DRBD is not a random data generator!\n",
-                             (unsigned long long)req->i.sector, req->i.size);
                req->rq_state |= RQ_NET_DONE;
                /* fall through */
        case WRITE_ACKED_BY_PEER:
                _req_may_be_done_not_susp(req, m);
                break;
 
+       case POSTPONE_WRITE:
+               /*
+                * If this node has already detected the write conflict, the
+                * worker will be waiting on misc_wait.  Wake it up once this
+                * request has completed locally.
+                */
+               D_ASSERT(req->rq_state & RQ_NET_PENDING);
+               req->rq_state |= RQ_POSTPONED;
+               _req_may_be_done_not_susp(req, m);
+               break;
+
        case NEG_ACKED:
                /* assert something? */
                if (req->rq_state & RQ_NET_PENDING) {
                                       sector_t sector, int size)
 {
        for(;;) {
-               DEFINE_WAIT(wait);
                struct drbd_interval *i;
+               int err;
 
                i = drbd_find_overlap(&mdev->write_requests, sector, size);
                if (!i)
                        return 0;
-               i->waiting = true;
-               prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
-               spin_unlock_irq(&mdev->tconn->req_lock);
-               schedule();
-               finish_wait(&mdev->misc_wait, &wait);
-               spin_lock_irq(&mdev->tconn->req_lock);
-               if (signal_pending(current))
-                       return -ERESTARTSYS;
+               err = drbd_wait_misc(mdev, i);
+               if (err)
+                       return err;
        }
 }
 
-static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
+int __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
 {
        const int rw = bio_rw(bio);
        const int size = bio->bi_size;
        if (rw == WRITE) {
                err = complete_conflicting_writes(mdev, sector, size);
                if (err) {
+                       if (err != -ERESTARTSYS)
+                               _conn_request_state(mdev->tconn,
+                                                   NS(conn, C_TIMEOUT),
+                                                   CS_HARD);
                        spin_unlock_irq(&mdev->tconn->req_lock);
+                       err = -EIO;
                        goto fail_free_complete;
                }
        }
 
        if (likely(s_enr == e_enr)) {
                inc_ap_bio(mdev, 1);
-               return drbd_make_request_common(mdev, bio, start_time);
+               return __drbd_make_request(mdev, bio, start_time);
        }
 
        /* can this bio be split generically?
 
                D_ASSERT(e_enr == s_enr + 1);
 
-               while (drbd_make_request_common(mdev, &bp->bio1, start_time))
+               while (__drbd_make_request(mdev, &bp->bio1, start_time))
                        inc_ap_bio(mdev, 1);
 
-               while (drbd_make_request_common(mdev, &bp->bio2, start_time))
+               while (__drbd_make_request(mdev, &bp->bio2, start_time))
                        inc_ap_bio(mdev, 1);
 
                dec_ap_bio(mdev);