 #define susp_MASK 1
 #define user_isp_MASK 1
 #define aftr_isp_MASK 1
+#define susp_nod_MASK 1
+#define susp_fen_MASK 1
 
 #define NS(T, S) \
        ({ union drbd_state mask; mask.i = 0; mask.T = T##_MASK; mask; }), \
        return 1;
 }
 
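+/* IO is considered suspended if any reason is set: suspended by the user
+   (susp), because no data is accessible (susp_nod), or while the fence-peer
+   handler runs (susp_fen) */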
+static inline int is_susp(union drbd_state s)
+{
+       return s.susp || s.susp_nod || s.susp_fen;
+}
+
 static inline int __inc_ap_bio_cond(struct drbd_conf *mdev)
 {
        int mxb = drbd_get_max_buffers(mdev);
 
-       if (mdev->state.susp)
+       if (is_susp(mdev->state))
                return 0;
        if (test_bit(SUSPEND_IO, &mdev->flags))
                return 0;
 
            drbd_role_str(ns.peer),
            drbd_disk_str(ns.disk),
            drbd_disk_str(ns.pdsk),
-           ns.susp ? 's' : 'r',
+           is_susp(ns) ? 's' : 'r',
            ns.aftr_isp ? 'a' : '-',
            ns.peer_isp ? 'p' : '-',
            ns.user_isp ? 'u' : '-'
        if (fp == FP_STONITH &&
            (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
            !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
-               ns.susp = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
+               ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
 
        if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
            (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
            !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
-               ns.susp = 1; /* Suspend IO while no data available (no accessible data available) */
+               ns.susp_nod = 1; /* Suspend IO while no up-to-date data is accessible */
 
        if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
                if (ns.conn == C_SYNC_SOURCE)
                PSC(conn);
                PSC(disk);
                PSC(pdsk);
-               PSC(susp);
+               if (is_susp(ns) != is_susp(os))
+                       pbp += sprintf(pbp, "susp( %s -> %s ) ",
+                                      drbd_susp_str(is_susp(os)),
+                                      drbd_susp_str(is_susp(ns)));
                PSC(aftr_isp);
                PSC(peer_isp);
                PSC(user_isp);
 {
        enum drbd_fencing_p fp;
        enum drbd_req_event what = nothing;
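+       /* suspend bits cleared in nsm below are also cleared from the device
+          state once IO can be resumed (via nsm.i &= mdev->state.i) */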
+       union drbd_state nsm = (union drbd_state){ .i = -1 };
 
        if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
                clear_bit(CRASHED_PRIMARY, &mdev->flags);
        /* Here we have the actions that are performed after a
           state change. This function might sleep */
 
-       if (os.susp && ns.susp && mdev->sync_conf.on_no_data == OND_SUSPEND_IO) {
+       nsm.i = -1;
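+       /* IO is frozen because no data is accessible; resume and clear
+          susp_nod as soon as the connection or the local disk comes back */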
+       if (ns.susp_nod) {
                if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
                        if (ns.conn == C_CONNECTED)
-                               what = resend;
+                               what = resend, nsm.susp_nod = 0;
                        else /* ns.conn > C_CONNECTED */
                                dev_err(DEV, "Unexpected Resync going on!\n");
                }
 
                if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
-                       what = restart_frozen_disk_io;
+                       what = restart_frozen_disk_io, nsm.susp_nod = 0;
+
        }
 
-       if (fp == FP_STONITH && ns.susp) {
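+       /* IO is frozen while the fence-peer handler runs; thaw once the peer
+          is known to be outdated (case1) or the connection is back (case2) */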
+       if (ns.susp_fen) {
                /* case1: The outdate peer handler is successful: */
                if (os.pdsk > D_OUTDATED  && ns.pdsk <= D_OUTDATED) {
                        tl_clear(mdev);
                                drbd_md_sync(mdev);
                        }
                        spin_lock_irq(&mdev->req_lock);
-                       _drbd_set_state(_NS(mdev, susp, 0), CS_VERBOSE, NULL);
+                       _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
                        spin_unlock_irq(&mdev->req_lock);
                }
                /* case2: The connection was established again: */
                if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
                        clear_bit(NEW_CUR_UUID, &mdev->flags);
                        what = resend;
+                       nsm.susp_fen = 0;
                }
        }
 
        if (what != nothing) {
                spin_lock_irq(&mdev->req_lock);
                _tl_restart(mdev, what);
-               _drbd_set_state(_NS(mdev, susp, 0), CS_VERBOSE, NULL);
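+               /* clear only the suspend bits that were marked for resume above */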
+               nsm.i &= mdev->state.i;
+               _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
                spin_unlock_irq(&mdev->req_lock);
        }
 
                if (get_ldev(mdev)) {
                        if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
                            mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
-                               if (mdev->state.susp) {
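+                               /* while IO is frozen, only record that a new
+                                  current UUID is needed (it is created on resume) */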
+                               if (is_susp(mdev->state)) {
                                        set_bit(NEW_CUR_UUID, &mdev->flags);
                                } else {
                                        drbd_uuid_new_current(mdev);
                resume_next_sg(mdev);
 
        /* free tl_hash if we got thawed and are C_STANDALONE */
-       if (ns.conn == C_STANDALONE && ns.susp == 0 && mdev->tl_hash)
+       if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
                drbd_free_tl_hash(mdev);
 
        /* Upon network connection, we need to start the receiver */
                  .conn = C_STANDALONE,
                  .disk = D_DISKLESS,
                  .pdsk = D_UNKNOWN,
-                 .susp = 0
+                 .susp = 0,
+                 .susp_nod = 0,
+                 .susp_fen = 0
                } };
 }
 
 
                put_ldev(mdev);
        } else {
                dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
-               return mdev->state.pdsk;
+               nps = mdev->state.pdsk;
+               goto out;
        }
 
        r = drbd_khelper(mdev, "fence-peer");
 
        dev_info(DEV, "fence-peer helper returned %d (%s)\n",
                        (r>>8) & 0xff, ex_to_string);
+
+out:
+       if (mdev->state.susp_fen && nps >= D_UNKNOWN) {
+               /* The fence-peer handler was not successful; unfreeze IO here,
+                  since the state engine will not do it in this case. */
+               _drbd_request_state(mdev, NS(susp_fen, 0), CS_VERBOSE);
+       }
+
        return nps;
 }
 
 void drbd_suspend_io(struct drbd_conf *mdev)
 {
        set_bit(SUSPEND_IO, &mdev->flags);
-       if (mdev->state.susp)
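+       /* if IO is already frozen, pending requests will not complete until
+          it is resumed, so do not wait for ap_bio_cnt to drain */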
+       if (is_susp(mdev->state))
                return;
        wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
 }
 
        drbd_suspend_io(mdev);
        /* also wait for the last barrier ack. */
-       wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || mdev->state.susp);
+       wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state));
        /* and for any other previously queued work */
        drbd_flush_workqueue(mdev);
 
                clear_bit(CRASHED_PRIMARY, &mdev->flags);
 
        if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
-           !(mdev->state.role == R_PRIMARY && mdev->state.susp &&
-             mdev->sync_conf.on_no_data == OND_SUSPEND_IO)) {
+           !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
                set_bit(CRASHED_PRIMARY, &mdev->flags);
                cp_discovered = 1;
        }
                drbd_md_sync(mdev);
        }
        drbd_suspend_io(mdev);
-       reply->ret_code = drbd_request_state(mdev, NS(susp, 0));
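+       /* clear all three suspend reasons, not only the user-requested one */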
+       reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
        if (reply->ret_code == SS_SUCCESS) {
                if (mdev->state.conn < C_CONNECTED)
                        tl_clear(mdev);
 
                           drbd_disk_str(mdev->state.pdsk),
                           (mdev->net_conf == NULL ? ' ' :
                            (mdev->net_conf->wire_protocol - DRBD_PROT_A+'A')),
-                          mdev->state.susp ? 's' : 'r',
+                          is_susp(mdev->state) ? 's' : 'r',
                           mdev->state.aftr_isp ? 'a' : '-',
                           mdev->state.peer_isp ? 'p' : '-',
                           mdev->state.user_isp ? 'u' : '-',
 
        if ((nconn == C_CONNECTED || nconn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
                ns.disk = mdev->new_state_tmp.disk;
        cs_flags = CS_VERBOSE + (oconn < C_CONNECTED && nconn >= C_CONNECTED ? 0 : CS_HARD);
-       if (ns.pdsk == D_CONSISTENT && ns.susp && nconn == C_CONNECTED && oconn < C_CONNECTED &&
+       if (ns.pdsk == D_CONSISTENT && is_susp(ns) && nconn == C_CONNECTED && oconn < C_CONNECTED &&
            test_bit(NEW_CUR_UUID, &mdev->flags)) {
                /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
                   for temporary network outages! */
        kfree(mdev->p_uuid);
        mdev->p_uuid = NULL;
 
-       if (!mdev->state.susp)
+       if (!is_susp(mdev->state))
                tl_clear(mdev);
 
        dev_info(DEV, "Connection closed\n");
        if (os.conn == C_DISCONNECTING) {
                wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
 
-               if (!mdev->state.susp) {
+               if (!is_susp(mdev->state)) {
                        /* we must not free the tl_hash
                         * while application io is still on the fly */
                        wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
 
 {
        struct drbd_conf *mdev = req->mdev;
 
-       if (!mdev->state.susp)
+       if (!is_susp(mdev->state))
                _req_may_be_done(req, m);
 }
 
                            (mdev->state.pdsk == D_INCONSISTENT &&
                             mdev->state.conn >= C_CONNECTED));
 
-       if (!(local || remote) && !mdev->state.susp) {
+       if (!(local || remote) && !is_susp(mdev->state)) {
                dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
                goto fail_free_complete;
        }
        /* GOOD, everything prepared, grab the spin_lock */
        spin_lock_irq(&mdev->req_lock);
 
-       if (mdev->state.susp) {
+       if (is_susp(mdev->state)) {
                /* If we got suspended, use the retry mechanism of
                   generic_make_request() to restart processing of this
                   bio. In the next call to drbd_make_request_26
 
                unsigned conn:5 ;   /* 17/32     cstates */
                unsigned disk:4 ;   /* 8/16      from D_DISKLESS to D_UP_TO_DATE */
                unsigned pdsk:4 ;   /* 8/16      from D_DISKLESS to D_UP_TO_DATE */
-               unsigned susp:1 ;   /* 2/2       IO suspended  no/yes */
+               unsigned susp:1 ;   /* 2/2       IO suspended no/yes (by user) */
                unsigned aftr_isp:1 ; /* isp .. imposed sync pause */
                unsigned peer_isp:1 ;
                unsigned user_isp:1 ;
-               unsigned _pad:11;   /* 0         unused */
+               unsigned susp_nod:1 ; /* IO suspended because no data is accessible */
+               unsigned susp_fen:1 ; /* IO suspended because the fence-peer handler runs */
+               unsigned _pad:9;   /* 0         unused */
 #elif defined(__BIG_ENDIAN_BITFIELD)
-               unsigned _pad:11;   /* 0         unused */
+               unsigned _pad:9;   /* 0         unused */
+               unsigned susp_fen:1 ;
+               unsigned susp_nod:1 ;
                unsigned user_isp:1 ;
                unsigned peer_isp:1 ;
                unsigned aftr_isp:1 ; /* isp .. imposed sync pause */