 struct smc_link_group *lgr = link->lgr;
        int i, rc = 0;
 
+       rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
+       if (rc)
+               return rc;
+       /* protect against parallel smc_llc_cli_rkey_exchange() and
+        * parallel smcr_link_reg_rmb()
+        */
+       mutex_lock(&lgr->llc_conf_mutex);
        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
                if (lgr->lnk[i].state != SMC_LNK_ACTIVE)
                        continue;
        }
        rmb_desc->is_conf_rkey = true;
 out:
+       mutex_unlock(&lgr->llc_conf_mutex);
+       smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
        return rc;
 }
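The hunk above establishes the lock ordering: win the LLC RKEY flow first, then take llc_conf_mutex, and release in reverse order on the way out. As a minimal sketch of that shape, assuming nothing about SMC internals, the same pattern can be modeled in userspace with pthreads (all names below are toy stand-ins, not kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct smc_link_group: one flow slot, one conf mutex. */
struct toy_lgr {
	pthread_mutex_t llc_conf_mutex;
	pthread_mutex_t flow_lock;
	bool flow_busy;			/* models the local LLC flow being active */
};

/* Models smc_llc_flow_initiate(): only one flow may run at a time. */
static int toy_flow_initiate(struct toy_lgr *lgr)
{
	int rc = 0;

	pthread_mutex_lock(&lgr->flow_lock);
	if (lgr->flow_busy)
		rc = -1;		/* caller backs off, as the hunk does */
	else
		lgr->flow_busy = true;
	pthread_mutex_unlock(&lgr->flow_lock);
	return rc;
}

/* Models smc_llc_flow_stop(). */
static void toy_flow_stop(struct toy_lgr *lgr)
{
	pthread_mutex_lock(&lgr->flow_lock);
	lgr->flow_busy = false;
	pthread_mutex_unlock(&lgr->flow_lock);
}

/* Same shape as the first hunk above: flow, then mutex, reverse on exit. */
static int toy_reg_rmbs(struct toy_lgr *lgr)
{
	int rc = toy_flow_initiate(lgr);

	if (rc)
		return rc;
	pthread_mutex_lock(&lgr->llc_conf_mutex);
	puts("registering buffers on all active links");
	pthread_mutex_unlock(&lgr->llc_conf_mutex);
	toy_flow_stop(lgr);
	return 0;
}

int main(void)
{
	struct toy_lgr lgr = {
		.llc_conf_mutex = PTHREAD_MUTEX_INITIALIZER,
		.flow_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	return toy_reg_rmbs(&lgr) ? 1 : 0;
}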
 
 static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc,
                           struct smc_link_group *lgr)
 {
+       int rc;
+
        if (rmb_desc->is_conf_rkey && !list_empty(&lgr->list)) {
                /* unregister rmb with peer */
-               smc_llc_do_delete_rkey(lgr, rmb_desc);
-               rmb_desc->is_conf_rkey = false;
+               rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
+               if (!rc) {
+                       /* protect against smc_llc_cli_rkey_exchange() */
+                       mutex_lock(&lgr->llc_conf_mutex);
+                       smc_llc_do_delete_rkey(lgr, rmb_desc);
+                       rmb_desc->is_conf_rkey = false;
+                       mutex_unlock(&lgr->llc_conf_mutex);
+                       smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
+               }
        }
+
        if (rmb_desc->is_reg_err) {
                /* buf registration failed, reuse not possible */
                mutex_lock(&lgr->rmbs_lock);
        }
 }
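Peer notification here is best-effort: when smc_llc_flow_initiate() fails, the delete-rkey exchange is skipped, is_conf_rkey stays set, and the reuse/free handling below still runs. A compact sketch of that control flow, again with toy names rather than kernel API:

#include <stdbool.h>
#include <stdio.h>

static bool flow_won;	/* flip to true to model an uncontended flow */

/* Models winning (or failing to win) the RKEY flow. */
static bool try_rkey_flow(void)
{
	return flow_won;
}

/* Toy model of the smcr_buf_unuse() shape above. */
static void buf_unuse(bool *is_conf_rkey)
{
	if (*is_conf_rkey && try_rkey_flow()) {
		puts("delete rkey at peer");	/* under the conf mutex */
		*is_conf_rkey = false;
	}
	/* local reuse or free of the buffer proceeds either way */
	puts("buffer returned to pool or freed");
}

int main(void)
{
	bool conf = true;

	buf_unuse(&conf);	/* contended flow: peer is not informed */
	flow_won = true;
	buf_unuse(&conf);	/* now the rkey is deleted at the peer */
	return 0;
}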
 
+/* must be called under lgr->llc_conf_mutex lock */
 void smcr_link_clear(struct smc_link *lnk)
 {
        struct smc_ib_device *smcibdev;
        return rc;
 }
 
-/* register a new rmb on IB device */
+/* register a new rmb on IB device,
+ * must be called under lgr->llc_conf_mutex lock
+ */
 int smcr_link_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc)
 {
        if (list_empty(&link->lgr->list))
        return 0;
 }
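The caller-locking contract spelled out in these comments could also be made machine-checkable. A hypothetical hardening sketch, not part of this patch, using lockdep_assert_held(), which works on mutexes as well:

/* hypothetical, kernel context; needs <linux/lockdep.h> */
int smcr_link_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc)
{
	lockdep_assert_held(&link->lgr->llc_conf_mutex);
	/* ... existing registration logic, unchanged ... */
	return 0;
}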
 
-/* register all used buffers of lgr for a new link */
+/* register all used buffers of lgr for a new link,
+ * must be called under lgr->llc_conf_mutex lock
+ */
 int smcr_buf_reg_lgr(struct smc_link *lnk)
 {
        struct smc_link_group *lgr = lnk->lgr;
 {
        int i, rc = 0;
 
+       /* protect against parallel link reconfiguration */
+       mutex_lock(&lgr->llc_conf_mutex);
        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
                struct smc_link *lnk = &lgr->lnk[i];
 
                }
        }
 out:
+       mutex_unlock(&lgr->llc_conf_mutex);
        return rc;
 }
 
        spin_lock_init(&lgr->llc_event_q_lock);
        spin_lock_init(&lgr->llc_flow_lock);
        init_waitqueue_head(&lgr->llc_waiter);
+       mutex_init(&lgr->llc_conf_mutex);
        lgr->llc_testlink_time = net->ipv4.sysctl_tcp_keepalive_time;
 }
 
        struct smc_llc_qentry *qentry = NULL;
        int rc = 0;
 
-       rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
-       if (rc)
-               return rc;
        rc = smc_llc_send_confirm_rkey(send_link, rmb_desc);
        if (rc)
                goto out;
 out:
        if (qentry)
                smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
-       smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
        return rc;
 }
 
        if (!send_link)
                return -ENOLINK;
 
-       rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
-       if (rc)
-               return rc;
        /* protected by llc_flow control */
        rc = smc_llc_send_delete_rkey(send_link, rmb_desc);
        if (rc)
 out:
        if (qentry)
                smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
-       smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
        return rc;
 }
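With flow initiation and teardown removed from these two helpers, every caller of the confirm-rkey and delete-rkey paths is now responsible for the bracketing itself. A sketch of the resulting caller shape, condensed from the first hunk of this patch (kernel context, not a verbatim caller; smc_llc_do_confirm_rkey() is assumed to keep the signature implied above):

/* sketch of a caller after this patch */
static int rkey_confirm_caller(struct smc_link *link,
			       struct smc_buf_desc *rmb_desc)
{
	struct smc_link_group *lgr = link->lgr;
	int rc;

	rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
	if (rc)
		return rc;
	mutex_lock(&lgr->llc_conf_mutex);
	rc = smc_llc_do_confirm_rkey(link, rmb_desc);
	mutex_unlock(&lgr->llc_conf_mutex);
	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
	return rc;
}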