struct work_struct      disconnect_work;
 
        bool                    negotiation_requested;
+
+       bool                    legacy_iwarp;
+       u8                      initiator_depth;
+       u8                      responder_resources;
 };
 
 #define KSMBD_TRANS(t) ((struct ksmbd_transport *)&((t)->transport))
        t->cm_id = cm_id;
        cm_id->context = t;
 
+       t->initiator_depth = SMB_DIRECT_CM_INITIATOR_DEPTH;
+       t->responder_resources = 1;
+
        t->status = SMB_DIRECT_CS_NEW;
        init_waitqueue_head(&t->wait_status);
 
 static int smb_direct_accept_client(struct smb_direct_transport *t)
 {
        struct rdma_conn_param conn_param;
-       struct ib_port_immutable port_immutable;
-       u32 ird_ord_hdr[2];
+       __be32 ird_ord_hdr[2];
        int ret;
 
+       /*
+        * smb_direct_handle_connect_request()
+        * already negotiated t->initiator_depth
+        * and t->responder_resources
+        */
        memset(&conn_param, 0, sizeof(conn_param));
-       conn_param.initiator_depth = min_t(u8, t->cm_id->device->attrs.max_qp_rd_atom,
-                                          SMB_DIRECT_CM_INITIATOR_DEPTH);
-       conn_param.responder_resources = 0;
-
-       t->cm_id->device->ops.get_port_immutable(t->cm_id->device,
-                                                t->cm_id->port_num,
-                                                &port_immutable);
-       if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
-               ird_ord_hdr[0] = conn_param.responder_resources;
-               ird_ord_hdr[1] = 1;
+       conn_param.initiator_depth = t->initiator_depth;
+       conn_param.responder_resources = t->responder_resources;
+
+       if (t->legacy_iwarp) {
+               ird_ord_hdr[0] = cpu_to_be32(conn_param.responder_resources);
+               ird_ord_hdr[1] = cpu_to_be32(conn_param.initiator_depth);
                conn_param.private_data = ird_ord_hdr;
                conn_param.private_data_len = sizeof(ird_ord_hdr);
        } else {
        return true;
 }
 
-static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
+static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id,
+                                            struct rdma_cm_event *event)
 {
        struct smb_direct_transport *t;
        struct task_struct *handler;
+       u8 peer_initiator_depth;
+       u8 peer_responder_resources;
        int ret;
 
        if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) {
        if (!t)
                return -ENOMEM;
 
+       peer_initiator_depth = event->param.conn.initiator_depth;
+       peer_responder_resources = event->param.conn.responder_resources;
+       if (rdma_protocol_iwarp(new_cm_id->device, new_cm_id->port_num) &&
+           event->param.conn.private_data_len == 8) {
+               /*
+                * Legacy clients with only iWarp MPA v1 support
+                * need a private blob in order to negotiate
+                * the IRD/ORD values.
+                */
+               const __be32 *ird_ord_hdr = event->param.conn.private_data;
+               u32 ird32 = be32_to_cpu(ird_ord_hdr[0]);
+               u32 ord32 = be32_to_cpu(ird_ord_hdr[1]);
+
+               /*
+        * cifs.ko sends the legacy IRD/ORD negotiation
+        * even if iWarp MPA v2 was used.
+                *
+                * Here we check that the values match and only
+                * mark the client as legacy if they don't match.
+                */
+               if ((u32)event->param.conn.initiator_depth != ird32 ||
+                   (u32)event->param.conn.responder_resources != ord32) {
+                       /*
+                        * There are broken clients (old cifs.ko)
+                        * using little endian and also
+                        * struct rdma_conn_param only uses u8
+                        * for initiator_depth and responder_resources,
+                        * so we truncate the value to U8_MAX.
+                        *
+                        * smb_direct_accept_client() will then
+                        * do the real negotiation in order to
+                        * select the minimum between client and
+                        * server.
+                        */
+                       ird32 = min_t(u32, ird32, U8_MAX);
+                       ord32 = min_t(u32, ord32, U8_MAX);
+
+                       t->legacy_iwarp = true;
+                       peer_initiator_depth = (u8)ird32;
+                       peer_responder_resources = (u8)ord32;
+               }
+       }
+
+       /*
+        * First set what we as the server are able to support
+        */
+       t->initiator_depth = min_t(u8, t->initiator_depth,
+                                  new_cm_id->device->attrs.max_qp_rd_atom);
+
+       /*
+        * negotiate the value by using the minimum
+        * between client and server if the client provided
+        * non-zero values.
+        */
+       if (peer_initiator_depth != 0)
+               t->initiator_depth = min_t(u8, t->initiator_depth,
+                                          peer_initiator_depth);
+       if (peer_responder_resources != 0)
+               t->responder_resources = min_t(u8, t->responder_resources,
+                                              peer_responder_resources);
+
        ret = smb_direct_connect(t);
        if (ret)
                goto out_err;
 {
        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST: {
-               int ret = smb_direct_handle_connect_request(cm_id);
+               int ret = smb_direct_handle_connect_request(cm_id, event);
 
                if (ret) {
                        pr_err("Can't create transport: %d\n", ret);