wake_up_all(&conn->c_waitq);
}
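+
+/*
+ * Compare the UUID carried in the message against the connection's UUID.
+ * Returns 0 when the send may proceed: UUID checking is disabled on the
+ * connection or the message, the values match, or rds_sysctl_uuid_tx_no_drop
+ * overrides a mismatch.  A non-zero return tells the caller to drop the
+ * message.
+ */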
+static int rds_match_uuid(struct rds_connection *conn, struct rds_message *rm)
+{
+ int ret = 0;
+
+ if (!conn->c_acl_en || !rm->uuid.enable) {
+ rdsdebug("uuid is not enabled acl_en=%d uuid_en=%d val=%s\n",
+ conn->c_acl_en, rm->uuid.enable, rm->uuid.value);
+ return 0;
+ }
+
+ ret = memcmp(conn->c_uuid, rm->uuid.value, sizeof(conn->c_uuid));
+
+ if (!ret && rm->m_rs)
+ rm->m_rs->rs_uuid_sent_cnt++;
+
+ if (ret && rds_sysctl_uuid_tx_no_drop)
+ return 0;
+
+ return ret;
+}
+
/*
* We're making the conscious trade-off here to only send one message
* down the connection at a time.
}
rm->data.op_active = 1;
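+ /* With ACLs enabled, stamp the connection's UUID on the message. */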
+ if (conn->c_acl_en) {
+ memcpy(rm->uuid.value, conn->c_uuid,
+ RDS_UUID_MAXLEN);
+ rm->uuid.enable = 1;
+ }
+
conn->c_xmit_rm = rm;
}
&rm->m_flags))) {
spin_lock_irqsave(&conn->c_lock, flags);
if (test_and_clear_bit(RDS_MSG_ON_CONN,
- &rm->m_flags))
+ &rm->m_flags)) {
+ rm->m_status = RDS_RDMA_SEND_DROPPED;
list_move_tail(&rm->m_conn_item,
&to_be_dropped);
+ }
spin_unlock_irqrestore(&conn->c_lock, flags);
continue;
}
conn->c_xmit_rm = rm;
}
+ /* If the UUID match fails, drop the message. */
+ if (rds_match_uuid(conn, rm)) {
+ spin_lock_irqsave(&conn->c_lock, flags);
+ if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
+ rm->m_status = RDS_RDMA_SEND_ACCVIO;
+ list_move_tail(&rm->m_conn_item,
+ &to_be_dropped);
+ }
+ spin_unlock_irqrestore(&conn->c_lock, flags);
+ conn->c_xmit_rm = NULL;
+ if (rm->m_rs)
+ rm->m_rs->rs_uuid_drop_cnt++;
+ continue;
+ }
+
/* The transport either sends the whole rdma or none of it */
if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
rm->m_final_op = &rm->rdma;
rds_message_unmapped(rm);
rds_message_put(rm);
}
- rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_SEND_DROPPED);
+ rds_send_remove_from_sock(&to_be_dropped, rm->m_status);
}
/*
size += sizeof(struct scatterlist);
break;
+ case RDS_CMSG_UUID:
+ cmsg_groups |= 1;
+ size += sizeof(struct scatterlist);
+ break;
+
default:
return -EINVAL;
}
return 0;
}
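+
+/*
+ * Copy the caller-supplied UUID from an RDS_CMSG_UUID control message
+ * into the outgoing message.
+ */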
+static int rds_cmsg_uuid(struct rds_sock *rs, struct rds_message *rm,
+ struct cmsghdr *cmsg)
+{
+ struct rds_uuid_args *args;
+
+ if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_uuid_args)))
+ return -EINVAL;
+
+ args = CMSG_DATA(cmsg);
+
+ /* Copy the uuid into the message. */
+ memcpy(rm->uuid.value, args->uuid, RDS_UUID_MAXLEN);
+
+ return 0;
+}
+
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
struct msghdr *msg, int *allocated_mr)
{
ret = rds_cmsg_asend(rs, rm, cmsg);
break;
+ case RDS_CMSG_UUID:
+ ret = rds_cmsg_uuid(rs, rm, cmsg);
+ break;
+
default:
return -EINVAL;
}
rm->data.op_active = 1;
rm->m_daddr = daddr;
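+ /* Carry the socket's UUID-enable setting on the message. */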
+ rm->uuid.enable = rs->rs_uuid_en;
/* For RDMA operation(s), add up rmda bytes to payload to make
* sure its within system QoS threshold limits.
goto out;
}
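+ /*
+ * With the connection's ACL state initialized, fail the send up front
+ * on a UUID mismatch instead of queueing the message.
+ */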
+ if (conn->c_acl_init && rds_match_uuid(conn, rm)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
/* Not accepting new sends until all the failed ops have been reaped */
if (rds_async_send_enabled && conn->c_pending_flush) {
ret = -EAGAIN;
conn->c_next_tx_seq++;
spin_unlock_irqrestore(&conn->c_lock, flags);
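+ /* When ACLs are enabled, stamp the connection's UUID on this message too. */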
+ if (conn->c_acl_en) {
+ memcpy(rm->uuid.value, conn->c_uuid, RDS_UUID_MAXLEN);
+ rm->uuid.enable = 1;
+ }
+
rds_stats_inc(s_send_queued);
rds_stats_inc(s_send_pong);
conn->c_next_tx_seq++;
spin_unlock_irqrestore(&conn->c_lock, flags);
+ if (conn->c_acl_en) {
+ memcpy(rm->uuid.value, conn->c_uuid, RDS_UUID_MAXLEN);
+ rm->uuid.enable = 1;
+ }
+
ret = rds_send_xmit(conn);
if (ret == -ENOMEM || ret == -EAGAIN)
queue_delayed_work(rds_wq, &conn->c_send_w, 1);