#endif
#endif
-#define inet_num(sk) inet_sk(sk)->num
-#define inet_sport(sk) inet_sk(sk)->sport
-#define inet_dport(sk) inet_sk(sk)->dport
-#define inet_saddr(sk) inet_sk(sk)->saddr
-#define sdp_inet_daddr(sk) inet_sk(sk)->daddr
-#define sdp_inet_rcv_saddr(sk) inet_sk(sk)->rcv_saddr
-
-#define sdp_sk_sleep(sk) (sk)->sk_sleep
+#define inet_num(sk) inet_sk(sk)->inet_num
+#define inet_sport(sk) inet_sk(sk)->inet_sport
+#define inet_dport(sk) inet_sk(sk)->inet_dport
+#define inet_saddr(sk) inet_sk(sk)->inet_saddr
+#define sdp_inet_daddr(sk) inet_sk(sk)->inet_daddr
+#define sdp_inet_rcv_saddr(sk) inet_sk(sk)->inet_rcv_saddr
+
+#define sdp_sk_sleep(sk) sk_sleep(sk)
#define sk_ssk(ssk) ((struct sock *)ssk)
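The accessor macros above absorb two upstream renames: as of 2.6.33 the struct inet_sock members are inet_-prefixed (num became inet_num, sport became inet_sport, and so on), and as of 2.6.35 the socket wait queue is reached through the sk_sleep() helper rather than the removed sk->sk_sleep field. A minimal compat sketch for carrying one such macro across both APIs (the HAVE_INET_SOCK_PREFIX guard is illustrative, not part of this patch):

	#ifdef HAVE_INET_SOCK_PREFIX		/* 2.6.33 and later */
	#define inet_num(sk) inet_sk(sk)->inet_num
	#else					/* older field name */
	#define inet_num(sk) inet_sk(sk)->num
	#endif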
/* Interval between successive polls in the Tx routine when polling is used
!ssk->nagle_last_unacked ||
skb->next != (struct sk_buff *)&sk_ssk(ssk)->sk_write_queue ||
skb->len + sizeof(struct sdp_bsdh) >= ssk->xmit_size_goal ||
- (SDP_SKB_CB(skb)->flags & TCPCB_FLAG_PSH) ||
- (SDP_SKB_CB(skb)->flags & TCPCB_FLAG_URG);
+ (SDP_SKB_CB(skb)->flags & TCPHDR_PSH) ||
+ (SDP_SKB_CB(skb)->flags & TCPHDR_URG);
if (send_now) {
unsigned long mseq = ring_head(ssk->tx_ring);
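The TCPCB_FLAG_PSH/TCPCB_FLAG_URG substitutions in this hunk and in the hunks below track the 2.6.36 rename of the TCP flag constants to TCPHDR_PSH/TCPHDR_URG; the bit values are unchanged, only the identifiers moved. If both kernel generations had to be supported, a fallback could look roughly like this (guard shown for illustration only):

	#ifndef TCPHDR_PSH			/* pre-2.6.36 names */
	#define TCPHDR_PSH TCPCB_FLAG_PSH
	#define TCPHDR_URG TCPCB_FLAG_URG
	#endif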
if (sk->sk_wmem_queued || atomic_read(&sk->sk_rmem_alloc) || sk->sk_forward_alloc) {
sdp_dbg(sk, "wmem_queued: 0x%x rmem_alloc: 0x%x forward: 0x%x "
- "proto: 0x%x\n", sk->sk_wmem_queued,
+ "proto: 0x%lx\n", sk->sk_wmem_queued,
atomic_read(&sk->sk_rmem_alloc),
sk->sk_forward_alloc,
- atomic_read(sk->sk_prot->memory_allocated));
+ atomic_long_read(sk->sk_prot->memory_allocated));
}
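This hunk, together with the counter and /proc hunks below, follows the 2.6.37 switch of proto->memory_allocated from atomic_t to atomic_long_t: reads become atomic_long_read() and the matching format specifiers widen from %d/%x to %ld/%lx. Since atomic_long_read() returns long, a caller reads the counter roughly like so (a sketch only):

	long allocated = atomic_long_read(sk->sk_prot->memory_allocated);

	sdp_dbg(sk, "proto memory_allocated: %ld\n", allocated);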
if (ssk->parent)
static void sdp_mark_push(struct sdp_sock *ssk, struct sk_buff *skb)
{
- SDP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+ SDP_SKB_CB(skb)->flags |= TCPHDR_PSH;
sdp_do_posts(ssk);
}
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
- SDP_SKB_CB(skb)->flags |= TCPCB_FLAG_URG;
+ SDP_SKB_CB(skb)->flags |= TCPHDR_URG;
}
}
}
if (!copied)
- SDP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
+ SDP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
ssk->write_seq += copy;
SDP_SKB_CB(skb)->end_seq += copy;
}
static struct percpu_counter *sockets_allocated;
-static atomic_t memory_allocated;
+static atomic_long_t memory_allocated;
static struct percpu_counter *orphan_count;
static int memory_pressure;
struct proto sdp_proto = {
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static int sdp_create_v6_socket(struct net *net, struct socket *sock, int protocol)
+static int sdp_create_v6_socket(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
return sdp_create_ipvx_socket(net, sock, protocol, &sdp_ipv6_proto_ops);
}
#endif
-static int sdp_create_v4_socket(struct net *net, struct socket *sock, int protocol)
+static int sdp_create_v4_socket(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
return sdp_create_ipvx_socket(net, sock, protocol, &sdp_ipv4_proto_ops);
}
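Both creation handlers grow an int kern parameter because, since 2.6.33, the net_proto_family ->create() callback receives it to distinguish kernel-internal sockets from user ones; SDP ignores the flag, but the prototypes must match for the callback assignment to compile. The registration they slot into would look roughly like this (a sketch; the family constant is assumed from the SDP driver, not shown in this hunk):

	static struct net_proto_family sdp_net_proto = {
		.family = AF_INET_SDP,	/* SDP pseudo address family */
		.create = sdp_create_v4_socket,
		.owner	= THIS_MODULE,
	};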
BUG_ON(!list_empty(&sock_list));
- if (atomic_read(&memory_allocated))
- sdp_dbg(NULL, "SDP detected memory leak. Memory_allocated: %d\n",
- atomic_read(&memory_allocated));
+ if (atomic_long_read(&memory_allocated))
+ sdp_dbg(NULL, "SDP detected memory leak. memory_allocated: %ld\n",
+ atomic_long_read(&memory_allocated));
if (percpu_counter_sum(sockets_allocated))
printk(KERN_WARNING "%s: sockets_allocated %lld\n", __func__,
ssk->hst_idx, ARRAY_SIZE(ssk->hst));
seq_printf(seq, "rmem: %d wmem: %d wqueue: %d "
- "fw: %d prot->alloc: %d\n",
+ "fw: %d prot->alloc: %ld\n",
atomic_read(&sk->sk_rmem_alloc),
atomic_read(&sk->sk_wmem_alloc),
sk->sk_wmem_queued,
sk->sk_forward_alloc,
- atomic_read(sk->sk_prot->memory_allocated));
+ atomic_long_read(sk->sk_prot->memory_allocated));
for (i = 0; i < min(ssk->hst_idx, ARRAY_SIZE(ssk->hst)); ++i) {
struct sdp_sock_hist *hst = &ssk->hst[i];
*/
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
+#include <linux/rcupdate.h>
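+/* rcupdate.h provides rcu_read_lock()/rcu_dereference() for sk->sk_wq below */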
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include "sdp.h"
{
struct sock *sk = sk_ssk(ssk);
struct socket *sock = sk->sk_socket;
+ struct socket_wq *wq;
if (tx_credits(ssk) < ssk->min_bufs || !sock)
return;
clear_bit(SOCK_NOSPACE, &sock->flags);
sdp_prf1(sk, NULL, "Waking up sleepers");
- if (sdp_sk_sleep(sk) && waitqueue_active(sdp_sk_sleep(sk)))
- wake_up_interruptible(sdp_sk_sleep(sk));
- if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible(&wq->wait);
+ if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
sock_wake_async(sock, 2, POLL_OUT);
+ rcu_read_unlock();
}
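The rework above follows the 2.6.35 move of the socket wait queue and fasync list into the RCU-managed struct socket_wq hanging off sk->sk_wq, which is why the dereference sits inside an rcu_read_lock()/rcu_read_unlock() pair and why wq->fasync_list replaces sock->fasync_list. wq_has_sleeper() also supplies the memory barrier that the bare waitqueue_active() test lacked; its 2.6.35-era shape is roughly:

	static inline int wq_has_sleeper(struct socket_wq *wq)
	{
		/* Full barrier pairs with the one on the sleeping
		 * side, so a waiter that enqueued itself just before
		 * testing its condition is not missed by the waker.
		 */
		smp_mb();
		return wq && waitqueue_active(&wq->wait);
	}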
int sdp_poll_rx_cq(struct sdp_sock *ssk)
TX_SRCAVAIL_STATE(skb)->mseq = mseq;
}
- if (unlikely(SDP_SKB_CB(skb)->flags & TCPCB_FLAG_URG))
+ if (unlikely(SDP_SKB_CB(skb)->flags & TCPHDR_URG))
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
tx_wr.num_sge = frags + 1;
tx_wr.opcode = IB_WR_SEND;
tx_wr.send_flags = send_flags;
- if (unlikely(SDP_SKB_CB(skb)->flags & TCPCB_FLAG_URG))
+ if (unlikely(SDP_SKB_CB(skb)->flags & TCPHDR_URG))
tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &tx_wr, &bad_wr);