sdp_arm_rx_cq(sk);
}
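	/* Aborting a SrcAvail means posting RdmaRdCompl/SendSM, which itself
	 * consumes tx credits, so it cannot proceed below the minimum. */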
- if (unlikely((ssk->sa_post_rdma_rd_compl || ssk->sa_post_sendsm) &&
+ if (unlikely((ssk->sa_post_rdma_rd_compl || ssk->sa_post_sendsm) &&
tx_credits(ssk) < SDP_MIN_TX_CREDITS)) {
sdp_dbg_data(sk, "Run out of credits, can't abort SrcAvail. "
"RdmaRdCompl: %d SendSm: %d\n",
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
if (inet6_sk(sk)) {
struct ipv6_pinfo *newnp;
-
+
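	/* Point the child's pinet6 at its own storage and copy the
	 * listener's IPv6 state into it. */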
newnp = inet_sk(child)->pinet6 = sdp_inet6_sk_generic(child);
memcpy(newnp, inet6_sk(sk), sizeof(struct ipv6_pinfo));
ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
} else if ((h->ipv_cap & HH_IPV_MASK) == HH_IPV6) {
struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst_addr;
- struct sockaddr_in6 *src_addr6 =
+ struct sockaddr_in6 *src_addr6 =
(struct sockaddr_in6 *)&id->route.addr.src_addr;
ipv6_addr_copy(&newnp->daddr, &dst_addr6->sin6_addr);
}
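	/* For an IPv6 connection the IPv4 address fields hold the
	 * LOOPBACK4_IPV6 sentinel, as TCP's IPv6 accept path does. */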
	inet_daddr(child) = inet_saddr(child) = inet_rcv_saddr(child) =
		LOOPBACK4_IPV6;
- } else
+ } else
#endif
{
inet_daddr(child) = dst_addr->sin_addr.s_addr;
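	/* Fill in the SDP Hello header: message id, length, advert limit,
	 * protocol version, and the number of receive buffers just posted. */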
hh.bsdh.mid = SDP_MID_HELLO;
hh.bsdh.len = htonl(sizeof(struct sdp_hh));
hh.max_adverts = 1;
-
+
hh.majv_minv = SDP_MAJV_MINV;
sdp_init_buffers(sdp_sk(sk), rcvbuf_initial_size);
hh.bsdh.bufs = htons(rx_ring_posted(sdp_sk(sk)));
}
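	/* Mirror the address the socket is bound to into the source
	 * address fields, for the IPv6 and IPv4 cases respectively. */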
inet6_sk(sk)->saddr = inet6_sk(sk)->rcv_saddr;
}
- else
+ else
#endif
{
inet_saddr(sk) = inet_rcv_saddr(sk) =
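	/* Alternate-path errors are only logged; losing the primary route
	 * fails the connect with a timeout. */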
case RDMA_CM_EVENT_ALT_ROUTE_ERROR:
sdp_warn(sk, "alt route resolve error\n");
break;
-
+
case RDMA_CM_EVENT_ROUTE_ERROR:
rc = -ETIMEDOUT;
break;
addr_len = sizeof(*addr6);
}
}
- else
+ else
#endif
{
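	/* Reclaim accounted memory; anything still charged to the socket
	 * at this point is leaked, so report it. */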
sk_mem_reclaim(sk);
	if (sk->sk_wmem_queued || atomic_read(&sk->sk_rmem_alloc) ||
	    sk->sk_forward_alloc) {
- sdp_warn(sk, "wmem_queued: 0x%x rmem_alloc: 0x%x forward: 0x%x proto: 0x%x\n",
- sk->sk_wmem_queued, atomic_read(&sk->sk_rmem_alloc), sk->sk_forward_alloc,
+ sdp_warn(sk, "wmem_queued: 0x%x rmem_alloc: 0x%x forward: 0x%x proto: 0x%x\n",
+ sk->sk_wmem_queued, atomic_read(&sk->sk_rmem_alloc), sk->sk_forward_alloc,
atomic_read(sk->sk_prot->memory_allocated));
}
int rc;
int addr_type;
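	/* Reject sockaddrs too short to hold an RFC 2133 sockaddr_in6. */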
- if (addr_len < SIN6_LEN_RFC2133)
+ if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
if (uaddr->sa_family == AF_INET6_SDP)
* connect() to INADDR_ANY means loopback (BSD'ism).
*/
if (ipv6_addr_any(&usin->sin6_addr))
- usin->sin6_addr.s6_addr[15] = 0x1;
+ usin->sin6_addr.s6_addr[15] = 0x1;
addr_type = ipv6_addr_type(&usin->sin6_addr);
}
#endif
-static int sdp_ipv4_connect(struct sock *sk, struct sockaddr_storage *saddr,
+static int sdp_ipv4_connect(struct sock *sk, struct sockaddr_storage *saddr,
struct sockaddr *uaddr, int addr_len)
{
struct sdp_sock *ssk = sdp_sk(sk);
}
break;
case SDP_ZCOPY_THRESH:
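		/* A value of 0 is accepted as-is (threshold disabled);
		 * anything else must lie within
		 * [SDP_MIN_ZCOPY_THRESH, SDP_MAX_ZCOPY_THRESH]. */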
- if (val != 0 && (val < SDP_MIN_ZCOPY_THRESH ||
+ if (val != 0 && (val < SDP_MIN_ZCOPY_THRESH ||
val > SDP_MAX_ZCOPY_THRESH))
err = -EINVAL;
else
static inline int cycles_before(cycles_t a, cycles_t b)
{
/* cycles_t is unsigned, but may be int/long/long long. */
-
+
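	/* Pick the wraparound-safe comparison matching cycles_t's width
	 * at compile time. */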
if (sizeof(cycles_t) == 4)
return before(a, b);
else
posts_handler_put(ssk, SDP_RX_ARMING_DELAY);
sdp_auto_moderation(ssk);
-
+
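	/* The QP may have been torn down while we waited; report EPIPE. */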
if (!err && !ssk->qp_active) {
err = -EPIPE;
sdp_set_error(sk, err);
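	/* Drop the unused tail fragments and return their share of
	 * truesize to the socket accounting. */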
int count = min(PAGE_SIZE, SDP_MAX_PAYLOAD) *
(skb_shinfo(skb)->nr_frags - i);
skb->truesize -= count;
-
+
skb_shinfo(skb)->nr_frags = i;
bytes_reused += count;
break;
ssk->tx_packets++;
- if (h->mid != SDP_MID_SRCAVAIL &&
- h->mid != SDP_MID_DATA &&
+ if (h->mid != SDP_MID_SRCAVAIL &&
+ h->mid != SDP_MID_DATA &&
h->mid != SDP_MID_SRCAVAIL_CANCEL) {
struct sock *sk = sk_ssk(ssk);
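	/* Flush errors are expected while the QP is being torn down; any
	 * other failed send completion resets the connection. */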
if (likely(!wc->status) || wc->status == IB_WC_WR_FLUSH_ERR)
return;
- sdp_warn(sk, "Send completion with error. wr_id 0x%llx Status %d\n",
+ sdp_warn(sk, "Send completion with error. wr_id 0x%llx Status %d\n",
wc->wr_id, wc->status);
sdp_set_error(sk, -ECONNRESET);
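	/* Consume this scatterlist entry's DMA length, in pages, from the
	 * remaining page count. */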
unsigned len2;
len2 = ib_sg_dma_len(dev,
&chunk->page_list[j]) >> PAGE_SHIFT;
-
+
SDP_WARN_ON(len2 > len);
len -= len2;