ntohl(req_size->size));
break;
case SDP_MID_DATA:
- len += snprintf(buf + len, 255-len, "data_len: 0x%lx |",
+ len += snprintf(buf + len, 255-len, "data_len: 0x%zx |",
ntohl(h->len) - sizeof(struct sdp_bsdh));
break;
case SDP_MID_RDMARDCOMPL:
case SDP_MID_SRCAVAIL:
srcah = (struct sdp_srcah *)(h+1);
- len += snprintf(buf + len, 255-len, " | payload: 0x%lx, "
- "len: 0x%x, rkey: 0x%x, vaddr: 0x%llx |",
+ len += snprintf(buf + len, 255-len, " | payload: 0x%zx, "
+ "len: 0x%zx, rkey: 0x%x, vaddr: 0x%llx |",
ntohl(h->len) - sizeof(struct sdp_bsdh) -
sizeof(struct sdp_srcah),
ntohl(srcah->len), ntohl(srcah->rkey),
be64_to_cpu(srcah->vaddr));
break;
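All of the conversions in this hunk follow one rule: an expression that mixes a u32 from ntohl() with sizeof() is promoted to size_t, which C99 prints with the %z length modifier, while a bare u32 such as srcah->len keeps %x. %lx only happens to work on LP64 targets, where size_t is unsigned long, and triggers -Wformat warnings on 32-bit builds. A minimal userspace sketch, with struct hdr standing in for struct sdp_bsdh:

#include <stdio.h>
#include <stdint.h>

struct hdr { uint8_t mid; uint8_t flags; uint16_t bufs; uint32_t len; };

int main(void)
{
	uint32_t wire_len = 64;	/* as if returned by ntohl() */

	/* u32 - size_t promotes to size_t: %zx is correct on 32- and 64-bit */
	printf("payload: 0x%zx\n", wire_len - sizeof(struct hdr));

	/* a plain u32 stays %x; %zx would over-read its argument on LP64 */
	printf("len: 0x%x\n", wire_len);
	return 0;
}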
SDPSTATS_COUNTER_INC(sendmsg);
lock_sock(sk);
- sdp_dbg_data(sk, "%s size = 0x%lx\n", __func__, size);
+ sdp_dbg_data(sk, "%s size = 0x%zx\n", __func__, size);
posts_handler_get(ssk);
int seglen = iov->iov_len;
char __user *from = iov->iov_base;
- sdp_dbg_data(sk, "Sending iov: %d/%ld %p\n", i, msg->msg_iovlen, from);
+ sdp_dbg_data(sk, "Sending iov: 0x%x/0x%zx %p\n", i, msg->msg_iovlen, from);
SDPSTATS_HIST(sendmsg_seglen, seglen);
copy = size_goal;
sdp_dbg_data(sk, "created new skb: %p"
- " len = 0x%lx, sk_send_head: %p "
+ " len = 0x%zx, sk_send_head: %p "
"copy: 0x%x size_goal: 0x%x\n",
skb, skb->len - sizeof(struct sdp_bsdh),
sk->sk_send_head, copy, size_goal);
} else {
sdp_dbg_data(sk, "adding to existing skb: %p"
- " len = 0x%lx, sk_send_head: %p "
+ " len = 0x%zx, sk_send_head: %p "
"copy: 0x%x\n",
skb, skb->len - sizeof(struct sdp_bsdh),
sk->sk_send_head, copy);
unsigned long max_lockable_bytes;
if (unlikely(len > SDP_MAX_RDMA_READ_LEN)) {
- sdp_dbg_data(sk, "len:0x%lx > FMR_SIZE: 0x%lx\n",
+ sdp_dbg_data(sk, "len:0x%zx > FMR_SIZE: 0x%lx\n",
len, SDP_MAX_RDMA_READ_LEN);
len = SDP_MAX_RDMA_READ_LEN;
}
max_lockable_bytes = sdp_get_max_memlockable_bytes((unsigned long)uaddr & ~PAGE_MASK);
if (unlikely(len > max_lockable_bytes)) {
- sdp_dbg_data(sk, "len:0x%lx > RLIMIT_MEMLOCK available: 0x%lx\n",
+ sdp_dbg_data(sk, "len:0x%zx > RLIMIT_MEMLOCK available: 0x%lx\n",
len, max_lockable_bytes);
len = max_lockable_bytes;
}
- sdp_dbg_data(sk, "user buf: %p, len:0x%lx max_lockable_bytes: 0x%lx\n",
+ sdp_dbg_data(sk, "user buf: %p, len:0x%zx max_lockable_bytes: 0x%lx\n",
uaddr, len, max_lockable_bytes);
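Both clamps above reduce to one pattern: cap the requested length first at the protocol maximum, then at whatever RLIMIT_MEMLOCK still permits for this page offset. A hedged sketch, not the driver's code (sdp_clamp_rdma_len is a hypothetical name; SDP_MAX_RDMA_READ_LEN and sdp_get_max_memlockable_bytes() are the driver's own):

static size_t sdp_clamp_rdma_len(size_t len, unsigned long page_off)
{
	/* hypothetical helper: mirrors the two if-blocks above */
	unsigned long max_lockable = sdp_get_max_memlockable_bytes(page_off);

	if (len > SDP_MAX_RDMA_READ_LEN)
		len = SDP_MAX_RDMA_READ_LEN;
	if (len > max_lockable)
		len = max_lockable;

	return len;
}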
umem = ib_umem_get(&sdp_sk(sk)->context, (unsigned long)uaddr, len,
if (IS_ERR(umem)) {
rc = PTR_ERR(umem);
- sdp_warn(sk, "Error doing umem_get 0x%lx bytes: %d\n", len, rc);
+ sdp_warn(sk, "Error doing umem_get 0x%zx bytes: %d\n", len, rc);
sdp_warn(sk, "RLIMIT_MEMLOCK: 0x%lx[cur] 0x%lx[max] CAP_IPC_LOCK: %d\n",
current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur,
current->signal->rlim[RLIMIT_MEMLOCK].rlim_max,
capable(CAP_IPC_LOCK));
goto err_umem_get;
}
- sdp_dbg_data(sk, "umem->offset = 0x%x, length = 0x%lx\n",
+ sdp_dbg_data(sk, "umem->offset = 0x%x, length = 0x%zx\n",
umem->offset, umem->length);
pages = (u64 *) __get_free_page(GFP_KERNEL);
++iov;
sdp_dbg_data(&ssk->isk.sk, "preparing RDMA read."
- " len: 0x%x. buffer len: 0x%lx\n", len, iov->iov_len);
+ " len: 0x%x. buffer len: 0x%zx\n", len, iov->iov_len);
sock_hold(sk, SOCK_REF_RDMA_RD);
size_t bytes_to_copy = 0;
int copied = 0;
- sdp_dbg_data(sk, "Sending iov: %p, iov_len: 0x%lx\n",
+ sdp_dbg_data(sk, "Sending iov: %p, iov_len: 0x%zx\n",
iov->iov_base, iov->iov_len);
sdp_prf1(sk, NULL, "sdp_sendmsg_zcopy start");
if (ssk->rx_sa) {
rc = do_sdp_sendmsg_zcopy(sk, tx_sa, iov, &timeo);
if (iov->iov_len && iov->iov_len < sdp_zcopy_thresh) {
- sdp_dbg_data(sk, "0x%lx bytes left, switching to bcopy\n",
+ sdp_dbg_data(sk, "0x%zx bytes left, switching to bcopy\n",
iov->iov_len);
break;
}
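For context, the break above is the tail of the zcopy policy: do_sdp_sendmsg_zcopy() consumes as much of the iovec as it can per pass, and once the remainder drops below sdp_zcopy_thresh the page-pinning setup is assumed not to pay off, so the leftover bytes go through the ordinary bcopy path. A hedged sketch of that loop (sdp_send_zcopy_tail is a hypothetical wrapper; the types and callees are the driver's):

static int sdp_send_zcopy_tail(struct sock *sk, struct tx_srcavail_state *tx_sa,
			       struct iovec *iov, long *timeo)
{
	int rc = 0;

	/* keep zero-copying while the remaining chunk is worth pinning */
	while (!rc && iov->iov_len >= sdp_zcopy_thresh)
		rc = do_sdp_sendmsg_zcopy(sk, tx_sa, iov, timeo);

	/* anything left under the threshold falls back to bcopy */
	return rc;
}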