dma_info[0].len = skb_headlen(skb);
- wqe_desc->hi_addr = upper_32_bits(dma_info[0].dma);
- wqe_desc->lo_addr = lower_32_bits(dma_info[0].dma);
+ wqe_desc->hi_addr = cpu_to_le32(upper_32_bits(dma_info[0].dma));
+ wqe_desc->lo_addr = cpu_to_le32(lower_32_bits(dma_info[0].dma));
- wqe_desc->ctrl_len = dma_info[0].len;
+ wqe_desc->ctrl_len = cpu_to_le32(dma_info[0].len);
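The hunk above keeps the address math in host order and byte-swaps each word exactly once at the final store. A minimal sketch of the same shape, using a hypothetical struct rather than the driver's real hinic3_sq_wqe_desc layout:

#include <linux/kernel.h>
#include <linux/types.h>

struct demo_desc {
	__le32 hi_addr;
	__le32 lo_addr;
	__le32 ctrl_len;
};

static void demo_fill(struct demo_desc *d, dma_addr_t dma, u32 len)
{
	/* address split stays in host order ... */
	d->hi_addr = cpu_to_le32(upper_32_bits(dma));
	d->lo_addr = cpu_to_le32(lower_32_bits(dma));
	/* ... and each word is byte-swapped exactly once at the store */
	d->ctrl_len = cpu_to_le32(len);
}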
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &(skb_shinfo(skb)->frags[i]);
union hinic3_ip ip;
u8 l4_proto;
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, TUNNEL_FLAG);
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
+ TUNNEL_FLAG));
ip.hdr = skb_network_header(skb);
if (ip.v4->version == 4) {
}
}
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN);
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, INNER_L4_EN));
return 1;
}
}
}
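Since pkt_info0 is accumulated with |=, every SET() term gets its own cpu_to_le32() in the hunks above. A sketch of why the incremental and one-shot forms are equivalent; the DEMO_* masks are stand-ins, not the real SQ_TASK_INFO0 layout:

#include <linux/bits.h>
#include <linux/kernel.h>

#define DEMO_INNER_L3_EN	BIT(0)
#define DEMO_INNER_L4_EN	BIT(1)

static void demo_set_flags(__le32 *pkt_info0)
{
	/* incremental form: swap each host-order term as it is OR'ed in */
	*pkt_info0 |= cpu_to_le32(DEMO_INNER_L3_EN);
	*pkt_info0 |= cpu_to_le32(DEMO_INNER_L4_EN);

	/* one-shot form: identical result, since byte-swapping
	 * distributes over bitwise OR
	 */
	*pkt_info0 |= cpu_to_le32(DEMO_INNER_L3_EN | DEMO_INNER_L4_EN);
}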
-static void hinic3_set_tso_info(struct hinic3_sq_task *task, u32 *queue_info,
+static void hinic3_set_tso_info(struct hinic3_sq_task *task, __le32 *queue_info,
enum hinic3_l4_offload_type l4_offload,
u32 offset, u32 mss)
{
if (l4_offload == HINIC3_L4_OFFLOAD_TCP) {
- *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, TSO);
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN);
+ *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(1, TSO));
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
+ INNER_L4_EN));
} else if (l4_offload == HINIC3_L4_OFFLOAD_UDP) {
- *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, UFO);
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN);
+ *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(1, UFO));
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
+ INNER_L4_EN));
}
/* enable L3 calculation */
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L3_EN);
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, INNER_L3_EN));
- *queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset >> 1, PLDOFF);
+ *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(offset >> 1, PLDOFF));
/* set MSS value */
- *queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK;
- *queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS);
+ *queue_info &= cpu_to_le32(~SQ_CTRL_QUEUE_INFO_MSS_MASK);
+ *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(mss, MSS));
}
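The MSS update above is a read-modify-write on a __le32 word. Byte-swapping commutes with bitwise AND/OR and NOT, so swapping the host-order mask constant once is enough on both big- and little-endian hosts. A sketch under that assumption, with a stand-in mask value rather than the driver's SQ_CTRL_QUEUE_INFO_MSS_MASK:

#include <linux/bitfield.h>
#include <linux/kernel.h>

#define DEMO_MSS_MASK	GENMASK(26, 13)

static void demo_update_mss(__le32 *queue_info, u32 mss)
{
	/* swap(~mask) == ~swap(mask), and swapping distributes over
	 * AND/OR, so one swapped constant clears the field correctly
	 */
	*queue_info &= cpu_to_le32(~DEMO_MSS_MASK);
	*queue_info |= cpu_to_le32(FIELD_PREP(DEMO_MSS_MASK, mss));
}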
static __sum16 csum_magic(union hinic3_ip *ip, unsigned short proto)
csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
}
-static int hinic3_tso(struct hinic3_sq_task *task, u32 *queue_info,
+static int hinic3_tso(struct hinic3_sq_task *task, __le32 *queue_info,
struct sk_buff *skb)
{
enum hinic3_l4_offload_type l4_offload;
if (skb->encapsulation) {
u32 gso_type = skb_shinfo(skb)->gso_type;
/* L3 checksum is always enabled */
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, OUT_L3_EN);
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, TUNNEL_FLAG);
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, OUT_L3_EN));
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
+ TUNNEL_FLAG));
l4.hdr = skb_transport_header(skb);
ip.hdr = skb_network_header(skb);
if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, OUT_L4_EN);
+ task->pkt_info0 |=
+ cpu_to_le32(SQ_TASK_INFO0_SET(1, OUT_L4_EN));
}
ip.hdr = skb_inner_network_header(skb);
* 2=select TPID2 in IPSU, 3=select TPID3 in IPSU,
* 4=select TPID4 in IPSU
*/
- task->vlan_offload = SQ_TASK_INFO3_SET(vlan_tag, VLAN_TAG) |
- SQ_TASK_INFO3_SET(vlan_tpid, VLAN_TPID) |
- SQ_TASK_INFO3_SET(1, VLAN_TAG_VALID);
+ task->vlan_offload =
+ cpu_to_le32(SQ_TASK_INFO3_SET(vlan_tag, VLAN_TAG) |
+ SQ_TASK_INFO3_SET(vlan_tpid, VLAN_TPID) |
+ SQ_TASK_INFO3_SET(1, VLAN_TAG_VALID));
}
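Where several subfields land in one word, as in vlan_offload above, composing them in host order and converting once avoids per-term swaps. A hypothetical helper showing the same shape; the DEMO_* masks are illustrative, not the SQ_TASK_INFO3 layout:

#include <linux/bitfield.h>
#include <linux/kernel.h>

#define DEMO_VLAN_TAG_MASK	GENMASK(15, 0)
#define DEMO_VLAN_TPID_MASK	GENMASK(18, 16)
#define DEMO_VLAN_VALID_MASK	BIT(19)

static __le32 demo_vlan_word(u16 vlan_tag, u8 vlan_tpid)
{
	/* compose all subfields in host order, then swap once */
	return cpu_to_le32(FIELD_PREP(DEMO_VLAN_TAG_MASK, vlan_tag) |
			   FIELD_PREP(DEMO_VLAN_TPID_MASK, vlan_tpid) |
			   FIELD_PREP(DEMO_VLAN_VALID_MASK, 1));
}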
static u32 hinic3_tx_offload(struct sk_buff *skb, struct hinic3_sq_task *task,
- u32 *queue_info, struct hinic3_txq *txq)
+ __le32 *queue_info, struct hinic3_txq *txq)
{
u32 offload = 0;
int tso_cs_en;
}
static void hinic3_prepare_sq_ctrl(struct hinic3_sq_wqe_combo *wqe_combo,
- u32 queue_info, int nr_descs, u16 owner)
+ __le32 queue_info, int nr_descs, u16 owner)
{
struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0;
if (wqe_combo->wqe_type == SQ_WQE_COMPACT_TYPE) {
wqe_desc->ctrl_len |=
- SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
- SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
- SQ_CTRL_SET(owner, OWNER);
+ cpu_to_le32(SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
+ SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
+ SQ_CTRL_SET(owner, OWNER));
/* compact wqe queue_info will transfer to chip */
wqe_desc->queue_info = 0;
return;
}
- wqe_desc->ctrl_len |= SQ_CTRL_SET(nr_descs, BUFDESC_NUM) |
- SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN) |
- SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
- SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
- SQ_CTRL_SET(owner, OWNER);
+ wqe_desc->ctrl_len |=
+ cpu_to_le32(SQ_CTRL_SET(nr_descs, BUFDESC_NUM) |
+ SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN) |
+ SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
+ SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
+ SQ_CTRL_SET(owner, OWNER));
wqe_desc->queue_info = queue_info;
- wqe_desc->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, UC);
+ wqe_desc->queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(1, UC));
- if (!SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS)) {
+ if (!SQ_CTRL_QUEUE_INFO_GET(le32_to_cpu(wqe_desc->queue_info), MSS)) {
wqe_desc->queue_info |=
- SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_DEFAULT, MSS);
+ cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_DEFAULT, MSS));
- } else if (SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS) <
- HINIC3_TX_MSS_MIN) {
+ } else if (SQ_CTRL_QUEUE_INFO_GET(le32_to_cpu(wqe_desc->queue_info), MSS) <
+ HINIC3_TX_MSS_MIN) {
/* mss should not be less than 80 */
- wqe_desc->queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK;
+ wqe_desc->queue_info &=
+ cpu_to_le32(~SQ_CTRL_QUEUE_INFO_MSS_MASK);
wqe_desc->queue_info |=
- SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_MIN, MSS);
+ cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_MIN, MSS));
}
}
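For the read-back direction: assuming the GET macros are plain FIELD_GET() wrappers that expect host-order values (which is why the MSS checks above now convert first), the __le32 word must be swapped before the field is extracted. A stand-in sketch:

#include <linux/bitfield.h>
#include <linux/kernel.h>

#define DEMO_MSS_MASK	GENMASK(26, 13)

static u32 demo_get_mss(__le32 queue_info)
{
	/* convert to host order first; FIELD_GET() knows nothing
	 * about the word's on-wire byte order
	 */
	return FIELD_GET(DEMO_MSS_MASK, le32_to_cpu(queue_info));
}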
{
struct hinic3_sq_wqe_combo wqe_combo = {};
struct hinic3_tx_info *tx_info;
- u32 offload, queue_info = 0;
struct hinic3_sq_task task;
u16 wqebb_cnt, num_sge;
+ __le32 queue_info = 0;
u16 saved_wq_prod_idx;
u16 owner, pi = 0;
u8 saved_sq_owner;
+ u32 offload;
int err;
if (unlikely(skb->len < MIN_SKB_LEN)) {