* Reduce the size of the RX ring to a fraction of the fill ring size.
* iv. fill queue empty
* Do not populate the fill queue and then try to receive pkts.
+ * f. bpf_link resource persistence
+ * Configure sockets at indexes 0 and 1, run traffic on queue id 0,
+ * then remove the xsk sockets from queue 0 on both veth interfaces and
+ * finally run traffic on queue id 1.
*
- * Total tests: 10
+ * Total tests: 12
*
* Flow:
* -----
#define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__)
#define print_ksft_result(void)\
- (ksft_test_result_pass("PASS: %s %s %s%s%s\n", configured_mode ? "DRV" : "SKB",\
+ (ksft_test_result_pass("PASS: %s %s %s%s%s%s\n", configured_mode ? "DRV" : "SKB",\
test_type == TEST_TYPE_POLL ? "POLL" : "NOPOLL",\
test_type == TEST_TYPE_TEARDOWN ? "Socket Teardown" : "",\
test_type == TEST_TYPE_BIDI ? "Bi-directional Sockets" : "",\
- test_type == TEST_TYPE_STATS ? "Stats" : ""))
+ test_type == TEST_TYPE_STATS ? "Stats" : "",\
+ test_type == TEST_TYPE_BPF_RES ? "BPF RES" : ""))
static void init_sync_resources(void)
{
memcpy(xsk_umem__get_data(umem->buffer, addr), pkt_data, PKT_SIZE);
}
-static void xsk_configure_umem(struct ifobject *data, void *buffer, u64 size)
+static void xsk_configure_umem(struct ifobject *data, void *buffer, int idx)
{
- int ret;
struct xsk_umem_config cfg = {
.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
.frame_headroom = frame_headroom,
.flags = XSK_UMEM__DEFAULT_FLAGS
};
+ int size = num_frames * XSK_UMEM__DEFAULT_FRAME_SIZE;
+ struct xsk_umem_info *umem;
+ int ret;
- data->umem = calloc(1, sizeof(struct xsk_umem_info));
- if (!data->umem)
+ umem = calloc(1, sizeof(struct xsk_umem_info));
+ if (!umem)
exit_with_error(errno);
- ret = xsk_umem__create(&data->umem->umem, buffer, size,
- &data->umem->fq, &data->umem->cq, &cfg);
+ ret = xsk_umem__create(&umem->umem, buffer, size,
+ &umem->fq, &umem->cq, &cfg);
if (ret)
exit_with_error(ret);
- data->umem->buffer = buffer;
+ umem->buffer = buffer;
+
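+ /* store the new umem at the given slot; callers pick the active entry via ifobject->umem */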
+ data->umem_arr[idx] = umem;
}
static void xsk_populate_fill_ring(struct xsk_umem_info *umem)
xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS);
}
-static int xsk_configure_socket(struct ifobject *ifobject)
+static int xsk_configure_socket(struct ifobject *ifobject, int idx)
{
struct xsk_socket_config cfg;
+ struct xsk_socket_info *xsk;
struct xsk_ring_cons *rxr;
struct xsk_ring_prod *txr;
int ret;
- ifobject->xsk = calloc(1, sizeof(struct xsk_socket_info));
- if (!ifobject->xsk)
+ xsk = calloc(1, sizeof(struct xsk_socket_info));
+ if (!xsk)
exit_with_error(errno);
- ifobject->xsk->umem = ifobject->umem;
+ xsk->umem = ifobject->umem;
cfg.rx_size = rxqsize;
cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
cfg.libbpf_flags = 0;
cfg.bind_flags = xdp_bind_flags;
if (test_type != TEST_TYPE_BIDI) {
- rxr = (ifobject->fv.vector == rx) ? &ifobject->xsk->rx : NULL;
- txr = (ifobject->fv.vector == tx) ? &ifobject->xsk->tx : NULL;
+ rxr = (ifobject->fv.vector == rx) ? &xsk->rx : NULL;
+ txr = (ifobject->fv.vector == tx) ? &xsk->tx : NULL;
} else {
- rxr = &ifobject->xsk->rx;
- txr = &ifobject->xsk->tx;
+ rxr = &xsk->rx;
+ txr = &xsk->tx;
}
- ret = xsk_socket__create(&ifobject->xsk->xsk, ifobject->ifname,
- opt_queue, ifobject->umem->umem, rxr, txr, &cfg);
-
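+ /* the socket's index in xsk_arr doubles as the queue id it binds to */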
+ ret = xsk_socket__create(&xsk->xsk, ifobject->ifname, idx,
+ ifobject->umem->umem, rxr, txr, &cfg);
if (ret)
return 1;
+ ifobject->xsk_arr[idx] = xsk;
+
return 0;
}
opterr = 0;
for (;;) {
- c = getopt_long(argc, argv, "i:q:DC:v", long_options, &option_index);
+ c = getopt_long(argc, argv, "i:DC:v", long_options, &option_index);
if (c == -1)
break;
MAX_INTERFACES_NAMESPACE_CHARS);
interface_index++;
break;
- case 'q':
- opt_queue = atoi(optarg);
- break;
case 'D':
debug_pkt_dump = 1;
break;
static void thread_common_ops(struct ifobject *ifobject, void *bufs)
{
+ int umem_sz = num_frames * XSK_UMEM__DEFAULT_FRAME_SIZE;
int ctr = 0;
int ret;
pthread_attr_setstacksize(&attr, THREAD_STACK);
- bufs = mmap(NULL, num_frames * XSK_UMEM__DEFAULT_FRAME_SIZE,
+ ifobject->ns_fd = switch_namespace(ifobject->nsname);
+
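+ /* the bpf_link resource test needs a second umem, so reserve twice the buffer area */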
+ if (test_type == TEST_TYPE_BPF_RES)
+ umem_sz *= 2;
+
+ bufs = mmap(NULL, umem_sz,
PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (bufs == MAP_FAILED)
exit_with_error(errno);
- ifobject->ns_fd = switch_namespace(ifobject->nsname);
-
- xsk_configure_umem(ifobject, bufs, num_frames * XSK_UMEM__DEFAULT_FRAME_SIZE);
- ret = xsk_configure_socket(ifobject);
+ xsk_configure_umem(ifobject, bufs, 0);
+ ifobject->umem = ifobject->umem_arr[0];
+ ret = xsk_configure_socket(ifobject, 0);
/* Retry Create Socket if it fails as xsk_socket__create()
* is asynchronous
*/
while (ret && ctr < SOCK_RECONF_CTR) {
- xsk_configure_umem(ifobject, bufs, num_frames * XSK_UMEM__DEFAULT_FRAME_SIZE);
- ret = xsk_configure_socket(ifobject);
+ xsk_configure_umem(ifobject, bufs, 0);
+ ifobject->umem = ifobject->umem_arr[0];
+ ret = xsk_configure_socket(ifobject, 0);
usleep(USLEEP_MAX);
ctr++;
}
if (ctr >= SOCK_RECONF_CTR)
exit_with_error(ret);
+ ifobject->umem = ifobject->umem_arr[0];
+ ifobject->xsk = ifobject->xsk_arr[0];
+
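+ /* the second umem/socket pair, bound to queue 1, lives in the upper half of the mmapped buffer */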
+ if (test_type == TEST_TYPE_BPF_RES) {
+ xsk_configure_umem(ifobject, (u8 *)bufs + (umem_sz / 2), 1);
+ ifobject->umem = ifobject->umem_arr[1];
+ ret = xsk_configure_socket(ifobject, 1);
+ }
+
+ ifobject->umem = ifobject->umem_arr[0];
+ ifobject->xsk = ifobject->xsk_arr[0];
print_verbose("Interface [%s] vector [%s]\n",
ifobject->ifname, ifobject->fv.vector == tx ? "Tx" : "Rx");
}
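+/*
+ * BIDI and BPF_RES tests run the validation twice on the same sockets, so the
+ * xsk resources are torn down only for single-pass tests or on the second pass.
+ */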
+static bool testapp_is_test_two_stepped(void)
+{
+ return (test_type != TEST_TYPE_BIDI && test_type != TEST_TYPE_BPF_RES) || second_step;
+}
+
+static void testapp_cleanup_xsk_res(struct ifobject *ifobj)
+{
+ if (testapp_is_test_two_stepped()) {
+ xsk_socket__delete(ifobj->xsk->xsk);
+ (void)xsk_umem__delete(ifobj->umem->umem);
+ }
+}
+
static void *worker_testapp_validate_tx(void *arg)
{
struct udphdr *udp_hdr =
struct generic_data data;
void *bufs = NULL;
- if (!bidi_pass)
+ if (!second_step)
thread_common_ops(ifobject, bufs);
for (int i = 0; i < num_frames; i++) {
(opt_pkt_count - 1), ifobject->ifname);
tx_only_all(ifobject);
- if (test_type != TEST_TYPE_BIDI || bidi_pass) {
- xsk_socket__delete(ifobject->xsk->xsk);
- (void)xsk_umem__delete(ifobject->umem->umem);
- }
+ testapp_cleanup_xsk_res(ifobject);
pthread_exit(NULL);
}
struct pollfd fds[MAX_SOCKS] = { };
void *bufs = NULL;
- if (!bidi_pass)
+ if (!second_step)
thread_common_ops(ifobject, bufs);
if (stat_test_type != STAT_TEST_RX_FILL_EMPTY)
if (test_type == TEST_TYPE_TEARDOWN)
print_verbose("Destroying socket\n");
- if ((test_type != TEST_TYPE_BIDI) || bidi_pass) {
- xsk_socket__delete(ifobject->xsk->xsk);
- (void)xsk_umem__delete(ifobject->umem->umem);
- }
+ testapp_cleanup_xsk_res(ifobject);
pthread_exit(NULL);
}
{
struct timespec max_wait = { 0, 0 };
bool bidi = test_type == TEST_TYPE_BIDI;
+ bool bpf = test_type == TEST_TYPE_BPF_RES;
pthread_attr_init(&attr);
pthread_attr_setstacksize(&attr, THREAD_STACK);
free(pkt_buf);
}
- if (!(test_type == TEST_TYPE_TEARDOWN) && !bidi && !(test_type == TEST_TYPE_STATS))
+ if (test_type != TEST_TYPE_TEARDOWN && !bidi && !bpf && test_type != TEST_TYPE_STATS)
print_ksft_result();
}
sigvar = 0;
print_verbose("Creating socket\n");
testapp_validate();
- if (!bidi_pass) {
+ if (!second_step) {
print_verbose("Switching Tx/Rx vectors\n");
swap_vectors(ifdict[1], ifdict[0]);
}
- bidi_pass++;
+ second_step = true;
}
swap_vectors(ifdict[0], ifdict[1]);
print_ksft_result();
}
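+/* drop the queue 0 sockets and umems and switch both interfaces to the pair bound to queue 1 */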
+static void swap_xsk_res(void)
+{
+ xsk_socket__delete(ifdict_tx->xsk->xsk);
+ xsk_umem__delete(ifdict_tx->umem->umem);
+ xsk_socket__delete(ifdict_rx->xsk->xsk);
+ xsk_umem__delete(ifdict_rx->umem->umem);
+ ifdict_tx->umem = ifdict_tx->umem_arr[1];
+ ifdict_tx->xsk = ifdict_tx->xsk_arr[1];
+ ifdict_rx->umem = ifdict_rx->umem_arr[1];
+ ifdict_rx->xsk = ifdict_rx->xsk_arr[1];
+}
+
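+/*
+ * The first pass runs traffic through the queue 0 sockets; later passes reuse
+ * the queue 1 pair to check that the resources set up initially keep working.
+ */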
+static void testapp_bpf_res(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_BPF_ITER; i++) {
+ pkt_counter = 0;
+ prev_pkt = -1;
+ sigvar = 0;
+ print_verbose("Creating socket\n");
+ testapp_validate();
+ if (!second_step)
+ swap_xsk_res();
+ second_step = true;
+ }
+
+ print_ksft_result();
+}
+
static void testapp_stats(void)
{
for (int i = 0; i < STAT_TEST_TYPE_MAX; i++) {
/* reset defaults after potential previous test */
xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
pkt_counter = 0;
- bidi_pass = 0;
+ second_step = false;
prev_pkt = -1;
sigvar = 0;
stat_test_type = -1;
rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
+ configured_mode = mode;
+
switch (mode) {
case (TEST_MODE_SKB):
xdp_flags |= XDP_FLAGS_SKB_MODE;
case TEST_TYPE_BIDI:
testapp_bidi();
break;
+ case TEST_TYPE_BPF_RES:
+ testapp_bpf_res();
+ break;
default:
testapp_validate();
break;
int main(int argc, char **argv)
{
struct rlimit _rlim = { RLIM_INFINITY, RLIM_INFINITY };
+ bool failure = false;
int i, j;
if (setrlimit(RLIMIT_MEMLOCK, &_rlim))
exit_with_error(errno);
ifdict[i]->ifdict_index = i;
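+ /* room for two socket/umem pairs per interface (queue ids 0 and 1) */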
+ ifdict[i]->xsk_arr = calloc(2, sizeof(struct xsk_socket_info *));
+ if (!ifdict[i]->xsk_arr) {
+ failure = true;
+ goto cleanup;
+ }
+ ifdict[i]->umem_arr = calloc(2, sizeof(struct xsk_umem_info *));
+ if (!ifdict[i]->umem_arr) {
+ failure = true;
+ goto cleanup;
+ }
}
setlocale(LC_ALL, "");
run_pkt_test(i, j);
}
+ destroy_sync_resources();
+
+cleanup:
for (int i = 0; i < MAX_INTERFACES; i++) {
if (ifdict[i]->ns_fd != -1)
close(ifdict[i]->ns_fd);
+ free(ifdict[i]->xsk_arr);
+ free(ifdict[i]->umem_arr);
free(ifdict[i]);
}
- destroy_sync_resources();
+ if (failure)
+ exit_with_error(errno);
ksft_exit_pass();