}
#ifdef CONFIG_BPF_SYSCALL
+union bpf_tcp_iter_batch_item {
+ struct sock *sk;
+};
+
struct bpf_tcp_iter_state {
struct tcp_iter_state state;
unsigned int cur_sk;
unsigned int end_sk;
unsigned int max_sk;
- struct sock **batch;
+ union bpf_tcp_iter_batch_item *batch;
};
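
A minimal user-space sketch of the wrapper pattern introduced above, assuming the point of the one-member union is to let the batch element grow an alternative representation later without another tree-wide rename; the batch_item name and the cookie member below are illustrative only, not part of the patch:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct sock;                        /* opaque stand-in for the kernel type */

    /* Same shape as bpf_tcp_iter_batch_item; the cookie member is made up
     * here purely to show what the union leaves room for. */
    union batch_item {
        struct sock *sk;
        uint64_t cookie;
    };

    int main(void)
    {
        unsigned int max_sk = 16;
        union batch_item *batch;

        /* Sized off the element type, as in bpf_iter_tcp_realloc_batch, so
         * switching from a plain pointer to the union needs no size fixups. */
        batch = calloc(max_sk, sizeof(*batch));
        if (!batch)
            return 1;

        batch[0].sk = NULL;             /* every access now goes through .sk */
        printf("element size: %zu bytes\n", sizeof(*batch));
        free(batch);
        return 0;
    }
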
struct bpf_iter__tcp {
unsigned int cur_sk = iter->cur_sk;
while (cur_sk < iter->end_sk)
- sock_gen_put(iter->batch[cur_sk++]);
+ sock_gen_put(iter->batch[cur_sk++].sk);
}
static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
unsigned int new_batch_sz, gfp_t flags)
{
- struct sock **new_batch;
+ union bpf_tcp_iter_batch_item *new_batch;
new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
flags | __GFP_NOWARN);
struct sock *sk;
sock_hold(*start_sk);
- iter->batch[iter->end_sk++] = *start_sk;
+ iter->batch[iter->end_sk++].sk = *start_sk;
sk = sk_nulls_next(*start_sk);
*start_sk = NULL;
if (seq_sk_match(seq, sk)) {
if (iter->end_sk < iter->max_sk) {
sock_hold(sk);
- iter->batch[iter->end_sk++] = sk;
+ iter->batch[iter->end_sk++].sk = sk;
} else if (!*start_sk) {
/* Remember where we left off. */
*start_sk = sk;
struct sock *sk;
sock_hold(*start_sk);
- iter->batch[iter->end_sk++] = *start_sk;
+ iter->batch[iter->end_sk++].sk = *start_sk;
sk = sk_nulls_next(*start_sk);
*start_sk = NULL;
if (seq_sk_match(seq, sk)) {
if (iter->end_sk < iter->max_sk) {
sock_hold(sk);
- iter->batch[iter->end_sk++] = sk;
+ iter->batch[iter->end_sk++].sk = sk;
} else if (!*start_sk) {
/* Remember where we left off. */
*start_sk = sk;
WARN_ON_ONCE(iter->end_sk != expected);
done:
bpf_iter_tcp_unlock_bucket(seq);
- return iter->batch[0];
+ return iter->batch[0].sk;
}
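
The two batching hunks above share the same shape. A self-contained sketch of that shape, with invented names (item, take_ref, fill_batch) standing in for the socket types and helpers: matching entries are copied into a fixed-size batch with a reference held on each, and once the batch is full the first entry that did not fit is remembered through *start so the caller can grow the batch and resume from exactly that point.

    #include <stdio.h>

    struct item {
        struct item *next;
        int match;              /* stand-in for seq_sk_match() */
        int refs;
    };

    static void take_ref(struct item *it)       /* plays the role of sock_hold() */
    {
        it->refs++;
    }

    /*
     * Copy up to max matching items, starting at *start, into batch[].
     * If the batch fills up, *start is left pointing at the first item
     * that did not fit ("remember where we left off"); otherwise it ends
     * up NULL.  The return value counts every match seen, like the
     * "expected" total checked by the WARN_ON_ONCE() above.
     */
    static unsigned int fill_batch(struct item **start, struct item **batch,
                                   unsigned int max, unsigned int *end)
    {
        unsigned int expected = 1;
        struct item *it;

        take_ref(*start);
        batch[(*end)++] = *start;

        it = (*start)->next;
        *start = NULL;
        for (; it; it = it->next) {
            if (!it->match)
                continue;
            if (*end < max) {
                take_ref(it);
                batch[(*end)++] = it;
            } else if (!*start) {
                *start = it;    /* resume point if the caller retries */
            }
            expected++;
        }
        return expected;
    }

    int main(void)
    {
        struct item c = { NULL, 1, 0 };
        struct item b = { &c, 0, 0 };
        struct item a = { &b, 1, 0 };
        struct item *batch[1], *start = &a;
        unsigned int end = 0;

        /* Batch of one: 'a' fits, 'c' becomes the remembered resume point. */
        unsigned int expected = fill_batch(&start, batch, 1, &end);

        printf("batched %u of %u, resume pending: %s\n",
               end, expected, start ? "yes" : "no");
        return 0;
    }

Note that the expected count keeps growing even after the batch is full, which appears intended to tell the caller how large a reallocation is needed before resuming from *start.
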
static void *bpf_iter_tcp_seq_start(struct seq_file *seq, loff_t *pos)
* st->bucket. See tcp_seek_last_pos().
*/
st->offset++;
- sock_gen_put(iter->batch[iter->cur_sk++]);
+ sock_gen_put(iter->batch[iter->cur_sk++].sk);
}
if (iter->cur_sk < iter->end_sk)
- sk = iter->batch[iter->cur_sk];
+ sk = iter->batch[iter->cur_sk].sk;
else
sk = bpf_iter_tcp_batch(seq);
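
And a rough sketch of the consume-then-refill flow visible in the last two hunks, with plain integers standing in for held sockets and stub helpers replacing sock_gen_put()/bpf_iter_tcp_batch(): each step releases the entry just shown, and a fresh batch is pulled in only once cur_sk catches up to end_sk.

    #include <stdio.h>

    /* Mirrors the cur_sk/end_sk bookkeeping of bpf_tcp_iter_state. */
    struct iter_state {
        unsigned int cur;
        unsigned int end;
        int *batch;
    };

    static void drop_ref(int entry)                 /* stand-in for sock_gen_put() */
    {
        (void)entry;
    }

    static int refill_batch(struct iter_state *it)  /* stand-in for bpf_iter_tcp_batch() */
    {
        (void)it;
        return -1;              /* pretend the bucket walk is finished */
    }

    /* Advance: release the entry just visited, then either return the next
     * batched entry or fall back to refilling the batch. */
    static int iter_next(struct iter_state *it)
    {
        if (it->cur < it->end)
            drop_ref(it->batch[it->cur++]);

        if (it->cur < it->end)
            return it->batch[it->cur];
        return refill_batch(it);
    }

    int main(void)
    {
        int vals[3] = { 10, 20, 30 };
        struct iter_state it = { .cur = 0, .end = 3, .batch = vals };

        for (int e = vals[0]; e != -1; e = iter_next(&it))
            printf("visit %d\n", e);
        return 0;
    }
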