// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port. This allows a decision to be made after finding
 * the first socket. An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */

#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

DEFINE_SPINLOCK(reuseport_lock);

static DEFINE_IDA(reuseport_ida);
static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse,
			       struct sock_reuseport *reuse, bool bind_inany);
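/* Example (hypothetical userspace usage, not part of this file): every
 * socket joining the same group sets SO_REUSEPORT before bind(), e.g.:
 *
 *	int on = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, backlog);
 */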
void reuseport_update_incoming_cpu(struct sock *sk, int val)
{
	struct sock_reuseport *reuse;
	int old_sk_incoming_cpu;

	if (unlikely(!rcu_access_pointer(sk->sk_reuseport_cb))) {
		/* Paired with READ_ONCE() in sk_incoming_cpu_update()
		 * and compute_score().
		 */
		WRITE_ONCE(sk->sk_incoming_cpu, val);
		return;
	}

	spin_lock_bh(&reuseport_lock);

	/* This must be done under reuseport_lock to avoid a race with
	 * reuseport_grow(), which accesses sk->sk_incoming_cpu without
	 * lock_sock() when detaching a shutdown()ed sk.
	 *
	 * Paired with READ_ONCE() in reuseport_select_sock_by_hash().
	 */
	old_sk_incoming_cpu = sk->sk_incoming_cpu;
	WRITE_ONCE(sk->sk_incoming_cpu, val);

	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* reuseport_grow() has detached a closed sk */
	if (!reuse)
		goto out;

	if (old_sk_incoming_cpu < 0 && val >= 0)
		__reuseport_get_incoming_cpu(reuse);
	else if (old_sk_incoming_cpu >= 0 && val < 0)
		__reuseport_put_incoming_cpu(reuse);

out:
	spin_unlock_bh(&reuseport_lock);
}
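/* Sketch of the caller side (an assumption about the call site, hedged):
 * val typically arrives from setsockopt(SO_INCOMING_CPU), where a negative
 * value clears the CPU preference, e.g.:
 *
 *	int cpu = 3;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, sizeof(cpu));
 */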
int reuseport_alloc(struct sock *sk, bool bind_inany)
{
	struct sock_reuseport *reuse;
	int id, ret = 0;

	/* bh lock used since this function call may precede hlist lock in
	 * soft irq of receive path or setsockopt from process context
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path.  Nothing to do when we lose the race.
	 */
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	if (reuse) {
		if (reuse->num_closed_socks) {
			/* sk was shutdown()ed before */
			ret = reuseport_resurrect(sk, reuse, NULL, bind_inany);
			goto out;
		}

		/* Only set reuse->bind_inany if the bind_inany is true.
		 * Otherwise, it will overwrite the reuse->bind_inany
		 * which was set by the bind/hash path.
		 */
		if (bind_inany)
			reuse->bind_inany = bind_inany;
		goto out;
	}

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		ret = -ENOMEM;
		goto out;
	}

	id = ida_alloc(&reuseport_ida, GFP_ATOMIC);
	if (id < 0) {
		kfree(reuse);
		ret = id;
		goto out;
	}

	reuse->reuseport_id = id;
	reuse->bind_inany = bind_inany;
	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return ret;
}
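/* For illustration (hedged; exact call sites may differ by tree): the
 * setsockopt path reaches reuseport_alloc() via reuseport_attach_prog()
 * on an unhashed socket, while the bind/hash path reaches it when the
 * first listener creates the group, roughly:
 *
 *	if (!rcu_access_pointer(sk->sk_reuseport_cb))
 *		err = reuseport_alloc(sk, bind_inany);
 */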
static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX) {
		if (reuse->num_closed_socks) {
			/* Make room by removing a closed sk.
			 * The child has already been migrated.
			 * Only reqsk left at this point.
			 */
			struct sock *sk;

			sk = reuse->socks[reuse->max_socks - reuse->num_closed_socks];
			RCU_INIT_POINTER(sk->sk_reuseport_cb, NULL);
			__reuseport_detach_closed_sock(sk, reuse);

			return reuse;
		}

		return NULL;
	}

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->num_socks = reuse->num_socks;
	more_reuse->num_closed_socks = reuse->num_closed_socks;
	more_reuse->prog = reuse->prog;
	more_reuse->reuseport_id = reuse->reuseport_id;
	more_reuse->bind_inany = reuse->bind_inany;
	more_reuse->has_conns = reuse->has_conns;
	more_reuse->incoming_cpu = reuse->incoming_cpu;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));
	memcpy(more_reuse->socks +
	       (more_reuse->max_socks - more_reuse->num_closed_socks),
	       reuse->socks + (reuse->max_socks - reuse->num_closed_socks),
	       reuse->num_closed_socks * sizeof(struct sock *));
	more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);

	for (i = 0; i < reuse->max_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}
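/* Layout note (descriptive summary of the code above): socks[] holds two
 * sections. Listening sockets fill socks[0..num_socks-1] from the bottom;
 * closed (shutdown()ed) sockets are stacked from the top, at
 * socks[max_socks - num_closed_socks..max_socks - 1]. Growing doubles
 * max_socks, so both sections are copied to their new positions and every
 * member's sk_reuseport_cb is repointed at the new group.
 */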
/**
 *  reuseport_add_sock - Add a socket to the reuseport group of another.
 *  @sk:  New socket to add to the group.
 *  @sk2: Socket belonging to the existing reuseport group.
 *  @bind_inany: Whether or not the group is bound to a local INANY address.
 *
 *  May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
	struct sock_reuseport *old_reuse, *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2, bind_inany);

		if (err)
			return err;
	}
	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					      lockdep_is_held(&reuseport_lock));
	if (old_reuse && old_reuse->num_closed_socks) {
		/* sk was shutdown()ed before */
		int err = reuseport_resurrect(sk, old_reuse, reuse,
					      reuse->bind_inany);

		spin_unlock_bh(&reuseport_lock);
		return err;
	}

	if (old_reuse && old_reuse->num_socks) {
		spin_unlock_bh(&reuseport_lock);
		return -EBUSY;
	}

	if (reuse->num_socks + reuse->num_closed_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	__reuseport_add_sock(sk, reuse);
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	if (old_reuse)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);
	return 0;
}
EXPORT_SYMBOL(reuseport_add_sock);
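/* Example (hedged sketch of a caller, not from this file): the inet hash
 * path adds a second listener to an existing group roughly as:
 *
 *	if (sk2)
 *		err = reuseport_add_sock(sk, sk2, inet_rcv_saddr_any(sk));
 *	else
 *		err = reuseport_alloc(sk, inet_rcv_saddr_any(sk));
 */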
static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse,
			       struct sock_reuseport *reuse, bool bind_inany)
{
	if (old_reuse == reuse) {
		/* If sk was in the same reuseport group, just pop sk out of
		 * the closed section and push sk into the listening section.
		 */
		__reuseport_detach_closed_sock(sk, old_reuse);
		__reuseport_add_sock(sk, old_reuse);
		return 0;
	}
	if (!reuse) {
		/* In bind()/listen() path, we cannot carry over the eBPF prog
		 * for the shutdown()ed socket. In setsockopt() path, we should
		 * not change the eBPF prog of listening sockets by attaching a
		 * prog to the shutdown()ed socket. Thus, we will allocate a new
		 * reuseport group and detach sk from the old group.
		 */
		int id;

		reuse = __reuseport_alloc(INIT_SOCKS);
		if (!reuse)
			return -ENOMEM;

		id = ida_alloc(&reuseport_ida, GFP_ATOMIC);
		if (id < 0) {
			kfree(reuse);
			return id;
		}
		reuse->reuseport_id = id;
		reuse->bind_inany = bind_inany;
	} else {
		/* Move sk from the old group to the new one if
		 * - all the other listeners in the old group were close()d or
		 *   shutdown()ed, and then sk2 has listen()ed on the same port
		 * OR
		 * - sk listen()ed without bind() (or with autobind), was
		 *   shutdown()ed, and then listen()s on another port which
		 *   sk2 listen()s on.
		 */
		if (reuse->num_socks + reuse->num_closed_socks ==
		    reuse->max_socks) {
			reuse = reuseport_grow(reuse);
			if (!reuse)
				return -ENOMEM;
		}
	}
	__reuseport_detach_closed_sock(sk, old_reuse);
	__reuseport_add_sock(sk, reuse);
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	if (old_reuse->num_socks + old_reuse->num_closed_socks == 0)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);

	return 0;
}

void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* reuseport_grow() has detached a closed sk */
	if (!reuse)
		goto out;
	/* Notify the bpf side. The sk may be added to a sockarray map.
	 * If so, sockarray logic will remove it from the map.
	 *
	 * Other bpf map types that work with reuseport, like sockmap,
	 * don't need an explicit callback from here. They override sk
	 * unhash/close ops to remove the sk from the map before we
	 * get to this point.
	 */
	bpf_sk_reuseport_detach(sk);

	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	if (!__reuseport_detach_closed_sock(sk, reuse))
		__reuseport_detach_sock(sk, reuse);

	if (reuse->num_socks + reuse->num_closed_socks == 0)
		call_rcu(&reuse->rcu, reuseport_free_rcu);

out:
	spin_unlock_bh(&reuseport_lock);
}
void reuseport_stop_listen_sock(struct sock *sk)
{
	if (sk->sk_protocol == IPPROTO_TCP) {
		struct sock_reuseport *reuse;
		struct bpf_prog *prog;

		spin_lock_bh(&reuseport_lock);

		reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
						  lockdep_is_held(&reuseport_lock));
		prog = rcu_dereference_protected(reuse->prog,
						 lockdep_is_held(&reuseport_lock));

		if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req) ||
		    (prog && prog->expected_attach_type == BPF_SK_REUSEPORT_SELECT_OR_MIGRATE)) {
			/* Migration capable, move sk from the listening section
			 * to the closed section.
			 */
			bpf_sk_reuseport_detach(sk);

			__reuseport_detach_sock(sk, reuse);
			__reuseport_add_closed_sock(sk, reuse);

			spin_unlock_bh(&reuseport_lock);
			return;
		}

		spin_unlock_bh(&reuseport_lock);
	}

	/* Not capable to do migration, detach immediately */
	reuseport_detach_sock(sk);
}
static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
				   struct bpf_prog *prog, struct sk_buff *skb,
				   int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}
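/* Design note: cloning a shared skb above keeps the BPF program's
 * pskb_pull()/__skb_push() adjustments from being observed by other
 * holders of the skb. The program sees the payload (not the header) and
 * returns an index into socks[]; an out-of-range index yields NULL so the
 * caller can fall back to hash-based selection.
 */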
static struct sock *reuseport_select_sock_by_hash(struct sock_reuseport *reuse,
						  u32 hash, u16 num_socks)
{
	struct sock *first_valid_sk = NULL;
	int i, j;

	i = j = reciprocal_scale(hash, num_socks);
	do {
		struct sock *sk = reuse->socks[i];

		if (sk->sk_state != TCP_ESTABLISHED) {
			/* Paired with WRITE_ONCE() in __reuseport_(get|put)_incoming_cpu(). */
			if (!READ_ONCE(reuse->incoming_cpu))
				return sk;

			/* Paired with WRITE_ONCE() in reuseport_update_incoming_cpu(). */
			if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
				return sk;

			if (!first_valid_sk)
				first_valid_sk = sk;
		}

		i++;
		if (i >= num_socks)
			i = 0;
	} while (i != j);

	return first_valid_sk;
}
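/* reciprocal_scale(hash, n) maps a 32-bit hash onto [0, n) without a
 * division, as (u32)(((u64)hash * n) >> 32); the do/while above then
 * walks socks[] circularly from that starting slot. A minimal standalone
 * sketch of the scaling step:
 *
 *	static inline u32 example_scale(u32 val, u32 ep_ro)
 *	{
 *		return (u32)(((u64)val * ep_ro) >> 32);
 *	}
 */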
/**
 *  reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: First socket in the group.
 *  @hash: When no BPF filter is available, use this hash to select.
 *  @skb: skb to run through BPF filter.
 *  @hdr_len: BPF filter expects skb data pointer at payload data.  If
 *    the skb does not yet point at the payload, this parameter represents
 *    how far the pointer needs to advance to reach the payload.
 *  Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in __reuseport_add_sock() */
		smp_rmb();

		if (prog && skb)
			sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);

		/* no bpf or invalid bpf result: fall back to hash usage */
		if (!sk2)
			sk2 = reuseport_select_sock_by_hash(reuse, hash, socks);
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);
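/* Callers (hedged; helper names vary across trees): the TCP and UDP
 * listener lookup paths call reuseport_select_sock() once the first
 * matching listener is found, e.g. via inet_lookup_reuseport() and the
 * UDP socket lookup's reuseport path.
 */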
/**
 *  reuseport_migrate_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: close()ed or shutdown()ed socket in the group.
 *  @migrating_sk: ESTABLISHED/SYN_RECV full socket in the accept queue or
 *    NEW_SYN_RECV request socket during 3WHS.
 *  @skb: skb to run through BPF filter.
 *  Returns a socket (with sk_refcnt +1) that should accept the child socket
 *  (or NULL on error).
 */
struct sock *reuseport_migrate_sock(struct sock *sk,
				    struct sock *migrating_sk,
				    struct sk_buff *skb)
{
	struct sock_reuseport *reuse;
	struct sock *nsk = NULL;
	bool allocated = false;
	struct bpf_prog *prog;
	u16 socks;
	u32 hash;
	rcu_read_lock();

	reuse = rcu_dereference(sk->sk_reuseport_cb);
	if (!reuse)
		goto out;

	socks = READ_ONCE(reuse->num_socks);
	if (unlikely(!socks))
		goto failure;

	/* paired with smp_wmb() in __reuseport_add_sock() */
	smp_rmb();

	hash = migrating_sk->sk_hash;
	prog = rcu_dereference(reuse->prog);
	if (!prog || prog->expected_attach_type != BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) {
		if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req))
			goto select_by_hash;
		goto failure;
	}
	if (!skb) {
		skb = alloc_skb(0, GFP_ATOMIC);
		if (!skb)
			goto failure;
		allocated = true;
	}
	nsk = bpf_run_sk_reuseport(prog, sk, skb, migrating_sk, hash);

	if (allocated)
		kfree_skb(skb);

select_by_hash:
	if (!nsk)
		nsk = reuseport_select_sock_by_hash(reuse, hash, socks);

	if (IS_ERR_OR_NULL(nsk) ||
	    unlikely(!refcount_inc_not_zero(&nsk->sk_refcnt))) {
		nsk = NULL;
		goto failure;
	}

out:
	rcu_read_unlock();
	return nsk;

failure:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
	goto out;
}
EXPORT_SYMBOL(reuseport_migrate_sock);

int reuseport_detach_prog(struct sock *sk)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	old_prog = NULL;
	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* reuse must be checked after acquiring the reuseport_lock
	 * because reuseport_grow() can detach a closed sk.
	 */
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return sk->sk_reuseport ? -ENOENT : -EINVAL;
	}

	if (sk_unhashed(sk) && reuse->num_closed_socks) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOENT;
	}

	old_prog = rcu_replace_pointer(reuse->prog, old_prog,
				       lockdep_is_held(&reuseport_lock));
	spin_unlock_bh(&reuseport_lock);

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_detach_prog);
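/* Example (hypothetical userspace usage, not part of this file): attaching
 * and detaching a selection program maps onto reuseport_attach_prog() and
 * reuseport_detach_prog() via setsockopt():
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
 *		   &prog_fd, sizeof(prog_fd));
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_REUSEPORT_BPF, NULL, 0);
 */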