        /* tcp_fastopen_reset_cipher publishes the new context
         * atomically, so we allow this race to happen here.
         *
         * All call sites of tcp_fastopen_cookie_gen also check
         * for a valid cookie, so this is an acceptable risk.
         */
        get_random_bytes(key, sizeof(key));
        tcp_fastopen_reset_cipher(net, NULL, key, NULL);
}
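
A userspace usage sketch (not part of this kernel file): instead of relying on the randomly generated key above, an administrator can install a fixed 16-byte primary key on a listening socket with the TCP_FASTOPEN_KEY socket option (assuming a libc recent enough to define it). The helper name and the hard-coded key bytes are illustrative only.

#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int install_tfo_key(int listen_fd)
{
        unsigned char key[16];  /* TCP_FASTOPEN_KEY_LENGTH (16) bytes */

        /* Placeholder pattern; use real random bytes in practice. */
        memset(key, 0xA5, sizeof(key));
        return setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN_KEY,
                          key, sizeof(key));
}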
/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
                return;

        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb)
                return;

        tcp_cleanup_skb(skb);
        /* segs_in has been initialized to 1 in tcp_create_openreq_child().
         * Hence, reset segs_in to 0 before calling tcp_segs_in()
         * to avoid double counting. Also, tcp_segs_in() expects
         * skb->len to include the tcp_hdrlen. Hence, it should
         * be called before __skb_pull().
         */
        tp->segs_in = 0;
        tcp_segs_in(tp, skb);
        __skb_pull(skb, tcp_hdrlen(skb));
        sk_forced_mem_schedule(sk, skb->truesize);
        skb_set_owner_r(skb, sk);

        TCP_SKB_CB(skb)->seq++;
        TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        tp->syn_data_acked = 1;

        /* u64_stats_update_begin(&tp->syncp) not needed here,
         * as we certainly are not changing upper 32bit value (0)
         */
        tp->bytes_received = skb->len;

        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
                tcp_fin(sk);
}
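
For context on where that SYN payload originates: a client can place data in the SYN with sendto(MSG_FASTOPEN), which combines connect() with the first write. A minimal sketch; the helper name, address 192.0.2.1, and port are illustrative, and error handling is reduced to the essentials.

#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static ssize_t tfo_sendto(const char *buf, size_t len)
{
        struct sockaddr_in addr = {
                .sin_family = AF_INET,
                .sin_port = htons(443),         /* illustrative port */
        };
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0)
                return -1;
        inet_pton(AF_INET, "192.0.2.1", &addr.sin_addr); /* illustrative */
        /* MSG_FASTOPEN implies the connect(); with a cached cookie the
         * payload travels in the SYN, which is what tcp_fastopen_add_skb()
         * above queues on the passive side.
         */
        return sendto(fd, buf, len, MSG_FASTOPEN,
                      (struct sockaddr *)&addr, sizeof(addr));
}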
static struct sock *tcp_fastopen_create_child(struct sock *sk,
                                              struct sk_buff *skb,
                                              struct request_sock *req)
{
        struct tcp_sock *tp;
        struct sock *child;
        bool own_req;

        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
                                                         NULL, &own_req);
        if (!child)
                return NULL;

        /* Initialize the child socket. Have to fix some values to take
         * into account the child is a Fast Open socket and is created
         * only out of the bits carried in the SYN packet.
         */
        tp = tcp_sk(child);
        /* RFC1323: The window in SYN & SYN/ACK segments is never
         * scaled. So correct it appropriately.
         */
        tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
        tp->max_window = tp->snd_wnd;

        /* Activate the retrans timer so that SYNACK can be retransmitted.
         * The request socket is not added to the ehash
         * because it's been added to the accept queue directly.
         */
        req->timeout = tcp_timeout_init(child);
        tcp_reset_xmit_timer(child, ICSK_TIME_RETRANS, req->timeout, false);
        refcount_set(&req->rsk_refcnt, 2);

        sk_mark_napi_id_set(child, skb);

        /* Now finish processing the fastopen child socket. */
        tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb);

        tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

        tcp_fastopen_add_skb(child, skb);

        tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
        tp->rcv_wup = tp->rcv_nxt;

        /* tcp_conn_request() sends the SYNACK and queues the child
         * into the listener's accept queue.
         */
        return child;
}
static bool tcp_fastopen_queue_check(struct sock *sk)
{
        struct fastopen_queue *fastopenq;
        int max_qlen;

        /* Make sure the listener has enabled fastopen, and we don't
         * exceed the max # of pending TFO requests allowed before trying
         * to validate the cookie, in order to avoid burning CPU cycles
         * unnecessarily.
         *
         * XXX (TFO) - The implication of checking the max_qlen before
         * processing a cookie request is that clients can't differentiate
         * between qlen overflow causing Fast Open to be disabled
         * temporarily vs a server not supporting Fast Open at all.
         */
        fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
        max_qlen = READ_ONCE(fastopenq->max_qlen);
        if (max_qlen == 0)
                return false;

        if (fastopenq->qlen >= max_qlen) {
                struct request_sock *req1;

                spin_lock(&fastopenq->lock);
                req1 = fastopenq->rskq_rst_head;
                if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
                        __NET_INC_STATS(sock_net(sk),
                                        LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
                        spin_unlock(&fastopenq->lock);
                        return false;
                }
                fastopenq->rskq_rst_head = req1->dl_next;
                fastopenq->qlen--;
                spin_unlock(&fastopenq->lock);
                reqsk_put(req1);
        }
        return true;
}
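
The max_qlen checked above comes from userspace: a server opts into TFO by setting the TCP_FASTOPEN socket option on its listening socket, and the option value becomes the fastopenq.max_qlen limit. A minimal sketch; the helper name and the qlen value are illustrative.

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int enable_tfo_listener(int listen_fd)
{
        int qlen = 16;  /* becomes fastopenq.max_qlen; 0 keeps TFO off */

        return setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN,
                          &qlen, sizeof(qlen));
}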
/* Returns the child socket if we should perform Fast Open on the SYN,
 * otherwise NULL. The cookie (foc) may be updated and returned to the client
 * in the SYN-ACK later, e.g. for a Fast Open cookie request (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
                              struct request_sock *req,
                              struct tcp_fastopen_cookie *foc,
                              const struct dst_entry *dst)
{
        bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
        int tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
        struct tcp_fastopen_cookie valid_foc = { .len = -1 };
        struct sock *child;
        int ret = 0;

        if (foc->len == 0) /* Client requests a cookie */
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

        if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
              (syn_data || foc->len >= 0) &&
              tcp_fastopen_queue_check(sk))) {
                foc->len = -1;
                return NULL;
        }

        if (tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
                goto fastopen;
        if (foc->len == 0) {
                /* Client requests a cookie. */
                tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
        } else if (foc->len > 0) {
                ret = tcp_fastopen_cookie_gen_check(sk, req, skb, foc,
                                                    &valid_foc);
                if (!ret) {
                        NET_INC_STATS(sock_net(sk),
                                      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
                } else {
                        /* Cookie is valid. Create a (full) child socket to
                         * accept the data in SYN before returning a SYN-ACK to
                         * ack the data. If we fail to create the socket, fall
                         * back and ack the ISN only but include the same
                         * cookie.
                         *
                         * Note: a data-less SYN with a valid cookie is allowed
                         * to send data in SYN_RECV state.
                         */
fastopen:
                        child = tcp_fastopen_create_child(sk, skb, req);
                        if (child) {
                                if (ret == 2) {
                                        valid_foc.exp = foc->exp;
                                        *foc = valid_foc;
                                        NET_INC_STATS(sock_net(sk),
                                                      LINUX_MIB_TCPFASTOPENPASSIVEALTKEY);
                                } else {
                                        foc->len = -1;
                                }
                                NET_INC_STATS(sock_net(sk),
                                              LINUX_MIB_TCPFASTOPENPASSIVE);
                                tcp_sk(child)->syn_fastopen_child = 1;
                                return child;
                        }
                        NET_INC_STATS(sock_net(sk),
                                      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
                }
        }
        valid_foc.exp = foc->exp;
        *foc = valid_foc;
        return NULL;
}
/* Check the cached Fast Open state on the client before sending a SYN. */
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
                               struct tcp_fastopen_cookie *cookie)
{
        const struct dst_entry *dst;

        tcp_fastopen_cache_get(sk, mss, cookie);

        /* Firewall blackhole issue case */
        if (tcp_fastopen_active_should_disable(sk)) {
                cookie->len = -1;
                return false;
        }

        dst = __sk_dst_get(sk);

        if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
                cookie->len = -1;
                return true;
        }
        if (cookie->len > 0)
                return true;
        tcp_sk(sk)->fastopen_client_fail = TFO_COOKIE_UNAVAILABLE;
        return false;
}
/* This function checks if we want to defer sending SYN until the first
 * write().  We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: return true if we want to defer until application writes data
 *               return false if we want to send out SYN immediately
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
        struct tcp_fastopen_cookie cookie = { .len = 0 };
        struct tcp_sock *tp = tcp_sk(sk);
        u16 mss;

        if (tp->fastopen_connect && !tp->fastopen_req) {
                if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
                        inet_set_bit(DEFER_CONNECT, sk);
                        return true;
                }

                /* Alloc fastopen_req in order for FO option to be included
                 * in SYN
                 */
                tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
                                           sk->sk_allocation);
                if (tp->fastopen_req)
                        tp->fastopen_req->cookie = cookie;
                else
                        *err = -ENOBUFS;
        }
        return false;
}
EXPORT_IPV6_MOD(tcp_fastopen_defer_connect);
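
A client-side usage sketch of the defer-connect path above: with TCP_FASTOPEN_CONNECT set (assuming a libc that defines it), connect() returns without sending a SYN when a valid cookie is cached, and the first write() then carries SYN plus data; without a cached cookie, a regular SYN goes out carrying a cookie request. The helper name and minimal error handling are illustrative.

#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static ssize_t tfo_connect_send(const struct sockaddr_in *addr,
                                const char *buf, size_t len)
{
        int one = 1;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0)
                return -1;
        /* Sets tp->fastopen_connect, which tcp_fastopen_defer_connect()
         * above tests.
         */
        setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
        if (connect(fd, (const struct sockaddr *)addr, sizeof(*addr)) < 0) {
                close(fd);
                return -1;
        }
        /* With a cached cookie this write sends SYN + data in one packet. */
        return write(fd, buf, len);
}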
/* The following code block is to deal with middlebox issues with TFO:
 * Middlebox firewall issues can potentially cause the server's data to be
 * blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 * 1. client side TFO socket receives out of order FIN
 * 2. client side TFO socket receives out of order RST
 * 3. client side TFO socket has timed out three times consecutively during
 *    or after handshake
 * We disable active side TFO globally for 1hr at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1hr when we see a successful active
 * TFO connection with data exchanges.
 */
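
A small self-contained sketch of the doubling schedule just described: with the blackhole timeout sysctl set to 3600 seconds, consecutive disable events yield windows of 1h, 2h, 4h, ..., with the multiplier capped at 2^6 as in tcp_fastopen_active_should_disable() below. The helper name is illustrative.

static unsigned long tfo_disable_window_secs(int tfo_da_times,
                                             unsigned int tfo_bh_timeout)
{
        int shift;

        if (tfo_da_times < 1)
                return 0;       /* active TFO is not currently disabled */
        /* Mirrors multiplier = 1 << min(tfo_da_times - 1, 6) below. */
        shift = tfo_da_times - 1 < 6 ? tfo_da_times - 1 : 6;
        return (unsigned long)tfo_bh_timeout << shift;
}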
/* Disable active TFO and record current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
        struct net *net = sock_net(sk);

        if (!READ_ONCE(net->ipv4.sysctl_tcp_fastopen_blackhole_timeout))
                return;

        /* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
        WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies);

        /* Paired with smp_rmb() in tcp_fastopen_active_should_disable().
         * We want net->ipv4.tfo_active_disable_stamp to be updated first.
         */
        smp_mb__before_atomic();
        atomic_inc(&net->ipv4.tfo_active_disable_times);
}
/* Calculate timeout for tfo active disable
 * Return true if we are still in the active TFO disable period
 * Return false if timeout already expired and we should use active TFO
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
        unsigned int tfo_bh_timeout =
                READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout);
        unsigned long timeout;
        int tfo_da_times;
        int multiplier;

        if (!tfo_bh_timeout)
                return false;

        tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
        if (!tfo_da_times)
                return false;

        /* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */
        smp_rmb();

        /* Double the disable window per event, capping the multiplier at 2^6 */
        multiplier = 1 << min(tfo_da_times - 1, 6);

        /* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */
        timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) +
                  multiplier * tfo_bh_timeout * HZ;
        if (time_before(jiffies, timeout))
                return true;

        /* Mark check bit so we can check for successful active TFO
         * condition and reset tfo_active_disable_times
         */
        tcp_sk(sk)->syn_fastopen_ch = 1;
        return false;
}
/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct net_device *dev;
        struct dst_entry *dst;
        struct sk_buff *skb;

        if (!tp->syn_fastopen)
                return;

        if (!tp->data_segs_in) {
                skb = skb_rb_first(&tp->out_of_order_queue);
                if (skb && !skb_rb_next(skb)) {
                        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
                                tcp_fastopen_active_disable(sk);
                                return;
                        }
                }
        } else if (tp->syn_fastopen_ch &&
                   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
                rcu_read_lock();
                dst = __sk_dst_get(sk);
                dev = dst ? dst_dev_rcu(dst) : NULL;
                if (!(dev && (dev->flags & IFF_LOOPBACK)))
                        atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
                rcu_read_unlock();
        }
}
void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
{
        u32 timeouts = inet_csk(sk)->icsk_retransmits;
        struct tcp_sock *tp = tcp_sk(sk);

        /* Broken middle-boxes may black-hole Fast Open connection during or
         * even after the handshake. Be extremely conservative and pause
         * Fast Open globally after hitting the third consecutive timeout or
         * exceeding the configured timeout limit.
         */
        if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
            (timeouts == 2 || (timeouts < 2 && expired))) {
                tcp_fastopen_active_disable(sk);
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
        }
}