SYN cookie analysis on 3.10

include/net/request_sock.h:

    static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
    {
            return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
    }

include/net/inet_connection_sock.h:

    static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
    {
            return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
    }

max_qlen_log is computed as log2(min(listen_backlog, somaxconn,
sysctl_max_syn_backlog)), with the argument rounded up to the next power of
two and floored at 8 entries (so max_qlen_log is never below 3). It is
derived along the following path (a worked example follows the code):

socket.c:listen(fd, backlog):

    backlog = min(backlog, somaxconn)
    => af_inet.c:inet_listen(sock, backlog)

       => inet_connection_sock.c:inet_csk_listen_start(sk, backlog)

          sk_max_ack_backlog = backlog
          => request_sock.c:reqsk_queue_alloc(sk, backlog (=nr_table_entries))

             nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
             nr_table_entries = max_t(u32, nr_table_entries, 8);
             nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
             for (lopt->max_qlen_log = 3;
                  (1 << lopt->max_qlen_log) < nr_table_entries;
                  lopt->max_qlen_log++);
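
As a worked example, here is a standalone user-space sketch of that
computation (not kernel code; the numeric values are arbitrary examples):

    #include <stdio.h>

    /* Standalone mirror of the max_qlen_log computation above (not kernel
     * code). listen() already clamped the backlog to somaxconn.
     */
    static unsigned int compute_max_qlen_log(unsigned int backlog,
                                             unsigned int max_syn_backlog)
    {
            unsigned int nr, log;

            nr = backlog < max_syn_backlog ? backlog : max_syn_backlog;
            nr = nr > 8 ? nr : 8;
            /* equivalent to log2(roundup_pow_of_two(nr + 1)), floored at 3 */
            for (log = 3; (1U << log) < nr + 1; log++)
                    ;
            return log;
    }

    int main(void)
    {
            /* e.g. listen(fd, 100) with somaxconn=128 and
             * tcp_max_syn_backlog=128: min(100,128)=100, and 101 rounded up
             * to a power of two gives 128, hence max_qlen_log = 7.
             */
            printf("max_qlen_log = %u\n", compute_max_qlen_log(100, 128));
            return 0;
    }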


tcp_ipv4.c:tcp_v4_conn_request()

- inet_csk_reqsk_queue_is_full() returns true once the listening socket's
  qlen reaches 1 << max_qlen_log (the shift yields a non-zero value exactly
  when qlen >= 1 << max_qlen_log), so basically when
  qlen >= min(backlog, tcp_max_syn_backlog), modulo the power-of-two rounding.

- tcp_syn_flood_action() returns true when sysctl_tcp_syncookies is set. It
  also emits a warning once per listening socket when the feature kicks in
  (a reconstruction of it follows the conclusion below).

    if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
            want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
            if (!want_cookie)
                    goto drop;
    }

=> when the socket's SYN backlog reaches min(backlog, max_backlog), and no
   ISN was already chosen via TIME-WAIT recycling (the !isn part), either
   sysctl_tcp_syncookies is set, in which case want_cookie is set to 1, or
   the SYN is dropped.
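
For reference, tcp_syn_flood_action() in 3.10 looks roughly like this
(reconstructed from memory, so details such as the exact SNMP counter names
may be slightly off):

    static bool tcp_syn_flood_action(struct sock *sk,
                                     const struct sk_buff *skb,
                                     const char *proto)
    {
            const char *msg = "Dropping request";
            bool want_cookie = false;
            struct listen_sock *lopt;

    #ifdef CONFIG_SYN_COOKIES
            if (sysctl_tcp_syncookies) {
                    msg = "Sending cookies";
                    want_cookie = true;
                    NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
            } else
    #endif
                    NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

            /* warn only once per listening socket */
            lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
            if (!lopt->synflood_warned) {
                    lopt->synflood_warned = 1;
                    pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
                            proto, ntohs(tcp_hdr(skb)->dest), msg);
            }
            return want_cookie;
    }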


    /* Accept backlog is full. If we have already queued enough
     * of warm entries in syn queue, drop request. It is better than
     * clogging syn queue with openreqs with exponentially increasing
     * timeout.
     */

sock.h:sk_acceptq_is_full() = sk_ack_backlog > sk_max_ack_backlog
                            = sk_ack_backlog > min(somaxconn, listen_backlog)
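
The helper itself is a one-liner in include/net/sock.h (quoted from memory):

    static inline bool sk_acceptq_is_full(const struct sock *sk)
    {
            return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
    }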

    if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
            NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
            goto drop;
    }
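
"young" request sockets are those whose SYN-ACK has not been retransmitted
yet; the accessors mirror the qlen ones (include/net/inet_connection_sock.h
and include/net/request_sock.h, quoted from memory):

    static inline int inet_csk_reqsk_queue_young(const struct sock *sk)
    {
            return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue);
    }

    static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
    {
            return queue->listen_opt->qlen_young;
    }

So this drop only fires when the accept queue is full AND at least two
pending requests are still fresh, i.e. the SYN queue keeps being refilled.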

====> the code performs these checks in the opposite order, but the
effective priorities are:

1) IF the socket's accept queue >= min(somaxconn, listen_backlog) (with more
   than one pending request still young) THEN drop

2) IF the socket's SYN backlog < min(somaxconn, listen_backlog, tcp_max_syn_backlog) THEN accept

3) IF tcp_syncookies is set THEN send a SYN cookie

4) otherwise drop
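
The same decision restated as a standalone sketch (illustrative only: the
function and parameter names are made up, this is not kernel code):

    enum action { ACCEPT_SYN, SEND_COOKIE, DROP };

    /* Illustrative restatement of the checks in tcp_v4_conn_request();
     * all names are invented for the example.
     */
    static enum action conn_request_decision(unsigned int syn_qlen,
                                             unsigned int max_qlen_log,
                                             unsigned int acceptq_len,
                                             unsigned int acceptq_max,
                                             unsigned int young,
                                             int syncookies)
    {
            int syn_queue_full = syn_qlen >> max_qlen_log;

            if (syn_queue_full && !syncookies)
                    return DROP;                    /* step 4 */
            if (acceptq_len > acceptq_max && young > 1)
                    return DROP;                    /* step 1 */
            if (syn_queue_full)
                    return SEND_COOKIE;             /* step 3 */
            return ACCEPT_SYN;                      /* step 2 */
    }

For instance conn_request_decision(128, 7, 0, 128, 5, 0) returns DROP,
matching step 4: the SYN queue is full and syncookies are off.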

====> the problem is the accept queue getting full, even though it is only
supposed to be filled with fully validated client connections (step 1).


    req = inet_reqsk_alloc(&tcp_request_sock_ops);
    if (!req)
            goto drop;

    ...
    if (!sysctl_tcp_syncookies &&
        (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
         (sysctl_max_syn_backlog >> 2)) &&
        !tcp_peer_is_proven(req, dst, false)) {
            /* Without syncookies last quarter of
             * backlog is filled with destinations,
             * proven to be alive.
             * It means that we continue to communicate
             * to destinations, already remembered
             * to the moment of synflood.
             */
            LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
                           &saddr, ntohs(tcp_hdr(skb)->source));
            goto drop_and_release;
    }
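
For scale (example values only): with sysctl_max_syn_backlog = 1024 the
condition reads 1024 - qlen < 256, so unproven peers start being dropped
once qlen exceeds 768, i.e. the last quarter of the SYN backlog stays
reserved for destinations already known to be alive.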