TCP syn cache sysctl MP safe
> On 29 Dec 2024, at 03:59, Alexander Bluhm <bluhm@openbsd.org> wrote:
>
> Hi,
>
> The sysctl values for the TCP syn cache are either atomic or can
> be protected by the syn cache mutex. This allows removing the net
> lock for two TCP sysctls.
>
> ok?
>
You forgot to add 'a' to the locks description in
netinet/tcp_input.c. Otherwise ok mvs.
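
For anyone following along, the pattern described above boils down to:
fields annotated [a] are plain aligned ints that readers may load
without any lock, while [S] fields are only touched under
syn_cache_mtx. Below is a minimal userland sketch of the reader side,
using C11 stdatomic as a stand-in for the kernel's atomic_load_int();
the names are made up for illustration, not taken from the tree:

#include <stdatomic.h>
#include <stdio.h>

/* stand-in for tcp_syn_bucket_limit: an aligned int that readers load
 * without any lock, i.e. what the [a] annotation in the diff means */
static _Atomic int bucket_limit = 105;

/* hot-path check, mirroring
 * "sch_length >= atomic_load_int(&tcp_syn_bucket_limit)" */
static int
bucket_overflowed(int length)
{
	return (length >= atomic_load_explicit(&bucket_limit,
	    memory_order_relaxed));
}

int
main(void)
{
	/* a writer (e.g. a sysctl handler) may store a new limit at any time */
	atomic_store_explicit(&bucket_limit, 64, memory_order_relaxed);
	printf("overflow at length 100: %d\n", bucket_overflowed(100));
	return (0);
}

The point is that a single aligned int load cannot tear, so a reader
sees either the old or the new limit, which is all an advisory limit
needs; no consistency with the rest of the cache state is required.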
> bluhm
>
> Index: netinet/tcp_input.c
> ===================================================================
> RCS file: /data/mirror/openbsd/cvs/src/sys/netinet/tcp_input.c,v
> diff -u -p -r1.414 tcp_input.c
> --- netinet/tcp_input.c 28 Dec 2024 22:17:09 -0000 1.414
> +++ netinet/tcp_input.c 28 Dec 2024 22:49:22 -0000
> @@ -3108,16 +3108,16 @@ tcp_mss_adv(struct mbuf *m, int af)
> */
>
> /* syn hash parameters */
> -int tcp_syn_hash_size = TCP_SYN_HASH_SIZE; /* [N] size of hash table */
> -int tcp_syn_cache_limit = /* [N] global entry limit */
> +int tcp_syn_hash_size = TCP_SYN_HASH_SIZE; /* [S] size of hash table */
> +int tcp_syn_cache_limit = /* [a] global entry limit */
> TCP_SYN_HASH_SIZE * TCP_SYN_BUCKET_SIZE;
> -int tcp_syn_bucket_limit = /* [N] per bucket limit */
> +int tcp_syn_bucket_limit = /* [a] per bucket limit */
> 3 * TCP_SYN_BUCKET_SIZE;
> -int tcp_syn_use_limit = 100000; /* [N] reseed after uses */
> +int tcp_syn_use_limit = 100000; /* [S] reseed after uses */
>
> struct pool syn_cache_pool;
> -struct syn_cache_set tcp_syn_cache[2];
> -int tcp_syn_cache_active;
> +struct syn_cache_set tcp_syn_cache[2]; /* [S] */
> +int tcp_syn_cache_active; /* [S] */
> struct mutex syn_cache_mtx = MUTEX_INITIALIZER(IPL_SOFTNET);
>
> #define SYN_HASH(sa, sp, dp, rand) \
> @@ -3212,7 +3212,7 @@ syn_cache_init(void)
> void
> syn_cache_insert(struct syn_cache *sc, struct tcpcb *tp)
> {
> - struct syn_cache_set *set = &tcp_syn_cache[tcp_syn_cache_active];
> + struct syn_cache_set *set;
> struct syn_cache_head *scp;
> struct syn_cache *sc2;
> int i;
> @@ -3220,6 +3220,8 @@ syn_cache_insert(struct syn_cache *sc, s
> NET_ASSERT_LOCKED();
> MUTEX_ASSERT_LOCKED(&syn_cache_mtx);
>
> + set = &tcp_syn_cache[tcp_syn_cache_active];
> +
> /*
> * If there are no entries in the hash table, reinitialize
> * the hash secrets. To avoid useless cache swaps and
> @@ -3257,7 +3259,7 @@ syn_cache_insert(struct syn_cache *sc, s
> * Make sure that we don't overflow the per-bucket
> * limit or the total cache size limit.
> */
> - if (scp->sch_length >= tcp_syn_bucket_limit) {
> + if (scp->sch_length >= atomic_load_int(&tcp_syn_bucket_limit)) {
> tcpstat_inc(tcps_sc_bucketoverflow);
> /*
> * Someone might attack our bucket hash function. Reseed
> @@ -3279,7 +3281,7 @@ syn_cache_insert(struct syn_cache *sc, s
> #endif
> syn_cache_rm(sc2);
> syn_cache_put(sc2);
> - } else if (set->scs_count >= tcp_syn_cache_limit) {
> + } else if (set->scs_count >= atomic_load_int(&tcp_syn_cache_limit)) {
> struct syn_cache_head *scp2, *sce;
>
> tcpstat_inc(tcps_sc_overflowed);
> Index: netinet/tcp_usrreq.c
> ===================================================================
> RCS file: /data/mirror/openbsd/cvs/src/sys/netinet/tcp_usrreq.c,v
> diff -u -p -r1.233 tcp_usrreq.c
> --- netinet/tcp_usrreq.c 19 Dec 2024 22:11:35 -0000 1.233
> +++ netinet/tcp_usrreq.c 29 Dec 2024 00:51:51 -0000
> @@ -1341,7 +1341,7 @@ tcp_sysctl_tcpstat(void *oldp, size_t *o
> set = &tcp_syn_cache[tcp_syn_cache_active];
> tcpstat.tcps_sc_hash_size = set->scs_size;
> tcpstat.tcps_sc_entry_count = set->scs_count;
> - tcpstat.tcps_sc_entry_limit = tcp_syn_cache_limit;
> + tcpstat.tcps_sc_entry_limit = atomic_load_int(&tcp_syn_cache_limit);
> tcpstat.tcps_sc_bucket_maxlen = 0;
> for (i = 0; i < set->scs_size; i++) {
> if (tcpstat.tcps_sc_bucket_maxlen <
> @@ -1349,7 +1349,7 @@ tcp_sysctl_tcpstat(void *oldp, size_t *o
> tcpstat.tcps_sc_bucket_maxlen =
> set->scs_buckethead[i].sch_length;
> }
> - tcpstat.tcps_sc_bucket_limit = tcp_syn_bucket_limit;
> + tcpstat.tcps_sc_bucket_limit = atomic_load_int(&tcp_syn_bucket_limit);
> tcpstat.tcps_sc_uses_left = set->scs_use;
> mtx_leave(&syn_cache_mtx);
>
> @@ -1364,7 +1364,7 @@ int
> tcp_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
> size_t newlen)
> {
> - int error, nval;
> + int error, oval, nval;
>
> /* All sysctl names at this level are terminal. */
> if (namelen != 1)
> @@ -1457,30 +1457,29 @@ tcp_sysctl(int *name, u_int namelen, voi
> return (tcp_sysctl_tcpstat(oldp, oldlenp, newp));
>
> case TCPCTL_SYN_USE_LIMIT:
> - NET_LOCK();
> + oval = nval = atomic_load_int(&tcp_syn_use_limit);
> error = sysctl_int_bounded(oldp, oldlenp, newp, newlen,
> - &tcp_syn_use_limit, 0, INT_MAX);
> - if (!error && newp != NULL) {
> + &nval, 0, INT_MAX);
> + if (!error && oval != nval) {
> /*
> * Global tcp_syn_use_limit is used when reseeding a
> * new cache. Also update the value in active cache.
> */
> mtx_enter(&syn_cache_mtx);
> - if (tcp_syn_cache[0].scs_use > tcp_syn_use_limit)
> - tcp_syn_cache[0].scs_use = tcp_syn_use_limit;
> - if (tcp_syn_cache[1].scs_use > tcp_syn_use_limit)
> - tcp_syn_cache[1].scs_use = tcp_syn_use_limit;
> + if (tcp_syn_cache[0].scs_use > nval)
> + tcp_syn_cache[0].scs_use = nval;
> + if (tcp_syn_cache[1].scs_use > nval)
> + tcp_syn_cache[1].scs_use = nval;
> + tcp_syn_use_limit = nval;
> mtx_leave(&syn_cache_mtx);
> }
> - NET_UNLOCK();
> return (error);
>
> case TCPCTL_SYN_HASH_SIZE:
> - NET_LOCK();
> - nval = tcp_syn_hash_size;
> + oval = nval = atomic_load_int(&tcp_syn_hash_size);
> error = sysctl_int_bounded(oldp, oldlenp, newp, newlen,
> &nval, 1, 100000);
> - if (!error && nval != tcp_syn_hash_size) {
> + if (!error && oval != nval) {
> /*
> * If global hash size has been changed,
> * switch sets as soon as possible. Then
> @@ -1494,7 +1493,6 @@ tcp_sysctl(int *name, u_int namelen, voi
> tcp_syn_hash_size = nval;
> mtx_leave(&syn_cache_mtx);
> }
> - NET_UNLOCK();
> return (error);
>
> default:
>
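For the archives, the oval/nval dance in the TCPCTL_SYN_USE_LIMIT hunk
is a read-copy-commit: snapshot the current value, let
sysctl_int_bounded() write the user's request into a local, and only
take syn_cache_mtx if the value actually changed, so the clamped
scs_use counters and the new limit become visible together. A rough
userland sketch of that shape, with pthread and invented names standing
in for the kernel primitives:

#include <limits.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cache_mtx = PTHREAD_MUTEX_INITIALIZER;
static int use_limit = 100000;		/* stands in for tcp_syn_use_limit [S] */
static int scs_use[2] = { 100000, 100000 };	/* per-set countdown, also [S] */

/* stands in for sysctl_int_bounded(): accept the request into *valp if
 * it is within bounds; the real function also handles the old/new
 * userland buffers */
static int
bounded_copy(int request, int *valp, int minval, int maxval)
{
	if (request < minval || request > maxval)
		return (1);
	*valp = request;
	return (0);
}

static int
set_use_limit(int request)
{
	int error, oval, nval, i;

	/* the kernel snapshots this with atomic_load_int() */
	oval = nval = use_limit;
	error = bounded_copy(request, &nval, 0, INT_MAX);
	if (!error && oval != nval) {
		/* commit under the mutex so the clamped scs_use values
		 * and the new limit are published together */
		pthread_mutex_lock(&cache_mtx);
		for (i = 0; i < 2; i++)
			if (scs_use[i] > nval)
				scs_use[i] = nval;
		use_limit = nval;
		pthread_mutex_unlock(&cache_mtx);
	}
	return (error);
}

int
main(void)
{
	int error = set_use_limit(5000);

	printf("error %d, limit now %d, scs_use %d/%d\n",
	    error, use_limit, scs_use[0], scs_use[1]);
	return (0);
}

Checking oval != nval also means read-only sysctl calls never take the
mutex at all, which is what lets the NET_LOCK()/NET_UNLOCK() pair be
dropped from the handler.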