From: Alexander Bluhm <alexander.bluhm@gmx.net>
Subject: Re: Please test: shared solock for all inet sockets within knote(9) routines
To: Vitaliy Makkoveev <mvs@openbsd.org>
Cc: tech@openbsd.org
Date: Sun, 28 Jan 2024 12:54:24 +0100

On Fri, Jan 26, 2024 at 07:59:02PM +0300, Vitaliy Makkoveev wrote:
> Not for commit yet, but it could be interesting. The idea is the same as
> for the PCB layer: take the shared netlock and the exclusive `so_lock' to
> serialize access to the socket. tcp(4) sockets permit this because the
> knote(9) routines only do read-only access to the socket.
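
For reference, a minimal sketch of the lock ordering described above for
PF_INET/PF_INET6 sockets; the function name is illustrative and not part
of the diff below:

void
example_inet_filt_lock(struct socket *so)
{
	/* shared netlock first, then the exclusive per-socket rwlock */
	NET_LOCK_SHARED();
	rw_enter_write(&so->so_lock);

	/* ... read-only inspection of socket state, as in filt_soread() ... */

	rw_exit_write(&so->so_lock);
	NET_UNLOCK_SHARED();
}

In the diff, sockets from other domains take only the exclusive `so_lock'.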

Passes regress with witness.

OK bluhm@

> This should provide some additional performance gain.

My setup is currently busy with some other test run.
I can do a performance comparison in a few days.

> Index: sys/kern/uipc_socket.c
> ===================================================================
> RCS file: /cvs/src/sys/kern/uipc_socket.c,v
> retrieving revision 1.314
> diff -u -p -r1.314 uipc_socket.c
> --- sys/kern/uipc_socket.c	12 Jan 2024 10:48:03 -0000	1.314
> +++ sys/kern/uipc_socket.c	26 Jan 2024 16:50:58 -0000
> @@ -2135,13 +2135,43 @@ sohasoutofband(struct socket *so)
>  	knote_locked(&so->so_rcv.sb_klist, 0);
>  }
>  
> +void
> +sofilt_lock(struct socket *so)
> +{
> +	switch (so->so_proto->pr_domain->dom_family) {
> +	case PF_INET:
> +	case PF_INET6:
> +		NET_LOCK_SHARED();
> +		rw_enter_write(&so->so_lock);
> +		break;
> +	default:
> +		rw_enter_write(&so->so_lock);
> +		break;
> +	}
> +}
> +
> +void
> +sofilt_unlock(struct socket *so)
> +{
> +	switch (so->so_proto->pr_domain->dom_family) {
> +	case PF_INET:
> +	case PF_INET6:
> +		rw_exit_write(&so->so_lock);
> +		NET_UNLOCK_SHARED();
> +		break;
> +	default:
> +		rw_exit_write(&so->so_lock);
> +		break;
> +	}
> +}
> +
>  int
>  soo_kqfilter(struct file *fp, struct knote *kn)
>  {
>  	struct socket *so = kn->kn_fp->f_data;
>  	struct sockbuf *sb;
>  
> -	solock(so);
> +	sofilt_lock(so);
>  	switch (kn->kn_filter) {
>  	case EVFILT_READ:
>  		if (so->so_options & SO_ACCEPTCONN)
> @@ -2159,12 +2189,12 @@ soo_kqfilter(struct file *fp, struct kno
>  		sb = &so->so_rcv;
>  		break;
>  	default:
> -		sounlock(so);
> +		sofilt_unlock(so);
>  		return (EINVAL);
>  	}
>  
>  	klist_insert_locked(&sb->sb_klist, kn);
> -	sounlock(so);
> +	sofilt_unlock(so);
>  
>  	return (0);
>  }
> @@ -2185,6 +2215,8 @@ filt_soread(struct knote *kn, long hint)
>  
>  	soassertlocked(so);
>  
> +	pru_lock(so);
> +
>  	kn->kn_data = so->so_rcv.sb_cc;
>  #ifdef SOCKET_SPLICE
>  	if (isspliced(so)) {
> @@ -2207,6 +2239,8 @@ filt_soread(struct knote *kn, long hint)
>  		rv = (kn->kn_data >= so->so_rcv.sb_lowat);
>  	}
>  
> +	pru_unlock(so);
> +
>  	return rv;
>  }
>  
> @@ -2226,6 +2260,8 @@ filt_sowrite(struct knote *kn, long hint
>  
>  	soassertlocked(so);
>  
> +	pru_lock(so);
> +
>  	kn->kn_data = sbspace(so, &so->so_snd);
>  	if (so->so_snd.sb_state & SS_CANTSENDMORE) {
>  		kn->kn_flags |= EV_EOF;
> @@ -2246,6 +2282,8 @@ filt_sowrite(struct knote *kn, long hint
>  		rv = (kn->kn_data >= so->so_snd.sb_lowat);
>  	}
>  
> +	pru_unlock(so);
> +
>  	return (rv);
>  }
>  
> @@ -2309,9 +2347,9 @@ filt_somodify(struct kevent *kev, struct
>  	struct socket *so = kn->kn_fp->f_data;
>  	int rv;
>  
> -	solock(so);
> +	sofilt_lock(so);
>  	rv = knote_modify(kev, kn);
> -	sounlock(so);
> +	sofilt_unlock(so);
>  
>  	return (rv);
>  }
> @@ -2322,9 +2360,9 @@ filt_soprocess(struct knote *kn, struct 
>  	struct socket *so = kn->kn_fp->f_data;
>  	int rv;
>  
> -	solock(so);
> +	sofilt_lock(so);
>  	rv = knote_process(kn, kev);
> -	sounlock(so);
> +	sofilt_unlock(so);
>  
>  	return (rv);
>  }
> @@ -2342,7 +2380,7 @@ klist_solock(void *arg)
>  {
>  	struct socket *so = arg;
>  
> -	solock(so);
> +	sofilt_lock(so);
>  	return (1);
>  }
>  
> @@ -2351,7 +2389,7 @@ klist_sounlock(void *arg, int ls)
>  {
>  	struct socket *so = arg;
>  
> -	sounlock(so);
> +	sofilt_unlock(so);
>  }
>  
>  #ifdef DDB