Rework filt_sor{modify,process}()
First split them into filt_sor*() and filt_soe*() functions for
`soread_filtops' and `soexcept_filtops' respectively. The
filt_soexcept() path is like filt_sowrite(), and `sb_mtx'
serialization is sufficient there, but filt_soread() is more
complicated for connection-oriented sockets. It is not yet clear
whether a concurrent SO_ACCEPTCONN transition is safe for select(2)
and poll(2), so keep the socket lock together with the `sb_mtx' lock
for that case.
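
For context, the listening-socket case of filt_soread() looks at
accept queue state that `sb_mtx' does not cover. A rough sketch of
that branch, assuming the usual SO_ACCEPTCONN/so_qlen handling and
not quoting filt_soread() verbatim:

	/*
	 * Sketch only: readability of a listening socket depends on
	 * so_qlen, which is why the socket lock is kept together with
	 * sb_mtx for the connection-oriented case below.
	 */
	if (so->so_options & SO_ACCEPTCONN) {
		kn->kn_data = so->so_qlen;
		return (kn->kn_data != 0);
	}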
Also get rid of the sofilt_*lock() functions. They were temporarily
introduced to take the `sb_mtx' mutex together with the shared netlock
for inet sockets, or with the socket lock for all other cases. Now the
shared netlock alone is not enough and should be taken together with
the `so_lock' rwlock. We have the special solock_shared() function for
that purpose, so use it directly in the corresponding filt_sor*()
handlers.
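
For reference, solock_shared() pairs the shared netlock with the
`so_lock' rwlock, roughly along these lines (a sketch based on the
description above, not a verbatim copy from sys/kern/uipc_socket.c);
sounlock_shared() releases the locks in the opposite order:

	void
	solock_shared(struct socket *so)
	{
		/* inet sockets additionally need the shared netlock */
		switch (so->so_proto->pr_domain->dom_family) {
		case PF_INET:
		case PF_INET6:
			NET_LOCK_SHARED();
			break;
		}
		rw_enter_write(&so->so_lock);
	}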
Kirill, could you run sys/netinet/tcpthread/ and sys/kern/sosplice on
your arm64 machine?
Index: sys/kern/uipc_socket.c
===================================================================
RCS file: /cvs/src/sys/kern/uipc_socket.c,v
diff -u -p -r1.370 uipc_socket.c
--- sys/kern/uipc_socket.c 3 Feb 2025 09:00:55 -0000 1.370
+++ sys/kern/uipc_socket.c 3 Feb 2025 10:41:32 -0000
@@ -77,6 +77,9 @@ int filt_sowprocess(struct knote *kn, st
int filt_sormodify(struct kevent *kev, struct knote *kn);
int filt_sorprocess(struct knote *kn, struct kevent *kev);
+int filt_soemodify(struct kevent *kev, struct knote *kn);
+int filt_soeprocess(struct knote *kn, struct kevent *kev);
+
const struct filterops soread_filtops = {
.f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
.f_attach = NULL,
@@ -100,8 +103,8 @@ const struct filterops soexcept_filtops
.f_attach = NULL,
.f_detach = filt_sordetach,
.f_event = filt_soexcept,
- .f_modify = filt_sormodify,
- .f_process = filt_sorprocess,
+ .f_modify = filt_soemodify,
+ .f_process = filt_soeprocess,
};
#ifndef SOMINCONN
@@ -2241,38 +2244,6 @@ sohasoutofband(struct socket *so)
knote(&so->so_rcv.sb_klist, 0);
}
-void
-sofilt_lock(struct socket *so, struct sockbuf *sb)
-{
- switch (so->so_proto->pr_domain->dom_family) {
- case PF_INET:
- case PF_INET6:
- NET_LOCK_SHARED();
- break;
- default:
- rw_enter_write(&so->so_lock);
- break;
- }
-
- mtx_enter(&sb->sb_mtx);
-}
-
-void
-sofilt_unlock(struct socket *so, struct sockbuf *sb)
-{
- mtx_leave(&sb->sb_mtx);
-
- switch (so->so_proto->pr_domain->dom_family) {
- case PF_INET:
- case PF_INET6:
- NET_UNLOCK_SHARED();
- break;
- default:
- rw_exit_write(&so->so_lock);
- break;
- }
-}
-
int
soo_kqfilter(struct file *fp, struct knote *kn)
{
@@ -2470,9 +2441,13 @@ filt_sormodify(struct kevent *kev, struc
struct socket *so = kn->kn_fp->f_data;
int rv;
- sofilt_lock(so, &so->so_rcv);
+ if (so->so_proto->pr_flags & PR_WANTRCVD)
+ solock_shared(so);
+ mtx_enter(&so->so_rcv.sb_mtx);
rv = knote_modify(kev, kn);
- sofilt_unlock(so, &so->so_rcv);
+ mtx_leave(&so->so_rcv.sb_mtx);
+ if (so->so_proto->pr_flags & PR_WANTRCVD)
+ sounlock_shared(so);
return (rv);
}
@@ -2483,9 +2458,39 @@ filt_sorprocess(struct knote *kn, struct
struct socket *so = kn->kn_fp->f_data;
int rv;
- sofilt_lock(so, &so->so_rcv);
+ if (so->so_proto->pr_flags & PR_WANTRCVD)
+ solock_shared(so);
+ mtx_enter(&so->so_rcv.sb_mtx);
rv = knote_process(kn, kev);
- sofilt_unlock(so, &so->so_rcv);
+ mtx_leave(&so->so_rcv.sb_mtx);
+ if (so->so_proto->pr_flags & PR_WANTRCVD)
+ sounlock_shared(so);
+
+ return (rv);
+}
+
+int
+filt_soemodify(struct kevent *kev, struct knote *kn)
+{
+ struct socket *so = kn->kn_fp->f_data;
+ int rv;
+
+ mtx_enter(&so->so_rcv.sb_mtx);
+ rv = knote_modify(kev, kn);
+ mtx_leave(&so->so_rcv.sb_mtx);
+
+ return (rv);
+}
+
+int
+filt_soeprocess(struct knote *kn, struct kevent *kev)
+{
+ struct socket *so = kn->kn_fp->f_data;
+ int rv;
+
+ mtx_enter(&so->so_rcv.sb_mtx);
+ rv = knote_process(kn, kev);
+ mtx_leave(&so->so_rcv.sb_mtx);
return (rv);
}