tcp output MP fixes
On Wed, Nov 06, 2024 at 02:04:31PM +0100, Alexander Bluhm wrote:
> On Wed, Nov 06, 2024 at 11:50:03AM +0100, Alexander Bluhm wrote:
> > I am not sure about sbchecklowmem(). For MP safety we must serialize
> > calls to m_pool_used() and protect sblowmem with a mutex. In
> > practice not much can ever go wrong. I have chosen mtx_enter_try()
> > to update without spinning, using a possibly obsolete value in
> > case of lock contention.
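
(A minimal sketch of what that mtx_enter_try() approach might have looked
like; illustrative only, not what was committed, with the 60/80 thresholds
taken from sbchecklowmem() in the patch below and the IPL chosen arbitrarily:)

/* Hypothetical sketch, not the committed fix. */
int
sbchecklowmem(void)
{
	/* IPL_NONE is an assumption for illustration. */
	static struct mutex sblowmem_mtx = MUTEX_INITIALIZER(IPL_NONE);
	static int sblowmem;

	/* Update only when the mutex is free; never spin on contention. */
	if (mtx_enter_try(&sblowmem_mtx)) {
		unsigned int used = m_pool_used();

		if (used < 60)
			sblowmem = 0;
		else if (used > 80)
			sblowmem = 1;
		mtx_leave(&sblowmem_mtx);
	}

	/* Under contention a possibly obsolete value is returned. */
	return (sblowmem);
}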
>
> I have discussed sbchecklowmem() with claudio@. We want to fix the
> MP issue with a comment instead of a mutex. I added atomic load and
> store functions to show where the access from multiple CPUs happens.
> The actual assembler instructions should not change much with them.
>
> ok?
Fine with me, with the usual caveat that m_pool_used() is not returning
what we want. I still hope dlg@ will fix this with some magic.
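
(For context before the patch below: these helpers are essentially volatile
loads and stores, which is why the generated assembler barely changes. A
rough sketch of their shape, paraphrased from memory of sys/sys/atomic.h;
check the tree for the authoritative definitions:)

static inline unsigned long
atomic_load_long(volatile unsigned long *p)
{
	return (*p);
}

static inline void
atomic_store_long(volatile unsigned long *p, unsigned long v)
{
	*p = v;
}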
> bluhm
>
> Index: kern/uipc_mbuf.c
> ===================================================================
> RCS file: /data/mirror/openbsd/cvs/src/sys/kern/uipc_mbuf.c,v
> diff -u -p -r1.294 uipc_mbuf.c
> --- kern/uipc_mbuf.c 10 Sep 2024 14:52:42 -0000 1.294
> +++ kern/uipc_mbuf.c 6 Nov 2024 12:42:17 -0000
> @@ -129,8 +129,8 @@ struct mutex m_extref_mtx = MUTEX_INITIA
> void m_extfree(struct mbuf *);
> void m_zero(struct mbuf *);
>
> -unsigned long mbuf_mem_limit; /* how much memory can be allocated */
> -unsigned long mbuf_mem_alloc; /* how much memory has been allocated */
> +unsigned long mbuf_mem_limit; /* [a] how much memory can be allocated */
> +unsigned long mbuf_mem_alloc; /* [a] how much memory has been allocated */
>
> void *m_pool_alloc(struct pool *, int, int *);
> void m_pool_free(struct pool *, void *);
> @@ -219,7 +219,7 @@ nmbclust_update(long newval)
> return ERANGE;
> /* update the global mbuf memory limit */
> nmbclust = newval;
> - mbuf_mem_limit = nmbclust * MCLBYTES;
> + atomic_store_long(&mbuf_mem_limit, nmbclust * MCLBYTES);
>
> pool_wakeup(&mbpool);
> for (i = 0; i < nitems(mclsizes); i++)
> @@ -1458,7 +1458,8 @@ m_pool_alloc(struct pool *pp, int flags,
> {
> void *v;
>
> - if (atomic_add_long_nv(&mbuf_mem_alloc, pp->pr_pgsize) > mbuf_mem_limit)
> + if (atomic_add_long_nv(&mbuf_mem_alloc, pp->pr_pgsize) >
> + atomic_load_long(&mbuf_mem_limit))
> goto fail;
>
> v = (*pool_allocator_multi.pa_alloc)(pp, flags, slowdown);
> @@ -1488,7 +1489,8 @@ m_pool_init(struct pool *pp, u_int size,
> u_int
> m_pool_used(void)
> {
> - return ((mbuf_mem_alloc * 100) / mbuf_mem_limit);
> + return ((atomic_load_long(&mbuf_mem_alloc) * 100) /
> + atomic_load_long(&mbuf_mem_limit));
> }
>
> #ifdef DDB
> Index: kern/uipc_socket2.c
> ===================================================================
> RCS file: /data/mirror/openbsd/cvs/src/sys/kern/uipc_socket2.c,v
> diff -u -p -r1.158 uipc_socket2.c
> --- kern/uipc_socket2.c 12 Jul 2024 19:50:35 -0000 1.158
> +++ kern/uipc_socket2.c 6 Nov 2024 12:57:32 -0000
> @@ -684,14 +684,20 @@ int
> sbchecklowmem(void)
> {
> static int sblowmem;
> - unsigned int used = m_pool_used();
> + unsigned int used;
>
> + /*
> + * m_pool_used() is thread safe. The global variable sblowmem is
> + * updated by multiple CPUs, but usually with the same value. Even
> + * if the value is incorrect for a short time, it does not matter.
> + */
> + used = m_pool_used();
> if (used < 60)
> - sblowmem = 0;
> + atomic_store_int(&sblowmem, 0);
> else if (used > 80)
> - sblowmem = 1;
> + atomic_store_int(&sblowmem, 1);
>
> - return (sblowmem);
> + return (atomic_load_int(&sblowmem));
> }
>
> /*
>
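
(The 60/80 thresholds above form a simple hysteresis: below 60% usage the
low-memory flag is cleared, above 80% it is set, and in between the previous
state is kept, which avoids flapping around a single cut-off. A standalone
userland sketch of the same pattern, with made-up sample values:)

#include <stdio.h>

/* Same hysteresis shape as sbchecklowmem(): two thresholds, sticky state. */
static int
lowmem(unsigned int used, int prev)
{
	if (used < 60)
		return (0);
	if (used > 80)
		return (1);
	return (prev);		/* between thresholds: keep previous state */
}

int
main(void)
{
	unsigned int samples[] = { 50, 70, 85, 70, 59, 70 };
	int state = 0;
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		state = lowmem(samples[i], state);
		printf("used=%u%% lowmem=%d\n", samples[i], state);
	}
	return (0);
}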
--
:wq Claudio