[PATCH] return from mi_switch() without the scheduler lock
On Tue, Nov 26, 2024 at 12:47:41PM +0100, Mateusz Guzik wrote:
> On Tue, Nov 26, 2024 at 12:21 PM Martin Pieuchot <mpi@grenadille.net> wrote:
> >
> > On 26/11/24(Tue) 09:26, Mateusz Guzik wrote:
> > > The scheduler lock gets taken at the end only to be immediately released
> > > by most callers.
> > >
> > > The expectation that the lock is not held is documented with
> > > SCHED_ASSERT_UNLOCKED().
> >
> > Nice.
I disagree; some of those SCHED_ASSERT_UNLOCKED() are not helpful. Every
assert has a cost, and it makes no sense to check the same condition
over and over again.
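
For reference, a rough sketch from memory of how these asserts are
built (not the verbatim headers): SCHED_ASSERT_UNLOCKED() wraps the
mutex assert on sched_lock, and the mutex asserts are real checks only
in DIAGNOSTIC kernels.

	/* sketch from memory -- not the verbatim sys/mutex.h */
	#ifdef DIAGNOSTIC
	#define MUTEX_ASSERT_UNLOCKED(mtx) do {				\
		if ((mtx)->mtx_owner == curcpu())			\
			panic("mutex %p held in %s", (mtx), __func__);	\
	} while (0)
	#else
	#define MUTEX_ASSERT_UNLOCKED(mtx)	do { } while (0)
	#endif

	/* sys/sched.h layers the scheduler variant on top */
	#define SCHED_ASSERT_UNLOCKED()	MUTEX_ASSERT_UNLOCKED(&sched_lock)

So on a DIAGNOSTIC kernel every one of these is an extra curcpu()
dereference and compare in the context switch path.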
> > > ====
> > >
> > > The change is not even compile-tested, it came up while I was poking
> > > around the rwlock diff (to which I'll respond some time later).
> >
> > That's so sad. That's the most important task. We're missing all the
> > fun!
> >
> > > I did patch up everything which grepped for mi_switch, but I reserve the
> > > right to have missed something.
> >
> > Please, would you test it and fix the possible remaining issues?
> > Thanks!
> >
>
> I'm a passer-by without a working OpenBSD installation, mate.
>
> Someone with one could sort this out within a few minutes, I would think.
I had similar diffs in my work trees and I was never fully convinced by
them, especially since more is needed in this area before we can move
away from SCHED_LOCK().
Throwing a diff like this at other people without even compile-testing
it is not the right way to do this. Right now it is just a distraction.
Sorry.
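
To spell out the convention change at a typical call site (a sketch
based on the yield() hunk below, not compiled code):

	/* old: mi_switch() returns with sched_lock held */
	SCHED_LOCK();
	setrunqueue(p->p_cpu, p, p->p_usrpri);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK();		/* every caller drops it right away */

	/* new: mi_switch() drops sched_lock before returning */
	SCHED_LOCK();
	setrunqueue(p->p_cpu, p, p->p_usrpri);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_ASSERT_UNLOCKED();	/* callers that still need the lock
					 * must retake it, as in sleep_finish()
					 */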
> > > diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c
> > > index b84e4b0d08d..b9b52851ba1 100644
> > > --- a/sys/kern/kern_sched.c
> > > +++ b/sys/kern/kern_sched.c
> > > @@ -152,6 +152,7 @@ sched_idle(void *v)
> > > p->p_cpu = ci;
> > > atomic_setbits_int(&p->p_flag, P_CPUPEG);
> > > mi_switch();
> > > + SCHED_LOCK();
> > > cpuset_del(&sched_idle_cpus, ci);
> > > SCHED_UNLOCK();
> > >
> > > @@ -165,7 +166,7 @@ sched_idle(void *v)
> > > SCHED_LOCK();
> > > p->p_stat = SSLEEP;
> > > mi_switch();
> > > - SCHED_UNLOCK();
> > > + SCHED_ASSERT_UNLOCKED();
> > >
> > > while ((dead = LIST_FIRST(&spc->spc_deadproc))) {
> > > LIST_REMOVE(dead, p_hash);
> > > @@ -634,7 +635,7 @@ sched_peg_curproc(struct cpu_info *ci)
> > > setrunqueue(ci, p, p->p_usrpri);
> > > p->p_ru.ru_nvcsw++;
> > > mi_switch();
> > > - SCHED_UNLOCK();
> > > + SCHED_ASSERT_UNLOCKED();
> > > }
> > >
> > > void
> > > diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
> > > index 53e5d68f4df..828e5b2ae61 100644
> > > --- a/sys/kern/kern_sig.c
> > > +++ b/sys/kern/kern_sig.c
> > > @@ -1538,8 +1538,10 @@ proc_stop(struct proc *p, int sw)
> > > * Extra calls to softclock don't hurt.
> > > */
> > > softintr_schedule(proc_stop_si);
> > > - if (sw)
> > > + if (sw) {
> > > mi_switch();
> > > + SCHED_LOCK();
> > > + }
> > > }
> > >
> > > /*
> > > @@ -2102,7 +2104,7 @@ single_thread_check_locked(struct proc *p, int deep)
> > > SCHED_LOCK();
> > > p->p_stat = SSTOP;
> > > mi_switch();
> > > - SCHED_UNLOCK();
> > > + SCHED_ASSERT_UNLOCKED();
> > > mtx_enter(&pr->ps_mtx);
> > > } while (pr->ps_single != NULL);
> > >
> > > diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
> > > index 8fa6e299b5b..5b75a795788 100644
> > > --- a/sys/kern/kern_synch.c
> > > +++ b/sys/kern/kern_synch.c
> > > @@ -416,6 +416,7 @@ sleep_finish(int timo, int do_sleep)
> > > KASSERT(p->p_stat == SSLEEP || p->p_stat == SSTOP);
> > > p->p_ru.ru_nvcsw++;
> > > mi_switch();
> > > + SCHED_LOCK();
> > > } else {
> > > KASSERT(p->p_stat == SONPROC || p->p_stat == SSLEEP);
> > > p->p_stat = SONPROC;
> > > @@ -615,7 +616,7 @@ sys_sched_yield(struct proc *p, void *v, register_t *retval)
> > > setrunqueue(p->p_cpu, p, newprio);
> > > p->p_ru.ru_nvcsw++;
> > > mi_switch();
> > > - SCHED_UNLOCK();
> > > + SCHED_ASSERT_UNLOCKED();
> > >
> > > return (0);
> > > }
> > > diff --git a/sys/kern/sched_bsd.c b/sys/kern/sched_bsd.c
> > > index 31bce5d87f7..3b3dccaae75 100644
> > > --- a/sys/kern/sched_bsd.c
> > > +++ b/sys/kern/sched_bsd.c
> > > @@ -317,7 +317,7 @@ yield(void)
> > > setrunqueue(p->p_cpu, p, p->p_usrpri);
> > > p->p_ru.ru_nvcsw++;
> > > mi_switch();
> > > - SCHED_UNLOCK();
> > > + SCHED_ASSERT_UNLOCKED();
> > > }
> > >
> > > /*
> > > @@ -335,7 +335,7 @@ preempt(void)
> > > setrunqueue(p->p_cpu, p, p->p_usrpri);
> > > p->p_ru.ru_nivcsw++;
> > > mi_switch();
> > > - SCHED_UNLOCK();
> > > + SCHED_ASSERT_UNLOCKED();
> > > }
> > >
> > > void
> > > @@ -440,7 +440,7 @@ mi_switch(void)
> > > if (hold_count)
> > > __mp_acquire_count(&kernel_lock, hold_count);
> > > #endif
> > > - SCHED_LOCK();
> > > + SCHED_ASSERT_UNLOCKED();
> > > }
> > >
> > > /*
> > >
> >
> >
>
>
> --
> Mateusz Guzik <mjguzik gmail.com>
>
--
:wq Claudio