uvm_purge()
On 12/05/25(Mon) 22:00, Mark Kettenis wrote:
> > Date: Mon, 12 May 2025 20:42:40 +0200
> > From: Martin Pieuchot <mpi@grenadille.net>
>
> Hi Martin,
>
> > The diff below moves the tearing down of the VM space to exit1(). It
> > implies that processes will now be charged for cleaning up their own
> > VM space; as a result, their %sys time will increase.
> >
> > uvm_purge() is called in the "top" part of exit1() when the process is
> > still allowed to sleep. To allow this piece of code to be executed in
> > parallel, all recursive kernel_lock tokens need to be released.
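
For context, the usual pattern elsewhere in the tree (e.g. mi_switch())
saves the recursion count and restores it afterwards.  A minimal,
untested sketch of that variant; whether the count actually needs
restoring here, given that exit1() never returns, is part of the
question:

#ifdef MULTIPROCESSOR
	int hold_count = __mp_release_all(&kernel_lock);
#endif
	uvm_purge(pr);
#ifdef MULTIPROCESSOR
	__mp_acquire_count(&kernel_lock, hold_count);
#endif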
> >
> > In the diff below uvm_purge() is called twice: the first time in
> > exit1() and the second time, doing nothing but integrity checks, in
> > uvm_exit(). Another approach could be to just check that the map is
> > empty. Any preferences?
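
The empty-map alternative could be a one-line assertion in uvm_exit().
A minimal sketch, assuming RBT_EMPTY() from <sys/tree.h> and the
current shape of uvm_exit(), untested:

	void
	uvm_exit(struct process *pr)
	{
		struct vmspace *vm = pr->ps_vmspace;

		/* exit1() already emptied the map via uvm_purge(). */
		KASSERT(RBT_EMPTY(uvm_map_addr, &vm->vm_map.addr));

		pr->ps_vmspace = NULL;
		uvmspace_free(vm);
	}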
> >
> > Comments? Oks?
>
> As I explained to Claudio in the "Faster _exit(2) for a faster
> userland: R.I.P the reaper" thread, this will disable the TLB flushing
> optimization I added on arm64 a few months ago. The uvm_purge() call
> needs to come after the userland page tables have been disabled and
> the TLBs have been flushed.
>
> Unfortunately the way I hooked that operation into pmap_deactivate()
> makes this non-trivial. The easiest way out may be to add a new
> pmap_purge() that does this operation and that gets called from an
> appropriate spot in exit1(). This must happen after the other threads
> of the process have exited, so somewhere after the point where the
> main thread waits for them to die.
As long as this only touches userland page tables, it can be inside
uvm_purge().
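
Something along these lines, perhaps?  A rough sketch only; the
pmap_purge() name and signature come from your suggestion, and the
arm64 specifics (pointing TTBR0 at an empty table, invalidating the
ASID) are reduced to placeholder comments:

	/*
	 * pmap_purge: retire the userland page tables of the exiting
	 * process before uvm_purge() unmaps everything, so that no
	 * per-page TLB flushes are needed during the teardown.
	 */
	void
	pmap_purge(struct proc *p)
	{
		struct pmap *pm = p->p_vmspace->vm_map.pmap;

		/* MD: switch this CPU away from pm's user page tables. */
		/* MD: invalidate the TLB entries tagged with pm's ASID. */
	}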
> I can cook you a diff for that.
Thanks!
> > Index: kern/kern_exit.c
> > ===================================================================
> > RCS file: /cvs/src/sys/kern/kern_exit.c,v
> > diff -u -p -r1.245 kern_exit.c
> > --- kern/kern_exit.c 2 May 2025 05:04:38 -0000 1.245
> > +++ kern/kern_exit.c 12 May 2025 18:10:35 -0000
> > @@ -241,6 +241,14 @@ exit1(struct proc *p, int xexit, int xsi
> > */
> > if (pr->ps_pptr->ps_sigacts->ps_sigflags & SAS_NOCLDWAIT)
> > atomic_setbits_int(&pr->ps_flags, PS_NOZOMBIE);
> > +
> > + /* Tear down the virtual address space. */
> > +#ifdef MULTIPROCESSOR
> > + __mp_release_all(&kernel_lock);
> > +#endif
> > + uvm_purge(pr);
> > + KERNEL_LOCK();
> > +
> > }
> >
> > p->p_fd = NULL; /* zap the thread's copy */
> > Index: uvm/uvm_extern.h
> > ===================================================================
> > RCS file: /cvs/src/sys/uvm/uvm_extern.h,v
> > diff -u -p -r1.180 uvm_extern.h
> > --- uvm/uvm_extern.h 19 Nov 2024 06:18:26 -0000 1.180
> > +++ uvm/uvm_extern.h 12 May 2025 18:05:34 -0000
> > @@ -268,6 +268,7 @@ int uvm_fault(vm_map_t, vaddr_t, vm_fa
> >
> > vaddr_t uvm_uarea_alloc(void);
> > void uvm_uarea_free(struct proc *);
> > +void uvm_purge(struct process *);
> > void uvm_exit(struct process *);
> > void uvm_init_limits(struct plimit *);
> > boolean_t uvm_kernacc(caddr_t, size_t, int);
> > @@ -401,6 +402,7 @@ void uvmspace_init(struct vmspace *, s
> > void uvmspace_exec(struct proc *, vaddr_t, vaddr_t);
> > struct vmspace *uvmspace_fork(struct process *);
> > void uvmspace_addref(struct vmspace *);
> > +void uvmspace_purge(struct vmspace *);
> > void uvmspace_free(struct vmspace *);
> > struct vmspace *uvmspace_share(struct process *);
> > int uvm_share(vm_map_t, vaddr_t, vm_prot_t,
> > Index: uvm/uvm_glue.c
> > ===================================================================
> > RCS file: /cvs/src/sys/uvm/uvm_glue.c,v
> > diff -u -p -r1.88 uvm_glue.c
> > --- uvm/uvm_glue.c 21 Mar 2025 13:19:33 -0000 1.88
> > +++ uvm/uvm_glue.c 12 May 2025 18:05:34 -0000
> > @@ -286,6 +286,19 @@ uvm_uarea_free(struct proc *p)
> > }
> >
> > /*
> > + * uvm_purge: teardown a virtual address space
> > + */
> > +void
> > +uvm_purge(struct process *pr)
> > +{
> > + struct vmspace *vm = pr->ps_vmspace;
> > +
> > + KERNEL_ASSERT_UNLOCKED();
> > +
> > + uvmspace_purge(vm);
> > +}
> > +
> > +/*
> > * uvm_exit: exit a virtual address space
> > */
> > void
> > Index: uvm/uvm_map.c
> > ===================================================================
> > RCS file: /cvs/src/sys/uvm/uvm_map.c,v
> > diff -u -p -r1.341 uvm_map.c
> > --- uvm/uvm_map.c 21 Apr 2025 14:46:18 -0000 1.341
> > +++ uvm/uvm_map.c 12 May 2025 18:05:34 -0000
> > @@ -2491,6 +2491,19 @@ uvm_map_teardown(struct vm_map *map)
> > entry = TAILQ_NEXT(entry, dfree.deadq);
> > }
> >
> > +#ifdef VMMAP_DEBUG
> > + numt = numq = 0;
> > + RBT_FOREACH(entry, uvm_map_addr, &map->addr)
> > + numt++;
> > + TAILQ_FOREACH(entry, &dead_entries, dfree.deadq)
> > + numq++;
> > + KASSERT(numt == numq);
> > +#endif
> > + RBT_INIT(uvm_map_addr, &map->addr);
> > + map->size = 0;
> > + map->min_offset = 0;
> > + map->max_offset = 0;
> > + map->flags &= ~VM_MAP_ISVMSPACE;
> > vm_map_unlock(map);
> >
> > /* Remove address selectors. */
> > @@ -2503,18 +2516,7 @@ uvm_map_teardown(struct vm_map *map)
> > uvm_addr_destroy(map->uaddr_brk_stack);
> > map->uaddr_brk_stack = NULL;
> >
> > -#ifdef VMMAP_DEBUG
> > - numt = numq = 0;
> > - RBT_FOREACH(entry, uvm_map_addr, &map->addr)
> > - numt++;
> > - TAILQ_FOREACH(entry, &dead_entries, dfree.deadq)
> > - numq++;
> > - KASSERT(numt == numq);
> > -#endif
> > uvm_unmap_detach(&dead_entries, 0);
> > -
> > - pmap_destroy(map->pmap);
> > - map->pmap = NULL;
> > }
> >
> > /*
> > @@ -3395,6 +3397,21 @@ uvmspace_addref(struct vmspace *vm)
> > atomic_inc_int(&vm->vm_refcnt);
> > }
> >
> > +void
> > +uvmspace_purge(struct vmspace *vm)
> > +{
> > +#ifdef SYSVSHM
> > + /* Get rid of any SYSV shared memory segments. */
> > + if (vm->vm_shm != NULL) {
> > + KERNEL_LOCK();
> > + shmexit(vm);
> > + KERNEL_UNLOCK();
> > + }
> > +#endif
> > +
> > + uvm_map_teardown(&vm->vm_map);
> > +}
> > +
> > /*
> > * uvmspace_free: free a vmspace data structure
> > */
> > @@ -3407,16 +3424,11 @@ uvmspace_free(struct vmspace *vm)
> > * all of the mappings and pages they hold, then call the pmap
> > * module to reclaim anything left.
> > */
> > -#ifdef SYSVSHM
> > - /* Get rid of any SYSV shared memory segments. */
> > - if (vm->vm_shm != NULL) {
> > - KERNEL_LOCK();
> > - shmexit(vm);
> > - KERNEL_UNLOCK();
> > - }
> > -#endif
> > + uvmspace_purge(vm);
> > +
> > + pmap_destroy(vm->vm_map.pmap);
> > + vm->vm_map.pmap = NULL;
> >
> > - uvm_map_teardown(&vm->vm_map);
> > pool_put(&uvm_vmspace_pool, vm);
> > }
> > }
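
To summarize how the teardown is split after this diff, as I read it
(indentation shows the call tree, not actual code):

	exit1()
		uvm_purge(pr)
			uvmspace_purge(vm)
				shmexit(vm)		/* SYSVSHM only */
				uvm_map_teardown(&vm->vm_map)

	uvm_exit(pr)				/* later, from the reaper */
		uvmspace_free(vm)
			uvmspace_purge(vm)	/* integrity checks, mostly a no-op */
			pmap_destroy(vm->vm_map.pmap)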