bufbackoff() & freed pages
On 20/10/24(Sun) 11:33, Martin Pieuchot wrote:
> This changes bufbackoff() to return the number of recovered pages.
> I'll be using this soon to access `uvmexp.free' only once per
> iteration.
I got a positive review. Any ok?
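To make the new calling convention obvious at a glance: the caller can now
subtract the returned page count from its shortage instead of re-reading a
global counter. Below is a minimal, self-contained sketch of that pattern
(userland stub with made-up numbers, not part of the diff):

	#include <stdio.h>

	/* Stub standing in for the kernel's bufbackoff(); returns pages freed. */
	static unsigned long
	bufbackoff_stub(long size)
	{
		unsigned long freed = 16;	/* pretend the cache gave back 16 pages */

		return freed < (unsigned long)size ? freed : (unsigned long)size;
	}

	int
	main(void)
	{
		long shortage = 64;		/* pages the caller still needs */
		unsigned long freed;

		/* One call reports how much was recovered; no second read of the counter. */
		freed = bufbackoff_stub(shortage);
		shortage -= freed;

		if (shortage > 0)
			printf("still %ld pages short, fall back to scanning\n", shortage);
		else
			printf("buffer cache back-off recovered enough pages\n");
		return 0;
	}
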
> Index: uvm/uvm_pdaemon.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_pdaemon.c,v
> diff -u -p -r1.117 uvm_pdaemon.c
> --- uvm/uvm_pdaemon.c 2 Oct 2024 10:36:33 -0000 1.117
> +++ uvm/uvm_pdaemon.c 20 Oct 2024 09:21:48 -0000
> @@ -131,7 +131,7 @@ uvm_wait(const char *wmsg)
> */
> if (curproc == uvm.pagedaemon_proc) {
> printf("uvm_wait emergency bufbackoff\n");
> - if (bufbackoff(NULL, 4) == 0)
> + if (bufbackoff(NULL, 4) >= 4)
> return;
> /*
> * now we have a problem: the pagedaemon wants to go to
> Index: uvm/uvm_pmemrange.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_pmemrange.c,v
> diff -u -p -r1.68 uvm_pmemrange.c
> --- uvm/uvm_pmemrange.c 2 Oct 2024 10:17:28 -0000 1.68
> +++ uvm/uvm_pmemrange.c 20 Oct 2024 09:21:48 -0000
> @@ -2104,7 +2104,7 @@ uvm_wait_pla(paddr_t low, paddr_t high,
> * easily use up that reserve in a single scan iteration.
> */
> uvm_unlock_fpageq();
> - if (bufbackoff(NULL, atop(size)) == 0) {
> + if (bufbackoff(NULL, atop(size)) >= atop(size)) {
> uvm_lock_fpageq();
> return 0;
> }
> Index: kern/vfs_bio.c
> ===================================================================
> RCS file: /cvs/src/sys/kern/vfs_bio.c,v
> diff -u -p -r1.213 vfs_bio.c
> --- kern/vfs_bio.c 3 Feb 2024 18:51:58 -0000 1.213
> +++ kern/vfs_bio.c 20 Oct 2024 09:31:05 -0000
> @@ -284,22 +284,16 @@ bufadjust(int newbufpages)
> }
>
> /*
> - * Make the buffer cache back off from cachepct.
> + * Back off "size" buffer cache pages. Called by the page
> + * daemon to consume buffer cache pages rather than scanning.
> + *
> + * It returns the number of freed pages.
> */
> -int
> +unsigned long
> bufbackoff(struct uvm_constraint_range *range, long size)
> {
> - /*
> - * Back off "size" buffer cache pages. Called by the page
> - * daemon to consume buffer cache pages rather than scanning.
> - *
> - * It returns 0 to the pagedaemon to indicate that it has
> - * succeeded in freeing enough pages. It returns -1 to
> - * indicate that it could not and the pagedaemon should take
> - * other measures.
> - *
> - */
> long pdelta, oldbufpages;
> + int64_t dmarecovered, recovered = 0;
>
> /*
> * If we will accept high memory for this backoff
> @@ -307,11 +301,13 @@ bufbackoff(struct uvm_constraint_range *
> */
> if (range != NULL && range->ucr_high > dma_constraint.ucr_high) {
> struct buf *bp;
> - int64_t start = bcstats.numbufpages, recovered = 0;
> - int s = splbio();
> + int64_t start;
> + int s;
>
> - while ((recovered < size) &&
> - (bp = bufcache_gethighcleanbuf())) {
> + start = bcstats.numbufpages;
> +
> + s = splbio();
> +	while ((recovered < size) && (bp = bufcache_gethighcleanbuf())) {
> bufcache_take(bp);
> if (bp->b_vp) {
> RBT_REMOVE(buf_rb_bufs,
> @@ -324,16 +320,13 @@ bufbackoff(struct uvm_constraint_range *
> bufcache_adjust();
> splx(s);
>
> - /* If we got enough, return success */
> + /* We got enough. */
> if (recovered >= size)
> - return 0;
> + return recovered;
>
> - /*
> - * If we needed only memory above DMA,
> - * return failure
> - */
> + /* If we needed only memory above DMA, we're done. */
> if (range->ucr_low > dma_constraint.ucr_high)
> - return -1;
> + return recovered;
>
> /* Otherwise get the rest from DMA */
> size -= recovered;
> @@ -351,15 +344,14 @@ bufbackoff(struct uvm_constraint_range *
> pdelta = (size > bufbackpages) ? size : bufbackpages;
>
> if (bufpages <= buflowpages)
> - return(-1);
> + return recovered;
> if (bufpages - pdelta < buflowpages)
> pdelta = bufpages - buflowpages;
> oldbufpages = bufpages;
> bufadjust(bufpages - pdelta);
> - if (oldbufpages - bufpages < size)
> - return (-1); /* we did not free what we were asked */
> - else
> - return(0);
> + dmarecovered = (oldbufpages - bufpages);
> +
> + return recovered + dmarecovered;
> }
>
>
> Index: kern/vfs_biomem.c
> ===================================================================
> RCS file: /cvs/src/sys/kern/vfs_biomem.c,v
> diff -u -p -r1.51 vfs_biomem.c
> --- kern/vfs_biomem.c 24 Oct 2021 00:02:25 -0000 1.51
> +++ kern/vfs_biomem.c 20 Oct 2024 09:21:48 -0000
> @@ -272,7 +272,7 @@ buf_alloc_pages(struct buf *bp, vsize_t
> UVM_PLA_NOWAIT | UVM_PLA_NOWAKE);
> if (i == 0)
> break;
> - } while (bufbackoff(&dma_constraint, size) == 0);
> + } while (bufbackoff(&dma_constraint, size) >= size);
> if (i != 0)
> i = uvm_pagealloc_multi(&bp->b_uobj, 0, size,
> UVM_PLA_WAITOK);
> @@ -324,6 +324,7 @@ int
> buf_realloc_pages(struct buf *bp, struct uvm_constraint_range *where,
> int flags)
> {
> + vsize_t size;
> vaddr_t va;
> int dma;
> int i, r;
> @@ -345,7 +346,8 @@ buf_realloc_pages(struct buf *bp, struct
> bp->b_bufsize, UVM_PLA_NOWAIT | UVM_PLA_NOWAKE, where);
> if (r == 0)
> break;
> - } while ((bufbackoff(where, atop(bp->b_bufsize)) == 0));
> + size = atop(bp->b_bufsize);
> + } while ((bufbackoff(where, size) >= size));
>
> /*
> * bufbackoff() failed, so there's no more we can do without
> Index: sys/mount.h
> ===================================================================
> RCS file: /cvs/src/sys/sys/mount.h,v
> diff -u -p -r1.151 mount.h
> --- sys/mount.h 3 Feb 2024 18:51:58 -0000 1.151
> +++ sys/mount.h 20 Oct 2024 09:27:53 -0000
> @@ -495,7 +495,7 @@ extern long buflowpages, bufhighpages, b
> extern int bufcachepercent;
> extern void bufadjust(int);
> struct uvm_constraint_range;
> -extern int bufbackoff(struct uvm_constraint_range*, long);
> +extern unsigned long bufbackoff(struct uvm_constraint_range*, long);
>
> /*
> * Operations supported on mounted file system.
>