Prevent recursion in bufbackoff() by discarding DMA pages
Currently, when bufbackoff(), called by the page daemon, ends up reducing
the DMA cache, it tries to flip every buffer to high memory. Flipping
buffers implies allocating new pages, which is something we want to avoid
in such a situation.
On top of that, once uvm_pagerealloc_multi() fails, bufbackoff() is
called again, creating a recursion.
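
The problematic path looks roughly like this (a simplified sketch of
the call chain; the intermediate steps are paraphrased rather than
exact call sites):

	page daemon needs DMA-reachable pages
	-> bufbackoff(&dma_constraint, size)
	-> bufadjust(bufpages - pdelta)
	-> bufcache_recover_dmapages()
	-> flip a buffer to high memory, which allocates new pages
	-> uvm_pagerealloc_multi() fails under memory pressure
	-> bufbackoff() is called again: recursion
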
To prevent these scenarios and speed up the page daemon, I'd like to
commit the diff below, which always discards DMA buffers when they are
freed from bufbackoff().
The downside of this approach is that we now discard buffers even when
only DMA-reachable memory is missing.
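
In other words, with the discard flag set the recovery loop frees
buffers instead of flipping them. A minimal sketch of the intended
semantics (the helper next_clean_dma_buf() and the loop structure are
illustrative assumptions, not the actual bufcache_recover_dmapages()
body):

	/* Sketch only: helper name and loop structure are assumptions. */
	int64_t
	bufcache_recover_dmapages(int discard, int64_t howmany)
	{
		struct buf *bp;
		int64_t recovered = 0;

		while (recovered < howmany &&
		    (bp = next_clean_dma_buf()) != NULL) {
			recovered += atop(bp->b_bufsize);
			if (discard) {
				/*
				 * Free the buffer outright: its DMA
				 * pages are given back without
				 * allocating anything, so no recursion.
				 */
				buf_put(bp);
			} else {
				/*
				 * Flip to high memory: needs fresh
				 * non-DMA pages, which can fail under
				 * pressure and re-enter bufbackoff().
				 */
				buf_flip_high(bp);
			}
		}
		return (recovered);
	}
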
ok?
Index: kern/kern_sysctl.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_sysctl.c,v
diff -u -p -r1.447 kern_sysctl.c
--- kern/kern_sysctl.c 24 Sep 2024 12:37:11 -0000 1.447
+++ kern/kern_sysctl.c 30 Sep 2024 07:27:22 -0000
@@ -712,7 +712,7 @@ kern_sysctl_locked(int *name, u_int name
 		dmapages = uvm_pagecount(&dma_constraint);
 		if (bufcachepercent != opct) {
 			pgs = bufcachepercent * dmapages / 100;
-			bufadjust(pgs); /* adjust bufpages */
+			bufadjust(0, pgs); /* adjust bufpages */
 			bufhighpages = bufpages; /* set high water mark */
 		}
 		return(0);
Index: kern/vfs_bio.c
===================================================================
RCS file: /cvs/src/sys/kern/vfs_bio.c,v
diff -u -p -r1.213 vfs_bio.c
--- kern/vfs_bio.c 3 Feb 2024 18:51:58 -0000 1.213
+++ kern/vfs_bio.c 30 Sep 2024 07:28:07 -0000
@@ -245,7 +245,7 @@ bufinit(void)
  * Change cachepct
  */
 void
-bufadjust(int newbufpages)
+bufadjust(int discard, int newbufpages)
 {
 	int s;
 	int64_t npages;
@@ -269,7 +269,8 @@ bufadjust(int newbufpages)
 	 * to give back memory *now* - so we give it all back.
 	 */
 	if (bcstats.dmapages > targetpages)
-		(void) bufcache_recover_dmapages(0, bcstats.dmapages - targetpages);
+		bufcache_recover_dmapages(discard,
+		    bcstats.dmapages - targetpages);
 	bufcache_adjust();
 
 	/*
@@ -355,7 +356,7 @@ bufbackoff(struct uvm_constraint_range *
 	if (bufpages - pdelta < buflowpages)
 		pdelta = bufpages - buflowpages;
 	oldbufpages = bufpages;
-	bufadjust(bufpages - pdelta);
+	bufadjust(1, bufpages - pdelta);
 	if (oldbufpages - bufpages < size)
 		return (-1); /* we did not free what we were asked */
 	else
@@ -1093,7 +1094,7 @@ buf_get(struct vnode *vp, daddr_t blkno,
 		 * bufhighpages (cachepercent)
 		 */
 		if (bufpages < bufhighpages)
-			bufadjust(bufhighpages);
+			bufadjust(0, bufhighpages);
 
 		/*
 		 * If we would go over the page target with our
Index: sys/mount.h
===================================================================
RCS file: /cvs/src/sys/sys/mount.h,v
diff -u -p -r1.151 mount.h
--- sys/mount.h 3 Feb 2024 18:51:58 -0000 1.151
+++ sys/mount.h 30 Sep 2024 07:27:22 -0000
@@ -493,7 +493,7 @@ extern long buflowpages, bufhighpages, b
 #define BUFPAGES_INACT (((bcstats.numcleanpages - buflowpages) < 0) ? 0 \
     : bcstats.numcleanpages - buflowpages)
 extern int bufcachepercent;
-extern void bufadjust(int);
+extern void bufadjust(int, int);
 struct uvm_constraint_range;
 extern int bufbackoff(struct uvm_constraint_range*, long);