pdaemon: reduce access to uvmexp.free
On 02/11/24(Sat) 17:00, Martin Pieuchot wrote:
> Diff below introduces a `shortage' variable that aims at computing once
> how many pages should be recovered to reach the high watermark.
>
> Note that currently `uvmexp.free' is still accessed in uvm_swapout_threads()
> and uvmpd_scan_inactive(). I'll address this in upcoming diffs.
Now with corrected arithmetic as pointed out by miod@.
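
For illustration only, here is a minimal standalone sketch of the new
bookkeeping (not the kernel code itself; the numbers, the plain int
stand-ins for uvmexp fields, BUFPAGES_DEFICIT and the return values of
uvm_swapout_threads()/uvmpd_scan_inactive() are all made up): the idea is
that the pagedaemon computes a single `shortage' in pages and decrements
it as progress is made, instead of re-reading uvmexp.free at every step.

/*
 * Standalone illustration of the `shortage' accounting.  All values are
 * invented; in the kernel they come from uvmexp, BUFPAGES_DEFICIT,
 * uvm_swapout_threads() and uvmpd_scan_inactive().
 */
#include <stdio.h>

int
main(void)
{
	int freetarg = 512;		/* stand-in for uvmexp.freetarg */
	int free = 300;			/* stand-in for uvmexp.free */
	int deficit = 64;		/* stand-in for BUFPAGES_DEFICIT */
	int swapped_out = 50;		/* what uvm_swapout_threads() would return */
	int pages_freed = 0;		/* what uvmpd_scan_inactive() would return */
	int shortage, swap_shortage;

	/* computed once, instead of comparing `free' against the target twice */
	shortage = freetarg - free + deficit;

	/* swapping out threads reduces the remaining shortage */
	if (shortage > 0)
		shortage -= swapped_out;

	/* so does freeing pages during the inactive scan */
	shortage -= pages_freed;

	/*
	 * If we are still short and the inactive scan freed nothing, the
	 * remainder becomes the swap shortage used while scanning active
	 * pages (the real code additionally checks uvm_swapisfilled() and
	 * uvm_swapisfull()).
	 */
	swap_shortage = 0;
	if (shortage > 0 && pages_freed == 0)
		swap_shortage = shortage;

	printf("shortage=%d swap_shortage=%d\n", shortage, swap_shortage);
	return 0;
}
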
Index: uvm/uvm_pdaemon.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_pdaemon.c,v
diff -u -p -r1.118 uvm_pdaemon.c
--- uvm/uvm_pdaemon.c 2 Nov 2024 07:57:54 -0000 1.118
+++ uvm/uvm_pdaemon.c 2 Nov 2024 16:14:01 -0000
@@ -102,7 +102,7 @@ extern unsigned long drmbackoff(long);
*/
struct rwlock *uvmpd_trylockowner(struct vm_page *);
-void uvmpd_scan(struct uvm_pmalloc *, int,
+void uvmpd_scan(struct uvm_pmalloc *, int, int,
struct uvm_constraint_range *);
int uvmpd_scan_inactive(struct uvm_pmalloc *,
struct uvm_constraint_range *);
@@ -206,7 +206,7 @@ uvm_pageout(void *arg)
{
struct uvm_constraint_range constraint;
struct uvm_pmalloc *pma;
- int inactive_shortage, free;
+ int shortage, inactive_shortage;
/* ensure correct priority and set paging parameters... */
uvm.pagedaemon_proc = curproc;
@@ -237,7 +237,7 @@ uvm_pageout(void *arg)
} else
constraint = no_constraint;
}
- free = uvmexp.free - BUFPAGES_DEFICIT;
+ shortage = uvmexp.freetarg - uvmexp.free + BUFPAGES_DEFICIT;
uvm_unlock_fpageq();
/*
@@ -254,8 +254,8 @@ uvm_pageout(void *arg)
size = 0;
if (pma != NULL)
size += pma->pm_size >> PAGE_SHIFT;
- if (free < uvmexp.freetarg)
- size += uvmexp.freetarg - free;
+ if (shortage > 0)
+ size += shortage;
if (size == 0)
size = 16; /* XXX */
@@ -269,12 +269,12 @@ uvm_pageout(void *arg)
* scan if needed
*/
uvm_lock_pageq();
- free = uvmexp.free - BUFPAGES_DEFICIT;
+ shortage = uvmexp.freetarg - uvmexp.free + BUFPAGES_DEFICIT;
inactive_shortage =
uvmexp.inactarg - uvmexp.inactive - BUFPAGES_INACT;
- if (pma != NULL || (free < uvmexp.freetarg) ||
- (inactive_shortage > 0)) {
- uvmpd_scan(pma, inactive_shortage, &constraint);
+ if (pma != NULL || (shortage > 0) || (inactive_shortage > 0)) {
+ uvmpd_scan(pma, shortage, inactive_shortage,
+ &constraint);
}
/*
@@ -861,10 +861,10 @@ uvmpd_scan_inactive(struct uvm_pmalloc *
*/
void
-uvmpd_scan(struct uvm_pmalloc *pma, int inactive_shortage,
+uvmpd_scan(struct uvm_pmalloc *pma, int shortage, int inactive_shortage,
struct uvm_constraint_range *constraint)
{
- int free, swap_shortage, pages_freed;
+ int swap_shortage, pages_freed;
struct vm_page *p, *nextpg;
struct rwlock *slock;
paddr_t paddr;
@@ -873,20 +873,16 @@ uvmpd_scan(struct uvm_pmalloc *pma, int
uvmexp.pdrevs++; /* counter */
- /*
- * get current "free" page count
- */
- free = uvmexp.free - BUFPAGES_DEFICIT;
#ifdef __HAVE_PMAP_COLLECT
/*
* swap out some processes if we are below our free target.
* we need to unlock the page queues for this.
*/
- if (free < uvmexp.freetarg) {
+ if (shortage > 0) {
uvmexp.pdswout++;
uvm_unlock_pageq();
- uvm_swapout_threads();
+ shortage -= uvm_swapout_threads();
uvm_lock_pageq();
}
#endif
@@ -899,6 +895,7 @@ uvmpd_scan(struct uvm_pmalloc *pma, int
*/
pages_freed = uvmpd_scan_inactive(pma, constraint);
uvmexp.pdfreed += pages_freed;
+ shortage -= pages_freed;
/*
* we have done the scan to get free pages. now we work on meeting
@@ -907,11 +904,10 @@ uvmpd_scan(struct uvm_pmalloc *pma, int
* detect if we're not going to be able to page anything out
* until we free some swap resources from active pages.
*/
- free = uvmexp.free - BUFPAGES_DEFICIT;
swap_shortage = 0;
- if (free < uvmexp.freetarg && uvm_swapisfilled() && !uvm_swapisfull() &&
+ if ((shortage > 0) && uvm_swapisfilled() && !uvm_swapisfull() &&
pages_freed == 0) {
- swap_shortage = uvmexp.freetarg - free;
+ swap_shortage = shortage;
}
for (p = TAILQ_FIRST(&uvm.page_active);