From: Claudio Jeker
Subject: move common code to accumulate runtime into its own function
To: tech@openbsd.org
Date: Mon, 7 Oct 2024 15:36:29 +0200

Factor out lots of common code around runtime calculation into
tuagg_add_runtime().

-- 
:wq Claudio

Index: kern/kern_exit.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_exit.c,v
diff -u -p -r1.234 kern_exit.c
--- kern/kern_exit.c	30 Sep 2024 12:32:26 -0000	1.234
+++ kern/kern_exit.c	7 Oct 2024 13:33:13 -0000
@@ -118,7 +118,6 @@ exit1(struct proc *p, int xexit, int xsi
 {
 	struct process *pr, *qr, *nqr;
 	struct rusage *rup;
-	struct timespec ts, pts;
 
 	atomic_setbits_int(&p->p_flag, P_WEXIT);
 
@@ -172,16 +171,7 @@ exit1(struct proc *p, int xexit, int xsi
 	}
 
 	/* proc is off ps_threads list so update accounting of process now */
-	nanouptime(&ts);
-	if (timespeccmp(&ts, &curcpu()->ci_schedstate.spc_runtime, <))
-		timespecclear(&pts);
-	else
-		timespecsub(&ts, &curcpu()->ci_schedstate.spc_runtime, &pts);
-	tu_enter(&p->p_tu);
-	timespecadd(&p->p_tu.tu_runtime, &pts, &p->p_tu.tu_runtime);
-	tu_leave(&p->p_tu);
-	/* adjust spc_runtime to not double account the runtime from above */
-	curcpu()->ci_schedstate.spc_runtime = ts;
+	tuagg_add_runtime();
 	tuagg_add_process(p->p_p, p);
 
 	if ((p->p_flag & P_THREAD) == 0) {
Index: kern/kern_resource.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_resource.c,v
diff -u -p -r1.90 kern_resource.c
--- kern/kern_resource.c	3 Oct 2024 10:20:05 -0000	1.90
+++ kern/kern_resource.c	7 Oct 2024 13:33:13 -0000
@@ -443,6 +443,37 @@ tuagg_add_process(struct process *pr, st
 	p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;
 }
 
+void
+tuagg_add_runtime(void)
+{
+	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
+	struct proc *p = curproc;
+	struct timespec ts, delta;
+
+	/*
+	 * Compute the amount of time during which the current
+	 * process was running, and add that to its total so far.
+	 */
+	nanouptime(&ts);
+	if (timespeccmp(&ts, &spc->spc_runtime, <)) {
+#if 0
+		printf("uptime is not monotonic! "
+		    "ts=%lld.%09lu, runtime=%lld.%09lu\n",
+		    (long long)ts.tv_sec, ts.tv_nsec,
+		    (long long)spc->spc_runtime.tv_sec,
+		    spc->spc_runtime.tv_nsec);
+#endif
+		timespecclear(&delta);
+	} else {
+		timespecsub(&ts, &spc->spc_runtime, &delta);
+	}
+	/* update spc_runtime to not double account the runtime from above */
+	spc->spc_runtime = ts;
+	tu_enter(&p->p_tu);
+	timespecadd(&p->p_tu.tu_runtime, &delta, &p->p_tu.tu_runtime);
+	tu_leave(&p->p_tu);
+}
+
 /*
  * Transform the running time and tick information in a struct tusage
  * into user, system, and interrupt time usage.
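
A side note for readers not familiar with the tu_enter()/tu_leave()
pair used above: it brackets every write to p_tu so that lockless
readers can detect a torn read, in the style of a seqlock. A minimal
sketch of the idea, assuming the tu_gen generation counter in struct
tusage; the real implementations live in kern_resource.c and may
differ in detail:

	void
	tu_enter(struct tusage *tu)
	{
		++tu->tu_gen;		/* value becomes odd: write in progress */
		membar_producer();
	}

	void
	tu_leave(struct tusage *tu)
	{
		membar_producer();
		++tu->tu_gen;		/* value becomes even: write done */
	}

A reader simply retries its snapshot of the struct until it observes
an even and unchanged generation number.
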
Index: kern/kern_sched.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_sched.c,v
diff -u -p -r1.101 kern_sched.c
--- kern/kern_sched.c	6 Oct 2024 01:50:56 -0000	1.101
+++ kern/kern_sched.c	7 Oct 2024 13:33:13 -0000
@@ -213,21 +213,10 @@ void
 sched_exit(struct proc *p)
 {
 	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
-	struct timespec ts;
 
 	LIST_INSERT_HEAD(&spc->spc_deadproc, p, p_hash);
 
-	/* update the tu_runtime one last time */
-	nanouptime(&ts);
-	if (timespeccmp(&ts, &spc->spc_runtime, <))
-		timespecclear(&ts);
-	else
-		timespecsub(&ts, &spc->spc_runtime, &ts);
-
-	/* add the time counts for this thread */
-	tu_enter(&p->p_tu);
-	timespecadd(&p->p_tu.tu_runtime, &ts, &p->p_tu.tu_runtime);
-	tu_leave(&p->p_tu);
+	tuagg_add_runtime();
 
 	KERNEL_ASSERT_LOCKED();
 	sched_toidle();
Index: kern/sched_bsd.c
===================================================================
RCS file: /cvs/src/sys/kern/sched_bsd.c,v
diff -u -p -r1.94 sched_bsd.c
--- kern/sched_bsd.c	8 Jul 2024 13:17:12 -0000	1.94
+++ kern/sched_bsd.c	7 Oct 2024 13:33:13 -0000
@@ -344,7 +344,6 @@ mi_switch(void)
 	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
 	struct proc *p = curproc;
 	struct proc *nextproc;
-	struct timespec ts;
 	int oldipl;
 #ifdef MULTIPROCESSOR
 	int hold_count;
@@ -368,22 +367,7 @@ mi_switch(void)
 	 * Compute the amount of time during which the current
 	 * process was running, and add that to its total so far.
 	 */
-	nanouptime(&ts);
-	if (timespeccmp(&ts, &spc->spc_runtime, <)) {
-#if 0
-		printf("uptime is not monotonic! "
-		    "ts=%lld.%09lu, runtime=%lld.%09lu\n",
-		    (long long)tv.tv_sec, tv.tv_nsec,
-		    (long long)spc->spc_runtime.tv_sec,
-		    spc->spc_runtime.tv_nsec);
-#endif
-		timespecclear(&ts);
-	} else {
-		timespecsub(&ts, &spc->spc_runtime, &ts);
-	}
-	tu_enter(&p->p_tu);
-	timespecadd(&p->p_tu.tu_runtime, &ts, &p->p_tu.tu_runtime);
-	tu_leave(&p->p_tu);
+	tuagg_add_runtime();
 
 	/* Stop any optional clock interrupts. */
 	if (ISSET(spc->spc_schedflags, SPCF_ITIMER)) {
Index: sys/resourcevar.h
===================================================================
RCS file: /cvs/src/sys/sys/resourcevar.h,v
diff -u -p -r1.33 resourcevar.h
--- sys/resourcevar.h	1 Oct 2024 09:22:25 -0000	1.33
+++ sys/resourcevar.h	7 Oct 2024 13:33:13 -0000
@@ -67,6 +67,7 @@ void	addupc_task(struct proc *, u_long,
 struct clockrequest;
 void	profclock(struct clockrequest *, void *, void *);
 void	tuagg_add_process(struct process *, struct proc *);
+void	tuagg_add_runtime(void);
 struct tusage;
 void	tuagg_get_proc(struct tusage *, struct proc *);
 void	tuagg_get_process(struct tusage *, struct process *);
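
P.S.: the delta computation in tuagg_add_runtime() can be mimicked in
userland with the timespec macros from <sys/time.h>, which may help
when reviewing the arithmetic. A small sketch, with CLOCK_MONOTONIC
standing in for nanouptime() and made-up names ("mark" playing the
role of spc_runtime, "total" that of tu_runtime):

	#include <sys/time.h>
	#include <stdio.h>
	#include <time.h>

	int
	main(void)
	{
		struct timespec mark, now, delta, total = { 0, 0 };

		clock_gettime(CLOCK_MONOTONIC, &mark);

		/* ... the "thread" runs for a while ... */

		clock_gettime(CLOCK_MONOTONIC, &now);
		if (timespeccmp(&now, &mark, <))
			timespecclear(&delta);	/* clock jumped backwards */
		else
			timespecsub(&now, &mark, &delta);
		/* move the mark so this interval is not counted twice */
		mark = now;
		timespecadd(&total, &delta, &total);

		printf("ran for %lld.%09ld seconds\n",
		    (long long)total.tv_sec, total.tv_nsec);
		return 0;
	}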