From: Claudio Jeker
Subject: Re: clockintr: switch from callee- to caller-allocated structs
To: Scott Cheloha
Cc: tech@openbsd.org, mlarkin@openbsd.org, mpi@openbsd.org
Date: Tue, 23 Jan 2024 09:29:44 +0100

On Mon, Jan 22, 2024 at 06:53:14PM -0600, Scott Cheloha wrote:
> Currently, clockintr_establish() calls malloc(9) to allocate a struct
> clockintr.  mpi@ says this behavior is incompatible with dt(4).  So
> this patch changes the clockintr subsystem to use caller-allocated
> clockintr structs instead of callee-allocated structs.
>
> clockintr_establish() is named after softintr_establish(), which
> itself calls malloc(9) to create a softintr object.  To avoid
> confusion, this patch changes the name of the object initialization
> interface from "clockintr_establish" to "clockintr_bind".  An updated
> manpage, clockintr_bind.9, is included.
>
> The patch is mostly rototilling: change pointers to plain structs and
> add '&' wherever necessary.
>
> ok?

I like this.  OK claudio@

> Index: share/man/man9/clockintr_bind.9
> ===================================================================
> RCS file: share/man/man9/clockintr_bind.9
> diff -N share/man/man9/clockintr_bind.9
> --- /dev/null	1 Jan 1970 00:00:00 -0000
> +++ share/man/man9/clockintr_bind.9	23 Jan 2024 00:49:56 -0000
> @@ -0,0 +1,288 @@
> +.\" $OpenBSD$
> +.\"
> +.\" Copyright (c) 2023-2024 Scott Cheloha
> +.\"
> +.\" Permission to use, copy, modify, and distribute this software for any
> +.\" purpose with or without fee is hereby granted, provided that the above
> +.\" copyright notice and this permission notice appear in all copies.
> +.\"
> +.\" THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
> +.\" WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
> +.\" MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
> +.\" ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
> +.\" WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
> +.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
> +.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
> +.\"
> +.Dd $Mdocdate$
> +.Dt CLOCKINTR_BIND 9
> +.Os
> +.Sh NAME
> +.Nm clockintr_bind ,
> +.Nm clockintr_schedule ,
> +.Nm clockintr_advance ,
> +.Nm clockintr_cancel ,
> +.Nm clockintr_unbind ,
> +.Nm clockintr_stagger ,
> +.Nm clockrequest_advance
> +.Nd execute a function in a clock interrupt context
> +.Sh SYNOPSIS
> +.In sys/clockintr.h
> +.Ft void
> +.Fo clockintr_bind
> +.Fa "struct clockintr *cl"
> +.Fa "struct cpu_info *cpu"
> +.Fa "void (*callback)(struct clockrequest *cr, void *cf, void *arg)"
> +.Fa "void *arg"
> +.Fc
> +.Ft void
> +.Fo clockintr_schedule
> +.Fa "struct clockintr *cl"
> +.Fa "uint64_t abs"
> +.Fc
> +.Ft uint64_t
> +.Fo clockintr_advance
> +.Fa "struct clockintr *cl"
> +.Fa "uint64_t interval"
> +.Fc
> +.Ft void
> +.Fo clockintr_cancel
> +.Fa "struct clockintr *cl"
> +.Fc
> +.Ft void
> +.Fo clockintr_unbind
> +.Fa "struct clockintr *cl"
> +.Fa "uint32_t flags"
> +.Fc
> +.Ft void
> +.Fo clockintr_stagger
> +.Fa "struct clockintr *cl"
> +.Fa "uint64_t interval"
> +.Fa "uint32_t numer"
> +.Fa "uint32_t denom"
> +.Fc
> +.Ft uint64_t
> +.Fo clockrequest_advance
> +.Fa "struct clockrequest *cr"
> +.Fa "uint64_t interval"
> +.Fc
> +.\" .Fn clockrequest_advance_random is intentionally undocumented.
> +.\" It may be removed in the future.  New code should not use it.
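For readers following along, the synopsis above already shows the
practical difference: the caller now owns the storage for the struct
clockintr.  A minimal sketch of driver usage before and after; the
"mydev" names are hypothetical and not part of the patch:

	#include <sys/clockintr.h>

	struct mydev_softc {
		struct clockintr sc_tick;	/* caller-owned storage */
		/* ... */
	};

	static void	mydev_tick(struct clockrequest *, void *, void *);

	void
	mydev_attach(struct mydev_softc *sc, struct cpu_info *ci)
	{
		/*
		 * Old interface (callee-allocated, could fail):
		 *
		 *	sc->sc_tick = clockintr_establish(ci, mydev_tick, sc);
		 *	if (sc->sc_tick == NULL)
		 *		panic("mydev: clockintr_establish failed");
		 *
		 * New interface: sc_tick is embedded in the softc and must
		 * be zeroed before the first bind (e.g. the softc comes
		 * from malloc(9) with M_ZERO); binding cannot fail.
		 */
		clockintr_bind(&sc->sc_tick, ci, mydev_tick, sc);
	}
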
> +.Sh DESCRIPTION
> +The clock interrupt subsystem schedules functions for asynchronous execution
> +from the clock interrupt context on a particular CPU.
> +.Pp
> +Clock interrupts are well-suited for timekeeping,
> +scheduling,
> +and statistical profiling.
> +Applications with more relaxed latency requirements should use timeouts
> +to schedule asynchronous execution;
> +see
> +.Xr timeout_add 9
> +for details.
> +.Pp
> +The
> +.Fn clockintr_bind
> +function initializes the clock interrupt object
> +.Fa cl .
> +When
> +.Fa cl
> +expires,
> +its
> +.Fa callback
> +function is executed from the
> +.Dv IPL_CLOCK
> +context on its host
> +.Fa cpu
> +without any locks or mutexes.
> +The callback function must not block.
> +Its parameters are as follows:
> +.Bl -tag -width indent
> +.It Fa cr
> +A private
> +.Vt clockrequest
> +object.
> +May be used to request rescheduling;
> +see
> +.Fn clockrequest_advance
> +below.
> +.It Fa cf
> +The
> +.Fa cpu Ns 's
> +current machine-dependent clockframe.
> +.It Fa arg
> +The
> +.Fa arg
> +given to
> +.Fn clockintr_bind .
> +.El
> +.Pp
> +The memory pointed to by
> +.Fa cl
> +must be zeroed before it is first bound.
> +It is an error to use
> +.Fa cl
> +as argument to any other function in the
> +.Vt clockintr
> +API before it is bound.
> +It is an error to rebind
> +.Fa cl
> +without first unbinding it;
> +see
> +.Fn clockintr_unbind
> +below.
> +.Pp
> +The
> +.Fn clockintr_schedule
> +function schedules
> +.Fa cl
> +to expire at the absolute time
> +.Fa abs
> +on the system uptime clock.
> +The subsystem will never execute
> +.Fa cl Ns 's
> +callback function before this expiration time,
> +though its execution may be delayed by other activity on the system.
> +.Pp
> +The
> +.Fn clockintr_advance
> +function schedules
> +.Fa cl
> +to expire at the next terminus of the given
> +.Fa interval ,
> +a non-zero count of nanoseconds,
> +relative to
> +.Fa cl Ns 's
> +current expiration time.
> +Periodic clock interrupts should be scheduled with
> +.Fn clockintr_advance
> +to keep the execution period from drifting.
> +.Pp
> +The
> +.Fn clockintr_cancel
> +function cancels any pending expiration of
> +.Fa cl .
> +.Pp
> +The
> +.Fn clockintr_unbind
> +function cancels any pending expiration of
> +.Fa cl
> +and severs the binding between
> +.Fa cl
> +and its host
> +.Fa cpu .
> +Upon return,
> +.Fa cl
> +may be rebound with
> +.Fn clockintr_bind .
> +The call may be configured with zero or more of the following
> +.Fa flags :
> +.Bl -tag -width CL_BARRIER
> +.It Dv CL_BARRIER
> +If
> +.Fa cl Ns 's
> +callback function is executing,
> +block until it returns.
> +By default,
> +the caller does not block.
> +Useful when
> +.Fa arg
> +is a shared reference.
> +.El
> +.Pp
> +The
> +.Fn clockintr_stagger
> +function resets
> +.Fa cl Ns 's
> +expiration time to a fraction of the given
> +.Fa interval ,
> +a count of nanoseconds.
> +Specifically,
> +.Fa cl Ns 's
> +expiration time is reset to
> +.Pq Fa interval Ms / Fa denom Ms * Fa numer .
> +Periodic clock interrupts bound to multiple CPUs may be staggered
> +to reduce the likelihood that their callback functions will execute
> +simultaneously and compete for a shared resource.
> +It is an error if
> +.Fa numer
> +is greater than or equal to
> +.Fa denom .
> +It is an error if
> +.Fa cl
> +is already scheduled to expire.
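A quick lifecycle sketch to go with the description above; the
"example_*" names and the 10 ms period are made up, the calls are the
documented API:

	#define EXAMPLE_PERIOD	10000000ULL	/* 10 ms, in nanoseconds */

	static void	example_tick(struct clockrequest *, void *, void *);

	/* Bind a zeroed clockintr to its host CPU and arm it. */
	void
	example_start(struct clockintr *cl, struct cpu_info *ci,
	    uint32_t unit, uint32_t ncpus)
	{
		clockintr_bind(cl, ci, example_tick, NULL);

		/* Offset this CPU's expiry: EXAMPLE_PERIOD / ncpus * unit. */
		clockintr_stagger(cl, EXAMPLE_PERIOD, unit, ncpus);

		/* Schedule the first expiration one period out. */
		clockintr_advance(cl, EXAMPLE_PERIOD);
	}

	/* Tear down from process context; wait out a running callback. */
	void
	example_stop(struct clockintr *cl)
	{
		clockintr_unbind(cl, CL_BARRIER);
	}
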
> +.Pp
> +The
> +.Fn clockrequest_advance
> +function is similar to
> +.Fn clockintr_advance ,
> +except that
> +(a) it may only be called during the execution of a
> +.Fa callback
> +function,
> +(b) it accepts a
> +.Vt clockrequest
> +pointer as argument,
> +and (c) scheduling requests submitted with the interface are not fulfilled
> +until the callback function returns.
> +When the callback function returns,
> +scheduling requests are only committed to the underlying clock interrupt
> +object if that object was not manipulated during the execution of the
> +callback function.
> +Otherwise,
> +outstanding requests are discarded.
> +.Sh CONTEXT
> +The
> +.Fn clockintr_bind
> +and
> +.Fn clockintr_unbind
> +functions may only be called from process context.
> +.Pp
> +The
> +.Fn clockintr_advance ,
> +.Fn clockintr_cancel ,
> +.Fn clockintr_schedule ,
> +and
> +.Fn clockintr_stagger
> +functions may be called from process context or from interrupt context.
> +.Pp
> +The
> +.Fn clockrequest_advance
> +function may only be called during execution of a
> +.Fa callback
> +function.
> +.Sh RETURN VALUES
> +The
> +.Fn clockintr_advance
> +and
> +.Fn clockrequest_advance
> +functions return the number of
> +.Fa interval Ns s
> +that have elapsed since
> +.Fa cl
> +was scheduled to expire,
> +or zero if
> +.Fa cl
> +has not yet expired.
> +.Sh CODE REFERENCES
> +.Pa sys/kern/kern_clockintr.c
> +.Sh SEE ALSO
> +.Xr microtime 9 ,
> +.Xr spl 9 ,
> +.Xr timeout 9
> +.Rs
> +.%A Richard McDougall
> +.%A Jim Mauro
> +.%B Solaris Internals: Solaris 10 and OpenSolaris Kernel Architecture
> +.%I Prentice Hall
> +.%I Sun Microsystems Press
> +.%D 2nd Edition, 2007
> +.%P pp. 912\(en925
> +.Re
> +.Sh HISTORY
> +The
> +.Vt clockintr
> +and
> +.Vt clockrequest
> +APIs first appeared in
> +.Ox 7.5 .
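And the self-rescheduling side, since clockrequest_advance() is the one
interface here without a pre-existing analogue.  A sketch of a periodic
callback, reusing EXAMPLE_PERIOD from the earlier sketch;
"example_ticks" is hypothetical:

	static uint64_t example_ticks;

	static void
	example_tick(struct clockrequest *cr, void *cf, void *arg)
	{
		uint64_t count;

		/*
		 * Request the next expiration one period out.  Per the
		 * manpage, the request is only committed once this
		 * callback returns, and it is discarded if the object
		 * is manipulated in the meantime.
		 */
		count = clockrequest_advance(cr, EXAMPLE_PERIOD);

		/* count > 1 means one or more periods were missed. */
		example_ticks += count;
	}
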
> Index: sys/sys/clockintr.h
> ===================================================================
> RCS file: /cvs/src/sys/sys/clockintr.h,v
> diff -u -p -r1.24 clockintr.h
> --- sys/sys/clockintr.h	15 Jan 2024 01:15:37 -0000	1.24
> +++ sys/sys/clockintr.h	23 Jan 2024 00:49:56 -0000
> @@ -102,7 +102,7 @@ struct clockintr_queue {
>  	TAILQ_HEAD(, clockintr) cq_all;	/* [m] established clockintr list */
>  	TAILQ_HEAD(, clockintr) cq_pend;/* [m] pending clockintr list */
>  	struct clockintr *cq_running;	/* [m] running clockintr */
> -	struct clockintr *cq_hardclock;	/* [o] hardclock handle */
> +	struct clockintr cq_hardclock;	/* [o] hardclock handle */
>  	struct intrclock cq_intrclock;	/* [I] local interrupt clock */
>  	struct clockintr_stat cq_stat;	/* [o] dispatch statistics */
>  	volatile uint32_t cq_gen;	/* [o] cq_stat update generation */
> @@ -124,9 +124,9 @@ void clockintr_trigger(void);
>   */
> 
>  uint64_t clockintr_advance(struct clockintr *, uint64_t);
> -void clockintr_cancel(struct clockintr *);
> -struct clockintr *clockintr_establish(struct cpu_info *,
> +void clockintr_bind(struct clockintr *, struct cpu_info *,
>  	    void (*)(struct clockrequest *, void *, void *), void *);
> +void clockintr_cancel(struct clockintr *);
>  void clockintr_schedule(struct clockintr *, uint64_t);
>  void clockintr_stagger(struct clockintr *, uint64_t, uint32_t, uint32_t);
>  uint64_t clockrequest_advance(struct clockrequest *, uint64_t);
> Index: sys/sys/sched.h
> ===================================================================
> RCS file: /cvs/src/sys/sys/sched.h,v
> diff -u -p -r1.69 sched.h
> --- sys/sys/sched.h	14 Jan 2024 17:23:56 -0000	1.69
> +++ sys/sys/sched.h	23 Jan 2024 00:49:56 -0000
> @@ -95,11 +95,11 @@ struct cpustats {
> 
>  #ifdef _KERNEL
> 
> +#include <sys/clockintr.h>
>  #include <sys/queue.h>
> 
>  #define SCHED_NQS	32	/* 32 run queues. */
> 
> -struct clockintr;
>  struct smr_entry;
> 
>  /*
> @@ -115,10 +115,10 @@ struct schedstate_percpu {
>  	u_int64_t spc_cp_time[CPUSTATES]; /* CPU state statistics */
>  	u_char spc_curpriority;		/* usrpri of curproc */
> 
> -	struct clockintr *spc_itimer;	/* [o] itimer_update handle */
> -	struct clockintr *spc_profclock; /* [o] profclock handle */
> -	struct clockintr *spc_roundrobin; /* [o] roundrobin handle */
> -	struct clockintr *spc_statclock; /* [o] statclock handle */
> +	struct clockintr spc_itimer;	/* [o] itimer_update handle */
> +	struct clockintr spc_profclock;	/* [o] profclock handle */
> +	struct clockintr spc_roundrobin;/* [o] roundrobin handle */
> +	struct clockintr spc_statclock;	/* [o] statclock handle */
> 
>  	u_int spc_nrun;			/* procs on the run queues */
> 
> Index: sys/kern/kern_clockintr.c
> ===================================================================
> RCS file: /cvs/src/sys/kern/kern_clockintr.c,v
> diff -u -p -r1.63 kern_clockintr.c
> --- sys/kern/kern_clockintr.c	15 Jan 2024 01:15:37 -0000	1.63
> +++ sys/kern/kern_clockintr.c	23 Jan 2024 00:49:56 -0000
> @@ -62,11 +62,9 @@ clockintr_cpu_init(const struct intrcloc
>  	clockqueue_intrclock_install(cq, ic);
> 
>  	/* TODO: Remove this from struct clockintr_queue. */
> -	if (cq->cq_hardclock == NULL) {
> -		cq->cq_hardclock = clockintr_establish(ci, clockintr_hardclock,
> +	if (cq->cq_hardclock.cl_expiration == 0) {
> +		clockintr_bind(&cq->cq_hardclock, ci, clockintr_hardclock,
>  		    NULL);
> -		if (cq->cq_hardclock == NULL)
> -			panic("%s: failed to establish hardclock", __func__);
>  	}
> 
>  	/*
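One subtlety worth spelling out: with the malloc(9) gone, "is this
handle set up yet?" can no longer be a NULL-pointer test, so the hunk
above keys off cl_expiration == 0 instead.  That is why the new
manpage insists the caller's memory be zeroed before the first bind.
Roughly (sketch, not code from the patch):

	struct clockintr cl;	/* lives in zeroed storage */

	if (cl.cl_expiration == 0) {
		/* never scheduled yet: safe to bind and stagger */
	}
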
> @@ -96,16 +94,16 @@ clockintr_cpu_init(const struct intrcloc
>  	 * behalf.
>  	 */
>  	if (CPU_IS_PRIMARY(ci)) {
> -		if (cq->cq_hardclock->cl_expiration == 0)
> -			clockintr_schedule(cq->cq_hardclock, 0);
> +		if (cq->cq_hardclock.cl_expiration == 0)
> +			clockintr_schedule(&cq->cq_hardclock, 0);
>  		else
> -			clockintr_advance(cq->cq_hardclock, hardclock_period);
> +			clockintr_advance(&cq->cq_hardclock, hardclock_period);
>  	} else {
> -		if (cq->cq_hardclock->cl_expiration == 0) {
> -			clockintr_stagger(cq->cq_hardclock, hardclock_period,
> +		if (cq->cq_hardclock.cl_expiration == 0) {
> +			clockintr_stagger(&cq->cq_hardclock, hardclock_period,
>  			    multiplier, MAXCPUS);
>  		}
> -		clockintr_advance(cq->cq_hardclock, hardclock_period);
> +		clockintr_advance(&cq->cq_hardclock, hardclock_period);
>  	}
> 
>  	/*
> @@ -113,30 +111,30 @@ clockintr_cpu_init(const struct intrcloc
>  	 * stagger a randomized statclock.
>  	 */
>  	if (!statclock_is_randomized) {
> -		if (spc->spc_statclock->cl_expiration == 0) {
> -			clockintr_stagger(spc->spc_statclock, statclock_avg,
> +		if (spc->spc_statclock.cl_expiration == 0) {
> +			clockintr_stagger(&spc->spc_statclock, statclock_avg,
>  			    multiplier, MAXCPUS);
>  		}
>  	}
> -	clockintr_advance(spc->spc_statclock, statclock_avg);
> +	clockintr_advance(&spc->spc_statclock, statclock_avg);
> 
>  	/*
>  	 * XXX Need to find a better place to do this.  We can't do it in
>  	 * sched_init_cpu() because initclocks() runs after it.
>  	 */
> -	if (spc->spc_itimer->cl_expiration == 0) {
> -		clockintr_stagger(spc->spc_itimer, hardclock_period,
> +	if (spc->spc_itimer.cl_expiration == 0) {
> +		clockintr_stagger(&spc->spc_itimer, hardclock_period,
>  		    multiplier, MAXCPUS);
>  	}
> -	if (spc->spc_profclock->cl_expiration == 0) {
> -		clockintr_stagger(spc->spc_profclock, profclock_period,
> +	if (spc->spc_profclock.cl_expiration == 0) {
> +		clockintr_stagger(&spc->spc_profclock, profclock_period,
>  		    multiplier, MAXCPUS);
>  	}
> -	if (spc->spc_roundrobin->cl_expiration == 0) {
> -		clockintr_stagger(spc->spc_roundrobin, hardclock_period,
> +	if (spc->spc_roundrobin.cl_expiration == 0) {
> +		clockintr_stagger(&spc->spc_roundrobin, hardclock_period,
>  		    multiplier, MAXCPUS);
>  	}
> -	clockintr_advance(spc->spc_roundrobin, roundrobin_period);
> +	clockintr_advance(&spc->spc_roundrobin, roundrobin_period);
> 
>  	if (reset_cq_intrclock)
>  		SET(cq->cq_flags, CQ_INTRCLOCK);
> @@ -337,16 +335,12 @@ clockintr_cancel(struct clockintr *cl)
>  	mtx_leave(&cq->cq_mtx);
>  }
> 
> -struct clockintr *
> -clockintr_establish(struct cpu_info *ci,
> +void
> +clockintr_bind(struct clockintr *cl, struct cpu_info *cpu,
>      void (*func)(struct clockrequest *, void *, void *), void *arg)
>  {
> -	struct clockintr *cl;
> -	struct clockintr_queue *cq = &ci->ci_queue;
> +	struct clockintr_queue *cq = &cpu->ci_queue;
> 
> -	cl = malloc(sizeof *cl, M_DEVBUF, M_NOWAIT | M_ZERO);
> -	if (cl == NULL)
> -		return NULL;
>  	cl->cl_arg = arg;
>  	cl->cl_func = func;
>  	cl->cl_queue = cq;
> @@ -354,7 +348,6 @@ clockintr_establish(struct cpu_info *ci,
>  	mtx_enter(&cq->cq_mtx);
>  	TAILQ_INSERT_TAIL(&cq->cq_all, cl, cl_alink);
>  	mtx_leave(&cq->cq_mtx);
> -	return cl;
>  }
> 
>  void
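As an aside on the staggering in clockintr_cpu_init() above: each
CPU's first expiry is offset by interval / denom * numer (the manpage
formula), so the periodic callbacks spread across one period instead
of firing in lockstep on every CPU.  A worked sketch with made-up
numbers:

	uint64_t interval = 10000000;	/* 10 ms in nanoseconds */
	uint32_t denom = 4, numer;	/* e.g. 4 CPUs */

	for (numer = 0; numer < denom; numer++) {
		uint64_t offset = interval / denom * numer;
		/* offsets: 0 ms, 2.5 ms, 5 ms, 7.5 ms */
	}
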
> Index: sys/kern/sched_bsd.c
> ===================================================================
> RCS file: /cvs/src/sys/kern/sched_bsd.c,v
> diff -u -p -r1.89 sched_bsd.c
> --- sys/kern/sched_bsd.c	17 Oct 2023 00:04:02 -0000	1.89
> +++ sys/kern/sched_bsd.c	23 Jan 2024 00:49:56 -0000
> @@ -396,11 +396,11 @@ mi_switch(void)
>  	/* Stop any optional clock interrupts. */
>  	if (ISSET(spc->spc_schedflags, SPCF_ITIMER)) {
>  		atomic_clearbits_int(&spc->spc_schedflags, SPCF_ITIMER);
> -		clockintr_cancel(spc->spc_itimer);
> +		clockintr_cancel(&spc->spc_itimer);
>  	}
>  	if (ISSET(spc->spc_schedflags, SPCF_PROFCLOCK)) {
>  		atomic_clearbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
> -		clockintr_cancel(spc->spc_profclock);
> +		clockintr_cancel(&spc->spc_profclock);
>  	}
> 
>  	/*
> @@ -451,11 +451,11 @@ mi_switch(void)
>  	/* Start any optional clock interrupts needed by the thread. */
>  	if (ISSET(p->p_p->ps_flags, PS_ITIMER)) {
>  		atomic_setbits_int(&spc->spc_schedflags, SPCF_ITIMER);
> -		clockintr_advance(spc->spc_itimer, hardclock_period);
> +		clockintr_advance(&spc->spc_itimer, hardclock_period);
>  	}
>  	if (ISSET(p->p_p->ps_flags, PS_PROFIL)) {
>  		atomic_setbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
> -		clockintr_advance(spc->spc_profclock, profclock_period);
> +		clockintr_advance(&spc->spc_profclock, profclock_period);
>  	}
> 
>  	nanouptime(&spc->spc_runtime);
> Index: sys/kern/kern_sched.c
> ===================================================================
> RCS file: /cvs/src/sys/kern/kern_sched.c,v
> diff -u -p -r1.93 kern_sched.c
> --- sys/kern/kern_sched.c	24 Oct 2023 13:20:11 -0000	1.93
> +++ sys/kern/kern_sched.c	23 Jan 2024 00:49:56 -0000
> @@ -88,18 +88,10 @@ sched_init_cpu(struct cpu_info *ci)
> 
>  	spc->spc_idleproc = NULL;
> 
> -	spc->spc_itimer = clockintr_establish(ci, itimer_update, NULL);
> -	if (spc->spc_itimer == NULL)
> -		panic("%s: clockintr_establish itimer_update", __func__);
> -	spc->spc_profclock = clockintr_establish(ci, profclock, NULL);
> -	if (spc->spc_profclock == NULL)
> -		panic("%s: clockintr_establish profclock", __func__);
> -	spc->spc_roundrobin = clockintr_establish(ci, roundrobin, NULL);
> -	if (spc->spc_roundrobin == NULL)
> -		panic("%s: clockintr_establish roundrobin", __func__);
> -	spc->spc_statclock = clockintr_establish(ci, statclock, NULL);
> -	if (spc->spc_statclock == NULL)
> -		panic("%s: clockintr_establish statclock", __func__);
> +	clockintr_bind(&spc->spc_itimer, ci, itimer_update, NULL);
> +	clockintr_bind(&spc->spc_profclock, ci, profclock, NULL);
> +	clockintr_bind(&spc->spc_roundrobin, ci, roundrobin, NULL);
> +	clockintr_bind(&spc->spc_statclock, ci, statclock, NULL);
> 
>  	kthread_create_deferred(sched_kthreads_create, ci);
> 
> @@ -244,11 +236,11 @@ sched_toidle(void)
> 
>  	if (ISSET(spc->spc_schedflags, SPCF_ITIMER)) {
>  		atomic_clearbits_int(&spc->spc_schedflags, SPCF_ITIMER);
> -		clockintr_cancel(spc->spc_itimer);
> +		clockintr_cancel(&spc->spc_itimer);
>  	}
>  	if (ISSET(spc->spc_schedflags, SPCF_PROFCLOCK)) {
>  		atomic_clearbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
> -		clockintr_cancel(spc->spc_profclock);
> +		clockintr_cancel(&spc->spc_profclock);
>  	}
> 
>  	atomic_clearbits_int(&spc->spc_schedflags, SPCF_SWITCHCLEAR);
> Index: sys/kern/kern_fork.c
> ===================================================================
> RCS file: /cvs/src/sys/kern/kern_fork.c,v
> diff -u -p -r1.256 kern_fork.c
> --- sys/kern/kern_fork.c	19 Jan 2024 01:43:26 -0000	1.256
> +++ sys/kern/kern_fork.c	23 Jan 2024 00:49:56 -0000
> @@ -704,11 +704,11 @@ proc_trampoline_mi(void)
>  	/* Start any optional clock interrupts needed by the thread. */
>  	if (ISSET(p->p_p->ps_flags, PS_ITIMER)) {
>  		atomic_setbits_int(&spc->spc_schedflags, SPCF_ITIMER);
> -		clockintr_advance(spc->spc_itimer, hardclock_period);
> +		clockintr_advance(&spc->spc_itimer, hardclock_period);
>  	}
>  	if (ISSET(p->p_p->ps_flags, PS_PROFIL)) {
>  		atomic_setbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
> -		clockintr_advance(spc->spc_profclock, profclock_period);
> +		clockintr_advance(&spc->spc_profclock, profclock_period);
>  	}
> 
>  	nanouptime(&spc->spc_runtime);
> Index: sys/kern/subr_prof.c
> ===================================================================
> RCS file: /cvs/src/sys/kern/subr_prof.c,v
> diff -u -p -r1.40 subr_prof.c
> --- sys/kern/subr_prof.c	17 Oct 2023 00:04:02 -0000	1.40
> +++ sys/kern/subr_prof.c	23 Jan 2024 00:49:56 -0000
> @@ -101,19 +101,16 @@ prof_init(void)
> 
>  	/* Allocate and initialize one profiling buffer per CPU. */
>  	CPU_INFO_FOREACH(cii, ci) {
> -		ci->ci_gmonclock = clockintr_establish(ci, gmonclock, NULL);
> -		if (ci->ci_gmonclock == NULL) {
> -			printf("%s: clockintr_establish gmonclock\n", __func__);
> -			return;
> -		}
> -		clockintr_stagger(ci->ci_gmonclock, profclock_period,
> -		    CPU_INFO_UNIT(ci), MAXCPUS);
>  		cp = km_alloc(round_page(size), &kv_any, &kp_zero, &kd_nowait);
>  		if (cp == NULL) {
>  			printf("No memory for profiling.\n");
>  			return;
>  		}
> 
> +		clockintr_bind(&ci->ci_gmonclock, ci, gmonclock, NULL);
> +		clockintr_stagger(&ci->ci_gmonclock, profclock_period,
> +		    CPU_INFO_UNIT(ci), MAXCPUS);
> +
>  		p = (struct gmonparam *)cp;
>  		cp += sizeof(*p);
>  		p->tos = (struct tostruct *)cp;
> @@ -159,7 +156,7 @@ prof_state_toggle(struct cpu_info *ci, i
>  		if (error == 0) {
>  			if (++gmon_cpu_count == 1)
>  				startprofclock(&process0);
> -			clockintr_advance(ci->ci_gmonclock, profclock_period);
> +			clockintr_advance(&ci->ci_gmonclock, profclock_period);
>  		}
>  		break;
>  	default:
> @@ -167,7 +164,7 @@ prof_state_toggle(struct cpu_info *ci, i
>  		gp->state = GMON_PROF_OFF;
>  		/* FALLTHROUGH */
>  	case GMON_PROF_OFF:
> -		clockintr_cancel(ci->ci_gmonclock);
> +		clockintr_cancel(&ci->ci_gmonclock);
>  		if (--gmon_cpu_count == 0)
>  			stopprofclock(&process0);
>  #if !defined(GPROF)
> Index: sys/arch/alpha/include/cpu.h
> ===================================================================
> RCS file: /cvs/src/sys/arch/alpha/include/cpu.h,v
> diff -u -p -r1.70 cpu.h
> --- sys/arch/alpha/include/cpu.h	25 Jul 2023 18:16:19 -0000	1.70
> +++ sys/arch/alpha/include/cpu.h	23 Jan 2024 00:49:56 -0000
> @@ -212,7 +212,7 @@ struct cpu_info {
>  #endif
>  #ifdef GPROF
>  	struct gmonparam *ci_gmon;
> -	struct clockintr *ci_gmonclock;
> +	struct clockintr ci_gmonclock;
>  #endif
>  	struct clockintr_queue ci_queue;
>  	char ci_panicbuf[512];
> Index: sys/arch/amd64/include/cpu.h
> ===================================================================
> RCS file: /cvs/src/sys/arch/amd64/include/cpu.h,v
> diff -u -p -r1.159 cpu.h
> --- sys/arch/amd64/include/cpu.h	23 Aug 2023 01:55:46 -0000	1.159
> +++ sys/arch/amd64/include/cpu.h	23 Jan 2024 00:49:56 -0000
> @@ -208,7 +208,7 @@ struct cpu_info {
>  	u_int64_t ci_hz_aperf;
>  #if defined(GPROF) || defined(DDBPROF)
>  	struct gmonparam *ci_gmon;
> -	struct clockintr *ci_gmonclock;
> +	struct clockintr ci_gmonclock;
>  #endif
>  	u_int32_t ci_vmm_flags;
>  #define CI_VMM_VMX	(1 << 0)
> Index: sys/arch/arm/include/cpu.h
> ===================================================================
> RCS file: /cvs/src/sys/arch/arm/include/cpu.h,v
> diff -u -p -r1.64 cpu.h
> --- sys/arch/arm/include/cpu.h	23 Aug 2023 01:55:46 -0000	1.64
> +++ sys/arch/arm/include/cpu.h	23 Jan 2024 00:49:56 -0000
> @@ -198,7 +198,7 @@ struct cpu_info {
> 
>  #ifdef GPROF
>  	struct gmonparam *ci_gmon;
> -	struct clockintr *ci_gmonclock;
> +	struct clockintr ci_gmonclock;
>  #endif
>  	struct clockintr_queue ci_queue;
>  	char ci_panicbuf[512];
> Index: sys/arch/arm64/include/cpu.h
> ===================================================================
> RCS file: /cvs/src/sys/arch/arm64/include/cpu.h,v
> diff -u -p -r1.41 cpu.h
> --- sys/arch/arm64/include/cpu.h	15 Jan 2024 11:58:45 -0000	1.41
> +++ sys/arch/arm64/include/cpu.h	23 Jan 2024 00:49:56 -0000
> @@ -174,7 +174,7 @@ struct cpu_info {
> 
>  #ifdef GPROF
>  	struct gmonparam *ci_gmon;
> -	struct clockintr *ci_gmonclock;
> +	struct clockintr ci_gmonclock;
>  #endif
>  	struct clockintr_queue ci_queue;
>  	char ci_panicbuf[512];
> Index: sys/arch/hppa/include/cpu.h
> ===================================================================
> RCS file: /cvs/src/sys/arch/hppa/include/cpu.h,v
> diff -u -p -r1.102 cpu.h
> --- sys/arch/hppa/include/cpu.h	5 Nov 2023 16:33:50 -0000	1.102
> +++ sys/arch/hppa/include/cpu.h	23 Jan 2024 00:49:56 -0000
> @@ -113,7 +113,7 @@ struct cpu_info {
>  #endif
>  #ifdef GPROF
>  	struct gmonparam *ci_gmon;
> -	struct clockintr *ci_gmonclock;
> +	struct clockintr ci_gmonclock;
>  #endif
>  	struct clockintr_queue ci_queue;
>  	char ci_panicbuf[512];
> Index: sys/arch/i386/include/cpu.h
> ===================================================================
> RCS file: /cvs/src/sys/arch/i386/include/cpu.h,v
> diff -u -p -r1.183 cpu.h
> --- sys/arch/i386/include/cpu.h	23 Aug 2023 01:55:46 -0000	1.183
> +++ sys/arch/i386/include/cpu.h	23 Jan 2024 00:49:56 -0000
> @@ -168,7 +168,7 @@ struct cpu_info {
>  	struct ksensor ci_sensor;
>  #if defined(GPROF) || defined(DDBPROF)
>  	struct gmonparam *ci_gmon;
> -	struct clockintr *ci_gmonclock;
> +	struct clockintr ci_gmonclock;
>  #endif
>  	struct clockintr_queue ci_queue;
>  	char ci_panicbuf[512];
> Index: sys/arch/m88k/include/cpu.h
> ===================================================================
> RCS file: /cvs/src/sys/arch/m88k/include/cpu.h,v
> diff -u -p -r1.73 cpu.h
> --- sys/arch/m88k/include/cpu.h	25 Jul 2023 18:16:20 -0000	1.73
> +++ sys/arch/m88k/include/cpu.h	23 Jan 2024 00:49:56 -0000
> @@ -177,7 +177,7 @@ struct cpu_info {
>  #endif
>  #ifdef GPROF
>  	struct gmonparam *ci_gmon;
> -	struct clockintr *ci_gmonclock;
> +	struct clockintr ci_gmonclock;
>  #endif
>  	struct clockintr_queue ci_queue;
>  	char ci_panicbuf[512];
> Index: sys/arch/mips64/include/cpu.h
> ===================================================================
> RCS file: /cvs/src/sys/arch/mips64/include/cpu.h,v
> diff -u -p -r1.144 cpu.h
> --- sys/arch/mips64/include/cpu.h	23 Aug 2023 01:55:47 -0000	1.144
> +++ sys/arch/mips64/include/cpu.h	23 Jan 2024 00:49:56 -0000
> @@ -200,7 +200,7 @@ struct cpu_info {
>  #endif
>  #ifdef GPROF
>  	struct gmonparam *ci_gmon;
> -	struct clockintr *ci_gmonclock;
> +	struct clockintr ci_gmonclock;
>  #endif
>  	char ci_panicbuf[512];
>  };
> Index: sys/arch/powerpc/include/cpu.h
> ===================================================================
> RCS file: /cvs/src/sys/arch/powerpc/include/cpu.h,v
> diff -u -p -r1.75 cpu.h
> --- sys/arch/powerpc/include/cpu.h	25 Jul 2023 18:16:20 -0000	1.75
> +++ sys/arch/powerpc/include/cpu.h	23 Jan 2024 00:49:56 -0000
> @@ -89,7 +89,7 @@ struct cpu_info {
>  #endif
>  #ifdef GPROF
>  	struct gmonparam *ci_gmon;
> -	struct clockintr *ci_gmonclock;
> +	struct clockintr ci_gmonclock;
>  #endif
>  	char ci_panicbuf[512];
>  };
> Index: sys/arch/riscv64/include/cpu.h
> ===================================================================
> RCS file: /cvs/src/sys/arch/riscv64/include/cpu.h,v
> diff -u -p -r1.19 cpu.h
> --- sys/arch/riscv64/include/cpu.h	19 Sep 2023 19:20:33 -0000	1.19
> +++ sys/arch/riscv64/include/cpu.h	23 Jan 2024 00:49:56 -0000
> @@ -124,7 +124,7 @@ struct cpu_info {
> 
>  #ifdef GPROF
>  	struct gmonparam *ci_gmon;
> -	struct clockintr *ci_gmonclock;
> +	struct clockintr ci_gmonclock;
>  #endif
> 
>  	char ci_panicbuf[512];
> Index: sys/arch/sh/include/cpu.h
> ===================================================================
> RCS file: /cvs/src/sys/arch/sh/include/cpu.h,v
> diff -u -p -r1.35 cpu.h
> --- sys/arch/sh/include/cpu.h	25 Jul 2023 18:16:21 -0000	1.35
> +++ sys/arch/sh/include/cpu.h	23 Jan 2024 00:49:56 -0000
> @@ -68,7 +68,7 @@ struct cpu_info {
>  #endif
>  #ifdef GPROF
>  	struct gmonparam *ci_gmon;
> -	struct clockintr *ci_gmonclock;
> +	struct clockintr ci_gmonclock;
>  #endif
> 
>  	int ci_want_resched;
> Index: sys/arch/sparc64/include/cpu.h
> ===================================================================
> RCS file: /cvs/src/sys/arch/sparc64/include/cpu.h,v
> diff -u -p -r1.102 cpu.h
> --- sys/arch/sparc64/include/cpu.h	25 Jul 2023 18:16:21 -0000	1.102
> +++ sys/arch/sparc64/include/cpu.h	23 Jan 2024 00:49:56 -0000
> @@ -165,7 +165,7 @@ struct cpu_info {
>  #endif
>  #ifdef GPROF
>  	struct gmonparam *ci_gmon;
> -	struct clockintr *ci_gmonclock;
> +	struct clockintr ci_gmonclock;
>  #endif
>  	char ci_panicbuf[512];
>  };
> 
-- 
:wq Claudio