
From: David Gwynne <david@gwynne.id.au>
Subject: Re: rpki-client: add error checks for pthread_* calls
To: Job Snijders <job@openbsd.org>
Cc: tech@openbsd.org
Date: Tue, 24 Jun 2025 15:22:21 +1000

On Fri, Jun 20, 2025 at 12:00:13PM +0000, Job Snijders wrote:
> Perhaps good to add error checking to the pthread_* calls?

yes...

> While there, the rwlock_unlock in auth_insert() seemed somewhat
> superfluous given that the program errors out right after.
> 
> OK?

sorry to go way back in time on this thread, but the way you're handling
errors from the pthread api and how i understand that api don't line up.

my understanding is that pthread functions return error codes
directly, and they don't set errno. therefore, rather than handling
errors like this:

	if (pthread_create(&writer, NULL, &parse_writer, &fd) != 0)
		errx(1, "pthread_create");

you should be doing this:

	int error;

	error = pthread_create(&writer, NULL, &parse_writer, &fd);
	if (error != 0)
		errc(1, error, "pthread_create");

my best guess is that errno wasn't a thread-local variable when
pthreads was first invented, so to avoid races you pass in variables
for the api to fill in, and the api returns error codes directly.
pthread_create is a good example of this where it takes a
pthread_t * argument to be filled in rather than returning a pthread_t
and setting errno on failure.
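
fwiw, if repeating the errc dance at every call site gets tedious, a
tiny wrapper could keep the call sites short. just a sketch, and the
helper name is made up (it's not in the tree):

	#include <err.h>
	#include <pthread.h>

	/* hypothetical wrapper: pass the returned error code to errc */
	static void
	rwlock_rdlock(pthread_rwlock_t *lk)
	{
		int error;

		error = pthread_rwlock_rdlock(lk);
		if (error != 0)
			errc(1, error, "pthread_rwlock_rdlock");
	}

then auth_find() and friends could just call rwlock_rdlock(&cert_lk).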

cheers,
dlg

> Index: cert.c
> ===================================================================
> RCS file: /cvs/src/usr.sbin/rpki-client/cert.c,v
> diff -u -p -r1.164 cert.c
> --- cert.c	19 Jun 2025 11:06:48 -0000	1.164
> +++ cert.c	20 Jun 2025 11:54:57 -0000
> @@ -1426,9 +1426,11 @@ auth_find(struct auth_tree *auths, int i
>  	c.certid = id;
>  	a.cert = &c;
>  
> -	pthread_rwlock_rdlock(&cert_lk);
> +	if (pthread_rwlock_rdlock(&cert_lk) != 0)
> +		errx(1, "pthread_rwlock_rdlock");
>  	f = RB_FIND(auth_tree, auths, &a);
> -	pthread_rwlock_unlock(&cert_lk);
> +	if (pthread_rwlock_unlock(&cert_lk) != 0)
> +		errx(1, "pthread_rwlock_unlock");
>  	return f;
>  }
>  
> @@ -1442,7 +1444,8 @@ auth_insert(const char *fn, struct auth_
>  	if (na == NULL)
>  		err(1, NULL);
>  
> -	pthread_rwlock_wrlock(&cert_lk);
> +	if (pthread_rwlock_wrlock(&cert_lk) != 0)
> +		errx(1, "pthread_rwlock_wrlock");
>  	if (issuer == NULL) {
>  		cert->certid = cert->talid;
>  	} else {
> @@ -1464,16 +1467,17 @@ auth_insert(const char *fn, struct auth_
>  	na->cert = cert;
>  	na->any_inherits = x509_any_inherits(cert->x509);
>  
> -	if (RB_INSERT(auth_tree, auths, na) != NULL) {
> -		pthread_rwlock_unlock(&cert_lk);
> +	if (RB_INSERT(auth_tree, auths, na) != NULL)
>  		errx(1, "auth tree corrupted");
> -	}
> -	pthread_rwlock_unlock(&cert_lk);
> +
> +	if (pthread_rwlock_unlock(&cert_lk) != 0)
> +		errx(1, "pthread_rwlock_unlock");
>  
>  	return na;
>  
>   fail:
> -	pthread_rwlock_unlock(&cert_lk);
> +	if (pthread_rwlock_unlock(&cert_lk) != 0)
> +		errx(1, "pthread_rwlock_unlock");
>  	free(na);
>  	return NULL;
>  }
> Index: crl.c
> ===================================================================
> RCS file: /cvs/src/usr.sbin/rpki-client/crl.c,v
> diff -u -p -r1.45 crl.c
> --- crl.c	4 Jun 2025 09:18:28 -0000	1.45
> +++ crl.c	20 Jun 2025 11:54:57 -0000
> @@ -313,9 +313,11 @@ crl_get(struct crl_tree *crlt, const str
>  	find.aki = a->cert->ski;
>  	find.mftpath = a->cert->mft;
>  
> -	pthread_rwlock_rdlock(&crl_lk);
> +	if (pthread_rwlock_rdlock(&crl_lk) != 0)
> +		errx(1, "pthread_rwlock_rdlock");
>  	crl = RB_FIND(crl_tree, crlt, &find);
> -	pthread_rwlock_unlock(&crl_lk);
> +	if (pthread_rwlock_unlock(&crl_lk) != 0)
> +		errx(1, "pthread_rwlock_unlock");
>  	return crl;
>  }
>  
> @@ -324,9 +326,11 @@ crl_insert(struct crl_tree *crlt, struct
>  {
>  	int rv;
>  
> -	pthread_rwlock_wrlock(&crl_lk);
> +	if (pthread_rwlock_wrlock(&crl_lk) != 0)
> +		errx(1, "pthread_rwlock_wrlock");
>  	rv = RB_INSERT(crl_tree, crlt, crl) == NULL;
> -	pthread_rwlock_unlock(&crl_lk);
> +	if (pthread_rwlock_unlock(&crl_lk) != 0)
> +		errx(1, "pthread_rwlock_unlock");
>  
>  	return rv;
>  }
> Index: parser.c
> ===================================================================
> RCS file: /cvs/src/usr.sbin/rpki-client/parser.c,v
> diff -u -p -r1.157 parser.c
> --- parser.c	20 Jun 2025 05:00:01 -0000	1.157
> +++ parser.c	20 Jun 2025 11:54:58 -0000
> @@ -76,9 +76,11 @@ repo_get(unsigned int id)
>  {
>  	struct parse_repo needle = { .id = id }, *r;
>  
> -	pthread_rwlock_rdlock(&repos_lk);
> +	if (pthread_rwlock_rdlock(&repos_lk) != 0)
> +		errx(1, "pthread_rwlock_rdlock");
>  	r = RB_FIND(repo_tree, &repos, &needle);
> -	pthread_rwlock_unlock(&repos_lk);
> +	if (pthread_rwlock_unlock(&repos_lk) != 0)
> +		errx(1, "pthread_rwlock_unlock");
>  	return r;
>  }
>  
> @@ -97,10 +99,12 @@ repo_add(unsigned int id, char *path, ch
>  		if ((rp->validpath = strdup(validpath)) == NULL)
>  			err(1, NULL);
>  
> -	pthread_rwlock_wrlock(&repos_lk);
> +	if (pthread_rwlock_wrlock(&repos_lk) != 0)
> +		errx(1, "pthread_rwlock_wrlock");
>  	if (RB_INSERT(repo_tree, &repos, rp) != NULL)
>  		errx(1, "repository already added: id %d, %s", id, path);
> -	pthread_rwlock_unlock(&repos_lk);
> +	if (pthread_rwlock_unlock(&repos_lk) != 0)
> +		errx(1, "pthread_rwlock_unlock");
>  }
>  
>  /*
> @@ -1078,7 +1082,8 @@ parse_worker(void *arg)
>  		err(1, "ibufqueue_new");
>  
>  	while (!quit) {
> -		pthread_mutex_lock(&globalq_mtx);
> +		if (pthread_mutex_lock(&globalq_mtx) != 0)
> +			errx(1, "pthread_mutex_lock");
>  		while (TAILQ_EMPTY(&globalq) && !quit)
>  			pthread_cond_wait(&globalq_cond, &globalq_mtx);
>  		n = 0;
> @@ -1088,17 +1093,23 @@ parse_worker(void *arg)
>  			if (n++ > 16)
>  				break;
>  		}
> -		pthread_mutex_unlock(&globalq_mtx);
> -		if (n > 16)
> -			pthread_cond_signal(&globalq_cond);
> +		if (pthread_mutex_unlock(&globalq_mtx) != 0)
> +			errx(1, "pthread_mutex_unlock");
> +		if (n > 16) {
> +			if (pthread_cond_signal(&globalq_cond) != 0)
> +				errx(1, "pthread_cond_signal");
> +		}
>  
>  		parse_entity(&q, myq, ctx, bn_ctx);
>  
>  		if (ibufq_queuelen(myq) > 0) {
> -			pthread_mutex_lock(&globalmsgq_mtx);
> +			if (pthread_mutex_lock(&globalmsgq_mtx) != 0)
> +				errx(1, "pthread_mutex_lock");
>  			ibufq_concat(globalmsgq, myq);
> -			pthread_cond_signal(&globalmsgq_cond);
> -			pthread_mutex_unlock(&globalmsgq_mtx);
> +			if (pthread_cond_signal(&globalmsgq_cond) != 0)
> +				errx(1, "pthread_cond_signal");
> +			if (pthread_mutex_unlock(&globalmsgq_mtx) != 0)
> +				errx(1, "pthread_mutex_unlock");
>  		}
>  	}
>  
> @@ -1119,13 +1130,15 @@ parse_writer(void *arg)
>  	pfd.fd = *(int *)arg;
>  	while (!quit) {
>  		if (msgbuf_queuelen(myq) == 0) {
> -			pthread_mutex_lock(&globalmsgq_mtx);
> +			if (pthread_mutex_lock(&globalmsgq_mtx) != 0)
> +				errx(1, "pthread_mutex_lock");
>  			while (ibufq_queuelen(globalmsgq) == 0 && !quit)
>  				pthread_cond_wait(&globalmsgq_cond,
>  				    &globalmsgq_mtx);
>  			/* enqueue messages to local msgbuf */
>  			msgbuf_concat(myq, globalmsgq);
> -			pthread_mutex_unlock(&globalmsgq_mtx);
> +			if (pthread_mutex_unlock(&globalmsgq_mtx) != 0)
> +				errx(1, "pthread_mutex_unlock");
>  		}
>  
>  		if (msgbuf_queuelen(myq) > 0) {
> @@ -1200,9 +1213,12 @@ proc_parser(int fd, int nthreads)
>  	if ((workers = calloc(nthreads, sizeof(*workers))) == NULL)
>  		err(1, NULL);
>  
> -	pthread_create(&writer, NULL, &parse_writer, &fd);
> -	for (i = 0; i < nthreads; i++)
> -		pthread_create(&workers[i], NULL, &parse_worker, NULL);
> +	if (pthread_create(&writer, NULL, &parse_writer, &fd) != 0)
> +		errx(1, "pthread_create");
> +	for (i = 0; i < nthreads; i++) {
> +		if (pthread_create(&workers[i], NULL, &parse_worker, NULL) != 0)
> +			errx(1, "pthread_create");
> +	}
>  
>  	pfd.fd = fd;
>  	while (!quit) {
> @@ -1246,24 +1262,31 @@ proc_parser(int fd, int nthreads)
>  				TAILQ_INSERT_TAIL(&myq, entp, entries);
>  			}
>  			if (!TAILQ_EMPTY(&myq)) {
> -				pthread_mutex_lock(&globalq_mtx);
> +				if (pthread_mutex_lock(&globalq_mtx) != 0)
> +					errx(1, "pthread_mutex_lock");
>  				TAILQ_CONCAT(&globalq, &myq, entries);
> -				pthread_mutex_unlock(&globalq_mtx);
> -				pthread_cond_signal(&globalq_cond);
> +				if (pthread_mutex_unlock(&globalq_mtx) != 0)
> +					errx(1, "pthread_mutex_unlock");
> +				if (pthread_cond_signal(&globalq_cond) != 0)
> +					errx(1, "pthread_cond_signal");
>  			}
>  		}
>  	}
>  
>  	/* signal all threads */
> -	pthread_cond_broadcast(&globalq_cond);
> -	pthread_cond_broadcast(&globalmsgq_cond);
> +	if (pthread_cond_broadcast(&globalq_cond) != 0)
> +		errx(1, "pthread_cond_broadcast");
> +	if (pthread_cond_broadcast(&globalmsgq_cond) != 0)
> +		errx(1, "pthread_cond_broadcast");
>  
> -	pthread_mutex_lock(&globalq_mtx);
> +	if (pthread_mutex_lock(&globalq_mtx) != 0)
> +		errx(1, "pthread_mutex_lock");
>  	while ((entp = TAILQ_FIRST(&globalq)) != NULL) {
>  		TAILQ_REMOVE(&globalq, entp, entries);
>  		entity_free(entp);
>  	}
> -	pthread_mutex_unlock(&globalq_mtx);
> +	if (pthread_mutex_unlock(&globalq_mtx) != 0)
> +		errx(1, "pthread_mutex_unlock");
>  
>  	if (pthread_join(writer, NULL) != 0)
>  		errx(1, "pthread_join writer");
>