diff --git a/configure.ac b/configure.ac
index 9ddc1f6..9135595 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,447 +1,447 @@
# configure.ac -*- Autoconf -*-
# Copyright (C) 2011, 2012 g10 Code GmbH
#
# This file is part of nPth.
#
# nPth is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# nPth is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General
# Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, see <https://www.gnu.org/licenses/>.
# Process this file with autoconf to produce a configure script.
AC_PREREQ([2.69])
min_automake_version="1.14"
# To build a release you need to create a tag with the version number
# (git tag -s npth-n.m) and run "./autogen.sh --force". Please bump
# the version number immediately after the release and do another
# commit and push so that the git magic is able to work.
# See below for the LT versions.
m4_define([mym4_package],[npth])
m4_define([mym4_major], [1])
m4_define([mym4_minor], [7])
# To start a new development series, i.e a new major or minor number
# you need to mark an arbitrary commit before the first beta release
# with an annotated tag. For example a 2.1 branch starts off with
# the tag "foo-2.1-base". This is used as the base for counting
# beta numbers before the first release of a series.
# Below is m4 magic to extract and compute the git revision number,
# the decimalized short revision number, a beta version string and a
# flag indicating a development version (mym4_isbeta). Note that the
# m4 processing is done by autoconf and not during the configure run.
m4_define([mym4_verslist], m4_split(m4_esyscmd([./autogen.sh --find-version] \
mym4_package mym4_major mym4_minor),[:]))
m4_define([mym4_isbeta], m4_argn(2, mym4_verslist))
m4_define([mym4_version], m4_argn(4, mym4_verslist))
m4_define([mym4_revision], m4_argn(7, mym4_verslist))
m4_define([mym4_revision_dec], m4_argn(8, mym4_verslist))
m4_esyscmd([echo ]mym4_version[>VERSION])
AC_INIT([mym4_package],[mym4_version],[https://bugs.gnupg.org])
# LT Version numbers, remember to change them just *before* a release.
# (Code changed: REVISION++)
# (Interfaces added/removed/changed: CURRENT++, REVISION=0)
# (Interfaces added: AGE++)
# (Interfaces removed: AGE=0)
#
LIBNPTH_LT_CURRENT=1
LIBNPTH_LT_AGE=1
LIBNPTH_LT_REVISION=2
################################################
AC_SUBST(LIBNPTH_LT_CURRENT)
AC_SUBST(LIBNPTH_LT_AGE)
AC_SUBST(LIBNPTH_LT_REVISION)
VERSION_NUMBER=m4_esyscmd(printf "0x%02x%02x00" mym4_major mym4_minor)
AC_SUBST(VERSION_NUMBER)
# If the API is changed in an incompatible way: increment the next counter.
NPTH_CONFIG_API_VERSION=1
##############################################
AC_CONFIG_AUX_DIR([build-aux])
AM_INIT_AUTOMAKE([serial-tests dist-bzip2 no-dist-gzip])
AM_MAINTAINER_MODE
AC_CONFIG_SRCDIR([src/npth.c])
AC_CONFIG_HEADERS([config.h])
AC_CONFIG_MACRO_DIR([m4])
AC_CANONICAL_HOST
AM_SILENT_RULES
# Enable GNU extensions on systems that have them.
AC_USE_SYSTEM_EXTENSIONS
# Taken from mpfr-4.0.1, then modified for LDADD_FOR_TESTS_KLUDGE
dnl Under Linux, make sure that the old dtags are used if LD_LIBRARY_PATH
dnl is defined. The issue is that with the new dtags, LD_LIBRARY_PATH has
dnl the precedence over the run path, so that if a compatible MPFR library
dnl is installed in some directory from $LD_LIBRARY_PATH, then the tested
dnl MPFR library will be this library instead of the MPFR library from the
dnl build tree. Other OS with the same issue might be added later.
dnl
dnl References:
dnl https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=859732
dnl http://lists.gnu.org/archive/html/libtool/2017-05/msg00000.html
dnl
dnl We need to check whether --disable-new-dtags is supported as alternate
dnl linkers may be used (e.g., with tcc: CC=tcc LD=tcc).
dnl
case $host in
*-*-linux*)
if test -n "$LD_LIBRARY_PATH"; then
saved_LDFLAGS="$LDFLAGS"
LDADD_FOR_TESTS_KLUDGE="-Wl,--disable-new-dtags"
LDFLAGS="$LDFLAGS $LDADD_FOR_TESTS_KLUDGE"
AC_MSG_CHECKING(whether --disable-new-dtags is supported by the linker)
AC_LINK_IFELSE([AC_LANG_SOURCE([[
int main (void) { return 0; }
]])],
[AC_MSG_RESULT(yes (use it since LD_LIBRARY_PATH is set))],
[AC_MSG_RESULT(no)
LDADD_FOR_TESTS_KLUDGE=""
])
LDFLAGS="$saved_LDFLAGS"
fi
;;
esac
AC_SUBST([LDADD_FOR_TESTS_KLUDGE])
AH_VERBATIM([_REENTRANT],
[#ifndef _REENTRANT
# define _REENTRANT 1
#endif])
# Checks for programs.
AC_PROG_CC
if test "$GCC" = yes; then
CFLAGS="$CFLAGS -Wall -Wcast-align -Wshadow -Wstrict-prototypes"
AC_MSG_CHECKING([if gcc supports -Wpointer-arith])
_gcc_cflags_save=$CFLAGS
CFLAGS="-Wpointer-arith"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([])],_gcc_wopt=yes,_gcc_wopt=no)
AC_MSG_RESULT($_gcc_wopt)
CFLAGS=$_gcc_cflags_save;
if test x"$_gcc_wopt" = xyes ; then
CFLAGS="$CFLAGS -Wpointer-arith"
fi
fi
# We used to provide npth-config command always. Now, it's
# gpgrt-config command with npth.pc configuration file, which does
# same thing.
AC_ARG_ENABLE(install-npth-config,
AS_HELP_STRING([--enable-install-npth-config],[install npth-config command]),
install_npth_config=$enableval)
AM_CONDITIONAL(INSTALL_NPTH_CONFIG, test "$install_npth_config" = "yes")
run_tests="yes"
AC_ARG_ENABLE(tests,
AS_HELP_STRING([--disable-tests],[disable tests]),
run_tests=$enableval)
AM_CONDITIONAL(RUN_TESTS, test "$run_tests" = "yes")
# Don't default to build static libs.
LT_PREREQ([2.2.6])
LT_INIT([win32-dll disable-static])
LT_LANG([Windows Resource])
# For now we hardcode the use of version scripts. It would be better
# to write a test for this or even implement this within libtool.
have_ld_version_script=no
case "${host}" in
*-*-linux*)
have_ld_version_script=yes
;;
*-*-gnu*)
have_ld_version_script=yes
;;
*-apple-darwin*)
AC_SEARCH_LIBS([dispatch_semaphore_create],[dispatch],
[AC_DEFINE([HAVE_LIB_DISPATCH],1,[Defined if we have libdispatch])])
;;
*-*-aix*)
have_fork_unsafe_semaphore=yes
;;
*-*-dragonfly*|*-*-freebsd*|*-*-netbsd*|*-*-hpux*)
LIBS="-lpthread $LIBS"
AC_SEARCH_LIBS([sem_init],[rt])
;;
esac
AM_CONDITIONAL(HAVE_LD_VERSION_SCRIPT, test "$have_ld_version_script" = "yes")
if test "$have_fork_unsafe_semaphore" = yes; then
AC_DEFINE(HAVE_FORK_UNSAFE_SEMAPHORE, 1, [Defined if we have fork-unsafe semaphore])
fi
# Set some default values
config_libs="-lnpth $LIBS"
have_w32_system=no
have_w32ce_system=no
have_w64_system=no
# Define OS macros
case "${host}" in
x86_64-*mingw32*)
have_w64_system=yes
;;
*-mingw32ce*)
have_w32ce_system=yes
;;
esac
case "${host}" in
*-mingw32ce*|*-mingw32*)
have_w32_system=yes
;;
*)
;;
esac
if test "$have_w32_system" = yes; then
AC_DEFINE(HAVE_W32_SYSTEM,1,
[Defined if we run on any kind of W32 API based system])
fi
AM_CONDITIONAL(HAVE_W32_SYSTEM, test "$have_w32_system" = yes)
if test "$have_w64_system" = yes; then
AC_DEFINE(HAVE_W64_SYSTEM,1,
[Defined if we run on a 64 bit W32 API based system])
fi
AM_CONDITIONAL(HAVE_W64_SYSTEM, test "$have_w64_system" = yes)
#
# Generate values for the DLL version info
#
if test "$have_w32_system" = yes; then
BUILD_ISODATE=`date --iso-8601`
changequote(,)dnl
BUILD_FILEVERSION=`echo "$VERSION" | sed 's/\([0-9.]*\).*/\1./;s/\./,/g'`
changequote([,])dnl
BUILD_FILEVERSION="${BUILD_FILEVERSION}mym4_revision_dec"
fi
AC_SUBST(BUILD_ISODATE)
AC_SUBST(BUILD_FILEVERSION)
AC_ARG_ENABLE([build-timestamp],
AS_HELP_STRING([--enable-build-timestamp],[set an explicit build timestamp for reproducibility.
(default is the current time in ISO-8601 format)]),
[if test "$enableval" = "yes"; then
BUILD_TIMESTAMP=`date -u +%Y-%m-%dT%H:%M+0000 2>/dev/null || date`
else
BUILD_TIMESTAMP="$enableval"
fi],
[BUILD_TIMESTAMP=""])
AC_SUBST(BUILD_TIMESTAMP)
AC_DEFINE_UNQUOTED(BUILD_TIMESTAMP, "$BUILD_TIMESTAMP",
[The time this package was configured for a build])
#
# Checks for header files.
#
# fixme: For what do we need the sys/socket test?
AC_CHECK_HEADERS([sys/socket.h sys/select.h unistd.h sys/time.h time.h \
signal.h poll.h])
INSERT_SYS_SELECT_H=
if test x"$ac_cv_header_sys_select_h" = xyes; then
INSERT_SYS_SELECT_H="include <sys/select.h>"
fi
AC_SUBST(INSERT_SYS_SELECT_H)
if test x"$ac_cv_header_sys_time_h" = xyes; then
INSERT_SYS_TIME_H="include <sys/time.h>"
fi
AC_SUBST(INSERT_SYS_TIME_H)
if test x"$ac_cv_header_time_h" = xyes; then
INSERT_TIME_H="include <time.h>"
fi
AC_SUBST(INSERT_TIME_H)
if test x"$ac_cv_header_signal_h" = xyes; then
INSERT_SIGNAL_H="include <signal.h>"
fi
AC_SUBST(INSERT_SIGNAL_H)
# Some systems lack socklen_t - provide a replacement.
gl_TYPE_SOCKLEN_T
case "${host}" in
*-*-mingw32*)
# socklen_t may or may not be defined depending on what headers
# are included. To be safe we use int as this is the actual type.
INSERT_SOCKLEN_T="define npth_socklen_t int"
;;
*)
if test ".$gl_cv_socklen_t_equiv" = "."; then
INSERT_SOCKLEN_T="define npth_socklen_t socklen_t"
else
INSERT_SOCKLEN_T="define npth_socklen_t ${gl_cv_socklen_t_equiv}"
fi
esac
AC_SUBST(INSERT_SOCKLEN_T)
#
# Checks for typedefs, structures, and compiler characteristics.
#
AC_TYPE_PID_T
AC_TYPE_SIZE_T
AC_TYPE_SSIZE_T
#
# Checks for libraries and functions.
#
# We test for pthread_cancel because in glibc 2.34, libc includes
# pthread_create and pthread_detach, but not pthread_cancel.
if test "$have_w32_system" = no; then
AC_SEARCH_LIBS([pthread_cancel],[pthread])
case "x$ac_cv_search_pthread_cancel" in
xno)
have_pthread=no
;;
xnone\ required)
have_pthread=yes
;;
*)
have_pthread=yes
config_libs="$config_libs $ac_cv_search_pthread_cancel"
;;
esac
if test "$have_pthread" != no; then
AC_DEFINE(HAVE_PTHREAD,1,[Define if we have pthread.])
AC_CHECK_TYPE([pthread_rwlock_t])
AC_CHECK_FUNCS([pthread_tryjoin_np pthread_setname_np pthread_getname_np])
AC_CHECK_FUNCS([pthread_mutex_timedlock])
AC_CHECK_FUNCS([pthread_rwlock_rdlock pthread_rwlock_wrlock])
AC_CHECK_FUNCS([pthread_rwlock_timedrdlock pthread_rwlock_timedwrlock])
AC_CHECK_FUNCS([pthread_rwlock_tryrdlock pthread_rwlock_trywrlock])
AC_CHECK_FUNCS([pthread_atfork])
fi
fi
INSERT_NO_RWLOCK="undef NPTH_NO_RWLOCK"
if test x"$ac_cv_type_pthread_rwlock_t" = xno; then
INSERT_NO_RWLOCK="define NPTH_NO_RWLOCK 1"
fi
AC_SUBST(INSERT_NO_RWLOCK)
case "${host}" in
*-*-linux*|*-*-gnu*)
INSERT_EXPOSE_RWLOCK_API="defined(__USE_UNIX98) || defined(__USE_XOPEN2K)"
;;
*)
INSERT_EXPOSE_RWLOCK_API="1"
;;
esac
AC_SUBST(INSERT_EXPOSE_RWLOCK_API)
AC_CHECK_FUNCS([select pselect gettimeofday ppoll])
npth_LIBSOCKET
config_libs="$config_libs $LIBSOCKET"
# Save and restore LIBS so e.g., -lrt, isn't added to it. Otherwise, *all*
# programs in the package would end up linked with that potentially-shared
# library, inducing unnecessary run-time overhead.
LIB_CLOCK_GETTIME=
AC_SUBST([LIB_CLOCK_GETTIME])
gl_saved_libs=$LIBS
AC_SEARCH_LIBS([clock_gettime], [rt posix4],
[if test "$ac_cv_search_clock_gettime" != "none required"; then
LIB_CLOCK_GETTIME=$ac_cv_search_clock_gettime
config_libs="$config_libs $LIB_CLOCK_GETTIME"
fi
AC_DEFINE([HAVE_CLOCK_GETTIME],1,
[Define to 1 if you have the `clock_gettime' function.])
])
LIBS=$gl_saved_libs
#
# Set NETLIBS
#
if test "$have_w32ce_system" = yes; then
NETLIBS="-lws2 $NETLIBS"
elif test "$have_w32_system" = yes; then
NETLIBS="-lws2_32 $NETLIBS"
fi
AC_SUBST(NETLIBS)
#
# Substitutions to set generated files in a Emacs buffer to read-only.
#
AC_SUBST(emacs_local_vars_begin, [['Local][ ][Variables:']])
AC_SUBST(emacs_local_vars_read_only, ['buffer-read-only: t'])
AC_SUBST(emacs_local_vars_end, ['End:'])
#
# Substitution used for npth-config
#
NPTH_CONFIG_LIBS="$config_libs"
NPTH_CONFIG_CFLAGS=""
NPTH_CONFIG_HOST="$host"
AC_SUBST(NPTH_CONFIG_API_VERSION)
AC_SUBST(NPTH_CONFIG_LIBS)
AC_SUBST(NPTH_CONFIG_CFLAGS)
AC_SUBST(NPTH_CONFIG_HOST)
#
# Last check.
#
die=no
if test "$have_w32_system" = no; then
if test "$have_pthread" = "no"; then
die=yes
AC_MSG_NOTICE([[
***
*** You need Pthread to build this program.
*** Normally, this library comes with your system. On Windows, you can use:
*** http://sourceware.org/pthreads-win32/
***]])
fi
fi
if test "$die" = "yes"; then
AC_MSG_ERROR([[
***
*** Required libraries not found. Please consult the above messages
*** and install them before running configure again.
***]])
fi
#
# Write output
#
AC_CONFIG_FILES([Makefile
npth.pc
src/npth.h
src/Makefile
w32/Makefile
tests/Makefile])
AC_CONFIG_FILES(npth-config, chmod +x npth-config)
AC_OUTPUT
echo "
$PACKAGE_NAME v$PACKAGE_VERSION prepared for make
Revision: mym4_revision (mym4_revision_dec)
Platform: $host
"
diff --git a/src/npth-sigev.c b/src/npth-sigev.c
index 8f8b5cb..f5edc65 100644
--- a/src/npth-sigev.c
+++ b/src/npth-sigev.c
@@ -1,170 +1,170 @@
/* npth-sigev.c - signal handling interface
* Copyright (C) 2011 g10 Code GmbH
*
* This file is part of nPth.
*
* nPth is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* nPth is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <https://www.gnu.org/licenses/>.
*/
/* This is a support interface to make it easier to handle signals.
*
* The interfaces here support one (and only one) thread (here called
* "main thread") in the application to monitor several signals while
* selecting on filedescriptors.
*
* First, the main thread should call npth_sigev_init. This
* initializes some global data structures used to record interesting
* and pending signals.
*
* Then, the main thread should call npth_sigev_add for every signal
* it is interested in observing, and finally npth_sigev_fini. This
* will block the signal in the main threads sigmask. Note that these
* signals should also be blocked in all other threads. Since they
* are blocked in the main thread after calling npth_sigev_add, it is
* recommended to call npth_sigev_add in the main thread before
* creating any threads.
*
* The function npth_sigev_sigmask is a convenient function that
* returns the sigmask of the thread at time of npth_sigev_init, but
* with all registered signals unblocked. It is recommended to do all
* other changes to the main thread's sigmask before calling
* npth_sigev_init, so that the return value of npth_sigev_sigmask can
* be used in the npth_pselect invocation.
*
* In any case, the main thread should invoke npth_pselect with a
* sigmask that has all signals that should be monitored unblocked.
*
* After npth_pselect returns, npth_sigev_get_pending can be called in
* a loop until it returns 0 to iterate over the list of pending
* signals. Each time a signal is returned by that function, its
* status is reset to non-pending.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <signal.h>
#include <assert.h>
#include "npth.h"
/* Record events that have been noticed. */
static sigset_t sigev_pending;
/* The signal mask during normal operation. */
static sigset_t sigev_block;
/* The signal mask during pselect. */
static sigset_t sigev_unblock;
/* Registered signal numbers. Needed to iterate over sigset_t.
Bah. */
#define SIGEV_MAX 32
static int sigev_signum[SIGEV_MAX];
static int sigev_signum_cnt;
/* The internal handler which just sets a global flag. */
static void
sigev_handler (int signum)
{
sigaddset (&sigev_pending, signum);
}
/* Start setting up signal event handling. */
void
npth_sigev_init (void)
{
sigemptyset (&sigev_pending);
pthread_sigmask (SIG_SETMASK, NULL, &sigev_block);
pthread_sigmask (SIG_SETMASK, NULL, &sigev_unblock);
}
/* Add signal SIGNUM to the list of watched signals. */
void
npth_sigev_add (int signum)
{
struct sigaction sa;
sigset_t ss;
sigemptyset(&ss);
assert (sigev_signum_cnt < SIGEV_MAX);
sigev_signum[sigev_signum_cnt++] = signum;
/* Make sure we can receive it. */
sigdelset (&sigev_unblock, signum);
sigaddset (&sigev_block, signum);
sa.sa_handler = sigev_handler;
sa.sa_mask = ss;
sa.sa_flags = 0; /* NOT setting SA_RESTART! */
sigaction (signum, &sa, NULL);
}
#ifdef HAVE_PTHREAD_ATFORK
/* There is non-POSIX operating system where fork is not available to
applications. There, we have no pthread_atfork either. In such a
case, we don't call pthread_atfork. */
static void
restore_sigmask_for_child_process (void)
{
pthread_sigmask (SIG_SETMASK, &sigev_unblock, NULL);
}
#endif
/* Finish the list of watched signals. This starts to block them,
too. */
void
npth_sigev_fini (void)
{
/* Block the interesting signals. */
pthread_sigmask (SIG_SETMASK, &sigev_block, NULL);
#ifdef HAVE_PTHREAD_ATFORK
pthread_atfork (NULL, NULL, restore_sigmask_for_child_process);
#endif
}
/* Get the sigmask as needed for pselect. */
sigset_t *
npth_sigev_sigmask (void)
{
return &sigev_unblock;
}
/* Return the next signal event that occurred.  Returns 0 if none are
   left, 1 on success.  */
int
npth_sigev_get_pending (int *r_signum)
{
int i;
for (i = 0; i < sigev_signum_cnt; i++)
{
int signum = sigev_signum[i];
if (sigismember (&sigev_pending, signum))
{
sigdelset (&sigev_pending, signum);
*r_signum = signum;
return 1;
}
}
return 0;
}
diff --git a/src/npth.c b/src/npth.c
index f57d06c..1b7e4ac 100644
--- a/src/npth.c
+++ b/src/npth.c
@@ -1,842 +1,842 @@
/* npth.c - a lightweight implementation of pth over pthread.
* Copyright (C) 2011 g10 Code GmbH
*
* This file is part of nPth.
*
* nPth is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* nPth is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <https://www.gnu.org/licenses/>.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/wait.h>
#ifdef HAVE_LIB_DISPATCH
# include <dispatch/dispatch.h>
typedef dispatch_semaphore_t sem_t;
/* This glue code is for macOS which does not have full implementation
of POSIX semaphore. On macOS, using semaphore in Grand Central
Dispatch library is better than using the partial implementation of
POSIX semaphore where sem_init doesn't work well.
*/
static int
sem_init (sem_t *sem, int is_shared, unsigned int value)
{
(void)is_shared;
if ((*sem = dispatch_semaphore_create (value)) == NULL)
return -1;
else
return 0;
}
static int
sem_post (sem_t *sem)
{
dispatch_semaphore_signal (*sem);
return 0;
}
static int
sem_wait (sem_t *sem)
{
dispatch_semaphore_wait (*sem, DISPATCH_TIME_FOREVER);
return 0;
}
#else
# include <semaphore.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifndef HAVE_PSELECT
# include <signal.h>
#endif
#ifdef HAVE_POLL_H
#include <poll.h>
#endif
#include "npth.h"
/* The global lock that excludes all threads but one. This is a
semaphore, because these can be safely used in a library even if
the application or other libraries call fork(), including from a
signal handler. sem_post is async-signal-safe. (The reason a
semaphore is safe and a mutex is not safe is that a mutex has an
owner, while a semaphore does not.) We init sceptre to a static
buffer for use by sem_init; in case sem_open is used instead
SCEPTRE will changed to the value returned by sem_open.
GOT_SCEPTRE is a flag used for debugging to tell wether we hold
SCEPTRE. */
static sem_t sceptre_buffer;
static sem_t *sceptre = &sceptre_buffer;
static int got_sceptre;
/* Configure defines HAVE_FORK_UNSAFE_SEMAPHORE if child process can't
access non-shared unnamed semaphore which is created by its parent.
We use unnamed semaphore (if available) for the global lock. The
specific semaphore is only valid for those threads in a process,
and it is no use by other processes. Thus, PSHARED argument for
sem_init is naturally 0.
However, there are daemon-like applications which use fork after
npth's initialization by npth_init. In this case, a child process
uses the semaphore which was created by its parent process, while
parent does nothing with the semaphore. In some system (e.g. AIX),
access by child process to non-shared unnamed semaphore is
prohibited. For such a system, HAVE_FORK_UNSAFE_SEMAPHORE should
be defined, so that unnamed semaphore will be created with the
option PSHARED=1. The purpose of the setting of PSHARED=1 is only
for allowing the access of the lock by child process. For NPTH, it
does not mean any other interactions between processes.
*/
#ifdef HAVE_FORK_UNSAFE_SEMAPHORE
#define NPTH_SEMAPHORE_PSHARED 1
#else
#define NPTH_SEMAPHORE_PSHARED 0
#endif
/* The main thread is the active thread at the time pth_init was
called. As of now it is only useful for debugging. The volatile
make sure the compiler does not eliminate this set but not used
variable. */
static volatile pthread_t main_thread;
/* This flag is set as soon as npth_init has been called or if any
* thread has been created. It will never be cleared again. The only
* purpose is to make npth_protect and npth_unprotect more robust in
* that they can be shortcut when npth_init has not yet been called.
* This is important for libraries which want to support nPth by using
* those two functions but may have been initialized before nPth. */
static int initialized_or_any_threads;
/* Systems that don't have pthread_mutex_timedlock get a busy wait
implementation that probes the lock every BUSY_WAIT_INTERVAL
milliseconds. */
#define BUSY_WAIT_INTERVAL 200
typedef int (*trylock_func_t) (void *);
#ifndef HAVE_PTHREAD_MUTEX_TIMEDLOCK
static int
busy_wait_for (trylock_func_t trylock, void *lock,
const struct timespec *abstime)
{
int err;
/* This is not great, but better than nothing. Only works for locks
which are mostly uncontested. Provides absolutely no fairness at
all. Creates many wake-ups. */
while (1)
{
struct timespec ts;
err = npth_clock_gettime (&ts);
if (err < 0)
{
/* Just for safety make sure we return some error. */
err = errno ? errno : EINVAL;
break;
}
if (npth_timercmp (abstime, &ts, <))
{
err = ETIMEDOUT;
break;
}
err = (*trylock) (lock);
if (err != EBUSY)
break;
/* Try again after waiting a bit. We could calculate the
maximum wait time from ts and abstime, but we don't
bother, as our granularity is pretty fine. */
usleep (BUSY_WAIT_INTERVAL * 1000);
}
return err;
}
#endif
static void
enter_npth (void)
{
int res;
got_sceptre = 0;
res = sem_post (sceptre);
assert (res == 0);
}
static void
leave_npth (void)
{
int res;
int save_errno = errno;
do {
res = sem_wait (sceptre);
} while (res < 0 && errno == EINTR);
assert (!res);
got_sceptre = 1;
errno = save_errno;
}
#define ENTER() enter_npth ()
#define LEAVE() leave_npth ()
int
npth_init (void)
{
int res;
main_thread = pthread_self();
/* Track that we have been initialized. */
initialized_or_any_threads |= 1;
/* Better reset ERRNO so that we know that it has been set by
sem_init. */
errno = 0;
/* The semaphore is binary. */
res = sem_init (sceptre, NPTH_SEMAPHORE_PSHARED, 1);
/* There are some versions of operating systems which have sem_init
symbol defined but the call actually returns ENOSYS at runtime.
We know this problem for older versions of AIX (<= 4.3.3) and
macOS. For macOS, we use semaphore in Grand Central Dispatch
library, so ENOSYS doesn't happen. We only support AIX >= 5.2,
where sem_init is supported.
*/
if (res < 0)
{
/* POSIX.1-2001 defines the semaphore interface but does not
specify the return value for success. Thus we better
bail out on error only on a POSIX.1-2008 system. */
#if _POSIX_C_SOURCE >= 200809L
return errno;
#endif
}
LEAVE();
return 0;
}
int
npth_getname_np (npth_t target_thread, char *buf, size_t buflen)
{
#ifdef HAVE_PTHREAD_GETNAME_NP
return pthread_getname_np (target_thread, buf, buflen);
#else
(void)target_thread;
(void)buf;
(void)buflen;
return ENOSYS;
#endif
}
int
npth_setname_np (npth_t target_thread, const char *name)
{
#ifdef HAVE_PTHREAD_SETNAME_NP
#ifdef __NetBSD__
return pthread_setname_np (target_thread, "%s", (void*) name);
#else
#ifdef __APPLE__
if (target_thread == npth_self ())
return pthread_setname_np (name);
else
return ENOTSUP;
#else
return pthread_setname_np (target_thread, name);
#endif
#endif
#else
(void)target_thread;
(void)name;
return ENOSYS;
#endif
}
struct startup_s
{
void *(*start_routine) (void *);
void *arg;
};
static void *
thread_start (void *startup_arg)
{
struct startup_s *startup = startup_arg;
void *(*start_routine) (void *);
void *arg;
void *result;
start_routine = startup->start_routine;
arg = startup->arg;
free (startup);
LEAVE();
result = (*start_routine) (arg);
/* Note: instead of returning here, we might end up in
npth_exit() instead. */
ENTER();
return result;
}
int
npth_create (npth_t *thread, const npth_attr_t *attr,
void *(*start_routine) (void *), void *arg)
{
int err;
struct startup_s *startup;
startup = malloc (sizeof (*startup));
if (!startup)
return errno;
initialized_or_any_threads |= 2;
startup->start_routine = start_routine;
startup->arg = arg;
err = pthread_create (thread, attr, thread_start, startup);
if (err)
{
free (startup);
return err;
}
/* Memory is released in thread_start. */
return 0;
}
int
npth_join (npth_t thread, void **retval)
{
int err;
#ifdef HAVE_PTHREAD_TRYJOIN_NP
/* No need to allow competing threads to enter when we can get the
lock immediately. pthread_tryjoin_np is a GNU extension. */
err = pthread_tryjoin_np (thread, retval);
if (err != EBUSY)
return err;
#endif /*HAVE_PTHREAD_TRYJOIN_NP*/
ENTER();
err = pthread_join (thread, retval);
LEAVE();
return err;
}
void
npth_exit (void *retval)
{
ENTER();
pthread_exit (retval);
/* Never reached. But just in case pthread_exit does return... */
LEAVE();
}
int
npth_mutex_lock (npth_mutex_t *mutex)
{
int err;
/* No need to allow competing threads to enter when we can get the
lock immediately. */
err = pthread_mutex_trylock (mutex);
if (err != EBUSY)
return err;
ENTER();
err = pthread_mutex_lock (mutex);
LEAVE();
return err;
}
int
npth_mutex_timedlock (npth_mutex_t *mutex, const struct timespec *abstime)
{
int err;
/* No need to allow competing threads to enter when we can get the
lock immediately. */
err = pthread_mutex_trylock (mutex);
if (err != EBUSY)
return err;
ENTER();
#if HAVE_PTHREAD_MUTEX_TIMEDLOCK
err = pthread_mutex_timedlock (mutex, abstime);
#else
err = busy_wait_for ((trylock_func_t) pthread_mutex_trylock, mutex, abstime);
#endif
LEAVE();
return err;
}
#ifndef NPTH_NO_RWLOCK
int
npth_rwlock_rdlock (npth_rwlock_t *rwlock)
{
int err;
#ifdef HAVE_PTHREAD_RWLOCK_TRYRDLOCK
/* No need to allow competing threads to enter when we can get the
lock immediately. */
err = pthread_rwlock_tryrdlock (rwlock);
if (err != EBUSY)
return err;
#endif
ENTER();
err = pthread_rwlock_rdlock (rwlock);
LEAVE();
return err;
}
int
npth_rwlock_timedrdlock (npth_rwlock_t *rwlock, const struct timespec *abstime)
{
int err;
#ifdef HAVE_PTHREAD_RWLOCK_TRYRDLOCK
/* No need to allow competing threads to enter when we can get the
lock immediately. */
err = pthread_rwlock_tryrdlock (rwlock);
if (err != EBUSY)
return err;
#endif
ENTER();
#if HAVE_PTHREAD_RWLOCK_TIMEDRDLOCK
err = pthread_rwlock_timedrdlock (rwlock, abstime);
#else
err = busy_wait_for ((trylock_func_t) pthread_rwlock_tryrdlock, rwlock,
abstime);
#endif
LEAVE();
return err;
}
int
npth_rwlock_wrlock (npth_rwlock_t *rwlock)
{
int err;
#ifdef HAVE_PTHREAD_RWLOCK_TRYWRLOCK
/* No need to allow competing threads to enter when we can get the
lock immediately. */
err = pthread_rwlock_trywrlock (rwlock);
if (err != EBUSY)
return err;
#endif
ENTER();
err = pthread_rwlock_wrlock (rwlock);
LEAVE();
return err;
}
int
npth_rwlock_timedwrlock (npth_rwlock_t *rwlock, const struct timespec *abstime)
{
int err;
#ifdef HAVE_PTHREAD_RWLOCK_TRYWRLOCK
/* No need to allow competing threads to enter when we can get the
lock immediately. */
err = pthread_rwlock_trywrlock (rwlock);
if (err != EBUSY)
return err;
#endif
ENTER();
#if HAVE_PTHREAD_RWLOCK_TIMEDWRLOCK
err = pthread_rwlock_timedwrlock (rwlock, abstime);
#elif HAVE_PTHREAD_RWLOCK_TRYRDLOCK
err = busy_wait_for ((trylock_func_t) pthread_rwlock_trywrlock, rwlock,
abstime);
#else
err = ENOSYS;
#endif
LEAVE();
return err;
}
#endif
int
npth_cond_wait (npth_cond_t *cond, npth_mutex_t *mutex)
{
int err;
ENTER();
err = pthread_cond_wait (cond, mutex);
LEAVE();
return err;
}
int
npth_cond_timedwait (npth_cond_t *cond, npth_mutex_t *mutex,
const struct timespec *abstime)
{
int err;
ENTER();
err = pthread_cond_timedwait (cond, mutex, abstime);
LEAVE();
return err;
}
/* Standard POSIX Replacement API */
int
npth_usleep(unsigned int usec)
{
int res;
ENTER();
res = usleep(usec);
LEAVE();
return res;
}
unsigned int
npth_sleep(unsigned int sec)
{
unsigned res;
ENTER();
res = sleep(sec);
LEAVE();
return res;
}
int
npth_system(const char *cmd)
{
int res;
ENTER();
res = system(cmd);
LEAVE();
return res;
}
pid_t
npth_waitpid(pid_t pid, int *status, int options)
{
pid_t res;
ENTER();
res = waitpid(pid,status, options);
LEAVE();
return res;
}
int
npth_connect(int s, const struct sockaddr *addr, socklen_t addrlen)
{
int res;
ENTER();
res = connect(s, addr, addrlen);
LEAVE();
return res;
}
int
npth_accept(int s, struct sockaddr *addr, socklen_t *addrlen)
{
int res;
ENTER();
res = accept(s, addr, addrlen);
LEAVE();
return res;
}
int
npth_select(int nfd, fd_set *rfds, fd_set *wfds, fd_set *efds,
struct timeval *timeout)
{
int res;
ENTER();
res = select(nfd, rfds, wfds, efds, timeout);
LEAVE();
return res;
}
int
npth_pselect(int nfd, fd_set *rfds, fd_set *wfds, fd_set *efds,
const struct timespec *timeout, const sigset_t *sigmask)
{
int res;
ENTER();
#ifdef HAVE_PSELECT
res = pselect (nfd, rfds, wfds, efds, timeout, sigmask);
#else /*!HAVE_PSELECT*/
{
/* A better emulation of pselect would be to create a pipe, wait
in the select for one end and have a signal handler write to
the other end. However, this is non-trivial to implement and
thus we only print a compile time warning. */
# ifdef __GNUC__
# warning Using a non race free pselect emulation.
# endif
struct timeval t, *tp;
tp = NULL;
if (!timeout)
;
else if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000)
{
t.tv_sec = timeout->tv_sec;
t.tv_usec = (timeout->tv_nsec + 999) / 1000;
tp = &t;
}
else
{
errno = EINVAL;
res = -1;
goto leave;
}
if (sigmask)
{
int save_errno;
sigset_t savemask;
pthread_sigmask (SIG_SETMASK, sigmask, &savemask);
res = select (nfd, rfds, wfds, efds, tp);
save_errno = errno;
pthread_sigmask (SIG_SETMASK, &savemask, NULL);
errno = save_errno;
}
else
res = select (nfd, rfds, wfds, efds, tp);
leave:
;
}
#endif /*!HAVE_PSELECT*/
LEAVE();
return res;
}
int
npth_poll (struct pollfd *fds, unsigned long nfds, int timeout)
{
int res;
ENTER();
res = poll (fds, (nfds_t)nfds, timeout);
LEAVE();
return res;
}
/* Wrapper around ppoll(2) that releases the global nPth lock while
 * blocking.  Without HAVE_PPOLL it is emulated via poll(2) plus
 * pthread_sigmask, which is not race free.  */
int
npth_ppoll (struct pollfd *fds, unsigned long nfds,
const struct timespec *timeout, const sigset_t *sigmask)
{
int res;
ENTER();
#ifdef HAVE_PPOLL
res = ppoll (fds, (nfds_t)nfds, timeout, sigmask);
#else /*!HAVE_PPOLL*/
{
# ifdef __GNUC__
# warning Using a non race free ppoll emulation.
# endif
int t;
/* A NULL timeout maps to poll's "wait forever" value of -1.  */
if (!timeout)
t = -1;
else if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000)
/* Round nanoseconds up to whole milliseconds so we never wait
shorter than requested.  NOTE(review): tv_sec * 1000 can
overflow int for very large timeouts -- confirm callers only
pass short relative timeouts.  */
t = timeout->tv_sec * 1000 + (timeout->tv_nsec + 999999) / 1000000;
else
{
/* Out-of-range tv_nsec: mirror ppoll's EINVAL behavior.  */
errno = EINVAL;
res = -1;
goto leave;
}
if (sigmask)
{
/* Temporarily install SIGMASK around the poll call and restore
errno afterwards, since pthread_sigmask may clobber it.  */
int save_errno;
sigset_t savemask;
pthread_sigmask (SIG_SETMASK, sigmask, &savemask);
res = poll (fds, (nfds_t)nfds, t);
save_errno = errno;
pthread_sigmask (SIG_SETMASK, &savemask, NULL);
errno = save_errno;
}
else
res = poll (fds, (nfds_t)nfds, t);
leave:
;
}
#endif
LEAVE();
return res;
}
/* Wrapper around read(2) that releases the global nPth lock while the
   call may block.  Result and errno are those of read.  */
ssize_t
npth_read(int fd, void *buf, size_t nbytes)
{
  ssize_t nread;

  ENTER();
  nread = read (fd, buf, nbytes);
  LEAVE();
  return nread;
}
/* Wrapper around write(2) that releases the global nPth lock while the
   call may block.  Result and errno are those of write.  */
ssize_t
npth_write(int fd, const void *buf, size_t nbytes)
{
  ssize_t nwritten;

  ENTER();
  nwritten = write (fd, buf, nbytes);
  LEAVE();
  return nwritten;
}
/* Wrapper around recvmsg(2) that releases the global nPth lock while
 * blocking.
 * NOTE(review): recvmsg returns ssize_t but the public prototype
 * narrows it to int -- confirm no caller transfers more than INT_MAX
 * bytes in one call.  */
int
npth_recvmsg (int fd, struct msghdr *msg, int flags)
{
int res;
ENTER();
res = recvmsg (fd, msg, flags);
LEAVE();
return res;
}
/* Wrapper around sendmsg(2) that releases the global nPth lock while
 * blocking.
 * NOTE(review): sendmsg returns ssize_t but the public prototype
 * narrows it to int -- confirm no caller transfers more than INT_MAX
 * bytes in one call.  */
int
npth_sendmsg (int fd, const struct msghdr *msg, int flags)
{
int res;
ENTER();
res = sendmsg (fd, msg, flags);
LEAVE();
return res;
}
/* Manually release the global nPth lock before a blocking operation
 * not covered by the npth_* wrappers.  Must be paired with a later
 * call to npth_protect.  */
void
npth_unprotect (void)
{
/* If we are not initialized we may not access the semaphore and
* thus we shortcut it. Note that in this case the unprotect/protect
* is not needed. For failsafe reasons if an nPth thread has ever
* been created but nPth has accidentally not initialized we do not
* shortcut so that a stack backtrace (due to the access of the
* uninitialized semaphore) is more expressive. */
if (initialized_or_any_threads)
ENTER();
}
/* Re-acquire the global nPth lock after npth_unprotect.  A no-op if
 * nPth was never initialized and no thread was ever created.  */
void
npth_protect (void)
{
/* See npth_unprotect for commentary. */
if (initialized_or_any_threads)
LEAVE();
}
/* Return nonzero if the caller currently holds the global nPth lock
 * (i.e. is inside a protected region).  Useful for debugging misuse
 * of npth_protect/npth_unprotect.  */
int
npth_is_protected (void)
{
return got_sceptre;
}
/* Store the current realtime (wall clock) time in *TS.  Returns 0 on
 * success and -1 on error.  Provided so callers have a portable way
 * to build the timespec values used by the timed npth functions.  */
int
npth_clock_gettime (struct timespec *ts)
{
#if defined(CLOCK_REALTIME) && HAVE_CLOCK_GETTIME
return clock_gettime (CLOCK_REALTIME, ts);
#elif HAVE_GETTIMEOFDAY
{
/* Fallback: derive the timespec from gettimeofday, which only has
microsecond resolution.  */
struct timeval tv;
if (gettimeofday (&tv, NULL))
return -1;
ts->tv_sec = tv.tv_sec;
ts->tv_nsec = tv.tv_usec * 1000;
return 0;
}
#else
/* FIXME: fall back on time() with seconds resolution. */
# error clock_gettime not available - please provide a fallback.
#endif
}
diff --git a/src/npth.h.in b/src/npth.h.in
index 5818bd7..eff3f72 100644
--- a/src/npth.h.in
+++ b/src/npth.h.in
@@ -1,460 +1,460 @@
/* npth.h - a lightweight implementation of pth over pthread.
* Configured for: @NPTH_CONFIG_HOST@.
* Copyright (C) 2011, 2012, 2015, 2017 g10 Code GmbH
*
* This file is part of nPth.
*
* nPth is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* nPth is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <https://www.gnu.org/licenses/>.
*/
/* Changes to GNU Pth:
*
* Return value and arguments follow strictly the pthread format:
*
* - Return the error number instead of setting errno,
*
* - have timedlock function instead of extra event argument,
*
* - have trylock function instead of extra event argument. Can't mix
* timed and try.
*
* - No _new functions. Use _init functions instead.
*
* - Attributes are set by specific instead of generic getter/setter
* functions.
*
* - Offers replacement functions for sendmsg and recvmsg.
*/
-#ifndef NPTH_H
-#define NPTH_H
+#ifndef _NPTH_H
+#define _NPTH_H
#include
#include
#@INSERT_SYS_TIME_H@
#@INSERT_TIME_H@
#include
#@INSERT_SOCKLEN_T@
#@INSERT_SYS_SELECT_H@
#@INSERT_SIGNAL_H@
#include
#@INSERT_NO_RWLOCK@
#ifdef __ANDROID__
#include
#if __ANDROID_API__ < 9
/* Android 8 and earlier are missing rwlocks. We punt to mutexes in
that case. */
-#define NPTH_NO_RWLOCK 1
+#define _NPTH_NO_RWLOCK 1
#endif
#endif
#ifdef __cplusplus
extern "C" {
#if 0 /* (Keep Emacsens' auto-indent happy.) */
}
#endif
#endif
/* Global Library Management */
#define npth_t pthread_t
/* Initialize the library and convert current thread to main thread.
Must be first npth function called in a process. Returns error
number on error and 0 on success. */
int npth_init(void);
/* Not needed. */
/* pth_kill, pth_ctrl, pth_version */
/* Thread Attribute Handling */
/* Can't do that. */
/* pth_attr_of */
#define npth_attr_t pthread_attr_t
#define npth_attr_init pthread_attr_init
#define npth_attr_destroy pthread_attr_destroy
#define NPTH_CREATE_JOINABLE PTHREAD_CREATE_JOINABLE
#define NPTH_CREATE_DETACHED PTHREAD_CREATE_DETACHED
#define npth_attr_getdetachstate pthread_attr_getdetachstate
#define npth_attr_setdetachstate pthread_attr_setdetachstate
int npth_getname_np (npth_t target_thread, char *buf, size_t buflen);
int npth_setname_np (npth_t target_thread, const char *name);
/* Thread Control */
int npth_create(npth_t *thread, const npth_attr_t *attr,
void *(*start_routine) (void *), void *arg);
/* The Pth version of pth_once supports passing an argument, the
pthread version does not. We would have to reimplement the whole
feature with a global table. Not needed. */
/* pth_once */
#define npth_self pthread_self
/* No can do! */
/* pth_suspend, pth_resume */
/* Yield is considered harmful and should never be used in high-level
applications. Use a condition instead to wait for a specific event
to happen, or, as a last resort, use npth_usleep to back off a hard
busy wait. */
/* pth_yield */
/* Not needed. */
/* pth_nap */
/* pth_wait, pth_cancel, pth_abort, pth_raise */
int npth_join(npth_t thread, void **retval);
#define npth_detach pthread_detach
void npth_exit(void *retval);
/* Utilities */
/* pth_fdmode, pth_time, pth_timeout, pth_sfiodisc */
/* Cancellation Management */
/* Not needed. */
/* pth_cancel_state. npth_cancel_point */
/* Event Handling */
/* No equivalent in pthread. */
/* pth_event, pth_event_typeof, pth_event_extract, pth_event_concat, pth_event_isolate,
pth_event_walk, pth_event_status, pth_event_free */
/* Key-Based Storage */
#define npth_key_t pthread_key_t
#define npth_key_create pthread_key_create
#define npth_key_delete pthread_key_delete
#define npth_setspecific pthread_setspecific
#define npth_getspecific pthread_getspecific
/* Message Port Communication */
/* No equivalent in pthread. */
/* pth_msgport_create, pth_msgport_destroy, pth_msgport_find,
pth_msgport_pending, pth_msgport_put, pth_msgport_get,
pth_msgport_reply. */
/* Thread Cleanups */
/* Not needed. */
/* pth_cleanup_push, pth_cleanup_pop */
/* Process Forking */
/* POSIX only supports a global atfork handler. So, to implement
per-thread handlers like in Pth, we would need to keep the data in
thread local storage. But, neither pthread_self nor
pthread_getspecific are standardized as async-signal-safe (what a
joke!), and __thread is an ELF extension. Still, using
pthread_self and pthread_getspecific is probably portable
enough to implement the atfork handlers, if required.
pth_fork is only required because fork() is not pth aware. fork()
is pthread aware though, and already only creates a single thread
in the child process. */
/* pth_atfork_push, pth_atfork_pop, pth_fork */
/* Synchronization */
#define npth_mutexattr_t pthread_mutexattr_t
#define npth_mutexattr_init pthread_mutexattr_init
#define npth_mutexattr_destroy pthread_mutexattr_destroy
#define npth_mutexattr_settype pthread_mutexattr_settype
#define npth_mutexattr_gettype pthread_mutexattr_gettype
#define NPTH_MUTEX_NORMAL PTHREAD_MUTEX_NORMAL
#define NPTH_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE
#define NPTH_MUTEX_ERRORCHECK PTHREAD_MUTEX_ERRORCHECK
#define NPTH_MUTEX_DEFAULT PTHREAD_MUTEX_DEFAULT
#define npth_mutex_t pthread_mutex_t
#define NPTH_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
#define NPTH_RECURSIVE_MUTEX_INITIALIZER_NP \
PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
#define NPTH_ERRORCHECK_MUTEX_INITIALIZER_NP \
PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
#define npth_mutex_init pthread_mutex_init
#define npth_mutex_destroy pthread_mutex_destroy
#define npth_mutex_trylock pthread_mutex_trylock
int npth_mutex_lock(npth_mutex_t *mutex);
int npth_mutex_timedlock(npth_mutex_t *mutex, const struct timespec *abstime);
#define npth_mutex_unlock pthread_mutex_unlock
#if @INSERT_EXPOSE_RWLOCK_API@
-#ifdef NPTH_NO_RWLOCK
+#ifdef _NPTH_NO_RWLOCK
typedef int npth_rwlockattr_t;
#define npth_rwlockattr_init(attr)
#define npth_rwlockattr_destroy(attr)
#define npth_rwlockattr_gettype_np(attr,kind)
#define npth_rwlockattr_settype_np(attr,kind)
#define NPTH_RWLOCK_PREFER_READER_NP 0
#define NPTH_RWLOCK_PREFER_WRITER_NP 0
#define NPTH_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP 0
#define NPTH_RWLOCK_DEFAULT_NP 0
#define NPTH_RWLOCK_INITIALIZER NPTH_MUTEX_INITIALIZER
#define NPTH_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP NPTH_MUTEX_INITIALIZER
typedef npth_mutex_t npth_rwlock_t;
#define npth_rwlock_init(rwlock,attr) npth_mutex_init(rwlock,0)
#define npth_rwlock_destroy npth_mutex_destroy
#define npth_rwlock_tryrdlock npth_mutex_trylock
#define npth_rwlock_rdlock npth_mutex_lock
#define npth_rwlock_trywrlock npth_mutex_trylock
#define npth_rwlock_timedrdlock npth_mutex_timedlock
#define npth_rwlock_wrlock npth_mutex_lock
#define npth_rwlock_rdlock npth_mutex_lock
#define npth_rwlock_timedwrlock npth_mutex_timedlock
#define npth_rwlock_unlock npth_mutex_unlock
-#else /* NPTH_NO_RWLOCK */
+#else /* _NPTH_NO_RWLOCK */
#define npth_rwlockattr_t pthread_rwlockattr_t
#define npth_rwlockattr_init pthread_rwlockattr_init
#define npth_rwlockattr_destroy pthread_rwlockattr_destroy
#define npth_rwlockattr_gettype_np pthread_rwlockattr_gettype_np
#define npth_rwlockattr_settype_np pthread_rwlockattr_settype_np
#define NPTH_RWLOCK_PREFER_READER_NP PTHREAD_RWLOCK_PREFER_READER_NP
/* Note: The prefer-writer setting is ineffective and the same as
prefer-reader. This is because reader locks are specified to be
recursive, but for efficiency reasons we do not keep track of which
threads already hold a reader lock. For this reason, we can not
prefer some reader locks over others, and thus a recursive reader
lock could be stalled by a pending writer, leading to a dead
lock. */
#define NPTH_RWLOCK_PREFER_WRITER_NP PTHREAD_RWLOCK_PREFER_WRITER_NP
/* The non-recursive choice is a promise by the application that it
does not lock the rwlock for reading recursively. In this setting,
writers are preferred, but note that recursive reader locking is
prone to deadlocks in that case. */
#define NPTH_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP \
PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP
#define NPTH_RWLOCK_DEFAULT_NP PTHREAD_RWLOCK_DEFAULT_NP
#define NPTH_RWLOCK_INITIALIZER PTHREAD_RWLOCK_INITIALIZER
#define NPTH_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP \
PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP
typedef pthread_rwlock_t npth_rwlock_t;
#define npth_rwlock_init pthread_rwlock_init
#define npth_rwlock_destroy pthread_rwlock_destroy
#define npth_rwlock_tryrdlock pthread_rwlock_tryrdlock
int npth_rwlock_rdlock (npth_rwlock_t *rwlock);
int npth_rwlock_timedrdlock (npth_rwlock_t *rwlock,
const struct timespec *abstime);
#define npth_rwlock_trywrlock pthread_rwlock_trywrlock
int npth_rwlock_wrlock (npth_rwlock_t *rwlock);
int npth_rwlock_timedwrlock (npth_rwlock_t *rwlock,
const struct timespec *abstime);
#define npth_rwlock_unlock pthread_rwlock_unlock
-#endif /* !NPTH_NO_RWLOCK */
+#endif /* !_NPTH_NO_RWLOCK */
#endif
typedef pthread_cond_t npth_cond_t;
#define NPTH_COND_INITIALIZER PTHREAD_COND_INITIALIZER
/* For now, we don't support any cond attributes. */
#define npth_cond_init pthread_cond_init
#define npth_cond_broadcast pthread_cond_broadcast
#define npth_cond_signal pthread_cond_signal
#define npth_cond_destroy pthread_cond_destroy
int npth_cond_wait(npth_cond_t *cond, npth_mutex_t *mutex);
int npth_cond_timedwait(npth_cond_t *cond, npth_mutex_t *mutex,
const struct timespec *abstime);
/* Not needed. */
/* pth_barrier_t, pth_barrier_init, pth_barrier_reach */
/* User-Space Context */
/* Can not be implemented. */
/* pth_uctx_create, pth_uctx_make, pth_uctx_switch, pth_uctx_destroy */
/* Generalized POSIX Replacement API */
/* In general, we can not support these easily. */
/* pth_sigwait_ev, pth_accept_ev, pth_connect_ev, pth_select_ev,
pth_poll_ev, pth_read_ev, pth_readv_ev, pth_write_ev,
pth_writev_ev, pth_recv_ev, pth_recvfrom_ev, pth_send_ev,
pth_sendto_ev */
/* Standard POSIX Replacement API */
/* We will provide a more specific way to handle signals. */
/* pth_sigmask, pth_sigwait */
/* Not needed. */
/* pth_nanosleep, pth_system, pth_readv, pth_writev, pth_poll,
pth_recv, pth_send, pth_recvfrom, pth_sendto */
int npth_usleep(unsigned int usec);
unsigned int npth_sleep(unsigned int sec);
pid_t npth_waitpid(pid_t pid, int *status, int options);
int npth_system(const char *cmd);
#define npth_sigmask pthread_sigmask
int npth_sigwait(const sigset_t *set, int *sig);
-int npth_connect(int s, const struct sockaddr *addr, npth_socklen_t addrlen);
-int npth_accept(int s, struct sockaddr *addr, npth_socklen_t *addrlen);
+int npth_connect(int s, const struct sockaddr *addr, _npth_socklen_t addrlen);
+int npth_accept(int s, struct sockaddr *addr, _npth_socklen_t *addrlen);
int npth_select(int nfd, fd_set *rfds, fd_set *wfds, fd_set *efds,
struct timeval *timeout);
int npth_pselect(int nfd, fd_set *rfds, fd_set *wfds, fd_set *efds,
const struct timespec *timeout, const sigset_t *sigmask);
ssize_t npth_read(int fd, void *buf, size_t nbytes);
ssize_t npth_write(int fd, const void *buf, size_t nbytes);
int npth_recvmsg (int fd, struct msghdr *msg, int flags);
int npth_sendmsg (int fd, const struct msghdr *msg, int flags);
struct pollfd;
int npth_poll (struct pollfd *fds, unsigned long nfds, int timeout);
int npth_ppoll (struct pollfd *fds, unsigned long nfds,
const struct timespec *timeout, const sigset_t *sigmask);
/* For anything not covered here, you can enter/leave manually at your
own risk. */
void npth_unprotect (void);
void npth_protect (void);
/* If you run into problems with the above calls, this function can be
* used to examine in which state nPth is. */
int npth_is_protected (void);
/* Because the timed functions work on timespec, we provide a clock
interface for convenience and portability. */
int npth_clock_gettime (struct timespec *tp);
/* CMP may be ==, < or >. Do not use <= or >=. */
#define npth_timercmp(t1, t2, cmp) \
(((t1)->tv_sec == (t2)->tv_sec) ? \
((t1)->tv_nsec cmp (t2)->tv_nsec) : \
((t1)->tv_sec cmp (t2)->tv_sec))
#define npth_timeradd(t1, t2, result) \
do { \
(result)->tv_sec = (t1)->tv_sec + (t2)->tv_sec; \
(result)->tv_nsec = (t1)->tv_nsec + (t2)->tv_nsec; \
if ((result)->tv_nsec >= 1000000000) \
{ \
++(result)->tv_sec; \
(result)->tv_nsec -= 1000000000; \
} \
} while (0)
#define npth_timersub(t1, t2, result) \
do { \
(result)->tv_sec = (t1)->tv_sec - (t2)->tv_sec; \
(result)->tv_nsec = (t1)->tv_nsec - (t2)->tv_nsec; \
if ((result)->tv_nsec < 0) { \
--(result)->tv_sec; \
(result)->tv_nsec += 1000000000; \
} \
} while (0)
/* This is a support interface to make it easier to handle signals.
*
* The interfaces here support one (and only one) thread (here called
* "main thread") in the application to monitor several signals while
* selecting on filedescriptors.
*
* First, the main thread should call npth_sigev_init. This
* initializes some global data structures used to record interesting
* and pending signals.
*
* Then, the main thread should call npth_sigev_add for every signal
* it is interested in observing, and finally npth_sigev_fini. This
* will block the signal in the main threads sigmask. Note that these
* signals should also be blocked in all other threads. Since they
* are blocked in the main thread after calling npth_sigev_add, it is
* recommended to call npth_sigev_add in the main thread before
* creating any threads.
*
* The function npth_sigev_sigmask is a convenient function that
* returns the sigmask of the thread at time of npth_sigev_init, but
* with all registered signals unblocked. It is recommended to do all
* other changes to the main thread's sigmask before calling
* npth_sigev_init, so that the return value of npth_sigev_sigmask can
* be used in the npth_pselect invocation.
*
* In any case, the main thread should invoke npth_pselect with a
* sigmask that has all signals that should be monitored unblocked.
*
* After npth_pselect returns, npth_sigev_get_pending can be called in
* a loop until it returns 0 to iterate over the list of pending
* signals. Each time a signal is returned by that function, its
* status is reset to non-pending.
*/
/* Start setting up signal event handling. */
void npth_sigev_init (void);
/* Add signal SIGNUM to the list of watched signals. */
void npth_sigev_add (int signum);
/* Finish the list of watched signals. This starts to block them,
too. */
void npth_sigev_fini (void);
/* Get the sigmask as needed for pselect. */
sigset_t *npth_sigev_sigmask (void);
/* Return the next signal event that occurred.  Returns 0 if none are
left, 1 on success. */
int npth_sigev_get_pending (int *r_signum);
#if 0 /* (Keep Emacsens' auto-indent happy.) */
{
#endif
#ifdef __cplusplus
}
#endif
-#endif /*NPTH_H*/
+#endif /*_NPTH_H*/
/*
@emacs_local_vars_begin@
@emacs_local_vars_read_only@
@emacs_local_vars_end@
*/
diff --git a/w32/npth.c b/w32/npth.c
index fc75291..270e76a 100644
--- a/w32/npth.c
+++ b/w32/npth.c
@@ -1,1994 +1,1994 @@
/* npth.c - a lightweight implementation of pth over native threads
* Copyright (C) 2011, 2014 g10 Code GmbH
*
* This file is part of nPth.
*
* nPth is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* nPth is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <https://www.gnu.org/licenses/>.
*/
/* We implement the join mechanism ourself. */
#ifdef HAVE_CONFIG_H
#include
#endif
#include
#include
#include
#include "npth.h"
#include
#define DEBUG_CALLS 1
-#define npth_debug(x, ...) fprintf(stderr, __VA_ARGS__)
+#define _npth_debug(x, ...) fprintf(stderr, __VA_ARGS__)
#ifndef TEST
#undef DEBUG_CALLS
#define DEBUG_CALLS 0
-#undef npth_debug
-#define npth_debug(x, ...)
+#undef _npth_debug
+#define _npth_debug(x, ...)
#endif
/* This seems to be a common standard. */
#define THREAD_NAME_MAX 15
/* The global lock that excludes all threads but one. Note that this
implements the single-user-thread policy, but also protects all our
global data such as the thread_table. GOT_SCEPTRE is a flag used
for debugging to tell wether we hold SCEPTRE. */
static CRITICAL_SECTION sceptre;
static int got_sceptre;
/* This flag is set as soon as npth_init has been called or if any
* thread has been created. It will never be cleared again. The only
* purpose is to make npth_protect and npth_unprotect more robust in
* that they can be shortcut when npth_init has not yet been called.
* This is important for libraries which want to support nPth by using
* those two functions but may have been initialized before nPth. */
static int initialized_or_any_threads;
typedef struct npth_impl_s *npth_impl_t;
#define MAX_THREADS 1024
#define INVALID_THREAD_ID 0
/* Thread ID to thread context table. We never allocate ID 0. */
static npth_impl_t thread_table[MAX_THREADS];
/* The TLS index to store thread ID of the current thread. Used to
make faster lookups of the thread data. */
DWORD tls_index;
/* Map a windows error value (GetLastError) to a POSIX error value. */
static int
map_error (int winerr)
{
/* FIXME: WINERR is a GetLastError value; specific codes should be
mapped to closer POSIX equivalents.  For now every failure
collapses to EIO.  */
return EIO;
}
/* Wait on OBJ for at most MSECS milliseconds (or INFINITE) and
 * translate the WaitForSingleObject status into a POSIX error code:
 * 0 on success, EDEADLK for an abandoned mutex, ETIMEDOUT on
 * timeout, a mapped error on failure, and EINTR for any other
 * unexpected status.  */
static int
wait_for_single_object (HANDLE obj, DWORD msecs)
{
DWORD res;
res = WaitForSingleObject(obj, msecs);
if (res == WAIT_ABANDONED)
return EDEADLK;
else if (res == WAIT_TIMEOUT)
return ETIMEDOUT;
else if (res == WAIT_FAILED)
return map_error (GetLastError());
else if (res != WAIT_OBJECT_0)
return EINTR;
else
return 0;
}
/* Store the current system time in *TP.  Always returns 0.
 * NOTE(review): tv_sec counts from the Windows epoch (Jan 1, 1601),
 * not the Unix epoch.  This is consistent as long as these values
 * are only compared/subtracted against each other (as
 * calculate_timeout does) -- confirm no caller mixes them with Unix
 * timestamps.  */
int
npth_clock_gettime(struct timespec *tp)
{
FILETIME ftime;
ULARGE_INTEGER systime;
GetSystemTimeAsFileTime (&ftime);
systime.LowPart = ftime.dwLowDateTime;
systime.HighPart = ftime.dwHighDateTime;
/* systime.QuadPart has the 100-nanosecond intervals since Jan 1, 1601. */
tp->tv_sec = systime.QuadPart / 10000000ULL;
tp->tv_nsec = (systime.QuadPart * 100ULL) % 1000000000ULL;
return 0;
}
/* Convert the absolute time ABSTIME into a relative wait in
 * milliseconds, stored in *MSECS_R.  Returns 0 on success or
 * ETIMEDOUT when ABSTIME is already in the past (or rounds to zero
 * milliseconds).  */
static int
calculate_timeout (const struct timespec *abstime, DWORD *msecs_r)
{
struct timespec tp;
struct timespec tp_delta;
DWORD msecs;
npth_clock_gettime (&tp);
/* Make sure there is a positive time delta. */
if (!(npth_timercmp (&tp, abstime, <)))
return ETIMEDOUT;
npth_timersub (abstime, &tp, &tp_delta);
/* Make sure to round up to at least one millisecond. Note that
within reasonable timeouts and the above macros, we should always
end up with a positive wait time here. */
msecs = (tp_delta.tv_sec * 1000) + (tp_delta.tv_nsec + 999999) / 1000000;
if (msecs < 1)
{
/* Log a critical error here. */
return ETIMEDOUT;
}
*msecs_r = msecs;
return 0;
}
static void
enter_npth (const char *function)
{
if (DEBUG_CALLS)
- npth_debug (DEBUG_CALLS, "tid %lu: enter_npth (%s)\n",
- npth_self (), function ? function : "unknown");
+ _npth_debug (DEBUG_CALLS, "tid %lu: enter_npth (%s)\n",
+ npth_self (), function ? function : "unknown");
got_sceptre = 0;
LeaveCriticalSection (&sceptre);
}
static void
leave_npth (const char *function)
{
EnterCriticalSection (&sceptre);
got_sceptre = 1;
if (DEBUG_CALLS)
- npth_debug (DEBUG_CALLS, "tid %lu: leave_npth (%s)\n",
- npth_self (), function ? function : "");
+ _npth_debug (DEBUG_CALLS, "tid %lu: leave_npth (%s)\n",
+ npth_self (), function ? function : "");
}
#define ENTER() enter_npth(__FUNCTION__)
#define LEAVE() leave_npth(__FUNCTION__)
/* Per-thread context, stored in THREAD_TABLE and indexed by the
   public npth_t id.  All fields are protected by the global lock.  */
struct npth_impl_s
{
/* Usually there is one ref owned by the thread as long as it is
running, and one ref for everybody else as long as the thread is
joinable. */
int refs;
/* The Windows thread handle; INVALID_HANDLE_VALUE until set.  */
HANDLE handle;
/* True if thread is detached. */
int detached;
/* The start routine and arg. */
void *(*start_routine) (void *);
void *start_arg;
/* NUL-terminated thread name, see npth_setname_np.  */
char name[THREAD_NAME_MAX + 1];
/* Doubly-linked list for the waiter queue in condition
variables. */
npth_impl_t next;
npth_impl_t *prev_ptr;
/* The event on which this thread waits when it is queued. */
HANDLE event;
/* Value returned by START_ROUTINE or passed to npth_exit; read by
joiners.  */
void *result;
};
/* Remove THREAD from whatever condition waiter queue it is on; a
   no-op when it is not queued.  The caller must hold the global
   lock.  */
static void
dequeue_thread (npth_impl_t thread)
{
  npth_impl_t succ = thread->next;

  /* Fix: the original cleared THREAD->NEXT before updating the
     predecessor link, so removing a thread from the middle of a
     queue stored NULL into the predecessor's next pointer and
     truncated the queue, orphaning all later waiters.  Capture the
     successor first and splice it in.  */
  if (succ)
    succ->prev_ptr = thread->prev_ptr;
  if (thread->prev_ptr)
    *thread->prev_ptr = succ;
  thread->next = NULL;
  thread->prev_ptr = NULL;
}
/* Enqueue THREAD to come after the thread whose next pointer is
prev_ptr. The caller must hold the global lock. */
static void
enqueue_thread (npth_impl_t thread, npth_impl_t *prev_ptr)
{
/* Fix up the old successor's back link first, while *PREV_PTR still
points at it, then splice THREAD in.  */
if (*prev_ptr)
(*prev_ptr)->prev_ptr = &thread->next;
thread->prev_ptr = prev_ptr;
thread->next = *prev_ptr;
*prev_ptr = thread;
}
/* Look up THREAD_ID in the global table.  On success store the
   context in *THREAD and return 0; return ESRCH for an out-of-range
   or unused id.  The caller must hold the global lock.  */
static int
find_thread (npth_t thread_id, npth_impl_t *thread)
{
  npth_impl_t found;

  if (thread_id < 1 || thread_id >= MAX_THREADS)
    return ESRCH;
  found = thread_table[thread_id];
  if (found == NULL)
    return ESRCH;
  *thread = found;
  return 0;
}
/* Allocate a fresh thread context and a free slot in THREAD_TABLE.
   On success store the new id in *THREAD_ID and return 0.  Returns
   EAGAIN when the table is full or errno when out of memory.  The
   caller must hold the global lock.  */
static int
new_thread (npth_t *thread_id)
{
  npth_impl_t thread;
  int id;

  /* ID 0 is never allocated. */
  for (id = 1; id < MAX_THREADS; id++)
    if (! thread_table[id])
      break;
  if (id == MAX_THREADS)
    return EAGAIN;

  thread = malloc (sizeof (*thread));
  if (! thread)
    return errno;

  thread->refs = 1;
  thread->handle = INVALID_HANDLE_VALUE;
  thread->detached = 0;
  thread->start_routine = NULL;
  thread->start_arg = NULL;
  thread->next = NULL;
  thread->prev_ptr = NULL;
  /* We create the event when it is first needed (not all threads wait
     on conditions). */
  thread->event = INVALID_HANDLE_VALUE;
  /* Fix: initialize RESULT so a joiner of a thread that never stored
     a result (e.g. the main thread) does not read indeterminate
     memory.  */
  thread->result = NULL;
  memset (thread->name, '\0', sizeof (thread->name));

  thread_table[id] = thread;
  *thread_id = id;
  return 0;
}
/* Destroy the context of THREAD_ID and clear its table slot.  Called
   when the reference count drops to zero; the caller must hold the
   global lock.  */
static void
free_thread (npth_t thread_id)
{
  npth_impl_t thread = thread_table[thread_id];

  /* Fix: HANDLE is initialized to INVALID_HANDLE_VALUE, which is
     non-NULL, so the original `if (thread->handle)' always passed
     and could hand INVALID_HANDLE_VALUE to CloseHandle.  */
  if (thread->handle != INVALID_HANDLE_VALUE)
    CloseHandle (thread->handle);

  /* Fix: also release the lazily created condition-wait event,
     plugging a handle leak for threads that ever waited on a
     condition.  */
  if (thread->event != INVALID_HANDLE_VALUE)
    CloseHandle (thread->event);

  /* Unlink the thread from any condition waiter queue. */
  dequeue_thread (thread);

  free (thread);
  thread_table[thread_id] = NULL;
}
/* Drop one reference from THREAD_ID and reclaim the context once the
   count reaches zero.  The caller must hold the global lock.  */
static void
deref_thread (npth_t thread_id)
{
  npth_impl_t thread = thread_table[thread_id];

  if (--thread->refs == 0)
    free_thread (thread_id);
}
/* Initialize the nPth library and register the calling thread as the
 * main thread.  Must be the first npth function called in the
 * process.  Returns 0 on success or an error number.  */
int
npth_init (void)
{
int err;
npth_t thread_id;
BOOL res;
HANDLE handle;
npth_impl_t thread;
InitializeCriticalSection (&sceptre);
/* Track that we have been initialized. */
initialized_or_any_threads = 1;
/* Fake a thread table item for the main thread. */
tls_index = TlsAlloc();
if (tls_index == TLS_OUT_OF_INDEXES)
return map_error (GetLastError());
err = new_thread(&thread_id);
if (err)
return err;
/* GetCurrentThread() is not usable by other threads, so it needs to
be duplicated. */
res = DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
GetCurrentProcess(), &handle,
0, FALSE, DUPLICATE_SAME_ACCESS);
if (!res)
{
free_thread (thread_id);
return map_error(GetLastError());
}
thread = thread_table[thread_id];
thread->handle = handle;
/* NOTE(review): on failure here the table entry and duplicated
handle are not released -- acceptable at init time, but worth
confirming.  */
if (! TlsSetValue(tls_index, (LPVOID)(uintptr_t) thread_id))
return map_error (GetLastError());
/* LEAVE() acquires the global lock; from here on the main thread
runs protected.  */
LEAVE();
return 0;
}
/* Thread creation attributes; currently only the detach state is
   supported.  */
struct npth_attr_s
{
/* NPTH_CREATE_JOINABLE or NPTH_CREATE_DETACHED.  */
int detachstate;
};
/* Allocate a new attribute object with default settings (joinable)
   and store it in *ATTR_R.  Returns 0 or errno on allocation
   failure.  */
int
npth_attr_init (npth_attr_t *attr_r)
{
  npth_attr_t new_attr = malloc (sizeof *new_attr);

  if (new_attr == NULL)
    return errno;
  new_attr->detachstate = NPTH_CREATE_JOINABLE;
  *attr_r = new_attr;
  return 0;
}
/* Release an attribute object and clear the caller's pointer.
   Always succeeds.  */
int
npth_attr_destroy (npth_attr_t *attr)
{
  npth_attr_t doomed = *attr;

  *attr = NULL;
  free (doomed);
  return 0;
}
/* Store the detach state of ATTR in *DETACHSTATE.  Always
   succeeds.  */
int
npth_attr_getdetachstate (npth_attr_t *attr,
                          int *detachstate)
{
  const struct npth_attr_s *a = *attr;

  *detachstate = a->detachstate;
  return 0;
}
/* Set the detach state of ATTR.  Returns EINVAL for anything other
   than NPTH_CREATE_JOINABLE or NPTH_CREATE_DETACHED.  */
int
npth_attr_setdetachstate (npth_attr_t *attr, int detachstate)
{
  switch (detachstate)
    {
    case NPTH_CREATE_JOINABLE:
    case NPTH_CREATE_DETACHED:
      (*attr)->detachstate = detachstate;
      return 0;
    default:
      return EINVAL;
    }
}
/* Copy the name of TARGET_THREAD into BUF.  BUFLEN must be at least
 * THREAD_NAME_MAX + 1; returns ERANGE otherwise, ESRCH for an
 * unknown thread, and 0 on success.  */
int
npth_getname_np (npth_t target_thread, char *buf, size_t buflen)
{
npth_impl_t thread;
int err;
if (buflen < THREAD_NAME_MAX + 1)
return ERANGE;
err = find_thread (target_thread, &thread);
if (err)
return err;
/* Safe: thread->name is always NUL-terminated and BUF was checked
to be large enough above.  */
strcpy (buf, thread->name);
return 0;
}
/* Set the name of TARGET_THREAD to NAME.  Returns ERANGE if NAME is
 * longer than THREAD_NAME_MAX, ESRCH for an unknown thread, and 0 on
 * success.  */
int
npth_setname_np (npth_t target_thread, const char *name)
{
npth_impl_t thread;
int err;
if (strlen(name) > THREAD_NAME_MAX)
return ERANGE;
err = find_thread (target_thread, &thread);
if (err)
return err;
/* Safe: thread->name has room for THREAD_NAME_MAX + 1 bytes and
NAME's length was checked above.  */
strcpy (thread->name, name);
return 0;
}
/* Windows thread trampoline.  ARG is the npth_t id of the new
 * thread; runs the user start routine with the global lock held and
 * records its result.  Note the inverted-looking calls: LEAVE()
 * acquires the global lock and ENTER() releases it.  */
static DWORD
thread_start (void *arg)
{
npth_t thread_id = (npth_t)(uintptr_t) arg;
npth_impl_t thread;
void *result;
if (! TlsSetValue(tls_index, (LPVOID)(uintptr_t) thread_id))
/* FIXME: There is not much we can do here. */
;
/* Acquire the global lock before running user code.  */
LEAVE();
/* We must be protected here, because we access the global
thread_table. */
thread = thread_table[thread_id];
result = thread->start_routine (thread->start_arg);
/* We might not return here if the thread calls npth_exit(). */
thread->result = result;
/* Any joiner will be signaled once we terminate. */
deref_thread (thread_id);
/* Release the global lock before the thread dies.  */
ENTER();
/* We can not return result, as that is a void*, not a DWORD. */
return 0;
}
/* Create a new thread running START_ROUTINE(START_ARG) and store its
 * id in *NEWTHREAD.  USER_ATTR may be NULL for default (joinable)
 * behavior.  Returns 0 on success or an error number.  */
int
npth_create (npth_t *newthread, const npth_attr_t *user_attr,
void *(*start_routine) (void *), void *start_arg)
{
int err = 0;
npth_t thread_id = INVALID_THREAD_ID;
npth_impl_t thread;
HANDLE handle;
/* We must stay protected here, because we access the global
thread_table. Also, creating a new thread is not a blocking
operation. */
err = new_thread (&thread_id);
if (err)
goto err_out;
thread = thread_table[thread_id];
if (user_attr && (*user_attr)->detachstate == NPTH_CREATE_DETACHED)
thread->detached = 1;
else
/* Joinable: keep an extra reference for the eventual joiner.  */
thread->refs += 1;
thread->start_routine = start_routine;
thread->start_arg = start_arg;
/* Create suspended so the table entry is fully set up before the
trampoline runs; resumed below.  */
handle = CreateThread (NULL, 0,
(LPTHREAD_START_ROUTINE)thread_start,
(void *)(uintptr_t) thread_id, CREATE_SUSPENDED,
NULL);
if (handle == NULL)
{
err = map_error (GetLastError());
goto err_out;
}
thread->handle = handle;
*newthread = thread_id;
ResumeThread (thread->handle);
return 0;
err_out:
/* Relies on INVALID_THREAD_ID being 0, i.e. "not yet allocated".  */
if (thread_id)
free_thread (thread_id);
return err;
}
/* Return the npth_t id of the calling thread, as stored in TLS by
 * npth_init or thread_start.  Returns 0 for a thread unknown to
 * nPth.  */
npth_t
npth_self (void)
{
LPVOID thread_id;
thread_id = TlsGetValue (tls_index);
/* TlsGetValue returns 0 both for a stored 0 and on error, so the
error case must be disambiguated via GetLastError.  */
if (thread_id == 0 && GetLastError() != ERROR_SUCCESS)
/* FIXME: Log the error. */
;
return (npth_t)(uintptr_t) thread_id;
}
/* Not part of the public interface at the moment, thus static. */
/* Try to join THREAD_ID without blocking.  On success store its
 * result in *THREAD_RETURN (if not NULL) and drop the joiner's
 * reference.  Returns EBUSY if the thread is still running, EINVAL
 * if it is detached, ESRCH if unknown.  */
static int
npth_tryjoin_np (npth_t thread_id, void **thread_return)
{
int err;
npth_impl_t thread;
err = find_thread (thread_id, &thread);
if (err)
return err;
if (thread->detached)
return EINVAL;
/* No need to allow competing threads to enter when we can get the
lock immediately. */
err = wait_for_single_object (thread->handle, 0);
/* A zero-timeout timeout means the thread has not terminated.  */
if (err == ETIMEDOUT)
err = EBUSY;
if (err)
return err;
if (thread_return)
*thread_return = thread->result;
deref_thread (thread_id);
return 0;
}
/* Wait for THREAD_ID to terminate, store its result in
 * *THREAD_RETURN (if not NULL) and release it.  Returns 0 on
 * success, EINVAL for a detached thread, ESRCH for an unknown one.  */
int
npth_join (npth_t thread_id, void **thread_return)
{
int err;
npth_impl_t thread;
/* No need to allow competing threads to enter when we can get the
lock immediately. */
err = npth_tryjoin_np (thread_id, thread_return);
/* Anything other than EBUSY (including success) is final.  */
if (err != EBUSY)
return err;
err = find_thread (thread_id, &thread);
if (err)
return err;
if (thread->detached)
return EINVAL;
/* Release the global lock while blocking on the thread handle.  */
ENTER();
err = wait_for_single_object (thread->handle, INFINITE);
LEAVE();
if (err)
return err;
if (thread_return)
*thread_return = thread->result;
deref_thread (thread_id);
return 0;
}
/* Mark THREAD_ID as detached so its resources are reclaimed
   automatically on termination.  Returns EINVAL if it is already
   detached and ESRCH if unknown.  */
int
npth_detach (npth_t thread_id)
{
  npth_impl_t thread;
  int rc;

  rc = find_thread (thread_id, &thread);
  if (rc)
    return rc;
  if (thread->detached)
    return EINVAL;

  /* The detached flag indicates to other threads that the outside
     reference in the global thread table has been consumed, so drop
     that reference now.  */
  thread->detached = 1;
  deref_thread (thread_id);
  return 0;
}
/* Terminate the calling thread, making RETVAL available to any
 * joiner.  Does not return.  */
void
npth_exit (void *retval)
{
int err;
npth_t thread_id;
npth_impl_t thread;
thread_id = npth_self();
err = find_thread (thread_id, &thread);
if (err)
/* FIXME: log this? */
return;
thread->result = retval;
/* Any joiner will be signaled once we terminate. */
deref_thread (thread_id);
/* Release the global lock before the thread dies.  */
ENTER();
/* We can not use retval here, as that is a void*, not a DWORD. */
ExitThread(0);
/* Never reached. But just in case ExitThread does return... */
LEAVE();
}
/* Create a new TLS key and store it in *KEY.  Destructors are not
   supported by this backend, so a non-NULL DESTR_FUNCTION yields
   EOPNOTSUPP.  */
int
npth_key_create (npth_key_t *key,
                 void (*destr_function) (void *))
{
  DWORD slot;

  if (destr_function)
    return EOPNOTSUPP;

  slot = TlsAlloc ();
  if (slot == TLS_OUT_OF_INDEXES)
    return map_error (GetLastError ());
  *key = slot;
  return 0;
}
/* Delete the TLS key KEY.  Returns 0 on success or a mapped error
   code.  */
int
npth_key_delete (npth_key_t key)
{
  if (!TlsFree (key))
    return map_error (GetLastError ());
  return 0;
}
/* Return the calling thread's value for KEY, or NULL when no value
 * was stored or KEY is invalid.  */
void *
npth_getspecific (npth_key_t key)
{
/* Pthread doesn't support error reporting beyond returning NULL for
an invalid key, which is also what TlsGetValue returns in that
case. */
return TlsGetValue (key);
}
/* Store POINTER as the calling thread's value for KEY.  Returns 0 on
   success or a mapped error code.  */
int
npth_setspecific (npth_key_t key, const void *pointer)
{
  if (!TlsSetValue (key, (void *) pointer))
    return map_error (GetLastError ());
  return 0;
}
/* Mutex attributes; only the mutex kind is supported.  */
struct npth_mutexattr_s
{
/* One of NPTH_MUTEX_NORMAL, _RECURSIVE, _ERRORCHECK or _DEFAULT.  */
int kind;
};
/* Allocate a mutex attribute object with the default kind and store
   it in *ATTR_R.  Returns 0 or errno on allocation failure.  */
int
npth_mutexattr_init (npth_mutexattr_t *attr_r)
{
  npth_mutexattr_t new_attr = malloc (sizeof *new_attr);

  if (new_attr == NULL)
    return errno;
  new_attr->kind = NPTH_MUTEX_DEFAULT;
  *attr_r = new_attr;
  return 0;
}
/* Release a mutex attribute object and clear the caller's pointer.
   Always succeeds.  */
int
npth_mutexattr_destroy (npth_mutexattr_t *attr)
{
  npth_mutexattr_t doomed = *attr;

  *attr = NULL;
  free (doomed);
  return 0;
}
int
npth_mutexattr_gettype (const npth_mutexattr_t *attr,
int *kind)
{
*kind = (*attr)->kind;
return 0;
}
/* Set the mutex kind of ATTR.  Returns EINVAL for an unknown
   kind.  */
int
npth_mutexattr_settype (npth_mutexattr_t *attr, int kind)
{
  switch (kind)
    {
    case NPTH_MUTEX_NORMAL:
    case NPTH_MUTEX_RECURSIVE:
    case NPTH_MUTEX_ERRORCHECK:
      (*attr)->kind = kind;
      return 0;
    default:
      return EINVAL;
    }
}
/* A heap-allocated mutex object; npth_mutex_t is a pointer to this.  */
struct npth_mutex_s
{
/* We have to use a mutex, not a CRITICAL_SECTION, because the
latter can not be used with timed waits. */
HANDLE mutex;
};
/* Initialize *MUTEX_R with a freshly created mutex.  MUTEX_ATTR is
 * currently ignored (Windows mutexes are always recursive).  Returns
 * 0 on success, errno on allocation failure, or a mapped Windows
 * error.
 * NOTE(review): the "always recursive" aspect is a property of
 * CreateMutex -- confirm callers relying on NORMAL/ERRORCHECK
 * semantics.  */
int
npth_mutex_init (npth_mutex_t *mutex_r, const npth_mutexattr_t *mutex_attr)
{
npth_mutex_t mutex;
/* We can not check *mutex_r here, as it may contain random data. */
mutex = malloc (sizeof (*mutex));
if (!mutex)
return errno;
/* We ignore MUTEX_ATTR. */
mutex->mutex = CreateMutex (NULL, FALSE, NULL);
if (!mutex->mutex)
{
int err = map_error (GetLastError());
free (mutex);
return err;
}
*mutex_r = mutex;
return 0;
}
/* Destroy MUTEX, closing the underlying handle and freeing the
   object.  Returns 0 or an errno value.  */
int
npth_mutex_destroy (npth_mutex_t *mutex)
{
  BOOL res;

  /* Reject an uninitialized mutex and all three static initializers.
     The errorcheck initializer was previously missing from this
     guard, so destroying such a mutex would have dereferenced the
     sentinel value as a pointer.  */
  if (*mutex == 0 || *mutex == NPTH_MUTEX_INITIALIZER
      || *mutex == NPTH_RECURSIVE_MUTEX_INITIALIZER_NP
      || *mutex == NPTH_ERRORCHECK_MUTEX_INITIALIZER_NP)
    return EINVAL;

  res = CloseHandle ((*mutex)->mutex);
  if (res == 0)
    return map_error (GetLastError());

  free (*mutex);
  *mutex = NULL;
  return 0;
}
/* Must be called with global lock held. */
/* Replace a static-initializer sentinel in *MUTEX by a real mutex
   object of the corresponding kind.  Returns 0 if *MUTEX already
   is (or now is) a real mutex, otherwise an errno value.  */
static int
mutex_init_check (npth_mutex_t *mutex)
{
  int err;
  npth_mutexattr_t attr;
  int kind;

  /* NULL marks a mutex whose lazy initialization failed earlier.  */
  if (*mutex == 0)
    return EINVAL;

  /* Map each static initializer sentinel to the mutex kind it
     requests.  */
  if ((*mutex) == NPTH_MUTEX_INITIALIZER)
    kind = NPTH_MUTEX_NORMAL;
  else if ((*mutex) == NPTH_RECURSIVE_MUTEX_INITIALIZER_NP)
    kind = NPTH_MUTEX_RECURSIVE;
  else if ((*mutex) == NPTH_ERRORCHECK_MUTEX_INITIALIZER_NP)
    kind = NPTH_MUTEX_ERRORCHECK;
  else
    /* Already initialized. */
    return 0;

  /* Make sure we don't try again in case of error. */
  *mutex = 0;

  err = npth_mutexattr_init (&attr);
  if (err)
    return err;

  err = npth_mutexattr_settype (&attr, kind);
  if (err)
    {
      npth_mutexattr_destroy (&attr);
      return err;
    }

  /* On success this overwrites the NULL in *MUTEX with the new
     object.  */
  err = npth_mutex_init (mutex, &attr);
  npth_mutexattr_destroy (&attr);
  return err;
}
/* Lock MUTEX, blocking indefinitely if necessary.  Handles mutexes
   still in their static-initializer state.  Returns 0 or an errno
   value.  */
int
npth_mutex_lock (npth_mutex_t *mutex)
{
  int err;

  /* Materialize a statically initialized mutex while we still hold
     the global lock.  */
  err = mutex_init_check (mutex);
  if (err)
    return err;

  /* Try to take the lock without letting competing threads run;
     only block (and give up the global lock) when contended.  */
  err = npth_mutex_trylock (mutex);
  if (err == EBUSY)
    {
      ENTER();
      err = wait_for_single_object ((*mutex)->mutex, INFINITE);
      LEAVE();
    }
  return err;
}
/* Try to lock MUTEX without blocking.  Returns 0 on success, EBUSY
   if the mutex is held elsewhere, or another errno value.  */
int
npth_mutex_trylock (npth_mutex_t *mutex)
{
  int err;

  /* Materialize a statically initialized mutex first.  */
  err = mutex_init_check (mutex);
  if (err)
    return err;

  /* A zero-timeout wait is quick, so the global lock is kept.  A
     timeout simply means the mutex is busy.  */
  err = wait_for_single_object ((*mutex)->mutex, 0);
  return err == ETIMEDOUT ? EBUSY : err;
}
/* Lock MUTEX, giving up once the absolute time ABSTIME has passed.
   Returns 0, ETIMEDOUT, or another errno value.  */
int
npth_mutex_timedlock (npth_mutex_t *mutex, const struct timespec *abstime)
{
  int err;
  DWORD msecs;

  /* Materialize a statically initialized mutex first.  */
  err = mutex_init_check (mutex);
  if (err)
    return err;

  /* Uncontended case: take the lock without yielding.  */
  err = npth_mutex_trylock (mutex);
  if (err != EBUSY)
    return err;

  err = calculate_timeout (abstime, &msecs);
  if (err)
    return err;

  ENTER();
  err = wait_for_single_object ((*mutex)->mutex, msecs);
  LEAVE();
  return err;
}
/* Unlock MUTEX.  Returns 0 or an errno value.  */
int
npth_mutex_unlock (npth_mutex_t *mutex)
{
  BOOL res;

  /* Reject an uninitialized mutex and all three static initializers.
     An unlock on a still-static mutex can never be valid because it
     was never locked.  The errorcheck initializer was previously
     missing from this guard.  */
  if (*mutex == 0 || *mutex == NPTH_MUTEX_INITIALIZER
      || *mutex == NPTH_RECURSIVE_MUTEX_INITIALIZER_NP
      || *mutex == NPTH_ERRORCHECK_MUTEX_INITIALIZER_NP)
    return EINVAL;

  res = ReleaseMutex ((*mutex)->mutex);
  if (res == 0)
    return map_error (GetLastError());

  return 0;
}
/* A condition variable is a FIFO of waiting threads; the global
   lock serializes all access to it.  */
struct npth_cond_s
{
  /* All conditions are protected by the global lock, so this is
     simple. */
  /* The waiter queue. */
  npth_impl_t waiter;
};
/* Create a new condition variable and return it in COND_R.
   Condition attributes are not supported, so COND_ATTR must be
   NULL.  Returns 0 or an errno value.  */
int
npth_cond_init (npth_cond_t *cond_r,
		const npth_condattr_t *cond_attr)
{
  npth_cond_t new_cond;

  if (cond_attr)
    return EINVAL;

  /* *COND_R may hold garbage (for example a static initializer),
     so it is never examined here.  */
  new_cond = malloc (sizeof *new_cond);
  if (new_cond == NULL)
    return errno;

  new_cond->waiter = NULL;
  *cond_r = new_cond;
  return 0;
}
/* Destroy COND.  Fails with EBUSY while threads are still waiting
   on it.  Returns 0 or an errno value.  */
int
npth_cond_destroy (npth_cond_t *cond)
{
  npth_cond_t old = *cond;

  if (old == 0)
    return EINVAL;
  if (old->waiter)
    return EBUSY;

  *cond = NULL;
  free (old);
  return 0;
}
/* Must be called with global lock held. */
/* If *COND holds the static initializer sentinel, replace it by a
   real condition object.  Returns 0 if *COND is already (or now is)
   a real condition, otherwise an errno value.  */
static int
cond_init_check (npth_cond_t *cond)
{
  int err;

  /* NULL marks a condition whose lazy initialization failed earlier.
     Note that NPTH_COND_INITIALIZER must NOT be rejected here —
     materializing it is the whole purpose of this function.  The old
     code returned EINVAL for the initializer, making statically
     initialized conditions unusable.  */
  if (*cond == 0)
    return EINVAL;

  if (*cond != NPTH_COND_INITIALIZER)
    /* Already initialized. */
    return 0;

  /* Make sure we don't try again in case of error. */
  *cond = 0;

  err = npth_cond_init (cond, NULL);
  return err;
}
/* Wake up at most one thread waiting on *COND.  The caller must hold
   the global lock.  */
int
npth_cond_signal (npth_cond_t *cond)
{
  int err;
  npth_impl_t thread;
  DWORD res;

  /* While we are protected, let's check for a static initializer. */
  err = cond_init_check (cond);
  if (err)
    return err;

  /* NOTE(review): WAITER is compared against INVALID_THREAD_ID here
     but treated as a NULL-terminated queue in npth_cond_broadcast and
     npth_cond_wait — confirm the two sentinels are equivalent.  */
  if ((*cond)->waiter == INVALID_THREAD_ID)
    return 0;

  /* Dequeue the first thread and wake it up. */
  thread = (*cond)->waiter;
  dequeue_thread (thread);

  res = SetEvent (thread->event);
  if (res == 0)
    /* FIXME: An error here implies a mistake in the npth code. Log it. */
    ;

  /* Force the woken up thread into the mutex lock function (for the
     mutex associated with the condition, which is why we have to
     release the global lock here). This helps to ensure fairness,
     because otherwise our own thread might release and reacquire the
     lock first (followed by removing the condition that lead to the
     wakeup) and starve the woken up thread. */
  ENTER ();
  Sleep (0);
  LEAVE ();

  return 0;
}
/* Wake up all threads waiting on *COND.  The caller must hold the
   global lock.  */
int
npth_cond_broadcast (npth_cond_t *cond)
{
  int err;
  npth_impl_t thread;
  DWORD res;

  /* While we are protected, let's check for a static initializer. */
  err = cond_init_check (cond);
  if (err)
    return err;

  /* NOTE(review): compared against INVALID_THREAD_ID here but used
     as a NULL-terminated queue in the loop below — confirm the two
     sentinels agree.  */
  if ((*cond)->waiter == INVALID_THREAD_ID)
    return 0;

  /* Drain the whole waiter queue, waking each thread in FIFO
     order.  */
  while ((*cond)->waiter)
    {
      /* Dequeue the first thread and wake it up. */
      thread = (*cond)->waiter;
      dequeue_thread (thread);

      res = SetEvent (thread->event);
      if (res == 0)
	/* FIXME: An error here implies a mistake in the npth code. Log it. */
	;
    }

  /* Force the woken up threads into the mutex lock function (for the
     mutex associated with the condition, which is why we have to
     release the global lock here). This helps to ensure fairness,
     because otherwise our own thread might release and reacquire the
     lock first (followed by removing the condition that lead to the
     wakeup) and starve the woken up threads. */
  ENTER ();
  Sleep (0);
  LEAVE ();

  return 0;
}
/* As a special exception in W32 NPTH, mutex can be NULL, in which
   case the global lock doubles as the mutex protecting the condition.
   This is used internally in the RW implementation as an
   optimization. Note that this is safe as long as the caller does
   not yield to other threads (directly or indirectly) between
   checking the condition and waiting on it. */
/* Atomically release MUTEX (if given), wait for a signal or
   broadcast on *COND, and reacquire MUTEX before returning.  The
   caller must hold the global lock.  Returns 0 or an errno value. */
int
npth_cond_wait (npth_cond_t *cond, npth_mutex_t *mutex)
{
  int err;
  int err2;
  BOOL bres;
  npth_impl_t thread;
  npth_impl_t *prev_ptr;

  /* While we are protected, let's check for a static initializer. */
  err = cond_init_check (cond);
  if (err)
    return err;

  err = find_thread (npth_self(), &thread);
  if (err)
    return err;

  /* Ensure there is an event.  The per-thread event is created
     lazily on the first wait and reused afterwards.  */
  if (thread->event == INVALID_HANDLE_VALUE)
    {
      /* NOTE(review): CreateEvent is documented to return NULL on
	 failure, not INVALID_HANDLE_VALUE — confirm this check.  */
      thread->event = CreateEvent (NULL, TRUE, FALSE, NULL);
      if (thread->event == INVALID_HANDLE_VALUE)
	return map_error (GetLastError());
    }

  /* Find end of queue and enqueue the thread. */
  prev_ptr = &(*cond)->waiter;
  while (*prev_ptr)
    prev_ptr = &(*prev_ptr)->next;
  enqueue_thread (thread, prev_ptr);

  /* Make sure the event is not signaled before releasing the mutex. */
  bres = ResetEvent (thread->event);
  if (bres == 0)
    /* Log an error. */
    ;

  /* Only after we are enqueued may the mutex be released; the global
     lock still protects us against a lost wakeup.  */
  if (mutex)
    {
      err = npth_mutex_unlock (mutex);
      if (err)
	{
	  dequeue_thread (thread);
	  return err;
	}
    }

  ENTER();
  err = wait_for_single_object (thread->event, INFINITE);
  LEAVE();

  /* Make sure the thread is dequeued (in case of error). */
  dequeue_thread (thread);

  if (mutex)
    {
      err2 = npth_mutex_lock (mutex);
      if (err2)
	/* FIXME: Log this at least. */
	;
    }

  if (err)
    return err;

  return 0;
}
/* Like npth_cond_wait, but give up once the absolute time ABSTIME
   has passed, returning ETIMEDOUT.  As with npth_cond_wait, MUTEX
   may be NULL, in which case the global lock doubles as the mutex
   (the rwlock timed functions rely on this).  The caller must hold
   the global lock.  */
int
npth_cond_timedwait (npth_cond_t *cond, npth_mutex_t *mutex,
		     const struct timespec *abstime)
{
  int err;
  int err2;
  BOOL bres;
  npth_impl_t thread;
  npth_impl_t *prev_ptr;
  DWORD msecs;

  err = calculate_timeout (abstime, &msecs);
  if (err)
    {
      if (err != ETIMEDOUT)
	return err;

      /* We have to give up the lock anyway to give others a chance to
	 signal or broadcast.  Fixed: MUTEX may be NULL here (see
	 npth_cond_wait); the old code dereferenced it
	 unconditionally.  */
      if (mutex)
	{
	  err = npth_mutex_unlock (mutex);
	  if (err)
	    return err;
	}

      ENTER();
      Sleep (0);
      LEAVE();

      if (mutex)
	{
	  err = npth_mutex_lock (mutex);
	  if (err)
	    return (err);
	}

      return ETIMEDOUT;
    }

  /* While we are protected, let's check for a static initializer. */
  err = cond_init_check (cond);
  if (err)
    return err;

  err = find_thread (npth_self(), &thread);
  if (err)
    return err;

  /* Ensure there is an event. */
  if (thread->event == INVALID_HANDLE_VALUE)
    {
      thread->event = CreateEvent (NULL, TRUE, FALSE, NULL);
      if (thread->event == INVALID_HANDLE_VALUE)
	return map_error (GetLastError());
    }

  /* Make sure the event is not signaled. */
  bres = ResetEvent (thread->event);
  if (bres == 0)
    /* Log an error. */
    ;

  /* Find end of queue and enqueue the thread. */
  prev_ptr = &(*cond)->waiter;
  while (*prev_ptr)
    prev_ptr = &(*prev_ptr)->next;
  enqueue_thread (thread, prev_ptr);

  /* Fixed: honor a NULL MUTEX here as well.  */
  if (mutex)
    {
      err = npth_mutex_unlock (mutex);
      if (err)
	{
	  dequeue_thread (thread);
	  return err;
	}
    }

  ENTER();
  err = wait_for_single_object (thread->event, msecs);
  LEAVE();

  /* Fixed: make sure the thread is dequeued, as npth_cond_wait does;
     without this a timed-out waiter stayed in the queue and
     corrupted it.  */
  dequeue_thread (thread);

  if (mutex)
    {
      err2 = npth_mutex_lock (mutex);
      if (err2)
	/* FIXME: Log this at least. */
	;
    }

  if (err)
    return err;

  return 0;
}
/* Read-write lock attribute object.  Only the reader/writer
   preference kind is recorded.  */
struct npth_rwlockattr_s
{
  int kind;
};
/* Allocate a new rwlock attribute object with the default kind and
   return it in ATTR_R.  Returns 0 or an errno value.  */
int
npth_rwlockattr_init (npth_rwlockattr_t *attr_r)
{
  npth_rwlockattr_t new_attr = malloc (sizeof *new_attr);

  if (new_attr == NULL)
    return errno;

  new_attr->kind = NPTH_RWLOCK_DEFAULT_NP;
  *attr_r = new_attr;
  return 0;
}
/* Destroy the rwlock attribute object and clear the caller's
   pointer.  Always succeeds.  */
int
npth_rwlockattr_destroy (npth_rwlockattr_t *attr)
{
  npth_rwlockattr_t old = *attr;

  *attr = NULL;
  free (old);
  return 0;
}
int
npth_rwlockattr_gettype_np (const npth_rwlockattr_t *attr,
int *kind)
{
*kind = (*attr)->kind;
return 0;
}
/* Set the kind of ATTR.  Only the three known rwlock preference
   kinds are accepted; anything else yields EINVAL.  */
int
npth_rwlockattr_settype_np (npth_rwlockattr_t *attr, int kind)
{
  switch (kind)
    {
    case NPTH_RWLOCK_PREFER_READER_NP:
    case NPTH_RWLOCK_PREFER_WRITER_NP:
    case NPTH_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP:
      (*attr)->kind = kind;
      return 0;
    default:
      return EINVAL;
    }
}
/* Read-write lock state.  All fields are protected by the global
   lock.  */
struct npth_rwlock_s
{
  /* Objects are protected by the global lock, so no lock here
     necessary. This is even true for the condition (by specifying
     NULL as the mutex in npth_cond_wait and npth_cond_timedwait). */
  /* True if we prefer writers over readers. */
  int prefer_writer;
  /* Readers who want the lock wait on this condition, which is
     broadcast when the last writer goes away. */
  npth_cond_t reader_wait;
  /* The number of readers waiting on the condition. */
  int nr_readers_queued;
  /* The number of current readers. */
  int nr_readers;
  /* Writers who want the lock wait on this condition, which is
     signaled when the current writer or last reader goes away. */
  npth_cond_t writer_wait;
  /* The number of queued writers. */
  int nr_writers_queued;
  /* The number of current writers. This is either 1 (then nr_readers
     is 0) or it is 0. At unlock time this value tells us if the
     current lock holder is a writer or a reader. */
  int nr_writers;
};
/* Create a new read-write lock and return it in RWLOCK_R.
   USER_ATTR may be NULL; only the writer-nonrecursive preference is
   honored.  Returns 0 or an errno value.  */
int
npth_rwlock_init (npth_rwlock_t *rwlock_r,
		  const npth_rwlockattr_t *user_attr)
{
  int err;
  npth_rwlock_t rw;

  /* *RWLOCK_R may hold garbage (for example a static initializer),
     so it is never examined here.  */
  rw = malloc (sizeof *rw);
  if (rw == NULL)
    {
      err = errno;
      goto err_out;
    }

  rw->prefer_writer
    = (user_attr
       && (*user_attr)->kind == NPTH_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);

  err = npth_cond_init (&rw->reader_wait, NULL);
  if (err)
    {
      free (rw);
      goto err_out;
    }

  err = npth_cond_init (&rw->writer_wait, NULL);
  if (err)
    {
      npth_cond_destroy (&rw->reader_wait);
      free (rw);
      goto err_out;
    }

  rw->nr_readers = 0;
  rw->nr_readers_queued = 0;
  rw->nr_writers = 0;
  rw->nr_writers_queued = 0;
  *rwlock_r = rw;

  /* ERR is 0 here on the success path.  */
 err_out:
  return err;
}
#if 0 /* Not used. */
/* Must be called with global lock held. */
/* Replace a static-initializer sentinel in *RWLOCK by a real lock
   object of the corresponding kind.  */
static int
rwlock_init_check (npth_rwlock_t *rwlock)
{
  int err;
  npth_rwlockattr_t attr;
  int kind;

  if (*rwlock == 0)
    return EINVAL;

  /* Map each static initializer to its preference kind.  Fixed: the
     second test must be "else if" — without it, the plain
     NPTH_RWLOCK_INITIALIZER case fell through into the final "else"
     and was mistaken for an already initialized lock.  */
  if ((*rwlock) == NPTH_RWLOCK_INITIALIZER)
    kind = NPTH_RWLOCK_PREFER_READER_NP;
  else if ((*rwlock) == NPTH_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP)
    kind = NPTH_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP;
  else
    /* Already initialized. */
    return 0;

  /* Make sure we don't try again in case of error. */
  *rwlock = 0;

  err = npth_rwlockattr_init (&attr);
  if (err)
    return err;

  err = npth_rwlockattr_settype_np (&attr, kind);
  if (err)
    {
      npth_rwlockattr_destroy (&attr);
      return err;
    }

  err = npth_rwlock_init (rwlock, &attr);
  npth_rwlockattr_destroy (&attr);
  return err;
}
#endif
/* Destroy RWLOCK.  Fails with EBUSY while the lock is held or
   threads are queued on it.  Returns 0 or an errno value.  */
int
npth_rwlock_destroy (npth_rwlock_t *rwlock)
{
  int err;

  /* Reject an uninitialized lock and both static initializers (the
     writer-nonrecursive one was previously missing from this
     guard).  */
  if (*rwlock == 0 || *rwlock == NPTH_RWLOCK_INITIALIZER
      || *rwlock == NPTH_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP)
    return EINVAL;

  if ((*rwlock)->nr_writers || (*rwlock)->nr_readers
      || (*rwlock)->nr_writers_queued || (*rwlock)->nr_readers_queued)
    return EBUSY;

  err = npth_cond_destroy (&(*rwlock)->reader_wait);
  if (err)
    /* FIXME: Log this. */
    ;

  err = npth_cond_destroy (&(*rwlock)->writer_wait);
  if (err)
    /* FIXME: Log this. */
    ;

  /* Fixed: free the lock object (*RWLOCK), not the caller's pointer
     variable (the old code passed RWLOCK to free, cf. the correct
     pattern in npth_mutex_destroy).  */
  free (*rwlock);
  *rwlock = NULL;
  return 0;
}
/* Try to take RWLOCK for reading without blocking.  Returns 0 or
   EBUSY.  */
int
npth_rwlock_tryrdlock (npth_rwlock_t *rwlock)
{
  npth_rwlock_t rw = *rwlock;

  /* Readers are excluded while a writer holds the lock, and also
     while writers are queued on a writer-preferring lock.  */
  if (rw->nr_writers)
    return EBUSY;
  if (rw->prefer_writer && rw->nr_writers_queued)
    return EBUSY;

  rw->nr_readers++;
  return 0;
}
/* Take RWLOCK for reading, waiting on the reader condition while
   the lock is unavailable.  Returns 0 or an errno value.  */
int
npth_rwlock_rdlock (npth_rwlock_t *rwlock)
{
  for (;;)
    {
      /* Quick check. */
      int err = npth_rwlock_tryrdlock (rwlock);

      if (err != EBUSY)
	return err;

      /* The global lock stands in for the mutex (NULL argument), see
	 the comment before npth_cond_wait.  */
      (*rwlock)->nr_readers_queued++;
      err = npth_cond_wait (&(*rwlock)->reader_wait, NULL);
      (*rwlock)->nr_readers_queued--;
      if (err)
	return err;
    }
}
/* Like npth_rwlock_rdlock, but give up once the absolute time
   ABSTIME has passed.  Returns 0, ETIMEDOUT, or another errno
   value.  */
int
npth_rwlock_timedrdlock (npth_rwlock_t *rwlock,
			 const struct timespec *abstime)
{
  for (;;)
    {
      /* Quick check. */
      int err = npth_rwlock_tryrdlock (rwlock);

      if (err != EBUSY)
	return err;

      (*rwlock)->nr_readers_queued++;
      err = npth_cond_timedwait (&(*rwlock)->reader_wait, NULL, abstime);
      (*rwlock)->nr_readers_queued--;
      if (err)
	return err;
    }
}
/* Try to take RWLOCK for writing without blocking.  Returns 0 or
   EBUSY.  */
int
npth_rwlock_trywrlock (npth_rwlock_t *rwlock)
{
  npth_rwlock_t rw = *rwlock;

  /* A writer needs the lock exclusively.  */
  if (rw->nr_writers || rw->nr_readers)
    return EBUSY;

  rw->nr_writers = 1;
  return 0;
}
/* Take RWLOCK for writing, waiting on the writer condition while
   the lock is unavailable.  Returns 0 or an errno value.  */
int
npth_rwlock_wrlock (npth_rwlock_t *rwlock)
{
  for (;;)
    {
      /* Quick check. */
      int err = npth_rwlock_trywrlock (rwlock);

      if (err != EBUSY)
	return err;

      (*rwlock)->nr_writers_queued++;
      err = npth_cond_wait (&(*rwlock)->writer_wait, NULL);
      (*rwlock)->nr_writers_queued--;
      if (err)
	return err;
    }
}
/* Like npth_rwlock_wrlock, but give up once the absolute time
   ABSTIME has passed.  Returns 0, ETIMEDOUT, or another errno
   value.  */
int
npth_rwlock_timedwrlock (npth_rwlock_t *rwlock,
			 const struct timespec *abstime)
{
  for (;;)
    {
      /* Quick check. */
      int err = npth_rwlock_trywrlock (rwlock);

      if (err != EBUSY)
	return err;

      (*rwlock)->nr_writers_queued++;
      err = npth_cond_timedwait (&(*rwlock)->writer_wait, NULL, abstime);
      (*rwlock)->nr_writers_queued--;
      if (err)
	return err;
    }
}
/* Release RWLOCK, held either for reading or for writing, and wake
   up the next waiter(s): a queued writer is preferred, otherwise
   all queued readers are released.  Returns 0 or an errno value.  */
int
npth_rwlock_unlock (npth_rwlock_t *rwlock)
{
  npth_rwlock_t rw = *rwlock;
  int err;

  if (rw->nr_writers)
    /* We are the writer. */
    rw->nr_writers = 0;
  else
    /* We are a reader. */
    rw->nr_readers--;

  /* Only the last lock holder hands the lock on.  */
  if (rw->nr_readers == 0)
    {
      if (rw->nr_writers_queued)
	{
	  err = npth_cond_signal (&rw->writer_wait);
	  if (err)
	    return err;
	}
      else if (rw->nr_readers_queued)
	{
	  err = npth_cond_broadcast (&rw->reader_wait);
	  return err;
	}
    }

  return 0;
}
/* Standard POSIX Replacement API */

/* Sleep for USEC microseconds, rounded up to whole milliseconds,
   without holding the global lock.  Always returns 0.  */
int
npth_usleep(unsigned int usec)
{
  /* Sleep has millisecond granularity; round up so a non-zero
     request never degrades to a busy poll.  */
  unsigned int msecs = (usec + 999) / 1000;

  ENTER();
  Sleep(msecs);
  LEAVE();
  return 0;
}
/* Sleep for SEC seconds without holding the global lock.  Unlike
   POSIX sleep, the remaining time is not reported; this always
   returns 0.  */
unsigned int
npth_sleep(unsigned int sec)
{
  ENTER();
  Sleep (sec * 1000);
  LEAVE();
  return 0;
}
/* Run CMD via system(3), releasing the global lock for the duration
   of the child process.  Returns the system() result.  */
int
npth_system(const char *cmd)
{
  int rc;

  ENTER();
  rc = system(cmd);
  LEAVE();
  return rc;
}
/* Not supported on Windows.  NOTE(review): this returns EOPNOTSUPP
   as the pid_t result instead of the POSIX convention of -1 with
   errno set — callers must be aware of this.  */
pid_t
npth_waitpid(pid_t pid, int *status, int options)
{
  return EOPNOTSUPP;
}
/* Wrapper for connect(2) that releases the global lock while the
   call may block.  */
int
npth_connect(int s, const struct sockaddr *addr, socklen_t addrlen)
{
  int rc;

  ENTER();
  rc = connect(s, addr, addrlen);
  LEAVE();
  return rc;
}
/* Wrapper for accept(2) that releases the global lock while the
   call may block.  */
int
npth_accept(int s, struct sockaddr *addr, socklen_t *addrlen)
{
  int rc;

  ENTER();
  rc = accept(s, addr, addrlen);
  LEAVE();
  return rc;
}
/* Wrapper for select(2) — sockets only on Windows — that releases
   the global lock while the call may block.  */
int
npth_select(int nfd, fd_set *rfds, fd_set *wfds, fd_set *efds,
	    struct timeval *timeout)
{
  int rc;

  ENTER();
  rc = select(nfd, rfds, wfds, efds, timeout);
  LEAVE();
  return rc;
}
/* Wrapper for read(2) that releases the global lock while the call
   may block.  */
ssize_t
npth_read(int fd, void *buf, size_t nbytes)
{
  ssize_t rc;

  ENTER();
  rc = read(fd, buf, nbytes);
  LEAVE();
  return rc;
}
/* Wrapper for write(2) that releases the global lock while the call
   may block.  */
ssize_t
npth_write(int fd, const void *buf, size_t nbytes)
{
  ssize_t rc;

  ENTER();
  rc = write(fd, buf, nbytes);
  LEAVE();
  return rc;
}
/* Not implemented on Windows.  NOTE(review): returns EOPNOTSUPP
   directly instead of -1 with errno set — kept for compatibility
   with the existing callers.  */
int
npth_recvmsg (int fd, struct msghdr *msg, int flags)
{
  return EOPNOTSUPP;
}
/* Not implemented on Windows.  NOTE(review): returns EOPNOTSUPP
   directly instead of -1 with errno set — kept for compatibility
   with the existing callers.  */
int
npth_sendmsg (int fd, const struct msghdr *msg, int flags)
{
  return EOPNOTSUPP;
}
/* Leave the nPth-protected region before a potentially blocking
   operation; pair with npth_protect.  */
void
npth_unprotect (void)
{
  /* If we are not initialized we may not access the semaphore and
   * thus we shortcut it. Note that in this case the unprotect/protect
   * is not needed. For failsafe reasons if an nPth thread has ever
   * been created but nPth has accidentally not initialized we do not
   * shortcut so that a stack backtrace (due to the access of the
   * uninitialized semaphore) is more expressive. */
  if (initialized_or_any_threads)
    ENTER();
}
/* Re-enter the nPth-protected region after a blocking operation;
   pairs with npth_unprotect.  */
void
npth_protect (void)
{
  /* See npth_unprotect for commentary. */
  if (initialized_or_any_threads)
    LEAVE();
}
/* Return true while this thread holds the sceptre, i.e. is inside
   the protected region.  Used to debug unbalanced
   npth_unprotect/npth_protect pairs.  */
int
npth_is_protected (void)
{
  return got_sceptre;
}
/* Maximum number of extra handles. We can only support 31 as that is
the number of bits we can return. This is smaller than the maximum
number of allowed wait objects for WFMO (which is 64). */
#define MAX_EVENTS 31
/* Although the WSAEventSelect machinery seems to have no limit on the
number of selectable fds, we impose the same limit as used by
traditional select. This allows us to work with a static data
structure instead of an dynamically allocated array. */
#define MAX_FDOBJS FD_SETSIZE
/* Using WFMO even for sockets makes Windows objects more composable,
which helps faking signals and other constructs, so we support
that. You can still use npth_select for the plain select
function. */
/* Wait for readiness on the socket fd sets and on the
   INVALID_HANDLE_VALUE terminated list of extra EVENTS (at most
   MAX_EVENTS of them), releasing the global lock while blocked.  On
   return the fd sets are replaced by the ready sets, *EVENTS_SET has
   one bit per signaled extra event (events are consumed with
   ResetEvent), and the number of ready objects is returned.
   Returns 0 on timeout, or -1 with errno set on error.  */
int
npth_eselect(int nfd, fd_set *rfds, fd_set *wfds, fd_set *efds,
	     const struct timespec *timeout,
	     HANDLE *events, unsigned int *events_set)
{
  int err = 0;
  DWORD msecs;
  int i;
  u_int idx;
  /* One more for the handle associated with socket events. */
  HANDLE obj[MAX_EVENTS + 1];
  int nr_obj = 0;
  /* Number of extra events. */
  int nr_events = 0;
  HANDLE sock_event = INVALID_HANDLE_VALUE;
  int res;
  DWORD ret;
  SOCKET fd;
  long flags;
  int cnt;
  struct {
    SOCKET fd;
    long flags;
  } fdobj[MAX_FDOBJS];
  int nr_fdobj = 0;

  (void)nfd;  /* No need for it under Windows. */

  if (events)
    {
      if (!events_set)
	{
	  errno = EINVAL;
	  return -1;
	}
      /* We always ensure that the events_set is valid, even after an
	 error. */
      *events_set = 0;
    }

  if (timeout && (timeout->tv_sec < 0 || timeout->tv_nsec < 0))
    {
      errno = EINVAL;
      return -1;
    }

  /* Convert the timeout, rounding a tiny non-zero value up to 1 ms
     so it is not mistaken for a pure poll.  */
  if (timeout == NULL)
    msecs = INFINITE;
  else if (timeout->tv_sec == 0 && timeout->tv_nsec == 0)
    msecs = 0;
  else
    {
      msecs = (timeout->tv_sec * 1000) + (timeout->tv_nsec + 999999) / 1000000;
      if (msecs < 1)
	msecs = 1;
    }

  if (events)
    {
      /* Copy the extra handles. */
      for (i = 0; i < MAX_EVENTS; i++)
	{
	  if (events[i] == INVALID_HANDLE_VALUE)
	    break;
	  obj[nr_obj] = events[i];
	  nr_obj++;
	  nr_events++;
	}

      /* We can only return the status of up to MAX_EVENTS handles in
	 EVENTS_SET; the terminator must follow immediately. */
      if (events[i] != INVALID_HANDLE_VALUE)
	{
	  errno = EINVAL;
	  return -1;
	}
    }

  /* From here on, we clean up at err_out, and you can set ERR to
     return an error. */
  /* NOTE(review): WSACreateEvent is documented to return
     WSA_INVALID_EVENT (NULL) on failure; confirm the
     INVALID_HANDLE_VALUE comparison used throughout.  */
  sock_event = WSACreateEvent ();
  if (sock_event == INVALID_HANDLE_VALUE)
    {
      /* Fixed: this path used to return -1 WITHOUT setting errno;
	 route it through err_out so errno is set like on every other
	 error path.  */
      err = EINVAL;
      goto err_out;
    }
  obj[nr_obj] = sock_event;
  nr_obj++;

  /* Combine FDs from all lists into FDOBJ, merging the flags of
     sockets that appear in more than one set. */
#define SET_FDOBJ(x,v) do { \
    for (idx=0; idx < (x)->fd_count; idx++) \
      { \
	for (i=0; i < nr_fdobj; i++) \
	  if (fdobj[i].fd == (x)->fd_array[idx]) \
	    break; \
	if (i < nr_fdobj) \
	  ; \
	else if (nr_fdobj < MAX_FDOBJS) \
	  { \
	    i = nr_fdobj++; \
	    fdobj[i].fd = (x)->fd_array[idx]; \
	    fdobj[i].flags = 0; \
	  } \
	else \
	  { \
	    err = EINVAL; \
	    goto err_out; \
	  } \
	fdobj[i].flags |= (v); \
      } \
  } while (0)

  if (rfds)
    SET_FDOBJ (rfds, FD_READ | FD_ACCEPT);
  if (wfds)
    SET_FDOBJ (wfds, FD_WRITE);
  if (efds)
    SET_FDOBJ (efds, FD_OOB | FD_CLOSE);
#undef SET_FDOBJ

  /* Register every socket with the single socket event. */
  for (i = 0; i < nr_fdobj; i++)
    {
      res = WSAEventSelect (fdobj[i].fd, sock_event, fdobj[i].flags);
      if (res == SOCKET_ERROR)
	{
	  err = map_error (WSAGetLastError());
	  goto err_out;
	}
    }

  /* Let's wait. */
  ENTER();
  ret = WaitForMultipleObjects (nr_obj, obj, FALSE, msecs);
  LEAVE();

  if (ret == WAIT_TIMEOUT)
    {
      /* err_out translates ETIMEDOUT into the select-style return
	 value 0. */
      err = ETIMEDOUT;
      goto err_out;
    }
  else if (ret == WAIT_FAILED)
    {
      err = map_error (GetLastError());
      goto err_out;
    }

  /* All other return values: We look at the objects. We must not
     fail from here, because then we could lose events. */

  /* Keep track of result count. */
  cnt = 0;

  for (i = 0; i < nr_events; i++)
    {
      ret = WaitForSingleObject (obj[i], 0);
      if (ret != WAIT_OBJECT_0)
	/* We ignore errors here. */
	continue;
      *events_set = (*events_set) | (1 << i);
      /* We consume the event here. This may be undesirable, but
	 unless we make it configurable we need a common policy,
	 and this saves the user one step. */
      ResetEvent (obj[i]);
      /* Increase result count. */
      cnt++;
    }

  /* Now update the file descriptors sets. */
  if (rfds)
    FD_ZERO (rfds);
  if (wfds)
    FD_ZERO (wfds);
  if (efds)
    FD_ZERO (efds);
  for (i = 0; i < nr_fdobj; i++)
    {
      WSANETWORKEVENTS ne;

      fd = fdobj[i].fd;
      flags = fdobj[i].flags;

      res = WSAEnumNetworkEvents (fd, NULL, &ne);
      if (res == SOCKET_ERROR)
	continue;  /* FIXME: We ignore this error here. */

      /* NB that the test on FLAGS guarantees that ?fds is not NULL. */
      if ((flags & FD_READ) && (ne.lNetworkEvents & (FD_READ | FD_ACCEPT)))
	{
	  FD_SET (fd, rfds);
	  cnt++;
	}
      if ((flags & FD_WRITE) && (ne.lNetworkEvents & FD_WRITE))
	{
	  FD_SET (fd, wfds);
	  cnt++;
	}
      if ((flags & FD_CLOSE) && (ne.lNetworkEvents & (FD_OOB | FD_CLOSE)))
	{
	  FD_SET (fd, efds);
	  cnt++;
	}

      WSAEventSelect (fd, NULL, 0);  /* We ignore errors. */
    }

  /* We ignore errors. */
  WSACloseEvent (sock_event);
  return cnt;

  /* Cleanup. */
 err_out:
  if (sock_event != INVALID_HANDLE_VALUE)
    {
      for (i = 0; i < nr_fdobj; i++)
	{
	  WSAEventSelect (fdobj[i].fd, NULL, 0);  /* We ignore errors. */
	}
      WSACloseEvent (sock_event);  /* We ignore errors. */
    }
  if (err == ETIMEDOUT)
    return 0;

  errno = err;
  return -1;
}
diff --git a/w32/npth.h b/w32/npth.h
index b344d73..bf8b5c8 100644
--- a/w32/npth.h
+++ b/w32/npth.h
@@ -1,228 +1,228 @@
/* npth.h - a lightweight implementation of pth over native threads
* Copyright (C) 2011, 2015 g10 Code GmbH
*
* This file is part of nPth.
*
* nPth is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* nPth is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see .
*/
-#ifndef NPTH_H
-#define NPTH_H
+#ifndef _NPTH_H
+#define _NPTH_H
#include
#include
#include
#include
#include
#include
#ifdef __cplusplus
extern "C" {
#if 0 /* (Keep Emacsens' auto-indent happy.) */
}
#endif
#endif
struct msghdr;
#ifndef ETIMEDOUT
#define ETIMEDOUT 10060 /* This is WSAETIMEDOUT. */
#endif
#ifndef EOPNOTSUPP
#define EOPNOTSUPP 10045 /* This is WSAEOPNOTSUPP. */
#endif
int npth_init (void);
typedef struct npth_attr_s *npth_attr_t;
typedef unsigned long int npth_t;
typedef struct npth_mutexattr_s *npth_mutexattr_t;
typedef struct npth_mutex_s *npth_mutex_t;
typedef struct npth_rwlockattr_s *npth_rwlockattr_t;
typedef struct npth_rwlock_s *npth_rwlock_t;
typedef struct npth_condattr_s *npth_condattr_t;
typedef struct npth_cond_s *npth_cond_t;
int npth_attr_init (npth_attr_t *attr);
int npth_attr_destroy (npth_attr_t *attr);
#define NPTH_CREATE_JOINABLE 0
#define NPTH_CREATE_DETACHED 1
int npth_attr_getdetachstate(npth_attr_t *attr, int *detachstate);
int npth_attr_setdetachstate(npth_attr_t *attr, int detachstate);
int npth_getname_np (npth_t target_thread, char *buf, size_t buflen);
int npth_setname_np (npth_t target_thread, const char *name);
int npth_create (npth_t *newthread, const npth_attr_t *attr,
void *(*start_routine) (void *), void *arg);
npth_t npth_self (void);
int npth_join (npth_t th, void **thread_return);
int npth_detach (npth_t th);
void npth_exit (void *retval);
typedef DWORD npth_key_t;
int npth_key_create (npth_key_t *key,
void (*destr_function) (void *));
int npth_key_delete (npth_key_t key);
void *npth_getspecific (npth_key_t key);
int npth_setspecific (npth_key_t key, const void *pointer);
int npth_mutexattr_init (npth_mutexattr_t *attr);
int npth_mutexattr_destroy (npth_mutexattr_t *attr);
int npth_mutexattr_gettype (const npth_mutexattr_t *attr,
int *kind);
int npth_mutexattr_settype (npth_mutexattr_t *attr, int kind);
#define NPTH_MUTEX_NORMAL 0
#define NPTH_MUTEX_RECURSIVE 1
#define NPTH_MUTEX_ERRORCHECK 2
#define NPTH_MUTEX_DEFAULT NPTH_MUTEX_NORMAL
#define NPTH_MUTEX_INITIALIZER ((npth_mutex_t) -1)
#define NPTH_RECURSIVE_MUTEX_INITIALIZER_NP ((npth_mutex_t) -2)
#define NPTH_ERRORCHECK_MUTEX_INITIALIZER_NP ((npth_mutex_t) -3)
int npth_mutex_init (npth_mutex_t *mutex, const npth_mutexattr_t *mutexattr);
int npth_mutex_destroy (npth_mutex_t *mutex);
int npth_mutex_trylock(npth_mutex_t *mutex);
int npth_mutex_lock(npth_mutex_t *mutex);
int npth_mutex_timedlock(npth_mutex_t *mutex, const struct timespec *abstime);
int npth_mutex_unlock(npth_mutex_t *mutex);
int npth_rwlockattr_init (npth_rwlockattr_t *attr);
int npth_rwlockattr_destroy (npth_rwlockattr_t *attr);
int npth_rwlockattr_gettype_np (const npth_rwlockattr_t *attr,
int *kind);
int npth_rwlockattr_settype_np (npth_rwlockattr_t *attr, int kind);
#define NPTH_RWLOCK_PREFER_READER_NP 0
#define NPTH_RWLOCK_PREFER_WRITER_NP 1
#define NPTH_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP 2
#define NPTH_RWLOCK_DEFAULT_NP NPTH_RWLOCK_PREFER_READER_NP
#define NPTH_RWLOCK_INITIALIZER ((npth_rwlock_t) -1)
#define NPTH_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP ((npth_rwlock_t) -2)
/* For now, we don't support any rwlock attributes. */
int npth_rwlock_init (npth_rwlock_t *rwlock,
const npth_rwlockattr_t *attr);
int npth_rwlock_destroy (npth_rwlock_t *rwlock);
int npth_rwlock_tryrdlock (npth_rwlock_t *rwlock);
int npth_rwlock_rdlock (npth_rwlock_t *rwlock);
int npth_rwlock_timedrdlock (npth_rwlock_t *rwlock,
const struct timespec *abstime);
int npth_rwlock_trywrlock (npth_rwlock_t *rwlock);
int npth_rwlock_wrlock (npth_rwlock_t *rwlock);
int npth_rwlock_timedwrlock (npth_rwlock_t *rwlock,
const struct timespec *abstime);
int npth_rwlock_unlock (npth_rwlock_t *rwlock);
#define NPTH_COND_INITIALIZER ((npth_cond_t) -1)
/* For now, we don't support any cond attributes. */
int npth_cond_init (npth_cond_t *cond,
const npth_condattr_t *cond_attr);
int npth_cond_broadcast (npth_cond_t *cond);
int npth_cond_signal (npth_cond_t *cond);
int npth_cond_destroy (npth_cond_t *cond);
int npth_cond_wait (npth_cond_t *cond, npth_mutex_t *mutex);
int npth_cond_timedwait (npth_cond_t *cond, npth_mutex_t *mutex,
const struct timespec *abstime);
int npth_usleep(unsigned int usec);
unsigned int npth_sleep(unsigned int sec);
pid_t npth_waitpid(pid_t pid, int *status, int options);
int npth_system(const char *cmd);
#if 0
/* We do not support this on windows. */
int npth_sigmask(int how, const sigset_t *set, sigset_t *oldset);
int npth_sigwait(const sigset_t *set, int *sig);
#endif
int npth_connect(int s, const struct sockaddr *addr, socklen_t addrlen);
int npth_accept(int s, struct sockaddr *addr, socklen_t *addrlen);
/* Only good for sockets! */
int npth_select(int nfd, fd_set *rfds, fd_set *wfds, fd_set *efds,
struct timeval *timeout);
#if 0
/* We do not support this on windows. */
int npth_pselect(int nfd, fd_set *rfds, fd_set *wfds, fd_set *efds,
const struct timespec *timeout, const sigset_t *sigmask);
#endif
/* Wait on the FDs (only good for sockets!) and the
INVALID_HANDLE_VALUE terminated list of extra events. On return
(even on error), the bits in EVENTS_SET will contain the extra
events that occured (which means that there can only be up to 31
extra events). */
int npth_eselect(int nfd, fd_set *rfds, fd_set *wfds, fd_set *efds,
const struct timespec *timeout,
HANDLE *events, unsigned int *events_set);
ssize_t npth_read(int fd, void *buf, size_t nbytes);
ssize_t npth_write(int fd, const void *buf, size_t nbytes);
int npth_recvmsg (int fd, struct msghdr *msg, int flags);
int npth_sendmsg (int fd, const struct msghdr *msg, int flags);
void npth_unprotect (void);
void npth_protect (void);
/* Return true when we hold the sceptre. This is used to debug
* problems with npth_unprotect and npth_protect. */
int npth_is_protected (void);
int npth_clock_gettime(struct timespec *tp);
/* CMP may be ==, < or >. Do not use <= or >=. */
#define npth_timercmp(t1, t2, cmp) \
(((t1)->tv_sec == (t2)->tv_sec) ? \
((t1)->tv_nsec cmp (t2)->tv_nsec) : \
((t1)->tv_sec cmp (t2)->tv_sec))
#define npth_timeradd(t1, t2, result) \
do { \
(result)->tv_sec = (t1)->tv_sec + (t2)->tv_sec; \
(result)->tv_nsec = (t1)->tv_nsec + (t2)->tv_nsec; \
if ((result)->tv_nsec >= 1000000000) \
{ \
++(result)->tv_sec; \
(result)->tv_nsec -= 1000000000; \
} \
} while (0)
#define npth_timersub(t1, t2, result) \
do { \
(result)->tv_sec = (t1)->tv_sec - (t2)->tv_sec; \
(result)->tv_nsec = (t1)->tv_nsec - (t2)->tv_nsec; \
if ((result)->tv_nsec < 0) { \
--(result)->tv_sec; \
(result)->tv_nsec += 1000000000; \
} \
} while (0)
#if 0
/* We do not support this on windows. */
void npth_sigev_init (void);
void npth_sigev_add (int signum);
void npth_sigev_fini (void);
sigset_t *npth_sigev_sigmask (void);
int npth_sigev_get_pending (int *r_signum);
#endif
#if 0 /* (Keep Emacsens' auto-indent happy.) */
{
#endif
#ifdef __cplusplus
}
#endif
-#endif /*NPTH_H*/
+#endif /*_NPTH_H*/