4739 lines
174 KiB
Diff
4739 lines
174 KiB
Diff
diff --git a/configure b/configure
|
|
index 74c72003827..fe26bedfce6 100755
|
|
--- a/configure
|
|
+++ b/configure
|
|
@@ -7500,6 +7500,7 @@ for ac_header in \
|
|
linux/serial.h \
|
|
linux/types.h \
|
|
linux/ucdrom.h \
|
|
+ linux/winesync.h \
|
|
lwp.h \
|
|
mach-o/loader.h \
|
|
mach/mach.h \
|
|
diff --git a/configure.ac b/configure.ac
|
|
index dc43d73858f..9006299fee5 100644
|
|
--- a/configure.ac
|
|
+++ b/configure.ac
|
|
@@ -480,6 +480,7 @@ AC_CHECK_HEADERS(\
|
|
linux/serial.h \
|
|
linux/types.h \
|
|
linux/ucdrom.h \
|
|
+ linux/winesync.h \
|
|
lwp.h \
|
|
mach-o/loader.h \
|
|
mach/mach.h \
|
|
diff --git a/dlls/kernel32/tests/sync.c b/dlls/kernel32/tests/sync.c
|
|
index 827f4c53aa0..36ed25b4db3 100644
|
|
--- a/dlls/kernel32/tests/sync.c
|
|
+++ b/dlls/kernel32/tests/sync.c
|
|
@@ -2778,6 +2778,84 @@ static void test_QueueUserAPC(void)
|
|
ok(apc_count == 1, "APC count %u\n", apc_count);
|
|
}
|
|
|
|
+static int zigzag_state, zigzag_count[2], zigzag_stop;
|
|
+
|
|
+static DWORD CALLBACK zigzag_event0(void *arg)
|
|
+{
|
|
+ HANDLE *events = arg;
|
|
+
|
|
+ while (!zigzag_stop)
|
|
+ {
|
|
+ WaitForSingleObject(events[0], INFINITE);
|
|
+ ResetEvent(events[0]);
|
|
+ ok(zigzag_state == 0, "got wrong state %d\n", zigzag_state);
|
|
+ zigzag_state++;
|
|
+ SetEvent(events[1]);
|
|
+ zigzag_count[0]++;
|
|
+ }
|
|
+ trace("thread 0 got done\n");
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static DWORD CALLBACK zigzag_event1(void *arg)
|
|
+{
|
|
+ HANDLE *events = arg;
|
|
+
|
|
+ while (!zigzag_stop)
|
|
+ {
|
|
+ WaitForSingleObject(events[1], INFINITE);
|
|
+ ResetEvent(events[1]);
|
|
+ ok(zigzag_state == 1, "got wrong state %d\n", zigzag_state);
|
|
+ zigzag_state--;
|
|
+ SetEvent(events[0]);
|
|
+ zigzag_count[1]++;
|
|
+ }
|
|
+ trace("thread 1 got done\n");
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void test_zigzag_event(void)
|
|
+{
|
|
+ /* The basic idea is to test SetEvent/Wait back and forth between two
|
|
+ * threads. Each thread clears their own event, sets some common data,
|
|
+ * signals the other's, then waits on their own. We make sure the common
|
|
+ * data is always in the right state. We also print performance data. */
|
|
+
|
|
+ HANDLE threads[2], events[2];
|
|
+ BOOL ret;
|
|
+
|
|
+ events[0] = CreateEventA(NULL, FALSE, FALSE, NULL);
|
|
+ events[1] = CreateEventA(NULL, FALSE, FALSE, NULL);
|
|
+
|
|
+ threads[0] = CreateThread(NULL, 0, zigzag_event0, events, 0, NULL);
|
|
+ threads[1] = CreateThread(NULL, 0, zigzag_event1, events, 0, NULL);
|
|
+
|
|
+ zigzag_state = 0;
|
|
+ zigzag_count[0] = zigzag_count[1] = 0;
|
|
+ zigzag_stop = 0;
|
|
+
|
|
+ trace("starting zigzag test (events)\n");
|
|
+ SetEvent(events[0]);
|
|
+ Sleep(2000);
|
|
+ zigzag_stop = 1;
|
|
+ ret = WaitForMultipleObjects(2, threads, FALSE, INFINITE);
|
|
+ trace("%d\n", ret);
|
|
+ ok(ret == 0 || ret == 1, "wait failed: %u\n", ret);
|
|
+
|
|
+ ok(zigzag_count[0] == zigzag_count[1] || zigzag_count[0] == zigzag_count[1] + 1,
|
|
+ "count did not match: %d != %d\n", zigzag_count[0], zigzag_count[1]);
|
|
+
|
|
+ /* signal the other thread to finish, if it didn't already
|
|
+ * (in theory they both would at the same time, but there's a slight race on teardown if we get
|
|
+ * thread 1 SetEvent -> thread 0 ResetEvent -> thread 0 Wait -> thread 1 exits */
|
|
+ zigzag_state = 1-ret;
|
|
+ SetEvent(events[1-ret]);
|
|
+ ret = WaitForSingleObject(threads[1-ret], 1000);
|
|
+ ok(!ret, "wait failed: %u\n", ret);
|
|
+
|
|
+ trace("count: %d\n", zigzag_count[0]);
|
|
+}
|
|
+
|
|
START_TEST(sync)
|
|
{
|
|
char **argv;
|
|
@@ -2843,5 +2921,6 @@ START_TEST(sync)
|
|
test_srwlock_example();
|
|
test_alertable_wait();
|
|
test_apc_deadlock();
|
|
+ test_zigzag_event();
|
|
test_crit_section();
|
|
}
|
|
diff --git a/dlls/ntdll/unix/file.c b/dlls/ntdll/unix/file.c
|
|
index 21f5d10f3f8..a7b1ff15376 100644
|
|
--- a/dlls/ntdll/unix/file.c
|
|
+++ b/dlls/ntdll/unix/file.c
|
|
@@ -4690,7 +4690,7 @@ static async_data_t server_async( HANDLE handle, struct async_fileio *user, HAND
|
|
|
|
static NTSTATUS wait_async( HANDLE handle, BOOL alertable )
|
|
{
|
|
- return NtWaitForSingleObject( handle, alertable, NULL );
|
|
+ return server_wait_for_object( handle, alertable, NULL );
|
|
}
|
|
|
|
/* callback for irp async I/O completion */
|
|
@@ -6021,7 +6021,7 @@ NTSTATUS WINAPI NtLockFile( HANDLE file, HANDLE event, PIO_APC_ROUTINE apc, void
|
|
}
|
|
if (handle)
|
|
{
|
|
- NtWaitForSingleObject( handle, FALSE, NULL );
|
|
+ server_wait_for_object( handle, FALSE, NULL );
|
|
NtClose( handle );
|
|
}
|
|
else /* Unix lock conflict, sleep a bit and retry */
|
|
diff --git a/dlls/ntdll/unix/process.c b/dlls/ntdll/unix/process.c
|
|
index c1ff0289cf2..153350be927 100644
|
|
--- a/dlls/ntdll/unix/process.c
|
|
+++ b/dlls/ntdll/unix/process.c
|
|
@@ -794,7 +794,7 @@ NTSTATUS WINAPI NtCreateUserProcess( HANDLE *process_handle_ptr, HANDLE *thread_
|
|
|
|
/* wait for the new process info to be ready */
|
|
|
|
- NtWaitForSingleObject( process_info, FALSE, NULL );
|
|
+ server_wait_for_object( process_info, FALSE, NULL );
|
|
SERVER_START_REQ( get_new_process_info )
|
|
{
|
|
req->info = wine_server_obj_handle( process_info );
|
|
diff --git a/dlls/ntdll/unix/server.c b/dlls/ntdll/unix/server.c
|
|
index cec2ef250a3..5754f272b21 100644
|
|
--- a/dlls/ntdll/unix/server.c
|
|
+++ b/dlls/ntdll/unix/server.c
|
|
@@ -114,7 +114,7 @@ timeout_t server_start_time = 0; /* time of server startup */
|
|
sigset_t server_block_set; /* signals to block during server calls */
|
|
static int fd_socket = -1; /* socket to exchange file descriptors with the server */
|
|
static pid_t server_pid;
|
|
-static pthread_mutex_t fd_cache_mutex = PTHREAD_MUTEX_INITIALIZER;
|
|
+pthread_mutex_t fd_cache_mutex = PTHREAD_MUTEX_INITIALIZER;
|
|
|
|
/* atomically exchange a 64-bit value */
|
|
static inline LONG64 interlocked_xchg64( LONG64 *dest, LONG64 val )
|
|
@@ -354,7 +354,7 @@ static int wait_select_reply( void *cookie )
|
|
/***********************************************************************
|
|
* invoke_user_apc
|
|
*/
|
|
-static NTSTATUS invoke_user_apc( CONTEXT *context, const user_apc_t *apc, NTSTATUS status )
|
|
+NTSTATUS invoke_user_apc( CONTEXT *context, const user_apc_t *apc, NTSTATUS status )
|
|
{
|
|
return call_user_apc_dispatcher( context, apc->args[0], apc->args[1], apc->args[2],
|
|
wine_server_get_ptr( apc->func ), status );
|
|
@@ -685,6 +685,21 @@ unsigned int server_wait( const select_op_t *select_op, data_size_t size, UINT f
|
|
}
|
|
|
|
|
|
+/* helper function to perform a server-side wait on an internal handle without
|
|
+ * using the fast synchronization path */
|
|
+unsigned int server_wait_for_object( HANDLE handle, BOOL alertable, const LARGE_INTEGER *timeout )
|
|
+{
|
|
+ select_op_t select_op;
|
|
+ UINT flags = SELECT_INTERRUPTIBLE;
|
|
+
|
|
+ if (alertable) flags |= SELECT_ALERTABLE;
|
|
+
|
|
+ select_op.wait.op = SELECT_WAIT;
|
|
+ select_op.wait.handles[0] = wine_server_obj_handle( handle );
|
|
+ return server_wait( &select_op, offsetof( select_op_t, wait.handles[1] ), flags, timeout );
|
|
+}
|
|
+
|
|
+
|
|
/***********************************************************************
|
|
* NtContinue (NTDLL.@)
|
|
*/
|
|
@@ -749,7 +764,7 @@ unsigned int server_queue_process_apc( HANDLE process, const apc_call_t *call, a
|
|
}
|
|
else
|
|
{
|
|
- NtWaitForSingleObject( handle, FALSE, NULL );
|
|
+ server_wait_for_object( handle, FALSE, NULL );
|
|
|
|
SERVER_START_REQ( get_apc_result )
|
|
{
|
|
@@ -1684,12 +1699,17 @@ NTSTATUS WINAPI NtDuplicateObject( HANDLE source_process, HANDLE source, HANDLE
|
|
return result.dup_handle.status;
|
|
}
|
|
|
|
+ /* hold fd_cache_mutex to prevent the fd from being added again between the
|
|
+ * call to remove_fd_from_cache and close_handle */
|
|
server_enter_uninterrupted_section( &fd_cache_mutex, &sigset );
|
|
|
|
/* always remove the cached fd; if the server request fails we'll just
|
|
* retrieve it again */
|
|
if (options & DUPLICATE_CLOSE_SOURCE)
|
|
+ {
|
|
fd = remove_fd_from_cache( source );
|
|
+ close_fast_sync_obj( source );
|
|
+ }
|
|
|
|
SERVER_START_REQ( dup_handle )
|
|
{
|
|
@@ -1723,12 +1743,16 @@ NTSTATUS WINAPI NtClose( HANDLE handle )
|
|
NTSTATUS ret;
|
|
int fd;
|
|
|
|
+ /* hold fd_cache_mutex to prevent the fd from being added again between the
|
|
+ * call to remove_fd_from_cache and close_handle */
|
|
server_enter_uninterrupted_section( &fd_cache_mutex, &sigset );
|
|
|
|
/* always remove the cached fd; if the server request fails we'll just
|
|
* retrieve it again */
|
|
fd = remove_fd_from_cache( handle );
|
|
|
|
+ close_fast_sync_obj( handle );
|
|
+
|
|
SERVER_START_REQ( close_handle )
|
|
{
|
|
req->handle = wine_server_obj_handle( handle );
|
|
diff --git a/dlls/ntdll/unix/signal_arm.c b/dlls/ntdll/unix/signal_arm.c
|
|
index f07f97930f1..e226225cd1a 100644
|
|
--- a/dlls/ntdll/unix/signal_arm.c
|
|
+++ b/dlls/ntdll/unix/signal_arm.c
|
|
@@ -765,6 +765,8 @@ static void usr1_handler( int signal, siginfo_t *siginfo, void *sigcontext )
|
|
save_context( &context, sigcontext );
|
|
wait_suspend( &context );
|
|
restore_context( &context, sigcontext );
|
|
+
|
|
+ // FIXME: fast alertable waits...
|
|
}
|
|
}
|
|
|
|
@@ -840,14 +842,15 @@ void signal_init_process(void)
|
|
if (sigaction( SIGABRT, &sig_act, NULL ) == -1) goto error;
|
|
sig_act.sa_sigaction = quit_handler;
|
|
if (sigaction( SIGQUIT, &sig_act, NULL ) == -1) goto error;
|
|
- sig_act.sa_sigaction = usr1_handler;
|
|
- if (sigaction( SIGUSR1, &sig_act, NULL ) == -1) goto error;
|
|
sig_act.sa_sigaction = trap_handler;
|
|
if (sigaction( SIGTRAP, &sig_act, NULL ) == -1) goto error;
|
|
sig_act.sa_sigaction = segv_handler;
|
|
if (sigaction( SIGSEGV, &sig_act, NULL ) == -1) goto error;
|
|
if (sigaction( SIGILL, &sig_act, NULL ) == -1) goto error;
|
|
if (sigaction( SIGBUS, &sig_act, NULL ) == -1) goto error;
|
|
+ sig_act.sa_sigaction = usr1_handler;
|
|
+ sig_act.sa_flags &= ~SA_RESTART; /* needed for fast sync alertable waits */
|
|
+ if (sigaction( SIGUSR1, &sig_act, NULL ) == -1) goto error;
|
|
return;
|
|
|
|
error:
|
|
diff --git a/dlls/ntdll/unix/signal_arm64.c b/dlls/ntdll/unix/signal_arm64.c
|
|
index 079064eee4b..206678206dc 100644
|
|
--- a/dlls/ntdll/unix/signal_arm64.c
|
|
+++ b/dlls/ntdll/unix/signal_arm64.c
|
|
@@ -922,6 +922,8 @@ static void usr1_handler( int signal, siginfo_t *siginfo, void *sigcontext )
|
|
save_context( &context, sigcontext );
|
|
wait_suspend( &context );
|
|
restore_context( &context, sigcontext );
|
|
+
|
|
+ // FIXME: fast alertable waits...
|
|
}
|
|
}
|
|
|
|
@@ -1015,8 +1017,6 @@ void signal_init_process(void)
|
|
if (sigaction( SIGABRT, &sig_act, NULL ) == -1) goto error;
|
|
sig_act.sa_sigaction = quit_handler;
|
|
if (sigaction( SIGQUIT, &sig_act, NULL ) == -1) goto error;
|
|
- sig_act.sa_sigaction = usr1_handler;
|
|
- if (sigaction( SIGUSR1, &sig_act, NULL ) == -1) goto error;
|
|
sig_act.sa_sigaction = usr2_handler;
|
|
if (sigaction( SIGUSR2, &sig_act, NULL ) == -1) goto error;
|
|
sig_act.sa_sigaction = trap_handler;
|
|
@@ -1027,6 +1027,9 @@ void signal_init_process(void)
|
|
if (sigaction( SIGILL, &sig_act, NULL ) == -1) goto error;
|
|
sig_act.sa_sigaction = bus_handler;
|
|
if (sigaction( SIGBUS, &sig_act, NULL ) == -1) goto error;
|
|
+ sig_act.sa_sigaction = usr1_handler;
|
|
+ sig_act.sa_flags &= ~SA_RESTART; /* needed for fast sync alertable waits */
|
|
+ if (sigaction( SIGUSR1, &sig_act, NULL ) == -1) goto error;
|
|
return;
|
|
|
|
error:
|
|
diff --git a/dlls/ntdll/unix/signal_i386.c b/dlls/ntdll/unix/signal_i386.c
|
|
index b5afbcf0aac..0a3b8dc115d 100644
|
|
--- a/dlls/ntdll/unix/signal_i386.c
|
|
+++ b/dlls/ntdll/unix/signal_i386.c
|
|
@@ -2010,6 +2010,9 @@ static void usr1_handler( int signal, siginfo_t *siginfo, void *sigcontext )
|
|
NtGetContextThread( GetCurrentThread(), &xcontext.c );
|
|
wait_suspend( &xcontext.c );
|
|
NtSetContextThread( GetCurrentThread(), &xcontext.c );
|
|
+
|
|
+ if (ntdll_get_thread_data()->in_fast_alert_wait)
|
|
+ siglongjmp( ntdll_get_thread_data()->fast_alert_buf, 1 );
|
|
}
|
|
else
|
|
{
|
|
@@ -2336,14 +2339,15 @@ void signal_init_process(void)
|
|
if (sigaction( SIGABRT, &sig_act, NULL ) == -1) goto error;
|
|
sig_act.sa_sigaction = quit_handler;
|
|
if (sigaction( SIGQUIT, &sig_act, NULL ) == -1) goto error;
|
|
- sig_act.sa_sigaction = usr1_handler;
|
|
- if (sigaction( SIGUSR1, &sig_act, NULL ) == -1) goto error;
|
|
sig_act.sa_sigaction = trap_handler;
|
|
if (sigaction( SIGTRAP, &sig_act, NULL ) == -1) goto error;
|
|
sig_act.sa_sigaction = segv_handler;
|
|
if (sigaction( SIGSEGV, &sig_act, NULL ) == -1) goto error;
|
|
if (sigaction( SIGILL, &sig_act, NULL ) == -1) goto error;
|
|
if (sigaction( SIGBUS, &sig_act, NULL ) == -1) goto error;
|
|
+ sig_act.sa_sigaction = usr1_handler;
|
|
+ sig_act.sa_flags &= ~SA_RESTART; /* needed for fast sync alertable waits */
|
|
+ if (sigaction( SIGUSR1, &sig_act, NULL ) == -1) goto error;
|
|
return;
|
|
|
|
error:
|
|
diff --git a/dlls/ntdll/unix/signal_x86_64.c b/dlls/ntdll/unix/signal_x86_64.c
|
|
index 148e3641d0e..4d6201e7156 100644
|
|
--- a/dlls/ntdll/unix/signal_x86_64.c
|
|
+++ b/dlls/ntdll/unix/signal_x86_64.c
|
|
@@ -2478,6 +2478,9 @@ static void usr1_handler( int signal, siginfo_t *siginfo, void *ucontext )
|
|
NtGetContextThread( GetCurrentThread(), &context.c );
|
|
wait_suspend( &context.c );
|
|
NtSetContextThread( GetCurrentThread(), &context.c );
|
|
+
|
|
+ if (ntdll_get_thread_data()->in_fast_alert_wait)
|
|
+ siglongjmp( ntdll_get_thread_data()->fast_alert_buf, 1 );
|
|
}
|
|
else
|
|
{
|
|
@@ -2649,14 +2652,15 @@ void signal_init_process(void)
|
|
if (sigaction( SIGABRT, &sig_act, NULL ) == -1) goto error;
|
|
sig_act.sa_sigaction = quit_handler;
|
|
if (sigaction( SIGQUIT, &sig_act, NULL ) == -1) goto error;
|
|
- sig_act.sa_sigaction = usr1_handler;
|
|
- if (sigaction( SIGUSR1, &sig_act, NULL ) == -1) goto error;
|
|
sig_act.sa_sigaction = trap_handler;
|
|
if (sigaction( SIGTRAP, &sig_act, NULL ) == -1) goto error;
|
|
sig_act.sa_sigaction = segv_handler;
|
|
if (sigaction( SIGSEGV, &sig_act, NULL ) == -1) goto error;
|
|
if (sigaction( SIGILL, &sig_act, NULL ) == -1) goto error;
|
|
if (sigaction( SIGBUS, &sig_act, NULL ) == -1) goto error;
|
|
+ sig_act.sa_sigaction = usr1_handler;
|
|
+ sig_act.sa_flags &= ~SA_RESTART; /* needed for fast sync alertable waits */
|
|
+ if (sigaction( SIGUSR1, &sig_act, NULL ) == -1) goto error;
|
|
return;
|
|
|
|
error:
|
|
diff --git a/dlls/ntdll/unix/socket.c b/dlls/ntdll/unix/socket.c
|
|
index c1cd319ccf9..2e9f5230629 100644
|
|
--- a/dlls/ntdll/unix/socket.c
|
|
+++ b/dlls/ntdll/unix/socket.c
|
|
@@ -107,7 +107,7 @@ static async_data_t server_async( HANDLE handle, struct async_fileio *user, HAND
|
|
|
|
static NTSTATUS wait_async( HANDLE handle, BOOL alertable )
|
|
{
|
|
- return NtWaitForSingleObject( handle, alertable, NULL );
|
|
+ return server_wait_for_object( handle, alertable, NULL );
|
|
}
|
|
|
|
union unix_sockaddr
|
|
diff --git a/dlls/ntdll/unix/sync.c b/dlls/ntdll/unix/sync.c
|
|
index 16635ee42fa..31af5cf8040 100644
|
|
--- a/dlls/ntdll/unix/sync.c
|
|
+++ b/dlls/ntdll/unix/sync.c
|
|
@@ -30,8 +30,15 @@
|
|
|
|
#include <assert.h>
|
|
#include <errno.h>
|
|
+#include <inttypes.h>
|
|
#include <limits.h>
|
|
#include <signal.h>
|
|
+#ifdef HAVE_SYS_IOCTL_H
|
|
+#include <sys/ioctl.h>
|
|
+#endif
|
|
+#ifdef HAVE_SYS_MMAN_H
|
|
+#include <sys/mman.h>
|
|
+#endif
|
|
#ifdef HAVE_SYS_SYSCALL_H
|
|
#include <sys/syscall.h>
|
|
#endif
|
|
@@ -61,6 +68,9 @@
|
|
# include <mach/semaphore.h>
|
|
# include <mach/mach_time.h>
|
|
#endif
|
|
+#ifdef HAVE_LINUX_WINESYNC_H
|
|
+# include <linux/winesync.h>
|
|
+#endif
|
|
|
|
#include "ntstatus.h"
|
|
#define WIN32_NO_STATUS
|
|
@@ -141,173 +151,1194 @@ static inline int use_futexes(void)
|
|
{
|
|
static int supported = -1;
|
|
|
|
- if (supported == -1)
|
|
+ if (supported == -1)
|
|
+ {
|
|
+ futex_wait( &supported, 10, NULL );
|
|
+ if (errno == ENOSYS)
|
|
+ {
|
|
+ futex_private = 0;
|
|
+ futex_wait( &supported, 10, NULL );
|
|
+ }
|
|
+ supported = (errno != ENOSYS);
|
|
+ }
|
|
+ return supported;
|
|
+}
|
|
+
|
|
+static int *get_futex(void **ptr)
|
|
+{
|
|
+ if (sizeof(void *) == 8)
|
|
+ return (int *)((((ULONG_PTR)ptr) + 3) & ~3);
|
|
+ else if (!(((ULONG_PTR)ptr) & 3))
|
|
+ return (int *)ptr;
|
|
+ else
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+static void timespec_from_timeout( struct timespec *timespec, const LARGE_INTEGER *timeout )
|
|
+{
|
|
+ LARGE_INTEGER now;
|
|
+ timeout_t diff;
|
|
+
|
|
+ if (timeout->QuadPart > 0)
|
|
+ {
|
|
+ NtQuerySystemTime( &now );
|
|
+ diff = timeout->QuadPart - now.QuadPart;
|
|
+ }
|
|
+ else
|
|
+ diff = -timeout->QuadPart;
|
|
+
|
|
+ timespec->tv_sec = diff / TICKSPERSEC;
|
|
+ timespec->tv_nsec = (diff % TICKSPERSEC) * 100;
|
|
+}
|
|
+
|
|
+#endif
|
|
+
|
|
+
|
|
+static BOOL compare_addr( const void *addr, const void *cmp, SIZE_T size )
|
|
+{
|
|
+ switch (size)
|
|
+ {
|
|
+ case 1:
|
|
+ return (*(const UCHAR *)addr == *(const UCHAR *)cmp);
|
|
+ case 2:
|
|
+ return (*(const USHORT *)addr == *(const USHORT *)cmp);
|
|
+ case 4:
|
|
+ return (*(const ULONG *)addr == *(const ULONG *)cmp);
|
|
+ case 8:
|
|
+ return (*(const ULONG64 *)addr == *(const ULONG64 *)cmp);
|
|
+ }
|
|
+
|
|
+ return FALSE;
|
|
+}
|
|
+
|
|
+
|
|
+static const char *debugstr_timeout( const LARGE_INTEGER *timeout )
|
|
+{
|
|
+ if (!timeout) return "<infinite>";
|
|
+ return wine_dbg_sprintf( "%ld.%07ld", (long)timeout->QuadPart / TICKSPERSEC,
|
|
+ (long)timeout->QuadPart % TICKSPERSEC );
|
|
+}
|
|
+
|
|
+
|
|
+/* create a struct security_descriptor and contained information in one contiguous piece of memory */
|
|
+NTSTATUS alloc_object_attributes( const OBJECT_ATTRIBUTES *attr, struct object_attributes **ret,
|
|
+ data_size_t *ret_len )
|
|
+{
|
|
+ unsigned int len = sizeof(**ret);
|
|
+ SID *owner = NULL, *group = NULL;
|
|
+ ACL *dacl = NULL, *sacl = NULL;
|
|
+ SECURITY_DESCRIPTOR *sd;
|
|
+
|
|
+ *ret = NULL;
|
|
+ *ret_len = 0;
|
|
+
|
|
+ if (!attr) return STATUS_SUCCESS;
|
|
+
|
|
+ if (attr->Length != sizeof(*attr)) return STATUS_INVALID_PARAMETER;
|
|
+
|
|
+ if ((sd = attr->SecurityDescriptor))
|
|
+ {
|
|
+ len += sizeof(struct security_descriptor);
|
|
+ if (sd->Revision != SECURITY_DESCRIPTOR_REVISION) return STATUS_UNKNOWN_REVISION;
|
|
+ if (sd->Control & SE_SELF_RELATIVE)
|
|
+ {
|
|
+ SECURITY_DESCRIPTOR_RELATIVE *rel = (SECURITY_DESCRIPTOR_RELATIVE *)sd;
|
|
+ if (rel->Owner) owner = (PSID)((BYTE *)rel + rel->Owner);
|
|
+ if (rel->Group) group = (PSID)((BYTE *)rel + rel->Group);
|
|
+ if ((sd->Control & SE_SACL_PRESENT) && rel->Sacl) sacl = (PSID)((BYTE *)rel + rel->Sacl);
|
|
+ if ((sd->Control & SE_DACL_PRESENT) && rel->Dacl) dacl = (PSID)((BYTE *)rel + rel->Dacl);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ owner = sd->Owner;
|
|
+ group = sd->Group;
|
|
+ if (sd->Control & SE_SACL_PRESENT) sacl = sd->Sacl;
|
|
+ if (sd->Control & SE_DACL_PRESENT) dacl = sd->Dacl;
|
|
+ }
|
|
+
|
|
+ if (owner) len += offsetof( SID, SubAuthority[owner->SubAuthorityCount] );
|
|
+ if (group) len += offsetof( SID, SubAuthority[group->SubAuthorityCount] );
|
|
+ if (sacl) len += sacl->AclSize;
|
|
+ if (dacl) len += dacl->AclSize;
|
|
+
|
|
+ /* fix alignment for the Unicode name that follows the structure */
|
|
+ len = (len + sizeof(WCHAR) - 1) & ~(sizeof(WCHAR) - 1);
|
|
+ }
|
|
+
|
|
+ if (attr->ObjectName)
|
|
+ {
|
|
+ if (attr->ObjectName->Length & (sizeof(WCHAR) - 1)) return STATUS_OBJECT_NAME_INVALID;
|
|
+ len += attr->ObjectName->Length;
|
|
+ }
|
|
+ else if (attr->RootDirectory) return STATUS_OBJECT_NAME_INVALID;
|
|
+
|
|
+ len = (len + 3) & ~3; /* DWORD-align the entire structure */
|
|
+
|
|
+ if (!(*ret = calloc( len, 1 ))) return STATUS_NO_MEMORY;
|
|
+
|
|
+ (*ret)->rootdir = wine_server_obj_handle( attr->RootDirectory );
|
|
+ (*ret)->attributes = attr->Attributes;
|
|
+
|
|
+ if (attr->SecurityDescriptor)
|
|
+ {
|
|
+ struct security_descriptor *descr = (struct security_descriptor *)(*ret + 1);
|
|
+ unsigned char *ptr = (unsigned char *)(descr + 1);
|
|
+
|
|
+ descr->control = sd->Control & ~SE_SELF_RELATIVE;
|
|
+ if (owner) descr->owner_len = offsetof( SID, SubAuthority[owner->SubAuthorityCount] );
|
|
+ if (group) descr->group_len = offsetof( SID, SubAuthority[group->SubAuthorityCount] );
|
|
+ if (sacl) descr->sacl_len = sacl->AclSize;
|
|
+ if (dacl) descr->dacl_len = dacl->AclSize;
|
|
+
|
|
+ memcpy( ptr, owner, descr->owner_len );
|
|
+ ptr += descr->owner_len;
|
|
+ memcpy( ptr, group, descr->group_len );
|
|
+ ptr += descr->group_len;
|
|
+ memcpy( ptr, sacl, descr->sacl_len );
|
|
+ ptr += descr->sacl_len;
|
|
+ memcpy( ptr, dacl, descr->dacl_len );
|
|
+ (*ret)->sd_len = (sizeof(*descr) + descr->owner_len + descr->group_len + descr->sacl_len +
|
|
+ descr->dacl_len + sizeof(WCHAR) - 1) & ~(sizeof(WCHAR) - 1);
|
|
+ }
|
|
+
|
|
+ if (attr->ObjectName)
|
|
+ {
|
|
+ unsigned char *ptr = (unsigned char *)(*ret + 1) + (*ret)->sd_len;
|
|
+ (*ret)->name_len = attr->ObjectName->Length;
|
|
+ memcpy( ptr, attr->ObjectName->Buffer, (*ret)->name_len );
|
|
+ }
|
|
+
|
|
+ *ret_len = len;
|
|
+ return STATUS_SUCCESS;
|
|
+}
|
|
+
|
|
+
|
|
+static NTSTATUS validate_open_object_attributes( const OBJECT_ATTRIBUTES *attr )
|
|
+{
|
|
+ if (!attr || attr->Length != sizeof(*attr)) return STATUS_INVALID_PARAMETER;
|
|
+
|
|
+ if (attr->ObjectName)
|
|
+ {
|
|
+ if (attr->ObjectName->Length & (sizeof(WCHAR) - 1)) return STATUS_OBJECT_NAME_INVALID;
|
|
+ }
|
|
+ else if (attr->RootDirectory) return STATUS_OBJECT_NAME_INVALID;
|
|
+
|
|
+ return STATUS_SUCCESS;
|
|
+}
|
|
+
|
|
+
|
|
+#ifdef HAVE_LINUX_WINESYNC_H
|
|
+
|
|
+static int get_fast_sync_device(void)
|
|
+{
|
|
+ static int fast_sync_fd = -2;
|
|
+
|
|
+ if (fast_sync_fd == -2)
|
|
+ {
|
|
+ HANDLE device;
|
|
+ int fd, needs_close;
|
|
+ NTSTATUS ret;
|
|
+
|
|
+ SERVER_START_REQ( get_fast_sync_device )
|
|
+ {
|
|
+ if (!(ret = wine_server_call( req ))) device = wine_server_ptr_handle( reply->handle );
|
|
+ }
|
|
+ SERVER_END_REQ;
|
|
+
|
|
+ if (!ret)
|
|
+ {
|
|
+ if (!server_get_unix_fd( device, 0, &fd, &needs_close, NULL, NULL ))
|
|
+ {
|
|
+ if (InterlockedCompareExchange( &fast_sync_fd, fd, -2 ) != -2)
|
|
+ {
|
|
+ /* someone beat us to it */
|
|
+ if (needs_close) close( fd );
|
|
+ NtClose( device );
|
|
+ }
|
|
+ /* otherwise don't close the device */
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ InterlockedCompareExchange( &fast_sync_fd, -1, -2 );
|
|
+ NtClose( device );
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ InterlockedCompareExchange( &fast_sync_fd, -1, -2 );
|
|
+ }
|
|
+ }
|
|
+ return fast_sync_fd;
|
|
+}
|
|
+
|
|
+/* It's possible for synchronization primitives to remain alive even after being
|
|
+ * closed, because a thread is still waiting on them. It's rare in practice, and
|
|
+ * documented as being undefined behaviour by Microsoft, but it works, and some
|
|
+ * applications rely on it. This means we need to refcount handles, and defer
|
|
+ * deleting them on the server side until the refcount reaches zero. We do this
|
|
+ * by having each client process hold a handle to the fast synchronization
|
|
+ * object, as well as a private refcount. When the client refcount reaches zero,
|
|
+ * it closes the handle; when all handles are closed, the server deletes the
|
|
+ * fast synchronization object.
|
|
+ *
|
|
+ * We also need this for signal-and-wait. The signal and wait operations aren't
|
|
+ * atomic, but we can't perform the signal and then return STATUS_INVALID_HANDLE
|
|
+ * for the wait—we need to either do both operations or neither. That means we
|
|
+ * need to grab references to both objects, and prevent them from being
|
|
+ * destroyed before we're done with them.
|
|
+ *
|
|
+ * We want lookup of objects from the cache to be very fast; ideally, it should
|
|
+ * be lock-free. We achieve this by using atomic modifications to "refcount",
|
|
+ * and guaranteeing that all other fields are valid and correct *as long as*
|
|
+ * refcount is nonzero, and we store the entire structure in memory which will
|
|
+ * never be freed.
|
|
+ *
|
|
+ * This means that acquiring the object can't use a simple atomic increment; it
|
|
+ * has to use a compare-and-swap loop to ensure that it doesn't try to increment
|
|
+ * an object with a zero refcount. That's still leagues better than a real lock,
|
|
+ * though, and release can be a single atomic decrement.
|
|
+ *
|
|
+ * It also means that threads modifying the cache need to take a lock, to
|
|
+ * prevent other threads from writing to it concurrently.
|
|
+ *
|
|
+ * It's possible for an object currently in use (by a waiter) to be closed and
|
|
+ * the same handle immediately reallocated to a different object. This should be
|
|
+ * a very rare situation, and in that case we simply don't cache the handle.
|
|
+ */
|
|
+struct fast_sync_cache_entry
|
|
+{
|
|
+ LONG refcount;
|
|
+ unsigned int obj;
|
|
+ enum fast_sync_type type;
|
|
+ unsigned int access;
|
|
+ BOOL closed;
|
|
+ /* handle to the underlying fast sync object, stored as obj_handle_t to save
|
|
+ * space */
|
|
+ obj_handle_t handle;
|
|
+};
|
|
+
|
|
+
|
|
+static void release_fast_sync_obj( struct fast_sync_cache_entry *cache )
|
|
+{
|
|
+ /* save the handle now; as soon as the refcount hits 0 we cannot access the
|
|
+ * cache anymore */
|
|
+ HANDLE handle = wine_server_ptr_handle( cache->handle );
|
|
+ LONG refcount = InterlockedDecrement( &cache->refcount );
|
|
+
|
|
+ assert( refcount >= 0 );
|
|
+
|
|
+ if (!refcount)
|
|
+ {
|
|
+ NTSTATUS ret;
|
|
+
|
|
+ /* we can't call NtClose here as we may be inside fd_cache_mutex */
|
|
+ SERVER_START_REQ( close_handle )
|
|
+ {
|
|
+ req->handle = wine_server_obj_handle( handle );
|
|
+ ret = wine_server_call( req );
|
|
+ }
|
|
+ SERVER_END_REQ;
|
|
+
|
|
+ assert( !ret );
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
+#define FAST_SYNC_CACHE_BLOCK_SIZE (65536 / sizeof(struct fast_sync_cache_entry))
|
|
+#define FAST_SYNC_CACHE_ENTRIES 128
|
|
+
|
|
+static struct fast_sync_cache_entry *fast_sync_cache[FAST_SYNC_CACHE_ENTRIES];
|
|
+static struct fast_sync_cache_entry fast_sync_cache_initial_block[FAST_SYNC_CACHE_BLOCK_SIZE];
|
|
+
|
|
+static inline unsigned int handle_to_index( HANDLE handle, unsigned int *entry )
|
|
+{
|
|
+ unsigned int idx = (wine_server_obj_handle(handle) >> 2) - 1;
|
|
+ *entry = idx / FAST_SYNC_CACHE_BLOCK_SIZE;
|
|
+ return idx % FAST_SYNC_CACHE_BLOCK_SIZE;
|
|
+}
|
|
+
|
|
+
|
|
+static struct fast_sync_cache_entry *cache_fast_sync_obj( HANDLE handle, obj_handle_t fast_sync, int obj,
|
|
+ enum fast_sync_type type, unsigned int access )
|
|
+{
|
|
+ unsigned int entry, idx = handle_to_index( handle, &entry );
|
|
+ struct fast_sync_cache_entry *cache;
|
|
+ sigset_t sigset;
|
|
+ int refcount;
|
|
+
|
|
+ if (entry >= FAST_SYNC_CACHE_ENTRIES)
|
|
+ {
|
|
+ FIXME( "too many allocated handles, not caching %p\n", handle );
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ if (!fast_sync_cache[entry]) /* do we need to allocate a new block of entries? */
|
|
+ {
|
|
+ if (!entry) fast_sync_cache[0] = fast_sync_cache_initial_block;
|
|
+ else
|
|
+ {
|
|
+ static const size_t size = FAST_SYNC_CACHE_BLOCK_SIZE * sizeof(struct fast_sync_cache_entry);
|
|
+ void *ptr = anon_mmap_alloc( size, PROT_READ | PROT_WRITE );
|
|
+ if (ptr == MAP_FAILED) return NULL;
|
|
+ if (InterlockedCompareExchangePointer( (void **)&fast_sync_cache[entry], ptr, NULL ))
|
|
+ munmap( ptr, size ); /* someone beat us to it */
|
|
+ }
|
|
+ }
|
|
+
|
|
+ cache = &fast_sync_cache[entry][idx];
|
|
+
|
|
+ /* Hold fd_cache_mutex instead of a separate mutex, to prevent the same
|
|
+ * race between this function and NtClose. That is, prevent the object from
|
|
+ * being cached again between close_fast_sync_obj() and close_handle. */
|
|
+ server_enter_uninterrupted_section( &fd_cache_mutex, &sigset );
|
|
+
|
|
+ if (InterlockedCompareExchange( &cache->refcount, 0, 0 ))
|
|
+ {
|
|
+ /* We lost the race with another thread trying to cache this object, or
|
|
+ * the handle is currently being used for another object (i.e. it was
|
|
+ * closed and then reused). We have no way of knowing which, and in the
|
|
+ * latter case we can't cache this object until the old one is
|
|
+ * completely destroyed, so always return failure. */
|
|
+ server_leave_uninterrupted_section( &fd_cache_mutex, &sigset );
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ cache->handle = fast_sync;
|
|
+ cache->obj = obj;
|
|
+ cache->type = type;
|
|
+ cache->access = access;
|
|
+ cache->closed = FALSE;
|
|
+ /* make sure we set the other members before the refcount; this store needs
|
|
+ * release semantics [paired with the load in get_cached_fast_sync_obj()]
|
|
+ * set the refcount to 2 (one for the handle, one for the caller) */
|
|
+ refcount = InterlockedExchange( &cache->refcount, 2 );
|
|
+ assert( !refcount );
|
|
+
|
|
+ server_leave_uninterrupted_section( &fd_cache_mutex, &sigset );
|
|
+
|
|
+ return cache;
|
|
+}
|
|
+
|
|
+
|
|
+/* returns the previous value */
|
|
+static inline LONG interlocked_inc_if_nonzero( LONG *dest )
|
|
+{
|
|
+ LONG val, tmp;
|
|
+ for (val = *dest;; val = tmp)
|
|
+ {
|
|
+ if (!val || (tmp = InterlockedCompareExchange( dest, val + 1, val )) == val)
|
|
+ break;
|
|
+ }
|
|
+ return val;
|
|
+}
|
|
+
|
|
+
|
|
+static struct fast_sync_cache_entry *get_cached_fast_sync_obj( HANDLE handle )
|
|
+{
|
|
+ unsigned int entry, idx = handle_to_index( handle, &entry );
|
|
+ struct fast_sync_cache_entry *cache;
|
|
+
|
|
+ if (entry >= FAST_SYNC_CACHE_ENTRIES || !fast_sync_cache[entry])
|
|
+ return NULL;
|
|
+
|
|
+ cache = &fast_sync_cache[entry][idx];
|
|
+
|
|
+ /* this load needs acquire semantics [paired with the store in
|
|
+ * cache_fast_sync_obj()] */
|
|
+ if (!interlocked_inc_if_nonzero( &cache->refcount ))
|
|
+ return NULL;
|
|
+
|
|
+ if (cache->closed)
|
|
+ {
|
|
+ /* The object is still being used, but "handle" has been closed. The
|
|
+ * handle value might have been reused for another object in the
|
|
+ * meantime, in which case we have to report that valid object, so
|
|
+ * force the caller to check the server. */
|
|
+ release_fast_sync_obj( cache );
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ return cache;
|
|
+}
|
|
+
|
|
+
|
|
+/* returns a pointer to a cache entry; if the object could not be cached,
|
|
+ * returns "stack_cache" instead, which should be allocated on stack */
|
|
+static NTSTATUS get_fast_sync_obj( HANDLE handle, enum fast_sync_type desired_type, ACCESS_MASK desired_access,
|
|
+ struct fast_sync_cache_entry *stack_cache,
|
|
+ struct fast_sync_cache_entry **ret_cache )
|
|
+{
|
|
+ struct fast_sync_cache_entry *cache;
|
|
+ obj_handle_t fast_sync_handle;
|
|
+ enum fast_sync_type type;
|
|
+ unsigned int access;
|
|
+ NTSTATUS ret;
|
|
+ int obj;
|
|
+
|
|
+ /* try to find it in the cache already */
|
|
+ if ((cache = get_cached_fast_sync_obj( handle )))
|
|
+ {
|
|
+ *ret_cache = cache;
|
|
+ return STATUS_SUCCESS;
|
|
+ }
|
|
+
|
|
+ /* try to retrieve it from the server */
|
|
+ SERVER_START_REQ( get_fast_sync_obj )
|
|
+ {
|
|
+ req->handle = wine_server_obj_handle( handle );
|
|
+ if (!(ret = wine_server_call( req )))
|
|
+ {
|
|
+ fast_sync_handle = reply->handle;
|
|
+ access = reply->access;
|
|
+ type = reply->type;
|
|
+ obj = reply->obj;
|
|
+ }
|
|
+ }
|
|
+ SERVER_END_REQ;
|
|
+
|
|
+ if (ret) return ret;
|
|
+
|
|
+ cache = cache_fast_sync_obj( handle, fast_sync_handle, obj, type, access );
|
|
+ if (!cache)
|
|
+ {
|
|
+ cache = stack_cache;
|
|
+ cache->handle = fast_sync_handle;
|
|
+ cache->obj = obj;
|
|
+ cache->type = type;
|
|
+ cache->access = access;
|
|
+ cache->closed = FALSE;
|
|
+ cache->refcount = 1;
|
|
+ }
|
|
+
|
|
+ *ret_cache = cache;
|
|
+
|
|
+ if (desired_type && desired_type != cache->type)
|
|
+ {
|
|
+ release_fast_sync_obj( cache );
|
|
+ return STATUS_OBJECT_TYPE_MISMATCH;
|
|
+ }
|
|
+
|
|
+ if ((cache->access & desired_access) != desired_access)
|
|
+ {
|
|
+ release_fast_sync_obj( cache );
|
|
+ return STATUS_ACCESS_DENIED;
|
|
+ }
|
|
+
|
|
+ return STATUS_SUCCESS;
|
|
+}
|
|
+
|
|
+
|
|
+/* caller must hold fd_cache_mutex */
|
|
+void close_fast_sync_obj( HANDLE handle )
|
|
+{
|
|
+ struct fast_sync_cache_entry *cache = get_cached_fast_sync_obj( handle );
|
|
+
|
|
+ if (cache)
|
|
+ {
|
|
+ cache->closed = TRUE;
|
|
+ /* once for the reference we just grabbed, and once for the handle */
|
|
+ release_fast_sync_obj( cache );
|
|
+ release_fast_sync_obj( cache );
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
+static NTSTATUS fast_release_semaphore_obj( int device, unsigned int obj, ULONG count, ULONG *prev_count )
|
|
+{
|
|
+ struct winesync_sem_args args = {0};
|
|
+ NTSTATUS ret;
|
|
+
|
|
+ args.sem = obj;
|
|
+ args.count = count;
|
|
+ ret = ioctl( device, WINESYNC_IOC_PUT_SEM, &args );
|
|
+ if (ret < 0)
|
|
+ {
|
|
+ if (errno == EOVERFLOW)
|
|
+ return STATUS_SEMAPHORE_LIMIT_EXCEEDED;
|
|
+ else
|
|
+ return errno_to_status( errno );
|
|
+ }
|
|
+ if (prev_count) *prev_count = args.count;
|
|
+ return STATUS_SUCCESS;
|
|
+}
|
|
+
|
|
+
|
|
+static NTSTATUS fast_release_semaphore( HANDLE handle, ULONG count, ULONG *prev_count )
|
|
+{
|
|
+ struct fast_sync_cache_entry stack_cache, *cache;
|
|
+ NTSTATUS ret;
|
|
+ int device;
|
|
+
|
|
+ if ((device = get_fast_sync_device()) < 0)
|
|
+ return STATUS_NOT_IMPLEMENTED;
|
|
+
|
|
+ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_SEMAPHORE,
|
|
+ SEMAPHORE_MODIFY_STATE, &stack_cache, &cache )))
|
|
+ return ret;
|
|
+
|
|
+ ret = fast_release_semaphore_obj( device, cache->obj, count, prev_count );
|
|
+
|
|
+ release_fast_sync_obj( cache );
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+
|
|
+static NTSTATUS fast_query_semaphore_obj( int device, unsigned int obj, SEMAPHORE_BASIC_INFORMATION *info )
|
|
+{
|
|
+ struct winesync_sem_args args = {0};
|
|
+ NTSTATUS ret;
|
|
+
|
|
+ args.sem = obj;
|
|
+ ret = ioctl( device, WINESYNC_IOC_READ_SEM, &args );
|
|
+
|
|
+ if (ret < 0)
|
|
+ return errno_to_status( errno );
|
|
+ info->CurrentCount = args.count;
|
|
+ info->MaximumCount = args.max;
|
|
+ return STATUS_SUCCESS;
|
|
+}
|
|
+
|
|
+
|
|
+static NTSTATUS fast_query_semaphore( HANDLE handle, SEMAPHORE_BASIC_INFORMATION *info )
|
|
+{
|
|
+ struct fast_sync_cache_entry stack_cache, *cache;
|
|
+ NTSTATUS ret;
|
|
+ int device;
|
|
+
|
|
+ if ((device = get_fast_sync_device()) < 0)
|
|
+ return STATUS_NOT_IMPLEMENTED;
|
|
+
|
|
+ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_SEMAPHORE,
|
|
+ SEMAPHORE_QUERY_STATE, &stack_cache, &cache )))
|
|
+ return ret;
|
|
+
|
|
+ ret = fast_query_semaphore_obj( device, cache->obj, info );
|
|
+
|
|
+ release_fast_sync_obj( cache );
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+
|
|
+static NTSTATUS fast_set_event_obj( int device, unsigned int obj, LONG *prev_state )
|
|
+{
|
|
+ struct winesync_sem_args args = {0};
|
|
+ NTSTATUS ret;
|
|
+
|
|
+ args.sem = obj;
|
|
+ args.count = 1;
|
|
+ ret = ioctl( device, WINESYNC_IOC_PUT_SEM, &args );
|
|
+ if (ret < 0)
|
|
+ {
|
|
+ if (errno == EOVERFLOW)
|
|
+ {
|
|
+ if (prev_state) *prev_state = 1;
|
|
+ return STATUS_SUCCESS;
|
|
+ }
|
|
+ else
|
|
+ return errno_to_status( errno );
|
|
+ }
|
|
+ if (prev_state) *prev_state = 0;
|
|
+ return STATUS_SUCCESS;
|
|
+}
|
|
+
|
|
+
|
|
+static NTSTATUS fast_set_event( HANDLE handle, LONG *prev_state )
|
|
+{
|
|
+ struct fast_sync_cache_entry stack_cache, *cache;
|
|
+ NTSTATUS ret;
|
|
+ int device;
|
|
+
|
|
+ if ((device = get_fast_sync_device()) < 0)
|
|
+ return STATUS_NOT_IMPLEMENTED;
|
|
+
|
|
+ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_EVENT, EVENT_MODIFY_STATE,
|
|
+ &stack_cache, &cache )))
|
|
+ return ret;
|
|
+
|
|
+ ret = fast_set_event_obj( device, cache->obj, prev_state );
|
|
+
|
|
+ release_fast_sync_obj( cache );
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+
|
|
+static NTSTATUS fast_reset_event_obj( int device, unsigned int obj, LONG *prev_state )
|
|
+{
|
|
+ NTSTATUS ret;
|
|
+
|
|
+ ret = ioctl( device, WINESYNC_IOC_GET_SEM, &obj );
|
|
+ if (ret < 0)
|
|
+ {
|
|
+ if (errno == EWOULDBLOCK)
|
|
+ {
|
|
+ if (prev_state) *prev_state = 0;
|
|
+ return STATUS_SUCCESS;
|
|
+ }
|
|
+ else
|
|
+ return errno_to_status( errno );
|
|
+ }
|
|
+ if (prev_state) *prev_state = 1;
|
|
+ return STATUS_SUCCESS;
|
|
+}
|
|
+
|
|
+
|
|
+static NTSTATUS fast_reset_event( HANDLE handle, LONG *prev_state )
|
|
+{
|
|
+ struct fast_sync_cache_entry stack_cache, *cache;
|
|
+ NTSTATUS ret;
|
|
+ int device;
|
|
+
|
|
+ if ((device = get_fast_sync_device()) < 0)
|
|
+ return STATUS_NOT_IMPLEMENTED;
|
|
+
|
|
+ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_EVENT, EVENT_MODIFY_STATE,
|
|
+ &stack_cache, &cache )))
|
|
+ return ret;
|
|
+
|
|
+ ret = fast_reset_event_obj( device, cache->obj, prev_state );
|
|
+
|
|
+ release_fast_sync_obj( cache );
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+
|
|
+static NTSTATUS fast_pulse_event_obj( int device, unsigned int obj, LONG *prev_state )
|
|
+{
|
|
+ struct winesync_sem_args args = {0};
|
|
+ NTSTATUS ret;
|
|
+
|
|
+ args.sem = obj;
|
|
+ args.count = 1;
|
|
+ ret = ioctl( device, WINESYNC_IOC_PULSE_SEM, &args );
|
|
+ if (ret < 0)
|
|
+ {
|
|
+ if (errno == EOVERFLOW)
|
|
+ {
|
|
+ if (prev_state) *prev_state = 1;
|
|
+ return STATUS_SUCCESS;
|
|
+ }
|
|
+ else
|
|
+ return errno_to_status( errno );
|
|
+ }
|
|
+ if (prev_state) *prev_state = 0;
|
|
+ return STATUS_SUCCESS;
|
|
+}
|
|
+
|
|
+
|
|
+static NTSTATUS fast_pulse_event( HANDLE handle, LONG *prev_state )
|
|
+{
|
|
+ struct fast_sync_cache_entry stack_cache, *cache;
|
|
+ NTSTATUS ret;
|
|
+ int device;
|
|
+
|
|
+ if ((device = get_fast_sync_device()) < 0)
|
|
+ return STATUS_NOT_IMPLEMENTED;
|
|
+
|
|
+ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_EVENT, EVENT_MODIFY_STATE,
|
|
+ &stack_cache, &cache )))
|
|
+ return ret;
|
|
+
|
|
+ ret = fast_pulse_event_obj( device, cache->obj, prev_state );
|
|
+
|
|
+ release_fast_sync_obj( cache );
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+
|
|
+static NTSTATUS fast_query_event_obj( int device, unsigned int obj, EVENT_BASIC_INFORMATION *info )
|
|
+{
|
|
+ struct winesync_sem_args args = {0};
|
|
+ NTSTATUS ret;
|
|
+
|
|
+ args.sem = obj;
|
|
+ ret = ioctl( device, WINESYNC_IOC_READ_SEM, &args );
|
|
+
|
|
+ if (ret < 0)
|
|
+ return errno_to_status( errno );
|
|
+ info->EventType = (args.flags & WINESYNC_SEM_GETONWAIT) ? SynchronizationEvent : NotificationEvent;
|
|
+ info->EventState = args.count;
|
|
+ return STATUS_SUCCESS;
|
|
+}
|
|
+
|
|
+
|
|
+static NTSTATUS fast_query_event( HANDLE handle, EVENT_BASIC_INFORMATION *info )
|
|
+{
|
|
+ struct fast_sync_cache_entry stack_cache, *cache;
|
|
+ NTSTATUS ret;
|
|
+ int device;
|
|
+
|
|
+ if ((device = get_fast_sync_device()) < 0)
|
|
+ return STATUS_NOT_IMPLEMENTED;
|
|
+
|
|
+ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_EVENT, EVENT_QUERY_STATE,
|
|
+ &stack_cache, &cache )))
|
|
+ return ret;
|
|
+
|
|
+ ret = fast_query_event_obj( device, cache->obj, info );
|
|
+
|
|
+ release_fast_sync_obj( cache );
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+
|
|
+static NTSTATUS fast_release_mutex_obj( int device, unsigned int obj, LONG *prev_count )
|
|
+{
|
|
+ struct winesync_mutex_args args = {0};
|
|
+ NTSTATUS ret;
|
|
+
|
|
+ args.mutex = obj;
|
|
+ args.owner = GetCurrentThreadId();
|
|
+ ret = ioctl( device, WINESYNC_IOC_PUT_MUTEX, &args );
|
|
+
|
|
+ if (ret < 0)
|
|
+ {
|
|
+ if (errno == EOVERFLOW)
|
|
+ return STATUS_MUTANT_LIMIT_EXCEEDED;
|
|
+ else if (errno == EPERM)
|
|
+ return STATUS_MUTANT_NOT_OWNED;
|
|
+ else
|
|
+ return errno_to_status( errno );
|
|
+ }
|
|
+ if (prev_count) *prev_count = 1 - args.count;
|
|
+ return STATUS_SUCCESS;
|
|
+}
|
|
+
|
|
+
|
|
+static NTSTATUS fast_release_mutex( HANDLE handle, LONG *prev_count )
|
|
+{
|
|
+ struct fast_sync_cache_entry stack_cache, *cache;
|
|
+ NTSTATUS ret;
|
|
+ int device;
|
|
+
|
|
+ if ((device = get_fast_sync_device()) < 0)
|
|
+ return STATUS_NOT_IMPLEMENTED;
|
|
+
|
|
+ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_MUTEX, 0, &stack_cache, &cache )))
|
|
+ return ret;
|
|
+
|
|
+ ret = fast_release_mutex_obj( device, cache->obj, prev_count );
|
|
+
|
|
+ release_fast_sync_obj( cache );
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+
|
|
+static NTSTATUS fast_query_mutex_obj( int device, unsigned int obj, MUTANT_BASIC_INFORMATION *info )
|
|
+{
|
|
+ struct winesync_mutex_args args = {0};
|
|
+ NTSTATUS ret;
|
|
+
|
|
+ args.mutex = obj;
|
|
+ ret = ioctl( device, WINESYNC_IOC_READ_MUTEX, &args );
|
|
+
|
|
+ if (ret < 0)
|
|
+ {
|
|
+ if (errno == EOWNERDEAD)
|
|
+ {
|
|
+ info->AbandonedState = TRUE;
|
|
+ info->OwnedByCaller = FALSE;
|
|
+ info->CurrentCount = 1;
|
|
+ return STATUS_SUCCESS;
|
|
+ }
|
|
+ else
|
|
+ return errno_to_status( errno );
|
|
+ }
|
|
+ info->AbandonedState = FALSE;
|
|
+ info->OwnedByCaller = (args.owner == GetCurrentThreadId());
|
|
+ info->CurrentCount = 1 - args.count;
|
|
+ return STATUS_SUCCESS;
|
|
+}
|
|
+
|
|
+
|
|
+static NTSTATUS fast_query_mutex( HANDLE handle, MUTANT_BASIC_INFORMATION *info )
|
|
+{
|
|
+ struct fast_sync_cache_entry stack_cache, *cache;
|
|
+ NTSTATUS ret;
|
|
+ int device;
|
|
+
|
|
+ if ((device = get_fast_sync_device()) < 0)
|
|
+ return STATUS_NOT_IMPLEMENTED;
|
|
+
|
|
+ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_MUTEX, MUTANT_QUERY_STATE,
|
|
+ &stack_cache, &cache )))
|
|
+ return ret;
|
|
+
|
|
+ ret = fast_query_mutex_obj( device, cache->obj, info );
|
|
+
|
|
+ release_fast_sync_obj( cache );
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+struct timespec64
|
|
+{
|
|
+ __s64 tv_sec;
|
|
+ __s64 tv_nsec;
|
|
+};
|
|
+
|
|
+static void timespec64_from_timeout( struct timespec64 *timespec, const LARGE_INTEGER *timeout )
|
|
+{
|
|
+ struct timespec now;
|
|
+ timeout_t relative;
|
|
+
|
|
+ clock_gettime( CLOCK_MONOTONIC, &now );
|
|
+
|
|
+ if (timeout->QuadPart <= 0)
|
|
+ {
|
|
+ relative = -timeout->QuadPart;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ LARGE_INTEGER system_now;
|
|
+
|
|
+ /* the system clock is probably REALTIME, so we need to convert to
|
|
+ * relative time first */
|
|
+ NtQuerySystemTime( &system_now );
|
|
+ relative = timeout->QuadPart - system_now.QuadPart;
|
|
+ }
|
|
+
|
|
+ timespec->tv_sec = now.tv_sec + (relative / TICKSPERSEC);
|
|
+ timespec->tv_nsec = now.tv_nsec + ((relative % TICKSPERSEC) * 100);
|
|
+ if (timespec->tv_nsec >= 1000000000)
|
|
+ {
|
|
+ timespec->tv_nsec -= 1000000000;
|
|
+ ++timespec->tv_sec;
|
|
+ }
|
|
+}
|
|
+
|
|
+static void select_queue( HANDLE queue )
|
|
+{
|
|
+ SERVER_START_REQ( fast_select_queue )
|
|
+ {
|
|
+ req->handle = wine_server_obj_handle( queue );
|
|
+ wine_server_call( req );
|
|
+ }
|
|
+ SERVER_END_REQ;
|
|
+}
|
|
+
|
|
+static void unselect_queue( HANDLE queue, BOOL signaled )
|
|
+{
|
|
+ SERVER_START_REQ( fast_unselect_queue )
|
|
+ {
|
|
+ req->handle = wine_server_obj_handle( queue );
|
|
+ req->signaled = signaled;
|
|
+ wine_server_call( req );
|
|
+ }
|
|
+ SERVER_END_REQ;
|
|
+}
|
|
+
|
|
+static NTSTATUS fast_wait_objs( int device, DWORD count, const unsigned int *objs, BOOLEAN wait_any,
|
|
+ BOOLEAN alertable, const LARGE_INTEGER *timeout, user_apc_t *apc )
|
|
+{
|
|
+ volatile struct winesync_wait_args args = {0};
|
|
+ struct timespec64 timespec;
|
|
+ uintptr_t timeout_ptr = 0;
|
|
+ unsigned long request;
|
|
+ NTSTATUS ret;
|
|
+
|
|
+ if (timeout && timeout->QuadPart != TIMEOUT_INFINITE)
|
|
+ {
|
|
+ timeout_ptr = (uintptr_t)×pec;
|
|
+ timespec64_from_timeout( ×pec, timeout );
|
|
+ }
|
|
+ args.objs = (uintptr_t)objs;
|
|
+ args.count = count;
|
|
+ args.owner = GetCurrentThreadId();
|
|
+ args.index = ~0u;
|
|
+
|
|
+ if (wait_any || count == 1)
|
|
+ request = WINESYNC_IOC_WAIT_ANY;
|
|
+ else
|
|
+ request = WINESYNC_IOC_WAIT_ALL;
|
|
+
|
|
+ if (alertable)
|
|
+ {
|
|
+ struct timespec64 now64;
|
|
+ struct timespec now;
|
|
+
|
|
+ /* if there is an already signaled object and an APC available, the
|
|
+ * object is returned first */
|
|
+ clock_gettime( CLOCK_MONOTONIC, &now );
|
|
+ now64.tv_sec = now.tv_sec;
|
|
+ now64.tv_nsec = now.tv_nsec;
|
|
+ args.timeout = (uintptr_t)&now64;
|
|
+ do
|
|
+ {
|
|
+ ret = ioctl( device, request, &args );
|
|
+ } while (ret < 0 && errno == EINTR);
|
|
+
|
|
+ if (ret < 0 && errno == ETIMEDOUT)
|
|
+ {
|
|
+ args.timeout = timeout_ptr;
|
|
+
|
|
+ /* When a user APC is queued to this thread, the server wakes us
|
|
+ * with SIGUSR1, whereupon usr1_handler() will longjmp here, causing
|
|
+ * us to poll for a user APC again. It's not enough simply to retry
|
|
+ * on EINTR, as we might get SIGUSR1 after checking for user APCs
|
|
+ * but before calling ioctl(). */
|
|
+
|
|
+ sigsetjmp( ntdll_get_thread_data()->fast_alert_buf, 1 );
|
|
+
|
|
+ /* If the signal arrives *after* the ioctl, and the wait succeeded,
|
|
+ * we don't want to wait again. */
|
|
+
|
|
+ if (args.index != ~0u)
|
|
+ {
|
|
+ ntdll_get_thread_data()->in_fast_alert_wait = 0;
|
|
+ ret = 0;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ ntdll_get_thread_data()->in_fast_alert_wait = 1;
|
|
+
|
|
+ SERVER_START_REQ( check_user_apc )
|
|
+ {
|
|
+ ret = wine_server_call( req );
|
|
+ }
|
|
+ SERVER_END_REQ;
|
|
+
|
|
+ if (!ret)
|
|
+ {
|
|
+ ntdll_get_thread_data()->in_fast_alert_wait = 0;
|
|
+
|
|
+ /* Retrieve the user APC. We can't actually dequeue it until
|
|
+ * after we reset in_fast_alert_wait, as otherwise we could
|
|
+ * have the thread context changed on us and drop the APC data
|
|
+ * on the floor. */
|
|
+ ret = server_select( NULL, 0, SELECT_INTERRUPTIBLE | SELECT_ALERTABLE,
|
|
+ 0, NULL, NULL, apc );
|
|
+ assert( ret == STATUS_USER_APC );
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ do
|
|
+ {
|
|
+ ret = ioctl( device, request, &args );
|
|
+ } while (ret < 0 && errno == EINTR);
|
|
+
|
|
+ ntdll_get_thread_data()->in_fast_alert_wait = 0;
|
|
+ }
|
|
+ }
|
|
+ else
|
|
{
|
|
- futex_wait( &supported, 10, NULL );
|
|
- if (errno == ENOSYS)
|
|
+ args.timeout = timeout_ptr;
|
|
+ do
|
|
{
|
|
- futex_private = 0;
|
|
- futex_wait( &supported, 10, NULL );
|
|
- }
|
|
- supported = (errno != ENOSYS);
|
|
+ ret = ioctl( device, request, &args );
|
|
+ } while (ret < 0 && errno == EINTR);
|
|
}
|
|
- return supported;
|
|
-}
|
|
|
|
-static int *get_futex(void **ptr)
|
|
-{
|
|
- if (sizeof(void *) == 8)
|
|
- return (int *)((((ULONG_PTR)ptr) + 3) & ~3);
|
|
- else if (!(((ULONG_PTR)ptr) & 3))
|
|
- return (int *)ptr;
|
|
+out:
|
|
+ if (!ret)
|
|
+ return wait_any ? args.index : 0;
|
|
+ else if (errno == EOWNERDEAD)
|
|
+ return STATUS_ABANDONED + (wait_any ? args.index : 0);
|
|
+ else if (errno == ETIMEDOUT)
|
|
+ return STATUS_TIMEOUT;
|
|
else
|
|
- return NULL;
|
|
+ return errno_to_status( errno );
|
|
}
|
|
|
|
-static void timespec_from_timeout( struct timespec *timespec, const LARGE_INTEGER *timeout )
|
|
+static NTSTATUS fast_wait( DWORD count, const HANDLE *handles, BOOLEAN wait_any,
|
|
+ BOOLEAN alertable, const LARGE_INTEGER *timeout )
|
|
{
|
|
- LARGE_INTEGER now;
|
|
- timeout_t diff;
|
|
+ struct fast_sync_cache_entry stack_cache[64], *cache[64];
|
|
+ unsigned int objs[64];
|
|
+ HANDLE queue = NULL;
|
|
+ user_apc_t apc;
|
|
+ NTSTATUS ret;
|
|
+ DWORD i, j;
|
|
+ int device;
|
|
|
|
- if (timeout->QuadPart > 0)
|
|
+ if ((device = get_fast_sync_device()) < 0)
|
|
+ return STATUS_NOT_IMPLEMENTED;
|
|
+
|
|
+ for (i = 0; i < count; ++i)
|
|
{
|
|
- NtQuerySystemTime( &now );
|
|
- diff = timeout->QuadPart - now.QuadPart;
|
|
+ if ((ret = get_fast_sync_obj( handles[i], 0, SYNCHRONIZE, &stack_cache[i], &cache[i] )))
|
|
+ {
|
|
+ for (j = 0; j < i; ++j)
|
|
+ release_fast_sync_obj( cache[j] );
|
|
+ return ret;
|
|
+ }
|
|
+ if (cache[i]->type == FAST_SYNC_QUEUE)
|
|
+ queue = handles[i];
|
|
+
|
|
+ objs[i] = cache[i]->obj;
|
|
}
|
|
- else
|
|
- diff = -timeout->QuadPart;
|
|
|
|
- timespec->tv_sec = diff / TICKSPERSEC;
|
|
- timespec->tv_nsec = (diff % TICKSPERSEC) * 100;
|
|
-}
|
|
+ /* It's common to wait on the message queue alone. Some applications wait
|
|
+ * on it in fast paths, with a zero timeout. Since we take two server calls
|
|
+ * instead of one when going through fast_wait_objs(), and since we only
|
|
+ * need to go through that path if we're waiting on other objects, just
|
|
+ * delegate to the server if we're only waiting on the message queue. */
|
|
+ if (count == 1 && queue)
|
|
+ {
|
|
+ release_fast_sync_obj( cache[0] );
|
|
+ return server_wait_for_object( handles[0], alertable, timeout );
|
|
+ }
|
|
|
|
-#endif
|
|
+ if (queue) select_queue( queue );
|
|
|
|
+ ret = fast_wait_objs( device, count, objs, wait_any, alertable, timeout, &apc );
|
|
|
|
-static BOOL compare_addr( const void *addr, const void *cmp, SIZE_T size )
|
|
-{
|
|
- switch (size)
|
|
- {
|
|
- case 1:
|
|
- return (*(const UCHAR *)addr == *(const UCHAR *)cmp);
|
|
- case 2:
|
|
- return (*(const USHORT *)addr == *(const USHORT *)cmp);
|
|
- case 4:
|
|
- return (*(const ULONG *)addr == *(const ULONG *)cmp);
|
|
- case 8:
|
|
- return (*(const ULONG64 *)addr == *(const ULONG64 *)cmp);
|
|
- }
|
|
+ if (queue) unselect_queue( queue, handles[ret] == queue );
|
|
|
|
- return FALSE;
|
|
-}
|
|
+ for (i = 0; i < count; ++i)
|
|
+ release_fast_sync_obj( cache[i] );
|
|
|
|
+ if (ret == STATUS_USER_APC)
|
|
+ invoke_user_apc( NULL, &apc, ret );
|
|
+ return ret;
|
|
+}
|
|
|
|
-/* create a struct security_descriptor and contained information in one contiguous piece of memory */
|
|
-NTSTATUS alloc_object_attributes( const OBJECT_ATTRIBUTES *attr, struct object_attributes **ret,
|
|
- data_size_t *ret_len )
|
|
+static NTSTATUS fast_signal_and_wait( HANDLE signal, HANDLE wait,
|
|
+ BOOLEAN alertable, const LARGE_INTEGER *timeout )
|
|
{
|
|
- unsigned int len = sizeof(**ret);
|
|
- SID *owner = NULL, *group = NULL;
|
|
- ACL *dacl = NULL, *sacl = NULL;
|
|
- SECURITY_DESCRIPTOR *sd;
|
|
-
|
|
- *ret = NULL;
|
|
- *ret_len = 0;
|
|
+ struct fast_sync_cache_entry signal_stack_cache, *signal_cache;
|
|
+ struct fast_sync_cache_entry wait_stack_cache, *wait_cache;
|
|
+ HANDLE queue = NULL;
|
|
+ user_apc_t apc;
|
|
+ NTSTATUS ret;
|
|
+ int device;
|
|
|
|
- if (!attr) return STATUS_SUCCESS;
|
|
+ if ((device = get_fast_sync_device()) < 0)
|
|
+ return STATUS_NOT_IMPLEMENTED;
|
|
|
|
- if (attr->Length != sizeof(*attr)) return STATUS_INVALID_PARAMETER;
|
|
+ if ((ret = get_fast_sync_obj( signal, 0, 0, &signal_stack_cache, &signal_cache )))
|
|
+ return ret;
|
|
|
|
- if ((sd = attr->SecurityDescriptor))
|
|
+ switch (signal_cache->type)
|
|
{
|
|
- len += sizeof(struct security_descriptor);
|
|
- if (sd->Revision != SECURITY_DESCRIPTOR_REVISION) return STATUS_UNKNOWN_REVISION;
|
|
- if (sd->Control & SE_SELF_RELATIVE)
|
|
- {
|
|
- SECURITY_DESCRIPTOR_RELATIVE *rel = (SECURITY_DESCRIPTOR_RELATIVE *)sd;
|
|
- if (rel->Owner) owner = (PSID)((BYTE *)rel + rel->Owner);
|
|
- if (rel->Group) group = (PSID)((BYTE *)rel + rel->Group);
|
|
- if ((sd->Control & SE_SACL_PRESENT) && rel->Sacl) sacl = (PSID)((BYTE *)rel + rel->Sacl);
|
|
- if ((sd->Control & SE_DACL_PRESENT) && rel->Dacl) dacl = (PSID)((BYTE *)rel + rel->Dacl);
|
|
- }
|
|
- else
|
|
- {
|
|
- owner = sd->Owner;
|
|
- group = sd->Group;
|
|
- if (sd->Control & SE_SACL_PRESENT) sacl = sd->Sacl;
|
|
- if (sd->Control & SE_DACL_PRESENT) dacl = sd->Dacl;
|
|
- }
|
|
+ case FAST_SYNC_SEMAPHORE:
|
|
+ if (!(signal_cache->access & SEMAPHORE_MODIFY_STATE))
|
|
+ {
|
|
+ release_fast_sync_obj( signal_cache );
|
|
+ return STATUS_ACCESS_DENIED;
|
|
+ }
|
|
+ break;
|
|
|
|
- if (owner) len += offsetof( SID, SubAuthority[owner->SubAuthorityCount] );
|
|
- if (group) len += offsetof( SID, SubAuthority[group->SubAuthorityCount] );
|
|
- if (sacl) len += sacl->AclSize;
|
|
- if (dacl) len += dacl->AclSize;
|
|
+ case FAST_SYNC_EVENT:
|
|
+ if (!(signal_cache->access & EVENT_MODIFY_STATE))
|
|
+ {
|
|
+ release_fast_sync_obj( signal_cache );
|
|
+ return STATUS_ACCESS_DENIED;
|
|
+ }
|
|
+ break;
|
|
|
|
- /* fix alignment for the Unicode name that follows the structure */
|
|
- len = (len + sizeof(WCHAR) - 1) & ~(sizeof(WCHAR) - 1);
|
|
+ case FAST_SYNC_MUTEX:
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ /* can't be signaled */
|
|
+ release_fast_sync_obj( signal_cache );
|
|
+ return STATUS_OBJECT_TYPE_MISMATCH;
|
|
}
|
|
|
|
- if (attr->ObjectName)
|
|
+ if ((ret = get_fast_sync_obj( wait, 0, SYNCHRONIZE, &wait_stack_cache, &wait_cache )))
|
|
{
|
|
- if (attr->ObjectName->Length & (sizeof(WCHAR) - 1)) return STATUS_OBJECT_NAME_INVALID;
|
|
- len += attr->ObjectName->Length;
|
|
+ release_fast_sync_obj( signal_cache );
|
|
+ return ret;
|
|
}
|
|
- else if (attr->RootDirectory) return STATUS_OBJECT_NAME_INVALID;
|
|
-
|
|
- len = (len + 3) & ~3; /* DWORD-align the entire structure */
|
|
-
|
|
- if (!(*ret = calloc( len, 1 ))) return STATUS_NO_MEMORY;
|
|
|
|
- (*ret)->rootdir = wine_server_obj_handle( attr->RootDirectory );
|
|
- (*ret)->attributes = attr->Attributes;
|
|
+ if (wait_cache->type == FAST_SYNC_QUEUE)
|
|
+ queue = wait;
|
|
|
|
- if (attr->SecurityDescriptor)
|
|
+ switch (signal_cache->type)
|
|
{
|
|
- struct security_descriptor *descr = (struct security_descriptor *)(*ret + 1);
|
|
- unsigned char *ptr = (unsigned char *)(descr + 1);
|
|
+ case FAST_SYNC_SEMAPHORE:
|
|
+ ret = fast_release_semaphore_obj( device, signal_cache->obj, 1, NULL );
|
|
+ break;
|
|
|
|
- descr->control = sd->Control & ~SE_SELF_RELATIVE;
|
|
- if (owner) descr->owner_len = offsetof( SID, SubAuthority[owner->SubAuthorityCount] );
|
|
- if (group) descr->group_len = offsetof( SID, SubAuthority[group->SubAuthorityCount] );
|
|
- if (sacl) descr->sacl_len = sacl->AclSize;
|
|
- if (dacl) descr->dacl_len = dacl->AclSize;
|
|
+ case FAST_SYNC_EVENT:
|
|
+ ret = fast_set_event_obj( device, signal_cache->obj, NULL );
|
|
+ break;
|
|
|
|
- memcpy( ptr, owner, descr->owner_len );
|
|
- ptr += descr->owner_len;
|
|
- memcpy( ptr, group, descr->group_len );
|
|
- ptr += descr->group_len;
|
|
- memcpy( ptr, sacl, descr->sacl_len );
|
|
- ptr += descr->sacl_len;
|
|
- memcpy( ptr, dacl, descr->dacl_len );
|
|
- (*ret)->sd_len = (sizeof(*descr) + descr->owner_len + descr->group_len + descr->sacl_len +
|
|
- descr->dacl_len + sizeof(WCHAR) - 1) & ~(sizeof(WCHAR) - 1);
|
|
+ case FAST_SYNC_MUTEX:
|
|
+ ret = fast_release_mutex_obj( device, signal_cache->obj, NULL );
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ assert( 0 );
|
|
+ break;
|
|
}
|
|
|
|
- if (attr->ObjectName)
|
|
+ if (!ret)
|
|
{
|
|
- unsigned char *ptr = (unsigned char *)(*ret + 1) + (*ret)->sd_len;
|
|
- (*ret)->name_len = attr->ObjectName->Length;
|
|
- memcpy( ptr, attr->ObjectName->Buffer, (*ret)->name_len );
|
|
+ if (queue) select_queue( queue );
|
|
+
|
|
+ ret = fast_wait_objs( device, 1, &wait_cache->obj, TRUE, alertable, timeout, &apc );
|
|
+
|
|
+ if (queue) unselect_queue( queue, !ret );
|
|
}
|
|
|
|
- *ret_len = len;
|
|
- return STATUS_SUCCESS;
|
|
+ release_fast_sync_obj( signal_cache );
|
|
+ release_fast_sync_obj( wait_cache );
|
|
+
|
|
+ if (ret == STATUS_USER_APC)
|
|
+ invoke_user_apc( NULL, &apc, ret );
|
|
+ return ret;
|
|
}
|
|
|
|
+#else
|
|
|
|
-static NTSTATUS validate_open_object_attributes( const OBJECT_ATTRIBUTES *attr )
|
|
+void close_fast_sync_obj( HANDLE handle )
|
|
{
|
|
- if (!attr || attr->Length != sizeof(*attr)) return STATUS_INVALID_PARAMETER;
|
|
+}
|
|
|
|
- if (attr->ObjectName)
|
|
- {
|
|
- if (attr->ObjectName->Length & (sizeof(WCHAR) - 1)) return STATUS_OBJECT_NAME_INVALID;
|
|
- }
|
|
- else if (attr->RootDirectory) return STATUS_OBJECT_NAME_INVALID;
|
|
+static NTSTATUS fast_release_semaphore( HANDLE handle, ULONG count, ULONG *prev_count )
|
|
+{
|
|
+ return STATUS_NOT_IMPLEMENTED;
|
|
+}
|
|
|
|
- return STATUS_SUCCESS;
|
|
+static NTSTATUS fast_query_semaphore( HANDLE handle, SEMAPHORE_BASIC_INFORMATION *info )
|
|
+{
|
|
+ return STATUS_NOT_IMPLEMENTED;
|
|
+}
|
|
+
|
|
+static NTSTATUS fast_set_event( HANDLE handle, LONG *prev_state )
|
|
+{
|
|
+ return STATUS_NOT_IMPLEMENTED;
|
|
+}
|
|
+
|
|
+static NTSTATUS fast_reset_event( HANDLE handle, LONG *prev_state )
|
|
+{
|
|
+ return STATUS_NOT_IMPLEMENTED;
|
|
+}
|
|
+
|
|
+static NTSTATUS fast_pulse_event( HANDLE handle, LONG *prev_state )
|
|
+{
|
|
+ return STATUS_NOT_IMPLEMENTED;
|
|
+}
|
|
+
|
|
+static NTSTATUS fast_query_event( HANDLE handle, EVENT_BASIC_INFORMATION *info )
|
|
+{
|
|
+ return STATUS_NOT_IMPLEMENTED;
|
|
+}
|
|
+
|
|
+static NTSTATUS fast_release_mutex( HANDLE handle, LONG *prev_count )
|
|
+{
|
|
+ return STATUS_NOT_IMPLEMENTED;
|
|
+}
|
|
+
|
|
+static NTSTATUS fast_query_mutex( HANDLE handle, MUTANT_BASIC_INFORMATION *info )
|
|
+{
|
|
+ return STATUS_NOT_IMPLEMENTED;
|
|
}
|
|
|
|
+static NTSTATUS fast_wait( DWORD count, const HANDLE *handles, BOOLEAN wait_any,
|
|
+ BOOLEAN alertable, const LARGE_INTEGER *timeout )
|
|
+{
|
|
+ return STATUS_NOT_IMPLEMENTED;
|
|
+}
|
|
+
|
|
+static NTSTATUS fast_signal_and_wait( HANDLE signal, HANDLE wait,
|
|
+ BOOLEAN alertable, const LARGE_INTEGER *timeout )
|
|
+{
|
|
+ return STATUS_NOT_IMPLEMENTED;
|
|
+}
|
|
+
|
|
+#endif
|
|
+
|
|
|
|
/******************************************************************************
|
|
* NtCreateSemaphore (NTDLL.@)
|
|
@@ -319,6 +1350,9 @@ NTSTATUS WINAPI NtCreateSemaphore( HANDLE *handle, ACCESS_MASK access, const OBJ
|
|
data_size_t len;
|
|
struct object_attributes *objattr;
|
|
|
|
+ TRACE( "access %#x, name %s, initial %d, max %d\n", access,
|
|
+ attr ? debugstr_us(attr->ObjectName) : "(null)", initial, max );
|
|
+
|
|
if (max <= 0 || initial < 0 || initial > max) return STATUS_INVALID_PARAMETER;
|
|
if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret;
|
|
|
|
@@ -345,6 +1379,8 @@ NTSTATUS WINAPI NtOpenSemaphore( HANDLE *handle, ACCESS_MASK access, const OBJEC
|
|
{
|
|
NTSTATUS ret;
|
|
|
|
+ TRACE( "access %#x, name %s\n", access, attr ? debugstr_us(attr->ObjectName) : "(null)" );
|
|
+
|
|
if ((ret = validate_open_object_attributes( attr ))) return ret;
|
|
|
|
SERVER_START_REQ( open_semaphore )
|
|
@@ -381,6 +1417,12 @@ NTSTATUS WINAPI NtQuerySemaphore( HANDLE handle, SEMAPHORE_INFORMATION_CLASS cla
|
|
|
|
if (len != sizeof(SEMAPHORE_BASIC_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH;
|
|
|
|
+ if ((ret = fast_query_semaphore( handle, out )) != STATUS_NOT_IMPLEMENTED)
|
|
+ {
|
|
+ if (!ret && ret_len) *ret_len = sizeof(SEMAPHORE_BASIC_INFORMATION);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
SERVER_START_REQ( query_semaphore )
|
|
{
|
|
req->handle = wine_server_obj_handle( handle );
|
|
@@ -403,6 +1445,11 @@ NTSTATUS WINAPI NtReleaseSemaphore( HANDLE handle, ULONG count, ULONG *previous
|
|
{
|
|
NTSTATUS ret;
|
|
|
|
+ TRACE( "handle %p, count %u, prev_count %p\n", handle, count, previous );
|
|
+
|
|
+ if ((ret = fast_release_semaphore( handle, count, previous )) != STATUS_NOT_IMPLEMENTED)
|
|
+ return ret;
|
|
+
|
|
SERVER_START_REQ( release_semaphore )
|
|
{
|
|
req->handle = wine_server_obj_handle( handle );
|
|
@@ -427,6 +1474,9 @@ NTSTATUS WINAPI NtCreateEvent( HANDLE *handle, ACCESS_MASK access, const OBJECT_
|
|
data_size_t len;
|
|
struct object_attributes *objattr;
|
|
|
|
+ TRACE( "access %#x, name %s, type %u, state %u\n", access,
|
|
+ attr ? debugstr_us(attr->ObjectName) : "(null)", type, state );
|
|
+
|
|
if (type != NotificationEvent && type != SynchronizationEvent) return STATUS_INVALID_PARAMETER;
|
|
if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret;
|
|
|
|
@@ -453,6 +1503,8 @@ NTSTATUS WINAPI NtOpenEvent( HANDLE *handle, ACCESS_MASK access, const OBJECT_AT
|
|
{
|
|
NTSTATUS ret;
|
|
|
|
+ TRACE( "access %#x, name %s\n", access, attr ? debugstr_us(attr->ObjectName) : "(null)" );
|
|
+
|
|
if ((ret = validate_open_object_attributes( attr ))) return ret;
|
|
|
|
SERVER_START_REQ( open_event )
|
|
@@ -477,6 +1529,11 @@ NTSTATUS WINAPI NtSetEvent( HANDLE handle, LONG *prev_state )
|
|
{
|
|
NTSTATUS ret;
|
|
|
|
+ TRACE( "handle %p, prev_state %p\n", handle, prev_state );
|
|
+
|
|
+ if ((ret = fast_set_event( handle, prev_state )) != STATUS_NOT_IMPLEMENTED)
|
|
+ return ret;
|
|
+
|
|
SERVER_START_REQ( event_op )
|
|
{
|
|
req->handle = wine_server_obj_handle( handle );
|
|
@@ -496,6 +1553,11 @@ NTSTATUS WINAPI NtResetEvent( HANDLE handle, LONG *prev_state )
|
|
{
|
|
NTSTATUS ret;
|
|
|
|
+ TRACE( "handle %p, prev_state %p\n", handle, prev_state );
|
|
+
|
|
+ if ((ret = fast_reset_event( handle, prev_state )) != STATUS_NOT_IMPLEMENTED)
|
|
+ return ret;
|
|
+
|
|
SERVER_START_REQ( event_op )
|
|
{
|
|
req->handle = wine_server_obj_handle( handle );
|
|
@@ -525,6 +1587,11 @@ NTSTATUS WINAPI NtPulseEvent( HANDLE handle, LONG *prev_state )
|
|
{
|
|
NTSTATUS ret;
|
|
|
|
+ TRACE( "handle %p, prev_state %p\n", handle, prev_state );
|
|
+
|
|
+ if ((ret = fast_pulse_event( handle, prev_state )) != STATUS_NOT_IMPLEMENTED)
|
|
+ return ret;
|
|
+
|
|
SERVER_START_REQ( event_op )
|
|
{
|
|
req->handle = wine_server_obj_handle( handle );
|
|
@@ -557,6 +1624,12 @@ NTSTATUS WINAPI NtQueryEvent( HANDLE handle, EVENT_INFORMATION_CLASS class,
|
|
|
|
if (len != sizeof(EVENT_BASIC_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH;
|
|
|
|
+ if ((ret = fast_query_event( handle, out )) != STATUS_NOT_IMPLEMENTED)
|
|
+ {
|
|
+ if (!ret && ret_len) *ret_len = sizeof(EVENT_BASIC_INFORMATION);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
SERVER_START_REQ( query_event )
|
|
{
|
|
req->handle = wine_server_obj_handle( handle );
|
|
@@ -582,6 +1655,9 @@ NTSTATUS WINAPI NtCreateMutant( HANDLE *handle, ACCESS_MASK access, const OBJECT
|
|
data_size_t len;
|
|
struct object_attributes *objattr;
|
|
|
|
+ TRACE( "access %#x, name %s, owned %u\n", access,
|
|
+ attr ? debugstr_us(attr->ObjectName) : "(null)", owned );
|
|
+
|
|
if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret;
|
|
|
|
SERVER_START_REQ( create_mutex )
|
|
@@ -606,6 +1682,8 @@ NTSTATUS WINAPI NtOpenMutant( HANDLE *handle, ACCESS_MASK access, const OBJECT_A
|
|
{
|
|
NTSTATUS ret;
|
|
|
|
+ TRACE( "access %#x, name %s\n", access, attr ? debugstr_us(attr->ObjectName) : "(null)" );
|
|
+
|
|
if ((ret = validate_open_object_attributes( attr ))) return ret;
|
|
|
|
SERVER_START_REQ( open_mutex )
|
|
@@ -630,6 +1708,11 @@ NTSTATUS WINAPI NtReleaseMutant( HANDLE handle, LONG *prev_count )
|
|
{
|
|
NTSTATUS ret;
|
|
|
|
+ TRACE( "handle %p, prev_count %p\n", handle, prev_count );
|
|
+
|
|
+ if ((ret = fast_release_mutex( handle, prev_count )) != STATUS_NOT_IMPLEMENTED)
|
|
+ return ret;
|
|
+
|
|
SERVER_START_REQ( release_mutex )
|
|
{
|
|
req->handle = wine_server_obj_handle( handle );
|
|
@@ -660,6 +1743,12 @@ NTSTATUS WINAPI NtQueryMutant( HANDLE handle, MUTANT_INFORMATION_CLASS class,
|
|
|
|
if (len != sizeof(MUTANT_BASIC_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH;
|
|
|
|
+ if ((ret = fast_query_mutex( handle, out )) != STATUS_NOT_IMPLEMENTED)
|
|
+ {
|
|
+ if (!ret && ret_len) *ret_len = sizeof(MUTANT_BASIC_INFORMATION);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
SERVER_START_REQ( query_mutex )
|
|
{
|
|
req->handle = wine_server_obj_handle( handle );
|
|
@@ -1277,6 +2366,9 @@ NTSTATUS WINAPI NtCreateTimer( HANDLE *handle, ACCESS_MASK access, const OBJECT_
|
|
data_size_t len;
|
|
struct object_attributes *objattr;
|
|
|
|
+ TRACE( "access %#x, name %s, type %u\n", access,
|
|
+ attr ? debugstr_us(attr->ObjectName) : "(null)", type );
|
|
+
|
|
if (type != NotificationTimer && type != SynchronizationTimer) return STATUS_INVALID_PARAMETER;
|
|
|
|
if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret;
|
|
@@ -1304,6 +2396,8 @@ NTSTATUS WINAPI NtOpenTimer( HANDLE *handle, ACCESS_MASK access, const OBJECT_AT
|
|
{
|
|
NTSTATUS ret;
|
|
|
|
+ TRACE( "access %#x, name %s\n", access, attr ? debugstr_us(attr->ObjectName) : "(null)" );
|
|
+
|
|
if ((ret = validate_open_object_attributes( attr ))) return ret;
|
|
|
|
SERVER_START_REQ( open_timer )
|
|
@@ -1356,6 +2450,8 @@ NTSTATUS WINAPI NtCancelTimer( HANDLE handle, BOOLEAN *state )
|
|
{
|
|
NTSTATUS ret;
|
|
|
|
+ TRACE( "handle %p, state %p\n", handle, state );
|
|
+
|
|
SERVER_START_REQ( cancel_timer )
|
|
{
|
|
req->handle = wine_server_obj_handle( handle );
|
|
@@ -1424,13 +2520,29 @@ NTSTATUS WINAPI NtWaitForMultipleObjects( DWORD count, const HANDLE *handles, BO
|
|
{
|
|
select_op_t select_op;
|
|
UINT i, flags = SELECT_INTERRUPTIBLE;
|
|
+ NTSTATUS ret;
|
|
|
|
if (!count || count > MAXIMUM_WAIT_OBJECTS) return STATUS_INVALID_PARAMETER_1;
|
|
|
|
+ if (TRACE_ON(sync))
|
|
+ {
|
|
+ TRACE( "wait_any %u, alertable %u, handles {%p", wait_any, alertable, handles[0] );
|
|
+ for (i = 1; i < count; i++) TRACE( ", %p", handles[i] );
|
|
+ TRACE( "}, timeout %s\n", debugstr_timeout(timeout) );
|
|
+ }
|
|
+
|
|
+ if ((ret = fast_wait( count, handles, wait_any, alertable, timeout )) != STATUS_NOT_IMPLEMENTED)
|
|
+ {
|
|
+ TRACE( "-> %#x\n", ret );
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
if (alertable) flags |= SELECT_ALERTABLE;
|
|
select_op.wait.op = wait_any ? SELECT_WAIT : SELECT_WAIT_ALL;
|
|
for (i = 0; i < count; i++) select_op.wait.handles[i] = wine_server_obj_handle( handles[i] );
|
|
- return server_wait( &select_op, offsetof( select_op_t, wait.handles[count] ), flags, timeout );
|
|
+ ret = server_wait( &select_op, offsetof( select_op_t, wait.handles[count] ), flags, timeout );
|
|
+ TRACE( "-> %#x\n", ret );
|
|
+ return ret;
|
|
}
|
|
|
|
|
|
@@ -1451,9 +2563,15 @@ NTSTATUS WINAPI NtSignalAndWaitForSingleObject( HANDLE signal, HANDLE wait,
|
|
{
|
|
select_op_t select_op;
|
|
UINT flags = SELECT_INTERRUPTIBLE;
|
|
+ NTSTATUS ret;
|
|
+
|
|
+ TRACE( "signal %p, wait %p, alertable %u, timeout %s\n", signal, wait, alertable, debugstr_timeout(timeout) );
|
|
|
|
if (!signal) return STATUS_INVALID_HANDLE;
|
|
|
|
+ if ((ret = fast_signal_and_wait( signal, wait, alertable, timeout )) != STATUS_NOT_IMPLEMENTED)
|
|
+ return ret;
|
|
+
|
|
if (alertable) flags |= SELECT_ALERTABLE;
|
|
select_op.signal_and_wait.op = SELECT_SIGNAL_AND_WAIT;
|
|
select_op.signal_and_wait.wait = wine_server_obj_handle( wait );
|
|
@@ -1481,6 +2599,8 @@ NTSTATUS WINAPI NtYieldExecution(void)
|
|
*/
|
|
NTSTATUS WINAPI NtDelayExecution( BOOLEAN alertable, const LARGE_INTEGER *timeout )
|
|
{
|
|
+// TRACE( "alertable %u, timeout %s\n", alertable, debugstr_timeout(timeout) );
|
|
+
|
|
/* if alertable, we need to query the server */
|
|
if (alertable) return server_wait( NULL, 0, SELECT_INTERRUPTIBLE | SELECT_ALERTABLE, timeout );
|
|
|
|
@@ -1651,6 +2771,9 @@ NTSTATUS WINAPI NtCreateKeyedEvent( HANDLE *handle, ACCESS_MASK access,
|
|
data_size_t len;
|
|
struct object_attributes *objattr;
|
|
|
|
+ TRACE( "access %#x, name %s, flags %#x\n", access,
|
|
+ attr ? debugstr_us(attr->ObjectName) : "(null)", flags );
|
|
+
|
|
if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret;
|
|
|
|
SERVER_START_REQ( create_keyed_event )
|
|
@@ -1674,6 +2797,8 @@ NTSTATUS WINAPI NtOpenKeyedEvent( HANDLE *handle, ACCESS_MASK access, const OBJE
|
|
{
|
|
NTSTATUS ret;
|
|
|
|
+ TRACE( "access %#x, name %s\n", access, attr ? debugstr_us(attr->ObjectName) : "(null)" );
|
|
+
|
|
if ((ret = validate_open_object_attributes( attr ))) return ret;
|
|
|
|
SERVER_START_REQ( open_keyed_event )
|
|
@@ -1699,6 +2824,8 @@ NTSTATUS WINAPI NtWaitForKeyedEvent( HANDLE handle, const void *key,
|
|
select_op_t select_op;
|
|
UINT flags = SELECT_INTERRUPTIBLE;
|
|
|
|
+ TRACE( "handle %p, key %p, alertable %u, timeout %s\n", handle, key, alertable, debugstr_timeout(timeout) );
|
|
+
|
|
if (!handle) handle = keyed_event;
|
|
if ((ULONG_PTR)key & 1) return STATUS_INVALID_PARAMETER_1;
|
|
if (alertable) flags |= SELECT_ALERTABLE;
|
|
@@ -1718,6 +2845,8 @@ NTSTATUS WINAPI NtReleaseKeyedEvent( HANDLE handle, const void *key,
|
|
select_op_t select_op;
|
|
UINT flags = SELECT_INTERRUPTIBLE;
|
|
|
|
+ TRACE( "handle %p, key %p, alertable %u, timeout %s\n", handle, key, alertable, debugstr_timeout(timeout) );
|
|
+
|
|
if (!handle) handle = keyed_event;
|
|
if ((ULONG_PTR)key & 1) return STATUS_INVALID_PARAMETER_1;
|
|
if (alertable) flags |= SELECT_ALERTABLE;
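Illustrative sketch, not part of the patch: every Nt* entry point changed above follows the same dispatch shape. It tries the fast path first and only falls back to a wineserver round trip when the fast helper reports STATUS_NOT_IMPLEMENTED, which is what the stubs in the #else branch return when linux/winesync.h is unavailable. fast_op and server_op below are hypothetical stand-ins for the real helpers.

static NTSTATUS do_sync_op( HANDLE handle, NTSTATUS (*fast_op)( HANDLE ),
                            NTSTATUS (*server_op)( HANDLE ) )
{
    NTSTATUS ret;

    /* fast path: handled entirely by the winesync driver */
    if ((ret = fast_op( handle )) != STATUS_NOT_IMPLEMENTED) return ret;

    /* slow path: ask the wineserver as before */
    return server_op( handle );
}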
diff --git a/dlls/ntdll/unix/thread.c b/dlls/ntdll/unix/thread.c
|
|
index 54b748e99cb..d7b850d8746 100644
|
|
--- a/dlls/ntdll/unix/thread.c
|
|
+++ b/dlls/ntdll/unix/thread.c
|
|
@@ -1354,7 +1354,7 @@ NTSTATUS get_thread_context( HANDLE handle, void *context, BOOL *self, USHORT ma
|
|
|
|
if (ret == STATUS_PENDING)
|
|
{
|
|
- NtWaitForSingleObject( context_handle, FALSE, NULL );
|
|
+ server_wait_for_object( context_handle, FALSE, NULL );
|
|
|
|
SERVER_START_REQ( get_thread_context )
|
|
{
|
|
diff --git a/dlls/ntdll/unix/unix_private.h b/dlls/ntdll/unix/unix_private.h
|
|
index 5ebbc6d702b..6e3bff4ba9e 100644
|
|
--- a/dlls/ntdll/unix/unix_private.h
|
|
+++ b/dlls/ntdll/unix/unix_private.h
|
|
@@ -22,6 +22,7 @@
|
|
#define __NTDLL_UNIX_PRIVATE_H
|
|
|
|
#include <pthread.h>
|
|
+#include <setjmp.h>
|
|
#include <signal.h>
|
|
#include "unixlib.h"
|
|
#include "wine/server.h"
|
|
@@ -69,6 +70,8 @@ struct ntdll_thread_data
|
|
PRTL_THREAD_START_ROUTINE start; /* thread entry point */
|
|
void *param; /* thread entry point parameter */
|
|
void *jmp_buf; /* setjmp buffer for exception handling */
|
|
+ volatile int in_fast_alert_wait; /* are we currently in a fast alertable wait? */
|
|
+ sigjmp_buf fast_alert_buf; /* setjmp buffer to restart a fast alertable wait */
|
|
};
|
|
|
|
C_ASSERT( sizeof(struct ntdll_thread_data) <= sizeof(((TEB *)0)->GdiTebBatch) );
|
|
@@ -168,5 +171,8 @@ extern NTSTATUS load_start_exe( WCHAR **image, void **module ) DECLSPEC_HIDDEN;
|
|
extern void start_server( BOOL debug ) DECLSPEC_HIDDEN;
|
|
|
|
+extern pthread_mutex_t fd_cache_mutex DECLSPEC_HIDDEN;
|
|
+
|
|
+extern NTSTATUS invoke_user_apc( CONTEXT *context, const user_apc_t *apc, NTSTATUS status ) DECLSPEC_HIDDEN;
|
|
extern unsigned int server_call_unlocked( void *req_ptr ) DECLSPEC_HIDDEN;
|
|
extern void server_enter_uninterrupted_section( pthread_mutex_t *mutex, sigset_t *sigset ) DECLSPEC_HIDDEN;
|
|
extern void server_leave_uninterrupted_section( pthread_mutex_t *mutex, sigset_t *sigset ) DECLSPEC_HIDDEN;
|
|
@@ -176,6 +182,8 @@ extern unsigned int server_select( const select_op_t *select_op, data_size_t siz
|
|
user_apc_t *user_apc ) DECLSPEC_HIDDEN;
|
|
extern unsigned int server_wait( const select_op_t *select_op, data_size_t size, UINT flags,
|
|
const LARGE_INTEGER *timeout ) DECLSPEC_HIDDEN;
|
|
+extern unsigned int server_wait_for_object( HANDLE handle, BOOL alertable,
|
|
+ const LARGE_INTEGER *timeout ) DECLSPEC_HIDDEN;
|
|
extern unsigned int server_queue_process_apc( HANDLE process, const apc_call_t *call,
|
|
apc_result_t *result ) DECLSPEC_HIDDEN;
|
|
extern int server_get_unix_fd( HANDLE handle, unsigned int wanted_access, int *unix_fd,
|
|
@@ -284,6 +292,8 @@ extern void add_completion( HANDLE handle, ULONG_PTR value, NTSTATUS status, ULO
|
|
|
|
extern void dbg_init(void) DECLSPEC_HIDDEN;
|
|
|
|
+extern void close_fast_sync_obj( HANDLE handle ) DECLSPEC_HIDDEN;
|
|
+
|
|
extern NTSTATUS call_user_apc_dispatcher( CONTEXT *context_ptr, ULONG_PTR arg1, ULONG_PTR arg2, ULONG_PTR arg3,
|
|
PNTAPCFUNC func, NTSTATUS status ) DECLSPEC_HIDDEN;
|
|
extern NTSTATUS call_user_exception_dispatcher( EXCEPTION_RECORD *rec, CONTEXT *context ) DECLSPEC_HIDDEN;
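The server_wait_for_object() helper declared above replaces direct NtWaitForSingleObject() calls for wineserver-internal handles (see the get_thread_context change just before this file), so that such waits always go through the server and never take the fast path. Its body is not shown in this excerpt; a plausible implementation, assumed here and mirroring the wait setup used by NtWaitForMultipleObjects(), would be:

unsigned int server_wait_for_object( HANDLE handle, BOOL alertable, const LARGE_INTEGER *timeout )
{
    select_op_t select_op;
    UINT flags = SELECT_INTERRUPTIBLE;

    if (alertable) flags |= SELECT_ALERTABLE;
    select_op.wait.op = SELECT_WAIT;
    select_op.wait.handles[0] = wine_server_obj_handle( handle );
    return server_wait( &select_op, offsetof( select_op_t, wait.handles[1] ), flags, timeout );
}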
diff --git a/include/config.h.in b/include/config.h.in
|
|
index 68122959035..c5e1d4a921b 100644
|
|
--- a/include/config.h.in
|
|
+++ b/include/config.h.in
|
|
@@ -396,6 +396,9 @@
|
|
/* Define to 1 if you have the <linux/videodev2.h> header file. */
|
|
#undef HAVE_LINUX_VIDEODEV2_H
|
|
|
|
+/* Define to 1 if you have the <linux/winesync.h> header file. */
|
|
+#undef HAVE_LINUX_WINESYNC_H
|
|
+
|
|
/* Define to 1 if you have the `lstat' function. */
|
|
#undef HAVE_LSTAT
|
|
|
|
diff --git a/include/wine/server_protocol.h b/include/wine/server_protocol.h
|
|
index fe1f5394a72..0c877db8fde 100644
|
|
--- a/include/wine/server_protocol.h
|
|
+++ b/include/wine/server_protocol.h
|
|
@@ -5448,6 +5448,84 @@ struct get_next_thread_reply
|
|
};
|
|
|
|
|
|
+enum fast_sync_type
|
|
+{
|
|
+ FAST_SYNC_SEMAPHORE = 1,
|
|
+ FAST_SYNC_MUTEX,
|
|
+ FAST_SYNC_EVENT,
|
|
+ FAST_SYNC_SERVER,
|
|
+ FAST_SYNC_QUEUE,
|
|
+};
|
|
+
|
|
+
|
|
+
|
|
+struct get_fast_sync_device_request
|
|
+{
|
|
+ struct request_header __header;
|
|
+ char __pad_12[4];
|
|
+};
|
|
+struct get_fast_sync_device_reply
|
|
+{
|
|
+ struct reply_header __header;
|
|
+ obj_handle_t handle;
|
|
+ char __pad_12[4];
|
|
+};
|
|
+
|
|
+
|
|
+
|
|
+struct get_fast_sync_obj_request
|
|
+{
|
|
+ struct request_header __header;
|
|
+ obj_handle_t handle;
|
|
+};
|
|
+struct get_fast_sync_obj_reply
|
|
+{
|
|
+ struct reply_header __header;
|
|
+ obj_handle_t handle;
|
|
+ int obj;
|
|
+ int type;
|
|
+ unsigned int access;
|
|
+};
|
|
+
|
|
+
|
|
+
|
|
+struct fast_select_queue_request
|
|
+{
|
|
+ struct request_header __header;
|
|
+ obj_handle_t handle;
|
|
+};
|
|
+struct fast_select_queue_reply
|
|
+{
|
|
+ struct reply_header __header;
|
|
+};
|
|
+
|
|
+
|
|
+
|
|
+struct fast_unselect_queue_request
|
|
+{
|
|
+ struct request_header __header;
|
|
+ obj_handle_t handle;
|
|
+ int signaled;
|
|
+ char __pad_20[4];
|
|
+};
|
|
+struct fast_unselect_queue_reply
|
|
+{
|
|
+ struct reply_header __header;
|
|
+};
|
|
+
|
|
+
|
|
+
|
|
+struct check_user_apc_request
|
|
+{
|
|
+ struct request_header __header;
|
|
+ char __pad_12[4];
|
|
+};
|
|
+struct check_user_apc_reply
|
|
+{
|
|
+ struct reply_header __header;
|
|
+};
|
|
+
|
|
+
|
|
enum request
|
|
{
|
|
REQ_new_process,
|
|
@@ -5726,6 +5804,11 @@ enum request
|
|
REQ_suspend_process,
|
|
REQ_resume_process,
|
|
REQ_get_next_thread,
|
|
+ REQ_get_fast_sync_device,
|
|
+ REQ_get_fast_sync_obj,
|
|
+ REQ_fast_select_queue,
|
|
+ REQ_fast_unselect_queue,
|
|
+ REQ_check_user_apc,
|
|
REQ_NB_REQUESTS
|
|
};
|
|
|
|
@@ -6009,6 +6092,11 @@ union generic_request
|
|
struct suspend_process_request suspend_process_request;
|
|
struct resume_process_request resume_process_request;
|
|
struct get_next_thread_request get_next_thread_request;
|
|
+ struct get_fast_sync_device_request get_fast_sync_device_request;
|
|
+ struct get_fast_sync_obj_request get_fast_sync_obj_request;
|
|
+ struct fast_select_queue_request fast_select_queue_request;
|
|
+ struct fast_unselect_queue_request fast_unselect_queue_request;
|
|
+ struct check_user_apc_request check_user_apc_request;
|
|
};
|
|
union generic_reply
|
|
{
|
|
@@ -6290,6 +6378,11 @@ union generic_reply
|
|
struct suspend_process_reply suspend_process_reply;
|
|
struct resume_process_reply resume_process_reply;
|
|
struct get_next_thread_reply get_next_thread_reply;
|
|
+ struct get_fast_sync_device_reply get_fast_sync_device_reply;
|
|
+ struct get_fast_sync_obj_reply get_fast_sync_obj_reply;
|
|
+ struct fast_select_queue_reply fast_select_queue_reply;
|
|
+ struct fast_unselect_queue_reply fast_unselect_queue_reply;
|
|
+ struct check_user_apc_reply check_user_apc_reply;
|
|
};
|
|
|
|
/* ### protocol_version begin ### */
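The get_fast_sync_obj request above lets the client resolve a handle to its underlying winesync object once and cache the result (the obj/type/access fields feed the per-handle cache used by the fast paths in dlls/ntdll/unix/sync.c). A minimal sketch of such a lookup, with query_fast_sync_obj as a hypothetical helper name:

static NTSTATUS query_fast_sync_obj( HANDLE handle, int *obj, int *type, unsigned int *access )
{
    NTSTATUS ret;

    SERVER_START_REQ( get_fast_sync_obj )
    {
        req->handle = wine_server_obj_handle( handle );
        if (!(ret = wine_server_call( req )))
        {
            *obj = reply->obj;
            *type = reply->type;
            *access = reply->access;
        }
    }
    SERVER_END_REQ;
    return ret;
}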
diff --git a/server/Makefile.in b/server/Makefile.in
|
|
index 4264e3db108..7486ef5010d 100644
|
|
--- a/server/Makefile.in
|
|
+++ b/server/Makefile.in
|
|
@@ -12,6 +12,7 @@ C_SRCS = \
|
|
device.c \
|
|
directory.c \
|
|
event.c \
|
|
+ fast_sync.c \
|
|
fd.c \
|
|
file.c \
|
|
handle.c \
|
|
diff --git a/server/async.c b/server/async.c
|
|
index 4dedb27f3d8..ad109fbbc05 100644
|
|
--- a/server/async.c
|
|
+++ b/server/async.c
|
|
@@ -84,6 +84,7 @@ static const struct object_ops async_ops =
|
|
NULL, /* unlink_name */
|
|
no_open_file, /* open_file */
|
|
no_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ no_get_fast_sync, /* get_fast_sync */
|
|
no_close_handle, /* close_handle */
|
|
async_destroy /* destroy */
|
|
};
|
|
@@ -506,6 +507,7 @@ static const struct object_ops iosb_ops =
|
|
NULL, /* unlink_name */
|
|
no_open_file, /* open_file */
|
|
no_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ no_get_fast_sync, /* get_fast_sync */
|
|
no_close_handle, /* close_handle */
|
|
iosb_destroy /* destroy */
|
|
};
|
|
diff --git a/server/atom.c b/server/atom.c
|
|
index 8d0ffbb05f3..e638b064f49 100644
|
|
--- a/server/atom.c
|
|
+++ b/server/atom.c
|
|
@@ -92,6 +92,7 @@ static const struct object_ops atom_table_ops =
|
|
NULL, /* unlink_name */
|
|
no_open_file, /* open_file */
|
|
no_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ no_get_fast_sync, /* get_fast_sync */
|
|
no_close_handle, /* close_handle */
|
|
atom_table_destroy /* destroy */
|
|
};
|
|
diff --git a/server/change.c b/server/change.c
|
|
index b02a9cd65bf..0eadae4e96d 100644
|
|
--- a/server/change.c
|
|
+++ b/server/change.c
|
|
@@ -126,6 +126,7 @@ static const struct object_ops dir_ops =
|
|
NULL, /* unlink_name */
|
|
no_open_file, /* open_file */
|
|
no_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ default_fd_get_fast_sync, /* get_fast_sync */
|
|
dir_close_handle, /* close_handle */
|
|
dir_destroy /* destroy */
|
|
};
|
|
diff --git a/server/clipboard.c b/server/clipboard.c
|
|
index 1c4875ff726..1d9369deb52 100644
|
|
--- a/server/clipboard.c
|
|
+++ b/server/clipboard.c
|
|
@@ -89,6 +89,7 @@ static const struct object_ops clipboard_ops =
|
|
NULL, /* unlink_name */
|
|
no_open_file, /* open_file */
|
|
no_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ no_get_fast_sync, /* get_fast_sync */
|
|
no_close_handle, /* close_handle */
|
|
clipboard_destroy /* destroy */
|
|
};
|
|
diff --git a/server/completion.c b/server/completion.c
|
|
index eb0d256ad09..776095b3900 100644
|
|
--- a/server/completion.c
|
|
+++ b/server/completion.c
|
|
@@ -62,10 +62,12 @@ struct completion
|
|
struct object obj;
|
|
struct list queue;
|
|
unsigned int depth;
|
|
+ struct fast_sync *fast_sync;
|
|
};
|
|
|
|
static void completion_dump( struct object*, int );
|
|
static int completion_signaled( struct object *obj, struct wait_queue_entry *entry );
|
|
+static struct fast_sync *completion_get_fast_sync( struct object *obj );
|
|
static void completion_destroy( struct object * );
|
|
|
|
static const struct object_ops completion_ops =
|
|
@@ -88,6 +90,7 @@ static const struct object_ops completion_ops =
|
|
default_unlink_name, /* unlink_name */
|
|
no_open_file, /* open_file */
|
|
no_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ completion_get_fast_sync, /* get_fast_sync */
|
|
no_close_handle, /* close_handle */
|
|
completion_destroy /* destroy */
|
|
};
|
|
@@ -110,6 +113,7 @@ static void completion_destroy( struct object *obj)
|
|
{
|
|
free( tmp );
|
|
}
|
|
+ if (completion->fast_sync) release_object( completion->fast_sync );
|
|
}
|
|
|
|
static void completion_dump( struct object *obj, int verbose )
|
|
@@ -127,6 +131,16 @@ static int completion_signaled( struct object *obj, struct wait_queue_entry *ent
|
|
return !list_empty( &completion->queue );
|
|
}
|
|
|
|
+static struct fast_sync *completion_get_fast_sync( struct object *obj )
|
|
+{
|
|
+ struct completion *completion = (struct completion *)obj;
|
|
+
|
|
+ if (!completion->fast_sync)
|
|
+ completion->fast_sync = fast_create_event( FAST_SYNC_SERVER, 1, !list_empty( &completion->queue ) );
|
|
+ if (completion->fast_sync) grab_object( completion->fast_sync );
|
|
+ return completion->fast_sync;
|
|
+}
|
|
+
|
|
static struct completion *create_completion( struct object *root, const struct unicode_str *name,
|
|
unsigned int attr, unsigned int concurrent,
|
|
const struct security_descriptor *sd )
|
|
@@ -139,6 +153,7 @@ static struct completion *create_completion( struct object *root, const struct u
|
|
{
|
|
list_init( &completion->queue );
|
|
completion->depth = 0;
|
|
+ completion->fast_sync = NULL;
|
|
}
|
|
}
|
|
|
|
@@ -166,6 +181,7 @@ void add_completion( struct completion *completion, apc_param_t ckey, apc_param_
|
|
list_add_tail( &completion->queue, &msg->queue_entry );
|
|
completion->depth++;
|
|
wake_up( &completion->obj, 1 );
|
|
+ fast_set_event( completion->fast_sync );
|
|
}
|
|
|
|
/* create a completion */
|
|
@@ -232,6 +248,8 @@ DECL_HANDLER(remove_completion)
|
|
reply->status = msg->status;
|
|
reply->information = msg->information;
|
|
free( msg );
|
|
+ if (list_empty( &completion->queue ))
|
|
+ fast_reset_event( completion->fast_sync );
|
|
}
|
|
|
|
release_object( completion );
|
|
diff --git a/server/console.c b/server/console.c
|
|
index 1e6f6c0f8a3..a6d192394eb 100644
|
|
--- a/server/console.c
|
|
+++ b/server/console.c
|
|
@@ -62,6 +62,7 @@ struct console
|
|
struct fd *fd; /* for bare console, attached input fd */
|
|
struct async_queue ioctl_q; /* ioctl queue */
|
|
struct async_queue read_q; /* read queue */
|
|
+ struct fast_sync *fast_sync; /* fast synchronization object */
|
|
};
|
|
|
|
static void console_dump( struct object *obj, int verbose );
|
|
@@ -72,6 +73,7 @@ static struct object *console_lookup_name( struct object *obj, struct unicode_st
|
|
unsigned int attr, struct object *root );
|
|
static struct object *console_open_file( struct object *obj, unsigned int access,
|
|
unsigned int sharing, unsigned int options );
|
|
+static struct fast_sync *console_get_fast_sync( struct object *obj );
|
|
|
|
static const struct object_ops console_ops =
|
|
{
|
|
@@ -93,6 +95,7 @@ static const struct object_ops console_ops =
|
|
NULL, /* unlink_name */
|
|
console_open_file, /* open_file */
|
|
no_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ console_get_fast_sync, /* get_fast_sync */
|
|
no_close_handle, /* close_handle */
|
|
console_destroy /* destroy */
|
|
};
|
|
@@ -137,6 +140,7 @@ struct console_server
|
|
int busy; /* flag if server processing an ioctl */
|
|
int term_fd; /* UNIX terminal fd */
|
|
struct termios termios; /* original termios */
|
|
+ struct fast_sync *fast_sync; /* fast synchronization object */
|
|
};
|
|
|
|
static void console_server_dump( struct object *obj, int verbose );
|
|
@@ -147,6 +151,7 @@ static struct object *console_server_lookup_name( struct object *obj, struct uni
|
|
unsigned int attr, struct object *root );
|
|
static struct object *console_server_open_file( struct object *obj, unsigned int access,
|
|
unsigned int sharing, unsigned int options );
|
|
+static struct fast_sync *console_server_get_fast_sync( struct object *obj );
|
|
|
|
static const struct object_ops console_server_ops =
|
|
{
|
|
@@ -168,6 +173,7 @@ static const struct object_ops console_server_ops =
|
|
NULL, /* unlink_name */
|
|
console_server_open_file, /* open_file */
|
|
no_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ console_server_get_fast_sync, /* get_fast_sync */
|
|
no_close_handle, /* close_handle */
|
|
console_server_destroy /* destroy */
|
|
};
|
|
@@ -215,6 +221,7 @@ static int screen_buffer_add_queue( struct object *obj, struct wait_queue_entry
|
|
static struct fd *screen_buffer_get_fd( struct object *obj );
|
|
static struct object *screen_buffer_open_file( struct object *obj, unsigned int access,
|
|
unsigned int sharing, unsigned int options );
|
|
+static struct fast_sync *screen_buffer_get_fast_sync( struct object *obj );
|
|
|
|
static const struct object_ops screen_buffer_ops =
|
|
{
|
|
@@ -236,6 +243,7 @@ static const struct object_ops screen_buffer_ops =
|
|
NULL, /* unlink_name */
|
|
screen_buffer_open_file, /* open_file */
|
|
no_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ screen_buffer_get_fast_sync, /* get_fast_sync */
|
|
no_close_handle, /* close_handle */
|
|
screen_buffer_destroy /* destroy */
|
|
};
|
|
@@ -284,6 +292,7 @@ static const struct object_ops console_device_ops =
|
|
default_unlink_name, /* unlink_name */
|
|
console_device_open_file, /* open_file */
|
|
no_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ no_get_fast_sync, /* get_fast_sync */
|
|
no_close_handle, /* close_handle */
|
|
no_destroy /* destroy */
|
|
};
|
|
@@ -299,6 +308,7 @@ static struct object *console_input_open_file( struct object *obj, unsigned int
|
|
unsigned int sharing, unsigned int options );
|
|
static int console_input_add_queue( struct object *obj, struct wait_queue_entry *entry );
|
|
static struct fd *console_input_get_fd( struct object *obj );
|
|
+static struct fast_sync *console_input_get_fast_sync( struct object *obj );
|
|
static void console_input_destroy( struct object *obj );
|
|
|
|
static const struct object_ops console_input_ops =
|
|
@@ -321,6 +331,7 @@ static const struct object_ops console_input_ops =
|
|
default_unlink_name, /* unlink_name */
|
|
console_input_open_file, /* open_file */
|
|
no_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ console_input_get_fast_sync, /* get_fast_sync */
|
|
no_close_handle, /* close_handle */
|
|
console_input_destroy /* destroy */
|
|
};
|
|
@@ -355,6 +366,7 @@ static int console_output_add_queue( struct object *obj, struct wait_queue_entry
|
|
static struct fd *console_output_get_fd( struct object *obj );
|
|
static struct object *console_output_open_file( struct object *obj, unsigned int access,
|
|
unsigned int sharing, unsigned int options );
|
|
+static struct fast_sync *console_output_get_fast_sync( struct object *obj );
|
|
static void console_output_destroy( struct object *obj );
|
|
|
|
static const struct object_ops console_output_ops =
|
|
@@ -377,6 +389,7 @@ static const struct object_ops console_output_ops =
|
|
default_unlink_name, /* unlink_name */
|
|
console_output_open_file, /* open_file */
|
|
no_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ console_output_get_fast_sync, /* get_fast_sync */
|
|
no_close_handle, /* close_handle */
|
|
console_output_destroy /* destroy */
|
|
};
|
|
@@ -434,6 +447,7 @@ static const struct object_ops console_connection_ops =
|
|
default_unlink_name, /* unlink_name */
|
|
console_connection_open_file, /* open_file */
|
|
no_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ no_get_fast_sync, /* get_fast_sync */
|
|
console_connection_close_handle, /* close_handle */
|
|
console_connection_destroy /* destroy */
|
|
};
|
|
@@ -516,6 +530,7 @@ static struct object *create_console(void)
|
|
console->server = NULL;
|
|
console->fd = NULL;
|
|
console->last_id = 0;
|
|
+ console->fast_sync = NULL;
|
|
init_async_queue( &console->ioctl_q );
|
|
init_async_queue( &console->read_q );
|
|
|
|
@@ -555,6 +570,7 @@ static int queue_host_ioctl( struct console_server *server, unsigned int code, u
|
|
}
|
|
list_add_tail( &server->queue, &ioctl->entry );
|
|
wake_up( &server->obj, 0 );
|
|
+ fast_set_event( server->fast_sync );
|
|
if (async) set_error( STATUS_PENDING );
|
|
return 1;
|
|
}
|
|
@@ -587,6 +603,7 @@ static void disconnect_console_server( struct console_server *server )
|
|
server->console->server = NULL;
|
|
server->console = NULL;
|
|
wake_up( &server->obj, 0 );
|
|
+ fast_set_event( server->fast_sync );
|
|
}
|
|
}
|
|
|
|
@@ -718,6 +735,8 @@ static void console_destroy( struct object *obj )
|
|
free_async_queue( &console->read_q );
|
|
if (console->fd)
|
|
release_object( console->fd );
|
|
+
|
|
+ if (console->fast_sync) release_object( console->fast_sync );
|
|
}
|
|
|
|
static struct object *create_console_connection( struct console *console )
|
|
@@ -765,6 +784,16 @@ static struct object *console_open_file( struct object *obj, unsigned int access
|
|
return grab_object( obj );
|
|
}
|
|
|
|
+static struct fast_sync *console_get_fast_sync( struct object *obj )
|
|
+{
|
|
+ struct console *console = (struct console *)obj;
|
|
+
|
|
+ if (!console->fast_sync)
|
|
+ console->fast_sync = fast_create_event( FAST_SYNC_SERVER, 1, console->signaled );
|
|
+ if (console->fast_sync) grab_object( console->fast_sync );
|
|
+ return console->fast_sync;
|
|
+}
|
|
+
|
|
static void screen_buffer_dump( struct object *obj, int verbose )
|
|
{
|
|
struct screen_buffer *screen_buffer = (struct screen_buffer *)obj;
|
|
@@ -814,6 +843,17 @@ static struct fd *screen_buffer_get_fd( struct object *obj )
|
|
return NULL;
|
|
}
|
|
|
|
+static struct fast_sync *screen_buffer_get_fast_sync( struct object *obj )
|
|
+{
|
|
+ struct screen_buffer *screen_buffer = (struct screen_buffer *)obj;
|
|
+ if (!screen_buffer->input)
|
|
+ {
|
|
+ set_error( STATUS_ACCESS_DENIED );
|
|
+ return NULL;
|
|
+ }
|
|
+ return console_get_fast_sync( &screen_buffer->input->obj );
|
|
+}
|
|
+
|
|
static void console_server_dump( struct object *obj, int verbose )
|
|
{
|
|
assert( obj->ops == &console_server_ops );
|
|
@@ -826,6 +866,7 @@ static void console_server_destroy( struct object *obj )
|
|
assert( obj->ops == &console_server_ops );
|
|
disconnect_console_server( server );
|
|
if (server->fd) release_object( server->fd );
|
|
+ if (server->fast_sync) release_object( server->fast_sync );
|
|
}
|
|
|
|
static struct object *console_server_lookup_name( struct object *obj, struct unicode_str *name,
|
|
@@ -880,6 +921,17 @@ static struct object *console_server_open_file( struct object *obj, unsigned int
|
|
return grab_object( obj );
|
|
}
|
|
|
|
+static struct fast_sync *console_server_get_fast_sync( struct object *obj )
|
|
+{
|
|
+ struct console_server *server = (struct console_server *)obj;
|
|
+ int signaled = !server->console || !list_empty( &server->queue );
|
|
+
|
|
+ if (!server->fast_sync)
|
|
+ server->fast_sync = fast_create_event( FAST_SYNC_SERVER, 1, signaled );
|
|
+ if (server->fast_sync) grab_object( server->fast_sync );
|
|
+ return server->fast_sync;
|
|
+}
|
|
+
|
|
static struct object *create_console_server( void )
|
|
{
|
|
struct console_server *server;
|
|
@@ -890,6 +942,7 @@ static struct object *create_console_server( void )
|
|
server->term_fd = -1;
|
|
list_init( &server->queue );
|
|
list_init( &server->read_queue );
|
|
+ server->fast_sync = NULL;
|
|
server->fd = alloc_pseudo_fd( &console_server_fd_ops, &server->obj, FILE_SYNCHRONOUS_IO_NONALERT );
|
|
if (!server->fd)
|
|
{
|
|
@@ -1339,6 +1392,16 @@ static struct object *console_input_open_file( struct object *obj, unsigned int
|
|
return grab_object( obj );
|
|
}
|
|
|
|
+static struct fast_sync *console_input_get_fast_sync( struct object *obj )
|
|
+{
|
|
+ if (!current->process->console)
|
|
+ {
|
|
+ set_error( STATUS_ACCESS_DENIED );
|
|
+ return NULL;
|
|
+ }
|
|
+ return console_get_fast_sync( ¤t->process->console->obj );
|
|
+}
|
|
+
|
|
static void console_input_destroy( struct object *obj )
|
|
{
|
|
struct console_input *console_input = (struct console_input *)obj;
|
|
@@ -1411,6 +1474,16 @@ static struct object *console_output_open_file( struct object *obj, unsigned int
|
|
return grab_object( obj );
|
|
}
|
|
|
|
+static struct fast_sync *console_output_get_fast_sync( struct object *obj )
|
|
+{
|
|
+ if (!current->process->console || !current->process->console->active)
|
|
+ {
|
|
+ set_error( STATUS_ACCESS_DENIED );
|
|
+ return NULL;
|
|
+ }
|
|
+ return console_get_fast_sync( ¤t->process->console->obj );
|
|
+}
|
|
+
|
|
static void console_output_destroy( struct object *obj )
|
|
{
|
|
struct console_output *console_output = (struct console_output *)obj;
|
|
@@ -1468,11 +1541,16 @@ DECL_HANDLER(get_next_console_request)
|
|
|
|
if (!server->console->renderer) server->console->renderer = current;
|
|
|
|
- if (!req->signal) server->console->signaled = 0;
|
|
+ if (!req->signal)
|
|
+ {
|
|
+ server->console->signaled = 0;
|
|
+ fast_reset_event( server->console->fast_sync );
|
|
+ }
|
|
else if (!server->console->signaled)
|
|
{
|
|
server->console->signaled = 1;
|
|
wake_up( &server->console->obj, 0 );
|
|
+ fast_set_event( server->console->fast_sync );
|
|
}
|
|
|
|
if (req->read)
|
|
@@ -1494,6 +1572,8 @@ DECL_HANDLER(get_next_console_request)
|
|
/* set result of previous ioctl */
|
|
ioctl = LIST_ENTRY( list_head( &server->queue ), struct console_host_ioctl, entry );
|
|
list_remove( &ioctl->entry );
|
|
+ if (list_empty( &server->queue ))
|
|
+ fast_reset_event( server->fast_sync );
|
|
}
|
|
|
|
if (ioctl)
|
|
@@ -1593,5 +1673,8 @@ DECL_HANDLER(get_next_console_request)
|
|
set_error( STATUS_PENDING );
|
|
}
|
|
|
|
+ if (list_empty( &server->queue ))
|
|
+ fast_reset_event( server->fast_sync );
|
|
+
|
|
release_object( server );
|
|
}
|
|
diff --git a/server/debugger.c b/server/debugger.c
|
|
index 9b814469b19..5e44579e0a7 100644
|
|
--- a/server/debugger.c
|
|
+++ b/server/debugger.c
|
|
@@ -72,6 +72,7 @@ struct debug_obj
|
|
struct object obj; /* object header */
|
|
struct list event_queue; /* pending events queue */
|
|
unsigned int flags; /* debug flags */
|
|
+ struct fast_sync *fast_sync; /* fast synchronization object */
|
|
};
|
|
|
|
|
|
@@ -99,12 +100,14 @@ static const struct object_ops debug_event_ops =
|
|
NULL, /* unlink_name */
|
|
no_open_file, /* open_file */
|
|
no_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ no_get_fast_sync, /* get_fast_sync */
|
|
no_close_handle, /* close_handle */
|
|
debug_event_destroy /* destroy */
|
|
};
|
|
|
|
static void debug_obj_dump( struct object *obj, int verbose );
|
|
static int debug_obj_signaled( struct object *obj, struct wait_queue_entry *entry );
|
|
+static struct fast_sync *debug_obj_get_fast_sync( struct object *obj );
|
|
static void debug_obj_destroy( struct object *obj );
|
|
|
|
static const struct object_ops debug_obj_ops =
|
|
@@ -127,6 +130,7 @@ static const struct object_ops debug_obj_ops =
|
|
default_unlink_name, /* unlink_name */
|
|
no_open_file, /* open_file */
|
|
no_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ debug_obj_get_fast_sync, /* get_fast_sync */
|
|
no_close_handle, /* close_handle */
|
|
debug_obj_destroy /* destroy */
|
|
};
|
|
@@ -254,6 +258,7 @@ static void link_event( struct debug_obj *debug_obj, struct debug_event *event )
|
|
/* grab reference since debugger could be killed while trying to wake up */
|
|
grab_object( debug_obj );
|
|
wake_up( &debug_obj->obj, 0 );
|
|
+ fast_set_event( debug_obj->fast_sync );
|
|
release_object( debug_obj );
|
|
}
|
|
}
|
|
@@ -266,6 +271,7 @@ static void resume_event( struct debug_obj *debug_obj, struct debug_event *event
|
|
{
|
|
grab_object( debug_obj );
|
|
wake_up( &debug_obj->obj, 0 );
|
|
+ fast_set_event( debug_obj->fast_sync );
|
|
release_object( debug_obj );
|
|
}
|
|
}
|
|
@@ -331,6 +337,17 @@ static int debug_obj_signaled( struct object *obj, struct wait_queue_entry *entr
|
|
return find_event_to_send( debug_obj ) != NULL;
|
|
}
|
|
|
|
+static struct fast_sync *debug_obj_get_fast_sync( struct object *obj )
|
|
+{
|
|
+ struct debug_obj *debug_obj = (struct debug_obj *)obj;
|
|
+ int signaled = find_event_to_send( debug_obj ) != NULL;
|
|
+
|
|
+ if (!debug_obj->fast_sync)
|
|
+ debug_obj->fast_sync = fast_create_event( FAST_SYNC_SERVER, 1, signaled );
|
|
+ if (debug_obj->fast_sync) grab_object( debug_obj->fast_sync );
|
|
+ return debug_obj->fast_sync;
|
|
+}
|
|
+
|
|
static void debug_obj_destroy( struct object *obj )
|
|
{
|
|
struct list *ptr;
|
|
@@ -343,6 +360,8 @@ static void debug_obj_destroy( struct object *obj )
|
|
/* free all pending events */
|
|
while ((ptr = list_head( &debug_obj->event_queue )))
|
|
unlink_event( debug_obj, LIST_ENTRY( ptr, struct debug_event, entry ));
|
|
+
|
|
+ if (debug_obj->fast_sync) release_object( debug_obj->fast_sync );
|
|
}
|
|
|
|
struct debug_obj *get_debug_obj( struct process *process, obj_handle_t handle, unsigned int access )
|
|
@@ -362,6 +381,7 @@ static struct debug_obj *create_debug_obj( struct object *root, const struct uni
|
|
{
|
|
debug_obj->flags = flags;
|
|
list_init( &debug_obj->event_queue );
|
|
+ debug_obj->fast_sync = NULL;
|
|
}
|
|
}
|
|
return debug_obj;
|
|
@@ -570,6 +590,9 @@ DECL_HANDLER(wait_debug_event)
|
|
reply->tid = get_thread_id( event->sender );
|
|
alloc_event_handles( event, current->process );
|
|
set_reply_data( &event->data, min( get_reply_max_size(), sizeof(event->data) ));
|
|
+
|
|
+ if (!find_event_to_send( debug_obj ))
|
|
+ fast_reset_event( debug_obj->fast_sync );
|
|
}
|
|
else
|
|
{
|
|
diff --git a/server/device.c b/server/device.c
|
|
index 6400751e339..b3f4329d11b 100644
|
|
--- a/server/device.c
|
|
+++ b/server/device.c
|
|
@@ -80,6 +80,7 @@ static const struct object_ops irp_call_ops =
|
|
NULL, /* unlink_name */
|
|
no_open_file, /* open_file */
|
|
no_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ no_get_fast_sync, /* get_fast_sync */
|
|
no_close_handle, /* close_handle */
|
|
irp_call_destroy /* destroy */
|
|
};
|
|
@@ -94,10 +95,12 @@ struct device_manager
|
|
struct list requests; /* list of pending irps across all devices */
|
|
struct irp_call *current_call; /* call currently executed on client side */
|
|
struct wine_rb_tree kernel_objects; /* map of objects that have client side pointer associated */
|
|
+ struct fast_sync *fast_sync; /* fast synchronization object */
|
|
};
|
|
|
|
static void device_manager_dump( struct object *obj, int verbose );
|
|
static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry );
|
|
+static struct fast_sync *device_manager_get_fast_sync( struct object *obj );
|
|
static void device_manager_destroy( struct object *obj );
|
|
|
|
static const struct object_ops device_manager_ops =
|
|
@@ -120,6 +123,7 @@ static const struct object_ops device_manager_ops =
|
|
NULL, /* unlink_name */
|
|
no_open_file, /* open_file */
|
|
no_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ device_manager_get_fast_sync, /* get_fast_sync */
|
|
no_close_handle, /* close_handle */
|
|
device_manager_destroy /* destroy */
|
|
};
|
|
@@ -177,6 +181,7 @@ static const struct object_ops device_ops =
|
|
default_unlink_name, /* unlink_name */
|
|
device_open_file, /* open_file */
|
|
device_get_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ no_get_fast_sync, /* get_fast_sync */
|
|
no_close_handle, /* close_handle */
|
|
device_destroy /* destroy */
|
|
};
|
|
@@ -229,6 +234,7 @@ static const struct object_ops device_file_ops =
|
|
NULL, /* unlink_name */
|
|
no_open_file, /* open_file */
|
|
device_file_get_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ default_fd_get_fast_sync, /* get_fast_sync */
|
|
device_file_close_handle, /* close_handle */
|
|
device_file_destroy /* destroy */
|
|
};
|
|
@@ -439,7 +445,12 @@ static void add_irp_to_queue( struct device_manager *manager, struct irp_call *i
|
|
irp->thread = thread ? (struct thread *)grab_object( thread ) : NULL;
|
|
if (irp->file) list_add_tail( &irp->file->requests, &irp->dev_entry );
|
|
list_add_tail( &manager->requests, &irp->mgr_entry );
|
|
- if (list_head( &manager->requests ) == &irp->mgr_entry) wake_up( &manager->obj, 0 ); /* first one */
|
|
+ if (list_head( &manager->requests ) == &irp->mgr_entry)
|
|
+ {
|
|
+ /* first one */
|
|
+ wake_up( &manager->obj, 0 );
|
|
+ fast_set_event( manager->fast_sync );
|
|
+ }
|
|
}
|
|
|
|
static struct object *device_open_file( struct object *obj, unsigned int access,
|
|
@@ -775,6 +786,9 @@ static void delete_file( struct device_file *file )
|
|
set_irp_result( irp, STATUS_FILE_DELETED, NULL, 0, 0 );
|
|
}
|
|
|
|
+ if (list_empty( &file->device->manager->requests ))
|
|
+ fast_reset_event( file->device->manager->fast_sync );
|
|
+
|
|
release_object( file );
|
|
}
|
|
|
|
@@ -806,6 +820,16 @@ static int device_manager_signaled( struct object *obj, struct wait_queue_entry
|
|
return !list_empty( &manager->requests );
|
|
}
|
|
|
|
+static struct fast_sync *device_manager_get_fast_sync( struct object *obj )
|
|
+{
|
|
+ struct device_manager *manager = (struct device_manager *)obj;
|
|
+
|
|
+ if (!manager->fast_sync)
|
|
+ manager->fast_sync = fast_create_event( FAST_SYNC_SERVER, 1, !list_empty( &manager->requests ) );
|
|
+ if (manager->fast_sync) grab_object( manager->fast_sync );
|
|
+ return manager->fast_sync;
|
|
+}
|
|
+
|
|
static void device_manager_destroy( struct object *obj )
|
|
{
|
|
struct device_manager *manager = (struct device_manager *)obj;
|
|
@@ -840,6 +864,8 @@ static void device_manager_destroy( struct object *obj )
|
|
assert( !irp->file && !irp->async );
|
|
release_object( irp );
|
|
}
|
|
+
|
|
+ if (manager->fast_sync) release_object( manager->fast_sync );
|
|
}
|
|
|
|
static struct device_manager *create_device_manager(void)
|
|
@@ -849,6 +875,7 @@ static struct device_manager *create_device_manager(void)
|
|
if ((manager = alloc_object( &device_manager_ops )))
|
|
{
|
|
manager->current_call = NULL;
|
|
+ manager->fast_sync = NULL;
|
|
list_init( &manager->devices );
|
|
list_init( &manager->requests );
|
|
wine_rb_init( &manager->kernel_objects, compare_kernel_object );
|
|
@@ -1014,6 +1041,10 @@ DECL_HANDLER(get_next_device_request)
|
|
iosb->in_size = 0;
|
|
list_remove( &irp->mgr_entry );
|
|
list_init( &irp->mgr_entry );
|
|
+
|
|
+ if (list_empty( &manager->requests ))
|
|
+ fast_reset_event( manager->fast_sync );
|
|
+
|
|
/* we already own the object if it's only on manager queue */
|
|
if (irp->file) grab_object( irp );
|
|
manager->current_call = irp;
|
|
diff --git a/server/directory.c b/server/directory.c
|
|
index 30d69459984..4a0ec8867f8 100644
|
|
--- a/server/directory.c
|
|
+++ b/server/directory.c
|
|
@@ -82,6 +82,7 @@ static const struct object_ops object_type_ops =
|
|
default_unlink_name, /* unlink_name */
|
|
no_open_file, /* open_file */
|
|
no_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ no_get_fast_sync, /* get_fast_sync */
|
|
no_close_handle, /* close_handle */
|
|
no_destroy /* destroy */
|
|
};
|
|
@@ -132,6 +133,7 @@ static const struct object_ops directory_ops =
|
|
default_unlink_name, /* unlink_name */
|
|
no_open_file, /* open_file */
|
|
no_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ no_get_fast_sync, /* get_fast_sync */
|
|
no_close_handle, /* close_handle */
|
|
directory_destroy /* destroy */
|
|
};
|
|
diff --git a/server/event.c b/server/event.c
|
|
index ccdd465dd5b..857bc1ab23b 100644
|
|
--- a/server/event.c
|
|
+++ b/server/event.c
|
|
@@ -56,6 +56,7 @@ struct event
|
|
struct list kernel_object; /* list of kernel object pointers */
|
|
int manual_reset; /* is it a manual reset event? */
|
|
int signaled; /* event has been signaled */
|
|
+ struct fast_sync *fast_sync; /* fast synchronization object */
|
|
};
|
|
|
|
static void event_dump( struct object *obj, int verbose );
|
|
@@ -63,6 +64,8 @@ static int event_signaled( struct object *obj, struct wait_queue_entry *entry );
|
|
static void event_satisfied( struct object *obj, struct wait_queue_entry *entry );
|
|
static int event_signal( struct object *obj, unsigned int access);
|
|
static struct list *event_get_kernel_obj_list( struct object *obj );
|
|
+static struct fast_sync *event_get_fast_sync( struct object *obj );
|
|
+static void event_destroy( struct object *obj );
|
|
|
|
static const struct object_ops event_ops =
|
|
{
|
|
@@ -84,8 +87,9 @@ static const struct object_ops event_ops =
|
|
default_unlink_name, /* unlink_name */
|
|
no_open_file, /* open_file */
|
|
event_get_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ event_get_fast_sync, /* get_fast_sync */
|
|
no_close_handle, /* close_handle */
|
|
- no_destroy /* destroy */
|
|
+ event_destroy /* destroy */
|
|
};
|
|
|
|
|
|
@@ -106,10 +110,13 @@ struct type_descr keyed_event_type =
|
|
struct keyed_event
|
|
{
|
|
struct object obj; /* object header */
|
|
+ struct fast_sync *fast_sync; /* fast synchronization object */
|
|
};
|
|
|
|
static void keyed_event_dump( struct object *obj, int verbose );
|
|
static int keyed_event_signaled( struct object *obj, struct wait_queue_entry *entry );
|
|
+static struct fast_sync *keyed_event_get_fast_sync( struct object *obj );
|
|
+static void keyed_event_destroy( struct object *obj );
|
|
|
|
static const struct object_ops keyed_event_ops =
|
|
{
|
|
@@ -131,8 +138,9 @@ static const struct object_ops keyed_event_ops =
|
|
default_unlink_name, /* unlink_name */
|
|
no_open_file, /* open_file */
|
|
no_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ keyed_event_get_fast_sync, /* get_fast_sync */
|
|
no_close_handle, /* close_handle */
|
|
- no_destroy /* destroy */
|
|
+ keyed_event_destroy /* destroy */
|
|
};
|
|
|
|
|
|
@@ -150,6 +158,7 @@ struct event *create_event( struct object *root, const struct unicode_str *name,
|
|
list_init( &event->kernel_object );
|
|
event->manual_reset = manual_reset;
|
|
event->signaled = initial_state;
|
|
+ event->fast_sync = NULL;
|
|
}
|
|
}
|
|
return event;
|
|
@@ -173,11 +182,13 @@ void set_event( struct event *event )
|
|
event->signaled = 1;
|
|
/* wake up all waiters if manual reset, a single one otherwise */
|
|
wake_up( &event->obj, !event->manual_reset );
|
|
+ fast_set_event( event->fast_sync );
|
|
}
|
|
|
|
void reset_event( struct event *event )
|
|
{
|
|
event->signaled = 0;
|
|
+ fast_reset_event( event->fast_sync );
|
|
}
|
|
|
|
static void event_dump( struct object *obj, int verbose )
|
|
@@ -223,6 +234,23 @@ static struct list *event_get_kernel_obj_list( struct object *obj )
|
|
return &event->kernel_object;
|
|
}
|
|
|
|
+static struct fast_sync *event_get_fast_sync( struct object *obj )
|
|
+{
|
|
+ struct event *event = (struct event *)obj;
|
|
+
|
|
+ if (!event->fast_sync)
|
|
+ event->fast_sync = fast_create_event( FAST_SYNC_EVENT, event->manual_reset, event->signaled );
|
|
+ if (event->fast_sync) grab_object( event->fast_sync );
|
|
+ return event->fast_sync;
|
|
+}
|
|
+
|
|
+static void event_destroy( struct object *obj )
|
|
+{
|
|
+ struct event *event = (struct event *)obj;
|
|
+
|
|
+ if (event->fast_sync) release_object( event->fast_sync );
|
|
+}
|
|
+
|
|
struct keyed_event *create_keyed_event( struct object *root, const struct unicode_str *name,
|
|
unsigned int attr, const struct security_descriptor *sd )
|
|
{
|
|
@@ -233,6 +261,7 @@ struct keyed_event *create_keyed_event( struct object *root, const struct unicod
|
|
if (get_error() != STATUS_OBJECT_NAME_EXISTS)
|
|
{
|
|
/* initialize it if it didn't already exist */
|
|
+ event->fast_sync = NULL;
|
|
}
|
|
}
|
|
return event;
|
|
@@ -276,6 +305,23 @@ static int keyed_event_signaled( struct object *obj, struct wait_queue_entry *en
|
|
return 0;
|
|
}
|
|
|
|
+static struct fast_sync *keyed_event_get_fast_sync( struct object *obj )
|
|
+{
|
|
+ struct keyed_event *event = (struct keyed_event *)obj;
|
|
+
|
|
+ if (!event->fast_sync)
|
|
+ event->fast_sync = fast_create_event( FAST_SYNC_SERVER, 1, 1 );
|
|
+ if (event->fast_sync) grab_object( event->fast_sync );
|
|
+ return event->fast_sync;
|
|
+}
|
|
+
|
|
+static void keyed_event_destroy( struct object *obj )
|
|
+{
|
|
+ struct keyed_event *event = (struct keyed_event *)obj;
|
|
+
|
|
+ if (event->fast_sync) release_object( event->fast_sync );
|
|
+}
|
|
+
|
|
/* create an event */
|
|
DECL_HANDLER(create_event)
|
|
{
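The new server/fast_sync.c below talks to the out-of-tree /dev/winesync character device directly through ioctls. As a rough stand-alone illustration of the subset of that interface used here (events are modelled as semaphores with max = 1, auto-reset events additionally set WINESYNC_SEM_GETONWAIT), assuming the same linux/winesync.h the patch builds against:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/winesync.h>

int main(void)
{
    struct winesync_sem_args args = {0};
    int fd = open( "/dev/winesync", O_CLOEXEC | O_RDONLY );

    if (fd == -1) { perror( "open" ); return 1; }

    args.count = 0;   /* initially nonsignaled */
    args.max = 1;     /* manual-reset event; add WINESYNC_SEM_GETONWAIT for auto-reset */
    if (ioctl( fd, WINESYNC_IOC_CREATE_SEM, &args ) < 0) { perror( "create" ); return 1; }

    args.count = 1;
    ioctl( fd, WINESYNC_IOC_PUT_SEM, &args );      /* set the event, as in fast_set_event() */
    ioctl( fd, WINESYNC_IOC_GET_SEM, &args.sem );  /* reset it, as in fast_reset_event() */

    ioctl( fd, WINESYNC_IOC_DELETE, &args.sem );   /* as in fast_sync_destroy() */
    close( fd );
    return 0;
}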
diff --git a/server/fast_sync.c b/server/fast_sync.c
|
|
new file mode 100644
|
|
index 00000000000..f0dae236ca3
|
|
--- /dev/null
|
|
+++ b/server/fast_sync.c
|
|
@@ -0,0 +1,427 @@
|
|
+/*
|
|
+ * Fast synchronization primitives
|
|
+ *
|
|
+ * Copyright (C) 2021 Zebediah Figura for CodeWeavers
|
|
+ *
|
|
+ * This library is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU Lesser General Public
|
|
+ * License as published by the Free Software Foundation; either
|
|
+ * version 2.1 of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This library is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
+ * Lesser General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU Lesser General Public
|
|
+ * License along with this library; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
|
|
+ */
|
|
+
|
|
+#include "config.h"
|
|
+
|
|
+#include <assert.h>
|
|
+#include <fcntl.h>
|
|
+#ifdef HAVE_LINUX_WINESYNC_H
|
|
+# include <linux/winesync.h>
|
|
+#endif
|
|
+#include <stdio.h>
|
|
+#ifdef HAVE_SYS_IOCTL_H
|
|
+# include <sys/ioctl.h>
|
|
+#endif
|
|
+#include <sys/stat.h>
|
|
+#include <unistd.h>
|
|
+
|
|
+#include "ntstatus.h"
|
|
+#define WIN32_NO_STATUS
|
|
+#include "winternl.h"
|
|
+
|
|
+#include "file.h"
|
|
+#include "handle.h"
|
|
+#include "request.h"
|
|
+#include "thread.h"
|
|
+
|
|
+#ifdef HAVE_LINUX_WINESYNC_H
|
|
+
|
|
+struct fast_sync_device
|
|
+{
|
|
+ struct object obj; /* object header */
|
|
+ struct fd *fd; /* fd for unix fd */
|
|
+};
|
|
+
|
|
+static struct fast_sync_device *fast_sync_device_object;
|
|
+
|
|
+static void fast_sync_device_dump( struct object *obj, int verbose );
|
|
+static struct fd *fast_sync_device_get_fd( struct object *obj );
|
|
+static void fast_sync_device_destroy( struct object *obj );
|
|
+static enum server_fd_type fast_sync_device_get_fd_type( struct fd *fd );
|
|
+
|
|
+static const struct object_ops fast_sync_device_ops =
|
|
+{
|
|
+ sizeof(struct fast_sync_device), /* size */
|
|
+ &no_type, /* type */
|
|
+ fast_sync_device_dump, /* dump */
|
|
+ no_add_queue, /* add_queue */
|
|
+ NULL, /* remove_queue */
|
|
+ NULL, /* signaled */
|
|
+ NULL, /* satisfied */
|
|
+ no_signal, /* signal */
|
|
+ fast_sync_device_get_fd, /* get_fd */
|
|
+ default_map_access, /* map_access */
|
|
+ default_get_sd, /* get_sd */
|
|
+ default_set_sd, /* set_sd */
|
|
+ no_get_full_name, /* get_full_name */
|
|
+ no_lookup_name, /* lookup_name */
|
|
+ no_link_name, /* link_name */
|
|
+ NULL, /* unlink_name */
|
|
+ no_open_file, /* open_file */
|
|
+ no_kernel_obj_list, /* get_kernel_obj_list */
|
|
+ no_get_fast_sync, /* get_fast_sync */
|
|
+ no_close_handle, /* close_handle */
|
|
+ fast_sync_device_destroy /* destroy */
|
|
+};
|
|
+
|
|
+static const struct fd_ops fast_sync_device_fd_ops =
|
|
+{
|
|
+ default_fd_get_poll_events, /* get_poll_events */
|
|
+ default_poll_event, /* poll_event */
|
|
+ fast_sync_device_get_fd_type, /* get_fd_type */
|
|
+ no_fd_read, /* read */
|
|
+ no_fd_write, /* write */
|
|
+ no_fd_flush, /* flush */
|
|
+ no_fd_get_file_info, /* get_file_info */
|
|
+ no_fd_get_volume_info, /* get_volume_info */
|
|
+ no_fd_ioctl, /* ioctl */
|
|
+ no_fd_queue_async, /* queue_async */
|
|
+ default_fd_reselect_async /* reselect_async */
|
|
+};
+
+static void fast_sync_device_dump( struct object *obj, int verbose )
+{
+ struct fast_sync_device *device = (struct fast_sync_device *)obj;
+ assert( obj->ops == &fast_sync_device_ops );
+ fprintf( stderr, "Fast synchronization device fd=%p\n", device->fd );
+}
+
+static struct fd *fast_sync_device_get_fd( struct object *obj )
+{
+ struct fast_sync_device *device = (struct fast_sync_device *)obj;
+ return (struct fd *)grab_object( device->fd );
+}
+
+static void fast_sync_device_destroy( struct object *obj )
+{
+ struct fast_sync_device *device = (struct fast_sync_device *)obj;
+ assert( obj->ops == &fast_sync_device_ops );
+ if (device->fd) release_object( device->fd );
+ fast_sync_device_object = NULL;
+}
+
+static enum server_fd_type fast_sync_device_get_fd_type( struct fd *fd )
+{
+ return FD_TYPE_FILE;
+}
+
+static struct fast_sync_device *get_fast_sync_device(void)
+{
+ struct fast_sync_device *device;
+ int unix_fd;
+
+ if (getenv( "WINE_DISABLE_FAST_SYNC" ) && atoi( getenv( "WINE_DISABLE_FAST_SYNC" ) ))
+ {
+ set_error( STATUS_NOT_IMPLEMENTED );
+ return NULL;
+ }
+
+ if (fast_sync_device_object)
+ return (struct fast_sync_device *)grab_object( fast_sync_device_object );
+
+ unix_fd = open( "/dev/winesync", O_CLOEXEC | O_RDONLY );
+ if (unix_fd == -1)
+ {
+ file_set_error();
+ return NULL;
+ }
+
+ if (!(device = alloc_object( &fast_sync_device_ops )))
+ {
+ close( unix_fd );
+ set_error( STATUS_NO_MEMORY );
+ return NULL;
+ }
+
+ if (!(device->fd = create_anonymous_fd( &fast_sync_device_fd_ops, unix_fd, &device->obj, 0 )))
+ {
+ release_object( device );
+ set_error( STATUS_NO_MEMORY );
+ return NULL;
+ }
+
+ fast_sync_device_object = device;
+ return device;
+}
+
+struct fast_sync
+{
+ struct object obj;
+ struct fast_sync_device *device;
+ enum fast_sync_type type;
+ unsigned int linux_obj;
+};
+
+static void fast_sync_dump( struct object *obj, int verbose );
+static void fast_sync_destroy( struct object *obj );
+
+static const struct object_ops fast_sync_ops =
+{
+ sizeof(struct fast_sync), /* size */
+ &no_type, /* type */
+ fast_sync_dump, /* dump */
+ no_add_queue, /* add_queue */
+ NULL, /* remove_queue */
+ NULL, /* signaled */
+ NULL, /* satisfied */
+ no_signal, /* signal */
+ no_get_fd, /* get_fd */
+ default_map_access, /* map_access */
+ default_get_sd, /* get_sd */
+ default_set_sd, /* set_sd */
+ no_get_full_name, /* get_full_name */
+ no_lookup_name, /* lookup_name */
+ no_link_name, /* link_name */
+ NULL, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+ fast_sync_destroy /* destroy */
+};
+
+static void fast_sync_dump( struct object *obj, int verbose )
+{
+ struct fast_sync *fast_sync = (struct fast_sync *)obj;
+ assert( obj->ops == &fast_sync_ops );
+ fprintf( stderr, "Fast synchronization object type=%u linux_obj=%u\n",
+ fast_sync->type, fast_sync->linux_obj );
+}
+
+static void fast_sync_destroy( struct object *obj )
+{
+ struct fast_sync *fast_sync = (struct fast_sync *)obj;
+
+ ioctl( get_unix_fd( fast_sync->device->fd ), WINESYNC_IOC_DELETE, &fast_sync->linux_obj );
+ release_object( fast_sync->device );
+}
+
+struct fast_sync *fast_create_event( enum fast_sync_type type, int manual_reset, int signaled )
+{
+ struct winesync_sem_args args = {0};
+ struct fast_sync_device *device;
+ struct fast_sync *fast_sync;
+
+ if (!(device = get_fast_sync_device())) return NULL;
+
+ args.count = signaled;
+ args.max = 1;
+ if (!manual_reset)
+ args.flags |= WINESYNC_SEM_GETONWAIT;
+ if (ioctl( get_unix_fd( device->fd ), WINESYNC_IOC_CREATE_SEM, &args ) < 0)
+ {
+ file_set_error();
+ release_object( device );
+ return NULL;
+ }
+
+ if (!(fast_sync = alloc_object( &fast_sync_ops ))) return NULL;
+
+ /* transfer our device reference to the fast sync object */
+ fast_sync->device = device;
+ fast_sync->type = type;
+ fast_sync->linux_obj = args.sem;
+
+ return fast_sync;
+}
+
+struct fast_sync *fast_create_semaphore( unsigned int count, unsigned int max )
+{
+ struct winesync_sem_args args = {0};
+ struct fast_sync_device *device;
+ struct fast_sync *fast_sync;
+
+ if (!(device = get_fast_sync_device())) return NULL;
+
+ args.count = count;
+ args.max = max;
+ args.flags = WINESYNC_SEM_GETONWAIT;
+ if (ioctl( get_unix_fd( device->fd ), WINESYNC_IOC_CREATE_SEM, &args ) < 0)
+ {
+ file_set_error();
+ release_object( device );
+ return NULL;
+ }
+
+ if (!(fast_sync = alloc_object( &fast_sync_ops ))) return NULL;
+
+ /* transfer our device reference to the fast sync object */
+ fast_sync->device = device;
+ fast_sync->type = FAST_SYNC_SEMAPHORE;
+ fast_sync->linux_obj = args.sem;
+
+ return fast_sync;
+}
+
+struct fast_sync *fast_create_mutex( thread_id_t owner, unsigned int count )
+{
+ struct winesync_mutex_args args = {0};
+ struct fast_sync_device *device;
+ struct fast_sync *fast_sync;
+
+ if (!(device = get_fast_sync_device())) return NULL;
+
+ args.owner = owner;
+ args.count = count;
+ if (ioctl( get_unix_fd( device->fd ), WINESYNC_IOC_CREATE_MUTEX, &args ) < 0)
+ {
+ file_set_error();
+ release_object( device );
+ return NULL;
+ }
+
+ if (!(fast_sync = alloc_object( &fast_sync_ops ))) return NULL;
+
+ /* transfer our device reference to the fast sync object */
+ fast_sync->device = device;
+ fast_sync->type = FAST_SYNC_MUTEX;
+ fast_sync->linux_obj = args.mutex;
+
+ return fast_sync;
+}
+
+void fast_set_event( struct fast_sync *fast_sync )
+{
+ struct winesync_sem_args args = {0};
+
+ if (!fast_sync) return;
+
+ if (debug_level) fprintf( stderr, "fast_set_event %u\n", fast_sync->linux_obj );
+
+ args.sem = fast_sync->linux_obj;
+ args.count = 1;
+ ioctl( get_unix_fd( fast_sync->device->fd ), WINESYNC_IOC_PUT_SEM, &args );
+}
+
+void fast_reset_event( struct fast_sync *fast_sync )
+{
+ if (!fast_sync) return;
+
+ if (debug_level) fprintf( stderr, "fast_reset_event %u\n", fast_sync->linux_obj );
+
+ ioctl( get_unix_fd( fast_sync->device->fd ), WINESYNC_IOC_GET_SEM, &fast_sync->linux_obj );
+}
+
+void fast_abandon_mutexes( thread_id_t tid )
+{
+ struct fast_sync_device *device;
+
+ if (!(device = get_fast_sync_device()))
+ {
+ clear_error();
+ return;
+ }
+
+ ioctl( get_unix_fd( device->fd ), WINESYNC_IOC_KILL_OWNER, &tid );
+ release_object( device );
+}
+
+int fast_user_apc_needs_signal(void)
+{
+ return !!fast_sync_device_object;
+}
+
+#else
+
+static struct fast_sync_device *get_fast_sync_device(void)
+{
+ set_error( STATUS_NOT_IMPLEMENTED );
+ return NULL;
+}
+
+struct fast_sync *fast_create_event( enum fast_sync_type type, int manual_reset, int signaled )
+{
+ set_error( STATUS_NOT_IMPLEMENTED );
+ return NULL;
+}
+
+struct fast_sync *fast_create_semaphore( unsigned int count, unsigned int max )
+{
+ set_error( STATUS_NOT_IMPLEMENTED );
+ return NULL;
+}
+
+struct fast_sync *fast_create_mutex( thread_id_t owner, unsigned int count )
+{
+ set_error( STATUS_NOT_IMPLEMENTED );
+ return NULL;
+}
+
+void fast_set_event( struct fast_sync *fast_sync )
+{
+}
+
+void fast_reset_event( struct fast_sync *obj )
+{
+}
+
+void fast_abandon_mutexes( thread_id_t tid )
+{
+}
+
+int fast_user_apc_needs_signal(void)
+{
+ return 0;
+}
+
+#endif
+
+DECL_HANDLER(get_fast_sync_device)
+{
+#ifdef HAVE_LINUX_WINESYNC_H
+ struct fast_sync_device *device;
+
+ if ((device = get_fast_sync_device()))
+ {
+ reply->handle = alloc_handle( current->process, device, 0, 0 );
+ release_object( device );
+ }
+#else
+ set_error( STATUS_NOT_IMPLEMENTED );
+#endif
+}
+
+DECL_HANDLER(get_fast_sync_obj)
+{
+#ifdef HAVE_LINUX_WINESYNC_H
+ struct object *obj;
+ static int once;
+
+ if (!once++)
+ fprintf( stderr, "wine: using fast synchronization.\n" );
+
+ if ((obj = get_handle_obj( current->process, req->handle, 0, NULL )))
+ {
+ struct fast_sync *fast_sync;
+
+ if ((fast_sync = obj->ops->get_fast_sync( obj )))
+ {
+ reply->handle = alloc_handle( current->process, fast_sync, 0, 0 );
+ reply->obj = fast_sync->linux_obj;
+ reply->type = fast_sync->type;
+ reply->access = get_handle_access( current->process, req->handle );
+ release_object( fast_sync );
+ }
+ release_object( obj );
+ }
+#else
+ set_error( STATUS_NOT_IMPLEMENTED );
+#endif
+}
diff --git a/server/fd.c b/server/fd.c
index bfa2805d82b..2b7ed1b752e 100644
--- a/server/fd.c
+++ b/server/fd.c
@@ -199,6 +199,7 @@ struct fd
struct completion *completion; /* completion object attached to this fd */
apc_param_t comp_key; /* completion key to set in completion events */
unsigned int comp_flags; /* completion flags */
+ struct fast_sync *fast_sync; /* fast synchronization object */
};

static void fd_dump( struct object *obj, int verbose );
@@ -224,6 +225,7 @@ static const struct object_ops fd_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
fd_destroy /* destroy */
};
@@ -265,6 +267,7 @@ static const struct object_ops device_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
device_destroy /* destroy */
};
@@ -305,6 +308,7 @@ static const struct object_ops inode_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
inode_destroy /* destroy */
};
@@ -347,6 +351,7 @@ static const struct object_ops file_lock_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
no_destroy /* destroy */
};
@@ -1580,6 +1585,7 @@ static void fd_destroy( struct object *obj )
if (fd->unix_fd != -1) close( fd->unix_fd );
free( fd->unix_name );
}
+ if (fd->fast_sync) release_object( fd->fast_sync );
}

/* check if the desired access is possible without violating */
@@ -1696,6 +1702,7 @@ static struct fd *alloc_fd_object(void)
fd->poll_index = -1;
fd->completion = NULL;
fd->comp_flags = 0;
+ fd->fast_sync = NULL;
init_async_queue( &fd->read_q );
init_async_queue( &fd->write_q );
init_async_queue( &fd->wait_q );
@@ -1734,6 +1741,7 @@ struct fd *alloc_pseudo_fd( const struct fd_ops *fd_user_ops, struct object *use
fd->poll_index = -1;
fd->completion = NULL;
fd->comp_flags = 0;
+ fd->fast_sync = NULL;
fd->no_fd_status = STATUS_BAD_DEVICE_TYPE;
init_async_queue( &fd->read_q );
init_async_queue( &fd->write_q );
@@ -2118,7 +2126,15 @@ void set_fd_signaled( struct fd *fd, int signaled )
{
if (fd->comp_flags & FILE_SKIP_SET_EVENT_ON_HANDLE) return;
fd->signaled = signaled;
- if (signaled) wake_up( fd->user, 0 );
+ if (signaled)
+ {
+ wake_up( fd->user, 0 );
+ fast_set_event( fd->fast_sync );
+ }
+ else
+ {
+ fast_reset_event( fd->fast_sync );
+ }
}

/* check if events are pending and if yes return which one(s) */
@@ -2144,6 +2160,19 @@ int default_fd_signaled( struct object *obj, struct wait_queue_entry *entry )
return ret;
}

+struct fast_sync *default_fd_get_fast_sync( struct object *obj )
+{
+ struct fd *fd = get_obj_fd( obj );
+ struct fast_sync *ret;
+
+ if (!fd->fast_sync)
+ fd->fast_sync = fast_create_event( FAST_SYNC_SERVER, 1, fd->signaled );
+ ret = fd->fast_sync;
+ release_object( fd );
+ if (ret) grab_object( ret );
+ return ret;
+}
+
int default_fd_get_poll_events( struct fd *fd )
{
int events = 0;
diff --git a/server/file.c b/server/file.c
index aff4d9e09e1..8c684141fa6 100644
--- a/server/file.c
+++ b/server/file.c
@@ -109,6 +109,7 @@ static const struct object_ops file_ops =
NULL, /* unlink_name */
file_open_file, /* open_file */
file_get_kernel_obj_list, /* get_kernel_obj_list */
+ default_fd_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
file_destroy /* destroy */
};
diff --git a/server/file.h b/server/file.h
index b8bc7645b19..0989bae1d87 100644
--- a/server/file.h
+++ b/server/file.h
@@ -104,6 +104,7 @@ extern char *dup_fd_name( struct fd *root, const char *name );
extern void get_nt_name( struct fd *fd, struct unicode_str *name );

extern int default_fd_signaled( struct object *obj, struct wait_queue_entry *entry );
+extern struct fast_sync *default_fd_get_fast_sync( struct object *obj );
extern int default_fd_get_poll_events( struct fd *fd );
extern void default_poll_event( struct fd *fd, int event );
extern void fd_queue_async( struct fd *fd, struct async *async, int type );
diff --git a/server/handle.c b/server/handle.c
index d86f0960ccf..6cf388cd1d5 100644
--- a/server/handle.c
+++ b/server/handle.c
@@ -138,6 +138,7 @@ static const struct object_ops handle_table_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
handle_table_destroy /* destroy */
};
diff --git a/server/hook.c b/server/hook.c
index c048908c295..8d1f7895313 100644
--- a/server/hook.c
+++ b/server/hook.c
@@ -93,6 +93,7 @@ static const struct object_ops hook_table_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
hook_table_destroy /* destroy */
};
diff --git a/server/mailslot.c b/server/mailslot.c
index d4b2fd1b562..88aee771e5e 100644
--- a/server/mailslot.c
+++ b/server/mailslot.c
@@ -90,6 +90,7 @@ static const struct object_ops mailslot_ops =
default_unlink_name, /* unlink_name */
mailslot_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ default_fd_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
mailslot_destroy /* destroy */
};
@@ -148,6 +149,7 @@ static const struct object_ops mail_writer_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
mail_writer_destroy /* destroy */
};
@@ -210,6 +212,7 @@ static const struct object_ops mailslot_device_ops =
default_unlink_name, /* unlink_name */
mailslot_device_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
mailslot_device_destroy /* destroy */
};
@@ -240,6 +243,7 @@ static const struct object_ops mailslot_device_file_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ default_fd_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
mailslot_device_file_destroy /* destroy */
};
diff --git a/server/mapping.c b/server/mapping.c
index 9cb24a4213f..c03d18254d3 100644
--- a/server/mapping.c
+++ b/server/mapping.c
@@ -80,6 +80,7 @@ static const struct object_ops ranges_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
ranges_destroy /* destroy */
};
@@ -116,6 +117,7 @@ static const struct object_ops shared_map_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
shared_map_destroy /* destroy */
};
@@ -189,6 +191,7 @@ static const struct object_ops mapping_ops =
default_unlink_name, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
mapping_destroy /* destroy */
};
diff --git a/server/mutex.c b/server/mutex.c
index 75ff5117d3f..ca5d38da5ea 100644
--- a/server/mutex.c
+++ b/server/mutex.c
@@ -57,6 +57,7 @@ struct mutex
unsigned int count; /* recursion count */
int abandoned; /* has it been abandoned? */
struct list entry; /* entry in owner thread mutex list */
+ struct fast_sync *fast_sync; /* fast synchronization object */
};

static void mutex_dump( struct object *obj, int verbose );
@@ -64,6 +65,7 @@ static int mutex_signaled( struct object *obj, struct wait_queue_entry *entry );
static void mutex_satisfied( struct object *obj, struct wait_queue_entry *entry );
static void mutex_destroy( struct object *obj );
static int mutex_signal( struct object *obj, unsigned int access );
+static struct fast_sync *mutex_get_fast_sync( struct object *obj );

static const struct object_ops mutex_ops =
{
@@ -85,6 +87,7 @@ static const struct object_ops mutex_ops =
default_unlink_name, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ mutex_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
mutex_destroy /* destroy */
};
@@ -127,6 +130,7 @@ static struct mutex *create_mutex( struct object *root, const struct unicode_str
mutex->owner = NULL;
mutex->abandoned = 0;
if (owned) do_grab( mutex, current );
+ mutex->fast_sync = NULL;
}
}
return mutex;
@@ -189,14 +193,27 @@ static int mutex_signal( struct object *obj, unsigned int access )
return 1;
}

+static struct fast_sync *mutex_get_fast_sync( struct object *obj )
+{
+ struct mutex *mutex = (struct mutex *)obj;
+
+ if (!mutex->fast_sync)
+ mutex->fast_sync = fast_create_mutex( mutex->owner ? mutex->owner->id : 0, mutex->count );
+ if (mutex->fast_sync) grab_object( mutex->fast_sync );
+ return mutex->fast_sync;
+}
+
static void mutex_destroy( struct object *obj )
{
struct mutex *mutex = (struct mutex *)obj;
assert( obj->ops == &mutex_ops );

- if (!mutex->count) return;
- mutex->count = 0;
- do_release( mutex );
+ if (mutex->count)
+ {
+ mutex->count = 0;
+ do_release( mutex );
+ }
+ if (mutex->fast_sync) release_object( mutex->fast_sync );
}

/* create a mutex */
diff --git a/server/named_pipe.c b/server/named_pipe.c
index df8c7e3170c..3bc91bb8350 100644
--- a/server/named_pipe.c
+++ b/server/named_pipe.c
@@ -131,6 +131,7 @@ static const struct object_ops named_pipe_ops =
default_unlink_name, /* unlink_name */
named_pipe_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
named_pipe_destroy /* destroy */
};
@@ -179,6 +180,7 @@ static const struct object_ops pipe_server_ops =
NULL, /* unlink_name */
pipe_server_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ default_fd_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
pipe_server_destroy /* destroy */
};
@@ -222,6 +224,7 @@ static const struct object_ops pipe_client_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ default_fd_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
pipe_end_destroy /* destroy */
};
@@ -268,6 +271,7 @@ static const struct object_ops named_pipe_device_ops =
default_unlink_name, /* unlink_name */
named_pipe_device_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
named_pipe_device_destroy /* destroy */
};
@@ -299,6 +303,7 @@ static const struct object_ops named_pipe_device_file_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ default_fd_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
named_pipe_device_file_destroy /* destroy */
};
diff --git a/server/object.c b/server/object.c
index b2779fd61fe..e8d6783537c 100644
--- a/server/object.c
+++ b/server/object.c
@@ -528,6 +528,12 @@ struct fd *no_get_fd( struct object *obj )
return NULL;
}

+struct fast_sync *no_get_fast_sync( struct object *obj )
+{
+ set_error( STATUS_OBJECT_TYPE_MISMATCH );
+ return NULL;
+}
+
unsigned int default_map_access( struct object *obj, unsigned int access )
{
return map_access( access, &obj->ops->type->mapping );
diff --git a/server/object.h b/server/object.h
index e9d9a87875e..6d77c9bb0c2 100644
--- a/server/object.h
+++ b/server/object.h
@@ -45,6 +45,7 @@ struct async;
struct async_queue;
struct winstation;
struct object_type;
+struct fast_sync;


struct unicode_str
@@ -106,6 +107,8 @@ struct object_ops
unsigned int options);
/* return list of kernel objects */
struct list *(*get_kernel_obj_list)(struct object *);
+ /* get a client-waitable fast-synchronization handle to this object */
+ struct fast_sync *(*get_fast_sync)(struct object *);
/* close a handle to this object */
int (*close_handle)(struct object *,struct process *,obj_handle_t);
/* destroy on refcount == 0 */
@@ -222,6 +225,18 @@ extern void reset_event( struct event *event );

extern void abandon_mutexes( struct thread *thread );

+/* fast-synchronization functions */
+
+extern struct fast_sync *fast_create_event( enum fast_sync_type type, int manual_reset, int signaled );
+extern struct fast_sync *fast_create_semaphore( unsigned int count, unsigned int max );
+extern struct fast_sync *fast_create_mutex( thread_id_t owner, unsigned int count );
+extern void fast_set_event( struct fast_sync *obj );
+extern void fast_reset_event( struct fast_sync *obj );
+extern void fast_abandon_mutexes( thread_id_t tid );
+extern int fast_user_apc_needs_signal(void);
+
+extern struct fast_sync *no_get_fast_sync( struct object *obj );
+
/* serial functions */

int get_serial_async_timeout(struct object *obj, int type, int count);
diff --git a/server/process.c b/server/process.c
index 5b271b12328..b6ff300eacc 100644
--- a/server/process.c
+++ b/server/process.c
@@ -80,6 +80,7 @@ static unsigned int process_map_access( struct object *obj, unsigned int access
static struct security_descriptor *process_get_sd( struct object *obj );
static void process_poll_event( struct fd *fd, int event );
static struct list *process_get_kernel_obj_list( struct object *obj );
+static struct fast_sync *process_get_fast_sync( struct object *obj );
static void process_destroy( struct object *obj );
static void terminate_process( struct process *process, struct thread *skip, int exit_code );

@@ -103,6 +104,7 @@ static const struct object_ops process_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
process_get_kernel_obj_list, /* get_kernel_obj_list */
+ process_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
process_destroy /* destroy */
};
@@ -154,6 +156,7 @@ static const struct object_ops startup_info_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
startup_info_destroy /* destroy */
};
@@ -176,6 +179,7 @@ struct type_descr job_type =

static void job_dump( struct object *obj, int verbose );
static int job_signaled( struct object *obj, struct wait_queue_entry *entry );
+static struct fast_sync *job_get_fast_sync( struct object *obj );
static int job_close_handle( struct object *obj, struct process *process, obj_handle_t handle );
static void job_destroy( struct object *obj );

@@ -193,6 +197,7 @@ struct job
struct job *parent;
struct list parent_job_entry; /* list entry for parent job */
struct list child_job_list; /* list of child jobs */
+ struct fast_sync *fast_sync; /* fast synchronization object */
};

static const struct object_ops job_ops =
@@ -215,6 +220,7 @@ static const struct object_ops job_ops =
default_unlink_name, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ job_get_fast_sync, /* get_fast_sync */
job_close_handle, /* close_handle */
job_destroy /* destroy */
};
@@ -239,6 +245,7 @@ static struct job *create_job_object( struct object *root, const struct unicode_
job->completion_port = NULL;
job->completion_key = 0;
job->parent = NULL;
+ job->fast_sync = NULL;
}
}
return job;
@@ -377,6 +384,17 @@ static void terminate_job( struct job *job, int exit_code )
job->terminating = 0;
job->signaled = 1;
wake_up( &job->obj, 0 );
+ fast_set_event( job->fast_sync );
+}
+
+static struct fast_sync *job_get_fast_sync( struct object *obj )
+{
+ struct job *job = (struct job *)obj;
+
+ if (!job->fast_sync)
+ job->fast_sync = fast_create_event( FAST_SYNC_SERVER, 1, job->signaled );
+ if (job->fast_sync) grab_object( job->fast_sync );
+ return job->fast_sync;
}

static int job_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
@@ -407,6 +425,8 @@ static void job_destroy( struct object *obj )
list_remove( &job->parent_job_entry );
release_object( job->parent );
}
+
+ if (job->fast_sync) release_object( job->fast_sync );
}

static void job_dump( struct object *obj, int verbose )
@@ -637,6 +657,7 @@ struct process *create_process( int fd, struct process *parent, unsigned int fla
process->trace_data = 0;
process->rawinput_mouse = NULL;
process->rawinput_kbd = NULL;
+ process->fast_sync = NULL;
list_init( &process->kernel_object );
list_init( &process->thread_list );
list_init( &process->locks );
@@ -739,6 +760,8 @@ static void process_destroy( struct object *obj )
if (process->token) release_object( process->token );
free( process->dir_cache );
free( process->image );
+
+ if (process->fast_sync) release_object( process->fast_sync );
}

/* dump a process on stdout for debugging purposes */
@@ -770,6 +793,16 @@ static struct list *process_get_kernel_obj_list( struct object *obj )
return &process->kernel_object;
}

+static struct fast_sync *process_get_fast_sync( struct object *obj )
+{
+ struct process *process = (struct process *)obj;
+
+ if (!process->fast_sync)
+ process->fast_sync = fast_create_event( FAST_SYNC_SERVER, 1, !process->running_threads );
+ if (process->fast_sync) grab_object( process->fast_sync );
+ return process->fast_sync;
+}
+
static struct security_descriptor *process_get_sd( struct object *obj )
{
static struct security_descriptor *process_default_sd;
@@ -950,6 +983,7 @@ static void process_killed( struct process *process )
release_job_process( process );
start_sigkill_timer( process );
wake_up( &process->obj, 0 );
+ fast_set_event( process->fast_sync );
}

/* add a thread to a process running threads list */
diff --git a/server/process.h b/server/process.h
index 0e1a83859d9..7c25f998735 100644
--- a/server/process.h
+++ b/server/process.h
@@ -87,6 +87,7 @@ struct process
const struct rawinput_device *rawinput_mouse; /* rawinput mouse device, if any */
const struct rawinput_device *rawinput_kbd; /* rawinput keyboard device, if any */
struct list kernel_object; /* list of kernel object pointers */
+ struct fast_sync *fast_sync; /* fast synchronization object */
};

/* process functions */
diff --git a/server/protocol.def b/server/protocol.def
index 30624a7cbaf..8e49fc478cd 100644
--- a/server/protocol.def
+++ b/server/protocol.def
@@ -3745,3 +3745,49 @@ struct handle_info
@REPLY
obj_handle_t handle; /* next thread handle */
@END
+
+
+enum fast_sync_type
+{
+ FAST_SYNC_SEMAPHORE = 1,
+ FAST_SYNC_MUTEX,
+ FAST_SYNC_EVENT,
+ FAST_SYNC_SERVER,
+ FAST_SYNC_QUEUE,
+};
+
+
+/* Obtain a handle to the fast synchronization device object */
+@REQ(get_fast_sync_device)
+@REPLY
+ obj_handle_t handle; /* handle to the device */
+@END
+
+
+/* Get the fast synchronization object associated with the given handle */
+@REQ(get_fast_sync_obj)
+ obj_handle_t handle; /* handle to the object */
+@REPLY
+ obj_handle_t handle; /* handle to the fast synchronization object */
+ int obj; /* linux object */
+ int type; /* object type */
+ unsigned int access; /* handle access rights */
+@END
+
+
+/* Begin a fast wait on a message queue */
+@REQ(fast_select_queue)
+ obj_handle_t handle; /* handle to the queue */
+@END
+
+
+/* End a fast wait on a message queue */
+@REQ(fast_unselect_queue)
+ obj_handle_t handle; /* handle to the queue */
+ int signaled; /* was the queue signaled? */
+@END
+
+
+/* Check if there are any user APCs queued */
+@REQ(check_user_apc)
+@END
diff --git a/server/queue.c b/server/queue.c
index e4903bcb79f..24770b1dd02 100644
--- a/server/queue.c
+++ b/server/queue.c
@@ -140,6 +140,8 @@ struct msg_queue
struct thread_input *input; /* thread input descriptor */
struct hook_table *hooks; /* hook table */
timeout_t last_get_msg; /* time of last get message call */
+ struct fast_sync *fast_sync; /* fast synchronization object */
+ int in_fast_wait; /* are we in a fast wait? */
};

struct hotkey
@@ -157,6 +159,7 @@ static int msg_queue_add_queue( struct object *obj, struct wait_queue_entry *ent
static void msg_queue_remove_queue( struct object *obj, struct wait_queue_entry *entry );
static int msg_queue_signaled( struct object *obj, struct wait_queue_entry *entry );
static void msg_queue_satisfied( struct object *obj, struct wait_queue_entry *entry );
+static struct fast_sync *msg_queue_get_fast_sync( struct object *obj );
static void msg_queue_destroy( struct object *obj );
static void msg_queue_poll_event( struct fd *fd, int event );
static void thread_input_dump( struct object *obj, int verbose );
@@ -183,6 +186,7 @@ static const struct object_ops msg_queue_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ msg_queue_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
msg_queue_destroy /* destroy */
};
@@ -220,6 +224,7 @@ static const struct object_ops thread_input_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
thread_input_destroy /* destroy */
};
@@ -305,6 +310,8 @@ static struct msg_queue *create_msg_queue( struct thread *thread, struct thread_
queue->input = (struct thread_input *)grab_object( input );
queue->hooks = NULL;
queue->last_get_msg = current_time;
+ queue->fast_sync = NULL;
+ queue->in_fast_wait = 0;
list_init( &queue->send_result );
list_init( &queue->callback_result );
list_init( &queue->pending_timers );
@@ -480,7 +487,11 @@ static inline void set_queue_bits( struct msg_queue *queue, unsigned int bits )
{
queue->wake_bits |= bits;
queue->changed_bits |= bits;
- if (is_signaled( queue )) wake_up( &queue->obj, 0 );
+ if (is_signaled( queue ))
+ {
+ wake_up( &queue->obj, 0 );
+ fast_set_event( queue->fast_sync );
+ }
}

/* clear some queue bits */
@@ -488,6 +499,8 @@ static inline void clear_queue_bits( struct msg_queue *queue, unsigned int bits
{
queue->wake_bits &= ~bits;
queue->changed_bits &= ~bits;
+ if (!is_signaled( queue ))
+ fast_reset_event( queue->fast_sync );
}

/* check whether msg is a keyboard message */
@@ -937,6 +950,10 @@ static int is_queue_hung( struct msg_queue *queue )
if (get_wait_queue_thread(entry)->queue == queue)
return 0; /* thread is waiting on queue -> not hung */
}
+
+ if (queue->in_fast_wait)
+ return 0; /* thread is waiting on queue in absentia -> not hung */
+
return 1;
}

@@ -997,6 +1014,17 @@ static void msg_queue_satisfied( struct object *obj, struct wait_queue_entry *en
struct msg_queue *queue = (struct msg_queue *)obj;
queue->wake_mask = 0;
queue->changed_mask = 0;
+ fast_reset_event( queue->fast_sync );
+}
+
+static struct fast_sync *msg_queue_get_fast_sync( struct object *obj )
+{
+ struct msg_queue *queue = (struct msg_queue *)obj;
+
+ if (!queue->fast_sync)
+ queue->fast_sync = fast_create_event( FAST_SYNC_QUEUE, 1, is_signaled( queue ) );
+ if (queue->fast_sync) grab_object( queue->fast_sync );
+ return queue->fast_sync;
}

static void msg_queue_destroy( struct object *obj )
@@ -1035,6 +1063,7 @@ static void msg_queue_destroy( struct object *obj )
release_object( queue->input );
if (queue->hooks) release_object( queue->hooks );
if (queue->fd) release_object( queue->fd );
+ if (queue->fast_sync) release_object( queue->fast_sync );
}

static void msg_queue_poll_event( struct fd *fd, int event )
@@ -1045,6 +1074,7 @@ static void msg_queue_poll_event( struct fd *fd, int event )
if (event & (POLLERR | POLLHUP)) set_fd_events( fd, -1 );
else set_fd_events( queue->fd, 0 );
wake_up( &queue->obj, 0 );
+ fast_set_event( queue->fast_sync );
}

static void thread_input_dump( struct object *obj, int verbose )
@@ -2421,8 +2451,20 @@ DECL_HANDLER(set_queue_mask)
if (is_signaled( queue ))
{
/* if skip wait is set, do what would have been done in the subsequent wait */
- if (req->skip_wait) queue->wake_mask = queue->changed_mask = 0;
- else wake_up( &queue->obj, 0 );
+ if (req->skip_wait)
+ {
+ queue->wake_mask = queue->changed_mask = 0;
+ fast_reset_event( queue->fast_sync );
+ }
+ else
+ {
+ wake_up( &queue->obj, 0 );
+ fast_set_event( queue->fast_sync );
+ }
+ }
+ else
+ {
+ fast_reset_event( queue->fast_sync );
}
}
}
@@ -2437,6 +2479,8 @@ DECL_HANDLER(get_queue_status)
reply->wake_bits = queue->wake_bits;
reply->changed_bits = queue->changed_bits;
queue->changed_bits &= ~req->clear_bits;
+ if (!is_signaled( queue ))
+ fast_reset_event( queue->fast_sync );
}
else reply->wake_bits = reply->changed_bits = 0;
}
@@ -2620,6 +2664,9 @@ DECL_HANDLER(get_message)
if (filter & QS_INPUT) queue->changed_bits &= ~QS_INPUT;
if (filter & QS_PAINT) queue->changed_bits &= ~QS_PAINT;

+ if (!is_signaled( queue ))
+ fast_reset_event( queue->fast_sync );
+
/* then check for posted messages */
if ((filter & QS_POSTMESSAGE) &&
get_posted_message( queue, get_win, req->get_first, req->get_last, req->flags, reply ))
@@ -2673,6 +2720,7 @@ DECL_HANDLER(get_message)
if (get_win == -1 && current->process->idle_event) set_event( current->process->idle_event );
queue->wake_mask = req->wake_mask;
queue->changed_mask = req->changed_mask;
+ fast_reset_event( queue->fast_sync );
set_error( STATUS_PENDING ); /* FIXME */
}

@@ -3394,3 +3442,61 @@ DECL_HANDLER(get_rawinput_devices)
devices[i++] = e->device;
}
}
+
+DECL_HANDLER(fast_select_queue)
+{
+ struct msg_queue *queue;
+
+ if (!(queue = (struct msg_queue *)get_handle_obj( current->process, req->handle,
+ SYNCHRONIZE, &msg_queue_ops )))
+ return;
+
+ /* a thread can only wait on its own queue */
+ if (current->queue != queue || queue->in_fast_wait)
+ {
+ set_error( STATUS_ACCESS_DENIED );
+ }
+ else
+ {
+ if (current->process->idle_event && !(queue->wake_mask & QS_SMRESULT))
+ set_event( current->process->idle_event );
+
+ if (queue->fd)
+ set_fd_events( queue->fd, POLLIN );
+
+ queue->in_fast_wait = 1;
+ }
+
+ release_object( queue );
+}
+
+DECL_HANDLER(fast_unselect_queue)
+{
+ struct msg_queue *queue;
+
+ if (!(queue = (struct msg_queue *)get_handle_obj( current->process, req->handle,
+ SYNCHRONIZE, &msg_queue_ops )))
+ return;
+
+ if (current->queue != queue || !queue->in_fast_wait)
+ {
+ set_error( STATUS_ACCESS_DENIED );
+ }
+ else
+ {
+ if (queue->fd)
+ set_fd_events( queue->fd, 0 );
+
+ if (req->signaled)
+ msg_queue_satisfied( &queue->obj, NULL );
+
+ queue->in_fast_wait = 0;
+ }
+
+ release_object( queue );
+}
+
+DECL_HANDLER(check_user_apc)
+{
+ if (list_empty( &current->user_apc )) set_error( STATUS_TIMEOUT );
+}
diff --git a/server/registry.c b/server/registry.c
index 68ec4f9e39a..c9110c56e6e 100644
--- a/server/registry.c
+++ b/server/registry.c
@@ -188,6 +188,7 @@ static const struct object_ops key_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
key_close_handle, /* close_handle */
key_destroy /* destroy */
};
diff --git a/server/request.c b/server/request.c
index 29b63600f15..ae6d9d0b52c 100644
--- a/server/request.c
+++ b/server/request.c
@@ -109,6 +109,7 @@ static const struct object_ops master_socket_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
master_socket_destroy /* destroy */
};
diff --git a/server/request.h b/server/request.h
index a8d74f01f96..1c778fce75f 100644
--- a/server/request.h
+++ b/server/request.h
@@ -395,6 +395,11 @@ DECL_HANDLER(terminate_job);
DECL_HANDLER(suspend_process);
DECL_HANDLER(resume_process);
DECL_HANDLER(get_next_thread);
+DECL_HANDLER(get_fast_sync_device);
+DECL_HANDLER(get_fast_sync_obj);
+DECL_HANDLER(fast_select_queue);
+DECL_HANDLER(fast_unselect_queue);
+DECL_HANDLER(check_user_apc);

#ifdef WANT_REQUEST_HANDLERS

@@ -677,6 +682,11 @@ static const req_handler req_handlers[REQ_NB_REQUESTS] =
(req_handler)req_suspend_process,
(req_handler)req_resume_process,
(req_handler)req_get_next_thread,
+ (req_handler)req_get_fast_sync_device,
+ (req_handler)req_get_fast_sync_obj,
+ (req_handler)req_fast_select_queue,
+ (req_handler)req_fast_unselect_queue,
+ (req_handler)req_check_user_apc,
};

C_ASSERT( sizeof(abstime_t) == 8 );
@@ -2258,6 +2268,22 @@ C_ASSERT( FIELD_OFFSET(struct get_next_thread_request, flags) == 28 );
C_ASSERT( sizeof(struct get_next_thread_request) == 32 );
C_ASSERT( FIELD_OFFSET(struct get_next_thread_reply, handle) == 8 );
C_ASSERT( sizeof(struct get_next_thread_reply) == 16 );
+C_ASSERT( sizeof(struct get_fast_sync_device_request) == 16 );
+C_ASSERT( FIELD_OFFSET(struct get_fast_sync_device_reply, handle) == 8 );
+C_ASSERT( sizeof(struct get_fast_sync_device_reply) == 16 );
+C_ASSERT( FIELD_OFFSET(struct get_fast_sync_obj_request, handle) == 12 );
+C_ASSERT( sizeof(struct get_fast_sync_obj_request) == 16 );
+C_ASSERT( FIELD_OFFSET(struct get_fast_sync_obj_reply, handle) == 8 );
+C_ASSERT( FIELD_OFFSET(struct get_fast_sync_obj_reply, obj) == 12 );
+C_ASSERT( FIELD_OFFSET(struct get_fast_sync_obj_reply, type) == 16 );
+C_ASSERT( FIELD_OFFSET(struct get_fast_sync_obj_reply, access) == 20 );
+C_ASSERT( sizeof(struct get_fast_sync_obj_reply) == 24 );
+C_ASSERT( FIELD_OFFSET(struct fast_select_queue_request, handle) == 12 );
+C_ASSERT( sizeof(struct fast_select_queue_request) == 16 );
+C_ASSERT( FIELD_OFFSET(struct fast_unselect_queue_request, handle) == 12 );
+C_ASSERT( FIELD_OFFSET(struct fast_unselect_queue_request, signaled) == 16 );
+C_ASSERT( sizeof(struct fast_unselect_queue_request) == 24 );
+C_ASSERT( sizeof(struct check_user_apc_request) == 16 );

#endif /* WANT_REQUEST_HANDLERS */

diff --git a/server/semaphore.c b/server/semaphore.c
index 543d5883ec8..6b7e737c848 100644
--- a/server/semaphore.c
+++ b/server/semaphore.c
@@ -55,12 +55,15 @@ struct semaphore
struct object obj; /* object header */
unsigned int count; /* current count */
unsigned int max; /* maximum possible count */
+ struct fast_sync *fast_sync; /* fast synchronization object */
};

static void semaphore_dump( struct object *obj, int verbose );
static int semaphore_signaled( struct object *obj, struct wait_queue_entry *entry );
static void semaphore_satisfied( struct object *obj, struct wait_queue_entry *entry );
static int semaphore_signal( struct object *obj, unsigned int access );
+static struct fast_sync *semaphore_get_fast_sync( struct object *obj );
+static void semaphore_destroy( struct object *obj );

static const struct object_ops semaphore_ops =
{
@@ -82,8 +85,9 @@ static const struct object_ops semaphore_ops =
default_unlink_name, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ semaphore_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
- no_destroy /* destroy */
+ semaphore_destroy /* destroy */
};


@@ -105,6 +109,7 @@ static struct semaphore *create_semaphore( struct object *root, const struct uni
/* initialize it if it didn't already exist */
sem->count = initial;
sem->max = max;
+ sem->fast_sync = NULL;
}
}
return sem;
@@ -167,6 +172,23 @@ static int semaphore_signal( struct object *obj, unsigned int access )
return release_semaphore( sem, 1, NULL );
}

+static struct fast_sync *semaphore_get_fast_sync( struct object *obj )
+{
+ struct semaphore *semaphore = (struct semaphore *)obj;
+
+ if (!semaphore->fast_sync)
+ semaphore->fast_sync = fast_create_semaphore( semaphore->count, semaphore->max );
+ if (semaphore->fast_sync) grab_object( semaphore->fast_sync );
+ return semaphore->fast_sync;
+}
+
+static void semaphore_destroy( struct object *obj )
+{
+ struct semaphore *semaphore = (struct semaphore *)obj;
+
+ if (semaphore->fast_sync) release_object( semaphore->fast_sync );
+}
+
/* create a semaphore */
DECL_HANDLER(create_semaphore)
{
diff --git a/server/serial.c b/server/serial.c
index d3ea4cbe420..8c68aafe61f 100644
--- a/server/serial.c
+++ b/server/serial.c
@@ -104,6 +104,7 @@ static const struct object_ops serial_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ default_fd_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
serial_destroy /* destroy */
};
diff --git a/server/signal.c b/server/signal.c
index 0c22c157f2b..ad555fd51af 100644
--- a/server/signal.c
+++ b/server/signal.c
@@ -79,6 +79,7 @@ static const struct object_ops handler_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
handler_destroy /* destroy */
};
diff --git a/server/sock.c b/server/sock.c
index b4649abbf08..855454f05ee 100644
--- a/server/sock.c
+++ b/server/sock.c
@@ -214,6 +214,7 @@ static const struct object_ops sock_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ default_fd_get_fast_sync, /* get_fast_sync */
sock_close_handle, /* close_handle */
sock_destroy /* destroy */
};
@@ -2072,6 +2073,7 @@ static const struct object_ops ifchange_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
ifchange_destroy /* destroy */
};
@@ -2292,6 +2294,7 @@ static const struct object_ops socket_device_ops =
default_unlink_name, /* unlink_name */
socket_device_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
no_destroy /* destroy */
};
diff --git a/server/symlink.c b/server/symlink.c
index 3879bb685f7..241e13fa369 100644
--- a/server/symlink.c
+++ b/server/symlink.c
@@ -84,6 +84,7 @@ static const struct object_ops symlink_ops =
default_unlink_name, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
symlink_destroy /* destroy */
};
diff --git a/server/thread.c b/server/thread.c
index 0c7f11c0da1..0cae26c0653 100644
--- a/server/thread.c
+++ b/server/thread.c
@@ -109,6 +109,7 @@ static const struct object_ops thread_apc_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
thread_apc_destroy /* destroy */
};
@@ -146,6 +147,7 @@ static const struct object_ops context_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
no_destroy /* destroy */
};
@@ -173,6 +175,7 @@ static int thread_signaled( struct object *obj, struct wait_queue_entry *entry )
static unsigned int thread_map_access( struct object *obj, unsigned int access );
static void thread_poll_event( struct fd *fd, int event );
static struct list *thread_get_kernel_obj_list( struct object *obj );
+static struct fast_sync *thread_get_fast_sync( struct object *obj );
static void destroy_thread( struct object *obj );

static const struct object_ops thread_ops =
@@ -195,6 +198,7 @@ static const struct object_ops thread_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
thread_get_kernel_obj_list, /* get_kernel_obj_list */
+ thread_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
destroy_thread /* destroy */
};
@@ -242,6 +246,7 @@ static inline void init_thread_structure( struct thread *thread )
thread->token = NULL;
thread->desc = NULL;
thread->desc_len = 0;
+ thread->fast_sync = NULL;

thread->creation_time = current_time;
thread->exit_time = 0;
@@ -391,6 +396,16 @@ static struct list *thread_get_kernel_obj_list( struct object *obj )
return &thread->kernel_object;
}

+static struct fast_sync *thread_get_fast_sync( struct object *obj )
+{
+ struct thread *thread = (struct thread *)obj;
+
+ if (!thread->fast_sync)
+ thread->fast_sync = fast_create_event( FAST_SYNC_SERVER, 1, thread->state == TERMINATED );
+ if (thread->fast_sync) grab_object( thread->fast_sync );
+ return thread->fast_sync;
+}
+
/* cleanup everything that is no longer needed by a dead thread */
/* used by destroy_thread and kill_thread */
static void cleanup_thread( struct thread *thread )
@@ -445,6 +460,7 @@ static void destroy_thread( struct object *obj )
release_object( thread->process );
if (thread->id) free_ptid( thread->id );
if (thread->token) release_object( thread->token );
+ if (thread->fast_sync) release_object( thread->fast_sync );
}

/* dump a thread on stdout for debugging purposes */
@@ -1125,8 +1141,9 @@ static int queue_apc( struct process *process, struct thread *thread, struct thr
{
if (thread->state == TERMINATED) return 0;
if (!(queue = get_apc_queue( thread, apc->call.type ))) return 1;
- /* send signal for system APCs if needed */
- if (queue == &thread->system_apc && list_empty( queue ) && !is_in_apc_wait( thread ))
+ /* send signal if needed */
+ if ((queue == &thread->system_apc || fast_user_apc_needs_signal())
+ && list_empty( queue ) && !is_in_apc_wait( thread ))
{
if (!send_thread_signal( thread, SIGUSR1 )) return 0;
}
@@ -1279,7 +1296,9 @@ void kill_thread( struct thread *thread, int violent_death )
}
kill_console_processes( thread, 0 );
abandon_mutexes( thread );
+ fast_abandon_mutexes( thread->id );
wake_up( &thread->obj, 0 );
+ fast_set_event( thread->fast_sync );
if (violent_death) send_thread_signal( thread, SIGQUIT );
cleanup_thread( thread );
remove_process_thread( thread->process, thread );
diff --git a/server/thread.h b/server/thread.h
index 8dcf966a90a..caabbe5bfd6 100644
--- a/server/thread.h
+++ b/server/thread.h
@@ -90,6 +90,7 @@ struct thread
struct list kernel_object; /* list of kernel object pointers */
data_size_t desc_len; /* thread description length in bytes */
WCHAR *desc; /* thread description string */
+ struct fast_sync *fast_sync; /* fast synchronization object */
};

extern struct thread *current;
diff --git a/server/timer.c b/server/timer.c
index 5e265d2ddf6..4f9e5e61aa1 100644
--- a/server/timer.c
+++ b/server/timer.c
@@ -62,11 +62,13 @@ struct timer
struct thread *thread; /* thread that set the APC function */
client_ptr_t callback; /* callback APC function */
client_ptr_t arg; /* callback argument */
+ struct fast_sync *fast_sync; /* fast synchronization object */
};

static void timer_dump( struct object *obj, int verbose );
static int timer_signaled( struct object *obj, struct wait_queue_entry *entry );
static void timer_satisfied( struct object *obj, struct wait_queue_entry *entry );
+static struct fast_sync *timer_get_fast_sync( struct object *obj );
static void timer_destroy( struct object *obj );

static const struct object_ops timer_ops =
@@ -89,6 +91,7 @@ static const struct object_ops timer_ops =
default_unlink_name, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ timer_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
timer_destroy /* destroy */
};
@@ -111,6 +114,7 @@ static struct timer *create_timer( struct object *root, const struct unicode_str
timer->period = 0;
timer->timeout = NULL;
timer->thread = NULL;
+ timer->fast_sync = NULL;
}
}
return timer;
@@ -152,6 +156,7 @@ static void timer_callback( void *private )
/* wake up waiters */
timer->signaled = 1;
wake_up( &timer->obj, 0 );
+ fast_set_event( timer->fast_sync );
}

/* cancel a running timer */
@@ -182,6 +187,7 @@ static int set_timer( struct timer *timer, timeout_t expire, unsigned int period
{
period = 0; /* period doesn't make any sense for a manual timer */
timer->signaled = 0;
+ fast_reset_event( timer->fast_sync );
}
timer->when = (expire <= 0) ? expire - monotonic_time : max( expire, current_time );
timer->period = period;
@@ -216,6 +222,16 @@ static void timer_satisfied( struct object *obj, struct wait_queue_entry *entry
if (!timer->manual) timer->signaled = 0;
}

+static struct fast_sync *timer_get_fast_sync( struct object *obj )
+{
+ struct timer *timer = (struct timer *)obj;
+
+ if (!timer->fast_sync)
+ timer->fast_sync = fast_create_event( FAST_SYNC_SERVER, timer->manual, timer->signaled );
+ if (timer->fast_sync) grab_object( timer->fast_sync );
+ return timer->fast_sync;
+}
+
static void timer_destroy( struct object *obj )
{
struct timer *timer = (struct timer *)obj;
@@ -223,6 +239,7 @@ static void timer_destroy( struct object *obj )

if (timer->timeout) remove_timeout_user( timer->timeout );
if (timer->thread) release_object( timer->thread );
+ if (timer->fast_sync) release_object( timer->fast_sync );
}

/* create a timer */
diff --git a/server/token.c b/server/token.c
index ad5d7cda323..7d73e9d7caa 100644
--- a/server/token.c
+++ b/server/token.c
@@ -173,6 +173,7 @@ static const struct object_ops token_ops =
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
no_close_handle, /* close_handle */
token_destroy /* destroy */
};
diff --git a/server/trace.c b/server/trace.c
index 1c51bffe1aa..e7b760b4634 100644
--- a/server/trace.c
+++ b/server/trace.c
@@ -4545,6 +4545,43 @@ static void dump_get_next_thread_reply( const struct get_next_thread_reply *req
fprintf( stderr, " handle=%04x", req->handle );
}

+static void dump_get_fast_sync_device_request( const struct get_fast_sync_device_request *req )
+{
+}
+
+static void dump_get_fast_sync_device_reply( const struct get_fast_sync_device_reply *req )
+{
+ fprintf( stderr, " handle=%04x", req->handle );
+}
+
+static void dump_get_fast_sync_obj_request( const struct get_fast_sync_obj_request *req )
+{
+ fprintf( stderr, " handle=%04x", req->handle );
+}
+
+static void dump_get_fast_sync_obj_reply( const struct get_fast_sync_obj_reply *req )
+{
+ fprintf( stderr, " handle=%04x", req->handle );
+ fprintf( stderr, ", obj=%d", req->obj );
+ fprintf( stderr, ", type=%d", req->type );
+ fprintf( stderr, ", access=%08x", req->access );
+}
+
+static void dump_fast_select_queue_request( const struct fast_select_queue_request *req )
+{
+ fprintf( stderr, " handle=%04x", req->handle );
+}
+
+static void dump_fast_unselect_queue_request( const struct fast_unselect_queue_request *req )
+{
+ fprintf( stderr, " handle=%04x", req->handle );
+ fprintf( stderr, ", signaled=%d", req->signaled );
+}
+
+static void dump_check_user_apc_request( const struct check_user_apc_request *req )
+{
+}
+
static const dump_func req_dumpers[REQ_NB_REQUESTS] = {
(dump_func)dump_new_process_request,
(dump_func)dump_get_new_process_info_request,
@@ -4822,6 +4859,11 @@ static const dump_func req_dumpers[REQ_NB_REQUESTS] = {
(dump_func)dump_suspend_process_request,
(dump_func)dump_resume_process_request,
(dump_func)dump_get_next_thread_request,
+ (dump_func)dump_get_fast_sync_device_request,
+ (dump_func)dump_get_fast_sync_obj_request,
+ (dump_func)dump_fast_select_queue_request,
+ (dump_func)dump_fast_unselect_queue_request,
+ (dump_func)dump_check_user_apc_request,
};

static const dump_func reply_dumpers[REQ_NB_REQUESTS] = {
@@ -5101,6 +5143,11 @@ static const dump_func reply_dumpers[REQ_NB_REQUESTS] = {
NULL,
NULL,
(dump_func)dump_get_next_thread_reply,
+ (dump_func)dump_get_fast_sync_device_reply,
+ (dump_func)dump_get_fast_sync_obj_reply,
+ NULL,
+ NULL,
+ NULL,
};

static const char * const req_names[REQ_NB_REQUESTS] = {
@@ -5380,6 +5427,11 @@ static const char * const req_names[REQ_NB_REQUESTS] = {
"suspend_process",
"resume_process",
"get_next_thread",
+ "get_fast_sync_device",
+ "get_fast_sync_obj",
+ "fast_select_queue",
+ "fast_unselect_queue",
+ "check_user_apc",
};

static const struct
diff --git a/server/winstation.c b/server/winstation.c
index 61f9f77c73f..2dee8067dd2 100644
--- a/server/winstation.c
+++ b/server/winstation.c
@@ -87,6 +87,7 @@ static const struct object_ops winstation_ops =
default_unlink_name, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
winstation_close_handle, /* close_handle */
winstation_destroy /* destroy */
};
@@ -127,6 +128,7 @@ static const struct object_ops desktop_ops =
default_unlink_name, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
+ no_get_fast_sync, /* get_fast_sync */
desktop_close_handle, /* close_handle */
desktop_destroy /* destroy */
};