- Change rpcrt4's incorrect synchronous named-pipe operation back to asynchronous, as it was in Wine back in 0.9.5 (a sketch of the overlapped pattern is shown below).

- Update the corresponding patch file, increasing the diff context to 5 lines for better merging.

svn path=/trunk/; revision=31640
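
For context, here is a minimal standalone sketch of the Win32 overlapped named-pipe pattern this commit switches back to. It is not taken from the rpcrt4 sources; the pipe name, buffer sizes, and error handling are placeholder assumptions. The pipe is opened with FILE_FLAG_OVERLAPPED, each connection carries a manual-reset event in ovl[0] for connect/read and another in ovl[1] for write, and a ReadFile() that reports ERROR_IO_PENDING is completed with GetOverlappedResult():

    #include <windows.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
        OVERLAPPED ovl[2];               /* [0]: connect/read, [1]: write */
        char buf[512];
        DWORD bytes_read, err;
        BOOL ret;
        HANDLE pipe;

        /* FILE_FLAG_OVERLAPPED is what allows the asynchronous calls below */
        pipe = CreateNamedPipeA("\\\\.\\pipe\\overlapped_example",
                                PIPE_ACCESS_DUPLEX | FILE_FLAG_OVERLAPPED,
                                PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE,
                                PIPE_UNLIMITED_INSTANCES,
                                4096, 4096, 5000, NULL);
        if (pipe == INVALID_HANDLE_VALUE)
            return 1;

        memset(ovl, 0, sizeof(ovl));
        ovl[0].hEvent = CreateEventW(NULL, TRUE, FALSE, NULL); /* manual-reset */
        ovl[1].hEvent = CreateEventW(NULL, TRUE, FALSE, NULL);

        /* Asynchronous accept: ConnectNamedPipe() either completes at once,
         * reports ERROR_PIPE_CONNECTED (a client is already there), or returns
         * ERROR_IO_PENDING, in which case ovl[0].hEvent can be placed in a
         * WaitForMultipleObjects() array next to other server events. */
        if (!ConnectNamedPipe(pipe, &ovl[0]))
        {
            err = GetLastError();
            if (err == ERROR_PIPE_CONNECTED)
                SetEvent(ovl[0].hEvent);
            else if (err == ERROR_IO_PENDING)
                WaitForSingleObject(ovl[0].hEvent, INFINITE);
            else
                return 1;
        }

        /* Overlapped read: ReadFile() may fail with ERROR_IO_PENDING, and
         * GetOverlappedResult(..., TRUE) then blocks until the transfer is
         * done instead of blocking inside ReadFile() itself. */
        ret = ReadFile(pipe, buf, sizeof(buf), &bytes_read, &ovl[0]);
        if (!ret && GetLastError() != ERROR_IO_PENDING)
            return 1;
        if (!GetOverlappedResult(pipe, &ovl[0], &bytes_read, TRUE))
            return 1;
        printf("read %lu bytes\n", (unsigned long)bytes_read);

        CloseHandle(ovl[0].hEvent);
        CloseHandle(ovl[1].hEvent);
        CloseHandle(pipe);
        return 0;
    }

The ovl[2] array introduced by the commit mirrors this split: ovl[0] serves ConnectNamedPipe() and ReadFile(), ovl[1] serves WriteFile(), so a pending read and a pending write never wait on the same event.
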
Aleksey Bragin 2008-01-07 14:16:27 +00:00
parent 8fbe89bbba
commit 77de066997
2 changed files with 269 additions and 28 deletions

View file

@@ -90,7 +90,7 @@ typedef struct _RpcConnection_np
{
RpcConnection common;
HANDLE pipe;
OVERLAPPED ovl;
OVERLAPPED ovl[2];
BOOL listening;
} RpcConnection_np;
@@ -112,11 +112,11 @@ static RPC_STATUS rpcrt4_conn_listen_pipe(RpcConnection_np *npc)
return RPC_S_OK;
npc->listening = TRUE;
if (ConnectNamedPipe(npc->pipe, &npc->ovl))
if (ConnectNamedPipe(npc->pipe, &npc->ovl[0]))
return RPC_S_OK;
if (GetLastError() == ERROR_PIPE_CONNECTED) {
SetEvent(npc->ovl.hEvent);
SetEvent(npc->ovl[0].hEvent);
return RPC_S_OK;
}
if (GetLastError() == ERROR_IO_PENDING) {
@@ -146,7 +146,8 @@ static RPC_STATUS rpcrt4_conn_create_pipe(RpcConnection *Connection, LPCSTR pnam
}
memset(&npc->ovl, 0, sizeof(npc->ovl));
npc->ovl.hEvent = CreateEventW(NULL, TRUE, FALSE, NULL);
npc->ovl[0].hEvent = CreateEventW(NULL, TRUE, FALSE, NULL);
npc->ovl[1].hEvent = CreateEventW(NULL, TRUE, FALSE, NULL);
/* Note: we don't call ConnectNamedPipe here because it must be done in the
* server thread as the thread must be alertable */
@@ -209,7 +210,9 @@ static RPC_STATUS rpcrt4_conn_open_pipe(RpcConnection *Connection, LPCSTR pname,
/* pipe is connected; change to message-read mode. */
dwMode = PIPE_READMODE_MESSAGE;
SetNamedPipeHandleState(pipe, &dwMode, NULL, NULL);
npc->ovl.hEvent = CreateEventW(NULL, TRUE, FALSE, NULL);
npc->ovl[0].hEvent = CreateEventW(NULL, TRUE, FALSE, NULL);
npc->ovl[1].hEvent = CreateEventW(NULL, TRUE, FALSE, NULL);
npc->pipe = pipe;
return RPC_S_OK;
@@ -315,7 +318,8 @@ static void rpcrt4_conn_np_handoff(RpcConnection_np *old_npc, RpcConnection_np *
* to the child, then reopen the server binding to continue listening */
new_npc->pipe = old_npc->pipe;
new_npc->ovl = old_npc->ovl;
new_npc->ovl[0] = old_npc->ovl[0];
new_npc->ovl[1] = old_npc->ovl[1];
old_npc->pipe = 0;
memset(&old_npc->ovl, 0, sizeof(old_npc->ovl));
old_npc->listening = FALSE;
@@ -366,9 +370,14 @@ static int rpcrt4_conn_np_read(RpcConnection *Connection,
while (bytes_left)
{
DWORD bytes_read;
ret = ReadFile(npc->pipe, buf, bytes_left, &bytes_read, NULL);
if (!ret || !bytes_read)
ret = ReadFile(npc->pipe, buf, bytes_left, &bytes_read, &npc->ovl[0]);
if ((!ret || !bytes_read) && (GetLastError() != ERROR_IO_PENDING))
break;
ret = GetOverlappedResult(npc->pipe, &npc->ovl[0], &bytes_read, TRUE);
if (!ret /*&& GetLastError() != ERROR_MORE_DATA*/)
break;
bytes_left -= bytes_read;
buf += bytes_read;
}
@@ -386,9 +395,14 @@ static int rpcrt4_conn_np_write(RpcConnection *Connection,
while (bytes_left)
{
DWORD bytes_written;
ret = WriteFile(npc->pipe, buf, count, &bytes_written, NULL);
if (!ret || !bytes_written)
ret = WriteFile(npc->pipe, buf, count, &bytes_written, &npc->ovl[1]);
if ((!ret || !bytes_written) && (GetLastError() != ERROR_IO_PENDING))
break;
ret = GetOverlappedResult(npc->pipe, &npc->ovl[1], &bytes_written, TRUE);
if (!ret /*&& GetLastError() != ERROR_MORE_DATA*/)
break;
bytes_left -= bytes_written;
buf += bytes_written;
}
@@ -403,10 +417,15 @@ static int rpcrt4_conn_np_close(RpcConnection *Connection)
CloseHandle(npc->pipe);
npc->pipe = 0;
}
if (npc->ovl.hEvent) {
CloseHandle(npc->ovl.hEvent);
npc->ovl.hEvent = 0;
if (npc->ovl[0].hEvent) {
CloseHandle(npc->ovl[0].hEvent);
npc->ovl[0].hEvent = 0;
}
if (npc->ovl[1].hEvent) {
CloseHandle(npc->ovl[1].hEvent);
npc->ovl[1].hEvent = 0;
}
return 0;
}
@@ -554,7 +573,7 @@ static void *rpcrt4_protseq_np_get_wait_array(RpcServerProtseq *protseq, void *p
conn = CONTAINING_RECORD(protseq->conn, RpcConnection_np, common);
while (conn) {
rpcrt4_conn_listen_pipe(conn);
if (conn->ovl.hEvent)
if (conn->ovl[0].hEvent)
(*count)++;
conn = CONTAINING_RECORD(conn->common.Next, RpcConnection_np, common);
}
@@ -575,7 +594,7 @@ static void *rpcrt4_protseq_np_get_wait_array(RpcServerProtseq *protseq, void *p
*count = 1;
conn = CONTAINING_RECORD(protseq->conn, RpcConnection_np, common);
while (conn) {
if ((objs[*count] = conn->ovl.hEvent))
if ((objs[*count] = conn->ovl[0].hEvent))
(*count)++;
conn = CONTAINING_RECORD(conn->common.Next, RpcConnection_np, common);
}
@@ -622,7 +641,7 @@ static int rpcrt4_protseq_np_wait_for_new_connection(RpcServerProtseq *protseq,
EnterCriticalSection(&protseq->cs);
conn = CONTAINING_RECORD(protseq->conn, RpcConnection_np, common);
while (conn) {
if (b_handle == conn->ovl.hEvent) break;
if (b_handle == conn->ovl[0].hEvent) break;
conn = CONTAINING_RECORD(conn->common.Next, RpcConnection_np, common);
}
cconn = NULL;

View file

@@ -1,6 +1,8 @@
--- H:\Working Copies\wine\dlls\rpcrt4\rpc_transport.c Sun Jan 06 19:27:38 2008
+++ H:\Working Copies\ReactOS\trunk\reactos\dll\win32\rpcrt4\rpc_transport.c Sun Jan 06 19:28:07 2008
@@ -56,6 +56,9 @@
+++ H:\Working Copies\ReactOS\trunk\reactos\dll\win32\rpcrt4\rpc_transport.c Mon Jan 07 16:02:15 2008
@@ -54,10 +54,13 @@
#endif
#ifdef HAVE_SYS_POLL_H
#include <sys/poll.h>
#endif
@@ -10,7 +12,11 @@
#include "windef.h"
#include "winbase.h"
#include "winnls.h"
@@ -73,6 +76,8 @@
#include "winerror.h"
#include "winternl.h"
@@ -71,10 +74,12 @@
#include "rpc_binding.h"
#include "rpc_message.h"
#include "rpc_server.h"
#include "epm_towers.h"
@@ -19,7 +25,42 @@
#ifndef SOL_TCP
# define SOL_TCP IPPROTO_TCP
#endif
@@ -128,7 +133,7 @@
WINE_DEFAULT_DEBUG_CHANNEL(rpc);
@@ -83,11 +88,11 @@
typedef struct _RpcConnection_np
{
RpcConnection common;
HANDLE pipe;
- OVERLAPPED ovl;
+ OVERLAPPED ovl[2];
BOOL listening;
} RpcConnection_np;
static RpcConnection *rpcrt4_conn_np_alloc(void)
{
@@ -105,15 +110,15 @@
{
if (npc->listening)
return RPC_S_OK;
npc->listening = TRUE;
- if (ConnectNamedPipe(npc->pipe, &npc->ovl))
+ if (ConnectNamedPipe(npc->pipe, &npc->ovl[0]))
return RPC_S_OK;
if (GetLastError() == ERROR_PIPE_CONNECTED) {
- SetEvent(npc->ovl.hEvent);
+ SetEvent(npc->ovl[0].hEvent);
return RPC_S_OK;
}
if (GetLastError() == ERROR_IO_PENDING) {
/* will be completed in rpcrt4_protseq_np_wait_for_new_connection */
return RPC_S_OK;
@@ -126,11 +131,11 @@
static RPC_STATUS rpcrt4_conn_create_pipe(RpcConnection *Connection, LPCSTR pname)
{
RpcConnection_np *npc = (RpcConnection_np *) Connection;
TRACE("listening on %s\n", pname);
@@ -28,7 +69,158 @@
PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE,
PIPE_UNLIMITED_INSTANCES,
RPC_MAX_PACKET_SIZE, RPC_MAX_PACKET_SIZE, 5000, NULL);
@@ -715,12 +720,14 @@
if (npc->pipe == INVALID_HANDLE_VALUE) {
WARN("CreateNamedPipe failed with error %d\n", GetLastError());
@@ -139,11 +144,12 @@
else
return RPC_S_CANT_CREATE_ENDPOINT;
}
memset(&npc->ovl, 0, sizeof(npc->ovl));
- npc->ovl.hEvent = CreateEventW(NULL, TRUE, FALSE, NULL);
+ npc->ovl[0].hEvent = CreateEventW(NULL, TRUE, FALSE, NULL);
+ npc->ovl[1].hEvent = CreateEventW(NULL, TRUE, FALSE, NULL);
/* Note: we don't call ConnectNamedPipe here because it must be done in the
* server thread as the thread must be alertable */
return RPC_S_OK;
}
@@ -202,11 +208,13 @@
/* success */
memset(&npc->ovl, 0, sizeof(npc->ovl));
/* pipe is connected; change to message-read mode. */
dwMode = PIPE_READMODE_MESSAGE;
SetNamedPipeHandleState(pipe, &dwMode, NULL, NULL);
- npc->ovl.hEvent = CreateEventW(NULL, TRUE, FALSE, NULL);
+ npc->ovl[0].hEvent = CreateEventW(NULL, TRUE, FALSE, NULL);
+ npc->ovl[1].hEvent = CreateEventW(NULL, TRUE, FALSE, NULL);
+
npc->pipe = pipe;
return RPC_S_OK;
}
@@ -308,11 +316,12 @@
{
/* because of the way named pipes work, we'll transfer the connected pipe
* to the child, then reopen the server binding to continue listening */
new_npc->pipe = old_npc->pipe;
- new_npc->ovl = old_npc->ovl;
+ new_npc->ovl[0] = old_npc->ovl[0];
+ new_npc->ovl[1] = old_npc->ovl[1];
old_npc->pipe = 0;
memset(&old_npc->ovl, 0, sizeof(old_npc->ovl));
old_npc->listening = FALSE;
}
@@ -359,13 +368,18 @@
unsigned int bytes_left = count;
while (bytes_left)
{
DWORD bytes_read;
- ret = ReadFile(npc->pipe, buf, bytes_left, &bytes_read, NULL);
- if (!ret || !bytes_read)
+ ret = ReadFile(npc->pipe, buf, bytes_left, &bytes_read, &npc->ovl[0]);
+ if ((!ret || !bytes_read) && (GetLastError() != ERROR_IO_PENDING))
break;
+
+ ret = GetOverlappedResult(npc->pipe, &npc->ovl[0], &bytes_read, TRUE);
+ if (!ret /*&& GetLastError() != ERROR_MORE_DATA*/)
+ break;
+
bytes_left -= bytes_read;
buf += bytes_read;
}
return ret ? count : -1;
}
@@ -379,13 +393,18 @@
unsigned int bytes_left = count;
while (bytes_left)
{
DWORD bytes_written;
- ret = WriteFile(npc->pipe, buf, count, &bytes_written, NULL);
- if (!ret || !bytes_written)
+ ret = WriteFile(npc->pipe, buf, count, &bytes_written, &npc->ovl[1]);
+ if ((!ret || !bytes_written) && (GetLastError() != ERROR_IO_PENDING))
break;
+
+ ret = GetOverlappedResult(npc->pipe, &npc->ovl[1], &bytes_written, TRUE);
+ if (!ret /*&& GetLastError() != ERROR_MORE_DATA*/)
+ break;
+
bytes_left -= bytes_written;
buf += bytes_written;
}
return ret ? count : -1;
}
@@ -396,14 +415,19 @@
if (npc->pipe) {
FlushFileBuffers(npc->pipe);
CloseHandle(npc->pipe);
npc->pipe = 0;
}
- if (npc->ovl.hEvent) {
- CloseHandle(npc->ovl.hEvent);
- npc->ovl.hEvent = 0;
+ if (npc->ovl[0].hEvent) {
+ CloseHandle(npc->ovl[0].hEvent);
+ npc->ovl[0].hEvent = 0;
+ }
+ if (npc->ovl[1].hEvent) {
+ CloseHandle(npc->ovl[1].hEvent);
+ npc->ovl[1].hEvent = 0;
}
+
return 0;
}
static void rpcrt4_conn_np_cancel_call(RpcConnection *Connection)
{
@@ -547,11 +571,11 @@
/* open and count connections */
*count = 1;
conn = CONTAINING_RECORD(protseq->conn, RpcConnection_np, common);
while (conn) {
rpcrt4_conn_listen_pipe(conn);
- if (conn->ovl.hEvent)
+ if (conn->ovl[0].hEvent)
(*count)++;
conn = CONTAINING_RECORD(conn->common.Next, RpcConnection_np, common);
}
/* make array of connections */
@@ -568,11 +592,11 @@
objs[0] = npps->mgr_event;
*count = 1;
conn = CONTAINING_RECORD(protseq->conn, RpcConnection_np, common);
while (conn) {
- if ((objs[*count] = conn->ovl.hEvent))
+ if ((objs[*count] = conn->ovl[0].hEvent))
(*count)++;
conn = CONTAINING_RECORD(conn->common.Next, RpcConnection_np, common);
}
LeaveCriticalSection(&protseq->cs);
return objs;
@@ -615,11 +639,11 @@
b_handle = objs[res - WAIT_OBJECT_0];
/* find which connection got a RPC */
EnterCriticalSection(&protseq->cs);
conn = CONTAINING_RECORD(protseq->conn, RpcConnection_np, common);
while (conn) {
- if (b_handle == conn->ovl.hEvent) break;
+ if (b_handle == conn->ovl[0].hEvent) break;
conn = CONTAINING_RECORD(conn->common.Next, RpcConnection_np, common);
}
cconn = NULL;
if (conn)
RPCRT4_SpawnConnection(&cconn, &conn->common);
@@ -713,16 +737,18 @@
RpcConnection_tcp *tcpc;
tcpc = HeapAlloc(GetProcessHeap(), 0, sizeof(RpcConnection_tcp));
if (tcpc == NULL)
return NULL;
tcpc->sock = -1;
@@ -43,7 +235,11 @@
return &tcpc->common;
}
@@ -785,8 +792,7 @@
static RPC_STATUS rpcrt4_ncacn_ip_tcp_open(RpcConnection* Connection)
{
@@ -783,12 +809,11 @@
continue;
}
/* RPC depends on having minimal latency so disable the Nagle algorithm */
val = 1;
@@ -53,7 +249,11 @@
tcpc->sock = sock;
@@ -808,6 +814,7 @@
freeaddrinfo(ai);
TRACE("connected\n");
@@ -806,10 +831,11 @@
int sock;
int ret;
struct addrinfo *ai;
struct addrinfo *ai_cur;
struct addrinfo hints;
@@ -61,7 +261,11 @@
RpcConnection *first_connection = NULL;
TRACE("(%p, %s)\n", protseq, endpoint);
@@ -859,7 +866,7 @@
hints.ai_flags = AI_PASSIVE /* for non-localhost addresses */;
@@ -857,11 +883,11 @@
ret = bind(sock, ai_cur->ai_addr, ai_cur->ai_addrlen);
if (ret < 0)
{
WARN("bind failed: %s\n", strerror(errno));
close(sock);
@@ -70,7 +274,11 @@
status = RPC_S_DUPLICATE_ENDPOINT;
else
status = RPC_S_CANT_CREATE_ENDPOINT;
@@ -888,7 +895,8 @@
continue;
}
@@ -886,11 +912,12 @@
}
/* need a non-blocking socket, otherwise accept() has a potential
* race-condition (poll() says it is readable, connection drops,
* and accept() blocks until the next connection comes...)
*/
@@ -80,7 +288,11 @@
if (ret < 0)
{
WARN("couldn't make socket non-blocking, error %d\n", ret);
@@ -931,6 +939,7 @@
RPCRT4_DestroyConnection(&tcpc->common);
status = RPC_S_OUT_OF_RESOURCES;
@@ -929,10 +956,11 @@
static RPC_STATUS rpcrt4_conn_tcp_handoff(RpcConnection *old_conn, RpcConnection *new_conn)
{
int ret;
struct sockaddr_in address;
socklen_t addrsize;
@@ -88,7 +300,11 @@
RpcConnection_tcp *server = (RpcConnection_tcp*) old_conn;
RpcConnection_tcp *client = (RpcConnection_tcp*) new_conn;
@@ -942,7 +951,8 @@
addrsize = sizeof(address);
ret = accept(server->sock, (struct sockaddr*) &address, &addrsize);
@@ -940,11 +968,12 @@
{
ERR("Failed to accept a TCP connection: error %d\n", ret);
return RPC_S_OUT_OF_RESOURCES;
}
/* reset to blocking behaviour */
@@ -98,7 +314,11 @@
client->sock = ret;
TRACE("Accepted a new TCP connection\n");
return RPC_S_OK;
@@ -1189,10 +1199,12 @@
}
@@ -1187,14 +1216,16 @@
{
RpcServerProtseq_sock *ps = HeapAlloc(GetProcessHeap(), 0, sizeof(*ps));
if (ps)
{
int fds[2];
@@ -113,3 +333,5 @@
ps->mgr_event_rcv = fds[0];
ps->mgr_event_snd = fds[1];
}
else
{