- fix a crash caused by a race condition when closing a socket
- move the timer hack from sys_arch.c to timers.c, where it performs best (it will have to be removed at some point, though)
- when closing a socket, mask out only the receive and accept callbacks, as they can cause a crash during closure; the remaining callbacks are left in place because they may still be used (a sketch of the idea follows below)
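
A minimal sketch of the callback-masking idea, assuming lwIP's tcp.h registration API (the helper name is ours for illustration, not part of the commit):

    /* Detach only the callbacks that can fire into connection state being
     * torn down; leave tcp_sent/tcp_err registered because queued sends
     * may still complete while the close is in progress. */
    #include "lwip/tcp.h"

    static void
    mask_close_sensitive_callbacks(struct tcp_pcb *pcb)
    {
        tcp_arg(pcb, NULL);           /* drop the per-connection context */

        if (pcb->state != LISTEN)     /* lwIP asserts recv pcb != LISTEN */
            tcp_recv(pcb, NULL);      /* stop receive notifications */

        tcp_accept(pcb, NULL);        /* stop accept notifications */
    }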

[TCPIP]
- merge r52698
- remove some DbgPrint calls that are now useless, since the bug they were added to help track down has been fixed

svn path=/branches/GSoC_2011/TcpIpDriver/; revision=52712
Author: Claudiu Mihail
Date:   2011-07-17 12:32:33 +00:00
Parent: 11a025ade9
Commit: f0c093af20

7 changed files with 41 additions and 58 deletions


@@ -758,51 +758,45 @@ NTSTATUS DispTdiQueryInformation(
     case TDI_QUERY_CONNECTION_INFO:
       {
-        PTDI_CONNECTION_INFORMATION AddressInfo;
-        PADDRESS_FILE AddrFile;
-        PCONNECTION_ENDPOINT Endpoint = NULL;
+        PTDI_CONNECTION_INFO ConnectionInfo;
+        PCONNECTION_ENDPOINT Endpoint;

-        if (MmGetMdlByteCount(Irp->MdlAddress) <
-            (FIELD_OFFSET(TDI_CONNECTION_INFORMATION, RemoteAddress) +
-             sizeof(PVOID))) {
-          TI_DbgPrint(MID_TRACE, ("MDL buffer too small (ptr).\n"));
+        if (MmGetMdlByteCount(Irp->MdlAddress) < sizeof(*ConnectionInfo)) {
+          TI_DbgPrint(MID_TRACE, ("MDL buffer too small.\n"));
           return STATUS_BUFFER_TOO_SMALL;
         }

-        AddressInfo = (PTDI_CONNECTION_INFORMATION)
+        ConnectionInfo = (PTDI_CONNECTION_INFO)
             MmGetSystemAddressForMdl(Irp->MdlAddress);

         switch ((ULONG_PTR)IrpSp->FileObject->FsContext2) {
           case TDI_TRANSPORT_ADDRESS_FILE:
-            AddrFile = (PADDRESS_FILE)TranContext->Handle.AddressHandle;
-            Endpoint = AddrFile ? AddrFile->Connection : NULL;
-            break;
-
           case TDI_CONNECTION_FILE:
-            Endpoint =
-              (PCONNECTION_ENDPOINT)TranContext->Handle.ConnectionContext;
-            break;
+            RtlZeroMemory(ConnectionInfo, sizeof(*ConnectionInfo));
+            return STATUS_SUCCESS;

           default:
             TI_DbgPrint(MIN_TRACE, ("Invalid transport context\n"));
             return STATUS_INVALID_PARAMETER;
         }
-
-        if (!Endpoint) {
-          TI_DbgPrint(MID_TRACE, ("No connection object.\n"));
-          return STATUS_INVALID_PARAMETER;
-        }
-
-        return TCPGetSockAddress( Endpoint, AddressInfo->RemoteAddress, TRUE );
       }

     case TDI_QUERY_MAX_DATAGRAM_INFO:
       {
-        PTDI_MAX_DATAGRAM_INFO MaxDatagramInfo = MmGetSystemAddressForMdl(Irp->MdlAddress);
+        PTDI_MAX_DATAGRAM_INFO MaxDatagramInfo;
+
+        if (MmGetMdlByteCount(Irp->MdlAddress) < sizeof(*MaxDatagramInfo)) {
+          TI_DbgPrint(MID_TRACE, ("MDL buffer too small.\n"));
+          return STATUS_BUFFER_TOO_SMALL;
+        }
+
+        MaxDatagramInfo = (PTDI_MAX_DATAGRAM_INFO)
+            MmGetSystemAddressForMdl(Irp->MdlAddress);

         MaxDatagramInfo->MaxDatagramSize = 0xFFFF;

         return STATUS_SUCCESS;
       }
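
The pattern this hunk establishes — validate the MDL's byte count against the full output structure before mapping it — applies to any fixed-size TDI query reply. A sketch under that assumption (TDI_QUERY_FOO and PFOO_INFO are placeholder names; the Mm* routines are the real kernel APIs used above):

    case TDI_QUERY_FOO:
    {
        PFOO_INFO FooInfo;   /* hypothetical reply structure */

        /* Reject the request before mapping if the caller's buffer
           cannot hold the whole structure. */
        if (MmGetMdlByteCount(Irp->MdlAddress) < sizeof(*FooInfo)) {
            TI_DbgPrint(MID_TRACE, ("MDL buffer too small.\n"));
            return STATUS_BUFFER_TOO_SMALL;
        }

        FooInfo = (PFOO_INFO)MmGetSystemAddressForMdl(Irp->MdlAddress);
        FooInfo->SomeField = 0;   /* fill in the reply */
        return STATUS_SUCCESS;
    }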


@@ -70,7 +70,7 @@ FlushAllQueues(PCONNECTION_ENDPOINT Connection, NTSTATUS Status)
     ReferenceObject(Connection);

-    DbgPrint("[IP, FlushAllQueues] Flushing recv/all with status: 0x%x fox Connection = 0x%x\n", Status, Connection);
+    DbgPrint("[IP, FlushAllQueues] Flushing recv/all with status: 0x%x for Connection = 0x%x\n", Status, Connection);

     while ((Entry = ExInterlockedRemoveHeadList(&Connection->ReceiveRequest, &Connection->Lock)))
     {
@@ -86,9 +86,9 @@ FlushAllQueues(PCONNECTION_ENDPOINT Connection, NTSTATUS Status)
         CompleteBucket(Connection, Bucket, FALSE);
     }

-    /* Calling with Status == STATUS_SUCCESS means that we got a graceful closure
-     * so we don't want to kill everything else since send is still valid in this state
-     */
+    // Calling with Status == STATUS_SUCCESS means that we got a graceful closure
+    // so we don't want to kill everything else since send is still valid in this state
+    //
     if (Status == STATUS_SUCCESS)
     {
         DbgPrint("[IP, FlushAllQueues] Flushed recv only after graceful closure\n");


@@ -43,8 +43,6 @@ TCPSendDataCallback(struct netif *netif, struct pbuf *p, struct ip_addr *dest)
         return EINVAL;
     }

-    DbgPrint("[IP, TCPSendDataCallback] Set packet local and remore adresses\n");
-
     if (!(NCE = RouteGetRouteToDestination(&RemoteAddress)))
     {
         DbgPrint("[IP, TCPSendDataCallback] FAIL EINVAL 2\n");
@@ -65,8 +63,6 @@ TCPSendDataCallback(struct netif *netif, struct pbuf *p, struct ip_addr *dest)
         ASSERT(p1);
         RtlCopyMemory(((PUCHAR)Packet.Header) + i, p1->payload, p1->len);
     }

-    DbgPrint("[IP, TCPSendDataCallback] Allocated NDIS packet and set data\n");
-
     Packet.HeaderSize = sizeof(IPv4_HEADER);
     Packet.TotalSize = p->tot_len;
@@ -79,8 +75,6 @@ TCPSendDataCallback(struct netif *netif, struct pbuf *p, struct ip_addr *dest)
         FreeNdisPacket(Packet.NdisPacket);
         return EINVAL;
     }

-    DbgPrint("[IP, TCPSendDataCallback] Leaving\n");
-
     return 0;
 }


@@ -127,15 +127,15 @@ NTSTATUS TCPClose
         DbgPrint("[IP, TCPClose] Socket (pcb) = 0x%x\n", Socket);

-        LibTCPClose(Socket);
+        LibTCPClose(Socket, FALSE);
     }

-    DbgPrint("[IP, TCPClose] Leaving. Connection->RefCount = %d\n", Connection->RefCount);
-
     UnlockObject(Connection, OldIrql);

     DereferenceObject(Connection);

+    DbgPrint("[IP, TCPClose] Leaving. Connection->RefCount = %d\n", Connection->RefCount);
+
     return STATUS_SUCCESS;
 }
@@ -466,7 +466,8 @@ NTSTATUS TCPSendData
     Status = TCPTranslateError(LibTCPSend(Connection->SocketContext,
                                           BufferData,
-                                          SendLength));
+                                          SendLength,
+                                          FALSE));

     DbgPrint("[IP, TCPSendData] LibTCPSend: 0x%x\n", Status);


@@ -427,7 +427,7 @@ sys_timeouts_mbox_fetch(sys_mbox_t *mbox, void **msg)
       time_needed = sys_arch_mbox_fetch(mbox, msg, 0);
     } else {
       if (next_timeout->time > 0) {
-        time_needed = sys_arch_mbox_fetch(mbox, msg, next_timeout->time);
+        time_needed = sys_arch_mbox_fetch(mbox, msg, 5);//next_timeout->time);
       } else {
         time_needed = SYS_ARCH_TIMEOUT;
       }


@@ -482,7 +482,7 @@ LibTCPSend(struct tcp_pcb *pcb, void *const dataptr, const u16_t len, const int
     else
         ret = ERR_CLSD;

-    DbgPrint("LibTCPSend(0x%x)\n", pcb);
+    DbgPrint("[lwIP, LibTCPSend] pcb = 0x%x\n", pcb);

     ExFreePool(msg);
@@ -599,13 +599,15 @@ LibTCPShutdown(struct tcp_pcb *pcb, const int shut_rx, const int shut_tx)
     struct shutdown_callback_msg *msg;
     err_t ret;

-    DbgPrint("[lwIP, LibTCPShutdown] Called\n");
+    DbgPrint("[lwIP, LibTCPShutdown] Called on pcb = 0x%x\n", pcb);

     if (!pcb)
     {
         DbgPrint("[lwIP, LibTCPShutdown] Done... NO pcb\n");
         return ERR_CLSD;
     }

+    DbgPrint("[lwIP, LibTCPClose] pcb->state = %s\n", tcp_state_str[pcb->state]);
+
     msg = ExAllocatePool(NonPagedPool, sizeof(struct shutdown_callback_msg));
     if (msg)
@@ -625,8 +627,6 @@ LibTCPShutdown(struct tcp_pcb *pcb, const int shut_rx, const int shut_tx)
         ExFreePool(msg);

-    DbgPrint("[lwIP, LibTCPShutdown] pcb = 0x%x\n", pcb);
-
     DbgPrint("[lwIP, LibTCPShutdown] Done\n");

     return ret;
@@ -658,12 +658,14 @@ LibTCPCloseCallback(void *arg)
         DbgPrint("[lwIP, LibTCPCloseCallback] Closing a listener\n");
         msg->Error = tcp_close(msg->Pcb);
     }
-    else
+    else if (msg->Pcb->state != LAST_ACK && msg->Pcb->state != CLOSED)
     {
         DbgPrint("[lwIP, LibTCPCloseCallback] Aborting a connection\n");
         tcp_abort(msg->Pcb);
         msg->Error = ERR_OK;
     }
+    else
+        msg->Error = ERR_OK;

     KeSetEvent(&msg->Event, IO_NO_INCREMENT, FALSE);
 }
@@ -681,8 +683,6 @@ LibTCPClose(struct tcp_pcb *pcb, const int safe)
         return ERR_CLSD;
     }

-    DbgPrint("[lwIP, LibTCPClose] Removing pcb callbacks\n");
-
     DbgPrint("[lwIP, LibTCPClose] pcb->state = %s\n", tcp_state_str[pcb->state]);

     tcp_arg(pcb, NULL);
@@ -694,14 +694,12 @@ LibTCPClose(struct tcp_pcb *pcb, const int safe)
     if (pcb->state != LISTEN)
     {
         tcp_recv(pcb, NULL);
-        tcp_sent(pcb, NULL);
-        tcp_err(pcb, NULL);
+        //tcp_sent(pcb, NULL);
+        //tcp_err(pcb, NULL);
     }

     tcp_accept(pcb, NULL);

-    DbgPrint("[lwIP, LibTCPClose] Attempting to allocate memory for msg\n");
-
     /*
         If we're being called from a handler it means we're in the conetxt of teh tcpip
         main thread. Therefore we don't have to queue our request via a callback and we
@@ -714,12 +712,14 @@ LibTCPClose(struct tcp_pcb *pcb, const int safe)
         DbgPrint("[lwIP, LibTCPClose] Closing a listener\n");
         ret = tcp_close(pcb);
     }
-    else
+    else if (pcb->state != LAST_ACK && pcb->state != CLOSED)
     {
         DbgPrint("[lwIP, LibTCPClose] Aborting a connection\n");
         tcp_abort(pcb);
         ret = ERR_OK;
     }
+    else
+        ret = ERR_OK;

     return ret;
 }
@@ -730,13 +730,9 @@ LibTCPClose(struct tcp_pcb *pcb, const int safe)
     msg = ExAllocatePool(NonPagedPool, sizeof(struct close_callback_msg));
     if (msg)
     {
-        DbgPrint("[lwIP, LibTCPClose] Initializing msg->Event\n");
-
         KeInitializeEvent(&msg->Event, NotificationEvent, FALSE);
-        DbgPrint("[lwIP, LibTCPClose] Initializing msg->pcb = 0x%x\n", pcb);
         msg->Pcb = pcb;
-        DbgPrint("[lwIP, LibTCPClose] Attempting to call LibTCPCloseCallback\n");

         tcpip_callback_with_block(LibTCPCloseCallback, msg, 1);
@@ -746,8 +742,6 @@ LibTCPClose(struct tcp_pcb *pcb, const int safe)
         ret = ERR_CLSD;

     ExFreePool(msg);

-    DbgPrint("[lwIP, LibTCPClose] pcb = 0x%x\n", pcb);
-
     DbgPrint("[lwIP, LibTCPClose] Done\n");


@@ -97,7 +97,7 @@ sys_arch_sem_wait(sys_sem_t* sem, u32_t timeout)
     // FIXME: This is a hack to increase the throughput. Once this is done
     // the right way it should definately be removed.
-    timeout = 5;
+    //timeout = 5;

     Status = KeWaitForMultipleObjects(2,
                                       WaitObjects,