Mirror of https://github.com/reactos/reactos.git (synced 2025-08-02 05:25:48 +00:00)
Create a branch for network fixes.
svn path=/branches/aicom-network-fixes/; revision=34994
Parent: 0e213bbc00
Commit: c501d8112c
18148 changed files with 0 additions and 860488 deletions
drivers/network/tcpip/recmutex/recmutex.c | 82 lines | Normal file

@@ -0,0 +1,82 @@
#include <ntddk.h>
#include "recmutex.h"

VOID RecursiveMutexInit( PRECURSIVE_MUTEX RecMutex ) {
    RtlZeroMemory( RecMutex, sizeof(*RecMutex) );
    KeInitializeSpinLock( &RecMutex->SpinLock );
    ExInitializeFastMutex( &RecMutex->Mutex );
    KeInitializeEvent( &RecMutex->StateLockedEvent,
                       NotificationEvent, FALSE );
}

/* NOTE: When we leave, the FAST_MUTEX must have been released. The result
 * is that we always exit in the same irql as entering */
SIZE_T RecursiveMutexEnter( PRECURSIVE_MUTEX RecMutex, BOOLEAN ToWrite ) {
    NTSTATUS Status = STATUS_SUCCESS;
    PVOID CurrentThread = KeGetCurrentThread();

    /* Wait for the previous user to unlock the RecMutex state. There might be
     * multiple waiters waiting to change the state. We need to check each
     * time we get the event whether somebody still has the state locked */

    if( !RecMutex ) return FALSE;

    if( CurrentThread == RecMutex->CurrentThread ||
        (!ToWrite && !RecMutex->Writer) ) {
        RecMutex->LockCount++;
        return TRUE;
    }

    if( KeGetCurrentIrql() == PASSIVE_LEVEL ) {
        ExAcquireFastMutex( &RecMutex->Mutex );
        RecMutex->OldIrql = PASSIVE_LEVEL;
        while( RecMutex->Locked ) {
            ExReleaseFastMutex( &RecMutex->Mutex );
            Status = KeWaitForSingleObject( &RecMutex->StateLockedEvent,
                                            UserRequest,
                                            KernelMode,
                                            FALSE,
                                            NULL );
            ExAcquireFastMutex( &RecMutex->Mutex );
        }
        RecMutex->Locked = TRUE;
        RecMutex->Writer = ToWrite;
        RecMutex->CurrentThread = CurrentThread;
        RecMutex->LockCount++;
        ExReleaseFastMutex( &RecMutex->Mutex );
    } else {
        KeAcquireSpinLock( &RecMutex->SpinLock, &RecMutex->OldIrql );
        RecMutex->Locked = TRUE;
        RecMutex->Writer = ToWrite;
        RecMutex->CurrentThread = CurrentThread;
        RecMutex->LockCount++;
    }

    return TRUE;
}

VOID RecursiveMutexLeave( PRECURSIVE_MUTEX RecMutex ) {
    if( RecMutex->LockCount == 0 ) {
        return;
    } else
        RecMutex->LockCount--;

    if( !RecMutex->LockCount ) {
        RecMutex->CurrentThread = NULL;
        if( RecMutex->OldIrql == PASSIVE_LEVEL ) {
            ExAcquireFastMutex( &RecMutex->Mutex );
            RecMutex->Locked = FALSE;
            RecMutex->Writer = FALSE;
            ExReleaseFastMutex( &RecMutex->Mutex );
        } else {
            RecMutex->Locked = FALSE;
            RecMutex->Writer = FALSE;
            KeReleaseSpinLock( &RecMutex->SpinLock, RecMutex->OldIrql );
        }

        RecMutex->OldIrql = PASSIVE_LEVEL;
        KePulseEvent( &RecMutex->StateLockedEvent, IO_NETWORK_INCREMENT,
                      FALSE );
    }
}
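The enter path above is IRQL-sensitive: at PASSIVE_LEVEL it serializes state changes through the FAST_MUTEX and blocks on StateLockedEvent until the previous owner leaves, while at elevated IRQL it takes the spin lock (which raises to DISPATCH_LEVEL) and records the caller's previous IRQL in OldIrql so the matching leave can restore it. The sketch below is not part of this commit; the routine names and the AddressListLock variable are hypothetical, and it assumes the caller runs at PASSIVE_LEVEL.

/* Hypothetical caller, not part of this commit: a driver routine that
 * serializes access to a shared structure with the recursive mutex.
 * Assumes PASSIVE_LEVEL, where RecursiveMutexEnter blocks on
 * StateLockedEvent instead of taking the spin-lock path. */
#include <ntddk.h>
#include "recmutex.h"

static RECURSIVE_MUTEX AddressListLock;   /* hypothetical shared lock */

VOID ExampleInit( VOID ) {
    RecursiveMutexInit( &AddressListLock );
}

VOID ExampleUpdateAddressList( VOID ) {
    /* Take the lock for writing; re-entering from the same thread only
     * increments LockCount, so nested helpers may acquire it again. */
    RecursiveMutexEnter( &AddressListLock, TRUE );

    /* ... modify the shared structure here ... */

    /* Each Enter is paired with a Leave; the final Leave pulses
     * StateLockedEvent so the next waiter can lock the state. */
    RecursiveMutexLeave( &AddressListLock );
}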
drivers/network/tcpip/recmutex/recmutex.h | 27 lines | Normal file

@@ -0,0 +1,27 @@
#ifndef _ROSRTL_RECMUTEX_H
#define _ROSRTL_RECMUTEX_H

typedef struct _RECURSIVE_MUTEX {
    /* Lock. */
    FAST_MUTEX Mutex;
    /* Number of times this object was locked */
    SIZE_T LockCount;
    /* CurrentThread */
    PVOID CurrentThread;
    /* Notification event which signals that another thread can take over */
    KEVENT StateLockedEvent;
    /* IRQL from spin lock */
    KIRQL OldIrql;
    /* Is Locked */
    BOOLEAN Locked;
    /* Is reader or writer phase */
    BOOLEAN Writer;
    /* Spin lock needed for */
    KSPIN_LOCK SpinLock;
} RECURSIVE_MUTEX, *PRECURSIVE_MUTEX;

extern VOID RecursiveMutexInit( PRECURSIVE_MUTEX RecMutex );
extern SIZE_T RecursiveMutexEnter( PRECURSIVE_MUTEX RecMutex, BOOLEAN ToRead );
extern VOID RecursiveMutexLeave( PRECURSIVE_MUTEX RecMutex );

#endif/*_ROSRTL_RECMUTEX_H*/
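The second parameter is declared as ToRead here but named ToWrite in recmutex.c; going by the implementation, TRUE requests exclusive (writer) access and FALSE requests shared access, which multiple threads may hold as long as no writer owns the mutex. A small reader-side sketch, again hypothetical and not part of this commit:

/* Hypothetical reader routine (not in this commit). FALSE requests the
 * shared path, so several readers can hold the mutex concurrently while
 * no writer owns it. */
VOID ExampleDumpAddressList( PRECURSIVE_MUTEX Lock ) {
    if( !RecursiveMutexEnter( Lock, FALSE ) )
        return;   /* Enter only reports failure for a NULL mutex pointer */

    /* ... read the shared structure here ... */

    RecursiveMutexLeave( Lock );
}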