modified   include/ntos/zwtypes.h           Add SYSTEM_BATTERY_STATE from w32api.

modified   ntoskrnl/include/internal/mm.h   Add MmAllocPagesSpecifyRange (internal function)
modified   ntoskrnl/mm/freelist.c           Add MmAllocPagesSpecifyRange function (used by MmAllocatePagesForMdl)
modified   ntoskrnl/mm/mdl.c                Implement MmAllocatePagesForMdl and MmFreePagesFromMdl. Turn MmMapLockedPages into MmMapLockedPagesSpecifyCache (and make MmMapLockedPages a wrapper around it).
modified   ntoskrnl/po/power.c              Implement SystemBatteryState in NtPowerInformation (thanks to Alex for testing the returned values on his Windows desktop PC)

Windows 2000 NVIDIA driver should work now.

svn path=/trunk/; revision=12871
Gregor Anich 2005-01-07 16:10:48 +00:00
parent 7d7661d52d
commit f5daa985c6
5 changed files with 456 additions and 161 deletions
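For context, the newly implemented routines are the ones a WDM driver (such as the NVIDIA display driver mentioned above) uses to obtain physically backed, mappable buffers. Below is a minimal sketch of that call sequence using only standard DDK APIs; it is illustrative and not code from this commit, and error handling is abbreviated:

#include <ntddk.h>

static PVOID
AllocMappedBuffer(SIZE_T Bytes, PMDL *OutMdl)
{
   PHYSICAL_ADDRESS Low, High, Skip;
   PMDL Mdl;
   PVOID Va;

   Low.QuadPart = 0;                  /* accept any physical page below 4 GB */
   High.QuadPart = 0xFFFFFFFF;
   Skip.QuadPart = 0;

   Mdl = MmAllocatePagesForMdl(Low, High, Skip, Bytes);
   if (Mdl == NULL)
      return NULL;

   /* Map the pages; the MDL may describe fewer bytes than requested. */
   Va = MmMapLockedPagesSpecifyCache(Mdl, KernelMode, MmCached,
                                     NULL, FALSE, NormalPagePriority);
   if (Va == NULL)
   {
      MmFreePagesFromMdl(Mdl);
      ExFreePool(Mdl);
      return NULL;
   }

   *OutMdl = Mdl;
   return Va;
}

Teardown is the reverse: MmUnmapLockedPages, MmFreePagesFromMdl, then ExFreePool on the MDL itself (see the note after MmFreePagesFromMdl below).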


@@ -1666,6 +1666,22 @@ typedef struct _OBJECT_DIRECTORY_INFORMATION
} OBJECT_DIRECTORY_INFORMATION, *POBJECT_DIRECTORY_INFORMATION;
/* system battery state */
typedef struct _SYSTEM_BATTERY_STATE {
BOOLEAN AcOnLine;
BOOLEAN BatteryPresent;
BOOLEAN Charging;
BOOLEAN Discharging;
BOOLEAN Spare1[4];
ULONG MaxCapacity;
ULONG RemainingCapacity;
ULONG Rate;
ULONG EstimatedTime;
ULONG DefaultAlert1;
ULONG DefaultAlert2;
} SYSTEM_BATTERY_STATE, *PSYSTEM_BATTERY_STATE;
// power information levels
typedef enum _POWER_INFORMATION_LEVEL {
SystemPowerPolicyAc,
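For reference (not part of this diff), a consumer of SYSTEM_BATTERY_STATE typically derives the remaining runtime from RemainingCapacity and Rate when EstimatedTime is not reported. A minimal sketch, assuming the usual DDK units (mWh for the capacities, mW for Rate, seconds for EstimatedTime):

/* Hypothetical helper, assuming the DDK-documented units. */
static ULONG
BatterySecondsRemaining(const SYSTEM_BATTERY_STATE *State)
{
   if (State->EstimatedTime != (ULONG)-1)
      return State->EstimatedTime;           /* reported directly, in seconds */
   if (!State->Discharging || State->Rate == 0)
      return (ULONG)-1;                      /* unknown or not discharging */
   /* mWh divided by mW gives hours; scale to seconds. */
   return (ULONG)(((ULONGLONG)State->RemainingCapacity * 3600) / State->Rate);
}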


@@ -697,6 +697,12 @@ VOID MmSetDirtyPage(PEPROCESS Process, PVOID Address);
PFN_TYPE MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry);
LONG MmAllocPagesSpecifyRange(ULONG Consumer,
PHYSICAL_ADDRESS LowestAddress,
PHYSICAL_ADDRESS HighestAddress,
ULONG NumberOfPages,
PPFN_TYPE Pages);
VOID MmDereferencePage(PFN_TYPE Page);
VOID MmReferencePage(PFN_TYPE Page);


@@ -897,6 +897,154 @@ MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry)
return PfnOffset;
}
LONG
MmAllocPagesSpecifyRange(ULONG Consumer,
PHYSICAL_ADDRESS LowestAddress,
PHYSICAL_ADDRESS HighestAddress,
ULONG NumberOfPages,
PPFN_TYPE Pages)
{
PPHYSICAL_PAGE PageDescriptor;
KIRQL oldIrql;
PFN_TYPE LowestPage, HighestPage;
PFN_TYPE pfn;
ULONG NumberOfPagesFound = 0;
ULONG i;
DPRINT("MmAllocPagesSpecifyRange()\n"
" LowestAddress = 0x%08x%08x\n"
" HighestAddress = 0x%08x%08x\n"
" NumberOfPages = %d\n",
LowestAddress.u.HighPart, LowestAddress.u.LowPart,
HighestAddress.u.HighPart, HighestAddress.u.LowPart,
NumberOfPages);
if (NumberOfPages == 0)
return 0;
LowestPage = LowestAddress.QuadPart / PAGE_SIZE;
HighestPage = HighestAddress.QuadPart / PAGE_SIZE;
if ((HighestAddress.u.LowPart % PAGE_SIZE) != 0)
HighestPage++;
if (LowestPage >= MmPageArraySize)
{
DPRINT1("MmAllocPagesSpecifyRange(): Out of memory\n");
return -1;
}
if (HighestPage > MmPageArraySize)
HighestPage = MmPageArraySize;
KeAcquireSpinLock(&PageListLock, &oldIrql);
if (LowestPage == 0 && HighestPage == MmPageArraySize)
{
PLIST_ENTRY ListEntry;
while (NumberOfPagesFound < NumberOfPages)
{
if (!IsListEmpty(&FreeZeroedPageListHead))
{
ListEntry = RemoveTailList(&FreeZeroedPageListHead);
}
else if (!IsListEmpty(&FreeUnzeroedPageListHead))
{
ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
UnzeroedPageCount--;
}
else
{
if (NumberOfPagesFound == 0)
{
KeReleaseSpinLock(&PageListLock, oldIrql);
DPRINT1("MmAllocPagesSpecifyRange(): Out of memory\n");
return -1;
}
else
{
break;
}
}
PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
ASSERT(PageDescriptor->Flags.Type == MM_PHYSICAL_PAGE_FREE);
ASSERT(PageDescriptor->MapCount == 0);
ASSERT(PageDescriptor->ReferenceCount == 0);
/* Allocate the page */
PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
PageDescriptor->Flags.Consumer = Consumer;
PageDescriptor->ReferenceCount = 1;
PageDescriptor->LockCount = 0;
PageDescriptor->MapCount = 0;
PageDescriptor->SavedSwapEntry = 0; /* FIXME: Do we need swap entries? */
InsertTailList(&UsedPageListHeads[Consumer], &PageDescriptor->ListEntry);
MmStats.NrSystemPages++;
MmStats.NrFreePages--;
/* Remember the page */
pfn = PageDescriptor - MmPageArray;
Pages[NumberOfPagesFound++] = pfn;
}
}
else
{
INT LookForZeroedPages;
for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
{
for (pfn = LowestPage; pfn < HighestPage; pfn++)
{
PageDescriptor = MmPageArray + pfn;
if (PageDescriptor->Flags.Type != MM_PHYSICAL_PAGE_FREE)
continue;
if (PageDescriptor->Flags.Zero != LookForZeroedPages)
continue;
ASSERT(PageDescriptor->MapCount == 0);
ASSERT(PageDescriptor->ReferenceCount == 0);
/* Allocate the page */
PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
PageDescriptor->Flags.Consumer = Consumer;
PageDescriptor->ReferenceCount = 1;
PageDescriptor->LockCount = 0;
PageDescriptor->MapCount = 0;
PageDescriptor->SavedSwapEntry = 0; /* FIXME: Do we need swap entries? */
RemoveEntryList(&PageDescriptor->ListEntry);
InsertTailList(&UsedPageListHeads[Consumer], &PageDescriptor->ListEntry);
if (!PageDescriptor->Flags.Zero)
UnzeroedPageCount--;
MmStats.NrSystemPages++;
MmStats.NrFreePages--;
/* Remember the page */
Pages[NumberOfPagesFound++] = pfn;
if (NumberOfPagesFound == NumberOfPages)
break;
}
if (NumberOfPagesFound == NumberOfPages)
break;
}
}
KeReleaseSpinLock(&PageListLock, oldIrql);
/* Zero unzero-ed pages */
for (i = 0; i < NumberOfPagesFound; i++)
{
pfn = Pages[i];
if (MmPageArray[pfn].Flags.Zero == 0)
{
MiZeroPage(pfn);
}
else
{
MmPageArray[pfn].Flags.Zero = 0;
}
}
return NumberOfPagesFound;
}
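To illustrate how the new allocator is meant to be called (a hypothetical internal caller, not part of this commit): it fills an array of PFNs with pages restricted to a physical range and returns how many it could allocate, or -1 on failure. Each returned page is referenced once and is later released with MmDereferencePage. For example, pulling four pages from below 16 MB:

   PHYSICAL_ADDRESS Low, High;
   PFN_TYPE Pfns[4];
   LONG Found;

   Low.QuadPart = 0;
   High.QuadPart = 16 * 1024 * 1024;   /* stay below 16 MB */
   Found = MmAllocPagesSpecifyRange(MC_NPPOOL, Low, High, 4, Pfns);
   if (Found < 0)
   {
      /* no free pages available at all */
   }
   else if (Found < 4)
   {
      /* partial allocation: only Found pages were available in the range */
   }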
NTSTATUS STDCALL
MmZeroPageThreadMain(PVOID Ignored)


@@ -40,6 +40,7 @@ MDL_PARTIAL_HAS_BEEN_MAPPED mdl flagged MDL_PARTIAL has been mapped into kernel
/* FUNCTIONS *****************************************************************/
/*
* @unimplemented
*/
@@ -54,6 +55,7 @@ MmAdvanceMdl (
return STATUS_NOT_IMPLEMENTED;
}
VOID INIT_FUNCTION
MmInitializeMdlImplementation(VOID)
{
@@ -92,6 +94,7 @@ MmInitializeMdlImplementation(VOID)
KeInitializeSpinLock(&MiMdlMappingRegionLock);
}
PVOID
MmGetMdlPageAddress(PMDL Mdl, PVOID Offset)
{
@@ -170,153 +173,6 @@ MmUnlockPages(PMDL Mdl)
}
/*
* @implemented
*/
PVOID STDCALL
MmMapLockedPages(PMDL Mdl, KPROCESSOR_MODE AccessMode)
/*
* FUNCTION: Maps the physical pages described by a given MDL
* ARGUMENTS:
* Mdl = Points to an MDL updated by MmProbeAndLockPages, MmBuildMdlForNonPagedPool
* or IoBuildPartialMdl.
* AccessMode = Specifies the portion of the address space to map the
* pages.
* RETURNS: The base virtual address that maps the locked pages for the
* range described by the MDL
*
* If mapping into user space, pages are mapped into current address space.
*/
{
PVOID Base;
PULONG MdlPages;
KIRQL oldIrql;
ULONG PageCount;
ULONG StartingOffset;
PEPROCESS CurrentProcess;
NTSTATUS Status;
DPRINT("MmMapLockedPages(Mdl %x, AccessMode %x)\n", Mdl, AccessMode);
/* Calculate the number of pages required. */
PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
if (AccessMode == UserMode)
{
MEMORY_AREA *Result;
LARGE_INTEGER BoundaryAddressMultiple;
NTSTATUS Status;
/* pretty sure you can't map partial mdl's to user space */
ASSERT(!(Mdl->MdlFlags & MDL_PARTIAL));
BoundaryAddressMultiple.QuadPart = 0;
Base = NULL;
CurrentProcess = PsGetCurrentProcess();
MmLockAddressSpace(&CurrentProcess->AddressSpace);
Status = MmCreateMemoryArea(CurrentProcess,
&CurrentProcess->AddressSpace,
MEMORY_AREA_MDL_MAPPING,
&Base,
PageCount * PAGE_SIZE,
0, /* PAGE_READWRITE? */
&Result,
FALSE,
FALSE,
BoundaryAddressMultiple);
MmUnlockAddressSpace(&CurrentProcess->AddressSpace);
if (!NT_SUCCESS(Status))
{
if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
{
return NULL;
}
KEBUGCHECK(0);
/* FIXME: handle this? */
}
Mdl->Process = CurrentProcess;
}
else /* if (AccessMode == KernelMode) */
{
/* can't map mdl twice */
ASSERT(!(Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA|MDL_PARTIAL_HAS_BEEN_MAPPED)));
/* can't map mdl built from non paged pool into kernel space */
ASSERT(!(Mdl->MdlFlags & (MDL_SOURCE_IS_NONPAGED_POOL)));
CurrentProcess = NULL;
/* Allocate that number of pages from the mdl mapping region. */
KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
StartingOffset = RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap, PageCount, MiMdlMappingRegionHint);
if (StartingOffset == 0xffffffff)
{
KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
DPRINT1("Out of MDL mapping space\n");
if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
{
return NULL;
}
KEBUGCHECK(0);
}
Base = (PVOID)((ULONG_PTR)MiMdlMappingRegionBase + StartingOffset * PAGE_SIZE);
if (MiMdlMappingRegionHint == StartingOffset)
{
MiMdlMappingRegionHint += PageCount;
}
KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
}
/* Set the virtual mappings for the MDL pages. */
MdlPages = (PULONG)(Mdl + 1);
Status = MmCreateVirtualMapping(CurrentProcess,
Base,
PAGE_READWRITE,
MdlPages,
PageCount);
if (!NT_SUCCESS(Status))
{
DbgPrint("Unable to create virtual mapping\n");
if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
{
return NULL;
}
KEBUGCHECK(0);
}
/* Mark the MDL as having been mapped. */
if (AccessMode == KernelMode)
{
if (Mdl->MdlFlags & MDL_PARTIAL)
{
Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
}
else
{
Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
}
Mdl->MappedSystemVa = (char*)Base + Mdl->ByteOffset;
}
return((char*)Base + Mdl->ByteOffset);
}
/*
* @unimplemented
*/
@@ -333,6 +189,7 @@ MmMapLockedPagesWithReservedMapping (
return 0;
}
/*
* @implemented
*/
@@ -423,6 +280,7 @@ MmUnmapLockedPages(PVOID BaseAddress, PMDL Mdl)
}
/*
* @unimplemented
*/
@@ -447,6 +305,7 @@ MmBuildMdlFromPages(PMDL Mdl, PPFN_TYPE Pages)
Mdl->MdlFlags |= MDL_IO_PAGE_READ;
}
/*
* @unimplemented
*/
@@ -461,6 +320,7 @@ MmPrefetchPages (
return STATUS_NOT_IMPLEMENTED;
}
/*
* @unimplemented
*/
@@ -477,7 +337,7 @@ MmProtectMdlSystemAddress (
/*
* @implemented
*/
VOID STDCALL MmProbeAndLockPages (PMDL Mdl,
KPROCESSOR_MODE AccessMode,
@@ -603,6 +463,7 @@ VOID STDCALL MmProbeAndLockPages (PMDL Mdl,
Mdl->MdlFlags |= MDL_PAGES_LOCKED;
}
/*
* @unimplemented
*/
@@ -618,6 +479,7 @@ MmProbeAndLockProcessPages (
UNIMPLEMENTED;
}
/*
* @unimplemented
*/
@@ -633,6 +495,7 @@ MmProbeAndLockSelectedPages(
UNIMPLEMENTED;
}
/*
* @implemented
*/
@@ -732,6 +595,7 @@ MmCreateMdl (PMDL Mdl,
return(Mdl);
}
/*
* @unimplemented
*/
@@ -745,8 +609,9 @@ MmMapMemoryDumpMdl (PVOID Unknown0)
UNIMPLEMENTED;
}
/*
* @implemented
*/
PMDL STDCALL
MmAllocatePagesForMdl ( IN PHYSICAL_ADDRESS LowAddress,
@@ -776,15 +641,61 @@ MmAllocatePagesForMdl ( IN PHYSICAL_ADDRESS LowAddress,
*/
PMDL Mdl;
PPFN_TYPE Pages;
ULONG NumberOfPagesWanted, NumberOfPagesAllocated;
ULONG Ret;
DPRINT("MmAllocatePagesForMdl - LowAddress = 0x%I64x, HighAddress = 0x%I64x, "
"SkipBytes = 0x%I64x, Totalbytes = 0x%x\n",
LowAddress.QuadPart, HighAddress.QuadPart,
SkipBytes.QuadPart, Totalbytes);
/* SkipBytes must be a multiple of the page size */
ASSERT((SkipBytes.QuadPart % PAGE_SIZE) == 0);
/* Allocate memory for the MDL */
Mdl = MmCreateMdl(NULL, 0, Totalbytes);
if (Mdl == NULL)
{
return NULL;
}
/* Allocate pages into the MDL */
NumberOfPagesAllocated = 0;
NumberOfPagesWanted = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
Pages = (PPFN_TYPE)(Mdl + 1);
while (NumberOfPagesWanted > 0)
{
Ret = MmAllocPagesSpecifyRange(
MC_NPPOOL,
LowAddress,
HighAddress,
NumberOfPagesWanted,
Pages + NumberOfPagesAllocated);
if (Ret == -1)
break;
NumberOfPagesAllocated += Ret;
NumberOfPagesWanted -= Ret;
if (SkipBytes.QuadPart == 0)
break;
LowAddress.QuadPart += SkipBytes.QuadPart;
HighAddress.QuadPart += SkipBytes.QuadPart;
}
if (NumberOfPagesAllocated == 0)
{
ExFreePool(Mdl);
Mdl = NULL;
}
return Mdl;
}
/*
* @implemented
*/
VOID STDCALL
MmFreePagesFromMdl ( IN PMDL Mdl )
@@ -801,12 +712,21 @@ MmFreePagesFromMdl ( IN PMDL Mdl )
Konstantin Gusev
*/
PPFN_TYPE Pages;
LONG NumberOfPages;
NumberOfPages = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
Pages = (PPFN_TYPE)(Mdl + 1);
while (--NumberOfPages >= 0)
{
MmDereferencePage(Pages[NumberOfPages]);
}
}
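Note that, per the DDK contract (and as the implementation above implies), MmFreePagesFromMdl releases only the physical pages; the caller still unmaps any mapping and frees the MDL structure itself. A sketch, where MappedVa and Mdl are placeholder variables from the caller:

   MmUnmapLockedPages(MappedVa, Mdl);   /* only if it had been mapped */
   MmFreePagesFromMdl(Mdl);
   ExFreePool(Mdl);                     /* the MDL itself is not freed above */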
/*
* @implemented
*/
PVOID STDCALL
MmMapLockedPagesSpecifyCache ( IN PMDL Mdl,
@@ -814,12 +734,184 @@ MmMapLockedPagesSpecifyCache ( IN PMDL Mdl,
IN MEMORY_CACHING_TYPE CacheType,
IN PVOID BaseAddress,
IN ULONG BugCheckOnFailure,
IN MM_PAGE_PRIORITY Priority)
{
PVOID Base;
PULONG MdlPages;
KIRQL oldIrql;
ULONG PageCount;
ULONG StartingOffset;
PEPROCESS CurrentProcess;
NTSTATUS Status;
ULONG Protect;
DPRINT("MmMapLockedPagesSpecifyCache(Mdl 0x%x, AccessMode 0x%x, CacheType 0x%x, "
"BaseAddress 0x%x, BugCheckOnFailure 0x%x, Priority 0x%x)\n",
Mdl, AccessMode, CacheType, BaseAddress, BugCheckOnFailure, Priority);
/* FIXME: Implement Priority */
(void) Priority;
/* Calculate the number of pages required. */
PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
if (AccessMode == UserMode)
{
MEMORY_AREA *Result;
LARGE_INTEGER BoundaryAddressMultiple;
NTSTATUS Status;
/* pretty sure you can't map partial mdl's to user space */
ASSERT(!(Mdl->MdlFlags & MDL_PARTIAL));
BoundaryAddressMultiple.QuadPart = 0;
Base = BaseAddress;
CurrentProcess = PsGetCurrentProcess();
MmLockAddressSpace(&CurrentProcess->AddressSpace);
Status = MmCreateMemoryArea(CurrentProcess,
&CurrentProcess->AddressSpace,
MEMORY_AREA_MDL_MAPPING,
&Base,
PageCount * PAGE_SIZE,
0, /* PAGE_READWRITE? */
&Result,
(Base != NULL),
FALSE,
BoundaryAddressMultiple);
MmUnlockAddressSpace(&CurrentProcess->AddressSpace);
if (!NT_SUCCESS(Status))
{
if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
{
return NULL;
}
/* FIXME: Raise an exception instead of bugchecking */
KEBUGCHECK(0);
}
Mdl->Process = CurrentProcess;
}
else /* if (AccessMode == KernelMode) */
{
/* can't map mdl twice */
ASSERT(!(Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA|MDL_PARTIAL_HAS_BEEN_MAPPED)));
/* can't map mdl built from non paged pool into kernel space */
ASSERT(!(Mdl->MdlFlags & (MDL_SOURCE_IS_NONPAGED_POOL)));
CurrentProcess = NULL;
/* Allocate that number of pages from the mdl mapping region. */
KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
StartingOffset = RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap, PageCount, MiMdlMappingRegionHint);
if (StartingOffset == 0xffffffff)
{
KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
DPRINT1("Out of MDL mapping space\n");
if ((Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) || !BugCheckOnFailure)
{
return NULL;
}
KEBUGCHECK(0);
}
Base = (PVOID)((ULONG_PTR)MiMdlMappingRegionBase + StartingOffset * PAGE_SIZE);
if (MiMdlMappingRegionHint == StartingOffset)
{
MiMdlMappingRegionHint += PageCount;
}
KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
}
/* Set the virtual mappings for the MDL pages. */
MdlPages = (PULONG)(Mdl + 1);
Protect = PAGE_READWRITE;
if (CacheType == MmNonCached)
Protect |= PAGE_NOCACHE;
else if (CacheType == MmWriteCombined)
DPRINT("CacheType MmWriteCombined not supported!\n");
Status = MmCreateVirtualMapping(CurrentProcess,
Base,
Protect,
MdlPages,
PageCount);
if (!NT_SUCCESS(Status))
{
DbgPrint("Unable to create virtual mapping\n");
if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
{
return NULL;
}
if (AccessMode == UserMode)
{
/* FIXME: Raise an exception */
return NULL;
}
else /* AccessMode == KernelMode */
{
if (!BugCheckOnFailure)
return NULL;
/* FIXME: Use some bugcheck code instead of 0 */
KEBUGCHECK(0);
}
}
/* Mark the MDL as having been mapped. */
if (AccessMode == KernelMode)
{
if (Mdl->MdlFlags & MDL_PARTIAL)
{
Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
}
else
{
Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
}
Mdl->MappedSystemVa = (char*)Base + Mdl->ByteOffset;
}
return((char*)Base + Mdl->ByteOffset);
}
/*
* @implemented
*/
PVOID STDCALL
MmMapLockedPages(PMDL Mdl, KPROCESSOR_MODE AccessMode)
/*
* FUNCTION: Maps the physical pages described by a given MDL
* ARGUMENTS:
* Mdl = Points to an MDL updated by MmProbeAndLockPages, MmBuildMdlForNonPagedPool,
* MmAllocatePagesForMdl or IoBuildPartialMdl.
* AccessMode = Specifies the portion of the address space to map the
* pages.
* RETURNS: The base virtual address that maps the locked pages for the
* range described by the MDL
*
* If mapping into user space, pages are mapped into current address space.
*/
{
return MmMapLockedPagesSpecifyCache(Mdl,
AccessMode,
MmCached,
NULL,
TRUE,
NormalPagePriority);
}
/* EOF */


@@ -245,8 +245,41 @@ NtPowerInformation(
IN ULONG OutputBufferLength
)
{
NTSTATUS Status;
DPRINT("NtPowerInformation(PowerInformationLevel 0x%x, InputBuffer 0x%x, "
"InputBufferLength 0x%x, OutputBuffer 0x%x, OutputBufferLength 0x%x)\n",
PowerInformationLevel,
InputBuffer, InputBufferLength,
OutputBuffer, OutputBufferLength);
switch (PowerInformationLevel)
{
case SystemBatteryState:
{
PSYSTEM_BATTERY_STATE BatteryState = (PSYSTEM_BATTERY_STATE)OutputBuffer;
if (InputBuffer != NULL)
return STATUS_INVALID_PARAMETER;
if (OutputBufferLength < sizeof(SYSTEM_BATTERY_STATE))
return STATUS_BUFFER_TOO_SMALL;
/* Just zero the struct (and thus set BatteryState->BatteryPresent = FALSE) */
RtlZeroMemory(BatteryState, sizeof(SYSTEM_BATTERY_STATE));
BatteryState->EstimatedTime = (ULONG)-1;
Status = STATUS_SUCCESS;
break;
}
default:
Status = STATUS_NOT_IMPLEMENTED;
DPRINT1("PowerInformationLevel 0x%x is UNIMPLEMENTED! Have a nice day.\n",
PowerInformationLevel);
for (;;);
break;
}
return Status;
}
/* EOF */
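Finally, a user-mode sketch of how the new SystemBatteryState handler gets exercised, via the documented CallNtPowerInformation wrapper from powrprof; illustrative only (with the stubbed handler above it reports no battery present):

#include <windows.h>
#include <powrprof.h>
#include <stdio.h>
/* link against powrprof.lib */

int main(void)
{
   SYSTEM_BATTERY_STATE Sbs;

   if (CallNtPowerInformation(SystemBatteryState,
                              NULL, 0,             /* no input buffer */
                              &Sbs, sizeof(Sbs)) == 0)  /* STATUS_SUCCESS */
   {
      printf("AC online: %d, battery present: %d, remaining: %lu mWh\n",
             Sbs.AcOnLine, Sbs.BatteryPresent, Sbs.RemainingCapacity);
   }
   return 0;
}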