/*++
Copyright (c) 2001-2002 Microsoft Corporation
Module Name:
Memory.c
Abstract:
Utilities to allocate physical memory and to map and unmap the MDLs.
Environment:
kernel mode only
Notes:
--*/
#include "Filter.h"
#include "Device.h"
#include "CRC.h"
#include "Util.h"
#if DBG_WMI_TRACING
//
// for any file that has software tracing printouts, you must include a
// header file <filename>.tmh
// this file will be generated by the WPP processing phase
//
#include "Memory.tmh"
#endif
NTSTATUS AllocAndMapPages(PDEVICE_EXTENSION DeviceExtension, ULONG LogicalBlockAddr, ULONG NumSectors)
/*++
Routine Description:
CRC_MDL_ARRAY is an array of pointers. Each pointer references a memory block of fixed
size ( CRC_MDL_LOGIC_BLOCK_SIZE * sizeof(USHORT) ), which stores the CRCs of the sectors
covered by that block. In essence, this is a two-dimensional array of CRCs indexed by
sector number. Based on the logical block address, find the CRC MDL block(s) spanned by
the request; if a block has not been allocated yet, allocate memory for it.
Must be called with SyncEvent HELD
Must be called only when PAGING is allowed
Return Value:
SUCCESS if allocation succeeded, error otherwise.
--*/
{
NTSTATUS status = STATUS_SUCCESS;
ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
ASSERT(DeviceExtension->CRCMdlLists.mdlItemsAllocated);
if (NumSectors){
ULONG StartIndex = LogicalBlockAddr / CRC_MDL_LOGIC_BLOCK_SIZE;
ULONG EndIndex = (LogicalBlockAddr + NumSectors - 1) / CRC_MDL_LOGIC_BLOCK_SIZE;
ULONG i;
ASSERT (EndIndex < DeviceExtension->CRCMdlLists.ulMaxItems);
for (i = StartIndex; i <= EndIndex; i++){
PCRC_MDL_ITEM pCRCMdlItem = &DeviceExtension->CRCMdlLists.pMdlItems[i];
const ULONG checkSumsArrayLen = CRC_MDL_LOGIC_BLOCK_SIZE*sizeof(USHORT);
if (pCRCMdlItem->checkSumsArraysAllocated){
/*
* The checksum array pair for the region is already allocated.
* Lock it down.
*/
if (!pCRCMdlItem->checkSumsArraysLocked){
if (!LockCheckSumArrays(DeviceExtension, i)){
status = STATUS_INSUFFICIENT_RESOURCES;
break;
}
}
}
else {
/*
* Allocate the checksum arrays from PAGED pool so we don't take up all of nonpaged pool.
* Allocate 2 copies of the checksums since they may be paged out to the disk that we are verifying.
*/
BOOLEAN buffersAssigned = FALSE;
PVOID checkSums = AllocPool(DeviceExtension, PagedPool, checkSumsArrayLen, TRUE);
if (checkSums){
PVOID checkSumsCopy = AllocPool(DeviceExtension, PagedPool, checkSumsArrayLen, TRUE);
if (checkSumsCopy){
PMDL checkSumsMdl = IoAllocateMdl(checkSums, checkSumsArrayLen, FALSE, FALSE, NULL);
if (checkSumsMdl){
PMDL checkSumsCopyMdl = IoAllocateMdl(checkSumsCopy, checkSumsArrayLen, FALSE, FALSE, NULL);
if (checkSumsCopyMdl){
KIRQL oldIrql;
/*
* Now assign the pointers and update the pCRCMdlItem synchronously.
* Make sure not to touch, allocate, or free any pageable memory while the spinlock is held.
*/
KeAcquireSpinLock(&DeviceExtension->SpinLock, &oldIrql);
if (!pCRCMdlItem->checkSumsArraysAllocated){
ASSERT(!pCRCMdlItem->checkSumsArray);
ASSERT(!pCRCMdlItem->checkSumsArrayCopy);
pCRCMdlItem->checkSumsArray = checkSums;
pCRCMdlItem->checkSumsArrayCopy = checkSumsCopy;
pCRCMdlItem->checkSumsArrayMdl = checkSumsMdl;
pCRCMdlItem->checkSumsArrayCopyMdl = checkSumsCopyMdl;
pCRCMdlItem->checkSumsArraysAllocated = TRUE;
buffersAssigned = TRUE;
}
else {
buffersAssigned = FALSE;
}
KeReleaseSpinLock(&DeviceExtension->SpinLock, oldIrql);
if (buffersAssigned){
if (!LockCheckSumArrays(DeviceExtension, i)){
status = STATUS_INSUFFICIENT_RESOURCES;
break;
}
}
else {
IoFreeMdl(checkSumsCopyMdl);
}
}
else {
/* Failed to allocate the copy MDL; free everything allocated so far before bailing out. */
IoFreeMdl(checkSumsMdl);
FreePool(DeviceExtension, checkSumsCopy, PagedPool);
FreePool(DeviceExtension, checkSums, PagedPool);
status = STATUS_INSUFFICIENT_RESOURCES;
break;
}
if (!buffersAssigned){
IoFreeMdl(checkSumsMdl);
}
}
else {
/* Failed to allocate the MDL; free the checksum buffers before bailing out. */
FreePool(DeviceExtension, checkSumsCopy, PagedPool);
FreePool(DeviceExtension, checkSums, PagedPool);
status = STATUS_INSUFFICIENT_RESOURCES;
break;
}
if (!buffersAssigned){
FreePool(DeviceExtension, checkSumsCopy, PagedPool);
}
}
else {
/* Failed to allocate the checksum copy buffer; free the first buffer before bailing out. */
FreePool(DeviceExtension, checkSums, PagedPool);
status = STATUS_INSUFFICIENT_RESOURCES;
break;
}
if (!buffersAssigned){
FreePool(DeviceExtension, checkSums, PagedPool);
}
}
else {
status = STATUS_INSUFFICIENT_RESOURCES;
break;
}
}
}
}
return status;
}
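/*
 * Illustrative sketch (excluded from the build): one way a hypothetical caller could
 * record a sector's checksum once AllocAndMapPages has succeeded for that range. The
 * helper name ExampleStoreCheckSum and the Crc parameter are assumptions for
 * illustration only; the indexing mirrors the routine above (region index =
 * LBA / CRC_MDL_LOGIC_BLOCK_SIZE, slot within the region = LBA % CRC_MDL_LOGIC_BLOCK_SIZE).
 * A real caller must also honor the rules in this file: the checksum arrays are pageable,
 * so they must be locked (see LockCheckSumArrays) and access synchronized via the SpinLock.
 */
#if 0
static VOID ExampleStoreCheckSum(PDEVICE_EXTENSION DeviceExtension, ULONG LogicalBlockAddr, USHORT Crc)
{
    ULONG regionIndex = LogicalBlockAddr / CRC_MDL_LOGIC_BLOCK_SIZE;   // which CRC_MDL_ITEM covers this sector
    ULONG slotIndex = LogicalBlockAddr % CRC_MDL_LOGIC_BLOCK_SIZE;     // offset of the sector within that region
    PCRC_MDL_ITEM pCRCMdlItem = &DeviceExtension->CRCMdlLists.pMdlItems[regionIndex];
    PUSHORT checkSums = (PUSHORT)pCRCMdlItem->checkSumsArray;
    checkSums[slotIndex] = Crc;                                        // store the 16-bit checksum for this sector
}
#endif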
VOID FreeAllPages(PDEVICE_EXTENSION DeviceExtension)
/*++
Routine Description:
Frees all of the checksum arrays and their MDLs allocated for this disk. This is done in
response to a capacity change of the disk.
At runtime, must be called with SyncEvent HELD
Arguments:
DeviceExtension - Device extension for the particular disk on which the filter sits.
Return Value:
N/A
--*/
{
ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
if (DeviceExtension->CRCMdlLists.pMdlItems){
ULONG StartSector = 0;
ULONG LastSector = DeviceExtension->ulNumSectors-1;
ULONG StartIndex = StartSector / CRC_MDL_LOGIC_BLOCK_SIZE;
ULONG EndIndex = LastSector / CRC_MDL_LOGIC_BLOCK_SIZE;
ULONG i;
for (i = StartIndex; i <= EndIndex; i++){
PCRC_MDL_ITEM pCRCMdlItem = &DeviceExtension->CRCMdlLists.pMdlItems[i];
PVOID bufToFree = NULL, bufCopyToFree = NULL;
KIRQL oldIrql;
/*
* We need the spinlock to synchronize allocation of the checkSum arrays,
* but doing so raises irql to dispatch level,
* and we cannot free PAGED pool at dispatch level.
* So we move the pointers with spinlock held, and free them after dropping the lock.
*/
KeAcquireSpinLock(&DeviceExtension->SpinLock, &oldIrql);
if (pCRCMdlItem->checkSumsArraysAllocated){
bufToFree = pCRCMdlItem->checkSumsArray;
pCRCMdlItem->checkSumsArray = NULL;
bufCopyToFree = pCRCMdlItem->checkSumsArrayCopy;
pCRCMdlItem->checkSumsArrayCopy = NULL;
/*
* Unlock the checksum arrays before we free them, and remove them from the locked LRU list.
*/
if (pCRCMdlItem->checkSumsArraysLocked){
MmUnlockPages(pCRCMdlItem->checkSumsArrayMdl);
MmUnlockPages(pCRCMdlItem->checkSumsArrayCopyMdl);
pCRCMdlItem->checkSumsArraysLocked = FALSE;
ASSERT(!IsListEmpty(&DeviceExtension->CRCMdlLists.LockedLRUList));
ASSERT(!IsListEmpty(&pCRCMdlItem->LockedLRUListEntry));
RemoveEntryList(&pCRCMdlItem->LockedLRUListEntry);
InitializeListHead(&pCRCMdlItem->LockedLRUListEntry);
ASSERT(DeviceExtension->CRCMdlLists.ulTotalLocked > 0);
DeviceExtension->CRCMdlLists.ulTotalLocked--;
}
IoFreeMdl(pCRCMdlItem->checkSumsArrayMdl);
pCRCMdlItem->checkSumsArrayMdl = NULL;
IoFreeMdl(pCRCMdlItem->checkSumsArrayCopyMdl);
pCRCMdlItem->checkSumsArrayCopyMdl = NULL;
pCRCMdlItem->checkSumsArraysAllocated = FALSE;
}
KeReleaseSpinLock(&DeviceExtension->SpinLock, oldIrql);
if (bufToFree) FreePool(DeviceExtension, bufToFree, PagedPool);
if (bufCopyToFree) FreePool(DeviceExtension, bufCopyToFree, PagedPool);
}
}
ASSERT(DeviceExtension->CRCMdlLists.ulTotalLocked == 0);
ASSERT(IsListEmpty(&DeviceExtension->CRCMdlLists.LockedLRUList));
}
/*
* LockCheckSumArrays
*
* Must be called at PASSIVE irql with SyncEvent HELD but SPINLOCK NOT HELD.
*/
BOOLEAN LockCheckSumArrays(PDEVICE_EXTENSION DeviceExtension, ULONG RegionIndex)
{
PCRC_MDL_ITEM pCRCMdlItem = &DeviceExtension->CRCMdlLists.pMdlItems[RegionIndex];
BOOLEAN lockedFirstArray = FALSE, lockSucceeded = FALSE;
ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
ASSERT(RegionIndex < DeviceExtension->CRCMdlLists.ulMaxItems);
ASSERT(!pCRCMdlItem->checkSumsArraysLocked);
__try {
/*
* We are locking down pageable addresses, so do this at passive level, i.e. with the spinlock not held.
* If MmProbeAndLockPages fails, it raises an exception, which we catch in the __except block.
*/
MmProbeAndLockPages(pCRCMdlItem->checkSumsArrayMdl, KernelMode, IoWriteAccess);
lockedFirstArray = TRUE;
MmProbeAndLockPages(pCRCMdlItem->checkSumsArrayCopyMdl, KernelMode, IoWriteAccess);
lockSucceeded = TRUE;
}
__except(EXCEPTION_EXECUTE_HANDLER){
DBGERR(("MmProbeAndLockPages raised exception"));
DeviceExtension->DbgNumLockFailures++;
if (lockedFirstArray){
MmUnlockPages(pCRCMdlItem->checkSumsArrayMdl);
}
}
if (lockSucceeded){
BOOLEAN unlockSome = FALSE;
KIRQL oldIrql;
KeAcquireSpinLock(&DeviceExtension->SpinLock, &oldIrql);
pCRCMdlItem->checkSumsArraysLocked = TRUE;
/*
* Update this region's timestamp.
* That will make it the 'latest' one and keep it from getting unlocked below.
*/
ASSERT(IsListEmpty(&pCRCMdlItem->LockedLRUListEntry));
UpdateRegionAccessTimeStamp(DeviceExtension, RegionIndex);
/*
* Keep track of the number of locked checksum array pairs.
* If it goes too high, unlock the least-recently-used array pair.
*/
DeviceExtension->CRCMdlLists.ulTotalLocked++;
if (DeviceExtension->CRCMdlLists.ulTotalLocked > MAX_LOCKED_CHECKSUM_ARRAY_PAIRS){
unlockSome = TRUE;
}
KeReleaseSpinLock(&DeviceExtension->SpinLock, oldIrql);
if (unlockSome){
UnlockLRUChecksumArray(DeviceExtension);
/*
* We just refreshed latestAccessTimestamp for the region we locked (moving it to the
* tail of the LRU list), so UnlockLRUChecksumArray should not have unlocked that region.
*/
ASSERT(pCRCMdlItem->checkSumsArraysLocked);
}
}
return lockSucceeded;
}
/*
* UnlockLRUChecksumArray
*
* Unlock the least recently used checksum array pair
*
* Must be called at PASSIVE irql with SyncEvent HELD but SPINLOCK NOT HELD.
*/
VOID UnlockLRUChecksumArray(PDEVICE_EXTENSION DeviceExtension)
{
KIRQL oldIrql;
ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
KeAcquireSpinLock(&DeviceExtension->SpinLock, &oldIrql);
if (DeviceExtension->CRCMdlLists.mdlItemsAllocated){
/*
* Unlock the checksum arrays for the region at the head of the LRU list;
* this is the 'oldest' (least recently touched) region.
*/
if (!IsListEmpty(&DeviceExtension->CRCMdlLists.LockedLRUList)){
PLIST_ENTRY listEntry = RemoveHeadList(&DeviceExtension->CRCMdlLists.LockedLRUList);
PCRC_MDL_ITEM lruMdlItem = CONTAINING_RECORD(listEntry, CRC_MDL_ITEM, LockedLRUListEntry);
InitializeListHead(&lruMdlItem->LockedLRUListEntry);
ASSERT(lruMdlItem->checkSumsArraysLocked);
lruMdlItem->checkSumsArraysLocked = FALSE;
MmUnlockPages(lruMdlItem->checkSumsArrayMdl);
MmUnlockPages(lruMdlItem->checkSumsArrayCopyMdl);
ASSERT(DeviceExtension->CRCMdlLists.ulTotalLocked > 0);
DeviceExtension->CRCMdlLists.ulTotalLocked--;
}
}
KeReleaseSpinLock(&DeviceExtension->SpinLock, oldIrql);
}
/*
* UpdateRegionAccessTimeStamp
*
* Updates latestAccessTimestamp for a LOCKED region, and maintains the LRU list.
*
* Must be called with SPINLOCK HELD
*/
VOID UpdateRegionAccessTimeStamp(PDEVICE_EXTENSION DeviceExtension, ULONG RegionIndex)
{
PCRC_MDL_ITEM pCRCMdlItem = &DeviceExtension->CRCMdlLists.pMdlItems[RegionIndex];
ASSERT(pCRCMdlItem->checkSumsArraysLocked);
/*
* Update the region's timestamp, and move it to the end of the LRU list.
*/
pCRCMdlItem->latestAccessTimestamp = ++DeviceExtension->CRCMdlLists.currentAccessCount;
RemoveEntryList(&pCRCMdlItem->LockedLRUListEntry); // the entry is initialized to point to itself, so this is safe even if it's not currently queued
InsertTailList(&DeviceExtension->CRCMdlLists.LockedLRUList, &pCRCMdlItem->LockedLRUListEntry);
}