[Box Backup-commit] COMMIT r3250 - box/trunk/lib/backupstore

subversion at boxbackup.org subversion at boxbackup.org
Mon Feb 10 20:14:34 GMT 2014


Author: chris
Date: 2014-02-10 20:14:34 +0000 (Mon, 10 Feb 2014)
New Revision: 3250

Modified:
   box/trunk/lib/backupstore/BackupStoreContext.cpp
   box/trunk/lib/backupstore/BackupStoreContext.h
   box/trunk/lib/backupstore/BackupStoreInfo.h
Log:
Fix accounting for old and deleted files and blocks during backup operations.

Adding and deleting files were not always accounted for properly before.

Modified: box/trunk/lib/backupstore/BackupStoreContext.cpp
===================================================================
--- box/trunk/lib/backupstore/BackupStoreContext.cpp	2014-02-10 16:35:31 UTC (rev 3249)
+++ box/trunk/lib/backupstore/BackupStoreContext.cpp	2014-02-10 20:14:34 UTC (rev 3250)
@@ -126,6 +126,20 @@
 		// Save the store info, not delayed
 		SaveStoreInfo(false);
 	}
+
+	// Just in case someone wants to reuse a local protocol object,
+	// put the context back to its initial state.
+	mProtocolPhase = BackupStoreContext::Phase_Version;
+
+	// Avoid the need to check version again, by not resetting
+	// mClientHasAccount, mAccountRootDir or mStoreDiscSet
+
+	mReadOnly = true;
+	mSaveStoreInfoDelay = STORE_INFO_SAVE_DELAY;
+	mpTestHook = NULL;
+	mapStoreInfo.reset();
+	mapRefCount.reset();
+	ClearDirectoryCache();
 }
 
 
@@ -234,6 +248,7 @@
 	{
 		THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
 	}
+
 	if(mReadOnly)
 	{
 		THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
@@ -434,11 +449,12 @@
 	{
 		THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
 	}
+
 	if(mReadOnly)
 	{
 		THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
 	}
-	
+
 	// This is going to be a bit complex to make sure it copes OK
 	// with things going wrong.
 	// The only thing which isn't safe is incrementing the object ID
@@ -461,12 +477,13 @@
 	RaidFileWrite *ppreviousVerStoreFile = 0;
 	bool reversedDiffIsCompletelyDifferent = false;
 	int64_t oldVersionNewBlocksUsed = 0;
+	BackupStoreInfo::Adjustment adjustment = {};
+
 	try
 	{
 		RaidFileWrite storeFile(mStoreDiscSet, fn);
 		storeFile.Open(false /* no overwriting */);
 
-		// size adjustment from use of patch in old file
 		int64_t spaceSavedByConversionToPatch = 0;
 
 		// Diff or full file?
@@ -551,6 +568,17 @@
 					from->GetDiscUsageInBlocks() - 
 					oldVersionNewBlocksUsed;
 
+				adjustment.mBlocksUsed -= spaceSavedByConversionToPatch;
+				// The code below will change the patch from a
+				// Current file to an Old file, so we need to
+				// account for it as a Current file here.
+				adjustment.mBlocksInCurrentFiles -=
+					spaceSavedByConversionToPatch;
+
+				// Don't adjust anything else here. We'll do it
+				// when we update the directory just below,
+				// which also accounts for non-diff replacements.
+
 				// Everything cleans up here...
 			}
 			catch(...)
@@ -563,11 +591,14 @@
 		
 		// Get the blocks used
 		newObjectBlocksUsed = storeFile.GetDiscUsageInBlocks();
+		adjustment.mBlocksUsed += newObjectBlocksUsed;
+		adjustment.mBlocksInCurrentFiles += newObjectBlocksUsed;
+		adjustment.mNumCurrentFiles++;
 		
 		// Exceeds the hard limit?
-		int64_t newBlocksUsed = mapStoreInfo->GetBlocksUsed() + 
-			newObjectBlocksUsed - spaceSavedByConversionToPatch;
-		if(newBlocksUsed > mapStoreInfo->GetBlocksHardLimit())
+		int64_t newTotalBlocksUsed = mapStoreInfo->GetBlocksUsed() + 
+			adjustment.mBlocksUsed;
+		if(newTotalBlocksUsed > mapStoreInfo->GetBlocksHardLimit())
 		{
 			THROW_EXCEPTION(BackupStoreException, AddedFileExceedsStorageLimit)
 			// The store file will be deleted automatically by the RaidFile object
@@ -607,9 +638,23 @@
 	
 	// Modify the directory -- first make all files with the same name
 	// marked as an old version
-	int64_t blocksInOldFiles = 0;
 	try
 	{
+		// Adjust the entry for the object that we replaced with a
+		// patch, above.
+		BackupStoreDirectory::Entry *poldEntry = NULL;
+
+		if(DiffFromFileID != 0)
+		{
+			// Get old version entry
+			poldEntry = dir.FindEntryByID(DiffFromFileID);
+			ASSERT(poldEntry != 0);
+		
+			// Adjust size of old entry
+			int64_t oldSize = poldEntry->GetSizeInBlocks();
+			poldEntry->SetSizeInBlocks(oldVersionNewBlocksUsed);
+		}
+
 		if(MarkFileWithSameNameAsOldVersions)
 		{
 			BackupStoreDirectory::Iterator i(dir);
@@ -629,7 +674,10 @@
 						e->AddFlags(BackupStoreDirectory::Entry::Flags_OldVersion);
 						// Can safely do this, because we know we won't be here if it's already 
 						// an old version
-						blocksInOldFiles += e->GetSizeInBlocks();
+						adjustment.mBlocksInOldFiles += e->GetSizeInBlocks();
+						adjustment.mBlocksInCurrentFiles -= e->GetSizeInBlocks();
+						adjustment.mNumOldFiles++;
+						adjustment.mNumCurrentFiles--;
 					}
 				}
 			}
@@ -637,33 +685,17 @@
 		
 		// Then the new entry
 		BackupStoreDirectory::Entry *pnewEntry = dir.AddEntry(rFilename,
-				ModificationTime, id, newObjectBlocksUsed,
-				BackupStoreDirectory::Entry::Flags_File,
-				AttributesHash);
+			ModificationTime, id, newObjectBlocksUsed,
+			BackupStoreDirectory::Entry::Flags_File,
+			AttributesHash);
 
-		// Adjust for the patch back stuff?
-		if(DiffFromFileID != 0)
+		// Adjust dependency info of file?
+		if(DiffFromFileID && poldEntry && !reversedDiffIsCompletelyDifferent)
 		{
-			// Get old version entry
-			BackupStoreDirectory::Entry *poldEntry = dir.FindEntryByID(DiffFromFileID);
-			ASSERT(poldEntry != 0);
-		
-			// Adjust dependency info of file?
-			if(!reversedDiffIsCompletelyDifferent)
-			{
-				poldEntry->SetDependsNewer(id);
-				pnewEntry->SetDependsOlder(DiffFromFileID);
-			}
+			poldEntry->SetDependsNewer(id);
+			pnewEntry->SetDependsOlder(DiffFromFileID);
+		}
 			
-			// Adjust size of old entry
-			int64_t oldSize = poldEntry->GetSizeInBlocks();
-			poldEntry->SetSizeInBlocks(oldVersionNewBlocksUsed);
-			
-			// And adjust blocks used count, for later adjustment
-			newObjectBlocksUsed += (oldVersionNewBlocksUsed - oldSize);
-			blocksInOldFiles += (oldVersionNewBlocksUsed - oldSize);
-		}
-
 		// Write the directory back to disc
 		SaveDirectory(dir, InDirectory);
 
@@ -700,21 +732,16 @@
 	ASSERT(ppreviousVerStoreFile == 0);
 	
 	// Modify the store info
-
-	if(DiffFromFileID == 0)
-	{
-		mapStoreInfo->AdjustNumCurrentFiles(1);
-	}
-	else
-	{
-		mapStoreInfo->AdjustNumOldFiles(1);
-	}
+	mapStoreInfo->AdjustNumCurrentFiles(adjustment.mNumCurrentFiles);
+	mapStoreInfo->AdjustNumOldFiles(adjustment.mNumOldFiles);
+	mapStoreInfo->AdjustNumDeletedFiles(adjustment.mNumDeletedFiles);
+	mapStoreInfo->AdjustNumDirectories(adjustment.mNumDirectories);
+	mapStoreInfo->ChangeBlocksUsed(adjustment.mBlocksUsed);
+	mapStoreInfo->ChangeBlocksInCurrentFiles(adjustment.mBlocksInCurrentFiles);
+	mapStoreInfo->ChangeBlocksInOldFiles(adjustment.mBlocksInOldFiles);
+	mapStoreInfo->ChangeBlocksInDeletedFiles(adjustment.mBlocksInDeletedFiles);
+	mapStoreInfo->ChangeBlocksInDirectories(adjustment.mBlocksInDirectories);
 	
-	mapStoreInfo->ChangeBlocksUsed(newObjectBlocksUsed);
-	mapStoreInfo->ChangeBlocksInCurrentFiles(newObjectBlocksUsed -
-		blocksInOldFiles);
-	mapStoreInfo->ChangeBlocksInOldFiles(blocksInOldFiles);
-	
 	// Increment reference count on the new directory to one
 	mapRefCount->AddReference(id);
 	
@@ -757,9 +784,6 @@
 	bool madeChanges = false;
 	rObjectIDOut = 0;		// not found
 
-	// Count of deleted blocks
-	int64_t blocksDel = 0;
-
 	try
 	{
 		// Iterate through directory, only looking at files which haven't been deleted
@@ -772,14 +796,28 @@
 			if(e->GetName() == rFilename)
 			{
 				// Check that it's definately not already deleted
-				ASSERT((e->GetFlags() & BackupStoreDirectory::Entry::Flags_Deleted) == 0);
+				ASSERT(!e->IsDeleted());
 				// Set deleted flag
 				e->AddFlags(BackupStoreDirectory::Entry::Flags_Deleted);
 				// Mark as made a change
 				madeChanges = true;
-				// Can safely do this, because we know we won't be here if it's already 
-				// an old version
-				blocksDel += e->GetSizeInBlocks();
+
+				int64_t blocks = e->GetSizeInBlocks();
+				mapStoreInfo->AdjustNumDeletedFiles(1);
+				mapStoreInfo->ChangeBlocksInDeletedFiles(blocks);
+
+				// We're marking all old versions as deleted.
+				// This is how a file can be old and deleted
+				// at the same time. So we don't subtract from
+				// number or size of old files. But if it was
+				// a current file, then it's not any more, so
+				// we do need to adjust the current counts.
+				if(!e->IsOld())
+				{
+					mapStoreInfo->AdjustNumCurrentFiles(-1);
+					mapStoreInfo->ChangeBlocksInCurrentFiles(-blocks);
+				}
+					
 				// Is this the last version?
 				if((e->GetFlags() & BackupStoreDirectory::Entry::Flags_OldVersion) == 0)
 				{
@@ -795,13 +833,6 @@
 		{
 			// Save the directory back
 			SaveDirectory(dir, InDirectory);
-			
-			// Modify the store info, and write
-			// It definitely wasn't an old or deleted version
-			mapStoreInfo->AdjustNumCurrentFiles(-1);
-			mapStoreInfo->AdjustNumDeletedFiles(1);
-			mapStoreInfo->ChangeBlocksInDeletedFiles(blocksDel);
-			
 			SaveStoreInfo(false);
 		}
 	}
@@ -874,16 +905,16 @@
 				}
 			}
 		}
-		
+
 		// Save changes?
 		if(madeChanges)
 		{
 			// Save the directory back
 			SaveDirectory(dir, InDirectory);
-			
+
 			// Modify the store info, and write
 			mapStoreInfo->ChangeBlocksInDeletedFiles(blocksDel);
-			
+
 			// Maybe postponed save of store info
 			SaveStoreInfo();
 		}
@@ -933,6 +964,7 @@
 	{
 		THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
 	}
+
 	if(rDir.GetObjectID() != ObjectID)
 	{
 		THROW_EXCEPTION(BackupStoreException, Internal)
@@ -956,7 +988,7 @@
 
 			// Commit directory
 			writeDir.Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
-			
+
 			// Make sure the size of the directory is available for writing the dir back
 			ASSERT(dirSize > 0);
 			int64_t sizeAdjustment = dirSize - rDir.GetUserInfo1_SizeInBlocks();
@@ -1004,10 +1036,10 @@
 	{
 		THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
 	}
-	
+
 	// Flags as not already existing
 	rAlreadyExists = false;
-	
+
 	// Get the directory we want to modify
 	BackupStoreDirectory &dir(GetDirectoryInternal(InDirectory));
 
@@ -1037,7 +1069,7 @@
 		BackupStoreDirectory emptyDir(id, InDirectory);
 		// add the atttribues
 		emptyDir.SetAttributes(Attributes, AttributesModTime);
-		
+
 		// Write...
 		RaidFileWrite dirFile(mStoreDiscSet, fn);
 		dirFile.Open(false /* no overwriting */);
@@ -1053,7 +1085,7 @@
 		mapStoreInfo->ChangeBlocksInDirectories(dirSize);
 		// Not added to cache, so don't set the size in the directory
 	}
-	
+
 	// Then add it into the parent directory
 	try
 	{
@@ -1068,10 +1100,10 @@
 		// Back out on adding that directory
 		RaidFileWrite del(mStoreDiscSet, fn);
 		del.Delete();
-		
+
 		// Remove this entry from the cache
 		RemoveDirectoryFromCache(InDirectory);
-		
+
 		// Don't worry about the incremented number in the store info
 		throw;	
 	}
@@ -1099,6 +1131,7 @@
 	{
 		THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
 	}
+
 	if(mReadOnly)
 	{
 		THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
@@ -1106,9 +1139,6 @@
 
 	// Containing directory
 	int64_t InDirectory = 0;
-	
-	// Count of blocks deleted
-	int64_t blocksDeleted = 0;
 
 	try
 	{
@@ -1121,13 +1151,13 @@
 			InDirectory = dir.GetContainerID();
 		
 			// Depth first delete of contents
-			DeleteDirectoryRecurse(ObjectID, blocksDeleted, Undelete);
+			DeleteDirectoryRecurse(ObjectID, Undelete);
 		}
-		
+
 		// Remove the entry from the directory it's in
 		ASSERT(InDirectory != 0);
 		BackupStoreDirectory &parentDir(GetDirectoryInternal(InDirectory));
-		
+
 		BackupStoreDirectory::Iterator i(parentDir);
 		BackupStoreDirectory::Entry *en = 0;
 		while((en = i.Next(Undelete?(BackupStoreDirectory::Entry::Flags_Deleted):(BackupStoreDirectory::Entry::Flags_INCLUDE_EVERYTHING),
@@ -1144,18 +1174,16 @@
 				{
 					en->AddFlags(BackupStoreDirectory::Entry::Flags_Deleted);
 				}
-							
+
 				// Save it
 				SaveDirectory(parentDir, InDirectory);
-				
+
 				// Done
 				break;
 			}
 		}
-		
+
 		// Update blocks deleted count
-		mapStoreInfo->ChangeBlocksInDeletedFiles(Undelete?(0 - blocksDeleted):(blocksDeleted));
-		mapStoreInfo->AdjustNumDirectories(-1);
 		SaveStoreInfo(false);
 	}
 	catch(...)
@@ -1173,18 +1201,18 @@
 //		Created: 2003/10/21
 //
 // --------------------------------------------------------------------------
-void BackupStoreContext::DeleteDirectoryRecurse(int64_t ObjectID, int64_t &rBlocksDeletedOut, bool Undelete)
+void BackupStoreContext::DeleteDirectoryRecurse(int64_t ObjectID, bool Undelete)
 {
 	try
 	{
 		// Does things carefully to avoid using a directory in the cache after recursive call
 		// because it may have been deleted.
-		
+
 		// Do sub directories
 		{
 			// Get the directory...
 			BackupStoreDirectory &dir(GetDirectoryInternal(ObjectID));
-			
+
 			// Then scan it for directories
 			std::vector<int64_t> subDirs;
 			BackupStoreDirectory::Iterator i(dir);
@@ -1207,11 +1235,11 @@
 					subDirs.push_back(en->GetObjectID());
 				}
 			}
-			
+
 			// Done with the directory for now. Recurse to sub directories
 			for(std::vector<int64_t>::const_iterator i = subDirs.begin(); i != subDirs.end(); ++i)
 			{
-				DeleteDirectoryRecurse((*i), rBlocksDeletedOut, Undelete);	
+				DeleteDirectoryRecurse(*i, Undelete);	
 			}
 		}
 		
@@ -1231,6 +1259,22 @@
 			while((en = i.Next(Undelete?(BackupStoreDirectory::Entry::Flags_Deleted):(BackupStoreDirectory::Entry::Flags_INCLUDE_EVERYTHING),
 				Undelete?(0):(BackupStoreDirectory::Entry::Flags_Deleted))) != 0)	// Ignore deleted directories (or not deleted if Undelete)
 			{
+				// Keep count of the deleted blocks
+				if(en->IsFile())
+				{
+					int64_t size = en->GetSizeInBlocks();
+					ASSERT(en->IsDeleted() == Undelete); 
+					// Don't adjust counters for old files,
+					// because it can be both old and deleted.
+					if(!en->IsOld())
+					{
+						mapStoreInfo->ChangeBlocksInCurrentFiles(Undelete ? size : -size);
+						mapStoreInfo->AdjustNumCurrentFiles(Undelete ? 1 : -1);
+					}
+					mapStoreInfo->ChangeBlocksInDeletedFiles(Undelete ? -size : size);
+					mapStoreInfo->AdjustNumDeletedFiles(Undelete ? -1 : 1);
+				}
+
 				// Add/remove the deleted flags
 				if(Undelete)
 				{
@@ -1240,13 +1284,7 @@
 				{
 					en->AddFlags(BackupStoreDirectory::Entry::Flags_Deleted);
 				}
-							
-				// Keep count of the deleted blocks
-				if((en->GetFlags() & BackupStoreDirectory::Entry::Flags_File) != 0)
-				{
-					rBlocksDeletedOut += en->GetSizeInBlocks();
-				}
-				
+
 				// Did something
 				changesMade = true;
 			}

Modified: box/trunk/lib/backupstore/BackupStoreContext.h
===================================================================
--- box/trunk/lib/backupstore/BackupStoreContext.h	2014-02-10 16:35:31 UTC (rev 3249)
+++ box/trunk/lib/backupstore/BackupStoreContext.h	2014-02-10 20:14:34 UTC (rev 3250)
@@ -167,8 +167,8 @@
 	BackupStoreDirectory &GetDirectoryInternal(int64_t ObjectID);
 	void SaveDirectory(BackupStoreDirectory &rDir, int64_t ObjectID);
 	void RemoveDirectoryFromCache(int64_t ObjectID);
-	void DeleteDirectoryRecurse(int64_t ObjectID, int64_t &rBlocksDeletedOut, bool Undelete);
 	void ClearDirectoryCache();
+	void DeleteDirectoryRecurse(int64_t ObjectID, bool Undelete);
 	int64_t AllocateObjectID();
 
 	std::string mConnectionDetails;

Modified: box/trunk/lib/backupstore/BackupStoreInfo.h
===================================================================
--- box/trunk/lib/backupstore/BackupStoreInfo.h	2014-02-10 16:35:31 UTC (rev 3249)
+++ box/trunk/lib/backupstore/BackupStoreInfo.h	2014-02-10 20:14:34 UTC (rev 3250)
@@ -152,6 +152,20 @@
 		int64_t BlockSoftLimit, int64_t BlockHardLimit,
 		bool AccountEnabled, IOStream& ExtraData);
 
+	typedef struct
+	{
+		int64_t mLastObjectIDUsed;
+		int64_t mBlocksUsed;
+		int64_t mBlocksInCurrentFiles;
+		int64_t mBlocksInOldFiles;
+		int64_t mBlocksInDeletedFiles;
+		int64_t mBlocksInDirectories;
+		int64_t mNumCurrentFiles;
+		int64_t mNumOldFiles;
+		int64_t mNumDeletedFiles;
+		int64_t mNumDirectories;
+	} Adjustment;
+
 private:
 	// Location information
 	// Be VERY careful about changing types of these values, as




More information about the Boxbackup-commit mailing list