Merge pull request #879 from charsyam/feature/typos

fix typos in comments
Igor Canadi 2015-12-10 09:42:11 -08:00
commit c5af8bffbf
18 changed files with 29 additions and 29 deletions

@@ -215,7 +215,7 @@
* Support Multiple DB paths in universal style compactions
* Add feature of storing plain table index and bloom filter in SST file.
* CompactRange() will never output compacted files to level 0. This used to be the case when all the compaction input files were at level 0.
-* Added iterate_upper_bound to define the extent upto which the forward iterator will return entries. This will prevent iterating over delete markers and overwritten entries for edge cases where you want to break out the iterator anyways. This may improve perfomance in case there are a large number of delete markers or overwritten entries.
+* Added iterate_upper_bound to define the extent upto which the forward iterator will return entries. This will prevent iterating over delete markers and overwritten entries for edge cases where you want to break out the iterator anyways. This may improve performance in case there are a large number of delete markers or overwritten entries.
### Public API changes
* DBOptions.db_paths now is a vector of a DBPath structure which indicates both of path and target size
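
As context for the iterate_upper_bound entry above, a minimal sketch of how the option is typically set (illustrative only: the db handle, key names, and bound are invented):

    #include <memory>

    #include "rocksdb/db.h"
    #include "rocksdb/options.h"
    #include "rocksdb/slice.h"

    void ScanRange(rocksdb::DB* db) {
      rocksdb::ReadOptions read_opts;
      rocksdb::Slice upper("user:200");        // exclusive upper bound
      read_opts.iterate_upper_bound = &upper;  // must outlive the iterator
      std::unique_ptr<rocksdb::Iterator> it(db->NewIterator(read_opts));
      for (it->Seek("user:100"); it->Valid(); it->Next()) {
        // Visits only keys in [user:100, user:200); the iterator never
        // touches delete markers or overwritten entries past the bound.
      }
    }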

@@ -1,5 +1,5 @@
# This script enables you running RocksDB tests by running
-# All the tests in paralell and utilizing all the cores
+# All the tests in parallel and utilizing all the cores
# For db_test the script first lists and parses the tests
# and then fires them up in parallel using async PS Job functionality
# Run the script from the enlistment
@@ -258,7 +258,7 @@ function RunJobs($TestToLog, [int]$ConcurrencyVal, [bool]$AddForRerun)
Write-Warning $message
$log_content | Write-Warning
} else {
-# Scan the log. If we find PASSED and no occurence of FAILED
+# Scan the log. If we find PASSED and no occurrence of FAILED
# then it is a success
[bool]$pass_found = $false
ForEach($l in $log_content) {
@@ -315,4 +315,4 @@ if(!$success) {
exit 12345
}

@@ -138,7 +138,7 @@ class CompactionJob {
// This is the earliest snapshot that could be used for write-conflict
// checking by a transaction. For any user-key newer than this snapshot, we
-// should make sure not to remove evidence that a write occured.
+// should make sure not to remove evidence that a write occurred.
SequenceNumber earliest_write_conflict_snapshot_;
std::shared_ptr<Cache> table_cache_;

@@ -8927,7 +8927,7 @@ TEST_F(DBTest, DelayedWriteRate) {
WriteOptions wo;
Put(Key(i), std::string(entry_size, 'x'), wo);
estimated_total_size += entry_size + 20;
-// Ocassionally sleep a while
+// Occasionally sleep a while
if (rnd.Uniform(20) == 6) {
env_->SleepForMicroseconds(2666);
}
@@ -9363,7 +9363,7 @@ TEST_F(DBTest, DestroyDBWithRateLimitedDelete) {
// We created 4 sst files in L0
ASSERT_EQ("4", FilesPerLevel(0));
-// Close DB and destory it using DeleteScheduler
+// Close DB and destroy it using DeleteScheduler
Close();
std::string trash_dir = test::TmpDir(env_) + "/trash";
int64_t rate_bytes_per_sec = 1024 * 1024; // 1 Mb / Sec

@@ -26,9 +26,9 @@ struct FileLevel;
// The file tree structure in Version is prebuilt and the range of each file
// is known. On Version::Get(), it uses binary search to find a potential file
// and then check if a target key can be found in the file by comparing the key
-// to each file's smallest and largest key. The results of these comparisions
+// to each file's smallest and largest key. The results of these comparisons
// can be reused beyond checking if a key falls into a file's range.
-// With some pre-calculated knowledge, each key comparision that has been done
+// With some pre-calculated knowledge, each key comparison that has been done
// can serve as a hint to narrow down further searches: if a key compared to
// be smaller than a file's smallest or largest, that comparison can be used
// to find out the right bound of next binary search. Similarly, if a key
@@ -48,7 +48,7 @@ class FileIndexer {
size_t LevelIndexSize(size_t level) const;
// Return a file index range in the next level to search for a key based on
-// smallest and largest key comparision for the current file specified by
+// smallest and largest key comparison for the current file specified by
// level and file_index. When *left_index < *right_index, both index should
// be valid and fit in the vector size.
void GetNextLevelIndex(const size_t level, const size_t file_index,
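
To make the hint idea above concrete, a simplified, hypothetical sketch (invented names and fields; not the actual FileIndexer code):

    #include <cstddef>
    #include <vector>

    // Per file at a level: precomputed search bounds into the next level.
    struct IndexUnit {
      size_t left_bound;   // first next-level file that may overlap this
                           // file's smallest key
      size_t right_bound;  // last next-level file that may overlap this
                           // file's largest key
    };

    // Reuse the comparison of `key` against one file's smallest/largest
    // keys to shrink the binary-search window in the next level.
    void NextLevelRange(const std::vector<IndexUnit>& index, size_t file_index,
                        int cmp_smallest, int cmp_largest,
                        size_t next_level_files, size_t* left, size_t* right) {
      const IndexUnit& u = index[file_index];
      if (cmp_smallest < 0) {        // key < file.smallest
        *left = 0;                   // nothing right of left_bound can match
        *right = u.left_bound;
      } else if (cmp_largest > 0) {  // key > file.largest
        *left = u.right_bound;       // nothing left of right_bound can match
        *right = next_level_files - 1;
      } else {                       // key falls inside the file's range
        *left = u.left_bound;
        *right = u.right_bound;
      }
    }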

@@ -25,7 +25,7 @@ std::unique_ptr<WriteControllerToken> WriteController::GetDelayToken() {
}
bool WriteController::IsStopped() const { return total_stopped_ > 0; }
-// Tihs is inside DB mutex, so we can't sleep and need to minimize
+// This is inside DB mutex, so we can't sleep and need to minimize
// frequency to get time.
// If it turns out to be a performance issue, we can redesign the thread
// synchronization model here.

@@ -10,7 +10,7 @@ The env_hdfs.h file defines the rocksdb objects that are needed to talk to an
underlying filesystem.
If you want to compile rocksdb with hdfs support, please set the following
-enviroment variables appropriately (also defined in setup.sh for convenience)
+environment variables appropriately (also defined in setup.sh for convenience)
USE_HDFS=1
JAVA_HOME=/usr/local/jdk-6u22-64
LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/jdk-6u22-64/jre/lib/amd64/server:/usr/local/jdk-6u22-64/jre/lib/amd64/:./snappy/libs

@@ -50,7 +50,7 @@ struct TableFileDeletionInfo {
std::string file_path;
// The id of the job which deleted the file.
int job_id;
-// The status indicating whether the deletion was successfull or not.
+// The status indicating whether the deletion was successful or not.
Status status;
};
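
For context, TableFileDeletionInfo is the payload of the EventListener deletion callback; a hedged sketch of consuming it (the listener class name is invented):

    #include "rocksdb/listener.h"

    class DeletionLogger : public rocksdb::EventListener {
     public:
      void OnTableFileDeleted(const rocksdb::TableFileDeletionInfo& info) override {
        if (!info.status.ok()) {
          // Deletion failed: info.file_path names the SST file and
          // info.job_id identifies the job that attempted it.
        }
      }
    };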

@@ -536,7 +536,7 @@ struct ColumnFamilyOptions {
// If <= 0, a proper value is automatically calculated (usually 1/8 of
// writer_buffer_size, rounded up to a multiple of 4KB).
//
-// There are two additonal restriction of the The specified size:
+// There are two additional restriction of the The specified size:
// (1) size should be in the range of [4096, 2 << 30] and
// (2) be the multiple of the CPU word (which helps with the memory
// alignment).
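
The field this comment documents is not visible in the hunk; assuming it is ColumnFamilyOptions::arena_block_size (which carries this wording in options.h), a minimal sketch of honoring the stated restrictions:

    #include "rocksdb/options.h"

    rocksdb::Options MakeOptions() {
      rocksdb::Options options;
      // Assumption: the documented field is arena_block_size. Keep the value
      // in [4096, 2 << 30] and a multiple of the CPU word size; 64 KB is both.
      options.arena_block_size = 64 * 1024;
      return options;
    }
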
@@ -893,7 +893,7 @@ struct DBOptions {
// If none of the paths has sufficient room to place a file, the file will
// be placed to the last path anyway, despite to the target size.
//
-// Placing newer data to ealier paths is also best-efforts. User should
+// Placing newer data to earlier paths is also best-efforts. User should
// expect user files to be placed in higher levels in some extreme cases.
//
// If left empty, only one path will be used, which is db_name passed when
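
A hedged sketch of configuring db_paths as described above (paths and target sizes are invented; the struct is spelled DbPath in options.h):

    #include "rocksdb/options.h"

    rocksdb::Options MakeMultiPathOptions() {
      rocksdb::Options options;
      // Newer, lower-level files fill the first path up to its target size,
      // then spill to later paths; placement is best-effort per the comment.
      options.db_paths.emplace_back("/fast_ssd/mydb", 100ULL << 30);  // ~100 GB
      options.db_paths.emplace_back("/bulk_hdd/mydb", 1ULL << 40);    // ~1 TB
      return options;
    }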

@@ -128,7 +128,7 @@ struct BlockBasedTableOptions {
// This must generally be true for gets to be efficient.
bool whole_key_filtering = true;
-// If true, block will not be explictly flushed to disk during building
+// If true, block will not be explicitly flushed to disk during building
// a SstTable. Instead, buffer in WritableFileWriter will take
// care of the flushing when it is full.
//

@@ -83,7 +83,7 @@ class Transaction {
// When the snapshot is created the notifier's SnapshotCreated method will
// be called so that the caller can get access to the snapshot.
//
-// This is an optimization to reduce the likelyhood of conflicts that
+// This is an optimization to reduce the likelihood of conflicts that
// could occur in between the time SetSnapshot() is called and the first
// write/GetForUpdate operation. Eg, this prevents the following
// race-condition:
@@ -225,7 +225,7 @@ class Transaction {
// in this transaction do not yet belong to any snapshot and will be fetched
// regardless).
//
-// Caller is reponsible for deleting the returned Iterator.
+// Caller is responsible for deleting the returned Iterator.
//
// The returned iterator is only valid until Commit(), Rollback(), or
// RollbackToSavePoint() is called.
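
A minimal sketch tying the two comments together, assuming the Transaction API of this release (the TransactionDB handle is a placeholder and error handling is elided):

    #include "rocksdb/utilities/transaction.h"
    #include "rocksdb/utilities/transaction_db.h"

    void SnapshotScan(rocksdb::TransactionDB* txn_db) {
      rocksdb::Transaction* txn =
          txn_db->BeginTransaction(rocksdb::WriteOptions());
      // Take the snapshot atomically with the next operation, shrinking the
      // race window between SetSnapshot() and the first write/GetForUpdate.
      txn->SetSnapshotOnNextOperation();

      rocksdb::Iterator* iter = txn->GetIterator(rocksdb::ReadOptions());
      for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
        // Sees this transaction's own uncommitted writes on top of the DB.
      }
      delete iter;  // caller is responsible for deleting the returned Iterator
      txn->Commit();
      delete txn;
    }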

@@ -753,7 +753,7 @@ public interface ColumnFamilyOptionsInterface {
* If &le; 0, a proper value is automatically calculated (usually 1/10 of
* writer_buffer_size).
*
-* There are two additonal restriction of the The specified size:
+* There are two additional restriction of the The specified size:
* (1) size should be in the range of [4096, 2 &lt;&lt; 30] and
* (2) be the multiple of the CPU word (which helps with the memory
* alignment).
@@ -774,7 +774,7 @@ public interface ColumnFamilyOptionsInterface {
* If &le; 0, a proper value is automatically calculated (usually 1/10 of
* writer_buffer_size).
*
-* There are two additonal restriction of the The specified size:
+* There are two additional restriction of the The specified size:
* (1) size should be in the range of [4096, 2 &lt;&lt; 30] and
* (2) be the multiple of the CPU word (which helps with the memory
* alignment).

@@ -67,7 +67,7 @@
#if defined(OS_ANDROID) && __ANDROID_API__ < 9
// fdatasync() was only introduced in API level 9 on Android. Use fsync()
-// when targetting older platforms.
+// when targeting older platforms.
#define fdatasync fsync
#endif

@@ -64,11 +64,11 @@ echo "Creating db based on the old commit --- $commit_old"
./old_db_sanity_test $dir_old create
echo "============================================================="
-echo "[Backward Compability Check]"
+echo "[Backward Compatibility Check]"
echo "Verifying old db $dir_old using the new commit --- $commit_new"
./new_db_sanity_test $dir_old verify
if [ $? -ne 0 ]; then
-echo "[ERROR] Backward Compability Check fails:"
+echo "[ERROR] Backward Compatibility Check fails:"
echo " Verification of $dir_old using commit $commit_new failed."
exit 2
fi
@@ -78,7 +78,7 @@ echo "[Forward Compatibility Check]"
echo "Verifying new db $dir_new using the old commit --- $commit_old"
./old_db_sanity_test $dir_new verify
if [ $? -ne 0 ]; then
-echo "[ERROR] Forward Compability Check fails:"
+echo "[ERROR] Forward Compatibility Check fails:"
echo " $dir_new using commit $commit_old failed."
exit 2
fi

@@ -57,7 +57,7 @@
# This argument can be left off for the default
# column family
#
-# Returns true if an error occured while trying to delete the key in
+# Returns true if an error occurred while trying to delete the key in
# the database, or false otherwise. Note that this is NOT the same as
# whether a value was deleted; in the case of a specified key not having
# a value, this will still return true. Use the `get` method prior to

@@ -56,7 +56,7 @@ namespace rocksdb {
// In the unit test, 'Happens After' relationship among sync points could be
// setup via SyncPoint::LoadDependency, to reproduce a desired interleave of
// threads execution.
-// Refer to (DBTest,TransactionLogIteratorRace), for an exmaple use case.
+// Refer to (DBTest,TransactionLogIteratorRace), for an example use case.
class SyncPoint {
public:
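
A sketch of the LoadDependency pattern the comment points to (the sync-point names are invented; code under test marks points via TEST_SYNC_POINT):

    // util/sync_point.h (internal, test-only facility).
    // "Writer:Done" must fire before "Reader:Start" may proceed.
    rocksdb::SyncPoint::GetInstance()->LoadDependency(
        {{"Writer:Done", "Reader:Start"}});
    rocksdb::SyncPoint::GetInstance()->EnableProcessing();
    // ... run the racing threads under test ...
    rocksdb::SyncPoint::GetInstance()->DisableProcessing();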

@@ -54,7 +54,7 @@ class ThreadLocalPtr {
void* Swap(void* ptr);
// Atomically compare the stored value with expected. Set the new
-// pointer value to thread local only if the comparision is true.
+// pointer value to thread local only if the comparison is true.
// Otherwise, expected returns the stored value.
// Return true on success, false on failure
bool CompareAndSwap(void* ptr, void*& expected);
@@ -98,7 +98,7 @@ class ThreadLocalPtr {
// Return the next available Id
uint32_t GetId();
-// Return the next availabe Id without claiming it
+// Return the next available Id without claiming it
uint32_t PeekId() const;
// Return the given Id back to the free pool. This also triggers
// UnrefHandler for associated pointer value (if not NULL) for all threads.
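
An illustrative caller for the CompareAndSwap contract above (ThreadLocalPtr lives in the internal util/thread_local.h; this usage is hypothetical):

    static rocksdb::ThreadLocalPtr tls;
    static int slot_value = 42;

    void InstallOnce() {
      void* expected = nullptr;
      // Install &slot_value for this thread only if the slot still holds
      // nullptr; on failure, `expected` is overwritten with the stored value.
      if (!tls.CompareAndSwap(&slot_value, expected)) {
        // Another pointer won the race; `expected` now points to it.
      }
    }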

@@ -706,7 +706,7 @@ Status WriteBatchWithIndex::GetFromBatchAndDB(DB* db,
// Did not find key in batch OR could not resolve Merges. Try DB.
s = db->Get(read_options, column_family, key, value);
-if (s.ok() || s.IsNotFound()) { // DB Get Suceeded
+if (s.ok() || s.IsNotFound()) { // DB Get Succeeded
if (result == WriteBatchWithIndexInternal::Result::kMergeInProgress) {
// Merge result from DB with merges in Batch
auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
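
The snippet above is the DB fall-through inside GetFromBatchAndDB; from the caller's side the API looks roughly like this (the db handle and key are placeholders):

    #include <string>

    #include "rocksdb/db.h"
    #include "rocksdb/utilities/write_batch_with_index.h"

    void ReadThroughBatch(rocksdb::DB* db) {
      rocksdb::WriteBatchWithIndex batch;
      batch.Put("key1", "batched-value");

      std::string value;
      // Consults the indexed batch first; falls back to db->Get() when the
      // key is absent there or a pending Merge needs the DB's base value.
      rocksdb::Status s =
          batch.GetFromBatchAndDB(db, rocksdb::ReadOptions(), "key1", &value);
    }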