Revision 8e12638f3d0d91791cf06253493b8b15827f4b6c authored by sdong on 19 August 2019, 17:50:25 UTC, committed by Facebook Github Bot on 19 August 2019, 17:51:59 UTC
Summary: Atomic white box test's kill odds are the same as for the normal test. However, in the scenario that only WritableFileWriter::Append() is blacklisted, WritableFileWriter::Flush() dominates the killing odds. Normally, most of WritableFileWriter::Flush() are called in WAL writes, where every write triggers a WAL flush. In atomic test, WAL is disabled, so the kill happens less frequently than we anticipated. In some rare cases, the kill didn't end up happening (for reasons I still don't fully understand) and caused the stress test timeout. If WAL is disabled, make the odds 5x as likely to trigger. Pull Request resolved: https://github.com/facebook/rocksdb/pull/5717 Test Plan: Run whitebox_crash_test_with_atomic_flush and whitebox_crash_test and observe the kill odds printed out. Differential Revision: D16897237 fbshipit-source-id: cbf5d96f6fc0e980523d0f1f94bf4e72cdb82d1c
1 parent e1c468d
table_properties_collector.cc
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#include "db/table_properties_collector.h"
#include "db/dbformat.h"
#include "util/coding.h"
#include "util/string_util.h"
namespace rocksdb {
namespace {
// Looks up `property_name` in `props` and decodes its value as a varint64.
// Sets *property_present to whether the key exists in the map. Returns the
// decoded value, or 0 when the key is absent or the varint fails to decode.
uint64_t GetUint64Property(const UserCollectedProperties& props,
                           const std::string& property_name,
                           bool* property_present) {
  const auto iter = props.find(property_name);
  if (iter == props.end()) {
    *property_present = false;
    return 0;
  }
  *property_present = true;
  Slice raw = iter->second;
  uint64_t decoded = 0;
  if (!GetVarint64(&raw, &decoded)) {
    // Malformed encoding: report 0 (the key was still present).
    return 0;
  }
  return decoded;
}
} // namespace
// Translates an internal key into its user-visible parts and forwards the
// entry to the wrapped user collector. Returns InvalidArgument when the
// internal key cannot be parsed.
Status UserKeyTablePropertiesCollector::InternalAdd(const Slice& key,
                                                    const Slice& value,
                                                    uint64_t file_size) {
  ParsedInternalKey parsed;
  if (!ParseInternalKey(key, &parsed)) {
    return Status::InvalidArgument("Invalid internal key");
  }
  // The user collector only ever sees user keys, never internal metadata.
  return collector_->AddUserKey(parsed.user_key, value,
                                GetEntryType(parsed.type), parsed.sequence,
                                file_size);
}
// Forwards per-block size statistics (raw and estimated compressed sizes)
// to the wrapped user collector.
// Fixes: parameter was miscapitalized as `bLockRawBytes`, and a void call
// was needlessly `return`ed. Definition-side parameter names are not part
// of the interface, so callers are unaffected.
void UserKeyTablePropertiesCollector::BlockAdd(
    uint64_t blockRawBytes, uint64_t blockCompressedBytesFast,
    uint64_t blockCompressedBytesSlow) {
  collector_->BlockAdd(blockRawBytes, blockCompressedBytesFast,
                       blockCompressedBytesSlow);
}
// Delegates finalization to the wrapped user collector, which writes its
// aggregated properties into *properties and returns its status.
Status UserKeyTablePropertiesCollector::Finish(
    UserCollectedProperties* properties) {
  return collector_->Finish(properties);
}
// Returns the wrapped collector's human-readable view of its properties.
UserCollectedProperties
UserKeyTablePropertiesCollector::GetReadableProperties() const {
  return collector_->GetReadableProperties();
}
// Returns the number of deleted keys recorded under
// TablePropertiesNames::kDeletedKeys, or 0 when the property is absent
// (presence is not reported to the caller).
uint64_t GetDeletedKeys(
    const UserCollectedProperties& props) {
  bool present_unused = false;
  return GetUint64Property(props, TablePropertiesNames::kDeletedKeys,
                           &present_unused);
}
// Returns the number of merge operands recorded under
// TablePropertiesNames::kMergeOperands; *property_present is set to whether
// the property key was found in `props`.
uint64_t GetMergeOperands(const UserCollectedProperties& props,
                          bool* property_present) {
  const uint64_t count = GetUint64Property(
      props, TablePropertiesNames::kMergeOperands, property_present);
  return count;
}
} // namespace rocksdb
![swh spinner](/static/img/swh-spinner.gif)
Computing file changes ...