swh:1:snp:5115096b921df712aeb2a08114fede57fb3331fb
- HEAD
- refs/heads/2.2.fb.branch
- refs/heads/2.3.fb.branch
- refs/heads/2.4.fb.branch
- refs/heads/2.5.fb.branch
- refs/heads/2.6.fb.branch
- refs/heads/2.7
- refs/heads/2.7.fb.branch
- refs/heads/2.8.1.fb
- refs/heads/2.8.fb
- refs/heads/2.8.fb.trunk
- refs/heads/3.0.fb
- refs/heads/3.0.fb.branch
- refs/heads/3.1.fb
- refs/heads/3.10.fb
- refs/heads/3.11.fb
- refs/heads/3.12.fb
- refs/heads/3.13.fb
- refs/heads/3.2.fb
- refs/heads/3.3.fb
- refs/heads/3.4.fb
- refs/heads/3.5.fb
- refs/heads/3.6.fb
- refs/heads/3.7.fb
- refs/heads/3.8.fb
- refs/heads/3.9.fb
- refs/heads/4.0.fb
- refs/heads/4.1.fb
- refs/heads/4.10.fb
- refs/heads/4.11.fb
- refs/heads/4.12.fb
- refs/heads/4.13.fb
- refs/heads/4.2.fb
- refs/heads/4.3.fb
- refs/heads/4.4.fb
- refs/heads/4.5.fb
- refs/heads/4.6.fb
- refs/heads/4.7.fb
- refs/heads/4.8.fb
- refs/heads/4.9.fb
- refs/heads/5.0.fb
- refs/heads/5.1.fb
- refs/heads/5.10.fb
- refs/heads/5.11.fb
- refs/heads/5.12.fb
- refs/heads/5.13.fb
- refs/heads/5.13.fb.myrocks
- refs/heads/5.14.fb
- refs/heads/5.14.fb.myrocks
- refs/heads/5.15.fb
- refs/heads/5.16.fb
- refs/heads/5.17.fb
- refs/heads/5.17.fb.myrocks
- refs/heads/5.18.fb
- refs/heads/5.2.fb
- refs/heads/5.3.fb
- refs/heads/5.4.fb
- refs/heads/5.5.fb
- refs/heads/5.6.fb
- refs/heads/5.7.fb
- refs/heads/5.7.fb.myrocks
- refs/heads/5.8.3
- refs/heads/5.8.fb
- refs/heads/5.9.fb
- refs/heads/5.9.fb.myrocks
- refs/heads/6.0.fb
- refs/heads/6.0.fb.myrocks
- refs/heads/6.1.fb
- refs/heads/6.1.fb.myrocks
- refs/heads/6.1.fb.prod201905
- refs/heads/6.10.fb
- refs/heads/6.11.fb
- refs/heads/6.12.fb
- refs/heads/6.13.fb
- refs/heads/6.13.fb.laser
- refs/heads/6.14.fb
- refs/heads/6.14.fb.laser
- refs/heads/6.15.fb
- refs/heads/6.16.fb
- refs/heads/6.17.fb
- refs/heads/6.2.fb
- refs/heads/6.3.fb
- refs/heads/6.3.fb.myrocks
- refs/heads/6.3.fb.myrocks2
- refs/heads/6.3fb
- refs/heads/6.4.fb
- refs/heads/6.5.fb
- refs/heads/6.6.fb
- refs/heads/6.7.fb
- refs/heads/6.8.fb
- refs/heads/6.9.fb
- refs/heads/adaptive
- refs/heads/ajkr-patch-1
- refs/heads/blob_shadow
- refs/heads/bottom-pri-level
- refs/heads/bugfix-build-detect
- refs/heads/checksum_readahead_mmap_fix
- refs/heads/fb-config
- refs/heads/feature/debug-rocksdbjavastatic
- refs/heads/feature/travis-arm64
- refs/heads/fix-release-notes
- refs/heads/fix-write-batch-comment
- refs/heads/format_compatible_4
- refs/heads/getmergeops
- refs/heads/gh-pages-old
- refs/heads/history-update
- refs/heads/hotfix/lambda-capture
- refs/heads/improve-support
- refs/heads/jay-stress
- refs/heads/katherinez-patch-1
- refs/heads/katherinez-patch-2
- refs/heads/master
- refs/heads/pr-sanity-check-as-GHAction
- refs/heads/pr/6062
- refs/heads/ramvadiv-patch-1
- refs/heads/scaffold
- refs/heads/siying-patch-1
- refs/heads/siying-patch-10
- refs/heads/siying-patch-2
- refs/heads/siying-patch-3
- refs/heads/siying-patch-4
- refs/heads/skip_memtable_flush
- refs/heads/testing_ppc_build
- refs/heads/tests
- refs/heads/unused-var
- refs/heads/v6.6.4
- refs/heads/yiwu_stackable
- refs/heads/yuslepukhin
- refs/remotes/origin/5.13.fb
- refs/tags/2.5.fb
- refs/tags/2.6.fb
- refs/tags/3.0.fb
- refs/tags/do-not-use-me2
- refs/tags/rocksdb-3.1
- refs/tags/rocksdb-3.10.2
- refs/tags/rocksdb-3.11
- refs/tags/rocksdb-3.11.1
- refs/tags/rocksdb-3.11.2
- refs/tags/rocksdb-3.2
- refs/tags/rocksdb-3.3
- refs/tags/rocksdb-3.4
- refs/tags/rocksdb-3.5
- refs/tags/rocksdb-3.5.1
- refs/tags/rocksdb-3.6.1
- refs/tags/rocksdb-3.6.2
- refs/tags/rocksdb-3.7
- refs/tags/rocksdb-3.8
- refs/tags/rocksdb-3.9
- refs/tags/rocksdb-3.9.1
- refs/tags/rocksdb-4.1
- refs/tags/rocksdb-5.10.2
- refs/tags/rocksdb-5.10.3
- refs/tags/rocksdb-5.10.4
- refs/tags/rocksdb-5.11.2
- refs/tags/rocksdb-5.11.3
- refs/tags/rocksdb-5.14.3
- refs/tags/rocksdb-5.2.1
- refs/tags/rocksdb-5.3.3
- refs/tags/rocksdb-5.3.4
- refs/tags/rocksdb-5.3.5
- refs/tags/rocksdb-5.3.6
- refs/tags/rocksdb-5.4.10
- refs/tags/rocksdb-5.4.5
- refs/tags/rocksdb-5.4.6
- refs/tags/rocksdb-5.5.2
- refs/tags/rocksdb-5.5.3
- refs/tags/rocksdb-5.5.4
- refs/tags/rocksdb-5.5.5
- refs/tags/rocksdb-5.5.6
- refs/tags/rocksdb-5.6.1
- refs/tags/rocksdb-5.6.2
- refs/tags/rocksdb-5.7.1
- refs/tags/rocksdb-5.7.2
- refs/tags/rocksdb-5.7.3
- refs/tags/rocksdb-5.7.5
- refs/tags/rocksdb-5.8.6
- refs/tags/rocksdb-5.8.7
- refs/tags/rocksdb-5.8.8
- refs/tags/rocksdb-5.9.2
- refs/tags/v4.0
- refs/tags/v4.1
- refs/tags/v5.10.2
- refs/tags/v5.10.3
- refs/tags/v5.10.4
- refs/tags/v5.11.2
- refs/tags/v5.11.3
- refs/tags/v5.13.3
- refs/tags/v5.14.3
- refs/tags/v5.15.10
- refs/tags/v5.18.3
- refs/tags/v5.2.1
- refs/tags/v5.3.3
- refs/tags/v5.3.4
- refs/tags/v5.3.5
- refs/tags/v5.3.6
- refs/tags/v5.4.10
- refs/tags/v5.4.5
- refs/tags/v5.4.6
- refs/tags/v5.5.2
- refs/tags/v5.5.3
- refs/tags/v5.5.4
- refs/tags/v5.5.5
- refs/tags/v5.5.6
- refs/tags/v5.6.1
- refs/tags/v5.6.2
- refs/tags/v5.7.1
- refs/tags/v5.7.2
- refs/tags/v5.7.3
- refs/tags/v5.7.5
- refs/tags/v5.8.6
- refs/tags/v5.8.7
- refs/tags/v5.8.8
- refs/tags/v5.9.2
- refs/tags/v6.0.1
- refs/tags/v6.0.2
- refs/tags/v6.1.1
- refs/tags/v6.1.2
- refs/tags/v6.10.1
- refs/tags/v6.10.2
- refs/tags/v6.11.4
- refs/tags/v6.11.6
- refs/tags/v6.12.6
- refs/tags/v6.12.7
- refs/tags/v6.13.2
- refs/tags/v6.13.3
- refs/tags/v6.14.5
- refs/tags/v6.14.6
- refs/tags/v6.15.4
- refs/tags/v6.2.2
- refs/tags/v6.2.4
- refs/tags/v6.3.6
- refs/tags/v6.4.6
- refs/tags/v6.5.2
- refs/tags/v6.5.3
- refs/tags/v6.6.3
- refs/tags/v6.6.4
- refs/tags/v6.7.3
- refs/tags/v6.8.1
- v6.15.2
- v5.8
- v5.5.1
- v5.4.7
- v5.18.4
- v5.17.2
- v5.16.6
- v5.14.2
- v5.13.4
- v5.13.2
- v5.13.1
- v5.12.5
- v5.12.4
- v5.12.3
- v5.12.2
- v5.1.4
- v5.1.3
- v5.1.2
- v5.0.2
- v5.0.1
- v4.9
- v4.8
- v4.6.1
- v4.5.1
- v4.4.1
- v4.4
- v4.3.1
- v4.3
- v4.2
- v4.13.5
- v4.13
- v4.11.2
- v3.9
- v3.8
- v3.7
- v3.6.1
- v3.5
- v3.4
- v3.3
- v3.2
- v3.13.1
- v3.13
- v3.12.1
- v3.12
- v3.11
- v3.10
- v3.1
- v3.0
- v2.8
- v2.7
- v2.6
- v2.5
- v2.4
- v2.3
- v2.2
- v2.1
- v2.0
- v1.5.9.1
- v1.5.8.2
- v1.5.8.1
- v1.5.8
- v1.5.7
- rocksdb-5.8
- rocksdb-5.4.7
- rocksdb-5.1.4
- rocksdb-5.1.3
- rocksdb-5.1.2
- rocksdb-5.0.2
- rocksdb-5.0.1
- rocksdb-4.9
- rocksdb-4.8
- rocksdb-4.6.1
- rocksdb-4.5.1
- rocksdb-4.4.1
- rocksdb-4.4
- rocksdb-4.3.1
- rocksdb-4.3
- rocksdb-4.2
- rocksdb-4.13.5
- rocksdb-4.13
- rocksdb-4.11.2
- rocksdb-3.10.1
- 2.8.fb
- 2.7.fb
- 2.4.fb
- 2.3.fb
- 2.2.fb
- 2.1.fb
- 2.0.fb
- 1.5.9.fb
- 1.5.9.2.fb
- 1.5.9.1.fb
- 1.5.8.fb
- 1.5.8.2.fb
- 1.5.8.1.fb
- 1.5.7.fb
Permalinks
To reference or cite the objects present in the Software Heritage archive, permalinks based on SoftWare Hash IDentifiers (SWHIDs) must be used.
Select one of the object types currently being browsed below to display its associated SWHID and permalink.
Revision | Author | Date | Message | Commit Date |
---|---|---|---|---|
9da32ba | Ramkumar Vadivelu | 31 December 2020, 01:14:07 UTC | Update "Further Documentation" Remove stale pointers in the "Further Documentation" and just point to RocksDB Wiki | 31 December 2020, 01:14:07 UTC |
edb0b1f | jbosh | 30 December 2020, 23:41:49 UTC | rocksdb_transaction_get_for_update now exports (#6293) Summary: Added missing ROCKSDB_LIBRARY_API decorator to rocksdb_transaction_get_for_update. Pull Request resolved: https://github.com/facebook/rocksdb/pull/6293 Reviewed By: jay-zhuang Differential Revision: D25234298 Pulled By: ajkr fbshipit-source-id: 8a4817adaec1f445f338c8d8c59d3392925b5721 | 30 December 2020, 23:42:59 UTC |
fd2db79 | Adam Retter | 30 December 2020, 21:38:43 UTC | Attempt to fix build errors around missing compression library includes (#7803) Summary: This fixes an issue introduced in https://github.com/facebook/rocksdb/pull/7769 that caused many errors about missing compression libraries to be displayed during compilation, although compilation actually succeeded. This PR fixes the compilation so the compression libraries are only introduced where strictly needed. It likely needs to be merged into the same branches as https://github.com/facebook/rocksdb/pull/7769 which I think are: 1. master 2. 6.15.fb 3. 6.16.fb Pull Request resolved: https://github.com/facebook/rocksdb/pull/7803 Reviewed By: ramvadiv Differential Revision: D25733743 Pulled By: pdillinger fbshipit-source-id: 6c04f6864b2ff4a345841d791a89b19e0e3f5bf7 | 30 December 2020, 21:40:10 UTC |
01298c8 | anand76 | 30 December 2020, 17:24:04 UTC | Return Status from FilePrefetchBuffer::TryReadFromCache() (#7816) Summary: Return the Status from TryReadFromCache() in an argument to make it easier to report prefetch errors to the user. Tests: make crash_test make check Pull Request resolved: https://github.com/facebook/rocksdb/pull/7816 Reviewed By: akankshamahajan15 Differential Revision: D25717222 Pulled By: anand1976 fbshipit-source-id: c320d3c12d4146bda16df78ff6927eee584c1810 | 30 December 2020, 17:25:09 UTC |
d773866 | anand76 | 28 December 2020, 21:37:07 UTC | Fix db_bench duration for multireadrandom benchmark (#7817) Summary: The multireadrandom benchmark, when run for a specific number of reads (--reads argument), should base the duration on the actual number of keys read rather than number of batches. Tests: Run db_bench multireadrandom benchmark Pull Request resolved: https://github.com/facebook/rocksdb/pull/7817 Reviewed By: zhichao-cao Differential Revision: D25717230 Pulled By: anand1976 fbshipit-source-id: 13f4d8162268cf9a34918655e60302d0aba3864b | 28 December 2020, 21:38:10 UTC |
736c6dc | cheng-chang | 28 December 2020, 00:16:55 UTC | Disable BasicLockEscalation if cannot determine whether TSAN is enabled (#7814) Summary: BasicLockEscalation will cause false-positive warnings under TSAN (this is a known issue in TSAN, see details in https://gist.github.com/spetrunia/77274cf2d5848e0a7e090d622695ed4e), skip this test if TSAN is enabled, or if we are not sure whether TSAN is enabled. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7814 Test Plan: watch the tsan contrun test to pass. Reviewed By: zhichao-cao Differential Revision: D25708094 Pulled By: cheng-chang fbshipit-source-id: 4fc813ff373301d033d086154cc7bb60a5e95889 | 28 December 2020, 00:18:00 UTC |
44ebc24 | Zhichao Cao | 27 December 2020, 06:05:42 UTC | Add rate_limiter to GenerateOneFileChecksum (#7811) Summary: In GenerateOneFileChecksum(), RocksDB reads the file and computes its checksum. A rate limiter can be passed to the constructor of RandomAccessFileReader so that read I/O can be rate limited. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7811 Test Plan: make check Reviewed By: cheng-chang Differential Revision: D25699896 Pulled By: zhichao-cao fbshipit-source-id: e2688bc1126c543979a3bcf91dda784bd7b74164 | 27 December 2020, 06:07:24 UTC |
601585b | Zhichao Cao | 25 December 2020, 21:14:38 UTC | fix memory leak in db_stress checkpoint test (#7813) Summary: fix memory leak in db_stress checkpoint test. If s is not ok, checkpoint is not deleted, may cause memory leak. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7813 Test Plan: make asan_check Reviewed By: cheng-chang Differential Revision: D25702999 Pulled By: zhichao-cao fbshipit-source-id: 08253b0852835acb8cfd412503cdabf720afb678 | 25 December 2020, 21:15:48 UTC |
55e9968 | mrambacher | 24 December 2020, 00:54:05 UTC | No elide constructors (#7798) Summary: Added "no-elide-constructors to the ASSERT_STATUS_CHECK builds. This flag gives more errors/warnings for some of the Status checks where an inner class checks a Status and later returns it. In this case, without the elide check on, the returned status may not have been checked in the caller, thereby bypassing the checked code. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7798 Reviewed By: jay-zhuang Differential Revision: D25680451 Pulled By: pdillinger fbshipit-source-id: c3f14ed9e2a13f0a8c54d839d5fb4d1fc1e93917 | 24 December 2020, 00:55:53 UTC |
30a5ed9 | Akanksha Mahajan | 23 December 2020, 23:15:14 UTC | Update "num_data_read" stat in RetrieveMultipleBlocks (#7770) Summary: RetrieveMultipleBlocks which is used by MultiGet to read data blocks is not updating num_data_read stat in GetContextStats. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7770 Test Plan: make check -j64 Reviewed By: anand1976 Differential Revision: D25538982 Pulled By: akankshamahajan15 fbshipit-source-id: e3daedb035b1be8ab6af6f115cb3793ccc7b1ec6 | 23 December 2020, 23:16:46 UTC |
bdb7e54 | cheng-chang | 23 December 2020, 19:32:10 UTC | Skip WALs according to MinLogNumberToKeep when creating checkpoint (#7789) Summary: In a stress test failure, we observe that a WAL is skipped when creating checkpoint, although its log number >= MinLogNumberToKeep(). This might happen in the following case: 1. when creating the checkpoint, there are 2 column families: CF0 and CF1, and there are 2 WALs: 1, 2; 2. CF0's log number is 1, CF0's active memtable is empty, CF1's log number is 2, CF1's active memtable is not empty, WAL 2 is not empty, the sequence number points to WAL 2; 2. the checkpoint process flushes CF0, since CF0' active memtable is empty, there is no need to SwitchMemtable, thus no new WAL will be created, so CF0's log number is now 2, concurrently, some data is written to CF0 and WAL 2; 3. the checkpoint process flushes CF1, WAL 3 is created and CF1's log number is now 3, CF0's log number is still 2 because CF0 is not empty and WAL 2 contains its unflushed data concurrently written in step 2; 4. the checkpoint process determines that WAL 1 and 2 are no longer needed according to [live_wal_files[i]->StartSequence() >= *sequence_number](https://github.com/facebook/rocksdb/blob/master/utilities/checkpoint/checkpoint_impl.cc#L388), so it skips linking them to the checkpoint directory; 5. but according to `MinLogNumberToKeep()`, WAL 2 still needs to be kept because CF0's log number is 2. If the checkpoint is reopened in read-only mode, and only read from the snapshot with the initial sequence number, then there will be no data loss or data inconsistency. But if the checkpoint is reopened and read from the most recent sequence number, suppose in step 3, there are also data concurrently written to CF1 and WAL 3, then the most recent sequence number refers to the latest entry in WAL 3, so the data written in step 2 should also be visible, but since WAL 2 is discarded, those data are lost. 
When tracking WAL in MANIFEST is enabled, when reopening the checkpoint, since WAL 2 is still tracked in MANIFEST as alive, but it's missing from the checkpoint directory, a corruption will be reported. This PR makes the checkpoint process to only skip a WAL if its log number < `MinLogNumberToKeep`. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7789 Test Plan: watch existing tests to pass. Reviewed By: ajkr Differential Revision: D25662346 Pulled By: cheng-chang fbshipit-source-id: 136471095baa01886cf44809455cf855f24857a0 | 23 December 2020, 19:33:26 UTC |
bd2645b | anand76 | 23 December 2020, 19:24:50 UTC | Update regression_test.sh to run multireadrandom benchmark (#7802) Summary: Update the regression_test.sh script to run the multireadrandom benchmark. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7802 Reviewed By: zhichao-cao Differential Revision: D25685482 Pulled By: anand1976 fbshipit-source-id: ef2973b551a1bbdbce198a0adf29fc277f3e65e2 | 23 December 2020, 19:26:12 UTC |
a727efc | Peter Dillinger | 23 December 2020, 19:06:49 UTC | Remove flaky, redundant, and dubious DBTest.SparseMerge (#7800) Summary: This test would occasionally fail like this: WARNING: c:\users\circleci\project\db\db_test.cc(1343): error: Expected: (dbfull()->TEST_MaxNextLevelOverlappingBytes(handles_[1])) <= (20 * 1048576), actual: 33501540 vs 20971520 And being a super old test, it's not structured in a sound way. And it appears that DBTest2.MaxCompactionBytesTest is a better test of what SparseMerge was intended to test. In fact, SparseMerge fails if I set options.max_compaction_bytes = options.target_file_size_base * 1000; Thus, we are removing this negative-value test. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7800 Test Plan: Q.E.D. Reviewed By: ajkr Differential Revision: D25693366 Pulled By: pdillinger fbshipit-source-id: 9da07d4dce0559547fc938b2163a2015e956c548 | 23 December 2020, 19:08:12 UTC |
0241819 | mrambacher | 23 December 2020, 07:44:44 UTC | Add more tests for assert status checked (#7524) Summary: Added 10 more tests that pass the ASSERT_STATUS_CHECKED test. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7524 Reviewed By: akankshamahajan15 Differential Revision: D24323093 Pulled By: ajkr fbshipit-source-id: 28d4106d0ca1740c3b896c755edf82d504b74801 | 23 December 2020, 07:45:58 UTC |
daab760 | Sergei Petrunia | 23 December 2020, 03:10:56 UTC | Range Locking: Implementation of range locking (#7506) Summary: Range Locking - an implementation based on the locktree library - Add a RangeTreeLockManager and RangeTreeLockTracker which implement range locking using the locktree library. - Point locks are handled as locks on single-point ranges. - Add a unit test: range_locking_test Pull Request resolved: https://github.com/facebook/rocksdb/pull/7506 Reviewed By: akankshamahajan15 Differential Revision: D25320703 Pulled By: cheng-chang fbshipit-source-id: f86347384b42ba2b0257d67eca0f45f806b69da7 | 23 December 2020, 03:12:36 UTC |
f4db3e4 | sdong | 23 December 2020, 00:52:51 UTC | Avoid to force PORTABLE mode in tools/regression_test.sh (#7806) Summary: Right now tools/regression_test.sh always builds RocksDB with PORTABLE=1. There isn't a reason for that. Remove it. Users can always specify PORTABLE through envirionement variable. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7806 Test Plan: Run tools/regression_test.sh and see it still builds. Reviewed By: ajkr Differential Revision: D25687911 fbshipit-source-id: 1c0b03e5df890babc8b7d8af48b48774d9a4600c | 23 December 2020, 00:54:07 UTC |
81592d9 | Adam Retter | 22 December 2020, 23:08:17 UTC | Add more tests to ASSERT_STATUS_CHECKED (4) (#7718) Summary: Fourth batch of adding more tests to ASSERT_STATUS_CHECKED. * db_range_del_test * db_write_test * random_access_file_reader_test * merge_test * external_sst_file_test * write_buffer_manager_test * stringappend_test * deletefile_test Pull Request resolved: https://github.com/facebook/rocksdb/pull/7718 Reviewed By: pdillinger Differential Revision: D25671608 fbshipit-source-id: 687a794e98a9e0cd5428ead9898ef05ced987c31 | 22 December 2020, 23:09:39 UTC |
41ff125 | cheng-chang | 22 December 2020, 22:52:29 UTC | SyncWAL shouldn't be supported in compacted db (#7788) Summary: `CompactedDB` is a kind of read-only DB, so it shouldn't support `SyncWAL`. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7788 Test Plan: watch existing tests to pass. Reviewed By: akankshamahajan15 Differential Revision: D25661209 Pulled By: cheng-chang fbshipit-source-id: 9eb2cc3f73736dcc205c8410e5944aa203f002d3 | 22 December 2020, 22:53:43 UTC |
1022090 | Sergei Petrunia | 22 December 2020, 22:46:20 UTC | Apply the changes from: PS-5501 : Re-license PerconaFT 'locktree' to Apache V2 (#7801) Summary: commit d5178f513c0b4144a5ac9358ec0f6a3b54a28e76 Author: George O. Lorch III <george.lorch@percona.com> Date: Tue Mar 19 12:18:40 2019 -0700 PS-5501 : Re-license PerconaFT 'locktree' to Apache V2 - Fixed some incomplete relicensed files from previous round. - Added missing license text to some. - Relicensed more files to Apache V2 that locktree depends on. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7801 Reviewed By: jay-zhuang Differential Revision: D25682430 Pulled By: cheng-chang fbshipit-source-id: deb8a0de3e76f3638672997bfbd300e2fffbe5f5 | 22 December 2020, 22:47:41 UTC |
9057d0a | sdong | 22 December 2020, 22:42:06 UTC | Minimize Timing Issue in test WALTrashCleanupOnOpen (#7796) Summary: We saw DBWALTestWithParam/DBWALTestWithParam.WALTrashCleanupOnOpen sometimes fail with: db/db_sst_test.cc:575: Failure Expected: (trash_log_count) >= (1), actual: 0 vs 1 The suspicious is that delete scheduling actually deleted all trash files based on rate, but it is not expected. This can be reproduced if we manually add sleep after DB is closed for serveral seconds. Minimize its chance by setting the delete rate to be lowest possible. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7796 Test Plan: The test doesn't fail with the manual sleeping anymore Reviewed By: anand1976 Differential Revision: D25675000 fbshipit-source-id: a39fd05e1a83719c41014e48843792e752368e22 | 22 December 2020, 22:44:08 UTC |
fbac1b3 | Akanksha Mahajan | 22 December 2020, 18:29:58 UTC | Add tests in ASSERT_STATUS_CHECKED (#7793) Summary: add io_tracer_parser_test and prefetch_test under ASSERT_STATUS_CHECKED Pull Request resolved: https://github.com/facebook/rocksdb/pull/7793 Test Plan: ASSERT_STATUS_CHECKED=1 make check -j64 Reviewed By: jay-zhuang Differential Revision: D25673464 Pulled By: akankshamahajan15 fbshipit-source-id: 50e0b6f17160ddda206a521a7b47ee33e699a2d4 | 22 December 2020, 18:31:13 UTC |
4d897e5 | Peter Dillinger | 22 December 2020, 08:19:44 UTC | Migrate away from Travis+Linux+amd64 (#7791) Summary: This disables Linux/amd64 builds in Travis for PRs, and adds a gcc-10+c++20 build in CircleCI, which should fill out sufficient coverage vs. what we had in Travis Fixed a use of std::is_pod, which is deprecated in c++20 Fixed ++ on a volatile in db_repl_stress.cc, with bigger refactoring. Although ++ on this volatile was probably ok with one thread writer and one thread reader, the code was still overly complex. There was a deadcode check for error `if (replThread.no_read < dataPump.no_records)` which can be proven never to happen based on the structure of the code. It infinite loops instead for the case intended to be checked. I just simplified the code for what should be the same checking power. Also most configurations seem to be using make parallelism = 2 * vcores, so fixing / using that. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7791 Test Plan: CI and `while ./db_repl_stress; do echo again; done` for a while Reviewed By: siying Differential Revision: D25669834 Pulled By: pdillinger fbshipit-source-id: b2c688053d0b1d52c989903449d3cd27a04130d6 | 22 December 2020, 08:20:57 UTC |
861b0d1 | Jay Zhuang | 21 December 2020, 23:20:59 UTC | Fix Windows build in block_cache_tracer_test (#7795) Summary: The test was added to cmake in https://github.com/facebook/rocksdb/issues/7783 Pull Request resolved: https://github.com/facebook/rocksdb/pull/7795 Reviewed By: akankshamahajan15 Differential Revision: D25671010 Pulled By: jay-zhuang fbshipit-source-id: 2146ff9559cdd7266c4d78476672488c62654a6d | 21 December 2020, 23:22:16 UTC |
fd0d35d | Jay Zhuang | 21 December 2020, 16:45:54 UTC | Fix block_cache_test failure (#7783) Summary: `block_cache_tracer_test` and `block_cache_trace_analyzer_test` are using the same test directory. Which causes build failure if these 2 tests are running in parallel, for example: https://app.circleci.com/pipelines/github/facebook/rocksdb/5211/workflows/8639afbe-9fec-43e2-a6a4-6d47ea9cbcbe/jobs/74598/tests Pull Request resolved: https://github.com/facebook/rocksdb/pull/7783 Reviewed By: akankshamahajan15 Differential Revision: D25656762 Pulled By: jay-zhuang fbshipit-source-id: 68aa020aa5b4b3bce324315edecb4e1a60cc18e6 | 21 December 2020, 16:47:08 UTC |
a8aeefd | Jay Zhuang | 19 December 2020, 20:37:43 UTC | Update release version to 6.16 (#7782) Summary: Update release version to 6.8 Pull Request resolved: https://github.com/facebook/rocksdb/pull/7782 Reviewed By: siying Differential Revision: D25648579 Pulled By: jay-zhuang fbshipit-source-id: c536d606868b95c5fb2ae8f19c17eb259d67bc51 | 19 December 2020, 20:39:21 UTC |
4d1ac19 | Peter Dillinger | 19 December 2020, 15:59:08 UTC | aggregated-table-properties with GetMapProperty (#7779) Summary: So that we can more easily get aggregate live table data such as total filter, index, and data sizes. Also adds ldb support for getting properties Also fixed some missing/inaccurate related comments in db.h For example: $ ./ldb --db=testdb get_property rocksdb.aggregated-table-properties rocksdb.aggregated-table-properties.data_size: 102871 rocksdb.aggregated-table-properties.filter_size: 0 rocksdb.aggregated-table-properties.index_partitions: 0 rocksdb.aggregated-table-properties.index_size: 2232 rocksdb.aggregated-table-properties.num_data_blocks: 100 rocksdb.aggregated-table-properties.num_deletions: 0 rocksdb.aggregated-table-properties.num_entries: 15000 rocksdb.aggregated-table-properties.num_merge_operands: 0 rocksdb.aggregated-table-properties.num_range_deletions: 0 rocksdb.aggregated-table-properties.raw_key_size: 288890 rocksdb.aggregated-table-properties.raw_value_size: 198890 rocksdb.aggregated-table-properties.top_level_index_size: 0 $ ./ldb --db=testdb get_property rocksdb.aggregated-table-properties-at-level1 rocksdb.aggregated-table-properties-at-level1.data_size: 80909 rocksdb.aggregated-table-properties-at-level1.filter_size: 0 rocksdb.aggregated-table-properties-at-level1.index_partitions: 0 rocksdb.aggregated-table-properties-at-level1.index_size: 1787 rocksdb.aggregated-table-properties-at-level1.num_data_blocks: 81 rocksdb.aggregated-table-properties-at-level1.num_deletions: 0 rocksdb.aggregated-table-properties-at-level1.num_entries: 12466 rocksdb.aggregated-table-properties-at-level1.num_merge_operands: 0 rocksdb.aggregated-table-properties-at-level1.num_range_deletions: 0 rocksdb.aggregated-table-properties-at-level1.raw_key_size: 238210 rocksdb.aggregated-table-properties-at-level1.raw_value_size: 163414 rocksdb.aggregated-table-properties-at-level1.top_level_index_size: 0 $ Pull Request resolved: 
https://github.com/facebook/rocksdb/pull/7779 Test Plan: Added a test to ldb_test.py Reviewed By: jay-zhuang Differential Revision: D25653103 Pulled By: pdillinger fbshipit-source-id: 2905469a08a64dd6b5510cbd7be2e64d3234d6d3 | 19 December 2020, 16:00:14 UTC |
fbce7a3 | Cheng Chang | 19 December 2020, 05:33:20 UTC | Track WAL obsoletion when updating empty CF's log number (#7781) Summary: In the write path, there is an optimization: when a new WAL is created during SwitchMemtable, we update the internal log number of the empty column families to the new WAL. `FindObsoleteFiles` marks a WAL as obsolete if the WAL's log number is less than `VersionSet::MinLogNumberWithUnflushedData`. After updating the empty column families' internal log number, `VersionSet::MinLogNumberWithUnflushedData` might change, so some WALs might become obsolete to be purged from disk. For example, consider there are 3 column families: 0, 1, 2: 1. initially, all the column families' log number is 1; 2. write some data to cf0, and flush cf0, but the flush is pending; 3. now a new WAL 2 is created; 4. write data to cf1 and WAL 2, now cf0's log number is 1, cf1's log number is 2, cf2's log number is 2 (because cf1 and cf2 are empty, so their log numbers will be set to the highest log number); 5. now cf0's flush hasn't finished, flush cf1, a new WAL 3 is created, and cf1's flush finishes, now cf0's log number is 1, cf1's log number is 3, cf2's log number is 3, since WAL 1 still contains data for the unflushed cf0, no WAL can be deleted from disk; 6. now cf0's flush finishes, cf0's log number is 2 (because when cf0 was switching memtable, WAL 3 does not exist yet), cf1's log number is 3, cf2's log number is 3, so WAL 1 can be purged from disk now, but WAL 2 still cannot because `MinLogNumberToKeep()` is 2; 7. write data to cf2 and WAL 3, because cf0 is empty, its log number is updated to 3, so now cf0's log number is 3, cf1's log number is 3, cf2's log number is 3; 8. now if the background threads want to purge obsolete files from disk, WAL 2 can be purged because `MinLogNumberToKeep()` is 3. 
But there are only two flush results written to MANIFEST: the first is for flushing cf1, and the `MinLogNumberToKeep` is 1, the second is for flushing cf0, and the `MinLogNumberToKeep` is 2. So without this PR, if the DB crashes at this point and try to recover, `WalSet` will still expect WAL 2 to exist. When WAL tracking is enabled, we assume WALs will only become obsolete after a flush result is written to MANIFEST in `MemtableList::TryInstallMemtableFlushResults` (or its atomic flush counterpart). The above situation breaks this assumption. This PR tracks WAL obsoletion if necessary before updating the empty column families' log numbers. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7781 Test Plan: watch existing tests and stress tests to pass. `make -j48 blackbox_crash_test` on devserver Reviewed By: ltamasi Differential Revision: D25631695 Pulled By: cheng-chang fbshipit-source-id: ca7fff967bdb42204b84226063d909893bc0a4ec | 19 December 2020, 05:34:36 UTC |
62afa96 | Adam Retter | 19 December 2020, 00:11:17 UTC | Fix various small build issues, Java API naming (#7776) Summary: * Compatibility with older GCC. * Compatibility with older jemalloc libraries. * Remove Docker warning when building i686 binaries. * Fix case inconsistency in Java API naming (potential update to HISTORY.md deferred) Pull Request resolved: https://github.com/facebook/rocksdb/pull/7776 Reviewed By: akankshamahajan15 Differential Revision: D25607235 Pulled By: pdillinger fbshipit-source-id: 7ab0fb7fa7a34e97ed0bec991f5081acb095777d | 19 December 2020, 00:12:26 UTC |
75e4af1 | sdong | 18 December 2020, 23:27:40 UTC | Update code comment for options.ttl (#7775) Summary: The behavior of options.ttl has been updated long ago but we didn't update the code comments. Also update the periodic compaction's comment. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7775 Test Plan: See it can still build through CI. Reviewed By: ajkr Differential Revision: D25592015 fbshipit-source-id: b1db18b6787e7048ce6aedcbc3bb44493c9fc49b | 18 December 2020, 23:29:03 UTC |
239d17a | Peter Dillinger | 18 December 2020, 22:29:48 UTC | Support optimize_filters_for_memory for Ribbon filter (#7774) Summary: Primarily this change refactors the optimize_filters_for_memory code for Bloom filters, based on malloc_usable_size, to also work for Ribbon filters. This change also replaces the somewhat slow but general BuiltinFilterBitsBuilder::ApproximateNumEntries with implementation-specific versions for Ribbon (new) and Legacy Bloom (based on a recently deleted version). The reason is to emphasize speed in ApproximateNumEntries rather than 100% accuracy. Justification: ApproximateNumEntries (formerly CalculateNumEntry) is only used by RocksDB for range-partitioned filters, called each time we start to construct one. (In theory, it should be possible to reuse the estimate, but the abstractions provided by FilterPolicy don't really make that workable.) But this is only used as a heuristic estimate for hitting a desired partitioned filter size because of alignment to data blocks, which have various numbers of unique keys or prefixes. The two factors lead us to prioritize reasonable speed over 100% accuracy. optimize_filters_for_memory adds extra complication, because precisely calculating num_entries for some allowed number of bytes depends on state with optimize_filters_for_memory enabled. And the allocator-agnostic implementation of optimize_filters_for_memory, using malloc_usable_size, means we would have to actually allocate memory, many times, just to precisely determine how many entries (keys) could be added and stay below some size budget, for the current state. (In a draft, I got this working, and then realized the balance of speed vs. accuracy was all wrong.) So related to that, I have made CalculateSpace, an internal-only API only used for testing, non-authoritative also if optimize_filters_for_memory is enabled. This simplifies some code. 
Pull Request resolved: https://github.com/facebook/rocksdb/pull/7774 Test Plan: unit test updated, and for FilterSize test, range of tested values is greatly expanded (still super fast) Also tested `db_bench -benchmarks=fillrandom,stats -bloom_bits=10 -num=1000000 -partition_index_and_filters -format_version=5 [-optimize_filters_for_memory] [-use_ribbon_filter]` with temporary debug output of generated filter sizes. Bloom+optimize_filters_for_memory: 1 Filter size: 197 (224 in memory) 134 Filter size: 3525 (3584 in memory) 107 Filter size: 4037 (4096 in memory) Total on disk: 904,506 Total in memory: 918,752 Ribbon+optimize_filters_for_memory: 1 Filter size: 3061 (3072 in memory) 110 Filter size: 3573 (3584 in memory) 58 Filter size: 4085 (4096 in memory) Total on disk: 633,021 (-30.0%) Total in memory: 634,880 (-30.9%) Bloom (no offm): 1 Filter size: 261 (320 in memory) 1 Filter size: 3333 (3584 in memory) 240 Filter size: 3717 (4096 in memory) Total on disk: 895,674 (-1% on disk vs. +offm; known tolerable overhead of offm) Total in memory: 986,944 (+7.4% vs. +offm) Ribbon (no offm): 1 Filter size: 2949 (3072 in memory) 1 Filter size: 3381 (3584 in memory) 167 Filter size: 3701 (4096 in memory) Total on disk: 624,397 (-30.3% vs. Bloom) Total in memory: 690,688 (-30.0% vs. Bloom) Note that optimize_filters_for_memory is even more effective for Ribbon filter than for cache-local Bloom, because it can close the unused memory gap even tighter than Bloom filter, because of 16 byte increments for Ribbon vs. 64 byte increments for Bloom. Reviewed By: jay-zhuang Differential Revision: D25592970 Pulled By: pdillinger fbshipit-source-id: 606fdaa025bb790d7e9c21601e8ea86e10541912 | 18 December 2020, 22:31:03 UTC |
04b3524 | Zhichao Cao | 17 December 2020, 19:51:04 UTC | Inject the random write error to stress test (#7653) Summary: Inject the random write error to stress test, it requires set reopen=0 and disable_wal=true. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7653 Test Plan: pass db_stress and python3 db_crashtest.py blackbox Reviewed By: ajkr Differential Revision: D25354132 Pulled By: zhichao-cao fbshipit-source-id: 44721104eecb416e27f65f854912c40e301dd669 | 17 December 2020, 19:52:28 UTC |
99f5a80 | Akanksha Mahajan | 17 December 2020, 05:32:00 UTC | Fix clang_analyze error (#7777) Summary: Fix clang_analyze error Pull Request resolved: https://github.com/facebook/rocksdb/pull/7777 Test Plan: USE_CLANG=1 TEST_TMPDIR=/dev/shm/rocksdb OPT=-g make -j64 analyze Reviewed By: jay-zhuang Differential Revision: D25601675 Pulled By: akankshamahajan15 fbshipit-source-id: 30f58cf4d575a2d546c455fb43e856455eb72a07 | 17 December 2020, 05:34:41 UTC |
29d1274 | Adam Retter | 16 December 2020, 23:58:56 UTC | Fix failing RocksJava test compilation and add CI (#7769) Summary: * Fixes a Java test compilation issue on macOS * Cleans up CircleCI RocksDBJava build config * Adds CircleCI for RocksDBJava on MacOS * Ensures backwards compatibility with older macOS via CircleCI * Fixes RocksJava static builds ordering * Adds missing RocksJava static builds to CircleCI for Mac and Linux * Improves parallelism in RocksJava builds * Reduces the size of the machines used for RocksJava CircleCI as they don't need to be so large (Saves credits) Pull Request resolved: https://github.com/facebook/rocksdb/pull/7769 Reviewed By: akankshamahajan15 Differential Revision: D25601293 Pulled By: pdillinger fbshipit-source-id: 0a0bb9906f65438fe143487d78e37e1947364d08 | 17 December 2020, 00:00:02 UTC |
2021392 | Burton Li | 15 December 2020, 21:50:07 UTC | Do not full scan obsolete files on compaction busy (#7739) Summary: When ConcurrentTaskLimiter is enabled and there are too many outstanding compactions, BackgroundCompaction returns Status::Busy(), which shouldn't be treat as compaction failure. This caused performance issue when outstanding compactions reached the limit. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7739 Reviewed By: cheng-chang Differential Revision: D25508319 Pulled By: ltamasi fbshipit-source-id: 3b181b16ada0ca3393cfa3a7412985764e79c719 | 15 December 2020, 21:51:10 UTC |
a0e4421 | Jay Zhuang | 14 December 2020, 22:06:04 UTC | Log sst number in Corruption status (#7767) Summary: sst file number in corruption error would be very useful for debugging Pull Request resolved: https://github.com/facebook/rocksdb/pull/7767 Reviewed By: zhichao-cao Differential Revision: D25485872 Pulled By: jay-zhuang fbshipit-source-id: 67315b582cedeefbce6676015303ebe5bf6526a3 | 14 December 2020, 22:07:52 UTC |
1afbd19 | Levi Tamasi | 14 December 2020, 21:47:17 UTC | Add initial blob support to batched MultiGet (#7766) Summary: The patch adds initial support for reading blobs to the batched `MultiGet` API. The current implementation simply retrieves the blob values as the blob indexes are encountered; that is, reads from blob files are currently not batched. (This will be optimized in a separate phase.) In addition, the patch removes some dead code related to BlobDB from the batched `MultiGet` implementation, namely the `is_blob` / `is_blob_index` flags that are passed around in `DBImpl` and `MemTable` / `MemTableListVersion`. These were never hooked up to anything and wouldn't work anyways, since a single flag is not sufficient to communicate the "blobness" of multiple key-values. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7766 Test Plan: `make check` Reviewed By: jay-zhuang Differential Revision: D25479290 Pulled By: ltamasi fbshipit-source-id: 7aba2d290e31876ee592bcf1adfd1018713a8000 | 14 December 2020, 21:48:22 UTC |
003e72b | Peter Dillinger | 12 December 2020, 06:17:08 UTC | Use size_t for filter APIs, protect against overflow (#7726) Summary: Deprecate CalculateNumEntry and replace with ApproximateNumEntries (better name) using size_t instead of int and uint32_t, to minimize confusing casts and bad overflow behavior (possible though probably not realistic). Bloom sizes are now explicitly capped at max size supported by implementations: just under 4GiB for fv=5 Bloom, and just under 512MiB for fv<5 Legacy Bloom. This hardening could help to set up for fuzzing. Also, since RocksDB only uses this information as an approximation for trying to hit certain sizes for partitioned filters, it's more important that the function be reasonably fast than for it to be completely accurate. It's hard enough to be 100% accurate for Ribbon (currently reversing CalculateSpace) that adding optimize_filters_for_memory into the mix is just not worth trying to be 100% accurate for num entries for bytes. Also: - Cleaned up filter_policy.h to remove MSVC warning handling and potentially unsafe use of exception for "not implemented" - Correct the number of entries limit beyond which current Ribbon implementation falls back on Bloom instead. - Consistently use "num_entries" rather than "num_entry" - Remove LegacyBloomBitsBuilder::CalculateNumEntry as it's essentially obsolete from general implementation BuiltinFilterBitsBuilder::CalculateNumEntries. - Fix filter_bench to skip some tests that don't make sense when only one or a small number of filters has been generated. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7726 Test Plan: expanded existing unit tests for CalculateSpace / ApproximateNumEntries. Also manually used filter_bench to verify Legacy and fv=5 Bloom size caps work (much too expensive for unit test). Note that the actual bits per key is below requested due to space cap. 
$ ./filter_bench -impl=0 -bits_per_key=20 -average_keys_per_filter=256000000 -vary_key_count_ratio=0 -m_keys_total_max=256 -allow_bad_fp_rate ... Total size (MB): 511.992 Bits/key stored: 16.777 ... $ ./filter_bench -impl=2 -bits_per_key=20 -average_keys_per_filter=2000000000 -vary_key_count_ratio=0 -m_keys_total_max=2000 ... Total size (MB): 4096 Bits/key stored: 17.1799 ... $ Reviewed By: jay-zhuang Differential Revision: D25239800 Pulled By: pdillinger fbshipit-source-id: f94e6d065efd31e05ec630ae1a82e6400d8390c4 | 12 December 2020, 06:18:12 UTC |
4917795 | Cheng Chang | 12 December 2020, 00:07:48 UTC | Update SstFileWriter fuzzer to iterate and check all key-value pairs (#7761) Summary: as title Pull Request resolved: https://github.com/facebook/rocksdb/pull/7761 Test Plan: cd fuzz && make sst_file_writer_fuzzer && ./sst_file_writer_fuzzer Reviewed By: pdillinger Differential Revision: D25430802 Pulled By: cheng-chang fbshipit-source-id: 01436307df6f4c434bb608f44e1c8e4a1119f94f | 12 December 2020, 00:09:10 UTC |
b1ee191 | Peter Dillinger | 11 December 2020, 19:17:11 UTC | Fix memory leak for ColumnFamily drop with live iterator (#7749) Summary: Uncommon bug seen by ASAN with ColumnFamilyTest.LiveIteratorWithDroppedColumnFamily, if the last two references to a ColumnFamilyData are both SuperVersions (during InstallSuperVersion). The fix is to use UnrefAndTryDelete even in SuperVersion::Cleanup but with a parameter to avoid re-entering Cleanup on the same SuperVersion being cleaned up. ColumnFamilyData::Unref is considered unsafe so removed. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7749 Test Plan: ./column_family_test --gtest_filter=*LiveIter* --gtest_repeat=100 Reviewed By: jay-zhuang Differential Revision: D25354304 Pulled By: pdillinger fbshipit-source-id: e78f3a3f67c40013b8432f31d0da8bec55c5321c | 11 December 2020, 19:18:21 UTC |
07c0fc0 | Jay Zhuang | 11 December 2020, 04:36:44 UTC | Fix pyenv-version-name not found issue (#7768) Summary: Remove the workaround for that. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7768 Reviewed By: cheng-chang Differential Revision: D25487150 Pulled By: jay-zhuang fbshipit-source-id: 4514dfc64eb56d2dad48f9a24176c21af8107def | 11 December 2020, 04:37:51 UTC |
fd7d8dc | Cheng Chang | 10 December 2020, 20:54:34 UTC | Do not log unnecessary WAL obsoletion events (#7765) Summary: min_wal_number_to_keep should not be decreasing, if it does not increase, then there is no need to log the WAL obsoletions in MANIFEST since a previous one has been logged. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7765 Test Plan: watch existing tests and stress tests to pass Reviewed By: pdillinger Differential Revision: D25462542 Pulled By: cheng-chang fbshipit-source-id: 0085fcb6edf5cf2b0fc32f9932a7566f508768ff | 10 December 2020, 20:55:49 UTC |
40f2b65 | Azat Khuzhin | 10 December 2020, 17:33:53 UTC | Eliminate possible race between LockFile() vs UnlockFile() (#7721) Summary: LockFile() accessing LockHoldingInfo (element of locked_files) by reference after mutex_locked_files had been released. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7721 Reviewed By: pdillinger Differential Revision: D25431839 Pulled By: jay-zhuang fbshipit-source-id: eefee93f12a8016a98e2466e442af2605b3e2a5e | 10 December 2020, 17:35:11 UTC |
8ff6557 | Adam Retter | 10 December 2020, 05:19:55 UTC | Add further tests to ASSERT_STATUS_CHECKED (2) (#7698) Summary: Second batch of adding more tests to ASSERT_STATUS_CHECKED. * external_sst_file_basic_test * checkpoint_test * db_wal_test * db_block_cache_test * db_logical_block_size_cache_test * db_blob_index_test * optimistic_transaction_test * transaction_test * point_lock_manager_test * write_prepared_transaction_test * write_unprepared_transaction_test Pull Request resolved: https://github.com/facebook/rocksdb/pull/7698 Reviewed By: cheng-chang Differential Revision: D25441664 Pulled By: pdillinger fbshipit-source-id: 9e78867f32321db5d4833e95eb96c5734526ef00 | 10 December 2020, 05:21:16 UTC |
8e2749f | Michael Lee | 10 December 2020, 03:23:06 UTC | Fix use of positional args in BUCK rules (#7760) Summary: Prefer to use keyword args rather than positional args for Buck rules. This appears to be the only remaining instance for `custom_unittest` Pull Request resolved: https://github.com/facebook/rocksdb/pull/7760 Test Plan: Search for other instances of `custom_unittest` without `name` Reviewed By: cheng-chang Differential Revision: D25439887 Pulled By: mzlee fbshipit-source-id: 518c541a5c01207c7b0c1f7322addf5cc4f09f92 | 10 December 2020, 03:25:31 UTC |
7123990 | Manuel Ung | 10 December 2020, 03:12:10 UTC | Invalidate iterator on transaction clear (#7733) Summary: Some clients do not close their iterators until after the transaction finishes. To handle this case, we will invalidate any iterators on transaction clear. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7733 Reviewed By: cheng-chang Differential Revision: D25261158 Pulled By: lth fbshipit-source-id: b91320f00c54cbe0e6882b794b34f3bb5640dbc0 | 10 December 2020, 03:13:22 UTC |
80159f6 | Cheng Chang | 10 December 2020, 03:05:14 UTC | Carry over min_log_number_to_keep_2pc in new MANIFEST (#7747) Summary: When two phase commit is enabled, `VersionSet::min_log_number_to_keep_2pc` is set during flush. But when a new MANIFEST is created, the `min_log_number_to_keep_2pc` is not carried over to the new MANIFEST. So if a new MANIFEST is created and then DB is reopened, the `min_log_number_to_keep_2pc` will be lost. This may cause DB recovery errors. The bug is reproduced in a new unit test in `version_set_test.cc`. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7747 Test Plan: The new unit test in `version_set_test.cc` should pass. Reviewed By: jay-zhuang Differential Revision: D25350661 Pulled By: cheng-chang fbshipit-source-id: eee890d5b19f15769069670692e270ae31044ece | 10 December 2020, 03:07:25 UTC |
8a1488e | anand76 | 10 December 2020, 00:57:54 UTC | Ensure that MultiGet works properly with compressed cache (#7756) Summary: Ensure that when direct IO is enabled and a compressed block cache is configured, MultiGet inserts compressed data blocks into the compressed block cache. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7756 Test Plan: Add unit test to db_basic_test Reviewed By: cheng-chang Differential Revision: D25416240 Pulled By: anand1976 fbshipit-source-id: 75d57526370c9c0a45ff72651f3278dbd8a9086f | 10 December 2020, 01:01:13 UTC |
3c2a448 | Cheng Chang | 10 December 2020, 00:56:26 UTC | Add a test for disabling tracking WAL (#7757) Summary: If WAL tracking was enabled, then disabled during reopen, the previously tracked WALs should be removed from MANIFEST. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7757 Test Plan: a new unit test `DBBasicTest.DisableTrackWal` is added. Reviewed By: jay-zhuang Differential Revision: D25410508 Pulled By: cheng-chang fbshipit-source-id: 9d8d9e665066135930a7c1035bb8c2f68bded6a0 | 10 December 2020, 00:58:26 UTC |
89cc06b | Cheng Chang | 10 December 2020, 00:23:52 UTC | Add a new db_map_fuzzer (#7762) Summary: Execute randomly generated operations on both a DB and a std::map, then reopen the DB and make sure that iterating the DB produces the same key-value pairs as iterating through the std::map. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7762 Test Plan: cd fuzz && make db_map_fuzzer && ./db_map_fuzzer Reviewed By: pdillinger Differential Revision: D25437485 Pulled By: cheng-chang fbshipit-source-id: 3a93f7efd046b194193e45d2ab1ad81565510781 | 10 December 2020, 00:26:35 UTC |
efe827b | Cheng Chang | 10 December 2020, 00:01:07 UTC | Always track WAL obsoletion (#7759) Summary: Currently, when a WAL becomes obsolete after flushing, if VersionSet::WalSet does not contain the WAL, we do not track the WAL obsoletion event in MANIFEST. But consider this case: * WAL 10 is synced, a VersionEdit is LogAndApplied to MANIFEST to log this WAL addition event, but the VersionEdit is not applied to WalSet yet since its corresponding ManifestWriter is still pending in the write queue; * Since the above ManifestWriter is blocking, the LogAndApply will block on a conditional variable and release the db mutex, so another LogAndApply can proceed to enqueue other VersionEdits concurrently; * Now flush happens, and WAL 10 becomes obsolete, although WalSet does not contain WAL 10 yet, we should call LogAndApply to enqueue a VersionEdit to indicate the obsoletion of WAL 10; * otherwise, when the queued edit indicating WAL 10 addition is logged to MANIFEST, and DB crashes and reopens, the WAL 10 might have been removed from disk, but it still exists in MANIFEST. This PR changes the behavior to: always `LogAndApply` any WAL addition or obsoletion event, without considering the order issues caused by concurrency, but when applying the edits to `WalSet`, do not add the WALs if they are already obsolete. In this approach, the logical events of WAL addition and obsoletion are always tracked in MANIFEST, so we can inspect the MANIFEST and know all the previous WAL events, but we choose to ignore certain events due to the concurrency issues such as the case above, or the case in https://github.com/facebook/rocksdb/pull/7725. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7759 Test Plan: make check Reviewed By: pdillinger Differential Revision: D25423089 Pulled By: cheng-chang fbshipit-source-id: 9cb9a7fbc1875bf954f2a42f9b6cfd6d49a7b21c | 10 December 2020, 00:02:12 UTC |
98236fb | Sergei Petrunia | 09 December 2020, 20:09:46 UTC | LockTree library, originally from PerconaFT (#7753) Summary: To be used for implementing Range Locking. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7753 Reviewed By: zhichao-cao Differential Revision: D25378980 Pulled By: cheng-chang fbshipit-source-id: 801a9c5cd92a84654ca2586b73e8f69001e89320 | 09 December 2020, 20:10:57 UTC |
7b2216c | Adam Retter | 08 December 2020, 23:53:59 UTC | Add further tests to ASSERT_STATUS_CHECKED (1) (#7679) Summary: First batch of adding more tests to ASSERT_STATUS_CHECKED. * db_iterator_test * db_memtable_test * db_merge_operator_test * db_merge_operand_test * write_batch_test * write_batch_with_index_test Pull Request resolved: https://github.com/facebook/rocksdb/pull/7679 Reviewed By: ajkr Differential Revision: D25399270 Pulled By: pdillinger fbshipit-source-id: 3017d0a686aec5cd2d743fc2acbbf75df239f3ba | 08 December 2020, 23:55:04 UTC |
66e54c5 | pkubaj | 08 December 2020, 23:30:44 UTC | Fix build on FreeBSD/powerpc64(le) (#7732) Summary: To build on FreeBSD, arch_ppc_probe needs to be adapted to FreeBSD. Since FreeBSD uses elf_aux_info as an getauxval equivalent, use it and include necessary headers: - machine/cpu.h for PPC_FEATURE2_HAS_VEC_CRYPTO, - sys/auxv.h for elf_aux_info, - sys/elf_common.h for AT_HWCAP2. elf_aux_info isn't checked for being available, because it's available since FreeBSD 12.0. rocksdb assumes using Clang on FreeBSD, but powerpc* platforms switch to Clang only since 13.0. This patch makes rocksdb build on FreeBSD on powerpc64 and powerpc64le platforms. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7732 Reviewed By: ltamasi Differential Revision: D25399194 Pulled By: pdillinger fbshipit-source-id: 9c905147d75f98cd2557dd2f86a940b8e6c5afcd | 08 December 2020, 23:31:56 UTC |
93c6c18 | Vincent Milum Jr | 08 December 2020, 21:32:04 UTC | Adding ARM AT_HWCAP support for FreeBSD (#7750) Summary: Pull Request resolved: https://github.com/facebook/rocksdb/pull/7750 Reviewed By: ltamasi Differential Revision: D25400609 Pulled By: pdillinger fbshipit-source-id: 13b15e2f490acc011b648fbd9615ea8e580cccc7 | 08 December 2020, 21:33:21 UTC |
07030c6 | Cheng Chang | 08 December 2020, 18:56:50 UTC | Do not track obsolete WALs in MANIFEST even if they are synced (#7725) Summary: Consider the case: 1. All column families are flushed, so all WALs become obsolete, but no WAL is removed from disk yet because the removal is asynchronous, a VersionEdit is written to MANIFEST indicating that WALs before a certain WAL number are obsolete, let's say this number is 3; 2. `SyncWAL` is called, so all the on-disk WALs are synced, and if track_and_verify_wal_in_manifest=true, the WALs will be tracked in MANIFEST, let's say the WAL numbers are 1 and 2; 3. DB crashes; 4. During DB recovery, when replaying MANIFEST, we first see that WAL with number < 3 are obsolete, then we see that WAL 1 and 2 are synced, so according to current implementation of `WalSet`, the `WalSet` will be recovered to include WAL 1 and 2; 5. WAL 1 and 2 are asynchronously deleted from disk, then the WAL verification algorithm fails with `Corruption: missing WAL`. The above case is reproduced in a new unit test `DBBasicTestTrackWal::DoNotTrackObsoleteWal`. The fix is to maintain the upper bound of the obsolete WAL numbers, any WAL with number less than the maintained number is considered to be obsolete, so shouldn't be tracked even if they are later synced. The number is maintained in `WalSet`. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7725 Test Plan: 1. a new unit test `DBBasicTestTrackWal::DoNotTrackObsoleteWal` is added. 2. run `make crash_test` on devserver. Reviewed By: riversand963 Differential Revision: D25238914 Pulled By: cheng-chang fbshipit-source-id: f5dccd57c3d89f19565ec5731f2d42f06d272b72 | 08 December 2020, 18:58:04 UTC |
11c4be2 | Yanqin Jin | 08 December 2020, 10:36:18 UTC | Refactor ProcessManifestWrites a little bit (#7751) Summary: This PR removes a nested loop inside ProcessManifestWrites. The new implementation has the same behavior as the old code with simpler logic and lower complexity. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7751 Test Plan: make check Run make crash_test on devserver and succeeds 3 times. Reviewed By: ltamasi Differential Revision: D25363526 Pulled By: riversand963 fbshipit-source-id: 27e681949dacd7501a752e5e517b9e85b54ccb2e | 08 December 2020, 10:37:38 UTC |
d8bd9fc | Sergei Petrunia | 08 December 2020, 04:16:18 UTC | Range Locking: Allow different LockManagers, add Range Lock definitions (#7443) Summary: This PR has two commits: 1. Modify the code to allow different Lock Managers (of any kind) to be used. It is implied that a LockManager uses its own custom LockTracker. 2. Add definitions for Range Locking (class Endpoint and GetRangeLock() function. cheng-chang, is this what you've had in mind (should the PR have both item 1 and item 2?) Pull Request resolved: https://github.com/facebook/rocksdb/pull/7443 Reviewed By: zhichao-cao Differential Revision: D24123172 Pulled By: cheng-chang fbshipit-source-id: c6548ad6d4cc3c25f68d13b29147bc6fdf357185 | 08 December 2020, 04:18:07 UTC |
db03172 | mrambacher | 08 December 2020, 04:09:55 UTC | Change ErrorHandler methods to return const Status& (#7539) Summary: This change eliminates the need for a lot of the PermitUncheckedError calls on return from ErrorHandler methods. The calls are no longer needed as the status is returned as a reference rather than a copy. Additionally, this means that the originating status (recovery_error_, bg_error_) is not cleared implicitly as a result of calling one of these methods. For this class, I do not know if the proper behavior should be to call PermitUncheckedError in the destructor or if the checked state should be cleared when the status is cleared. I did tests both ways. Without the code in the destructor, the status will need to be cleared in at least some of the places where it is set to OK. When running tests, I found no instances where this class was destructed with a non-OK, non-checked Status. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7539 Reviewed By: anand1976 Differential Revision: D25340565 Pulled By: pdillinger fbshipit-source-id: 1730c035c81a475875ea745226112030ec25136c | 08 December 2020, 04:11:35 UTC |
8a06fe2 | Levi Tamasi | 08 December 2020, 01:35:54 UTC | Do not use ASSERT_OK in child threads in ExternalSstFileTest.PickedLevelBug (#7754) Summary: `googletest` uses exceptions to communicate assertion failures when `GTEST_THROW_ON_FAILURE` is set, which does not go well with `std::thread`s, since an exception escaping the top-level function of an `std::thread` object or an `std::thread` getting destroyed without having been `join`ed or `detach`ed first results in a call to `std::terminate`. The patch fixes this by moving the `Status` assertions of background operations in `ExternalSstFileTest.PickedLevelBug` to the main thread. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7754 Test Plan: `make check` Reviewed By: riversand963 Differential Revision: D25383808 Pulled By: ltamasi fbshipit-source-id: 32fb2721e5169ec898d218900bc0d83eead45d03 | 08 December 2020, 01:37:17 UTC |
ba2a3bf | davkor | 07 December 2020, 22:01:05 UTC | OSS-Fuzz integration and db_fuzzer (#7674) Summary: This PR adds a fuzzer to the project and infrastructure to integrate Rocksdb with OSS-Fuzz. OSS-Fuzz is a service run by Google that performs continuous fuzzing of important open source projects. The LevelDB project is also in being fuzzed by OSS-Fuzz (https://github.com/google/oss-fuzz/tree/master/projects/leveldb). Essentially, OSS-Fuzz will perform the fuzzing for you and email you bug reports, coverage reports etc. All we need is a set of email addresses that will receive this information. For cross-referencing, the PR that adds the OSS-Fuzz logic is here: https://github.com/google/oss-fuzz/pull/4642 The `db_fuzzer` of the PR performs stateful fuzzing of Rocksdb by calling a sequence of Rockdb's APIs with random input in each fuzz iteration. Each fuzz iteration, thus, creates a new instance of Rocksdb and operates on this given instance. The goal is to test diverse states of Rocksdb and ensure no state lead to error conditions, e.g. memory corruption vulnerabilities. The fuzzer is similar (although more complex) to the fuzzer that is currently being used to analyse Leveldb (https://github.com/google/oss-fuzz/blob/master/projects/leveldb/fuzz_db.cc) Pull Request resolved: https://github.com/facebook/rocksdb/pull/7674 Reviewed By: pdillinger Differential Revision: D25238536 Pulled By: cheng-chang fbshipit-source-id: 610331c49a77eb68d3b1d7d5ef1b0ce230ac0630 | 07 December 2020, 22:02:20 UTC |
20c7d7c | Akanksha Mahajan | 07 December 2020, 21:42:27 UTC | Handling misuse of snprintf return value (#7686) Summary: Handle misuse of snprintf return value to avoid Out of bound read/write. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7686 Test Plan: make check -j64 Reviewed By: riversand963 Differential Revision: D25030831 Pulled By: akankshamahajan15 fbshipit-source-id: 1a1d181c067c78b94d720323ae00b79566b57cfa | 07 December 2020, 21:43:55 UTC |
b77569f | Neil Mitchell | 07 December 2020, 18:26:20 UTC | Make the TARGETS file Starlark compliant (#7743) Summary: Buck TARGETS files are sometimes parsed with Python, and sometimes with Starlark - this TARGETS file was not Starlark compliant. In Starlark you can't have a top-level if in a TARGETS file, but you can have a ternary `a if b else c`. Therefore I converted TARGETS, and updated the generator for it. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7743 Reviewed By: pdillinger Differential Revision: D25342587 Pulled By: ndmitchell fbshipit-source-id: 88cbe8632071a45a3ea8675812967614c62c78d1 | 07 December 2020, 18:28:26 UTC |
1df8584 | Akanksha Mahajan | 07 December 2020, 18:23:17 UTC | Fix unit test failure ppc64le in travis (#7752) Summary: Added a fix for the failure of DBTest2.PartitionedIndexUserToInternalKey on ppc64le in travis Closes https://github.com/facebook/rocksdb/issues/7746 Pull Request resolved: https://github.com/facebook/rocksdb/pull/7752 Test Plan: Ran travis job multiple times and it passed. Will keep watching the travis job after this patch. Reviewed By: pdillinger Differential Revision: D25373130 Pulled By: akankshamahajan15 fbshipit-source-id: fa0e3f85f75b687415044a506e42cc38ead87975 | 07 December 2020, 18:24:33 UTC |
eee0af9 | Yanqin Jin | 05 December 2020, 22:17:11 UTC | Add full_history_ts_low to column family (#7740) Summary: Following https://github.com/facebook/rocksdb/issues/7655 and https://github.com/facebook/rocksdb/issues/7657, this PR adds `full_history_ts_low_` to `ColumnFamilyData`. `ColumnFamilyData::full_history_ts_low_` will be used to create `FlushJob` and `CompactionJob`. `ColumnFamilyData::full_history_ts_low` is persisted to the MANIFEST file. An application can only increase its value. Consider the following case: > > The database has a key at ts=950. `full_history_ts_low` is first set to 1000, and then a GC is triggered > and cleans up all data older than 1000. If the application sets `full_history_ts_low` to 900 afterwards, > and tries to read at ts=960, the key at 950 is not seen. From the perspective of the read, the result > is hard to reason. For simplicity, we just do now allow decreasing full_history_ts_low for now. > During recovery, the value of `full_history_ts_low` is restored for each column family if applicable. Note that version edits in the MANIFEST file for the same column family may have `full_history_ts_low` unsorted due to the potential interleaving of `LogAndApply` calls. Only the max will be used to restore the state of the column family. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7740 Test Plan: make check Reviewed By: ltamasi Differential Revision: D25296217 Pulled By: riversand963 fbshipit-source-id: 24acda1df8262cd7cfdc6ce7b0ec56438abe242a | 05 December 2020, 22:18:22 UTC |
e34b2e9 | Peter Dillinger | 05 December 2020, 07:22:18 UTC | Migrate away from broken macos on Travis (#7745) Summary: Add macos+cmake build on CircleCI instead. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7745 Test Plan: CI Reviewed By: riversand963 Differential Revision: D25352864 Pulled By: pdillinger fbshipit-source-id: 6b0a328cbe715bc3b43d70e919a27c834edcf079 | 05 December 2020, 07:24:27 UTC |
61932cd | Levi Tamasi | 05 December 2020, 05:28:26 UTC | Add blob support to DBIter (#7731) Summary: The patch adds iterator support to the integrated BlobDB implementation. Whenever a blob reference is encountered during iteration, the corresponding blob is retrieved by calling `Version::GetBlob`, assuming the `expose_blob_index` (formerly `allow_blob`) flag is *not* set. (Note: the flag is set by the old stacked BlobDB implementation, which has its own blob file handling/blob retrieval logic.) In addition, `DBIter` now uniformly returns `Status::NotSupported` with the error message `"BlobDB does not support merge operator."` when encountering a blob reference while performing a merge (instead of potentially returning a message that implies the database should be opened using the stacked BlobDB's `Open`.) TODO: We can implement support for lazily retrieving the blob value (or in other words, bypassing the retrieval of blob values based on key) by extending the `Iterator` API with a new `PrepareValue` method (similarly to `InternalIterator`, which already supports lazy values). Pull Request resolved: https://github.com/facebook/rocksdb/pull/7731 Test Plan: `make check` Reviewed By: riversand963 Differential Revision: D25256293 Pulled By: ltamasi fbshipit-source-id: c39cd782011495a526cdff99c16f5fca400c4811 | 05 December 2020, 05:29:38 UTC |
e102de7 | Zhichao Cao | 05 December 2020, 04:30:23 UTC | Fix assert(cfd->imm()->NumNotFlushed() > 0) in FlushMemtable (#7744) Summary: In current code base, in FlushMemtable, when `(Flush_reason == FlushReason::kErrorRecoveryRetryFlush && (!cfd->mem()->IsEmpty() || !cached_recoverable_state_empty_.load()))`, we assert that cfd->imm()->NumNotFlushed() > 0. However, there are some corner cases that can fail this assert: 1) if there are multiple CFs, some CF has immutable memtable, some CFs don't. In ResumeImpl, all CFs will call FlushMemtable, which will hit the assert. 2) Regular flush is scheduled and running, the resume thread is waiting. New KVs are inserted and SchedulePendingFlush is called. Regular flush will continue call MaybeScheduleFlushAndCompaction until all the immutable memtables are flushed. When regular flush ends and auto resume thread starts to schedule new flushes, cfd->imm()->NumNotFlushed() can be 0. Remove the assert and added the comments. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7744 Test Plan: make check and pass the stress test Reviewed By: riversand963 Differential Revision: D25340573 Pulled By: zhichao-cao fbshipit-source-id: eac357bdace660247c197f01a9ff6857e3c97672 | 05 December 2020, 04:31:39 UTC |
ee4bd47 | Adam Retter | 04 December 2020, 23:21:18 UTC | Fix compilation on Apple Silicon (#7714) Summary: Closes - https://github.com/facebook/rocksdb/issues/7710 I tested this on an Apple DTK (Developer Transition Kit) with an Apple A12Z Bionic CPU and macOS Big Sur (11.0.1). Previously the arm64 specific CRC optimisations were limited to Linux only OS... Well now Apple Silicon is also arm64 but runs macOS ;-) Pull Request resolved: https://github.com/facebook/rocksdb/pull/7714 Reviewed By: ltamasi Differential Revision: D25287349 Pulled By: pdillinger fbshipit-source-id: 639b168bf0ac2652907531e9604936ac4974b577 | 04 December 2020, 23:22:33 UTC |
eb5a8c0 | Zhichao Cao | 04 December 2020, 22:57:29 UTC | Fix the thread wait case in error_handler (#7700) Summary: In error_handler auto recovery case, if recovery_in_prog_ is false, the recover is finished or failed. In this case, the auto recovery thread should finish its execution so recovery_thread_ should be null. However, in some cases, it is not null, the caller should not directly returned. Instead, it should wait for a while and create a new thread to execute the new recovery. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7700 Test Plan: make check, error_handler_fs_test Reviewed By: anand1976 Differential Revision: D25098233 Pulled By: zhichao-cao fbshipit-source-id: 5a1cba234ca18f6dd5d1be88e02d66e1d5ce931b | 04 December 2020, 22:58:37 UTC |
70f2e09 | Cheng Chang | 04 December 2020, 03:21:08 UTC | Write min_log_number_to_keep to MANIFEST during atomic flush under 2 phase commit (#7570) Summary: When 2 phase commit is enabled, if there are prepared data in a WAL, the WAL should be kept, the minimum log number for such a WAL is written to MANIFEST during flush. In atomic flush, such information is not written to MANIFEST. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7570 Test Plan: Added a new unit test `DBAtomicFlushTest.ManualFlushUnder2PC`, this test fails in atomic flush without this PR, after this PR, it succeeds. Reviewed By: riversand963 Differential Revision: D24394222 Pulled By: cheng-chang fbshipit-source-id: 60ce74b21b704804943be40c8de01b41269cf116 | 04 December 2020, 03:22:24 UTC |
ac2f90d | Ramkumar Vadivelu | 03 December 2020, 20:44:07 UTC | add 6.15.fb to check_format_compatible.sh (#7738) Summary: Update check_format_compatible.sh with 6.15.fb Pull Request resolved: https://github.com/facebook/rocksdb/pull/7738 Reviewed By: ajkr Differential Revision: D25307717 Pulled By: ramvadiv fbshipit-source-id: 49f5c6366e8c8a2ade9697975453c9c65e919f1b | 03 December 2020, 20:45:14 UTC |
29e8f6a | Zhichao Cao | 03 December 2020, 02:22:05 UTC | Add kManifestWriteNoWAL to BackgroundErrorReason to handle Flush IO Error when WAL is disabled (#7693) Summary: In the current code base, all the manifest writes with IO error will be set with reason: BackgroundErrorReason::kManifestWrite, which will be mapped to the kHardError if the IO Error is retryable. However, if the system does not use the WAL, all the retryable IO error should be mapped to kSoftError. Create this PR to handle is special case by adding kManifestWriteNoWAL to BackgroundErrorReason. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7693 Test Plan: make check, add new testing cases to error_handler_fs_test Reviewed By: anand1976 Differential Revision: D25066204 Pulled By: zhichao-cao fbshipit-source-id: d59553896c2eac3fb37c05238544d2b265379462 | 03 December 2020, 02:24:01 UTC |
3b9bfe8 | Peter Dillinger | 02 December 2020, 23:15:17 UTC | Skip minimum rate check in Sandcastle (#7728) Summary: The minimum rate check in RateLimiterTest.Rate can fail in Facebook's CI system Sandcastle, presumably due to heavily loaded machines. This change disables the minimum rate check for Sandcastle runs, and cleans up the code disabling it on other CI environments. (The amount of conditionally compiled code shall be minimized.) Pull Request resolved: https://github.com/facebook/rocksdb/pull/7728 Test Plan: try new test with and without setting envvar SANDCASTLE=1 Reviewed By: ltamasi Differential Revision: D25247642 Pulled By: pdillinger fbshipit-source-id: d786233af37af9a874adbb3a9e2707ec52c27a5a | 02 December 2020, 23:16:49 UTC |
7fec715 | Jay Zhuang | 02 December 2020, 20:59:23 UTC | Make CompactRange and GetApproximateSizes work with timestamp (#7684) Summary: Add timestamp to the `CompactRange()` and `GetApproximateSizes` range keys if needed. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7684 Test Plan: make check Reviewed By: riversand963 Differential Revision: D25015421 Pulled By: jay-zhuang fbshipit-source-id: 51ca0756087eb053a3b11801e5c7ce1c6e2d38a9 | 02 December 2020, 21:00:53 UTC |
e062a71 | Yanqin Jin | 02 December 2020, 17:29:50 UTC | Fix assertion failure in bg flush (#7362) Summary: https://github.com/facebook/rocksdb/issues/7340 reports and reproduces an assertion failure caused by a combination of the following: - atomic flush is disabled. - a column family can appear multiple times in the flush queue at the same time. This behavior was introduced in release 5.17. Consequently, it is possible that two flushes race with each other. One bg flush thread flushes all memtables. The other thread calls `FlushMemTableToOutputFile()` afterwards, and hits the assertion error below. ``` assert(cfd->imm()->NumNotFlushed() != 0); assert(cfd->imm()->IsFlushPending()); ``` Fix this by reverting the behavior. In non-atomic-flush case, a column family can appear in the flush queue at most once at the same time. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7362 Test Plan: make check Also run stress test successfully for 10 times. ``` make crash_test ``` Reviewed By: ajkr Differential Revision: D25172996 Pulled By: riversand963 fbshipit-source-id: f1559b6366cc609e961e3fc83fae548f1fad08ce | 02 December 2020, 17:31:14 UTC |
9e16404 | Jay Zhuang | 01 December 2020, 22:05:19 UTC | Exclude timestamp from prefix extractor (#7668) Summary: Timestamp should not be included in prefix extractor, as we discussed here: https://github.com/facebook/rocksdb/pull/7589#discussion_r511068586 Pull Request resolved: https://github.com/facebook/rocksdb/pull/7668 Test Plan: added unittest Reviewed By: riversand963 Differential Revision: D24966265 Pulled By: jay-zhuang fbshipit-source-id: 0dae618c333d4b7942a40d556535a1795e060aea | 01 December 2020, 22:07:15 UTC |
b937be3 | Adam Retter | 01 December 2020, 19:20:10 UTC | Fix Compilation on ppc64le using Clang 11 (#7713) Summary: Closes https://github.com/facebook/rocksdb/issues/7691 The optimised CRC code for PPC64le which was originally imported in https://github.com/facebook/rocksdb/pull/2353 is not compatible with Clang 11. It looks like the code most likely originated from https://github.com/antonblanchard/crc32-vpmsum. The code relied on a GCC header file `ppc-asm.h` which is not available in Clang. To solve this, I have taken the same approach as the upstream project from which the CRC code came https://github.com/antonblanchard/crc32-vpmsum/commit/ffc8018efc1e4f05d22a9fc8dde57109dd09368b#diff-ec3e62c56fbcddeb07230f2a4673c1abd7f0f1cc8e48a2aa560056cfc1b25d60 and simply imported a copy of the GCC header file into our code-base which will be used when Clang is the compiler on ppc64le. **NOTE**: The new file `util/ppc-asm.h` may have licensing implications which I guess need to be approved by RocksDB/Facebook before this is merged Pull Request resolved: https://github.com/facebook/rocksdb/pull/7713 Reviewed By: jay-zhuang Differential Revision: D25222645 Pulled By: pdillinger fbshipit-source-id: e3fec9136f26ce1eb7a027048bcf77a6cb3c769c | 01 December 2020, 19:21:44 UTC |
0b06af9 | Peter Dillinger | 01 December 2020, 18:15:36 UTC | Warn about practically unfixable TSAN warnings in stack trace (#7723) Summary: TSAN reports that our stack trace handler makes unsafe calls during a signal handler. I just tried fixing some of them and I don't think it's fixable unless we can get away from using FILE stdio. Even if we can use lower level functions only, I'm not sure it's fixed. I also tried suppressing the reports with function and file level TSAN suppression, but that doesn't seem to work, perhaps because the violation is reported on the callee, not the caller. So I added a warning to be printed whenever these violations would be reported that they are practically ignorable. Internal ref: T77844138 Pull Request resolved: https://github.com/facebook/rocksdb/pull/7723 Test Plan: run external_sst_file_test with seeded abort(), with TSAN (TSAN warnings + new warning) and without TSAN (no warning, just stack trace). Reviewed By: akankshamahajan15 Differential Revision: D25228011 Pulled By: pdillinger fbshipit-source-id: 3eda1d6e7ca3cdc64076cf99ae954168837d2818 | 01 December 2020, 18:17:56 UTC |
eb65d67 | Andrew Kryczka | 01 December 2020, 02:10:18 UTC | Fix kPointInTimeRecovery handling of truncated WAL (#7701) Summary: WAL may be truncated to an incomplete record due to crash while writing the last record or corruption. In the former case, no hole will be produced since no ACK'd data was lost. In the latter case, a hole could be produced without this PR since we proceeded to recover the next WAL as if nothing happened. This PR changes the record reading code to always report a corruption for incomplete records in `kPointInTimeRecovery` mode, and the upper layer will only ignore them if the next WAL has consecutive seqnum (i.e., we are guaranteed no hole). While this solves the hole problem for the case of incomplete records, the possibility is still there if the WAL is corrupted by truncation to an exact record boundary. This PR also regresses how much data can be recovered when writes are mixed with/without `WriteOptions::disableWAL`, as then we can not distinguish between a seqnum gap caused by corruption and a seqnum gap caused by a `disableWAL` write. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7701 Test Plan: Interestingly there already was a test for this case (`DBWALTestWithParams.kPointInTimeRecovery`); it just had a typo bug in the verification that prevented it from noticing holes in recovery. Reviewed By: anand1976 Differential Revision: D25111765 Pulled By: ajkr fbshipit-source-id: 5e330b13b1ee2b5be096cea9d0ff6075843e57b6 | 01 December 2020, 02:11:38 UTC |
cc431ec | Steve Yen | 30 November 2020, 20:08:23 UTC | Fix merge operator docs typo (#7716) Summary: Pull Request resolved: https://github.com/facebook/rocksdb/pull/7716 Reviewed By: pdillinger Differential Revision: D25214340 Pulled By: zhichao-cao fbshipit-source-id: 143a8e7d076917e60bbe6993d60ec55f33e2ab56 | 30 November 2020, 20:09:39 UTC |
51a8dc6 | Levi Tamasi | 24 November 2020, 05:07:01 UTC | Integrated blob garbage collection: relocate blobs (#7694) Summary: The patch adds basic garbage collection support to the integrated BlobDB implementation. Valid blobs residing in the oldest blob files are relocated as they are encountered during compaction. The threshold that determines which blob files qualify is computed based on the configuration option `blob_garbage_collection_age_cutoff`, which was introduced in https://github.com/facebook/rocksdb/issues/7661 . Once a blob is retrieved for the purposes of relocation, it passes through the same logic that extracts large values to blob files in general. This means that if, for instance, the size threshold for key-value separation (`min_blob_size`) got changed or writing blob files got disabled altogether, it is possible for the value to be moved back into the LSM tree. In particular, one way to re-inline all blob values if needed would be to perform a full manual compaction with `enable_blob_files` set to `false`, `enable_blob_garbage_collection` set to `true`, and `blob_file_garbage_collection_age_cutoff` set to `1.0`. Some TODOs that I plan to address in separate PRs: 1) We'll have to measure the amount of new garbage in each blob file and log `BlobFileGarbage` entries as part of the compaction job's `VersionEdit`. (For the time being, blob files are cleaned up solely based on the `oldest_blob_file_number` relationships.) 2) When compression is used for blobs, the compression type hasn't changed, and the blob still qualifies for being written to a blob file, we can simply copy the compressed blob to the new file instead of going through decompression and compression. 3) We need to update the formula for computing write amplification to account for the amount of data read from blob files as part of GC. 
Pull Request resolved: https://github.com/facebook/rocksdb/pull/7694 Test Plan: `make check` Reviewed By: riversand963 Differential Revision: D25069663 Pulled By: ltamasi fbshipit-source-id: bdfa8feb09afcf5bca3b4eba2ba72ce2f15cd06a | 24 November 2020, 05:08:22 UTC |
dd6b7fc | Andrew Kryczka | 24 November 2020, 00:27:46 UTC | Return `Status` from `MemTable` mutation functions (#7656) Summary: This PR updates `MemTable::Add()`, `MemTable::Update()`, and `MemTable::UpdateCallback()` to return `Status` objects, and adapts the client code in `MemTableInserter`. The goal is to prepare these functions for key-value checksum, where we want to verify key-value integrity while adding to memtable. After this PR, the memtable mutation functions can report a failed integrity check by returning `Status::Corruption`. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7656 Reviewed By: riversand963 Differential Revision: D24900497 Pulled By: ajkr fbshipit-source-id: 1a7e80581e3774676f2bbba2f0a0b04890f40009 | 24 November 2020, 00:29:04 UTC |
0baa505 | Peter Dillinger | 23 November 2020, 03:50:42 UTC | Add Ribbon schema test to bloom_test (#7696) Summary: These new unit tests should ensure that we don't accidentally change the interpretation of bits for what I call Standard128Ribbon filter internally, available publicly as NewExperimentalRibbonFilterPolicy. There is very little intuitive reason for the values we check against in these tests; I just plug in the right expected values upon watching the test fail initially. Most (but not all) of the tests are essentially "whitebox" "round-trip." We create a filter from fixed keys, and first compare the checksum of those filter bytes against a saved value. We also run queries against other fixed keys, comparing which return false positives against a saved set. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7696 Test Plan: test addition and refactoring only Reviewed By: jay-zhuang Differential Revision: D25082289 Pulled By: pdillinger fbshipit-source-id: b5ca646fdcb5a1c2ad2085eda4a1fd44c4287f67 | 23 November 2020, 03:52:04 UTC |
1a5fc4f | Yanqin Jin | 21 November 2020, 02:38:47 UTC | Port corruption test to use custom env (#7699) Summary: Allow corruption_test to run on custom env loaded via `Env::LoadEnv()`. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7699 Test Plan: ``` make corruption_test ./corruption_test ``` Also run on in-house custom env. Reviewed By: zhichao-cao Differential Revision: D25135525 Pulled By: riversand963 fbshipit-source-id: 7941e7ce342dc88ec2cd63e90f7674a2f57de6b7 | 21 November 2020, 02:40:24 UTC |
7c19d43 | anand76 | 20 November 2020, 06:37:55 UTC | Fix initialization order of DBOptions and kHostnameForDbHostId (#7702) Summary: Fix initialization order of DBOptions and kHostnameForDbHostId by making the initialization of the latter static rather than dynamic. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7702 Reviewed By: ajkr Differential Revision: D25111633 Pulled By: anand1976 fbshipit-source-id: 7afad834a66e40bcd8694a43b40d378695212224 | 20 November 2020, 06:39:40 UTC |
5c585e1 | Cheng Chang | 19 November 2020, 22:44:48 UTC | Ship the track WAL in MANIFEST feature (#7689) Summary: Updates the option description and HISTORY. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7689 Test Plan: N/A Reviewed By: zhichao-cao Differential Revision: D25056238 Pulled By: cheng-chang fbshipit-source-id: 6af1ef6f8dcf2173cbc0fccadc0e06cefd92bcae | 19 November 2020, 22:45:54 UTC |
a65e905 | Dylan Wen | 19 November 2020, 21:31:16 UTC | Fix typos in comments (#7687) Summary: Hi there, This PR fixes a few typos in comments in `cache/lru_cache.h`. Thanks Pull Request resolved: https://github.com/facebook/rocksdb/pull/7687 Reviewed By: ajkr Differential Revision: D25064674 Pulled By: jay-zhuang fbshipit-source-id: fe633369d5b82c5aac42d4ee8d551b9d657237d1 | 19 November 2020, 21:32:50 UTC |
7169ca9 | Cheng Chang | 19 November 2020, 05:24:32 UTC | Do not track empty WALs (#7697) Summary: An empty WAL won't be backed up by the BackupEngine. So if we track the empty WALs in MANIFEST, then when restoring from a backup, it may report corruption that the empty WAL is missing, which is correct because the WAL is actually in the main DB but not in the backup DB, but missing an empty WAL does not logically break DB consistency. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7697 Test Plan: watch existing tests to pass Reviewed By: pdillinger Differential Revision: D25077194 Pulled By: cheng-chang fbshipit-source-id: 01917b57234b92b6063925f2ee9452c5732bdc03 | 19 November 2020, 05:27:54 UTC |
8a97f35 | Cheng Chang | 18 November 2020, 22:53:14 UTC | Call out a bug in HISTORY (#7690) Summary: It's worth mentioning the corner case bug fixed in PR 7621. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7690 Test Plan: N/A Reviewed By: zhichao-cao Differential Revision: D25056678 Pulled By: cheng-chang fbshipit-source-id: 1ab42ec080f3ffe21f5d97acf65ee0af993112ba | 18 November 2020, 22:54:22 UTC |
6cacb0d | Akanksha Mahajan | 18 November 2020, 02:18:13 UTC | Add cmake-mingw in circle-build (#7144) Summary: Add cmake-mingw in circle-build Pull Request resolved: https://github.com/facebook/rocksdb/pull/7144 Test Plan: watch circle cmake-mingw build Reviewed By: jay-zhuang Differential Revision: D25039744 Pulled By: akankshamahajan15 fbshipit-source-id: 92584c9d5ad161b93d5e5a1303aac306e7985108 | 18 November 2020, 02:19:50 UTC |
8c93b16 | Cheng Chang | 17 November 2020, 23:54:49 UTC | Track WAL in MANIFEST: Update logic for computing min_log_number_to_keep in atomic flush (#7660) Summary: The logic for computing min_log_number_to_keep in atomic flush was incorrect. For example, when all column families are flushed, the min_log_number_to_keep should be the latest new log. But the incorrect logic calls `PrecomputeMinLogNumberToKeepNon2PC` for each column family, and returns the minimum of them. However, `PrecomputeMinLogNumberToKeepNon2PC(cf)` assumes column families other than `cf` are flushed, but in case all column families are flushed, this assumption is incorrect. Without this fix, the WAL referenced by the computed min_log_number_to_keep may actually contain no unflushed data, so the WAL might have actually been deleted from disk on recovery, then an incorrect error `Corruption: missing WAL` will be reported. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7660 Test Plan: run `make crash_test_with_atomic_flush` on devserver added a unit test in `db_flush_test` Reviewed By: riversand963 Differential Revision: D24906265 Pulled By: cheng-chang fbshipit-source-id: 08deda62e71f67f59e3b7925cdd86dd09bd4f430 | 17 November 2020, 23:55:55 UTC |
303d283 | Adam Retter | 17 November 2020, 23:28:56 UTC | RocksJava static lib dependencies should support MacOS 10.12+ (#7683) Summary: Expands on https://github.com/facebook/rocksdb/pull/7016 so that when `PORTABLE=1` is set the dependencies for RocksJava static target will also be built with backwards compatibility for MacOS as far back as 10.12 (i.e. 2016). Pull Request resolved: https://github.com/facebook/rocksdb/pull/7683 Reviewed By: ajkr Differential Revision: D25034164 Pulled By: pdillinger fbshipit-source-id: dc9e51828869ed9ec336a8a86683e4d0bfe04f27 | 17 November 2020, 23:34:05 UTC |
4c336c6 | Adam Retter | 17 November 2020, 23:27:51 UTC | Fix jemalloc compilation problem on macOS (#7624) Summary: Closes https://github.com/facebook/rocksdb/issues/7269 I have only tested this on macOS, let's see what CI makes of it for the other platforms... Pull Request resolved: https://github.com/facebook/rocksdb/pull/7624 Reviewed By: ajkr Differential Revision: D24834305 Pulled By: pdillinger fbshipit-source-id: ba818d8424297ccebd18ed854b044764c2dbab5f | 17 November 2020, 23:29:35 UTC |
699411b | Cheng Chang | 17 November 2020, 20:55:27 UTC | Fuzzing RocksDB (#7685) Summary: This is the initial PR to support adding fuzz tests to RocksDB. It includes the necessary build infrastructure, and includes an example fuzzer. There is also a README serving as the tutorial for how to add more tests. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7685 Test Plan: Manually build and run the fuzz test according to README. Reviewed By: pdillinger Differential Revision: D25013847 Pulled By: cheng-chang fbshipit-source-id: c91e3b337398d7f4d8f769fd5091cd080487b171 | 17 November 2020, 20:56:48 UTC |
84a7008 | Yanqin Jin | 17 November 2020, 08:43:20 UTC | Fix the logic of setting read_amp_bytes_per_bit from OPTIONS file (#7680) Summary: Instead of using `EncodeFixed32` which always serialize a integer to little endian, we should use the local machine's endianness when populating a native data structure during options parsing. Without this fix, `read_amp_bytes_per_bit` may be populated incorrectly on big-endian machines. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7680 Test Plan: make check Reviewed By: pdillinger Differential Revision: D24999166 Pulled By: riversand963 fbshipit-source-id: dc603cff6e17f8fa32479ce6df93b93082e6b0c4 | 17 November 2020, 08:44:30 UTC |
869f053 | Yanqin Jin | 17 November 2020, 06:07:29 UTC | Clean up after two test failures in db_basic_test (#7682) Summary: In db_basic_test.cc, there are two tests that rely on the underlying system's `LockFile` support to function correctly: DBBasicTest.OpenWhenOpen and DBBasicTest.CheckLock. In both tests, re-opening a db using `DB::Open` is expected to fail because the second open cannot lock the LOCK file. Some distributed file systems, e.g. HDFS do not support the POSIX-style file lock. Therefore, these unit tests will cause assertion failure and the second `Open` will create a db instance. Currently, these db instances are not closed after the assertion failure. Since these db instances are registered with some process-wide, static data structures, e.g. `PeriodicWorkScheduler::Default()`, they can still be accessed after the unit tests. However, the `Env` object created for this db instance is destroyed when the test finishes in `~DBTestBase()`. Consequently, it causes illegal memory access. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7682 Test Plan: Run the following on a distributed file system: ``` make check ``` Reviewed By: anand1976 Differential Revision: D25004215 Pulled By: riversand963 fbshipit-source-id: f4327d7716c0e72b13bb43737ec9a5d156da4d52 | 17 November 2020, 06:09:01 UTC |
9627e34 | anand76 | 17 November 2020, 02:41:43 UTC | Use default FileSystem in GenerateUniqueId (#7672) Summary: Use ```FileSystem::Default``` to read ```/proc/sys/kernel/uuid```, so it works for ```Envs``` with remote ```FileSystem``` as well. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7672 Reviewed By: riversand963 Differential Revision: D24998702 Pulled By: anand1976 fbshipit-source-id: fa95c1d70f0e4ed17561201f047aa055046d06c3 | 17 November 2020, 04:48:13 UTC |
1c5f13f | Andrew Kryczka | 17 November 2020, 01:58:59 UTC | Fail early when `merge_operator` not configured (#7667) Summary: An application may accidentally write merge operands without properly configuring `merge_operator`. We should alert them as early as possible that there's an API misuse. Previously RocksDB only notified them when a query or background operation needed to merge but couldn't. With this PR, RocksDB notifies them of the problem before applying the merge operand to the memtable (although it may already be in WAL, which seems it'd cause a crash loop until they enable `merge_operator`). Pull Request resolved: https://github.com/facebook/rocksdb/pull/7667 Reviewed By: riversand963 Differential Revision: D24933360 Pulled By: ajkr fbshipit-source-id: 3a4a2ceb0b7aed184113dd03b8efd735a8332f7f | 17 November 2020, 04:39:01 UTC |
7582c56 | jsteemann | 16 November 2020, 23:51:21 UTC | add ArangoDB to USERS.md, and fix typos in that file (#7675) Summary: Add ArangoDB to USERS.md. We are using RocksDB since 2016. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7675 Reviewed By: riversand963 Differential Revision: D24998955 Pulled By: ajkr fbshipit-source-id: 82c656bf56589e52aff8c491bab6fbc19b52cc91 | 17 November 2020, 02:29:51 UTC |
1861de4 | Mammo, Mulugeta | 16 November 2020, 21:05:21 UTC | Add arena_block_size flag to db_bench (#7654) Summary: db_bench currently does not allow overriding the default `arena_block_size `calculation ([memtable size/8](https://github.com/facebook/rocksdb/blob/master/db/column_family.cc#L216)). For memtables whose size is in gigabytes, the `arena_block_size` defaults to hundreds of megabytes (affecting performance). Exposing this option in db_bench would allow us to test the workloads with various `arena_block_size` values. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7654 Reviewed By: jay-zhuang Differential Revision: D24996812 Pulled By: ajkr fbshipit-source-id: a5e3d2c83d9f89e1bb8382f2e8dd476c79e33bef | 16 November 2020, 21:06:30 UTC |