2020-05-29 20:24:19 +02:00
|
|
|
#!/usr/bin/env python3
|
2019-04-18 19:51:19 +02:00
|
|
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
2020-03-25 04:57:53 +01:00
|
|
|
from __future__ import absolute_import, division, print_function, unicode_literals
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
import os
|
2015-09-08 23:23:42 +02:00
|
|
|
import glob
|
2013-01-11 20:09:23 +01:00
|
|
|
import os.path
|
|
|
|
import shutil
|
|
|
|
import subprocess
|
|
|
|
import time
|
|
|
|
import unittest
|
|
|
|
import tempfile
|
2015-09-08 23:23:42 +02:00
|
|
|
import re
|
2013-01-11 20:09:23 +01:00
|
|
|
|
|
|
|
def my_check_output(*popenargs, **kwargs):
    """
    Run a command and return its decoded stdout.

    Historically a stop-gap for Python 2.6's missing
    subprocess.check_output; kept because callers depend on a plain
    Exception (not CalledProcessError) being raised on failure, and on
    the exact "Exit code is not 0. ..." message format.

    Raises:
        ValueError: if a 'stdout' argument is supplied (it is overridden).
        Exception: if the command exits with a non-zero status.
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    # Use a context manager so the stdout/stderr pipe file descriptors are
    # always closed (the original leaked them on every call).
    with subprocess.Popen(*popenargs, stderr=subprocess.PIPE,
                          stdout=subprocess.PIPE, **kwargs) as process:
        output, unused_err = process.communicate()
        retcode = process.poll()
    if retcode:
        # Prefer an explicit args= kwarg for the error message, else the
        # first positional argument (the command itself).
        cmd = kwargs.get("args")
        if cmd is None:
            cmd = popenargs[0]
        raise Exception("Exit code is not 0. It is %d. Command: %s" %
                        (retcode, cmd))
    return output.decode('utf-8')
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2013-03-11 22:31:06 +01:00
|
|
|
def run_err_null(cmd):
    """Run *cmd* through the shell with stderr discarded; return the
    raw os.system() exit status (0 on success)."""
    silenced = cmd + " 2>/dev/null "
    return os.system(silenced)
|
2013-01-11 20:09:23 +01:00
|
|
|
|
|
|
|
class LDBTestCase(unittest.TestCase):
|
|
|
|
    def setUp(self):
        # Fresh scratch directory per test; removed again in tearDown().
        self.TMP_DIR = tempfile.mkdtemp(prefix="ldb_test_")
        # Default database name used by dbParam()/assertRunOK() helpers.
        self.DB_NAME = "testdb"
|
|
|
|
|
|
|
|
    def tearDown(self):
        # Guard against catastrophic rmtree of '/' or '/tmp' if TMP_DIR were
        # ever corrupted. NOTE(review): `assert` is stripped under
        # `python -O`, so this protection vanishes in optimized runs.
        assert(self.TMP_DIR.strip() != "/"
               and self.TMP_DIR.strip() != "/tmp"
               and self.TMP_DIR.strip() != "/tmp/") #Just some paranoia

        shutil.rmtree(self.TMP_DIR)
|
|
|
|
|
|
|
|
def dbParam(self, dbName):
|
|
|
|
return "--db=%s" % os.path.join(self.TMP_DIR, dbName)
|
|
|
|
|
2015-09-08 23:23:42 +02:00
|
|
|
    def assertRunOKFull(self, params, expectedOutput, unexpected=False,
                        isPattern=False):
        """
        All command-line params must be specified.
        Allows full flexibility in testing; for example: missing db param.

        If isPattern is True, expectedOutput is a compiled regex; otherwise
        it is compared as a (stripped) literal string. If unexpected is
        True, the check is inverted: the expectation must NOT hold.
        """
        # Run ./ldb; the grep filters a benign startup notice from the output.
        output = my_check_output("./ldb %s |grep -v \"Created bg thread\"" %
                                 params, shell=True)
        if not unexpected:
            if isPattern:
                # Regex must match somewhere in the stripped output.
                self.assertNotEqual(expectedOutput.search(output.strip()),
                                    None)
            else:
                self.assertEqual(output.strip(), expectedOutput.strip())
        else:
            # Inverted: the expected output must be absent.
            if isPattern:
                self.assertEqual(expectedOutput.search(output.strip()), None)
            else:
                self.assertNotEqual(output.strip(), expectedOutput.strip())
|
2013-01-11 20:09:23 +01:00
|
|
|
|
|
|
|
    def assertRunFAILFull(self, params):
        """
        All command-line params must be specified.
        Allows full flexibility in testing; for example: missing db param.

        Passes iff running ./ldb with *params* exits non-zero
        (my_check_output then raises a plain Exception).
        """
        try:
            # The backslash continues the string literal; the grep pattern is
            # "Created bg thread" with the continuation at column 0.
            my_check_output("./ldb %s >/dev/null 2>&1 |grep -v \"Created bg \
thread\"" % params, shell=True)
        except Exception:
            return
        self.fail(
            "Exception should have been raised for command with params: %s" %
            params)
|
|
|
|
|
2013-05-14 04:11:56 +02:00
|
|
|
def assertRunOK(self, params, expectedOutput, unexpected=False):
|
2013-01-11 20:09:23 +01:00
|
|
|
"""
|
|
|
|
Uses the default test db.
|
|
|
|
"""
|
|
|
|
self.assertRunOKFull("%s %s" % (self.dbParam(self.DB_NAME), params),
|
2013-05-14 04:11:56 +02:00
|
|
|
expectedOutput, unexpected)
|
2013-01-11 20:09:23 +01:00
|
|
|
|
|
|
|
    def assertRunFAIL(self, params):
        """
        Uses the default test db.

        Expects the ldb command to fail (non-zero exit) when run with
        *params* against the default test database.
        """
        self.assertRunFAILFull("%s %s" % (self.dbParam(self.DB_NAME), params))
|
|
|
|
|
|
|
|
    def testSimpleStringPutGet(self):
        """End-to-end put/get/scan/delete round-trip on plain string keys."""
        print("Running testSimpleStringPutGet...")
        # put must fail on a fresh db without --create_if_missing.
        self.assertRunFAIL("put x1 y1")
        self.assertRunOK("put --create_if_missing x1 y1", "OK")
        self.assertRunOK("get x1", "y1")
        self.assertRunFAIL("get x2")

        self.assertRunOK("put x2 y2", "OK")
        self.assertRunOK("get x1", "y1")
        self.assertRunOK("get x2", "y2")
        self.assertRunFAIL("get x3")

        # scan honors --from/--to bounds ([from, to) semantics per the
        # expectations below).
        self.assertRunOK("scan --from=x1 --to=z", "x1 : y1\nx2 : y2")
        self.assertRunOK("put x3 y3", "OK")

        self.assertRunOK("scan --from=x1 --to=z", "x1 : y1\nx2 : y2\nx3 : y3")
        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3")
        self.assertRunOK("scan --from=x", "x1 : y1\nx2 : y2\nx3 : y3")

        self.assertRunOK("scan --to=x2", "x1 : y1")
        # --max_keys caps the number of rows; values past the key count are
        # simply not printed.
        self.assertRunOK("scan --from=x1 --to=z --max_keys=1", "x1 : y1")
        self.assertRunOK("scan --from=x1 --to=z --max_keys=2",
                         "x1 : y1\nx2 : y2")

        self.assertRunOK("scan --from=x1 --to=z --max_keys=3",
                         "x1 : y1\nx2 : y2\nx3 : y3")
        self.assertRunOK("scan --from=x1 --to=z --max_keys=4",
                         "x1 : y1\nx2 : y2\nx3 : y3")
        self.assertRunOK("scan --from=x1 --to=x2", "x1 : y1")
        self.assertRunOK("scan --from=x2 --to=x4", "x2 : y2\nx3 : y3")
        self.assertRunFAIL("scan --from=x4 --to=z")  # No results => FAIL
        # Non-numeric --max_keys is rejected.
        self.assertRunFAIL("scan --from=x1 --to=z --max_keys=foo")

        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3")

        self.assertRunOK("delete x1", "OK")
        self.assertRunOK("scan", "x2 : y2\nx3 : y3")

        self.assertRunOK("delete NonExistentKey", "OK")
        # It is weird that GET and SCAN raise exception for
        # non-existent key, while delete does not

        self.assertRunOK("checkconsistency", "OK")
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
    def dumpDb(self, params, dumpFile):
        # True iff `./ldb dump` exited 0; its output is redirected to dumpFile.
        return 0 == run_err_null("./ldb dump %s > %s" % (params, dumpFile))
|
2013-01-11 20:09:23 +01:00
|
|
|
|
|
|
|
    def loadDb(self, params, dumpFile):
        # True iff `./ldb load` exited 0, reading dumpFile via stdin.
        return 0 == run_err_null("cat %s | ./ldb load %s" % (dumpFile, params))
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2018-08-09 23:18:59 +02:00
|
|
|
    def writeExternSst(self, params, inputDumpFile, outputSst):
        # True iff `./ldb write_extern_sst` exited 0. The dump file is piped
        # in on stdin; note the SST path is placed before the extra params
        # on the command line.
        return 0 == run_err_null("cat %s | ./ldb write_extern_sst %s %s"
                                 % (inputDumpFile, outputSst, params))
|
|
|
|
|
|
|
|
    def ingestExternSst(self, params, inputSst):
        # True iff `./ldb ingest_extern_sst` exited 0 ingesting inputSst.
        return 0 == run_err_null("./ldb ingest_extern_sst %s %s"
                                 % (inputSst, params))
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
    def testStringBatchPut(self):
        """batchput: multi-key writes, quoted operands, and arg validation."""
        print("Running testStringBatchPut...")
        self.assertRunOK("batchput x1 y1 --create_if_missing", "OK")
        self.assertRunOK("scan", "x1 : y1")
        # Quoted operands may contain spaces.
        self.assertRunOK("batchput x2 y2 x3 y3 \"x4 abc\" \"y4 xyz\"", "OK")
        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 abc : y4 xyz")
        # batchput requires a non-zero, even number of key/value operands.
        self.assertRunFAIL("batchput")
        self.assertRunFAIL("batchput k1")
        self.assertRunFAIL("batchput k1 v1 k2")
|
|
|
|
|
2022-02-26 08:13:11 +01:00
|
|
|
    def testBlobBatchPut(self):
        """batchput with --enable_blob_files must create at least one .blob file."""
        print("Running testBlobBatchPut...")

        dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
        self.assertRunOK("batchput x1 y1 --create_if_missing --enable_blob_files", "OK")
        self.assertRunOK("scan", "x1 : y1")
        self.assertRunOK("batchput --enable_blob_files x2 y2 x3 y3 \"x4 abc\" \"y4 xyz\"", "OK")
        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 abc : y4 xyz")

        # Values should have been written out-of-line into blob files.
        blob_files = self.getBlobFiles(dbPath)
        self.assertTrue(len(blob_files) >= 1)
|
|
|
|
|
|
|
|
    def testBlobPut(self):
        """put with --enable_blob_files must create at least one .blob file."""
        print("Running testBlobPut...")

        dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
        self.assertRunOK("put --create_if_missing --enable_blob_files x1 y1", "OK")
        self.assertRunOK("get x1", "y1")
        self.assertRunOK("put --enable_blob_files x2 y2", "OK")
        self.assertRunOK("get x1", "y1")
        self.assertRunOK("get x2", "y2")
        self.assertRunFAIL("get x3")

        # Values should have been written out-of-line into blob files.
        blob_files = self.getBlobFiles(dbPath)
        self.assertTrue(len(blob_files) >= 1)
|
|
|
|
|
2013-11-01 21:59:14 +01:00
|
|
|
    def testCountDelimDump(self):
        """dump --count_delim groups keys by prefix and reports count/size."""
        print("Running testCountDelimDump...")
        self.assertRunOK("batchput x.1 x1 --create_if_missing", "OK")
        self.assertRunOK("batchput y.abc abc y.2 2 z.13c pqr", "OK")
        # Bare --count_delim uses '.' as the default grouping delimiter.
        self.assertRunOK("dump --count_delim", "x => count:1\tsize:5\ny => count:2\tsize:12\nz => count:1\tsize:8")
        self.assertRunOK("dump --count_delim=\".\"", "x => count:1\tsize:5\ny => count:2\tsize:12\nz => count:1\tsize:8")
        self.assertRunOK("batchput x,2 x2 x,abc xabc", "OK")
        # With ',' as delimiter, keys lacking a ',' are listed individually.
        self.assertRunOK("dump --count_delim=\",\"", "x => count:2\tsize:14\nx.1 => count:1\tsize:5\ny.2 => count:1\tsize:4\ny.abc => count:1\tsize:8\nz.13c => count:1\tsize:8")
|
|
|
|
|
|
|
|
    def testCountDelimIDump(self):
        """Same prefix-grouping checks as testCountDelimDump, via idump."""
        print("Running testCountDelimIDump...")
        self.assertRunOK("batchput x.1 x1 --create_if_missing", "OK")
        self.assertRunOK("batchput y.abc abc y.2 2 z.13c pqr", "OK")
        # Bare --count_delim uses '.' as the default grouping delimiter.
        self.assertRunOK("idump --count_delim", "x => count:1\tsize:5\ny => count:2\tsize:12\nz => count:1\tsize:8")
        self.assertRunOK("idump --count_delim=\".\"", "x => count:1\tsize:5\ny => count:2\tsize:12\nz => count:1\tsize:8")
        self.assertRunOK("batchput x,2 x2 x,abc xabc", "OK")
        # With ',' as delimiter, keys lacking a ',' are listed individually.
        self.assertRunOK("idump --count_delim=\",\"", "x => count:2\tsize:14\nx.1 => count:1\tsize:5\ny.2 => count:1\tsize:4\ny.abc => count:1\tsize:8\nz.13c => count:1\tsize:8")
|
2013-11-01 21:59:14 +01:00
|
|
|
|
2013-05-14 04:11:56 +02:00
|
|
|
    def testInvalidCmdLines(self):
        # NOTE(review): this method is shadowed by a later definition of the
        # same name in this class (marked noqa: F811), so it never runs under
        # unittest discovery. The later copy also adds one more assertion.
        print("Running testInvalidCmdLines...")
        # db not specified
        self.assertRunFAILFull("put 0x6133 0x6233 --hex --create_if_missing")
        # No param called he
        self.assertRunFAIL("put 0x6133 0x6233 --he --create_if_missing")
        # max_keys is not applicable for put
        self.assertRunFAIL("put 0x6133 0x6233 --max_keys=1 --create_if_missing")
        # hex has invalid boolean value
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
    def testHexPutGet(self):
        """Exercise --hex / --key_hex / --value_hex encodings across commands.

        0x6131 is the hex encoding of "a1", 0x6231 of "b1", etc.
        """
        print("Running testHexPutGet...")
        self.assertRunOK("put a1 b1 --create_if_missing", "OK")
        self.assertRunOK("scan", "a1 : b1")
        self.assertRunOK("scan --hex", "0x6131 : 0x6231")
        # Hex operands must carry the 0x prefix.
        self.assertRunFAIL("put --hex 6132 6232")
        self.assertRunOK("put --hex 0x6132 0x6232", "OK")
        self.assertRunOK("scan --hex", "0x6131 : 0x6231\n0x6132 : 0x6232")
        self.assertRunOK("scan", "a1 : b1\na2 : b2")
        self.assertRunOK("get a1", "b1")
        self.assertRunOK("get --hex 0x6131", "0x6231")
        self.assertRunOK("get a2", "b2")
        self.assertRunOK("get --hex 0x6132", "0x6232")
        # --key_hex / --value_hex apply the encoding to one side only.
        self.assertRunOK("get --key_hex 0x6132", "b2")
        self.assertRunOK("get --key_hex --value_hex 0x6132", "0x6232")
        self.assertRunOK("get --value_hex a2", "0x6232")
        self.assertRunOK("scan --key_hex --value_hex",
                         "0x6131 : 0x6231\n0x6132 : 0x6232")
        self.assertRunOK("scan --hex --from=0x6131 --to=0x6133",
                         "0x6131 : 0x6231\n0x6132 : 0x6232")
        self.assertRunOK("scan --hex --from=0x6131 --to=0x6132",
                         "0x6131 : 0x6231")
        self.assertRunOK("scan --key_hex", "0x6131 : b1\n0x6132 : b2")
        self.assertRunOK("scan --value_hex", "a1 : 0x6231\na2 : 0x6232")
        self.assertRunOK("batchput --hex 0x6133 0x6233 0x6134 0x6234", "OK")
        self.assertRunOK("scan", "a1 : b1\na2 : b2\na3 : b3\na4 : b4")
        self.assertRunOK("delete --hex 0x6133", "OK")
        self.assertRunOK("scan", "a1 : b1\na2 : b2\na4 : b4")
        self.assertRunOK("checkconsistency", "OK")
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2013-05-14 04:11:56 +02:00
|
|
|
    def testTtlPutGet(self):
        """put/get/dump/scan with --ttl; TTL values carry a timestamp suffix."""
        print("Running testTtlPutGet...")
        self.assertRunOK("put a1 b1 --ttl --create_if_missing", "OK")
        # Without --ttl the raw stored value includes the timestamp suffix,
        # so the plain expectations below are asserted NOT to match
        # (unexpected=True).
        self.assertRunOK("scan --hex", "0x6131 : 0x6231", True)
        self.assertRunOK("dump --ttl ", "a1 ==> b1", True)
        self.assertRunOK("dump --hex --ttl ",
                         "0x6131 ==> 0x6231\nKeys in range: 1")
        self.assertRunOK("scan --hex --ttl", "0x6131 : 0x6231")
        self.assertRunOK("get --value_hex a1", "0x6231", True)
        self.assertRunOK("get --ttl a1", "b1")
        self.assertRunOK("put a3 b3 --create_if_missing", "OK")
        # fails because timestamp's length is greater than value's
        self.assertRunFAIL("get --ttl a3")
        self.assertRunOK("checkconsistency", "OK")
|
2013-05-14 04:11:56 +02:00
|
|
|
|
2018-01-29 21:43:56 +01:00
|
|
|
    def testInvalidCmdLines(self):  # noqa: F811 T25377293 Grandfathered in
        """Malformed ldb invocations must be rejected with non-zero exit."""
        print("Running testInvalidCmdLines...")
        # db not specified
        self.assertRunFAILFull("put 0x6133 0x6233 --hex --create_if_missing")
        # No param called he
        self.assertRunFAIL("put 0x6133 0x6233 --he --create_if_missing")
        # max_keys is not applicable for put
        self.assertRunFAIL("put 0x6133 0x6233 --max_keys=1 --create_if_missing")
        # hex has invalid boolean value
        self.assertRunFAIL("put 0x6133 0x6233 --hex=Boo --create_if_missing")
|
|
|
|
|
|
|
|
    def testDumpLoad(self):
        """Round-trip `ldb dump` / `ldb load` under many option combinations."""
        print("Running testDumpLoad...")
        self.assertRunOK("batchput --create_if_missing x1 y1 x2 y2 x3 y3 x4 y4",
                         "OK")
        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
        origDbPath = os.path.join(self.TMP_DIR, self.DB_NAME)

        # Dump and load without any additional params specified
        dumpFilePath = os.path.join(self.TMP_DIR, "dump1")
        loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump1")
        self.assertTrue(self.dumpDb("--db=%s" % origDbPath, dumpFilePath))
        self.assertTrue(self.loadDb(
            "--db=%s --create_if_missing" % loadedDbPath, dumpFilePath))
        self.assertRunOKFull("scan --db=%s" % loadedDbPath,
                             "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")

        # Dump and load in hex
        dumpFilePath = os.path.join(self.TMP_DIR, "dump2")
        loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump2")
        self.assertTrue(self.dumpDb("--db=%s --hex" % origDbPath, dumpFilePath))
        self.assertTrue(self.loadDb(
            "--db=%s --hex --create_if_missing" % loadedDbPath, dumpFilePath))
        self.assertRunOKFull("scan --db=%s" % loadedDbPath,
                             "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")

        # Dump only a portion of the key range
        dumpFilePath = os.path.join(self.TMP_DIR, "dump3")
        loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump3")
        self.assertTrue(self.dumpDb(
            "--db=%s --from=x1 --to=x3" % origDbPath, dumpFilePath))
        self.assertTrue(self.loadDb(
            "--db=%s --create_if_missing" % loadedDbPath, dumpFilePath))
        self.assertRunOKFull("scan --db=%s" % loadedDbPath, "x1 : y1\nx2 : y2")

        # Dump up to max_keys rows
        dumpFilePath = os.path.join(self.TMP_DIR, "dump4")
        loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump4")
        self.assertTrue(self.dumpDb(
            "--db=%s --max_keys=3" % origDbPath, dumpFilePath))
        self.assertTrue(self.loadDb(
            "--db=%s --create_if_missing" % loadedDbPath, dumpFilePath))
        self.assertRunOKFull("scan --db=%s" % loadedDbPath,
                             "x1 : y1\nx2 : y2\nx3 : y3")

        # Load into an existing db, create_if_missing is not specified
        # (reuses dump4's paths; the target db already exists).
        self.assertTrue(self.dumpDb("--db=%s" % origDbPath, dumpFilePath))
        self.assertTrue(self.loadDb("--db=%s" % loadedDbPath, dumpFilePath))
        self.assertRunOKFull("scan --db=%s" % loadedDbPath,
                             "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")

        # Dump and load with WAL disabled
        dumpFilePath = os.path.join(self.TMP_DIR, "dump5")
        loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump5")
        self.assertTrue(self.dumpDb("--db=%s" % origDbPath, dumpFilePath))
        self.assertTrue(self.loadDb(
            "--db=%s --disable_wal --create_if_missing" % loadedDbPath,
            dumpFilePath))
        self.assertRunOKFull("scan --db=%s" % loadedDbPath,
                             "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")

        # Dump and load with lots of extra params specified
        extraParams = " ".join(["--bloom_bits=14", "--block_size=1024",
                                "--auto_compaction=true",
                                "--write_buffer_size=4194304",
                                "--file_size=2097152"])
        dumpFilePath = os.path.join(self.TMP_DIR, "dump6")
        loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump6")
        self.assertTrue(self.dumpDb(
            "--db=%s %s" % (origDbPath, extraParams), dumpFilePath))
        self.assertTrue(self.loadDb(
            "--db=%s %s --create_if_missing" % (loadedDbPath, extraParams),
            dumpFilePath))
        self.assertRunOKFull("scan --db=%s" % loadedDbPath,
                             "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")

        # Dump with count_only
        dumpFilePath = os.path.join(self.TMP_DIR, "dump7")
        loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump7")
        self.assertTrue(self.dumpDb(
            "--db=%s --count_only" % origDbPath, dumpFilePath))
        self.assertTrue(self.loadDb(
            "--db=%s --create_if_missing" % loadedDbPath, dumpFilePath))
        # DB should have at least one value for scan to work
        self.assertRunOKFull("put --db=%s k1 v1" % loadedDbPath, "OK")
        self.assertRunOKFull("scan --db=%s" % loadedDbPath, "k1 : v1")

        # Dump command fails because of typo in params
        dumpFilePath = os.path.join(self.TMP_DIR, "dump8")
        self.assertFalse(self.dumpDb(
            "--db=%s --create_if_missing" % origDbPath, dumpFilePath))

        # Dump and load with BlobDB enabled
        blobParams = " ".join(["--enable_blob_files", "--min_blob_size=1",
                               "--blob_file_size=2097152"])
        dumpFilePath = os.path.join(self.TMP_DIR, "dump9")
        loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump9")
        self.assertTrue(self.dumpDb(
            "--db=%s" % (origDbPath), dumpFilePath))
        self.assertTrue(self.loadDb(
            "--db=%s %s --create_if_missing --disable_wal" % (loadedDbPath, blobParams),
            dumpFilePath))
        self.assertRunOKFull("scan --db=%s" % loadedDbPath,
                             "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
        blob_files = self.getBlobFiles(loadedDbPath)
        self.assertTrue(len(blob_files) >= 1)
|
|
|
|
|
2017-05-12 23:59:57 +02:00
|
|
|
    def testIDumpBasics(self):
        """idump prints internal keys with sequence numbers and value types."""
        print("Running testIDumpBasics...")
        self.assertRunOK("put a val --create_if_missing", "OK")
        self.assertRunOK("put b val", "OK")
        self.assertRunOK(
            "idump", "'a' seq:1, type:1 => val\n"
            "'b' seq:2, type:1 => val\nInternal keys in range: 2")
        # --input_key_hex: bounds are the hex codes of 'a' and 'b'
        # ([from, to) — only 'a' is included).
        self.assertRunOK(
            "idump --input_key_hex --from=%s --to=%s" % (hex(ord('a')),
                                                         hex(ord('b'))),
            "'a' seq:1, type:1 => val\nInternal keys in range: 1")
|
|
|
|
|
2022-04-20 20:10:20 +02:00
|
|
|
def testIDumpDecodeBlobIndex(self):
|
|
|
|
print("Running testIDumpDecodeBlobIndex...")
|
|
|
|
self.assertRunOK("put a val --create_if_missing", "OK")
|
|
|
|
self.assertRunOK("put b val --enable_blob_files", "OK")
|
|
|
|
|
|
|
|
# Pattern to expect from dump with decode_blob_index flag enabled.
|
|
|
|
regex = ".*\[blob ref\].*"
|
|
|
|
expected_pattern = re.compile(regex)
|
|
|
|
cmd = "idump %s --decode_blob_index"
|
|
|
|
self.assertRunOKFull((cmd)
|
|
|
|
% (self.dbParam(self.DB_NAME)),
|
|
|
|
expected_pattern, unexpected=False,
|
|
|
|
isPattern=True)
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
    def testMiscAdminTask(self):
        """Smoke-test compact, reduce_levels and dump_wal admin commands.

        Each admin command is followed by a scan to verify the data
        survived intact.
        """
        print("Running testMiscAdminTask...")
        # These tests need to be improved; for example with asserts about
        # whether compaction or level reduction actually took place.
        self.assertRunOK("batchput --create_if_missing x1 y1 x2 y2 x3 y3 x4 y4",
                         "OK")
        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
        origDbPath = os.path.join(self.TMP_DIR, self.DB_NAME)

        self.assertTrue(0 == run_err_null(
            "./ldb compact --db=%s" % origDbPath))
        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")

        self.assertTrue(0 == run_err_null(
            "./ldb reduce_levels --db=%s --new_levels=2" % origDbPath))
        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")

        self.assertTrue(0 == run_err_null(
            "./ldb reduce_levels --db=%s --new_levels=3" % origDbPath))
        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")

        # Range compaction, plain and hex-encoded bounds.
        self.assertTrue(0 == run_err_null(
            "./ldb compact --db=%s --from=x1 --to=x3" % origDbPath))
        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")

        self.assertTrue(0 == run_err_null(
            "./ldb compact --db=%s --hex --from=0x6131 --to=0x6134"
            % origDbPath))
        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")

        #TODO(dilip): Not sure what should be passed to WAL.Currently corrupted.
        self.assertTrue(0 == run_err_null(
            "./ldb dump_wal --db=%s --walfile=%s --header" % (
                origDbPath, os.path.join(origDbPath, "LOG"))))
        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
|
|
|
|
|
2014-03-20 21:42:45 +01:00
|
|
|
    def testCheckConsistency(self):
        """checkconsistency must fail after an SST file is corrupted or removed."""
        print("Running testCheckConsistency...")

        dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
        self.assertRunOK("put x1 y1 --create_if_missing", "OK")
        self.assertRunOK("put x2 y2", "OK")
        self.assertRunOK("get x1", "y1")
        self.assertRunOK("checkconsistency", "OK")

        # Locate an SST file via the shell (glob expanded by `ls`).
        sstFilePath = my_check_output("ls %s" % os.path.join(dbPath, "*.sst"),
                                      shell=True)

        # Modify the file
        my_check_output("echo 'evil' > %s" % sstFilePath, shell=True)
        self.assertRunFAIL("checkconsistency")

        # Delete the file
        my_check_output("rm -f %s" % sstFilePath, shell=True)
        self.assertRunFAIL("checkconsistency")
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
    def dumpLiveFiles(self, params, dumpFile):
        # True iff `./ldb dump_live_files` exited 0; output goes to dumpFile.
        return 0 == run_err_null("./ldb dump_live_files %s > %s" % (
            params, dumpFile))
|
|
|
|
|
|
|
|
    def testDumpLiveFiles(self):
        """dump_live_files: path normalization and blob-index decoding."""
        print("Running testDumpLiveFiles...")

        dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
        self.assertRunOK("put x1 y1 --create_if_missing", "OK")
        self.assertRunOK("put x2 y2 --enable_blob_files", "OK")
        dumpFilePath = os.path.join(self.TMP_DIR, "dump1")
        self.assertTrue(self.dumpLiveFiles("--db=%s" % dbPath, dumpFilePath))
        self.assertRunOK("delete x1", "OK")
        self.assertRunOK("put x3 y3", "OK")
        dumpFilePath = os.path.join(self.TMP_DIR, "dump2")

        # Test that if the user provides a db path that ends with
        # a slash '/', there is no double (or more!) slashes in the
        # SST and manifest file names.

        # Add a '/' at the end of dbPath (which normally shouldn't contain any)
        if dbPath[-1] != "/":
            dbPath += "/"

        # Call the dump_live_files function with the edited dbPath name.
        self.assertTrue(self.dumpLiveFiles("--db=%s --decode_blob_index --dump_uncompressed_blobs" % dbPath, dumpFilePath))

        # Investigate the output
        with open(dumpFilePath, "r") as tmp:
            data = tmp.read()

        # Check that all the SST filenames have a correct full path (no multiple '/').
        sstFileList = re.findall(r"%s.*\d+.sst" % dbPath, data)
        self.assertTrue(len(sstFileList) >= 1)
        for sstFilename in sstFileList:
            filenumber = re.findall(r"\d+.sst", sstFilename)[0]
            self.assertEqual(sstFilename, dbPath+filenumber)

        # Check that all the Blob filenames have a correct full path (no multiple '/').
        blobFileList = re.findall(r"%s.*\d+.blob" % dbPath, data)
        self.assertTrue(len(blobFileList) >= 1)
        for blobFilename in blobFileList:
            filenumber = re.findall(r"\d+.blob", blobFilename)[0]
            self.assertEqual(blobFilename, dbPath+filenumber)

        # Check that all the manifest filenames
        # have a correct full path (no multiple '/').
        manifestFileList = re.findall(r"%s.*MANIFEST-\d+" % dbPath, data)
        self.assertTrue(len(manifestFileList) >= 1)
        for manifestFilename in manifestFileList:
            filenumber = re.findall(r"(?<=MANIFEST-)\d+", manifestFilename)[0]
            self.assertEqual(manifestFilename, dbPath+"MANIFEST-"+filenumber)

        # Check that the blob file index is decoded.
        decodedBlobIndex = re.findall(r"\[blob ref\]", data)
        self.assertTrue(len(decodedBlobIndex) >= 1)
|
|
|
|
|
Add list live files metadata (#8446)
Summary:
Add an argument to ldb to dump live file names, column families, and levels, `list_live_files_metadata`. The output shows all active SST file names, sorted first by column family and then by level. For each level the SST files are sorted alphabetically.
Typically, the output looks like this:
```
./ldb --db=/tmp/test_db list_live_files_metadata
Live SST Files:
===== Column Family: default =====
---------- level 0 ----------
/tmp/test_db/000069.sst
---------- level 1 ----------
/tmp/test_db/000064.sst
/tmp/test_db/000065.sst
/tmp/test_db/000066.sst
/tmp/test_db/000071.sst
---------- level 2 ----------
/tmp/test_db/000038.sst
/tmp/test_db/000039.sst
/tmp/test_db/000052.sst
/tmp/test_db/000067.sst
/tmp/test_db/000070.sst
------------------------------
```
Second, a flag was added `--sort_by_filename`, to change the layout of the output. When this flag is added to the command, the output shows all active SST files sorted by name, in front of which the LSM level and the column family are mentioned. With the same example, the following command would return:
```
./ldb --db=/tmp/test_db list_live_files_metadata --sort_by_filename
Live SST Files:
/tmp/test_db/000038.sst : level 2, column family 'default'
/tmp/test_db/000039.sst : level 2, column family 'default'
/tmp/test_db/000052.sst : level 2, column family 'default'
/tmp/test_db/000064.sst : level 1, column family 'default'
/tmp/test_db/000065.sst : level 1, column family 'default'
/tmp/test_db/000066.sst : level 1, column family 'default'
/tmp/test_db/000067.sst : level 2, column family 'default'
/tmp/test_db/000069.sst : level 0, column family 'default'
/tmp/test_db/000070.sst : level 2, column family 'default'
/tmp/test_db/000071.sst : level 1, column family 'default'
------------------------------
```
Thus, the user can either request to show the files by levels, or sorted by filenames.
This PR includes a simple Python unit test that makes sure the file name and level printed out by this new feature matches the one found with an existing feature, `dump_live_file`.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8446
Reviewed By: akankshamahajan15
Differential Revision: D29320080
Pulled By: bjlemaire
fbshipit-source-id: 01fb7b5637c59010d74c80730a28d815994e7009
2021-06-23 04:06:44 +02:00
|
|
|
    def listLiveFilesMetadata(self, params, dumpFile):
        # True iff `./ldb list_live_files_metadata` exited 0; output goes to
        # dumpFile.
        return 0 == run_err_null("./ldb list_live_files_metadata %s > %s" % (
            params, dumpFile))
|
|
|
|
|
|
|
|
    def testListLiveFilesMetadata(self):
        """Cross-check list_live_files_metadata against dump_live_files.

        Both commands describe the same live SST files; their reported
        filename, LSM level, and column family must agree.
        """
        print("Running testListLiveFilesMetadata...")

        dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
        self.assertRunOK("put x1 y1 --create_if_missing", "OK")
        self.assertRunOK("put x2 y2", "OK")

        # Compare the SST filename and the level of list_live_files_metadata
        # with the data collected from dump_live_files.
        dumpFilePath1 = os.path.join(self.TMP_DIR, "dump1")
        self.assertTrue(self.dumpLiveFiles("--db=%s" % dbPath, dumpFilePath1))
        dumpFilePath2 = os.path.join(self.TMP_DIR, "dump2")
        self.assertTrue(self.listLiveFilesMetadata("--sort_by_filename --db=%s" % dbPath, dumpFilePath2))

        # Collect SST filename and level from dump_live_files
        with open(dumpFilePath1, "r") as tmp:
            data = tmp.read()
            filename1 = re.findall(r".*\d+\.sst",data)[0]
            level1 = re.findall(r"level:\d+",data)[0].split(':')[1]

        # Collect SST filename and level from list_live_files_metadata
        with open(dumpFilePath2, "r") as tmp:
            data = tmp.read()
            filename2 = re.findall(r".*\d+\.sst",data)[0]
            level2 = re.findall(r"level \d+",data)[0].split(' ')[1]

        # Assert equality between filenames and levels.
        self.assertEqual(filename1,filename2)
        self.assertEqual(level1,level2)

        # Create multiple column families and compare the output
        # of list_live_files_metadata with dump_live_files once again.
        # Create new CF, and insert data:
        self.assertRunOK("create_column_family mycol1", "OK")
        self.assertRunOK("put --column_family=mycol1 v1 v2", "OK")
        self.assertRunOK("create_column_family mycol2", "OK")
        self.assertRunOK("put --column_family=mycol2 h1 h2", "OK")
        self.assertRunOK("put --column_family=mycol2 h3 h4", "OK")

        # Call dump_live_files and list_live_files_metadata
        # and pipe the output to compare them later.
        dumpFilePath3 = os.path.join(self.TMP_DIR, "dump3")
        self.assertTrue(self.dumpLiveFiles("--db=%s" % dbPath, dumpFilePath3))
        dumpFilePath4 = os.path.join(self.TMP_DIR, "dump4")
        self.assertTrue(self.listLiveFilesMetadata("--sort_by_filename --db=%s" % dbPath, dumpFilePath4))

        # dump_live_files:
        # parse the output and create a map:
        # [key: sstFilename]->[value:[LSM level, Column Family Name]]
        referenceMap = {}
        with open(dumpFilePath3, "r") as tmp:
            data = tmp.read()
            # Note: the following regex are contingent on what the
            # dump_live_files outputs.
            namesAndLevels = re.findall(r"\d+.sst level:\d+", data)
            cfs = re.findall(r"(?<=column family name=)\w+", data)
            # re.findall should not reorder the data.
            # Therefore namesAndLevels[i] matches the data from cfs[i].
            for count, nameAndLevel in enumerate(namesAndLevels):
                sstFilename = re.findall(r"\d+.sst",nameAndLevel)[0]
                sstLevel = re.findall(r"(?<=level:)\d+", nameAndLevel)[0]
                cf = cfs[count]
                referenceMap[sstFilename] = [sstLevel, cf]

        # list_live_files_metadata:
        # parse the output and create a map:
        # [key: sstFilename]->[value:[LSM level, Column Family Name]]
        testMap = {}
        with open(dumpFilePath4, "r") as tmp:
            data = tmp.read()
            # Since for each SST file, all the information is contained
            # on one line, the parsing is easy to perform and relies on
            # the appearance of an "00xxx.sst" pattern.
            sstLines = re.findall(r".*\d+.sst.*", data)
            for line in sstLines:
                sstFilename = re.findall(r"\d+.sst", line)[0]
                sstLevel = re.findall(r"(?<=level )\d+",line)[0]
                cf = re.findall(r"(?<=column family \')\w+(?=\')",line)[0]
                testMap[sstFilename] = [sstLevel, cf]

        # Compare the map obtained from dump_live_files and the map
        # obtained from list_live_files_metadata. Everything should match.
        self.assertEqual(referenceMap,testMap)
|
2021-06-22 20:45:14 +02:00
|
|
|
|
2015-09-08 23:23:42 +02:00
|
|
|
def getManifests(self, directory):
|
|
|
|
return glob.glob(directory + "/MANIFEST-*")
|
|
|
|
|
2016-01-06 23:19:08 +01:00
|
|
|
def getSSTFiles(self, directory):
|
|
|
|
return glob.glob(directory + "/*.sst")
|
|
|
|
|
|
|
|
def getWALFiles(self, directory):
|
|
|
|
return glob.glob(directory + "/*.log")
|
|
|
|
|
2022-02-26 08:13:11 +01:00
|
|
|
def getBlobFiles(self, directory):
|
|
|
|
return glob.glob(directory + "/*.blob")
|
|
|
|
|
2015-09-08 23:23:42 +02:00
|
|
|
def copyManifests(self, src, dest):
|
|
|
|
return 0 == run_err_null("cp " + src + " " + dest)
|
|
|
|
|
|
|
|
def testManifestDump(self):
|
2020-03-25 04:57:53 +01:00
|
|
|
print("Running testManifestDump...")
|
2015-09-08 23:23:42 +02:00
|
|
|
dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
|
|
|
|
self.assertRunOK("put 1 1 --create_if_missing", "OK")
|
|
|
|
self.assertRunOK("put 2 2", "OK")
|
|
|
|
self.assertRunOK("put 3 3", "OK")
|
|
|
|
# Pattern to expect from manifest_dump.
|
|
|
|
num = "[0-9]+"
|
|
|
|
st = ".*"
|
2017-03-13 18:24:52 +01:00
|
|
|
subpat = st + " seq:" + num + ", type:" + num
|
2015-09-08 23:23:42 +02:00
|
|
|
regex = num + ":" + num + "\[" + subpat + ".." + subpat + "\]"
|
|
|
|
expected_pattern = re.compile(regex)
|
|
|
|
cmd = "manifest_dump --db=%s"
|
|
|
|
manifest_files = self.getManifests(dbPath)
|
|
|
|
self.assertTrue(len(manifest_files) == 1)
|
|
|
|
# Test with the default manifest file in dbPath.
|
|
|
|
self.assertRunOKFull(cmd % dbPath, expected_pattern,
|
|
|
|
unexpected=False, isPattern=True)
|
|
|
|
self.copyManifests(manifest_files[0], manifest_files[0] + "1")
|
|
|
|
manifest_files = self.getManifests(dbPath)
|
|
|
|
self.assertTrue(len(manifest_files) == 2)
|
|
|
|
# Test with multiple manifest files in dbPath.
|
|
|
|
self.assertRunFAILFull(cmd % dbPath)
|
|
|
|
# Running it with the copy we just created should pass.
|
|
|
|
self.assertRunOKFull((cmd + " --path=%s")
|
|
|
|
% (dbPath, manifest_files[1]),
|
|
|
|
expected_pattern, unexpected=False,
|
|
|
|
isPattern=True)
|
2016-01-06 23:19:08 +01:00
|
|
|
# Make sure that using the dump with --path will result in identical
|
|
|
|
# output as just using manifest_dump.
|
|
|
|
cmd = "dump --path=%s"
|
|
|
|
self.assertRunOKFull((cmd)
|
|
|
|
% (manifest_files[1]),
|
|
|
|
expected_pattern, unexpected=False,
|
|
|
|
isPattern=True)
|
|
|
|
|
2021-06-10 21:54:13 +02:00
|
|
|
# Check if null characters doesn't infer with output format.
|
|
|
|
self.assertRunOK("put a1 b1", "OK")
|
|
|
|
self.assertRunOK("put a2 b2", "OK")
|
|
|
|
self.assertRunOK("put --hex 0x12000DA0 0x80C0000B", "OK")
|
|
|
|
self.assertRunOK("put --hex 0x7200004f 0x80000004", "OK")
|
|
|
|
self.assertRunOK("put --hex 0xa000000a 0xf000000f", "OK")
|
|
|
|
self.assertRunOK("put a3 b3", "OK")
|
|
|
|
self.assertRunOK("put a4 b4", "OK")
|
|
|
|
|
|
|
|
# Verifies that all "levels" are printed out.
|
|
|
|
# There should be 66 mentions of levels.
|
|
|
|
expected_verbose_output = re.compile("matched")
|
|
|
|
# Test manifest_dump verbose and verify that key 0x7200004f
|
|
|
|
# is present. Note that we are forced to use grep here because
|
|
|
|
# an output with a non-terminating null character in it isn't piped
|
|
|
|
# correctly through the Python subprocess object.
|
|
|
|
# Also note that 0x72=r and 0x4f=O, hence the regex \'r.{2}O\'
|
|
|
|
# (we cannot use null character in the subprocess input either,
|
|
|
|
# so we have to use '.{2}')
|
|
|
|
cmd_verbose = "manifest_dump --verbose --db=%s | grep -aq $'\'r.{2}O\'' && echo 'matched' || echo 'not matched'" %dbPath
|
|
|
|
|
|
|
|
self.assertRunOKFull(cmd_verbose , expected_verbose_output,
|
|
|
|
unexpected=False, isPattern=True)
|
|
|
|
|
|
|
|
|
2020-12-19 16:59:08 +01:00
|
|
|
def testGetProperty(self):
|
|
|
|
print("Running testGetProperty...")
|
|
|
|
dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
|
|
|
|
self.assertRunOK("put 1 1 --create_if_missing", "OK")
|
|
|
|
self.assertRunOK("put 2 2", "OK")
|
|
|
|
# A "string" property
|
|
|
|
cmd = "--db=%s get_property rocksdb.estimate-num-keys"
|
|
|
|
self.assertRunOKFull(cmd % dbPath,
|
|
|
|
"rocksdb.estimate-num-keys: 2")
|
|
|
|
# A "map" property
|
|
|
|
# FIXME: why doesn't this pick up two entries?
|
|
|
|
cmd = "--db=%s get_property rocksdb.aggregated-table-properties"
|
|
|
|
part = "rocksdb.aggregated-table-properties.num_entries: "
|
|
|
|
expected_pattern = re.compile(part)
|
|
|
|
self.assertRunOKFull(cmd % dbPath,
|
|
|
|
expected_pattern, unexpected=False,
|
|
|
|
isPattern=True)
|
|
|
|
# An invalid property
|
|
|
|
cmd = "--db=%s get_property rocksdb.this-property-does-not-exist"
|
|
|
|
self.assertRunFAILFull(cmd % dbPath)
|
|
|
|
|
2016-01-06 23:19:08 +01:00
|
|
|
def testSSTDump(self):
|
2020-03-25 04:57:53 +01:00
|
|
|
print("Running testSSTDump...")
|
2016-01-06 23:19:08 +01:00
|
|
|
|
|
|
|
dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
|
|
|
|
self.assertRunOK("put sst1 sst1_val --create_if_missing", "OK")
|
2022-04-20 20:10:20 +02:00
|
|
|
self.assertRunOK("put sst2 sst2_val --enable_blob_files", "OK")
|
2016-01-06 23:19:08 +01:00
|
|
|
self.assertRunOK("get sst1", "sst1_val")
|
|
|
|
|
|
|
|
# Pattern to expect from SST dump.
|
2022-04-20 20:10:20 +02:00
|
|
|
regex = ".*Sst file format:.*\n.*\[blob ref\].*"
|
2016-01-06 23:19:08 +01:00
|
|
|
expected_pattern = re.compile(regex)
|
|
|
|
|
|
|
|
sst_files = self.getSSTFiles(dbPath)
|
|
|
|
self.assertTrue(len(sst_files) >= 1)
|
2022-04-20 20:10:20 +02:00
|
|
|
cmd = "dump --path=%s --decode_blob_index"
|
2016-01-06 23:19:08 +01:00
|
|
|
self.assertRunOKFull((cmd)
|
|
|
|
% (sst_files[0]),
|
|
|
|
expected_pattern, unexpected=False,
|
|
|
|
isPattern=True)
|
|
|
|
|
2022-04-22 05:37:07 +02:00
|
|
|
def testBlobDump(self):
|
|
|
|
print("Running testBlobDump")
|
|
|
|
dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
|
|
|
|
self.assertRunOK("batchput x1 y1 --create_if_missing --enable_blob_files", "OK")
|
|
|
|
self.assertRunOK("batchput --enable_blob_files x2 y2 x3 y3 \"x4 abc\" \"y4 xyz\"", "OK")
|
|
|
|
|
|
|
|
# Pattern to expect from blob file dump.
|
|
|
|
regex = ".*Blob log header[\s\S]*Blob log footer[\s\S]*Read record[\s\S]*Summary"
|
|
|
|
expected_pattern = re.compile(regex)
|
|
|
|
blob_files = self.getBlobFiles(dbPath)
|
|
|
|
self.assertTrue(len(blob_files) >= 1)
|
|
|
|
cmd = "dump --path=%s --dump_uncompressed_blobs"
|
|
|
|
self.assertRunOKFull((cmd)
|
|
|
|
% (blob_files[0]),
|
|
|
|
expected_pattern, unexpected=False,
|
|
|
|
isPattern=True)
|
|
|
|
|
2016-01-06 23:19:08 +01:00
|
|
|
def testWALDump(self):
|
2020-03-25 04:57:53 +01:00
|
|
|
print("Running testWALDump...")
|
2016-01-06 23:19:08 +01:00
|
|
|
|
|
|
|
dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
|
|
|
|
self.assertRunOK("put wal1 wal1_val --create_if_missing", "OK")
|
|
|
|
self.assertRunOK("put wal2 wal2_val", "OK")
|
|
|
|
self.assertRunOK("get wal1", "wal1_val")
|
|
|
|
|
|
|
|
# Pattern to expect from WAL dump.
|
|
|
|
regex = "^Sequence,Count,ByteSize,Physical Offset,Key\(s\).*"
|
|
|
|
expected_pattern = re.compile(regex)
|
|
|
|
|
|
|
|
wal_files = self.getWALFiles(dbPath)
|
|
|
|
self.assertTrue(len(wal_files) >= 1)
|
|
|
|
cmd = "dump --path=%s"
|
|
|
|
self.assertRunOKFull((cmd)
|
|
|
|
% (wal_files[0]),
|
|
|
|
expected_pattern, unexpected=False,
|
|
|
|
isPattern=True)
|
2015-09-08 23:23:42 +02:00
|
|
|
|
|
|
|
def testListColumnFamilies(self):
|
2020-03-25 04:57:53 +01:00
|
|
|
print("Running testListColumnFamilies...")
|
2015-09-08 23:23:42 +02:00
|
|
|
self.assertRunOK("put x1 y1 --create_if_missing", "OK")
|
2019-10-09 04:17:39 +02:00
|
|
|
cmd = "list_column_families | grep -v \"Column families\""
|
2015-09-08 23:23:42 +02:00
|
|
|
# Test on valid dbPath.
|
2019-10-09 04:17:39 +02:00
|
|
|
self.assertRunOK(cmd, "{default}")
|
2015-09-08 23:23:42 +02:00
|
|
|
# Test on empty path.
|
2019-10-09 04:17:39 +02:00
|
|
|
self.assertRunFAIL(cmd)
|
2015-09-08 23:23:42 +02:00
|
|
|
|
2016-01-23 00:46:32 +01:00
|
|
|
def testColumnFamilies(self):
|
2020-03-25 04:57:53 +01:00
|
|
|
print("Running testColumnFamilies...")
|
2018-01-29 21:43:56 +01:00
|
|
|
dbPath = os.path.join(self.TMP_DIR, self.DB_NAME) # noqa: F841 T25377293 Grandfathered in
|
2016-01-23 00:46:32 +01:00
|
|
|
self.assertRunOK("put cf1_1 1 --create_if_missing", "OK")
|
|
|
|
self.assertRunOK("put cf1_2 2 --create_if_missing", "OK")
|
2017-04-20 19:16:13 +02:00
|
|
|
self.assertRunOK("put cf1_3 3 --try_load_options", "OK")
|
2016-01-23 00:46:32 +01:00
|
|
|
# Given non-default column family to single CF DB.
|
|
|
|
self.assertRunFAIL("get cf1_1 --column_family=two")
|
|
|
|
self.assertRunOK("create_column_family two", "OK")
|
|
|
|
self.assertRunOK("put cf2_1 1 --create_if_missing --column_family=two",
|
|
|
|
"OK")
|
|
|
|
self.assertRunOK("put cf2_2 2 --create_if_missing --column_family=two",
|
|
|
|
"OK")
|
|
|
|
self.assertRunOK("delete cf1_2", "OK")
|
|
|
|
self.assertRunOK("create_column_family three", "OK")
|
|
|
|
self.assertRunOK("delete cf2_2 --column_family=two", "OK")
|
|
|
|
self.assertRunOK(
|
|
|
|
"put cf3_1 3 --create_if_missing --column_family=three",
|
|
|
|
"OK")
|
|
|
|
self.assertRunOK("get cf1_1 --column_family=default", "1")
|
|
|
|
self.assertRunOK("dump --column_family=two",
|
|
|
|
"cf2_1 ==> 1\nKeys in range: 1")
|
2017-04-20 19:16:13 +02:00
|
|
|
self.assertRunOK("dump --column_family=two --try_load_options",
|
|
|
|
"cf2_1 ==> 1\nKeys in range: 1")
|
2016-01-23 00:46:32 +01:00
|
|
|
self.assertRunOK("dump",
|
|
|
|
"cf1_1 ==> 1\ncf1_3 ==> 3\nKeys in range: 2")
|
|
|
|
self.assertRunOK("get cf2_1 --column_family=two",
|
|
|
|
"1")
|
|
|
|
self.assertRunOK("get cf3_1 --column_family=three",
|
|
|
|
"3")
|
2019-06-27 20:08:45 +02:00
|
|
|
self.assertRunOK("drop_column_family three", "OK")
|
2016-01-23 00:46:32 +01:00
|
|
|
# non-existing column family.
|
|
|
|
self.assertRunFAIL("get cf3_1 --column_family=four")
|
2019-06-27 20:08:45 +02:00
|
|
|
self.assertRunFAIL("drop_column_family four")
|
2015-09-08 23:23:42 +02:00
|
|
|
|
2018-08-09 23:18:59 +02:00
|
|
|
def testIngestExternalSst(self):
|
2020-03-25 04:57:53 +01:00
|
|
|
print("Running testIngestExternalSst...")
|
2018-08-09 23:18:59 +02:00
|
|
|
|
|
|
|
# Dump, load, write external sst and ingest it in another db
|
|
|
|
dbPath = os.path.join(self.TMP_DIR, "db1")
|
|
|
|
self.assertRunOK(
|
|
|
|
"batchput --db=%s --create_if_missing x1 y1 x2 y2 x3 y3 x4 y4"
|
|
|
|
% dbPath,
|
|
|
|
"OK")
|
|
|
|
self.assertRunOK("scan --db=%s" % dbPath,
|
|
|
|
"x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
|
|
|
|
dumpFilePath = os.path.join(self.TMP_DIR, "dump1")
|
|
|
|
with open(dumpFilePath, 'w') as f:
|
|
|
|
f.write("x1 ==> y10\nx2 ==> y20\nx3 ==> y30\nx4 ==> y40")
|
|
|
|
externSstPath = os.path.join(self.TMP_DIR, "extern_data1.sst")
|
|
|
|
self.assertTrue(self.writeExternSst("--create_if_missing --db=%s"
|
|
|
|
% dbPath,
|
|
|
|
dumpFilePath,
|
|
|
|
externSstPath))
|
|
|
|
# cannot ingest if allow_global_seqno is false
|
|
|
|
self.assertFalse(
|
|
|
|
self.ingestExternSst(
|
|
|
|
"--create_if_missing --allow_global_seqno=false --db=%s"
|
|
|
|
% dbPath,
|
|
|
|
externSstPath))
|
|
|
|
self.assertTrue(
|
|
|
|
self.ingestExternSst(
|
|
|
|
"--create_if_missing --allow_global_seqno --db=%s"
|
|
|
|
% dbPath,
|
|
|
|
externSstPath))
|
|
|
|
self.assertRunOKFull("scan --db=%s" % dbPath,
|
|
|
|
"x1 : y10\nx2 : y20\nx3 : y30\nx4 : y40")
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
if __name__ == "__main__":
    # Discover and run all LDBTestCase tests when this file is executed
    # directly (e.g. `python ldb_test.py`).
    unittest.main()
|