mirror of https://github.com/status-im/nim-codex.git
synced 2025-02-21 23:18:48 +00:00

setting up rocksdb

This commit is contained in:
parent 2375a8f852
commit 4c31b7760e

58 rocksdb/README.md Normal file
@@ -0,0 +1,58 @@
# Nim-RocksDB

[Build status (Travis)](https://travis-ci.org/status-im/nim-rocksdb)
[Build status (AppVeyor)](https://ci.appveyor.com/project/nimbus/nim-rocksdb)
[License: Apache-2.0](https://opensource.org/licenses/Apache-2.0)
[License: MIT](https://opensource.org/licenses/MIT)

A Nim wrapper for [Facebook's RocksDB](https://github.com/facebook/rocksdb), a persistent key-value store for flash and RAM storage.

## Current status

Nim-RocksDB provides a wrapper for the low-level functions of the librocksdb C library.

## Requirements

A RocksDB installation that provides `librocksdb.so`. This means that on Debian, and possibly on other Linux distros, you need "librocksdb-dev", not just a versioned "librocksdbX.Y" package that only provides `librocksdb.so.X.Y.Z`.

## Usage

See [simple_example](examples/simple_example.nim).
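
For a first taste of the high-level API vendored in this commit (defined in `rocksdb/rocksdb.nim` below), here is a minimal sketch; the database path is illustrative and the package is assumed to be available on your Nim path:

```nim
import rocksdb

proc example() =
  # open (and create, if missing) a database with default options
  let db = openRocksDb("/tmp/nim-rocksdb-example").expect("working database")
  defer: db.close()

  # keys and values are byte sequences; every call returns a RocksDBResult
  db.put(@[byte 1, 2, 3], @[byte 4, 5, 6]).expect("put should succeed")

  let val = db.get(@[byte 1, 2, 3])
  if val.isOk():
    echo val.get()  # -> @[4, 5, 6]

example()
```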

### Static linking

To statically link librocksdb, you would do something like:

```sh
nim c -d:rocksdb_static_linking --threads:on your_program.nim
```

See the config.nims file, which contains the static-linking configuration that is switched on with the `rocksdb_static_linking` flag. Note that static linking is currently not supported on Windows.

### Contribution

Any contribution intentionally submitted for inclusion in the work by you shall be dual licensed as above, without any additional terms or conditions.

## License

### Wrapper License

This repository is licensed and distributed under either of

* MIT license: [LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT

or

* Apache License, Version 2.0, ([LICENSE-APACHEv2](LICENSE-APACHEv2) or http://www.apache.org/licenses/LICENSE-2.0)

at your option. This file may not be copied, modified, or distributed except according to those terms.

### Dependency License

RocksDB is developed and maintained by the Facebook Database Engineering Team.
It is built on earlier work on LevelDB by Sanjay Ghemawat (sanjay@google.com)
and Jeff Dean (jeff@google.com).

RocksDB is dual-licensed under both the [GPLv2](https://github.com/facebook/rocksdb/blob/master/COPYING) and Apache License, Version 2.0, ([LICENSE-APACHEv2](LICENSE-APACHEv2) or http://www.apache.org/licenses/LICENSE-2.0). You may select, at your option, one of the above-listed licenses.
24 rocksdb/config.nims Normal file
@@ -0,0 +1,24 @@
# nim-rocksdb
# Copyright (c) 2019-2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

# begin Nimble config (version 1)
when fileExists("nimble.paths"):
  include "nimble.paths"
# end Nimble config

when defined(rocksdb_static_linking):
  # use the C++ linker profile because it's a C++ library
  when defined(macosx):
    switch("clang.linkerexe", "clang++")
  else:
    switch("gcc.linkerexe", "g++")

  switch("dynlibOverride", "rocksdb")
  switch("dynlibOverride", "lz4")
  switch("dynlibOverride", "zstd")
6 rocksdb/examples/.gitignore vendored Normal file
@@ -0,0 +1,6 @@
# ignore all executable files
*
!*.*
!*/
*.exe
75 rocksdb/examples/simple_example.nim Normal file
@@ -0,0 +1,75 @@
import ../rocksdb/lib/librocksdb, cpuinfo

const
  dbPath: cstring = "/tmp/rocksdb_simple_example"
  dbBackupPath: cstring = "/tmp/rocksdb_simple_example_backup"

proc main() =
  var
    db: ptr rocksdb_t
    be: ptr rocksdb_backup_engine_t
    options = rocksdb_options_create()
  # Optimize RocksDB. This is the easiest way to get RocksDB to perform well.
  let cpus = countProcessors()
  rocksdb_options_increase_parallelism(options, cpus.int32)
  # This requires snappy - disabled because rocksdb is not always compiled with
  # snappy support (for example Fedora 28, certain Ubuntu versions)
  # rocksdb_options_optimize_level_style_compaction(options, 0);
  # create the DB if it's not already present
  rocksdb_options_set_create_if_missing(options, 1)

  # open DB
  var err: cstring # memory leak: example code does not free error string!
  db = rocksdb_open(options, dbPath, cast[cstringArray](err.addr))
  doAssert err.isNil, $err

  # open Backup Engine that we will use for backing up our database
  be = rocksdb_backup_engine_open(options, dbBackupPath, cast[cstringArray](err.addr))
  doAssert err.isNil, $err

  # Put key-value
  var writeOptions = rocksdb_writeoptions_create()
  let key = "key"
  let putValue = "value"
  rocksdb_put(db, writeOptions, key.cstring, key.len.csize_t, putValue.cstring,
              putValue.len.csize_t, cast[cstringArray](err.addr))
  doAssert err.isNil, $err

  # Get value
  var readOptions = rocksdb_readoptions_create()
  var len: csize_t
  let rawValue = rocksdb_get(db, readOptions, key.cstring, key.len.csize_t, addr len,
                             cast[cstringArray](err.addr)) # Important: the result is NOT null-terminated
  doAssert err.isNil, $err

  # Copy it to a regular Nim string (copyMem because the raw value is NOT null-terminated)
  var getValue = newString(len.int)
  copyMem(addr getValue[0], unsafeAddr rawValue[0], len.int * sizeof(char))

  doAssert getValue == putValue

  # create new backup in a directory specified by dbBackupPath
  rocksdb_backup_engine_create_new_backup(be, db, cast[cstringArray](err.addr))
  doAssert err.isNil, $err

  rocksdb_close(db)

  # If something is wrong, you might want to restore data from the last backup
  var restoreOptions = rocksdb_restore_options_create()
  rocksdb_backup_engine_restore_db_from_latest_backup(be, dbPath, dbPath,
                                                      restoreOptions, cast[cstringArray](err.addr))
  doAssert err.isNil, $err
  rocksdb_restore_options_destroy(restoreOptions)

  db = rocksdb_open(options, dbPath, cast[cstringArray](err.addr))
  doAssert err.isNil, $err

  # cleanup
  rocksdb_writeoptions_destroy(writeOptions)
  rocksdb_readoptions_destroy(readOptions)
  rocksdb_options_destroy(options)
  rocksdb_backup_engine_close(be)
  rocksdb_close(db)

main()
26 rocksdb/rocksdb.nim Normal file
@@ -0,0 +1,26 @@
# Nim-RocksDB
# Copyright 2018-2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  ./rocksdb/[
    backup,
    columnfamily,
    rocksdb,
    rocksiterator,
    sstfilewriter,
    transactiondb,
    writebatch]

export
  backup,
  columnfamily,
  rocksdb,
  rocksiterator,
  sstfilewriter,
  transactiondb,
  writebatch
25 rocksdb/rocksdb.nimble Normal file
@@ -0,0 +1,25 @@
packageName = "rocksdb"
version     = "0.4.0"
author      = "Status Research & Development GmbH"
description = "A wrapper for Facebook's RocksDB, an embeddable, persistent key-value store for fast storage"
license     = "Apache License 2.0 or GPLv2"
skipDirs    = @["examples", "tests"]
mode        = ScriptMode.Verbose

### Dependencies
requires "nim >= 1.6",
  "results",
  "tempfile",
  "unittest2"

task clean, "Remove temporary files":
  exec "rm -rf build"
  exec "make -C vendor/rocksdb clean"

task test, "Run tests":
  exec "nim c -r --threads:on tests/test_all.nim"

task test_static, "Run tests after static linking dependencies":
  when not defined(windows):
    exec "scripts/build_static_deps.sh"
  exec "nim c -d:rocksdb_static_linking -r --threads:on tests/test_all.nim"
102 rocksdb/rocksdb/backup.nim Normal file
@@ -0,0 +1,102 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

## A `BackupEngineRef` is used to create and manage backups of a RocksDB database.

{.push raises: [].}

import
  ./lib/librocksdb,
  ./internal/utils,
  ./options/backupopts,
  ./rocksdb,
  ./rocksresult

export
  backupopts,
  rocksdb,
  rocksresult

type
  BackupEnginePtr* = ptr rocksdb_backup_engine_t

  BackupEngineRef* = ref object
    cPtr: BackupEnginePtr
    path: string
    backupOpts: BackupEngineOptionsRef

proc openBackupEngine*(
    path: string,
    backupOpts = defaultBackupEngineOptions()): RocksDBResult[BackupEngineRef] =
  ## Create a new backup engine. The `path` parameter is the path of the backup
  ## directory. Note that the same directory should not be used for both backups
  ## and the database itself.

  var errors: cstring
  let backupEnginePtr = rocksdb_backup_engine_open(
    backupOpts.cPtr,
    path.cstring,
    cast[cstringArray](errors.addr))
  bailOnErrors(errors)

  let engine = BackupEngineRef(
    cPtr: backupEnginePtr,
    path: path,
    backupOpts: backupOpts)
  ok(engine)

proc isClosed*(backupEngine: BackupEngineRef): bool {.inline.} =
  ## Returns `true` if the `BackupEngineRef` has been closed.
  backupEngine.cPtr.isNil()

proc createNewBackup*(
    backupEngine: BackupEngineRef,
    db: RocksDbRef): RocksDBResult[void] =
  ## Create a new backup of the database.
  doAssert not backupEngine.isClosed()
  doAssert not db.isClosed()

  var errors: cstring
  rocksdb_backup_engine_create_new_backup(
    backupEngine.cPtr,
    db.cPtr,
    cast[cstringArray](errors.addr))
  bailOnErrors(errors)

  ok()

proc restoreDbFromLatestBackup*(
    backupEngine: BackupEngineRef,
    dbDir: string,
    walDir = dbDir,
    keepLogFiles = false): RocksDBResult[void] =
  ## Restore the database from the latest backup.
  doAssert not backupEngine.isClosed()

  let restoreOptions = rocksdb_restore_options_create()
  rocksdb_restore_options_set_keep_log_files(restoreOptions, keepLogFiles.cint)

  var errors: cstring
  rocksdb_backup_engine_restore_db_from_latest_backup(
    backupEngine.cPtr,
    dbDir.cstring,
    walDir.cstring,
    restoreOptions,
    cast[cstringArray](errors.addr))
  bailOnErrors(errors)

  rocksdb_restore_options_destroy(restoreOptions)

  ok()

proc close*(backupEngine: BackupEngineRef) =
  ## Close the `BackupEngineRef`.
  if not backupEngine.isClosed():
    rocksdb_backup_engine_close(backupEngine.cPtr)
    backupEngine.cPtr = nil
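
A usage sketch for the backup API above (not part of the commit; the paths are illustrative, and the database is assumed to be closed before restoring into its directory):

```nim
import rocksdb

proc backupExample(db: RocksDbRef) =
  # open a backup engine over a dedicated backup directory
  let engine = openBackupEngine("/tmp/mydb-backups")
    .expect("backup engine should open")
  defer: engine.close()

  engine.createNewBackup(db).expect("backup should succeed")

  # later, typically after closing the database, the latest backup
  # can be restored into the database directory
  engine.restoreDbFromLatestBackup("/tmp/mydb").expect("restore should succeed")
```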
113 rocksdb/rocksdb/columnfamily.nim Normal file
@@ -0,0 +1,113 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

## The `ColFamilyReadOnly` and `ColFamilyReadWrite` types hold a reference to a
## `RocksDbReadOnlyRef` or `RocksDbReadWriteRef` respectively. They are convenience
## types which enable reading from and writing to a specific column family without
## having to specify the column family in each call.
##
## These column family types do not own the underlying `RocksDbRef`; to close the
## database, simply call `columnFamily.db.close()`, which will close the underlying
## `RocksDbRef`. Note that doing so will also impact any other column families that
## hold a reference to the same `RocksDbRef`.

{.push raises: [].}

import
  ./rocksdb

export rocksdb

type
  ColFamilyReadOnly* = object
    db: RocksDbReadOnlyRef
    name: string

  ColFamilyReadWrite* = object
    db: RocksDbReadWriteRef
    name: string

proc withColFamily*(
    db: RocksDbReadOnlyRef,
    name: string): RocksDBResult[ColFamilyReadOnly] =
  ## Creates a new `ColFamilyReadOnly` from the given `RocksDbReadOnlyRef` and
  ## column family name.

  # validate that the column family exists
  discard db.keyExists(@[0.byte], name).valueOr:
    return err(error)

  ok(ColFamilyReadOnly(db: db, name: name))

proc withColFamily*(
    db: RocksDbReadWriteRef,
    name: string): RocksDBResult[ColFamilyReadWrite] =
  ## Creates a new `ColFamilyReadWrite` from the given `RocksDbReadWriteRef` and
  ## column family name.

  # validate that the column family exists
  discard db.keyExists(@[0.byte], name).valueOr:
    return err(error)

  ok(ColFamilyReadWrite(db: db, name: name))

proc db*(cf: ColFamilyReadOnly | ColFamilyReadWrite): auto {.inline.} =
  ## Returns the underlying `RocksDbReadOnlyRef` or `RocksDbReadWriteRef`.
  cf.db

proc name*(cf: ColFamilyReadOnly | ColFamilyReadWrite): string {.inline.} =
  ## Returns the name of the column family.
  cf.name

proc get*(
    cf: ColFamilyReadOnly | ColFamilyReadWrite,
    key: openArray[byte],
    onData: DataProc): RocksDBResult[bool] {.inline.} =
  ## Gets the value of the given key from the column family using the `onData`
  ## callback.
  cf.db.get(key, onData, cf.name)

proc get*(
    cf: ColFamilyReadOnly | ColFamilyReadWrite,
    key: openArray[byte]): RocksDBResult[seq[byte]] {.inline.} =
  ## Gets the value of the given key from the column family.
  cf.db.get(key, cf.name)

proc put*(
    cf: ColFamilyReadWrite,
    key, val: openArray[byte]): RocksDBResult[void] {.inline.} =
  ## Puts a value for the given key into the column family.
  cf.db.put(key, val, cf.name)

proc keyExists*(
    cf: ColFamilyReadOnly | ColFamilyReadWrite,
    key: openArray[byte]): RocksDBResult[bool] {.inline.} =
  ## Checks if the given key exists in the column family.
  cf.db.keyExists(key, cf.name)

proc delete*(
    cf: ColFamilyReadWrite,
    key: openArray[byte]): RocksDBResult[void] {.inline.} =
  ## Deletes the given key from the column family.
  cf.db.delete(key, cf.name)

proc openIterator*(
    cf: ColFamilyReadOnly | ColFamilyReadWrite): RocksDBResult[RocksIteratorRef] {.inline.} =
  ## Opens a `RocksIteratorRef` for the given column family.
  cf.db.openIterator(cf.name)

proc openWriteBatch*(cf: ColFamilyReadWrite): WriteBatchRef {.inline.} =
  ## Opens a `WriteBatchRef` for the given column family.
  cf.db.openWriteBatch(cf.name)

proc write*(
    cf: ColFamilyReadWrite,
    updates: WriteBatchRef): RocksDBResult[void] {.inline.} =
  ## Writes the updates in the `WriteBatchRef` to the column family.
  cf.db.write(updates)
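
A usage sketch for the wrappers above (illustrative; assumes a "validation" column family was included when the database was opened):

```nim
import rocksdb

proc cfExample(db: RocksDbReadWriteRef) =
  # bind the column family once, then omit its name on every call
  let cf = db.withColFamily("validation").expect("column family exists")

  cf.put(@[byte 1], @[byte 42]).expect("put should succeed")
  echo cf.get(@[byte 1]).expect("get should succeed")      # -> @[42]
  echo cf.keyExists(@[byte 1]).expect("exists should succeed")  # -> true
```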
44 rocksdb/rocksdb/columnfamily/cfdescriptor.nim Normal file
@@ -0,0 +1,44 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  ../internal/utils,
  ./cfopts

export cfopts

type
  ColFamilyDescriptor* = object
    name: string
    options: ColFamilyOptionsRef

proc initColFamilyDescriptor*(
    name: string,
    options = defaultColFamilyOptions()): ColFamilyDescriptor =
  ColFamilyDescriptor(name: name, options: options)

proc name*(descriptor: ColFamilyDescriptor): string {.inline.} =
  descriptor.name

proc options*(descriptor: ColFamilyDescriptor): ColFamilyOptionsRef {.inline.} =
  descriptor.options

proc isDefault*(descriptor: ColFamilyDescriptor): bool {.inline.} =
  descriptor.name == DEFAULT_COLUMN_FAMILY_NAME

proc defaultColFamilyDescriptor*(): ColFamilyDescriptor {.inline.} =
  initColFamilyDescriptor(DEFAULT_COLUMN_FAMILY_NAME)

proc isClosed*(descriptor: ColFamilyDescriptor): bool {.inline.} =
  descriptor.options.isClosed()

proc close*(descriptor: ColFamilyDescriptor) {.inline.} =
  descriptor.options.close()
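
A sketch of how a descriptor feeds into `openRocksDb` (illustrative path and name; `openRocksDb` is defined in `rocksdb/rocksdb.nim` below, and the default column family descriptor is added automatically if omitted):

```nim
import rocksdb

# open a database with one extra, named column family
let db = openRocksDb("/tmp/cf-example",
    columnFamilies = [initColFamilyDescriptor("metadata")])
  .expect("database should open")
db.close()
```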
50 rocksdb/rocksdb/columnfamily/cfhandle.nim Normal file
@@ -0,0 +1,50 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  ../lib/librocksdb

type
  ColFamilyHandlePtr* = ptr rocksdb_column_family_handle_t

  ColFamilyHandleRef* = ref object
    cPtr: ColFamilyHandlePtr

proc newColFamilyHandle*(cPtr: ColFamilyHandlePtr): ColFamilyHandleRef =
  ColFamilyHandleRef(cPtr: cPtr)

proc isClosed*(handle: ColFamilyHandleRef): bool {.inline.} =
  handle.cPtr.isNil()

proc cPtr*(handle: ColFamilyHandleRef): ColFamilyHandlePtr =
  doAssert not handle.isClosed()
  handle.cPtr

# TODO: The procs below will not work unless using the latest version of RocksDB.
# Currently, when installing librocksdb-dev on Linux, the RocksDB version used is 6.11.4.
# Need to complete this task: https://github.com/status-im/nim-rocksdb/issues/10

# proc getId*(handle: ColFamilyHandleRef): int =
#   doAssert not handle.isClosed()
#   rocksdb_column_family_handle_get_id(handle.cPtr).int

# proc getName*(handle: ColFamilyHandleRef): string =
#   doAssert not handle.isClosed()
#   var nameLen: csize_t
#   $rocksdb_column_family_handle_get_name(handle.cPtr, nameLen.addr)

# proc isDefault*(handle: ColFamilyHandleRef): bool {.inline.} =
#   handle.getName() == DEFAULT_COLUMN_FAMILY_NAME

proc close*(handle: ColFamilyHandleRef) =
  if not handle.isClosed():
    rocksdb_column_family_handle_destroy(handle.cPtr)
    handle.cPtr = nil
56 rocksdb/rocksdb/columnfamily/cfopts.nim Normal file
@@ -0,0 +1,56 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  ../lib/librocksdb

type
  ColFamilyOptionsPtr* = ptr rocksdb_options_t

  ColFamilyOptionsRef* = ref object
    cPtr: ColFamilyOptionsPtr

proc newColFamilyOptions*(): ColFamilyOptionsRef =
  ColFamilyOptionsRef(cPtr: rocksdb_options_create())

proc isClosed*(cfOpts: ColFamilyOptionsRef): bool {.inline.} =
  cfOpts.cPtr.isNil()

proc cPtr*(cfOpts: ColFamilyOptionsRef): ColFamilyOptionsPtr =
  doAssert not cfOpts.isClosed()
  cfOpts.cPtr

proc setCreateMissingColumnFamilies*(cfOpts: ColFamilyOptionsRef, flag: bool) =
  doAssert not cfOpts.isClosed()
  rocksdb_options_set_create_missing_column_families(cfOpts.cPtr, flag.uint8)

proc defaultColFamilyOptions*(): ColFamilyOptionsRef =
  let opts = newColFamilyOptions()

  # rocksdb_options_set_compression(opts.cPtr, rocksdb_lz4_compression)
  # rocksdb_options_set_bottommost_compression(opts.cPtr, rocksdb_zstd_compression)

  # Enable creating column families if they do not exist
  opts.setCreateMissingColumnFamilies(true)
  return opts

# TODO: The procs below will not work unless using the latest version of RocksDB.
# Currently, when installing librocksdb-dev on Linux, the RocksDB version used is 6.11.4.
# Need to complete this task: https://github.com/status-im/nim-rocksdb/issues/10

# proc getCreateMissingColumnFamilies*(cfOpts: ColFamilyOptionsRef): bool =
#   doAssert not cfOpts.isClosed()
#   rocksdb_options_get_create_missing_column_families(cfOpts.cPtr).bool

proc close*(cfOpts: ColFamilyOptionsRef) =
  if not cfOpts.isClosed():
    rocksdb_options_destroy(cfOpts.cPtr)
    cfOpts.cPtr = nil
44 rocksdb/rocksdb/internal/cftable.nim Normal file
@@ -0,0 +1,44 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  std/tables,
  ../columnfamily/cfhandle

export
  cfhandle

type
  ColFamilyTableRef* = ref object
    columnFamilies: TableRef[string, ColFamilyHandleRef]

proc newColFamilyTable*(
    names: openArray[string],
    handles: openArray[ColFamilyHandlePtr]): ColFamilyTableRef =
  doAssert names.len() == handles.len()

  let cfTable = newTable[string, ColFamilyHandleRef]()
  for i, name in names:
    cfTable[name] = newColFamilyHandle(handles[i])

  ColFamilyTableRef(columnFamilies: cfTable)

proc isClosed*(table: ColFamilyTableRef): bool {.inline.} =
  table.columnFamilies.isNil()

proc get*(table: ColFamilyTableRef, name: string): ColFamilyHandleRef =
  table.columnFamilies.getOrDefault(name)

proc close*(table: ColFamilyTableRef) =
  if not table.isClosed():
    for _, v in table.columnFamilies.mpairs():
      v.close()
    table.columnFamilies = nil
27 rocksdb/rocksdb/internal/utils.nim Normal file
@@ -0,0 +1,27 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  std/locks,
  ../lib/librocksdb

const DEFAULT_COLUMN_FAMILY_NAME* = "default"

proc createLock*(): Lock =
  var lock = Lock()
  initLock(lock)
  lock

template bailOnErrors*(errors: cstring): auto =
  if not errors.isNil:
    let res = err($(errors))
    rocksdb_free(errors)
    return res
3042 rocksdb/rocksdb/lib/headers/c.h Normal file
File diff suppressed because it is too large

3213 rocksdb/rocksdb/lib/librocksdb.nim Normal file
File diff suppressed because it is too large
43 rocksdb/rocksdb/options/backupopts.nim Normal file
@@ -0,0 +1,43 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  ../lib/librocksdb

type
  BackupEngineOptionsPtr* = ptr rocksdb_options_t

  BackupEngineOptionsRef* = ref object
    cPtr: BackupEngineOptionsPtr

proc newBackupEngineOptions*(): BackupEngineOptionsRef =
  BackupEngineOptionsRef(cPtr: rocksdb_options_create())

proc isClosed*(engineOpts: BackupEngineOptionsRef): bool {.inline.} =
  engineOpts.cPtr.isNil()

proc cPtr*(engineOpts: BackupEngineOptionsRef): BackupEngineOptionsPtr =
  doAssert not engineOpts.isClosed()
  engineOpts.cPtr

# TODO: Add setters and getters for backup options properties.

proc defaultBackupEngineOptions*(): BackupEngineOptionsRef {.inline.} =
  let opts = newBackupEngineOptions()
  # rocksdb_options_set_compression(opts.cPtr, rocksdb_lz4_compression)
  # rocksdb_options_set_bottommost_compression(opts.cPtr, rocksdb_zstd_compression)
  opts

proc close*(engineOpts: BackupEngineOptionsRef) =
  if not engineOpts.isClosed():
    rocksdb_options_destroy(engineOpts.cPtr)
    engineOpts.cPtr = nil
88 rocksdb/rocksdb/options/dbopts.nim Normal file
@@ -0,0 +1,88 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  std/cpuinfo,
  ../lib/librocksdb

type
  DbOptionsPtr* = ptr rocksdb_options_t

  DbOptionsRef* = ref object
    cPtr: DbOptionsPtr

proc newDbOptions*(): DbOptionsRef =
  DbOptionsRef(cPtr: rocksdb_options_create())

proc isClosed*(dbOpts: DbOptionsRef): bool {.inline.} =
  dbOpts.cPtr.isNil()

proc cPtr*(dbOpts: DbOptionsRef): DbOptionsPtr =
  doAssert not dbOpts.isClosed()
  dbOpts.cPtr

proc setIncreaseParallelism*(dbOpts: DbOptionsRef, totalThreads: int) =
  doAssert totalThreads > 0
  doAssert not dbOpts.isClosed()
  rocksdb_options_increase_parallelism(dbOpts.cPtr, totalThreads.cint)

proc setCreateIfMissing*(dbOpts: DbOptionsRef, flag: bool) =
  doAssert not dbOpts.isClosed()
  rocksdb_options_set_create_if_missing(dbOpts.cPtr, flag.uint8)

proc setMaxOpenFiles*(dbOpts: DbOptionsRef, maxOpenFiles: int) =
  doAssert maxOpenFiles >= -1
  doAssert not dbOpts.isClosed()
  rocksdb_options_set_max_open_files(dbOpts.cPtr, maxOpenFiles.cint)

proc setCreateMissingColumnFamilies*(dbOpts: DbOptionsRef, flag: bool) =
  doAssert not dbOpts.isClosed()
  rocksdb_options_set_create_missing_column_families(dbOpts.cPtr, flag.uint8)

proc defaultDbOptions*(): DbOptionsRef =
  let opts: DbOptionsRef = newDbOptions()

  # rocksdb_options_set_compression(opts.cPtr, rocksdb_lz4_compression)
  # rocksdb_options_set_bottommost_compression(opts.cPtr, rocksdb_zstd_compression)

  # Optimize RocksDB. This is the easiest way to get RocksDB to perform well:
  opts.setIncreaseParallelism(countProcessors())
  # This requires snappy - disabled because rocksdb is not always compiled with
  # snappy support (for example Fedora 28, certain Ubuntu versions)
  # rocksdb_options_optimize_level_style_compaction(options, 0);
  opts.setCreateIfMissing(true)
  # The default (-1) keeps all files open; allow setting a specific value,
  # e.g. in case the application's file descriptor limit would otherwise be reached.
  opts.setMaxOpenFiles(-1)
  # Enable creating column families if they do not exist
  opts.setCreateMissingColumnFamilies(true)
  return opts

# TODO: The procs below will not work unless using the latest version of RocksDB.
# Currently, when installing librocksdb-dev on Linux, the RocksDB version used is 6.11.4.
# Need to complete this task: https://github.com/status-im/nim-rocksdb/issues/10

# proc getCreateIfMissing*(dbOpts: DbOptionsRef): bool =
#   doAssert not dbOpts.isClosed()
#   rocksdb_options_get_create_if_missing(dbOpts.cPtr).bool

# proc getMaxOpenFiles*(dbOpts: DbOptionsRef): int =
#   doAssert not dbOpts.isClosed()
#   rocksdb_options_get_max_open_files(dbOpts.cPtr).int

# proc getCreateMissingColumnFamilies*(dbOpts: DbOptionsRef): bool =
#   doAssert not dbOpts.isClosed()
#   rocksdb_options_get_create_missing_column_families(dbOpts.cPtr).bool

proc close*(dbOpts: DbOptionsRef) =
  if not dbOpts.isClosed():
    rocksdb_options_destroy(dbOpts.cPtr)
    dbOpts.cPtr = nil
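
A sketch of overriding one of these defaults before opening a database (illustrative values; as shown in `rocksdb/rocksdb.nim` below, the database takes over the options and `close` releases them):

```nim
import rocksdb

let opts = defaultDbOptions()
opts.setMaxOpenFiles(512)  # bound the number of open files instead of the default -1

let db = openRocksDb("/tmp/tuned-example", dbOpts = opts)
  .expect("database should open")
db.close()  # also closes the DbOptionsRef that was passed in
```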
40 rocksdb/rocksdb/options/readopts.nim Normal file
@@ -0,0 +1,40 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  ../lib/librocksdb

type
  ReadOptionsPtr* = ptr rocksdb_readoptions_t

  ReadOptionsRef* = ref object
    cPtr: ReadOptionsPtr

proc newReadOptions*(): ReadOptionsRef =
  ReadOptionsRef(cPtr: rocksdb_readoptions_create())

proc isClosed*(readOpts: ReadOptionsRef): bool {.inline.} =
  readOpts.cPtr.isNil()

proc cPtr*(readOpts: ReadOptionsRef): ReadOptionsPtr =
  doAssert not readOpts.isClosed()
  readOpts.cPtr

# TODO: Add setters and getters for read options properties.

proc defaultReadOptions*(): ReadOptionsRef {.inline.} =
  newReadOptions()
  # TODO: set preferred defaults

proc close*(readOpts: ReadOptionsRef) =
  if not readOpts.isClosed():
    rocksdb_readoptions_destroy(readOpts.cPtr)
    readOpts.cPtr = nil
40 rocksdb/rocksdb/options/writeopts.nim Normal file
@@ -0,0 +1,40 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  ../lib/librocksdb

type
  WriteOptionsPtr* = ptr rocksdb_writeoptions_t

  WriteOptionsRef* = ref object
    cPtr: WriteOptionsPtr

proc newWriteOptions*(): WriteOptionsRef =
  WriteOptionsRef(cPtr: rocksdb_writeoptions_create())

proc isClosed*(writeOpts: WriteOptionsRef): bool {.inline.} =
  writeOpts.cPtr.isNil()

proc cPtr*(writeOpts: WriteOptionsRef): WriteOptionsPtr =
  doAssert not writeOpts.isClosed()
  writeOpts.cPtr

# TODO: Add setters and getters for write options properties.

proc defaultWriteOptions*(): WriteOptionsRef {.inline.} =
  newWriteOptions()
  # TODO: set preferred defaults

proc close*(writeOpts: WriteOptionsRef) =
  if not writeOpts.isClosed():
    rocksdb_writeoptions_destroy(writeOpts.cPtr)
    writeOpts.cPtr = nil
372 rocksdb/rocksdb/rocksdb.nim Normal file
@@ -0,0 +1,372 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

## A `RocksDBRef` represents a reference to a RocksDB instance. It can be opened
## in read-only or read-write mode, in which case a `RocksDbReadOnlyRef` or
## `RocksDbReadWriteRef` will be returned respectively. The `RocksDbReadOnlyRef`
## type doesn't support any of the write operations such as `put`, `delete` or
## `write`.
##
## Many of the operations on these types can potentially fail for various reasons,
## in which case a `RocksDBResult` containing an error will be returned.
##
## The types wrap and hold a handle to a C pointer which needs to be freed,
## so `close` should be called to prevent a memory leak after use.
##
## Most of the procs below support passing in the name of the column family
## which should be used for the operation. The default column family will be
## used if none is provided.

{.push raises: [].}

import
  std/[sequtils, locks],
  ./lib/librocksdb,
  ./options/[dbopts, readopts, writeopts],
  ./columnfamily/[cfopts, cfdescriptor, cfhandle],
  ./internal/[cftable, utils],
  ./rocksiterator,
  ./rocksresult,
  ./writebatch

export
  rocksresult,
  dbopts,
  readopts,
  writeopts,
  cfdescriptor,
  rocksiterator,
  writebatch

type
  RocksDbPtr* = ptr rocksdb_t
  IngestExternalFilesOptionsPtr = ptr rocksdb_ingestexternalfileoptions_t

  RocksDbRef* = ref object of RootObj
    lock: Lock
    cPtr: RocksDbPtr
    path: string
    dbOpts: DbOptionsRef
    readOpts: ReadOptionsRef
    defaultCfName: string
    cfTable: ColFamilyTableRef

  RocksDbReadOnlyRef* = ref object of RocksDbRef

  RocksDbReadWriteRef* = ref object of RocksDbRef
    writeOpts: WriteOptionsRef
    ingestOptsPtr: IngestExternalFilesOptionsPtr

proc openRocksDb*(
    path: string,
    dbOpts = defaultDbOptions(),
    readOpts = defaultReadOptions(),
    writeOpts = defaultWriteOptions(),
    columnFamilies: openArray[ColFamilyDescriptor] = []): RocksDBResult[RocksDbReadWriteRef] =
  ## Open a RocksDB instance in read-write mode. If `columnFamilies` is empty
  ## then it will open the default column family. If `dbOpts`, `readOpts`, or
  ## `writeOpts` are not supplied then the default options will be used.
  ## By default, column families will be created if they don't yet exist.
  ## All existing column families must be specified if the database has
  ## previously created any column families.

  var cfs = columnFamilies.toSeq()
  if DEFAULT_COLUMN_FAMILY_NAME notin columnFamilies.mapIt(it.name()):
    cfs.add(defaultColFamilyDescriptor())

  var
    cfNames = cfs.mapIt(it.name().cstring)
    cfOpts = cfs.mapIt(it.options.cPtr)
    cfHandles = newSeq[ColFamilyHandlePtr](cfs.len)
    errors: cstring
  let rocksDbPtr = rocksdb_open_column_families(
    dbOpts.cPtr,
    path.cstring,
    cfNames.len().cint,
    cast[cstringArray](cfNames[0].addr),
    cfOpts[0].addr,
    cfHandles[0].addr,
    cast[cstringArray](errors.addr))
  bailOnErrors(errors)

  let db = RocksDbReadWriteRef(
    lock: createLock(),
    cPtr: rocksDbPtr,
    path: path,
    dbOpts: dbOpts,
    readOpts: readOpts,
    writeOpts: writeOpts,
    ingestOptsPtr: rocksdb_ingestexternalfileoptions_create(),
    defaultCfName: DEFAULT_COLUMN_FAMILY_NAME,
    cfTable: newColFamilyTable(cfNames.mapIt($it), cfHandles))
  ok(db)

proc openRocksDbReadOnly*(
    path: string,
    dbOpts = defaultDbOptions(),
    readOpts = defaultReadOptions(),
    columnFamilies: openArray[ColFamilyDescriptor] = [],
    errorIfWalFileExists = false): RocksDBResult[RocksDbReadOnlyRef] =
  ## Open a RocksDB instance in read-only mode. If `columnFamilies` is empty
  ## then it will open the default column family. If `dbOpts` or `readOpts` are
  ## not supplied then the default options will be used. By default, column
  ## families will be created if they don't yet exist. If the database already
  ## contains any column families, then all or a subset of the existing column
  ## families can be opened for reading.

  var cfs = columnFamilies.toSeq()
  if DEFAULT_COLUMN_FAMILY_NAME notin columnFamilies.mapIt(it.name()):
    cfs.add(defaultColFamilyDescriptor())

  var
    cfNames = cfs.mapIt(it.name().cstring)
    cfOpts = cfs.mapIt(it.options.cPtr)
    cfHandles = newSeq[ColFamilyHandlePtr](cfs.len)
    errors: cstring
  let rocksDbPtr = rocksdb_open_for_read_only_column_families(
    dbOpts.cPtr,
    path.cstring,
    cfNames.len().cint,
    cast[cstringArray](cfNames[0].addr),
    cfOpts[0].addr,
    cfHandles[0].addr,
    errorIfWalFileExists.uint8,
    cast[cstringArray](errors.addr))
  bailOnErrors(errors)

  let db = RocksDbReadOnlyRef(
    lock: createLock(),
    cPtr: rocksDbPtr,
    path: path,
    dbOpts: dbOpts,
    readOpts: readOpts,
    defaultCfName: DEFAULT_COLUMN_FAMILY_NAME,
    cfTable: newColFamilyTable(cfNames.mapIt($it), cfHandles))
  ok(db)

proc isClosed*(db: RocksDbRef): bool {.inline.} =
  ## Returns `true` if the database has been closed and `false` otherwise.
  db.cPtr.isNil()

proc cPtr*(db: RocksDbRef): RocksDbPtr {.inline.} =
  ## Get the underlying database pointer.
  doAssert not db.isClosed()
  db.cPtr

proc get*(
    db: RocksDbRef,
    key: openArray[byte],
    onData: DataProc,
    columnFamily = db.defaultCfName): RocksDBResult[bool] =
  ## Get the value for the given key from the specified column family.
  ## If the value does not exist, `false` will be returned in the result
  ## and `onData` will not be called. If the value does exist, `true` will be
  ## returned in the result and `onData` will be called with the value.
  ## The `onData` callback reduces the number of copies and therefore should be
  ## preferred if performance is required.

  if key.len() == 0:
    return err("rocksdb: key is empty")

  let cfHandle = db.cfTable.get(columnFamily)
  if cfHandle.isNil():
    return err("rocksdb: unknown column family")

  var
    len: csize_t
    errors: cstring
  let data = rocksdb_get_cf(
    db.cPtr,
    db.readOpts.cPtr,
    cfHandle.cPtr,
    cast[cstring](unsafeAddr key[0]),
    csize_t(key.len),
    len.addr,
    cast[cstringArray](errors.addr))
  bailOnErrors(errors)

  if data.isNil():
    doAssert len == 0
    ok(false)
  else:
    onData(toOpenArrayByte(data, 0, len.int - 1))
    rocksdb_free(data)
    ok(true)

proc get*(
    db: RocksDbRef,
    key: openArray[byte],
    columnFamily = db.defaultCfName): RocksDBResult[seq[byte]] =
  ## Get the value for the given key from the specified column family.
  ## If the value does not exist, an empty error will be returned in the result.
  ## If the value does exist, the value will be returned in the result.

  var dataRes: RocksDBResult[seq[byte]]
  proc onData(data: openArray[byte]) = dataRes.ok(@data)

  let res = db.get(key, onData, columnFamily)
  if res.isOk():
    return dataRes

  dataRes.err(res.error())

proc put*(
    db: RocksDbReadWriteRef,
    key, val: openArray[byte],
    columnFamily = db.defaultCfName): RocksDBResult[void] =
  ## Put the value for the given key into the specified column family.

  if key.len() == 0:
    return err("rocksdb: key is empty")

  let cfHandle = db.cfTable.get(columnFamily)
  if cfHandle.isNil():
    return err("rocksdb: unknown column family")

  var errors: cstring
  rocksdb_put_cf(
    db.cPtr,
    db.writeOpts.cPtr,
    cfHandle.cPtr,
    cast[cstring](unsafeAddr key[0]),
    csize_t(key.len),
    cast[cstring](if val.len > 0: unsafeAddr val[0] else: nil),
    csize_t(val.len),
    cast[cstringArray](errors.addr))
  bailOnErrors(errors)

  ok()

proc keyExists*(
    db: RocksDbRef,
    key: openArray[byte],
    columnFamily = db.defaultCfName): RocksDBResult[bool] =
  ## Check if the key exists in the specified column family.
  ## Returns a result containing `true` if the key exists or a result
  ## containing `false` otherwise.

  # TODO: Call rocksdb_key_may_exist_cf to improve performance for the case
  # when the key does not exist

  db.get(key, proc(data: openArray[byte]) = discard, columnFamily)

proc delete*(
    db: RocksDbReadWriteRef,
    key: openArray[byte],
    columnFamily = db.defaultCfName): RocksDBResult[void] =
  ## Delete the value for the given key from the specified column family.
  ## If the value does not exist, the delete will be a no-op.
  ## To check if the value exists before or after a delete, use `keyExists`.

  if key.len() == 0:
    return err("rocksdb: key is empty")

  let cfHandle = db.cfTable.get(columnFamily)
  if cfHandle.isNil:
    return err("rocksdb: unknown column family")

  var errors: cstring
  rocksdb_delete_cf(
    db.cPtr,
    db.writeOpts.cPtr,
    cfHandle.cPtr,
    cast[cstring](unsafeAddr key[0]),
    csize_t(key.len),
    cast[cstringArray](errors.addr))
  bailOnErrors(errors)

  ok()

proc openIterator*(
    db: RocksDbRef,
    columnFamily = db.defaultCfName): RocksDBResult[RocksIteratorRef] =
  ## Opens a `RocksIteratorRef` for the specified column family.
  doAssert not db.isClosed()

  let cfHandle = db.cfTable.get(columnFamily)
  if cfHandle.isNil():
    return err("rocksdb: unknown column family")

  let rocksIterPtr = rocksdb_create_iterator_cf(
    db.cPtr,
    db.readOpts.cPtr,
    cfHandle.cPtr)

  ok(newRocksIterator(rocksIterPtr))

proc openWriteBatch*(
    db: RocksDbReadWriteRef,
    columnFamily = db.defaultCfName): WriteBatchRef =
  ## Opens a `WriteBatchRef` which defaults to using the specified column family.
  doAssert not db.isClosed()

  newWriteBatch(db.cfTable, columnFamily)

proc write*(
    db: RocksDbReadWriteRef,
    updates: WriteBatchRef): RocksDBResult[void] =
  ## Apply the updates in the `WriteBatchRef` to the database.
  doAssert not db.isClosed()

  var errors: cstring
  rocksdb_write(
    db.cPtr,
    db.writeOpts.cPtr,
    updates.cPtr,
    cast[cstringArray](errors.addr))
  bailOnErrors(errors)

  ok()

proc ingestExternalFile*(
    db: RocksDbReadWriteRef,
    filePath: string,
    columnFamily = db.defaultCfName): RocksDBResult[void] =
  ## Ingest an external sst file into the database. The file will be ingested
  ## into the specified column family or the default column family if none is
  ## provided.
  doAssert not db.isClosed()

  let cfHandle = db.cfTable.get(columnFamily)
  if cfHandle.isNil():
    return err("rocksdb: unknown column family")

  var
    sstPath = filePath.cstring
    errors: cstring
  rocksdb_ingest_external_file_cf(
    db.cPtr,
    cfHandle.cPtr,
    cast[cstringArray](sstPath.addr), csize_t(1),
    db.ingestOptsPtr,
    cast[cstringArray](errors.addr))
  bailOnErrors(errors)

  ok()

proc close*(db: RocksDbRef) =
  ## Close the `RocksDbRef`, which will release the connection to the database
  ## and free the memory associated with it. `close` is idempotent and can
  ## safely be called multiple times. `close` is a no-op if the `RocksDbRef`
  ## is already closed.

  withLock(db.lock):
    if not db.isClosed():
      db.dbOpts.close()
      db.readOpts.close()
      db.cfTable.close()

      if db of RocksDbReadWriteRef:
        let db = RocksDbReadWriteRef(db)
        db.writeOpts.close()
        rocksdb_ingestexternalfileoptions_destroy(db.ingestOptsPtr)
        db.ingestOptsPtr = nil

      rocksdb_close(db.cPtr)
      db.cPtr = nil
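
A sketch of the zero-copy `onData` read path described in the `get` doc comment above (the helper is illustrative, not part of the commit):

```nim
import rocksdb

proc sumValueBytes(db: RocksDbRef, key: openArray[byte]): int =
  ## Sums the bytes of the stored value without materializing a seq[byte],
  ## returning -1 if the key is absent.
  var total = 0
  proc onData(data: openArray[byte]) =
    for b in data:
      total += b.int
  let found = db.get(key, onData).expect("get should not fail")
  if found: total else: -1
```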
152 rocksdb/rocksdb/rocksiterator.nim Normal file
@@ -0,0 +1,152 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

## A `RocksIteratorRef` is a reference to a RocksDB iterator which supports
## iterating over the key-value pairs in a column family.

{.push raises: [].}

import
  ./lib/librocksdb,
  ./internal/utils,
  ./rocksresult

export
  rocksresult

type
  RocksIteratorPtr* = ptr rocksdb_iterator_t

  RocksIteratorRef* = ref object
    cPtr: RocksIteratorPtr

proc newRocksIterator*(cPtr: RocksIteratorPtr): RocksIteratorRef =
  doAssert not cPtr.isNil()
  RocksIteratorRef(cPtr: cPtr)

proc isClosed*(iter: RocksIteratorRef): bool {.inline.} =
  ## Returns `true` if the iterator is closed and `false` otherwise.
  iter.cPtr.isNil()

proc seekToKey*(iter: RocksIteratorRef, key: openArray[byte]) =
  ## Seeks to the `key` argument in the column family. If the iterator is
  ## invalid afterwards (see `isValid`), it should be closed.
  ##
  ## It is not clear what happens when the `key` does not exist in the column
  ## family. The guess is that the iteration will proceed at the next key
  ## position. This is suggested by a comment from the Go port at
  ##
  ## //github.com/DanielMorsing/rocksdb/blob/master/iterator.go:
  ##
  ##   Seek moves the iterator to the position of the key given or, if the key
  ##   doesn't exist, the next key that does exist in the database. If the
  ##   key doesn't exist, and there is no next key, the Iterator becomes
  ##   invalid.
  ##
  doAssert not iter.isClosed()
  let (cKey, cLen) = (cast[cstring](unsafeAddr key[0]), csize_t(key.len))
  rocksdb_iter_seek(iter.cPtr, cKey, cLen)

proc seekToFirst*(iter: RocksIteratorRef) =
  ## Seeks to the first entry in the column family.
  doAssert not iter.isClosed()
  rocksdb_iter_seek_to_first(iter.cPtr)

proc seekToLast*(iter: RocksIteratorRef) =
  ## Seeks to the last entry in the column family.
  doAssert not iter.isClosed()
  rocksdb_iter_seek_to_last(iter.cPtr)

proc isValid*(iter: RocksIteratorRef): bool =
  ## Returns `true` if the iterator is valid and `false` otherwise.
  rocksdb_iter_valid(iter.cPtr).bool

proc next*(iter: RocksIteratorRef) =
  ## Seeks to the next entry in the column family.
  rocksdb_iter_next(iter.cPtr)

proc prev*(iter: RocksIteratorRef) =
  ## Seeks to the previous entry in the column family.
  rocksdb_iter_prev(iter.cPtr)

proc key*(iter: RocksIteratorRef, onData: DataProc) =
  ## Returns the current key using the provided `onData` callback.

  var kLen: csize_t
  let kData = rocksdb_iter_key(iter.cPtr, kLen.addr)

  if kData.isNil or kLen == 0:
    onData([])
  else:
    onData(kData.toOpenArrayByte(0, kLen.int - 1))

proc key*(iter: RocksIteratorRef): seq[byte] =
  ## Returns the current key.

  var res: seq[byte]
  proc onData(data: openArray[byte]) =
    res = @data

  iter.key(onData)
  res

proc value*(iter: RocksIteratorRef, onData: DataProc) =
  ## Returns the current value using the provided `onData` callback.

  var vLen: csize_t
  let vData = rocksdb_iter_value(iter.cPtr, vLen.addr)

  if vData.isNil or vLen == 0:
    onData([])
  else:
    onData(vData.toOpenArrayByte(0, vLen.int - 1))

proc value*(iter: RocksIteratorRef): seq[byte] =
  ## Returns the current value.

  var res: seq[byte]
  proc onData(data: openArray[byte]) =
    res = @data

  iter.value(onData)
  res

proc status*(iter: RocksIteratorRef): RocksDBResult[void] =
  ## Returns the status of the iterator.
  doAssert not iter.isClosed()

  var errors: cstring
  rocksdb_iter_get_error(iter.cPtr, cast[cstringArray](errors.addr))
  bailOnErrors(errors)

  ok()

proc close*(iter: RocksIteratorRef) =
  ## Closes the `RocksIteratorRef`.
  if not iter.isClosed():
    rocksdb_iter_destroy(iter.cPtr)
    iter.cPtr = nil

iterator pairs*(iter: RocksIteratorRef): tuple[key: seq[byte], value: seq[byte]] =
  ## Iterates over the key-value pairs in the column family, yielding them in
  ## the form of a tuple. The iterator is automatically closed after the
  ## iteration.
  doAssert not iter.isClosed()
  defer: iter.close()

  iter.seekToFirst()
  while iter.isValid():
    var
      key: seq[byte]
      value: seq[byte]
    iter.key(proc(data: openArray[byte]) = key = @data)
    iter.value(proc(data: openArray[byte]) = value = @data)

    iter.next()
    yield (key, value)
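
A sketch of the `pairs` iterator above (illustrative; `openIterator` comes from `rocksdb/rocksdb.nim`, and `pairs` closes the iterator when the loop ends):

```nim
import rocksdb

proc dumpAll(db: RocksDbRef) =
  let iter = db.openIterator().expect("iterator should open")
  for key, value in iter:
    echo key, " -> ", value
```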
21 rocksdb/rocksdb/rocksresult.nim Normal file
@@ -0,0 +1,21 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  results

export
  results

type
  RocksDBResult*[T] = Result[T, string]

  DataProc* = proc(val: openArray[byte]) {.gcsafe, raises: [].}
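
A sketch of two common ways to consume a `RocksDBResult` (both come from the exported nim-results library; the helper proc is illustrative):

```nim
import rocksdb

proc resultStyles(db: RocksDbRef, key: seq[byte]) =
  # 1. explicit check
  let res = db.get(key)
  if res.isOk():
    echo res.get()
  else:
    echo "error: ", res.error()

  # 2. valueOr: unwrap, or handle the injected `error` inline
  let value = db.get(key).valueOr:
    echo "lookup failed: ", error
    return
  echo value
```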
101
rocksdb/rocksdb/sstfilewriter.nim
Normal file
101
rocksdb/rocksdb/sstfilewriter.nim
Normal file
@ -0,0 +1,101 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

## A `SstFileWriterRef` is used to create sst files that can be added to the database later.

{.push raises: [].}

import
  ./lib/librocksdb,
  ./internal/utils,
  ./options/dbopts,
  ./rocksresult

export
  rocksresult

type
  SstFileWriterPtr* = ptr rocksdb_sstfilewriter_t
  EnvOptionsPtr = ptr rocksdb_envoptions_t

  SstFileWriterRef* = ref object
    cPtr: SstFileWriterPtr
    envOptsPtr: EnvOptionsPtr
    dbOpts: DbOptionsRef

proc openSstFileWriter*(
    filePath: string,
    dbOpts = defaultDbOptions()): RocksDBResult[SstFileWriterRef] =
  ## Creates a new `SstFileWriterRef` and opens the file at the given `filePath`.
  doAssert not dbOpts.isClosed()

  let envOptsPtr = rocksdb_envoptions_create()
  let writer = SstFileWriterRef(
    cPtr: rocksdb_sstfilewriter_create(envOptsPtr, dbOpts.cPtr),
    envOptsPtr: envOptsPtr,
    dbOpts: dbOpts)

  var errors: cstring
  rocksdb_sstfilewriter_open(
    writer.cPtr,
    filePath.cstring,
    cast[cstringArray](errors.addr))
  bailOnErrors(errors)

  ok(writer)

proc isClosed*(writer: SstFileWriterRef): bool {.inline.} =
  ## Returns `true` if the `SstFileWriterRef` is closed and `false` otherwise.
  writer.cPtr.isNil()

proc put*(
    writer: SstFileWriterRef,
    key: openArray[byte],
    val: openArray[byte]): RocksDBResult[void] =
  ## Add a key-value pair to the sst file.

  var errors: cstring
  rocksdb_sstfilewriter_put(
    writer.cPtr,
    cast[cstring](unsafeAddr key[0]), csize_t(key.len),
    cast[cstring](unsafeAddr val[0]), csize_t(val.len),
    cast[cstringArray](errors.addr))
  bailOnErrors(errors)

  ok()

proc delete*(writer: SstFileWriterRef, key: openArray[byte]): RocksDBResult[void] =
  ## Delete a key-value pair from the sst file.

  var errors: cstring
  rocksdb_sstfilewriter_delete(
    writer.cPtr,
    cast[cstring](unsafeAddr key[0]), csize_t(key.len),
    cast[cstringArray](errors.addr))
  bailOnErrors(errors)

  ok()

proc finish*(writer: SstFileWriterRef): RocksDBResult[void] =
  ## Finish the process and close the sst file.
  doAssert not writer.isClosed()

  var errors: cstring
  rocksdb_sstfilewriter_finish(writer.cPtr, cast[cstringArray](errors.addr))
  bailOnErrors(errors)

  ok()

proc close*(writer: SstFileWriterRef) =
  ## Closes the `SstFileWriterRef`.
  if not writer.isClosed():
    rocksdb_envoptions_destroy(writer.envOptsPtr)
    writer.envOptsPtr = nil
    rocksdb_sstfilewriter_destroy(writer.cPtr)
    writer.cPtr = nil
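A hedged usage sketch of the writer; note that ingesting the finished file into a live database is a separate RocksDB API and not part of this module, and the file path is illustrative:

```nim
# Hedged sketch: write ascending keys into an sst file, then finalize it.
# RocksDB requires keys to be added in sorted order.
let writerRes = openSstFileWriter("/tmp/example.sst")
if writerRes.isOk():
  let writer = writerRes.get()
  doAssert writer.put(@[byte(1)], @[byte(10)]).isOk()
  doAssert writer.put(@[byte(2)], @[byte(20)]).isOk()
  doAssert writer.finish().isOk()   # flushes and closes the file on disk
  writer.close()
```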
119
rocksdb/rocksdb/transactiondb.nim
Normal file
@@ -0,0 +1,119 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

## A `TransactionDbRef` can be used to open a connection to the RocksDB database
## with support for transactional operations against multiple column families.
## To create a new transaction call `beginTransaction` which will return a
## `TransactionRef`. To commit or rollback the transaction call `commit` or
## `rollback` on the `TransactionRef` type after applying changes to the transaction.

{.push raises: [].}

import
  std/[sequtils, locks],
  ./lib/librocksdb,
  ./options/[dbopts, readopts, writeopts],
  ./transactions/[transaction, txdbopts, txopts],
  ./columnfamily/[cfopts, cfdescriptor, cfhandle],
  ./internal/[cftable, utils],
  ./rocksresult

export
  dbopts,
  txdbopts,
  cfdescriptor,
  readopts,
  writeopts,
  txopts,
  transaction,
  rocksresult

type
  TransactionDbPtr* = ptr rocksdb_transactiondb_t

  TransactionDbRef* = ref object
    lock: Lock
    cPtr: TransactionDbPtr
    path: string
    dbOpts: DbOptionsRef
    txDbOpts: TransactionDbOptionsRef
    cfTable: ColFamilyTableRef

proc openTransactionDb*(
    path: string,
    dbOpts = defaultDbOptions(),
    txDbOpts = defaultTransactionDbOptions(),
    columnFamilies: openArray[ColFamilyDescriptor] = []): RocksDBResult[TransactionDbRef] =
  ## Open a `TransactionDbRef` with the given options and column families.
  ## If no column families are provided the default column family will be used.
  ## If no options are provided the default options will be used.

  var cfs = columnFamilies.toSeq()
  if DEFAULT_COLUMN_FAMILY_NAME notin columnFamilies.mapIt(it.name()):
    cfs.add(defaultColFamilyDescriptor())

  var
    cfNames = cfs.mapIt(it.name().cstring)
    cfOpts = cfs.mapIt(it.options.cPtr)
    cfHandles = newSeq[ColFamilyHandlePtr](cfs.len)
    errors: cstring

  let txDbPtr = rocksdb_transactiondb_open_column_families(
    dbOpts.cPtr,
    txDbOpts.cPtr,
    path.cstring,
    cfNames.len().cint,
    cast[cstringArray](cfNames[0].addr),
    cfOpts[0].addr,
    cfHandles[0].addr,
    cast[cstringArray](errors.addr))
  bailOnErrors(errors)

  let db = TransactionDbRef(
    lock: createLock(),
    cPtr: txDbPtr,
    path: path,
    dbOpts: dbOpts,
    txDbOpts: txDbOpts,
    cfTable: newColFamilyTable(cfNames.mapIt($it), cfHandles))
  ok(db)

proc isClosed*(db: TransactionDbRef): bool {.inline.} =
  ## Returns `true` if the `TransactionDbRef` has been closed.
  db.cPtr.isNil()

proc beginTransaction*(
    db: TransactionDbRef,
    readOpts = defaultReadOptions(),
    writeOpts = defaultWriteOptions(),
    txOpts = defaultTransactionOptions(),
    columnFamily = DEFAULT_COLUMN_FAMILY_NAME): TransactionRef =
  ## Begin a new transaction against the database. The transaction will default
  ## to using the specified column family. If no column family is specified
  ## then the default column family will be used.
  doAssert not db.isClosed()

  let txPtr = rocksdb_transaction_begin(
    db.cPtr,
    writeOpts.cPtr,
    txOpts.cPtr,
    nil)

  newTransaction(txPtr, readOpts, writeOpts, txOpts, columnFamily, db.cfTable)

proc close*(db: TransactionDbRef) =
  ## Close the `TransactionDbRef`.
  withLock(db.lock):
    if not db.isClosed():
      db.dbOpts.close()
      db.txDbOpts.close()
      db.cfTable.close()

      rocksdb_transactiondb_close(db.cPtr)
      db.cPtr = nil
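A hedged end-to-end sketch of the flow described in the module comment (the path and keys are illustrative):

```nim
# Hedged sketch: open a transaction database and commit one atomic update.
let dbRes = openTransactionDb("/tmp/txdb")
if dbRes.isOk():
  let db = dbRes.get()
  let tx = db.beginTransaction()
  doAssert tx.put(@[byte(1)], @[byte(10)]).isOk()
  doAssert tx.commit().isOk()   # use tx.rollback() instead to discard the update
  tx.close()
  db.close()
```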
193
rocksdb/rocksdb/transactions/transaction.nim
Normal file
@@ -0,0 +1,193 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

## To use transactions, you must first create a `TransactionDbRef`. Then to
## create a transaction call `beginTransaction` on the `TransactionDbRef`.
## `commit` and `rollback` are used to commit or rollback a transaction.
## The `TransactionDbRef` currently supports `put`, `delete` and `get` operations.
## Keys that have been written to a transaction but are not yet committed can be
## read from the transaction using `get`. Uncommitted updates will not be visible
## to other transactions until they are committed to the database.
## Multiple column families can be written to and read from in a single transaction
## but a default column family will be used if none is specified in each call.

{.push raises: [].}

import
  ../lib/librocksdb,
  ../options/[readopts, writeopts],
  ../internal/[cftable, utils],
  ../rocksresult,
  ./txopts

export
  rocksresult

type
  TransactionPtr* = ptr rocksdb_transaction_t

  TransactionRef* = ref object
    cPtr: TransactionPtr
    readOpts: ReadOptionsRef
    writeOpts: WriteOptionsRef
    txOpts: TransactionOptionsRef
    defaultCfName: string
    cfTable: ColFamilyTableRef

proc newTransaction*(
    cPtr: TransactionPtr,
    readOpts: ReadOptionsRef,
    writeOpts: WriteOptionsRef,
    txOpts: TransactionOptionsRef,
    defaultCfName: string,
    cfTable: ColFamilyTableRef): TransactionRef =

  TransactionRef(
    cPtr: cPtr,
    readOpts: readOpts,
    writeOpts: writeOpts,
    txOpts: txOpts,
    defaultCfName: defaultCfName,
    cfTable: cfTable)

proc isClosed*(tx: TransactionRef): bool {.inline.} =
  ## Returns `true` if the `TransactionRef` has been closed.
  tx.cPtr.isNil()

proc get*(
    tx: TransactionRef,
    key: openArray[byte],
    onData: DataProc,
    columnFamily = tx.defaultCfName): RocksDBResult[bool] =
  ## Get the value for a given key from the transaction using the provided
  ## `onData` callback.

  if key.len() == 0:
    return err("rocksdb: key is empty")

  let cfHandle = tx.cfTable.get(columnFamily)
  if cfHandle.isNil():
    return err("rocksdb: unknown column family")

  var
    len: csize_t
    errors: cstring
  let data = rocksdb_transaction_get_cf(
    tx.cPtr,
    tx.readOpts.cPtr,
    cfHandle.cPtr,
    cast[cstring](unsafeAddr key[0]),
    csize_t(key.len),
    len.addr,
    cast[cstringArray](errors.addr))
  bailOnErrors(errors)

  if data.isNil():
    doAssert len == 0
    ok(false)
  else:
    onData(toOpenArrayByte(data, 0, len.int - 1))
    rocksdb_free(data)
    ok(true)

proc get*(
    tx: TransactionRef,
    key: openArray[byte],
    columnFamily = tx.defaultCfName): RocksDBResult[seq[byte]] =
  ## Get the value for a given key from the transaction.

  var dataRes: RocksDBResult[seq[byte]]
  proc onData(data: openArray[byte]) =
    dataRes.ok(@data)

  let res = tx.get(key, onData, columnFamily)
  if res.isOk():
    return dataRes

  dataRes.err(res.error())

proc put*(
    tx: TransactionRef,
    key, val: openArray[byte],
    columnFamily = tx.defaultCfName): RocksDBResult[void] =
  ## Put the value for the given key into the transaction.

  if key.len() == 0:
    return err("rocksdb: key is empty")

  let cfHandle = tx.cfTable.get(columnFamily)
  if cfHandle.isNil():
    return err("rocksdb: unknown column family")

  var errors: cstring
  rocksdb_transaction_put_cf(
    tx.cPtr,
    cfHandle.cPtr,
    cast[cstring](unsafeAddr key[0]),
    csize_t(key.len),
    cast[cstring](if val.len > 0: unsafeAddr val[0] else: nil),
    csize_t(val.len),
    cast[cstringArray](errors.addr))
  bailOnErrors(errors)

  ok()

proc delete*(
    tx: TransactionRef,
    key: openArray[byte],
    columnFamily = tx.defaultCfName): RocksDBResult[void] =
  ## Delete the value for the given key from the transaction.

  if key.len() == 0:
    return err("rocksdb: key is empty")

  let cfHandle = tx.cfTable.get(columnFamily)
  if cfHandle.isNil():
    return err("rocksdb: unknown column family")

  var errors: cstring
  rocksdb_transaction_delete_cf(
    tx.cPtr,
    cfHandle.cPtr,
    cast[cstring](unsafeAddr key[0]),
    csize_t(key.len),
    cast[cstringArray](errors.addr))
  bailOnErrors(errors)

  ok()

proc commit*(tx: TransactionRef): RocksDBResult[void] =
  ## Commit the transaction.
  doAssert not tx.isClosed()

  var errors: cstring
  rocksdb_transaction_commit(tx.cPtr, cast[cstringArray](errors.addr))
  bailOnErrors(errors)

  ok()

proc rollback*(tx: TransactionRef): RocksDBResult[void] =
  ## Rollback the transaction.
  doAssert not tx.isClosed()

  var errors: cstring
  rocksdb_transaction_rollback(tx.cPtr, cast[cstringArray](errors.addr))
  bailOnErrors(errors)

  ok()

proc close*(tx: TransactionRef) =
  ## Close the `TransactionRef`.
  if not tx.isClosed():
    tx.readOpts.close()
    tx.writeOpts.close()
    tx.txOpts.close()

    rocksdb_transaction_destroy(tx.cPtr)
    tx.cPtr = nil
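A hedged sketch of the read-your-own-writes behaviour described in the module comment, assuming `db` is an open `TransactionDbRef`:

```nim
# Hedged sketch: an uncommitted put is visible to `get` on the same transaction.
let tx = db.beginTransaction()
doAssert tx.put(@[byte(1)], @[byte(42)]).isOk()
let v = tx.get(@[byte(1)])    # ok(@[byte(42)]) even before commit
doAssert v.isOk()
doAssert tx.commit().isOk()   # now visible outside this transaction
tx.close()
```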
40
rocksdb/rocksdb/transactions/txdbopts.nim
Normal file
@@ -0,0 +1,40 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  ../lib/librocksdb

type
  TransactionDbOptionsPtr* = ptr rocksdb_transactiondb_options_t

  TransactionDbOptionsRef* = ref object
    cPtr: TransactionDbOptionsPtr

proc newTransactionDbOptions*(): TransactionDbOptionsRef =
  TransactionDbOptionsRef(cPtr: rocksdb_transactiondb_options_create())

proc isClosed*(txDbOpts: TransactionDbOptionsRef): bool {.inline.} =
  txDbOpts.cPtr.isNil()

proc cPtr*(txDbOpts: TransactionDbOptionsRef): TransactionDbOptionsPtr =
  doAssert not txDbOpts.isClosed()
  txDbOpts.cPtr

# TODO: Add setters and getters for transaction db options properties.

proc defaultTransactionDbOptions*(): TransactionDbOptionsRef {.inline.} =
  newTransactionDbOptions()
  # TODO: set preferred defaults

proc close*(txDbOpts: TransactionDbOptionsRef) =
  if not txDbOpts.isClosed():
    rocksdb_transactiondb_options_destroy(txDbOpts.cPtr)
    txDbOpts.cPtr = nil
40
rocksdb/rocksdb/transactions/txopts.nim
Normal file
@@ -0,0 +1,40 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  ../lib/librocksdb

type
  TransactionOptionsPtr* = ptr rocksdb_transaction_options_t

  TransactionOptionsRef* = ref object
    cPtr: TransactionOptionsPtr

proc newTransactionOptions*(): TransactionOptionsRef =
  TransactionOptionsRef(cPtr: rocksdb_transaction_options_create())

proc isClosed*(txOpts: TransactionOptionsRef): bool {.inline.} =
  txOpts.cPtr.isNil()

proc cPtr*(txOpts: TransactionOptionsRef): TransactionOptionsPtr =
  doAssert not txOpts.isClosed()
  txOpts.cPtr

# TODO: Add setters and getters for transaction options properties.

proc defaultTransactionOptions*(): TransactionOptionsRef {.inline.} =
  newTransactionOptions()
  # TODO: set preferred defaults

proc close*(txOpts: TransactionOptionsRef) =
  if not txOpts.isClosed():
    rocksdb_transaction_options_destroy(txOpts.cPtr)
    txOpts.cPtr = nil
103
rocksdb/rocksdb/writebatch.nim
Normal file
@@ -0,0 +1,103 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

## A `WriteBatchRef` holds a collection of updates to apply atomically to the database.

{.push raises: [].}

import
  ./lib/librocksdb,
  ./internal/[cftable, utils],
  ./rocksresult

export
  rocksresult

type
  WriteBatchPtr* = ptr rocksdb_writebatch_t

  WriteBatchRef* = ref object
    cPtr: WriteBatchPtr
    defaultCfName: string
    cfTable: ColFamilyTableRef

proc newWriteBatch*(cfTable: ColFamilyTableRef, defaultCfName: string): WriteBatchRef =
  WriteBatchRef(
    cPtr: rocksdb_writebatch_create(),
    defaultCfName: defaultCfName,
    cfTable: cfTable)

proc isClosed*(batch: WriteBatchRef): bool {.inline.} =
  ## Returns `true` if the `WriteBatchRef` has been closed and `false` otherwise.
  batch.cPtr.isNil()

proc cPtr*(batch: WriteBatchRef): WriteBatchPtr =
  ## Get the underlying write batch pointer.
  doAssert not batch.isClosed()
  batch.cPtr

proc clear*(batch: WriteBatchRef) =
  ## Clears the write batch.
  doAssert not batch.isClosed()
  rocksdb_writebatch_clear(batch.cPtr)

proc count*(batch: WriteBatchRef): int =
  ## Get the number of updates in the write batch.
  doAssert not batch.isClosed()
  rocksdb_writebatch_count(batch.cPtr).int

proc put*(
    batch: WriteBatchRef,
    key, val: openArray[byte],
    columnFamily = DEFAULT_COLUMN_FAMILY_NAME): RocksDBResult[void] =
  ## Add a put operation to the write batch.

  if key.len() == 0:
    return err("rocksdb: key is empty")

  let cfHandle = batch.cfTable.get(columnFamily)
  if cfHandle.isNil():
    return err("rocksdb: unknown column family")

  rocksdb_writebatch_put_cf(
    batch.cPtr,
    cfHandle.cPtr,
    cast[cstring](unsafeAddr key[0]),
    csize_t(key.len),
    cast[cstring](if val.len > 0: unsafeAddr val[0] else: nil),
    csize_t(val.len))

  ok()

proc delete*(
    batch: WriteBatchRef,
    key: openArray[byte],
    columnFamily = DEFAULT_COLUMN_FAMILY_NAME): RocksDBResult[void] =
  ## Add a delete operation to the write batch.

  if key.len() == 0:
    return err("rocksdb: key is empty")

  let cfHandle = batch.cfTable.get(columnFamily)
  if cfHandle.isNil():
    return err("rocksdb: unknown column family")

  rocksdb_writebatch_delete_cf(
    batch.cPtr,
    cfHandle.cPtr,
    cast[cstring](unsafeAddr key[0]),
    csize_t(key.len))

  ok()

proc close*(batch: WriteBatchRef) =
  ## Close the `WriteBatchRef`.
  if not batch.isClosed():
    rocksdb_writebatch_destroy(batch.cPtr)
    batch.cPtr = nil
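A hedged usage sketch; applying the batch happens through the database type in the wrapper's main module, so `db.openWriteBatch()` and `db.write(batch)` are assumptions here rather than part of this file:

```nim
# Hedged sketch: stage several updates, then apply them in one atomic write.
# `db.openWriteBatch()` and `db.write(batch)` are assumed to exist in rocksdb.nim.
let batch = db.openWriteBatch()
doAssert batch.put(@[byte(1)], @[byte(10)]).isOk()
doAssert batch.delete(@[byte(2)]).isOk()
doAssert batch.count() == 2       # two staged updates
doAssert db.write(batch).isOk()   # applied atomically
batch.close()
```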
26
rocksdb/rocksdbds.nim
Normal file
@@ -0,0 +1,26 @@
import std/times
import std/options

import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/sqlite3_abi
from pkg/stew/results as stewResults import isErr
import pkg/upraises

import pkg/datastore

push: {.upraises: [].}

type
  # Placeholder datastore backed by RocksDB; the methods below are stubs
  # to be filled in as the integration progresses.
  RocksDbDatastore* = ref object of Datastore
    a: string

method get*(self: RocksDbDatastore, key: Key): Future[?!seq[byte]] {.async, locks: "unknown".} =
  raiseAssert("RocksDbDatastore.get is not implemented yet")

method put*(self: RocksDbDatastore, key: Key, data: seq[byte]): Future[?!void] {.async, locks: "unknown".} =
  raiseAssert("RocksDbDatastore.put is not implemented yet")

proc new*(T: type RocksDbDatastore, dbName: string): ?!T =
  raiseAssert("RocksDbDatastore.new is not implemented yet")
48
rocksdb/scripts/build_static_deps.sh
Normal file
@@ -0,0 +1,48 @@
#!/usr/bin/env bash

# Nim-RocksDB
# Copyright 2018-2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

set -e

cd "$(dirname "${BASH_SOURCE[0]}")"/..

REPO_DIR="${PWD}"
ROCKSDB_LIB_DIR="${REPO_DIR}/vendor/rocksdb"
BUILD_DEST="${REPO_DIR}/build/lib"

[[ -z "$NPROC" ]] && NPROC=2 # number of CPU cores available

git submodule update --init

export DISABLE_WARNING_AS_ERROR=1

export ROCKSDB_DISABLE_SNAPPY=1
export ROCKSDB_DISABLE_ZLIB=1
export ROCKSDB_DISABLE_BZIP=1

export PORTABLE=1
export DEBUG_LEVEL=0

make -C "${ROCKSDB_LIB_DIR}" -j${NPROC} liblz4.a libzstd.a --no-print-directory > /dev/null

export EXTRA_CFLAGS="-fpermissive -Wno-error -w -I${ROCKSDB_LIB_DIR}/lz4-1.9.4/lib -I${ROCKSDB_LIB_DIR}/zstd-1.5.5/lib -DLZ4 -DZSTD"
export EXTRA_CXXFLAGS="-fpermissive -Wno-error -w -I${ROCKSDB_LIB_DIR}/lz4-1.9.4/lib -I${ROCKSDB_LIB_DIR}/zstd-1.5.5/lib -DLZ4 -DZSTD"

make -C "${ROCKSDB_LIB_DIR}" -j${NPROC} static_lib --no-print-directory > /dev/null

#cat "${REPO_DIR}/vendor/rocksdb/make_config.mk"

mkdir -p "${BUILD_DEST}"

cp "${ROCKSDB_LIB_DIR}/liblz4.a" "${BUILD_DEST}/"
cp "${ROCKSDB_LIB_DIR}/libzstd.a" "${BUILD_DEST}/"
cp "${ROCKSDB_LIB_DIR}/librocksdb.a" "${BUILD_DEST}/"
6
rocksdb/tests/.gitignore
vendored
Normal file
@@ -0,0 +1,6 @@
# ignore all executable files
*
!*.*
!*/
*.exe
59
rocksdb/tests/columnfamily/test_cfdescriptor.nim
Normal file
@@ -0,0 +1,59 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  unittest2,
  ../../rocksdb/internal/utils,
  ../../rocksdb/columnfamily/cfdescriptor

suite "ColFamilyDescriptor Tests":

  const TEST_CF_NAME = "test"

  test "Test initColFamilyDescriptor":
    var descriptor = initColFamilyDescriptor(TEST_CF_NAME)

    check:
      descriptor.name() == TEST_CF_NAME
      not descriptor.options().isNil()
      not descriptor.isDefault()

    descriptor.close()

  test "Test initColFamilyDescriptor with options":
    var descriptor = initColFamilyDescriptor(TEST_CF_NAME, defaultColFamilyOptions())

    check:
      descriptor.name() == TEST_CF_NAME
      not descriptor.options().isNil()
      not descriptor.isDefault()

    descriptor.close()

  test "Test defaultColFamilyDescriptor":
    var descriptor = defaultColFamilyDescriptor()

    check:
      descriptor.name() == DEFAULT_COLUMN_FAMILY_NAME
      not descriptor.options().isNil()
      descriptor.isDefault()

    descriptor.close()

  test "Test close":
    var descriptor = defaultColFamilyDescriptor()

    check not descriptor.isClosed()
    descriptor.close()
    check descriptor.isClosed()
    descriptor.close()
    check descriptor.isClosed()
67
rocksdb/tests/columnfamily/test_cfhandle.nim
Normal file
@@ -0,0 +1,67 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  std/os,
  tempfile,
  unittest2,
  ../../rocksdb/lib/librocksdb,
  ../../rocksdb/columnfamily/cfhandle

suite "ColFamilyHandleRef Tests":

  const TEST_CF_NAME = "test"

  setup:
    let
      dbPath = mkdtemp() / "data"
      dbOpts = rocksdb_options_create()
      cfOpts = rocksdb_options_create()

    var
      errors: cstring

    rocksdb_options_set_create_if_missing(dbOpts, 1)

    let db = rocksdb_open(dbOpts, dbPath.cstring, cast[cstringArray](errors.addr))
    doAssert errors.isNil()
    doAssert not db.isNil()

    let cfHandlePtr = rocksdb_create_column_family(
      db,
      cfOpts,
      TEST_CF_NAME.cstring,
      cast[cstringArray](errors.addr))
    doAssert errors.isNil()
    doAssert not cfHandlePtr.isNil()

  teardown:
    rocksdb_close(db)
    removeDir($dbPath)

  test "Test newColFamilyHandle":
    var cfHandle = newColFamilyHandle(cfHandlePtr)

    check:
      not cfHandle.cPtr.isNil()
      cfHandle.cPtr == cfHandlePtr

    cfHandle.close()

  test "Test close":
    var cfHandle = newColFamilyHandle(cfHandlePtr)

    check not cfHandle.isClosed()
    cfHandle.close()
    check cfHandle.isClosed()
    cfHandle.close()
    check cfHandle.isClosed()
47
rocksdb/tests/columnfamily/test_cfopts.nim
Normal file
@@ -0,0 +1,47 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  unittest2,
  ../../rocksdb/columnfamily/cfopts

suite "ColFamilyOptionsRef Tests":

  test "Test newColFamilyOptions":
    var cfOpts = newColFamilyOptions()

    check not cfOpts.cPtr.isNil()
    # check not cfOpts.getCreateMissingColumnFamilies()

    cfOpts.setCreateMissingColumnFamilies(true)
    # check cfOpts.getCreateMissingColumnFamilies()

    cfOpts.close()

  test "Test defaultColFamilyOptions":
    var cfOpts = defaultColFamilyOptions()

    check not cfOpts.cPtr.isNil()
    # check cfOpts.getCreateMissingColumnFamilies()

    cfOpts.setCreateMissingColumnFamilies(false)
    # check not cfOpts.getCreateMissingColumnFamilies()

    cfOpts.close()

  test "Test close":
    var cfOpts = defaultColFamilyOptions()

    check not cfOpts.isClosed()
    cfOpts.close()
    check cfOpts.isClosed()
    cfOpts.close()
    check cfOpts.isClosed()
77
rocksdb/tests/internal/test_cftable.nim
Normal file
@@ -0,0 +1,77 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  std/os,
  tempfile,
  unittest2,
  ../../rocksdb/lib/librocksdb,
  ../../rocksdb/columnfamily/cfhandle,
  ../../rocksdb/internal/cftable

suite "ColFamilyTableRef Tests":

  const TEST_CF_NAME = "test"

  setup:
    let
      dbPath = mkdtemp() / "data"
      dbOpts = rocksdb_options_create()
      cfOpts = rocksdb_options_create()

    var
      errors: cstring

    rocksdb_options_set_create_if_missing(dbOpts, 1)

    let db = rocksdb_open(dbOpts, dbPath.cstring, cast[cstringArray](errors.addr))
    doAssert errors.isNil()
    doAssert not db.isNil()

    let cfHandlePtr = rocksdb_create_column_family(
      db,
      cfOpts,
      TEST_CF_NAME.cstring,
      cast[cstringArray](errors.addr))
    doAssert errors.isNil()
    doAssert not cfHandlePtr.isNil()

  teardown:
    rocksdb_close(db)
    removeDir($dbPath)

  test "Test newColFamilyTable":
    var cfTable = newColFamilyTable(
      @[TEST_CF_NAME, TEST_CF_NAME],
      @[cfHandlePtr, cfHandlePtr])

    check cfTable.get(TEST_CF_NAME).cPtr() == cfHandlePtr
    check not cfTable.isClosed()

    # doesn't exist
    check cfTable.get("other").isNil()
    check not cfTable.isClosed()

    cfTable.close()

  test "Test close":
    var cfTable = newColFamilyTable(@[TEST_CF_NAME], @[cfHandlePtr])

    let cfHandle = cfTable.get(TEST_CF_NAME)

    check not cfHandle.isClosed()
    check not cfTable.isClosed()
    cfTable.close()
    check cfHandle.isClosed()
    check cfTable.isClosed()
    cfTable.close()
    check cfTable.isClosed()
94
rocksdb/tests/lib/test_librocksdb.nim
Normal file
@@ -0,0 +1,94 @@
# Nim-RocksDB
# Copyright 2018-2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  std/[cpuinfo, os],
  tempfile,
  unittest2,
  ../../rocksdb/lib/librocksdb

suite "librocksdb C wrapper Tests":
  setup:
    let
      dbPath = mkdtemp().cstring
      dbBackupPath = mkdtemp().cstring

  teardown:
    removeDir($dbPath)
    removeDir($dbBackupPath)

  test "Simple create-update-close example":
    var
      db: ptr rocksdb_t
      be: ptr rocksdb_backup_engine_t
      options = rocksdb_options_create()

    let cpus = countProcessors()
    rocksdb_options_increase_parallelism(options, cpus.int32)
    # This requires snappy - disabled because rocksdb is not always compiled with
    # snappy support (for example Fedora 28, certain Ubuntu versions)
    # rocksdb_options_optimize_level_style_compaction(options, 0);
    # create the DB if it's not already present
    rocksdb_options_set_create_if_missing(options, 1)

    # open DB
    var err: cstringArray # memory leak: example code does not free error string!
    db = rocksdb_open(options, dbPath, err)
    check: err.isNil

    # open Backup Engine that we will use for backing up our database
    be = rocksdb_backup_engine_open(options, dbBackupPath, err)
    check: err.isNil

    # Put key-value
    var writeOptions = rocksdb_writeoptions_create()
    let key = "key"
    let put_value = "value"
    rocksdb_put(
      db, writeOptions, key.cstring, csize_t(key.len),
      put_value.cstring, csize_t(put_value.len), err)
    check: err.isNil

    # Get value
    var readOptions = rocksdb_readoptions_create()
    var len: csize_t
    let raw_value = rocksdb_get(
      db, readOptions, key.cstring, csize_t(key.len), addr len, err) # Important: rocksdb_get is not null-terminated
    check: err.isNil

    # Copy it to a regular Nim string (copyMem workaround because non-null terminated)
    var get_value = newString(int(len))
    copyMem(addr get_value[0], unsafeAddr raw_value[0], int(len) * sizeof(char))

    check: $get_value == $put_value

    # create new backup in a directory specified by DBBackupPath
    rocksdb_backup_engine_create_new_backup(be, db, err)
    check: err.isNil

    rocksdb_close(db)

    # If something is wrong, you might want to restore data from last backup
    var restoreOptions = rocksdb_restore_options_create()
    rocksdb_backup_engine_restore_db_from_latest_backup(be, dbPath, dbPath,
                                                        restoreOptions, err)
    check: err.isNil
    rocksdb_restore_options_destroy(restoreOptions)

    db = rocksdb_open(options, dbPath, err)
    check: err.isNil

    # cleanup
    rocksdb_writeoptions_destroy(writeOptions)
    rocksdb_readoptions_destroy(readOptions)
    rocksdb_options_destroy(options)
    rocksdb_backup_engine_close(be)
    rocksdb_close(db)
39
rocksdb/tests/options/test_backupopts.nim
Normal file
@@ -0,0 +1,39 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  unittest2,
  ../../rocksdb/options/backupopts

suite "BackupEngineOptionsRef Tests":

  test "Test newBackupEngineOptions":
    var backupOpts = newBackupEngineOptions()

    check not backupOpts.cPtr.isNil()

    backupOpts.close()

  test "Test defaultBackupEngineOptions":
    var backupOpts = defaultBackupEngineOptions()

    check not backupOpts.cPtr.isNil()

    backupOpts.close()

  test "Test close":
    var backupOpts = defaultBackupEngineOptions()

    check not backupOpts.isClosed()
    backupOpts.close()
    check backupOpts.isClosed()
    backupOpts.close()
    check backupOpts.isClosed()
61
rocksdb/tests/options/test_dbopts.nim
Normal file
@@ -0,0 +1,61 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  unittest2,
  ../../rocksdb/options/dbopts

suite "DbOptionsRef Tests":

  test "Test newDbOptions":
    var dbOpts = newDbOptions()

    check not dbOpts.cPtr.isNil()

    dbOpts.setCreateIfMissing(true)
    dbOpts.setMaxOpenFiles(10)
    dbOpts.setCreateMissingColumnFamilies(false)

    # check:
    #   dbOpts.getCreateIfMissing()
    #   dbOpts.getMaxOpenFiles() == 10
    #   not dbOpts.getCreateMissingColumnFamilies()

    dbOpts.close()

  test "Test defaultDbOptions":
    var dbOpts = defaultDbOptions()

    check:
      not dbOpts.cPtr.isNil()
      # dbOpts.getCreateIfMissing()
      # dbOpts.getMaxOpenFiles() == -1
      # dbOpts.getCreateMissingColumnFamilies()

    dbOpts.setCreateIfMissing(false)
    dbOpts.setMaxOpenFiles(100)
    dbOpts.setCreateMissingColumnFamilies(false)

    # check:
    #   not dbOpts.getCreateIfMissing()
    #   dbOpts.getMaxOpenFiles() == 100
    #   not dbOpts.getCreateMissingColumnFamilies()

    dbOpts.close()

  test "Test close":
    var dbOpts = defaultDbOptions()

    check not dbOpts.isClosed()
    dbOpts.close()
    check dbOpts.isClosed()
    dbOpts.close()
    check dbOpts.isClosed()
39
rocksdb/tests/options/test_readopts.nim
Normal file
@@ -0,0 +1,39 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  unittest2,
  ../../rocksdb/options/readopts

suite "ReadOptionsRef Tests":

  test "Test newReadOptions":
    var readOpts = newReadOptions()

    check not readOpts.cPtr.isNil()

    readOpts.close()

  test "Test defaultReadOptions":
    var readOpts = defaultReadOptions()

    check not readOpts.cPtr.isNil()

    readOpts.close()

  test "Test close":
    var readOpts = defaultReadOptions()

    check not readOpts.isClosed()
    readOpts.close()
    check readOpts.isClosed()
    readOpts.close()
    check readOpts.isClosed()
39
rocksdb/tests/options/test_writeopts.nim
Normal file
@@ -0,0 +1,39 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  unittest2,
  ../../rocksdb/options/writeopts

suite "WriteOptionsRef Tests":

  test "Test newWriteOptions":
    var writeOpts = newWriteOptions()

    check not writeOpts.cPtr.isNil()

    writeOpts.close()

  test "Test defaultWriteOptions":
    var writeOpts = defaultWriteOptions()

    check not writeOpts.cPtr.isNil()

    writeOpts.close()

  test "Test close":
    var writeOpts = defaultWriteOptions()

    check not writeOpts.isClosed()
    writeOpts.close()
    check writeOpts.isClosed()
    writeOpts.close()
    check writeOpts.isClosed()
27
rocksdb/tests/test_all.nim
Normal file
@@ -0,0 +1,27 @@
# Nim-RocksDB
# Copyright 2018-2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  ./columnfamily/test_cfdescriptor,
  ./columnfamily/test_cfhandle,
  ./columnfamily/test_cfopts,
  ./internal/test_cftable,
  ./lib/test_librocksdb,
  ./options/test_backupopts,
  ./options/test_dbopts,
  ./options/test_readopts,
  ./options/test_writeopts,
  ./transactions/test_txdbopts,
  ./transactions/test_txopts,
  ./test_backup,
  ./test_columnfamily,
  ./test_rocksdb,
  ./test_rocksiterator,
  ./test_sstfilewriter,
  ./test_writebatch
70
rocksdb/tests/test_backup.nim
Normal file
@@ -0,0 +1,70 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  std/os,
  tempfile,
  unittest2,
  ../rocksdb/backup,
  ./test_helper

suite "BackupEngineRef Tests":

  let
    key = @[byte(1), 2, 3, 4, 5]
    val = @[byte(1), 2, 3, 4, 5]

  setup:
    let
      dbPath = mkdtemp() / "data"
      dbBackupPath = mkdtemp() / "backup"
      dbRestorePath = mkdtemp() / "restore"

    var
      db = initReadWriteDb(dbPath)

  teardown:
    db.close()
    removeDir($dbPath)
    removeDir($dbBackupPath)

  test "Test backup":
    var engine = initBackupEngine(dbBackupPath)

    check:
      db.put(key, val).isOk()
      db.keyExists(key).value()

    check engine.createNewBackup(db).isOk()

    check:
      db.delete(key).isOk()
      not db.keyExists(key).value()

    check engine.restoreDbFromLatestBackup(dbRestorePath).isOk()

    let db2 = initReadWriteDb(dbRestorePath)
    check db2.keyExists(key).value()

    engine.close()

  test "Test close":
    let res = openBackupEngine(dbPath)
    doAssert res.isOk()
    var engine = res.get()

    check not engine.isClosed()
    engine.close()
    check engine.isClosed()
    engine.close()
    check engine.isClosed()
84
rocksdb/tests/test_columnfamily.nim
Normal file
@@ -0,0 +1,84 @@
# Nim-RocksDB
# Copyright 2018-2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  std/os,
  tempfile,
  unittest2,
  ../rocksdb/columnfamily,
  ./test_helper

suite "ColFamily Tests":
  const
    CF_DEFAULT = "default"
    CF_OTHER = "other"

  let
    key = @[byte(1), 2, 3, 4, 5]
    otherKey = @[byte(1), 2, 3, 4, 5, 6]
    val = @[byte(1), 2, 3, 4, 5]

  setup:
    let
      dbPath = mkdtemp() / "data"
      db = initReadWriteDb(dbPath, columnFamilyNames = @[CF_DEFAULT, CF_OTHER])

  teardown:
    db.close()
    removeDir($dbPath)

  test "Basic operations":
    let r0 = db.withColFamily(CF_OTHER)
    check r0.isOk()
    let cf = r0.value()

    check cf.put(key, val).isOk()

    var bytes: seq[byte]
    check cf.get(key, proc(data: openArray[byte]) = bytes = @data)[]
    check not cf.get(otherKey, proc(data: openArray[byte]) = bytes = @data)[]

    var r1 = cf.get(key)
    check r1.isOk() and r1.value == val

    var r2 = cf.get(otherKey)
    # there's no error string for missing keys
    check r2.isOk() == false and r2.error.len == 0

    var e1 = cf.keyExists(key)
    check e1.isOk() and e1.value == true

    var e2 = cf.keyExists(otherKey)
    check e2.isOk() and e2.value == false

    var d = cf.delete(key)
    check d.isOk()

    e1 = cf.keyExists(key)
    check e1.isOk() and e1.value == false

    d = cf.delete(otherKey)
    check d.isOk()

    cf.db.close()
    check db.isClosed()

    # Open database in read only mode
    block:
      var res = initReadOnlyDb(dbPath).withColFamily(CF_DEFAULT)
      check res.isOk()

      let readOnlyCf = res.value()
      let r = readOnlyCf.keyExists(key)
      check r.isOk() and r.value == false

      readOnlyCf.db.close()
      check readOnlyCf.db.isClosed()
59
rocksdb/tests/test_helper.nim
Normal file
@@ -0,0 +1,59 @@
# Nim-RocksDB
# Copyright 2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  std/sequtils,
  ../rocksdb/backup,
  ../rocksdb/rocksdb,
  ../rocksdb/transactiondb

proc initReadWriteDb*(
    path: string,
    columnFamilyNames: openArray[string] = @[]): RocksDbReadWriteRef =

  let res = openRocksDb(
    path,
    columnFamilies = columnFamilyNames.mapIt(initColFamilyDescriptor(it)))
  if res.isErr():
    echo res.error()
  doAssert res.isOk()
  res.value()

proc initReadOnlyDb*(
    path: string,
    columnFamilyNames: openArray[string] = @[]): RocksDbReadOnlyRef =

  let res = openRocksDbReadOnly(
    path,
    columnFamilies = columnFamilyNames.mapIt(initColFamilyDescriptor(it)))
  if res.isErr():
    echo res.error()
  doAssert res.isOk()
  res.value()

proc initBackupEngine*(path: string): BackupEngineRef =

  let res = openBackupEngine(path)
  doAssert res.isOk()
  res.value()

proc initTransactionDb*(
    path: string,
    columnFamilyNames: openArray[string] = @[]): TransactionDbRef =

  let res = openTransactionDb(
    path,
    columnFamilies = columnFamilyNames.mapIt(initColFamilyDescriptor(it)))
  if res.isErr():
    echo res.error()
  doAssert res.isOk()
  res.value()
284
rocksdb/tests/test_rocksdb.nim
Normal file
@@ -0,0 +1,284 @@
# Nim-RocksDB
# Copyright 2018-2024 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  std/os,
  tempfile,
  unittest2,
  ../rocksdb/rocksdb,
  ./test_helper

suite "RocksDbRef Tests":
  const
    CF_DEFAULT = "default"
    CF_OTHER = "other"

  let
    key = @[byte(1), 2, 3, 4, 5]
    otherKey = @[byte(1), 2, 3, 4, 5, 6]
    val = @[byte(1), 2, 3, 4, 5]

  setup:
    let
      dbPath = mkdtemp() / "data"
      db = initReadWriteDb(dbPath, columnFamilyNames = @[CF_DEFAULT, CF_OTHER])

  teardown:
    db.close()
    removeDir($dbPath)

  test "Basic operations":

    var s = db.put(key, val)
    check s.isOk()

    var bytes: seq[byte]
    check db.get(key, proc(data: openArray[byte]) = bytes = @data)[]
    check not db.get(otherKey, proc(data: openArray[byte]) = bytes = @data)[]

    var r1 = db.get(key)
    check r1.isOk() and r1.value == val

    var r2 = db.get(otherKey)
    # there's no error string for missing keys
    check r2.isOk() == false and r2.error.len == 0

    var e1 = db.keyExists(key)
    check e1.isOk() and e1.value == true

    var e2 = db.keyExists(otherKey)
    check e2.isOk() and e2.value == false

    var d = db.delete(key)
    check d.isOk()

    e1 = db.keyExists(key)
    check e1.isOk() and e1.value == false

    d = db.delete(otherKey)
    check d.isOk()

    close(db)
    check db.isClosed()

    # Open database in read only mode
    block:
      var
        readOnlyDb = initReadOnlyDb(dbPath)
        r = readOnlyDb.keyExists(key)
      check r.isOk() and r.value == false

      # This won't compile as designed:
      # var r2 = readOnlyDb.put(key, @[123.byte])
      # check r2.isErr()

      readOnlyDb.close()
      check readOnlyDb.isClosed()

  test "Basic operations - default column family":

    var s = db.put(key, val, CF_DEFAULT)
    check s.isOk()

    var bytes: seq[byte]
    check db.get(key, proc(data: openArray[byte]) = bytes = @data, CF_DEFAULT)[]
    check not db.get(otherKey, proc(data: openArray[byte]) = bytes = @data, CF_DEFAULT)[]

    var r1 = db.get(key)
    check r1.isOk() and r1.value == val

    var r2 = db.get(otherKey)
    # there's no error string for missing keys
    check r2.isOk() == false and r2.error.len == 0

    var e1 = db.keyExists(key, CF_DEFAULT)
    check e1.isOk() and e1.value == true

    var e2 = db.keyExists(otherKey, CF_DEFAULT)
    check e2.isOk() and e2.value == false

    var d = db.delete(key, CF_DEFAULT)
    check d.isOk()

    e1 = db.keyExists(key, CF_DEFAULT)
    check e1.isOk() and e1.value == false

    d = db.delete(otherKey, CF_DEFAULT)
    check d.isOk()

    close(db)
    check db.isClosed()

    # Open database in read only mode
    block:
      var
        readOnlyDb = initReadOnlyDb(dbPath, columnFamilyNames = @[CF_DEFAULT])
        r = readOnlyDb.keyExists(key, CF_DEFAULT)
      check r.isOk() and r.value == false

      # Won't compile as designed:
      # var r2 = readOnlyDb.put(key, @[123.byte], CF_DEFAULT)
      # check r2.isErr()

      readOnlyDb.close()
      check readOnlyDb.isClosed()

  test "Basic operations - multiple column families":

    var s = db.put(key, val, CF_DEFAULT)
    check s.isOk()

    var s2 = db.put(otherKey, val, CF_OTHER)
    check s2.isOk()

    var bytes: seq[byte]
    check db.get(key, proc(data: openArray[byte]) = bytes = @data, CF_DEFAULT)[]
    check not db.get(otherKey, proc(data: openArray[byte]) = bytes = @data, CF_DEFAULT)[]

    var bytes2: seq[byte]
    check db.get(otherKey, proc(data: openArray[byte]) = bytes2 = @data, CF_OTHER)[]
    check not db.get(key, proc(data: openArray[byte]) = bytes2 = @data, CF_OTHER)[]

    var e1 = db.keyExists(key, CF_DEFAULT)
    check e1.isOk() and e1.value == true
    var e2 = db.keyExists(otherKey, CF_DEFAULT)
    check e2.isOk() and e2.value == false

    var e3 = db.keyExists(key, CF_OTHER)
    check e3.isOk() and e3.value == false
    var e4 = db.keyExists(otherKey, CF_OTHER)
    check e4.isOk() and e4.value == true

    var d = db.delete(key, CF_DEFAULT)
    check d.isOk()
    e1 = db.keyExists(key, CF_DEFAULT)
    check e1.isOk() and e1.value == false
    d = db.delete(otherKey, CF_DEFAULT)
    check d.isOk()

    var d2 = db.delete(key, CF_OTHER)
    check d2.isOk()
    e3 = db.keyExists(key, CF_OTHER)
    check e3.isOk() and e3.value == false
    d2 = db.delete(otherKey, CF_OTHER)
    check d2.isOk()
    d2 = db.delete(otherKey, CF_OTHER)
    check d2.isOk()

    db.close()
    check db.isClosed()

    # Open database in read only mode
    block:
      var
        readOnlyDb = initReadOnlyDb(dbPath, columnFamilyNames = @[CF_DEFAULT, CF_OTHER])

      var r = readOnlyDb.keyExists(key, CF_OTHER)
      check r.isOk() and r.value == false

      # Does not compile as designed:
      # var r2 = readOnlyDb.put(key, @[123.byte], CF_OTHER)
      # check r2.isErr()

      readOnlyDb.close()
      check readOnlyDb.isClosed()

  test "Close multiple times":

    check not db.isClosed()
    db.close()
    check db.isClosed()
    db.close()
    check db.isClosed()

  test "Unknown column family":
    const CF_UNKNOWN = "unknown"

    let r = db.put(key, val, CF_UNKNOWN)
    check r.isErr() and r.error() == "rocksdb: unknown column family"

    var bytes: seq[byte]
    let r2 = db.get(key, proc(data: openArray[byte]) = bytes = @data, CF_UNKNOWN)
    check r2.isErr() and r2.error() == "rocksdb: unknown column family"

    let r3 = db.keyExists(key, CF_UNKNOWN)
    check r3.isErr() and r3.error() == "rocksdb: unknown column family"

    let r4 = db.delete(key, CF_UNKNOWN)
    check r4.isErr() and r4.error() == "rocksdb: unknown column family"

  test "Test missing key and values":
    let
      key1 = @[byte(1)] # exists with non empty value
      val1 = @[byte(1)]
      key2 = @[byte(2)] # exists with empty seq value
      val2: seq[byte] = @[]
      key3 = @[byte(3)] # exists with empty array value
      val3: array[0, byte] = []
      key4 = @[byte(4)] # deleted key
      key5 = @[byte(5)] # key not created

    check:
      db.put(key1, val1).isOk()
      db.put(key2, val2).isOk()
      db.put(key3, val3).isOk()
      db.delete(key4).isOk()

      db.keyExists(key1).get() == true
      db.keyExists(key2).get() == true
      db.keyExists(key3).get() == true
      db.keyExists(key4).get() == false
      db.keyExists(key5).get() == false

    block:
      var v: seq[byte]
      let r = db.get(key1, proc(data: openArray[byte]) = v = @data)
      check:
        r.isOk()
        r.value() == true
        v == val1
        db.get(key1).isOk()

    block:
      var v: seq[byte]
      let r = db.get(key2, proc(data: openArray[byte]) = v = @data)
      check:
        r.isOk()
        r.value() == true
        v.len() == 0
        db.get(key2).isOk()

    block:
      var v: seq[byte]
      let r = db.get(key3, proc(data: openArray[byte]) = v = @data)
      check:
        r.isOk()
        r.value() == true
        v.len() == 0
        db.get(key3).isOk()

    block:
      var v: seq[byte]
      let r = db.get(key4, proc(data: openArray[byte]) = v = @data)
      check:
        r.isOk()
        r.value() == false
        v.len() == 0
        db.get(key4).isErr()

    block:
      var v: seq[byte]
      let r = db.get(key5, proc(data: openArray[byte]) = v = @data)
      check:
        r.isOk()
        r.value() == false
        v.len() == 0
        db.get(key5).isErr()
196
rocksdb/tests/test_rocksiterator.nim
Normal file
196
rocksdb/tests/test_rocksiterator.nim
Normal file
@ -0,0 +1,196 @@
|
||||
# Nim-RocksDB
|
||||
# Copyright 2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
#
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
|
||||
#
|
||||
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
||||
|
||||
{.used.}
|
||||
|
||||
import
|
||||
std/os,
|
||||
tempfile,
|
||||
unittest2,
|
||||
../rocksdb/[rocksdb, rocksiterator],
|
||||
./test_helper
|
||||
|
||||
suite "RocksIteratorRef Tests":
|
||||
|
||||
const
|
||||
CF_DEFAULT = "default"
|
||||
CF_OTHER = "other"
|
||||
CF_EMPTY = "empty"
|
||||
|
||||
let
|
||||
key1 = @[byte(1)]
|
||||
val1 = @[byte(1)]
|
||||
key2 = @[byte(2)]
|
||||
val2 = @[byte(2)]
|
||||
key3 = @[byte(3)]
|
||||
val3 = @[byte(3)]
|
||||
|
||||
setup:
|
||||
let
|
||||
dbPath = mkdtemp() / "data"
|
||||
db = initReadWriteDb(dbPath,
|
||||
columnFamilyNames = @[CF_DEFAULT, CF_OTHER, CF_EMPTY])
|
||||
|
||||
doAssert db.put(key1, val1).isOk()
|
||||
doAssert db.put(key2, val2).isOk()
|
||||
doAssert db.put(key3, val3).isOk()
|
||||
doAssert db.put(key1, val1, CF_OTHER).isOk()
|
||||
doAssert db.put(key2, val2, CF_OTHER).isOk()
|
||||
doAssert db.put(key3, val3, CF_OTHER).isOk()
|
||||
|
||||
teardown:
|
||||
db.close()
|
||||
removeDir($dbPath)
|
||||
|
||||
test "Iterate forwards using default column family":
|
||||
let res = db.openIterator(CF_DEFAULT)
|
||||
check res.isOk()
|
||||
|
||||
var iter = res.get()
|
||||
defer: iter.close()
|
||||
|
||||
iter.seekToFirst()
|
||||
check iter.isValid()
|
||||
|
||||
var expected = byte(1)
|
||||
while iter.isValid():
|
||||
let
|
||||
key = iter.key()
|
||||
val = iter.value()
|
||||
|
||||
check:
|
||||
key == @[expected]
|
||||
val == @[expected]
|
||||
|
||||
inc expected
|
||||
iter.next()
|
||||
|
||||
check expected == byte(4)
|
||||
|
||||
test "Iterate backwards using other column family":
|
||||
let res = db.openIterator(CF_OTHER)
|
||||
check res.isOk()
|
||||
|
||||
var iter = res.get()
|
||||
defer: iter.close()
|
||||
|
||||
iter.seekToLast()
|
||||
check iter.isValid()
|
||||
|
||||
var expected = byte(3)
|
||||
while iter.isValid():
|
||||
|
||||
var key: seq[byte]
|
||||
iter.key(proc(data: openArray[byte]) = key = @data)
|
||||
var val: seq[byte]
|
||||
iter.value(proc(data: openArray[byte]) = val = @data)
|
||||
|
||||
check:
|
||||
key == @[expected]
|
||||
val == @[expected]
|
||||
|
||||
dec expected
|
||||
iter.prev()
|
||||
|
||||
check expected == byte(0)
|
||||
iter.close()
|
||||
|
||||
test "Open two iterators on the same column family":
|
||||
let res1 = db.openIterator(CF_DEFAULT)
|
||||
check res1.isOk()
|
||||
var iter1 = res1.get()
|
||||
defer: iter1.close()
|
||||
let res2 = db.openIterator(CF_DEFAULT)
|
||||
check res2.isOk()
|
||||
var iter2 = res2.get()
|
||||
defer: iter2.close()
|
||||
|
||||
iter1.seekToFirst()
|
||||
check iter1.isValid()
|
||||
iter2.seekToLast()
|
||||
check iter2.isValid()
|
||||
|
||||
check:
|
||||
iter1.key() == @[byte(1)]
|
||||
iter1.value() == @[byte(1)]
|
||||
iter2.key() == @[byte(3)]
|
||||
iter2.value() == @[byte(3)]
|
||||
|
||||
test "Open two iterators on different column families":
|
||||
let res1 = db.openIterator(CF_DEFAULT)
|
||||
check res1.isOk()
|
||||
var iter1 = res1.get()
|
||||
defer: iter1.close()
|
||||
let res2 = db.openIterator(CF_OTHER)
|
||||
check res2.isOk()
|
||||
var iter2 = res2.get()
|
||||
defer: iter2.close()
|
||||
|
||||
iter1.seekToFirst()
|
||||
check iter1.isValid()
|
||||
iter2.seekToLast()
|
||||
check iter2.isValid()
|
||||
|
||||
check:
|
||||
iter1.key() == @[byte(1)]
|
||||
iter1.value() == @[byte(1)]
|
||||
iter2.key() == @[byte(3)]
|
||||
iter2.value() == @[byte(3)]
|
||||
|
||||
test "Invalid column family":
|
||||
let res = db.openIterator("unknown")
|
||||
check:
|
||||
res.isErr()
|
||||
res.error() == "rocksdb: unknown column family"
|
||||
|
||||
test "Empty column family":
|
||||
let res = db.openIterator(CF_EMPTY)
|
||||
check res.isOk()
|
||||
var iter = res.get()
|
||||
defer: iter.close()
|
||||
|
||||
iter.seekToFirst()
|
||||
check not iter.isValid()
|
||||
|
||||
iter.seekToLast()
|
||||
check not iter.isValid()
|
||||
|
||||
test "Test status":
|
||||
let res = db.openIterator(CF_EMPTY)
|
||||
check res.isOk()
|
||||
var iter = res.get()
|
||||
defer: iter.close()
|
||||
|
||||
check iter.status().isOk()
|
||||
iter.seekToLast()
|
||||
check iter.status().isOk()
|
||||
|
||||
test "Test pairs iterator":
|
||||
let res = db.openIterator(CF_DEFAULT)
|
||||
check res.isOk()
|
||||
var iter = res.get()
|
||||
|
||||
var expected = byte(1)
|
||||
for k, v in iter:
|
||||
check:
|
||||
k == @[expected]
|
||||
v == @[expected]
|
||||
inc expected
|
||||
check iter.isClosed()
|
||||
|
||||
test "Test close":
|
||||
let res = db.openIterator()
|
||||
check res.isOk()
|
||||
var iter = res.get()
|
||||
|
||||
check not iter.isClosed()
|
||||
iter.close()
|
||||
check iter.isClosed()
|
||||
iter.close()
|
||||
check iter.isClosed()
|
91
rocksdb/tests/test_sstfilewriter.nim
Normal file
91
rocksdb/tests/test_sstfilewriter.nim
Normal file
@ -0,0 +1,91 @@
|
||||
# Nim-RocksDB
|
||||
# Copyright 2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
#
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
|
||||
#
|
||||
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
||||
|
||||
{.used.}
|
||||
|
||||
import
|
||||
std/os,
|
||||
tempfile,
|
||||
unittest2,
|
||||
../rocksdb/[rocksdb, sstfilewriter],
|
||||
./test_helper
|
||||
|
||||
suite "SstFileWriterRef Tests":
|
||||
|
||||
const
|
||||
CF_DEFAULT = "default"
|
||||
CF_OTHER = "other"
|
||||
|
||||
let
|
||||
key1 = @[byte(1)]
|
||||
val1 = @[byte(1)]
|
||||
key2 = @[byte(2)]
|
||||
val2 = @[byte(2)]
|
||||
key3 = @[byte(3)]
|
||||
val3 = @[byte(3)]
|
||||
|
||||
setup:
|
||||
let
|
||||
dbPath = mkdtemp() / "data"
|
||||
sstFilePath = mkdtemp() / "sst"
|
||||
db = initReadWriteDb(dbPath,
|
||||
columnFamilyNames = @[CF_DEFAULT, CF_OTHER])
|
||||
|
||||
teardown:
|
||||
db.close()
|
||||
removeDir($dbPath)
|
||||
|
||||
test "Write to sst file then load into db using default column family":
|
||||
let res = openSstFileWriter(sstFilePath)
|
||||
check res.isOk()
|
||||
let writer = res.get()
|
||||
defer: writer.close()
|
||||
|
||||
check:
|
||||
writer.put(key1, val1).isOk()
|
||||
writer.put(key2, val2).isOk()
|
||||
writer.put(key3, val3).isOk()
|
||||
writer.delete(@[byte(4)]).isOk()
|
||||
writer.finish().isOk()
|
||||
|
||||
db.ingestExternalFile(sstFilePath).isOk()
|
||||
db.get(key1).get() == val1
|
||||
db.get(key2).get() == val2
|
||||
db.get(key3).get() == val3
|
||||
|
||||
test "Write to sst file then load into db using specific column family":
|
||||
let res = openSstFileWriter(sstFilePath)
|
||||
check res.isOk()
|
||||
let writer = res.get()
|
||||
defer: writer.close()
|
||||
|
||||
check:
|
||||
writer.put(key1, val1).isOk()
|
||||
writer.put(key2, val2).isOk()
|
||||
writer.put(key3, val3).isOk()
|
||||
writer.finish().isOk()
|
||||
|
||||
db.ingestExternalFile(sstFilePath, CF_OTHER).isOk()
|
||||
db.keyExists(key1, CF_DEFAULT).get() == false
|
||||
db.keyExists(key2, CF_DEFAULT).get() == false
|
||||
db.keyExists(key3, CF_DEFAULT).get() == false
|
||||
db.get(key1, CF_OTHER).get() == val1
|
||||
db.get(key2, CF_OTHER).get() == val2
|
||||
db.get(key3, CF_OTHER).get() == val3
|
||||
|
||||
test "Test close":
|
||||
let res = openSstFileWriter(sstFilePath)
|
||||
check res.isOk()
|
||||
let writer = res.get()
|
||||
|
||||
check not writer.isClosed()
|
||||
writer.close()
|
||||
check writer.isClosed()
|
||||
writer.close()
|
||||
check writer.isClosed()
|
203
rocksdb/tests/test_transactiondb.nim
Normal file
203
rocksdb/tests/test_transactiondb.nim
Normal file
@ -0,0 +1,203 @@
|
||||
# Nim-RocksDB
|
||||
# Copyright 2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
#
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
|
||||
#
|
||||
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
||||
|
||||
{.used.}
|
||||
|
||||
import
|
||||
std/os,
|
||||
tempfile,
|
||||
unittest2,
|
||||
../rocksdb/[transactiondb],
|
||||
./test_helper
|
||||
|
||||
suite "TransactionDbRef Tests":
|
||||
|
||||
const
|
||||
CF_DEFAULT = "default"
|
||||
CF_OTHER = "other"
|
||||
|
||||
let
|
||||
key1 = @[byte(1)]
|
||||
val1 = @[byte(1)]
|
||||
key2 = @[byte(2)]
|
||||
val2 = @[byte(2)]
|
||||
key3 = @[byte(3)]
|
||||
val3 = @[byte(3)]
|
||||
|
||||
setup:
|
||||
let dbPath = mkdtemp() / "data"
|
||||
var db = initTransactionDb(dbPath, columnFamilyNames = @[CF_OTHER])
|
||||
|
||||
teardown:
|
||||
db.close()
|
||||
removeDir($dbPath)
|
||||
|
||||
# test multiple transactions
|
||||
|
||||
test "Test rollback using default column family":
|
||||
var tx = db.beginTransaction()
|
||||
defer: tx.close()
|
||||
check not tx.isClosed()
|
||||
|
||||
check:
|
||||
tx.put(key1, val1).isOk()
|
||||
tx.put(key2, val2).isOk()
|
||||
tx.put(key3, val3).isOk()
|
||||
|
||||
tx.delete(key2).isOk()
|
||||
not tx.isClosed()
|
||||
|
||||
check:
|
||||
tx.get(key1).get() == val1
|
||||
tx.get(key2).error() == ""
|
||||
tx.get(key3).get() == val3
|
||||
|
||||
let res = tx.rollback()
|
||||
check:
|
||||
res.isOk()
|
||||
tx.get(key1).error() == ""
|
||||
tx.get(key2).error() == ""
|
||||
tx.get(key3).error() == ""
|
||||
|
||||
test "Test commit using default column family":
|
||||
var tx = db.beginTransaction()
|
||||
defer: tx.close()
|
||||
check not tx.isClosed()
|
||||
|
||||
check:
|
||||
tx.put(key1, val1).isOk()
|
||||
tx.put(key2, val2).isOk()
|
||||
tx.put(key3, val3).isOk()
|
||||
|
||||
tx.delete(key2).isOk()
|
||||
not tx.isClosed()
|
||||
|
||||
check:
|
||||
tx.get(key1).get() == val1
|
||||
tx.get(key2).error() == ""
|
||||
tx.get(key3).get() == val3
|
||||
|
||||
let res = tx.commit()
|
||||
check:
|
||||
res.isOk()
|
||||
tx.get(key1).get() == val1
|
||||
tx.get(key2).error() == ""
|
||||
tx.get(key3).get() == val3
|
||||
|
||||
test "Test setting column family in beginTransaction":
|
||||
var tx = db.beginTransaction(columnFamily = CF_OTHER)
|
||||
defer: tx.close()
|
||||
check not tx.isClosed()
|
||||
|
||||
check:
|
||||
tx.put(key1, val1).isOk()
|
||||
tx.put(key2, val2).isOk()
|
||||
tx.put(key3, val3).isOk()
|
||||
|
||||
tx.delete(key2).isOk()
|
||||
not tx.isClosed()
|
||||
|
||||
check:
|
||||
tx.get(key1, CF_DEFAULT).error() == ""
|
||||
tx.get(key2, CF_DEFAULT).error() == ""
|
||||
tx.get(key3, CF_DEFAULT).error() == ""
|
||||
tx.get(key1, CF_OTHER).get() == val1
|
||||
tx.get(key2, CF_OTHER).error() == ""
|
||||
tx.get(key3, CF_OTHER).get() == val3
|
||||
|
||||
|
||||
test "Test rollback and commit with multiple transactions":
|
||||
var tx1 = db.beginTransaction(columnFamily = CF_DEFAULT)
|
||||
defer: tx1.close()
|
||||
check not tx1.isClosed()
|
||||
var tx2 = db.beginTransaction(columnFamily = CF_OTHER)
|
||||
defer: tx2.close()
|
||||
check not tx2.isClosed()
|
||||
|
||||
check:
|
||||
tx1.put(key1, val1).isOk()
|
||||
tx1.put(key2, val2).isOk()
|
||||
tx1.put(key3, val3).isOk()
|
||||
tx1.delete(key2).isOk()
|
||||
not tx1.isClosed()
|
||||
tx2.put(key1, val1).isOk()
|
||||
tx2.put(key2, val2).isOk()
|
||||
tx2.put(key3, val3).isOk()
|
||||
tx2.delete(key2).isOk()
|
||||
not tx2.isClosed()
|
||||
|
||||
check:
|
||||
tx1.get(key1, CF_DEFAULT).get() == val1
|
||||
tx1.get(key2, CF_DEFAULT).error() == ""
|
||||
tx1.get(key3, CF_DEFAULT).get() == val3
|
||||
tx1.get(key1, CF_OTHER).error() == ""
|
||||
tx1.get(key2, CF_OTHER).error() == ""
|
||||
tx1.get(key3, CF_OTHER).error() == ""
|
||||
|
||||
tx2.get(key1, CF_DEFAULT).error() == ""
|
||||
tx2.get(key2, CF_DEFAULT).error() == ""
|
||||
tx2.get(key3, CF_DEFAULT).error() == ""
|
||||
tx2.get(key1, CF_OTHER).get() == val1
|
||||
tx2.get(key2, CF_OTHER).error() == ""
|
||||
tx2.get(key3, CF_OTHER).get() == val3
|
||||
|
||||
block:
|
||||
let res = tx1.rollback()
|
||||
check:
|
||||
res.isOk()
|
||||
tx1.get(key1, CF_DEFAULT).error() == ""
|
||||
tx1.get(key2, CF_DEFAULT).error() == ""
|
||||
tx1.get(key3, CF_DEFAULT).error() == ""
|
||||
tx1.get(key1, CF_OTHER).error() == ""
|
||||
tx1.get(key2, CF_OTHER).error() == ""
|
||||
tx1.get(key3, CF_OTHER).error() == ""
|
||||
|
||||
block:
|
||||
let res = tx2.commit()
|
||||
check:
|
||||
res.isOk()
|
||||
tx2.get(key1, CF_DEFAULT).error() == ""
|
||||
tx2.get(key2, CF_DEFAULT).error() == ""
|
||||
tx2.get(key3, CF_DEFAULT).error() == ""
|
||||
tx2.get(key1, CF_OTHER).get() == val1
|
||||
tx2.get(key2, CF_OTHER).error() == ""
|
||||
tx2.get(key3, CF_OTHER).get() == val3
|
||||
|
||||
test "Test close":
|
||||
var tx = db.beginTransaction()
|
||||
|
||||
check not tx.isClosed()
|
||||
tx.close()
|
||||
check tx.isClosed()
|
||||
tx.close()
|
||||
check tx.isClosed()
|
||||
|
||||
check not db.isClosed()
|
||||
db.close()
|
||||
check db.isClosed()
|
||||
db.close()
|
||||
check db.isClosed()
|
||||
|
||||
test "Test close multiple tx":
|
||||
var tx1 = db.beginTransaction()
|
||||
var tx2 = db.beginTransaction()
|
||||
|
||||
check not db.isClosed()
|
||||
check not tx1.isClosed()
|
||||
tx1.close()
|
||||
check tx1.isClosed()
|
||||
tx1.close()
|
||||
check tx1.isClosed()
|
||||
|
||||
check not db.isClosed()
|
||||
check not tx2.isClosed()
|
||||
tx2.close()
|
||||
check tx2.isClosed()
|
||||
tx2.close()
|
||||
check tx2.isClosed()
|
187
rocksdb/tests/test_writebatch.nim
Normal file
187
rocksdb/tests/test_writebatch.nim
Normal file
@ -0,0 +1,187 @@
|
||||
# Nim-RocksDB
|
||||
# Copyright 2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
#
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
|
||||
#
|
||||
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
||||
|
||||
{.used.}
|
||||
|
||||
import
|
||||
std/os,
|
||||
tempfile,
|
||||
unittest2,
|
||||
../rocksdb/[rocksdb, writebatch],
|
||||
./test_helper
|
||||
|
||||
suite "WriteBatchRef Tests":
|
||||
|
||||
const
|
||||
CF_DEFAULT = "default"
|
||||
CF_OTHER = "other"
|
||||
|
||||
let
|
||||
key1 = @[byte(1)]
|
||||
val1 = @[byte(1)]
|
||||
key2 = @[byte(2)]
|
||||
val2 = @[byte(2)]
|
||||
key3 = @[byte(3)]
|
||||
val3 = @[byte(3)]
|
||||
|
||||
setup:
|
||||
let dbPath = mkdtemp() / "data"
|
||||
var db = initReadWriteDb(dbPath, columnFamilyNames = @[CF_DEFAULT, CF_OTHER])
|
||||
|
||||
teardown:
|
||||
db.close()
|
||||
removeDir($dbPath)
|
||||
|
||||
test "Test writing batch to the default column family":
|
||||
var batch = db.openWriteBatch()
|
||||
defer: batch.close()
|
||||
check not batch.isClosed()
|
||||
|
||||
check:
|
||||
batch.put(key1, val1).isOk()
|
||||
batch.put(key2, val2).isOk()
|
||||
batch.put(key3, val3).isOk()
|
||||
batch.count() == 3
|
||||
|
||||
batch.delete(key2).isOk()
|
||||
batch.count() == 4
|
||||
not batch.isClosed()
|
||||
|
||||
let res = db.write(batch)
|
||||
check:
|
||||
res.isOk()
|
||||
db.write(batch).isOk() # test that it's idempotent
|
||||
db.get(key1).get() == val1
|
||||
db.keyExists(key2).get() == false
|
||||
db.get(key3).get() == val3
|
||||
|
||||
batch.clear()
|
||||
check:
|
||||
batch.count() == 0
|
||||
not batch.isClosed()
|
||||
|
||||
test "Test writing batch to column family":
|
||||
var batch = db.openWriteBatch()
|
||||
defer: batch.close()
|
||||
check not batch.isClosed()
|
||||
|
||||
check:
|
||||
batch.put(key1, val1, CF_OTHER).isOk()
|
||||
batch.put(key2, val2, CF_OTHER).isOk()
|
||||
batch.put(key3, val3, CF_OTHER).isOk()
|
||||
batch.count() == 3
|
||||
|
||||
batch.delete(key2, CF_OTHER).isOk()
|
||||
batch.count() == 4
|
||||
not batch.isClosed()
|
||||
|
||||
let res = db.write(batch)
|
||||
check:
|
||||
res.isOk()
|
||||
db.get(key1, CF_OTHER).get() == val1
|
||||
db.keyExists(key2, CF_OTHER).get() == false
|
||||
db.get(key3, CF_OTHER).get() == val3
|
||||
|
||||
batch.clear()
|
||||
check:
|
||||
batch.count() == 0
|
||||
not batch.isClosed()
|
||||
|
||||
test "Test writing to multiple column families in single batch":
|
||||
var batch = db.openWriteBatch()
|
||||
defer: batch.close()
|
||||
check not batch.isClosed()
|
||||
|
||||
check:
|
||||
batch.put(key1, val1).isOk()
|
||||
batch.put(key1, val1, CF_OTHER).isOk()
|
||||
batch.put(key2, val2, CF_OTHER).isOk()
|
||||
batch.put(key3, val3, CF_OTHER).isOk()
|
||||
batch.count() == 4
|
||||
|
||||
batch.delete(key2, CF_OTHER).isOk()
|
||||
batch.count() == 5
|
||||
not batch.isClosed()
|
||||
|
||||
let res = db.write(batch)
|
||||
check:
|
||||
res.isOk()
|
||||
db.get(key1).get() == val1
|
||||
db.get(key1, CF_OTHER).get() == val1
|
||||
db.keyExists(key2, CF_OTHER).get() == false
|
||||
db.get(key3, CF_OTHER).get() == val3
|
||||
|
||||
batch.clear()
|
||||
check:
|
||||
batch.count() == 0
|
||||
not batch.isClosed()
|
||||
|
||||
test "Test writing to multiple column families in multiple batches":
|
||||
var batch1 = db.openWriteBatch()
|
||||
defer: batch1.close()
|
||||
check not batch1.isClosed()
|
||||
|
||||
var batch2 = db.openWriteBatch()
|
||||
defer: batch2.close()
|
||||
check not batch2.isClosed()
|
||||
|
||||
check:
|
||||
batch1.put(key1, val1).isOk()
|
||||
batch1.delete(key2, CF_OTHER).isOk()
|
||||
batch1.put(key3, val3, CF_OTHER).isOk()
|
||||
batch2.put(key1, val1, CF_OTHER).isOk()
|
||||
batch2.delete(key1, CF_OTHER).isOk()
|
||||
batch2.put(key3, val3).isOk()
|
||||
batch1.count() == 3
|
||||
batch2.count() == 3
|
||||
|
||||
let res1 = db.write(batch1)
|
||||
let res2 = db.write(batch2)
|
||||
check:
|
||||
res1.isOk()
|
||||
res2.isOk()
|
||||
db.get(key1).get() == val1
|
||||
db.keyExists(key2).get() == false
|
||||
db.get(key3).get() == val3
|
||||
db.keyExists(key1, CF_OTHER).get() == false
|
||||
db.keyExists(key2, CF_OTHER).get() == false
|
||||
db.get(key3, CF_OTHER).get() == val3
|
||||
|
||||
test "Test unknown column family":
|
||||
const CF_UNKNOWN = "unknown"
|
||||
|
||||
var batch = db.openWriteBatch()
|
||||
defer: batch.close()
|
||||
check not batch.isClosed()
|
||||
|
||||
let r = batch.put(key1, val1, CF_UNKNOWN)
|
||||
check r.isErr() and r.error() == "rocksdb: unknown column family"
|
||||
|
||||
let r2 = batch.delete(key1, CF_UNKNOWN)
|
||||
check r2.isErr() and r2.error() == "rocksdb: unknown column family"
|
||||
|
||||
test "Test write empty batch":
|
||||
var batch = db.openWriteBatch()
|
||||
defer: batch.close()
|
||||
check not batch.isClosed()
|
||||
|
||||
check batch.count() == 0
|
||||
let res1 = db.write(batch)
|
||||
check:
|
||||
res1.isOk()
|
||||
batch.count() == 0
|
||||
|
||||
test "Test close":
|
||||
var batch = db.openWriteBatch()
|
||||
|
||||
check not batch.isClosed()
|
||||
batch.close()
|
||||
check batch.isClosed()
|
||||
batch.close()
|
||||
check batch.isClosed()
|
39
rocksdb/tests/transactions/test_txdbopts.nim
Normal file
39
rocksdb/tests/transactions/test_txdbopts.nim
Normal file
@ -0,0 +1,39 @@
|
||||
# Nim-RocksDB
|
||||
# Copyright 2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
#
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
|
||||
#
|
||||
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
||||
|
||||
{.used.}
|
||||
|
||||
import
|
||||
unittest2,
|
||||
../../rocksdb/transactions/txdbopts
|
||||
|
||||
suite "TransactionDbOptionsRef Tests":
|
||||
|
||||
test "Test newTransactionDbOptions":
|
||||
var txDbOpts = newTransactionDbOptions()
|
||||
|
||||
check not txDbOpts.cPtr.isNil()
|
||||
|
||||
txDbOpts.close()
|
||||
|
||||
test "Test defaultTransactionDbOptions":
|
||||
var txDbOpts = defaultTransactionDbOptions()
|
||||
|
||||
check not txDbOpts.cPtr.isNil()
|
||||
|
||||
txDbOpts.close()
|
||||
|
||||
test "Test close":
|
||||
var txDbOpts = defaultTransactionDbOptions()
|
||||
|
||||
check not txDbOpts.isClosed()
|
||||
txDbOpts.close()
|
||||
check txDbOpts.isClosed()
|
||||
txDbOpts.close()
|
||||
check txDbOpts.isClosed()
|
39
rocksdb/tests/transactions/test_txopts.nim
Normal file
39
rocksdb/tests/transactions/test_txopts.nim
Normal file
@ -0,0 +1,39 @@
|
||||
# Nim-RocksDB
|
||||
# Copyright 2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
#
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * GPL license, version 2.0, ([LICENSE-GPLv2](LICENSE-GPLv2) or https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
|
||||
#
|
||||
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
||||
|
||||
{.used.}
|
||||
|
||||
import
|
||||
unittest2,
|
||||
../../rocksdb/transactions/txopts
|
||||
|
||||
suite "TransactionOptionsRef Tests":
|
||||
|
||||
test "Test newTransactionOptions":
|
||||
var txOpts = newTransactionOptions()
|
||||
|
||||
check not txOpts.cPtr.isNil()
|
||||
|
||||
txOpts.close()
|
||||
|
||||
test "Test defaultTransactionOptions":
|
||||
var txOpts = defaultTransactionOptions()
|
||||
|
||||
check not txOpts.cPtr.isNil()
|
||||
|
||||
txOpts.close()
|
||||
|
||||
test "Test close":
|
||||
var txOpts = defaultTransactionOptions()
|
||||
|
||||
check not txOpts.isClosed()
|
||||
txOpts.close()
|
||||
check txOpts.isClosed()
|
||||
txOpts.close()
|
||||
check txOpts.isClosed()
|
966
rocksdb/vendor/rocksdb/.circleci/config.yml
vendored
Normal file
966
rocksdb/vendor/rocksdb/.circleci/config.yml
vendored
Normal file
@ -0,0 +1,966 @@
|
||||
version: 2.1
|
||||
|
||||
orbs:
|
||||
win: circleci/windows@5.0.0
|
||||
|
||||
commands:
|
||||
install-cmake-on-macos:
|
||||
steps:
|
||||
- run:
|
||||
name: Install cmake on macos
|
||||
command: |
|
||||
HOMEBREW_NO_AUTO_UPDATE=1 brew install cmake
|
||||
|
||||
install-jdk8-on-macos:
|
||||
steps:
|
||||
- run:
|
||||
name: Install JDK 8 on macos
|
||||
command: |
|
||||
HOMEBREW_NO_AUTO_UPDATE=1 brew tap bell-sw/liberica
|
||||
HOMEBREW_NO_AUTO_UPDATE=1 brew install --cask liberica-jdk8
|
||||
|
||||
increase-max-open-files-on-macos:
|
||||
steps:
|
||||
- run:
|
||||
name: Increase max open files
|
||||
command: |
|
||||
sudo sysctl -w kern.maxfiles=1048576
|
||||
sudo sysctl -w kern.maxfilesperproc=1048576
|
||||
sudo launchctl limit maxfiles 1048576
|
||||
|
||||
pre-steps:
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Setup Environment Variables
|
||||
command: |
|
||||
echo "export GTEST_THROW_ON_FAILURE=0" >> $BASH_ENV
|
||||
echo "export GTEST_OUTPUT=\"xml:/tmp/test-results/\"" >> $BASH_ENV
|
||||
echo "export SKIP_FORMAT_BUCK_CHECKS=1" >> $BASH_ENV
|
||||
echo "export GTEST_COLOR=1" >> $BASH_ENV
|
||||
echo "export CTEST_OUTPUT_ON_FAILURE=1" >> $BASH_ENV
|
||||
echo "export CTEST_TEST_TIMEOUT=300" >> $BASH_ENV
|
||||
echo "export ZLIB_DOWNLOAD_BASE=https://rocksdb-deps.s3.us-west-2.amazonaws.com/pkgs/zlib" >> $BASH_ENV
|
||||
echo "export BZIP2_DOWNLOAD_BASE=https://rocksdb-deps.s3.us-west-2.amazonaws.com/pkgs/bzip2" >> $BASH_ENV
|
||||
echo "export SNAPPY_DOWNLOAD_BASE=https://rocksdb-deps.s3.us-west-2.amazonaws.com/pkgs/snappy" >> $BASH_ENV
|
||||
echo "export LZ4_DOWNLOAD_BASE=https://rocksdb-deps.s3.us-west-2.amazonaws.com/pkgs/lz4" >> $BASH_ENV
|
||||
echo "export ZSTD_DOWNLOAD_BASE=https://rocksdb-deps.s3.us-west-2.amazonaws.com/pkgs/zstd" >> $BASH_ENV
|
||||
|
||||
windows-build-steps:
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: "Install thirdparty dependencies"
|
||||
command: |
|
||||
echo "Installing CMake..."
|
||||
choco install cmake --installargs 'ADD_CMAKE_TO_PATH=System' -y
|
||||
choco install liberica8jdk -y
|
||||
mkdir $Env:THIRDPARTY_HOME
|
||||
cd $Env:THIRDPARTY_HOME
|
||||
echo "Building Snappy dependency..."
|
||||
curl https://github.com/google/snappy/archive/refs/tags/1.1.8.zip -O snappy-1.1.8.zip
|
||||
unzip -q snappy-1.1.8.zip
|
||||
cd snappy-1.1.8
|
||||
mkdir build
|
||||
cd build
|
||||
& $Env:CMAKE_BIN -G "$Env:CMAKE_GENERATOR" ..
|
||||
msbuild.exe Snappy.sln -maxCpuCount -property:Configuration=Debug -property:Platform=x64
|
||||
- run:
|
||||
name: "Build RocksDB"
|
||||
command: |
|
||||
$env:Path = $env:JAVA_HOME + ";" + $env:Path
|
||||
mkdir build
|
||||
cd build
|
||||
& $Env:CMAKE_BIN -G "$Env:CMAKE_GENERATOR" -DCMAKE_BUILD_TYPE=Debug -DOPTDBG=1 -DPORTABLE="$Env:CMAKE_PORTABLE" -DSNAPPY=1 -DJNI=1 ..
|
||||
cd ..
|
||||
echo "Building with VS version: $Env:CMAKE_GENERATOR"
|
||||
msbuild.exe build/rocksdb.sln -maxCpuCount -property:Configuration=Debug -property:Platform=x64
|
||||
- run:
|
||||
name: "Test RocksDB"
|
||||
shell: powershell.exe
|
||||
command: |
|
||||
build_tools\run_ci_db_test.ps1 -SuiteRun arena_test,db_basic_test,db_test,db_test2,db_merge_operand_test,bloom_test,c_test,coding_test,crc32c_test,dynamic_bloom_test,env_basic_test,env_test,hash_test,random_test -Concurrency 16
|
||||
- run:
|
||||
name: "Test RocksJava"
|
||||
command: |
|
||||
cd build\java
|
||||
& $Env:CTEST_BIN -C Debug -j 16
|
||||
pre-steps-macos:
|
||||
steps:
|
||||
- pre-steps
|
||||
|
||||
post-steps:
|
||||
steps:
|
||||
- store_test_results: # store test result if there's any
|
||||
path: /tmp/test-results
|
||||
- store_artifacts: # store LOG for debugging if there's any
|
||||
path: LOG
|
||||
- run: # on fail, compress Test Logs for diagnosing the issue
|
||||
name: Compress Test Logs
|
||||
command: tar -cvzf t.tar.gz t
|
||||
when: on_fail
|
||||
- store_artifacts: # on fail, store Test Logs for diagnosing the issue
|
||||
path: t.tar.gz
|
||||
destination: test_logs
|
||||
when: on_fail
|
||||
- run: # store core dumps if there's any
|
||||
command: |
|
||||
mkdir -p /tmp/core_dumps
|
||||
cp core.* /tmp/core_dumps
|
||||
when: on_fail
|
||||
- store_artifacts:
|
||||
path: /tmp/core_dumps
|
||||
when: on_fail
|
||||
|
||||
post-pmd-steps:
|
||||
steps:
|
||||
- store_artifacts:
|
||||
path: /home/circleci/project/java/target/pmd.xml
|
||||
when: on_fail
|
||||
- store_artifacts:
|
||||
path: /home/circleci/project/java/target/site
|
||||
when: on_fail
|
||||
|
||||
upgrade-cmake:
|
||||
steps:
|
||||
- run:
|
||||
name: Upgrade cmake
|
||||
command: |
|
||||
sudo apt remove --purge cmake
|
||||
sudo snap install cmake --classic
|
||||
|
||||
install-gflags:
|
||||
steps:
|
||||
- run:
|
||||
name: Install gflags
|
||||
command: |
|
||||
sudo apt-get update -y && sudo apt-get install -y libgflags-dev
|
||||
|
||||
install-gflags-on-macos:
|
||||
steps:
|
||||
- run:
|
||||
name: Install gflags on macos
|
||||
command: |
|
||||
HOMEBREW_NO_AUTO_UPDATE=1 brew install gflags
|
||||
|
||||
install-maven:
|
||||
steps:
|
||||
- run:
|
||||
name: Install maven
|
||||
command: |
|
||||
sudo apt-get update -y && sudo apt-get install -y maven
|
||||
|
||||
setup-folly:
|
||||
steps:
|
||||
- run:
|
||||
name: Checkout folly sources
|
||||
command: |
|
||||
make checkout_folly
|
||||
|
||||
build-folly:
|
||||
steps:
|
||||
- run:
|
||||
name: Build folly and dependencies
|
||||
command: |
|
||||
make build_folly
|
||||
|
||||
build-for-benchmarks:
|
||||
steps:
|
||||
- pre-steps
|
||||
- run:
|
||||
name: "Linux build for benchmarks"
|
||||
command: #sized for the resource-class rocksdb-benchmark-sys1
|
||||
make V=1 J=8 -j8 release
|
||||
|
||||
perform-benchmarks:
|
||||
steps:
|
||||
- run:
|
||||
name: "Test low-variance benchmarks"
|
||||
command: ./tools/benchmark_ci.py --db_dir /tmp/rocksdb-benchmark-datadir --output_dir /tmp/benchmark-results --num_keys 20000000
|
||||
environment:
|
||||
LD_LIBRARY_PATH: /usr/local/lib
|
||||
# How long to run parts of the test(s)
|
||||
DURATION_RO: 300
|
||||
DURATION_RW: 500
|
||||
# Keep threads within physical capacity of server (much lower than default)
|
||||
NUM_THREADS: 1
|
||||
MAX_BACKGROUND_JOBS: 4
|
||||
# Don't run a couple of "optional" initial tests
|
||||
CI_TESTS_ONLY: "true"
|
||||
# Reduce configured size of levels to ensure more levels in the leveled compaction LSM tree
|
||||
WRITE_BUFFER_SIZE_MB: 16
|
||||
TARGET_FILE_SIZE_BASE_MB: 16
|
||||
MAX_BYTES_FOR_LEVEL_BASE_MB: 64
|
||||
# The benchmark host has 32GB memory
|
||||
# The following values are tailored to work with that
|
||||
# Note, tests may not exercise the targeted issues if the memory is increased on new test hosts.
|
||||
COMPRESSION_TYPE: "none"
|
||||
CACHE_INDEX_AND_FILTER_BLOCKS: 1
|
||||
MIN_LEVEL_TO_COMPRESS: 3
|
||||
CACHE_SIZE_MB: 10240
|
||||
MB_WRITE_PER_SEC: 2
|
||||
|
||||
post-benchmarks:
|
||||
steps:
|
||||
- store_artifacts: # store the benchmark output
|
||||
path: /tmp/benchmark-results
|
||||
destination: test_logs
|
||||
- run:
|
||||
name: Send benchmark report to visualisation
|
||||
command: |
|
||||
set +e
|
||||
set +o pipefail
|
||||
./build_tools/benchmark_log_tool.py --tsvfile /tmp/benchmark-results/report.tsv --esdocument https://search-rocksdb-bench-k2izhptfeap2hjfxteolsgsynm.us-west-2.es.amazonaws.com/bench_test3_rix/_doc
|
||||
true
|
||||
|
||||
executors:
|
||||
linux-docker:
|
||||
docker:
|
||||
# The image configuration is build_tools/ubuntu20_image/Dockerfile
|
||||
# To update and build the image:
|
||||
# $ cd build_tools/ubuntu20_image
|
||||
# $ docker build -t zjay437/rocksdb:0.5 .
|
||||
# $ docker push zjay437/rocksdb:0.5
|
||||
# `zjay437` is the account name for zjay@meta.com which readwrite token is shared internally. To login:
|
||||
# $ docker login --username zjay437
|
||||
# Or please feel free to change it to your docker hub account for hosting the image, meta employee should already have the account and able to login with SSO.
|
||||
# To avoid impacting the existing CI runs, please bump the version every time creating a new image
|
||||
# to run the CI image environment locally:
|
||||
# $ docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -it zjay437/rocksdb:0.5 bash
|
||||
# option `--cap-add=SYS_PTRACE --security-opt seccomp=unconfined` is used to enable gdb to attach an existing process
|
||||
- image: zjay437/rocksdb:0.6
|
||||
linux-java-docker:
|
||||
docker:
|
||||
- image: evolvedbinary/rocksjava:centos6_x64-be
|
||||
|
||||
jobs:
|
||||
build-macos:
|
||||
macos:
|
||||
xcode: 14.3.1
|
||||
resource_class: macos.m1.medium.gen1
|
||||
environment:
|
||||
ROCKSDB_DISABLE_JEMALLOC: 1 # jemalloc cause env_test hang, disable it for now
|
||||
steps:
|
||||
- increase-max-open-files-on-macos
|
||||
- install-gflags-on-macos
|
||||
- pre-steps-macos
|
||||
- run: ulimit -S -n `ulimit -H -n` && OPT=-DCIRCLECI make V=1 J=16 -j16 all
|
||||
- post-steps
|
||||
|
||||
build-macos-cmake:
|
||||
macos:
|
||||
xcode: 14.3.1
|
||||
resource_class: macos.m1.medium.gen1
|
||||
parameters:
|
||||
run_even_tests:
|
||||
description: run even or odd tests, used to split tests to 2 groups
|
||||
type: boolean
|
||||
default: true
|
||||
steps:
|
||||
- increase-max-open-files-on-macos
|
||||
- install-cmake-on-macos
|
||||
- install-gflags-on-macos
|
||||
- pre-steps-macos
|
||||
- run:
|
||||
name: "cmake generate project file"
|
||||
command: ulimit -S -n `ulimit -H -n` && mkdir build && cd build && cmake -DWITH_GFLAGS=1 ..
|
||||
- run:
|
||||
name: "Build tests"
|
||||
command: cd build && make V=1 -j16
|
||||
- when:
|
||||
condition: << parameters.run_even_tests >>
|
||||
steps:
|
||||
- run:
|
||||
name: "Run even tests"
|
||||
command: ulimit -S -n `ulimit -H -n` && cd build && ctest -j16 -I 0,,2
|
||||
- when:
|
||||
condition:
|
||||
not: << parameters.run_even_tests >>
|
||||
steps:
|
||||
- run:
|
||||
name: "Run odd tests"
|
||||
command: ulimit -S -n `ulimit -H -n` && cd build && ctest -j16 -I 1,,2
|
||||
- post-steps
|
||||
|
||||
build-linux:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- run: make V=1 J=32 -j32 check
|
||||
- post-steps
|
||||
|
||||
build-linux-encrypted_env-no_compression:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- run: ENCRYPTED_ENV=1 ROCKSDB_DISABLE_SNAPPY=1 ROCKSDB_DISABLE_ZLIB=1 ROCKSDB_DISABLE_BZIP=1 ROCKSDB_DISABLE_LZ4=1 ROCKSDB_DISABLE_ZSTD=1 make V=1 J=32 -j32 check
|
||||
- run: |
|
||||
./sst_dump --help | grep -E -q 'Supported compression types: kNoCompression$' # Verify no compiled in compression
|
||||
- post-steps
|
||||
|
||||
build-linux-static_lib-alt_namespace-status_checked:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- run: ASSERT_STATUS_CHECKED=1 TEST_UINT128_COMPAT=1 ROCKSDB_MODIFY_NPHASH=1 LIB_MODE=static OPT="-DROCKSDB_NAMESPACE=alternative_rocksdb_ns" make V=1 -j24 check
|
||||
- post-steps
|
||||
|
||||
build-linux-release:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- checkout # check out the code in the project directory
|
||||
- run: make V=1 -j32 LIB_MODE=shared release
|
||||
- run: ls librocksdb.so # ensure shared lib built
|
||||
- run: ./db_stress --version # ensure with gflags
|
||||
- run: make clean
|
||||
- run: make V=1 -j32 release
|
||||
- run: ls librocksdb.a # ensure static lib built
|
||||
- run: ./db_stress --version # ensure with gflags
|
||||
- run: make clean
|
||||
- run: apt-get remove -y libgflags-dev
|
||||
- run: make V=1 -j32 LIB_MODE=shared release
|
||||
- run: ls librocksdb.so # ensure shared lib built
|
||||
- run: if ./db_stress --version; then false; else true; fi # ensure without gflags
|
||||
- run: make clean
|
||||
- run: make V=1 -j32 release
|
||||
- run: ls librocksdb.a # ensure static lib built
|
||||
- run: if ./db_stress --version; then false; else true; fi # ensure without gflags
|
||||
- post-steps
|
||||
|
||||
build-linux-release-rtti:
|
||||
executor: linux-docker
|
||||
resource_class: xlarge
|
||||
steps:
|
||||
- checkout # check out the code in the project directory
|
||||
- run: USE_RTTI=1 DEBUG_LEVEL=0 make V=1 -j16 static_lib tools db_bench
|
||||
- run: ./db_stress --version # ensure with gflags
|
||||
- run: make clean
|
||||
- run: apt-get remove -y libgflags-dev
|
||||
- run: USE_RTTI=1 DEBUG_LEVEL=0 make V=1 -j16 static_lib tools db_bench
|
||||
- run: if ./db_stress --version; then false; else true; fi # ensure without gflags
|
||||
|
||||
build-linux-clang-no_test_run:
|
||||
executor: linux-docker
|
||||
resource_class: xlarge
|
||||
steps:
|
||||
- checkout # check out the code in the project directory
|
||||
- run: CC=clang CXX=clang++ USE_CLANG=1 PORTABLE=1 make V=1 -j16 all
|
||||
- post-steps
|
||||
|
||||
build-linux-clang10-asan:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- run: COMPILE_WITH_ASAN=1 CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 check # aligned new doesn't work for reason we haven't figured out
|
||||
- post-steps
|
||||
|
||||
build-linux-clang10-mini-tsan:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge+
|
||||
steps:
|
||||
- pre-steps
|
||||
- run: COMPILE_WITH_TSAN=1 CC=clang-13 CXX=clang++-13 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 check
|
||||
- post-steps
|
||||
|
||||
build-linux-clang10-ubsan:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- run: COMPILE_WITH_UBSAN=1 OPT="-fsanitize-blacklist=.circleci/ubsan_suppression_list.txt" CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 ubsan_check # aligned new doesn't work for reason we haven't figured out
|
||||
- post-steps
|
||||
|
||||
build-linux-valgrind:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- run: PORTABLE=1 make V=1 -j32 valgrind_test
|
||||
- post-steps
|
||||
|
||||
build-linux-clang10-clang-analyze:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- run: CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 CLANG_ANALYZER="/usr/bin/clang++-10" CLANG_SCAN_BUILD=scan-build-10 USE_CLANG=1 make V=1 -j32 analyze # aligned new doesn't work for reason we haven't figured out. For unknown, reason passing "clang++-10" as CLANG_ANALYZER doesn't work, and we need a full path.
|
||||
- post-steps
|
||||
- run:
|
||||
name: "compress test report"
|
||||
command: tar -cvzf scan_build_report.tar.gz scan_build_report
|
||||
when: on_fail
|
||||
- store_artifacts:
|
||||
path: scan_build_report.tar.gz
|
||||
destination: scan_build_report
|
||||
when: on_fail
|
||||
|
||||
build-linux-runner:
|
||||
machine: true
|
||||
resource_class: facebook/rocksdb-benchmark-sys1
|
||||
steps:
|
||||
- pre-steps
|
||||
- run:
|
||||
name: "Checked Linux build (Runner)"
|
||||
command: make V=1 J=8 -j8 check
|
||||
environment:
|
||||
LD_LIBRARY_PATH: /usr/local/lib
|
||||
- post-steps
|
||||
|
||||
build-linux-cmake-with-folly:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- setup-folly
|
||||
- build-folly
|
||||
- run: (mkdir build && cd build && cmake -DUSE_FOLLY=1 -DWITH_GFLAGS=1 -DROCKSDB_BUILD_SHARED=0 .. && make V=1 -j20 && ctest -j20)
|
||||
- post-steps
|
||||
|
||||
build-linux-cmake-with-folly-lite-no-test:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- setup-folly
|
||||
- run: (mkdir build && cd build && cmake -DUSE_FOLLY_LITE=1 -DWITH_GFLAGS=1 .. && make V=1 -j20)
|
||||
- post-steps
|
||||
|
||||
build-linux-cmake-with-benchmark:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- run: mkdir build && cd build && cmake -DWITH_GFLAGS=1 -DWITH_BENCHMARK=1 .. && make V=1 -j20 && ctest -j20
|
||||
- post-steps
|
||||
|
||||
build-linux-unity-and-headers:
|
||||
docker: # executor type
|
||||
- image: gcc:latest
|
||||
environment:
|
||||
EXTRA_CXXFLAGS: -mno-avx512f # Warnings-as-error in avx512fintrin.h, would be used on newer hardware
|
||||
resource_class: large
|
||||
steps:
|
||||
- checkout # check out the code in the project directory
|
||||
- run: apt-get update -y && apt-get install -y libgflags-dev
|
||||
- run:
|
||||
name: "Unity build"
|
||||
command: make V=1 -j8 unity_test
|
||||
no_output_timeout: 20m
|
||||
- run: make V=1 -j8 -k check-headers # could be moved to a different build
|
||||
- post-steps
|
||||
|
||||
build-linux-gcc-7-with-folly:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- setup-folly
|
||||
- build-folly
|
||||
- run: USE_FOLLY=1 LIB_MODE=static CC=gcc-7 CXX=g++-7 V=1 make -j32 check # TODO: LIB_MODE only to work around unresolved linker failures
|
||||
- post-steps
|
||||
|
||||
build-linux-gcc-7-with-folly-lite-no-test:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- setup-folly
|
||||
- run: USE_FOLLY_LITE=1 CC=gcc-7 CXX=g++-7 V=1 make -j32 all
|
||||
- post-steps
|
||||
|
||||
build-linux-gcc-8-no_test_run:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- run: CC=gcc-8 CXX=g++-8 V=1 make -j32 all
|
||||
- post-steps
|
||||
|
||||
build-linux-cmake-with-folly-coroutines:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
environment:
|
||||
CC: gcc-10
|
||||
CXX: g++-10
|
||||
steps:
|
||||
- pre-steps
|
||||
- setup-folly
|
||||
- build-folly
|
||||
- run: (mkdir build && cd build && cmake -DUSE_COROUTINES=1 -DWITH_GFLAGS=1 -DROCKSDB_BUILD_SHARED=0 .. && make V=1 -j20 && ctest -j20)
|
||||
- post-steps
|
||||
|
||||
build-linux-gcc-10-cxx20-no_test_run:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- run: CC=gcc-10 CXX=g++-10 V=1 ROCKSDB_CXX_STANDARD=c++20 make -j32 all
|
||||
- post-steps
|
||||
|
||||
build-linux-gcc-11-no_test_run:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- run: LIB_MODE=static CC=gcc-11 CXX=g++-11 V=1 make -j32 all microbench # TODO: LIB_MODE only to work around unresolved linker failures
|
||||
- post-steps
|
||||
|
||||
build-linux-clang-13-no_test_run:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- run: CC=clang-13 CXX=clang++-13 USE_CLANG=1 make -j32 all microbench
|
||||
- post-steps
|
||||
|
||||
# Ensure ASAN+UBSAN with folly, and full testsuite with clang 13
|
||||
build-linux-clang-13-asan-ubsan-with-folly:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- setup-folly
|
||||
- build-folly
|
||||
- run: CC=clang-13 CXX=clang++-13 LIB_MODE=static USE_CLANG=1 USE_FOLLY=1 COMPILE_WITH_UBSAN=1 COMPILE_WITH_ASAN=1 make -j32 check # TODO: LIB_MODE only to work around unresolved linker failures
|
||||
- post-steps
|
||||
|
||||
# This job is only to make sure the microbench tests are able to run, the benchmark result is not meaningful as the CI host is changing.
|
||||
build-linux-run-microbench:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- run: DEBUG_LEVEL=0 make -j32 run_microbench
|
||||
- post-steps
|
||||
|
||||
build-linux-mini-crashtest:
|
||||
executor: linux-docker
|
||||
resource_class: large
|
||||
steps:
|
||||
- pre-steps
|
||||
- run: ulimit -S -n `ulimit -H -n` && make V=1 -j8 CRASH_TEST_EXT_ARGS='--duration=960 --max_key=2500000 --use_io_uring=0' blackbox_crash_test_with_atomic_flush
|
||||
- post-steps
|
||||
|
||||
build-linux-crashtest-tiered-storage-bb:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- run:
|
||||
name: "run crashtest"
|
||||
command: ulimit -S -n `ulimit -H -n` && make V=1 -j32 CRASH_TEST_EXT_ARGS='--duration=10800 --use_io_uring=0' blackbox_crash_test_with_tiered_storage
|
||||
no_output_timeout: 100m
|
||||
- post-steps
|
||||
|
||||
build-linux-crashtest-tiered-storage-wb:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- run:
|
||||
name: "run crashtest"
|
||||
command: ulimit -S -n `ulimit -H -n` && make V=1 -j32 CRASH_TEST_EXT_ARGS='--duration=10800 --use_io_uring=0' whitebox_crash_test_with_tiered_storage
|
||||
no_output_timeout: 100m
|
||||
- post-steps
|
||||
|
||||
build-windows-vs2022-avx2:
|
||||
executor:
|
||||
name: win/server-2022
|
||||
size: 2xlarge
|
||||
environment:
|
||||
THIRDPARTY_HOME: C:/Users/circleci/thirdparty
|
||||
CMAKE_HOME: C:/Program Files/CMake
|
||||
CMAKE_BIN: C:/Program Files/CMake/bin/cmake.exe
|
||||
CTEST_BIN: C:/Program Files/CMake/bin/ctest.exe
|
||||
JAVA_HOME: C:/Program Files/BellSoft/LibericaJDK-8
|
||||
SNAPPY_HOME: C:/Users/circleci/thirdparty/snappy-1.1.8
|
||||
SNAPPY_INCLUDE: C:/Users/circleci/thirdparty/snappy-1.1.8;C:/Users/circleci/thirdparty/snappy-1.1.8/build
|
||||
SNAPPY_LIB_DEBUG: C:/Users/circleci/thirdparty/snappy-1.1.8/build/Debug/snappy.lib
|
||||
CMAKE_GENERATOR: Visual Studio 17 2022
|
||||
CMAKE_PORTABLE: AVX2
|
||||
steps:
|
||||
- windows-build-steps
|
||||
|
||||
build-windows-vs2022:
|
||||
executor:
|
||||
name: win/server-2022
|
||||
size: 2xlarge
|
||||
environment:
|
||||
THIRDPARTY_HOME: C:/Users/circleci/thirdparty
|
||||
CMAKE_HOME: C:/Program Files/CMake
|
||||
CMAKE_BIN: C:/Program Files/CMake/bin/cmake.exe
|
||||
CTEST_BIN: C:/Program Files/CMake/bin/ctest.exe
|
||||
JAVA_HOME: C:/Program Files/BellSoft/LibericaJDK-8
|
||||
SNAPPY_HOME: C:/Users/circleci/thirdparty/snappy-1.1.8
|
||||
SNAPPY_INCLUDE: C:/Users/circleci/thirdparty/snappy-1.1.8;C:/Users/circleci/thirdparty/snappy-1.1.8/build
|
||||
SNAPPY_LIB_DEBUG: C:/Users/circleci/thirdparty/snappy-1.1.8/build/Debug/snappy.lib
|
||||
CMAKE_GENERATOR: Visual Studio 17 2022
|
||||
CMAKE_PORTABLE: 1
|
||||
steps:
|
||||
- windows-build-steps
|
||||
|
||||
build-windows-vs2019:
|
||||
executor:
|
||||
name: win/server-2019
|
||||
size: 2xlarge
|
||||
environment:
|
||||
THIRDPARTY_HOME: C:/Users/circleci/thirdparty
|
||||
CMAKE_HOME: C:/Program Files/CMake
|
||||
CMAKE_BIN: C:/Program Files/CMake/bin/cmake.exe
|
||||
CTEST_BIN: C:/Program Files/CMake/bin/ctest.exe
|
||||
JAVA_HOME: C:/Program Files/BellSoft/LibericaJDK-8
|
||||
SNAPPY_HOME: C:/Users/circleci/thirdparty/snappy-1.1.8
|
||||
SNAPPY_INCLUDE: C:/Users/circleci/thirdparty/snappy-1.1.8;C:/Users/circleci/thirdparty/snappy-1.1.8/build
|
||||
SNAPPY_LIB_DEBUG: C:/Users/circleci/thirdparty/snappy-1.1.8/build/Debug/snappy.lib
|
||||
CMAKE_GENERATOR: Visual Studio 16 2019
|
||||
CMAKE_PORTABLE: 1
|
||||
steps:
|
||||
- windows-build-steps
|
||||
|
||||
build-linux-java:
|
||||
executor: linux-docker
|
||||
resource_class: large
|
||||
steps:
|
||||
- pre-steps
|
||||
- run:
|
||||
name: "Set Java Environment"
|
||||
command: |
|
||||
echo "JAVA_HOME=${JAVA_HOME}"
|
||||
echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV
|
||||
which java && java -version
|
||||
which javac && javac -version
|
||||
- run:
|
||||
name: "Test RocksDBJava"
|
||||
command: make V=1 J=8 -j8 jtest
|
||||
- post-steps
|
||||
|
||||
build-linux-java-pmd:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: large
|
||||
environment:
|
||||
JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
|
||||
steps:
|
||||
- install-maven
|
||||
- pre-steps
|
||||
- run:
|
||||
name: "Set Java Environment"
|
||||
command: |
|
||||
echo "JAVA_HOME=${JAVA_HOME}"
|
||||
echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV
|
||||
which java && java -version
|
||||
which javac && javac -version
|
||||
- run:
|
||||
name: "PMD RocksDBJava"
|
||||
command: make V=1 J=8 -j8 jpmd
|
||||
- post-pmd-steps
|
||||
|
||||
build-linux-java-static:
|
||||
executor: linux-java-docker
|
||||
resource_class: large
|
||||
steps:
|
||||
- pre-steps
|
||||
- run:
|
||||
name: "Set Java Environment"
|
||||
command: |
|
||||
echo "JAVA_HOME=${JAVA_HOME}"
|
||||
echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV
|
||||
which java && java -version
|
||||
which javac && javac -version
|
||||
- run:
|
||||
name: "Build RocksDBJava Static Library"
|
||||
command: scl enable devtoolset-7 'make V=1 J=8 -j8 rocksdbjavastatic'
|
||||
- post-steps
|
||||
|
||||
build-macos-java:
|
||||
macos:
|
||||
xcode: 14.3.1
|
||||
resource_class: macos.m1.medium.gen1
|
||||
environment:
|
||||
JAVA_HOME: /Library/Java/JavaVirtualMachines/liberica-jdk-8.jdk/Contents/Home
|
||||
ROCKSDB_DISABLE_JEMALLOC: 1 # jemalloc causes java 8 crash
|
||||
steps:
|
||||
- increase-max-open-files-on-macos
|
||||
- install-gflags-on-macos
|
||||
- install-jdk8-on-macos
|
||||
- pre-steps-macos
|
||||
- run:
|
||||
name: "Set Java Environment"
|
||||
command: |
|
||||
echo "JAVA_HOME=${JAVA_HOME}"
|
||||
echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV
|
||||
which java && java -version
|
||||
which javac && javac -version
|
||||
- run:
|
||||
name: "Test RocksDBJava"
|
||||
command: make V=1 J=16 -j16 jtest
|
||||
no_output_timeout: 20m
|
||||
- post-steps
|
||||
|
||||
build-macos-java-static:
|
||||
macos:
|
||||
xcode: 14.3.1
|
||||
resource_class: macos.m1.medium.gen1
|
||||
environment:
|
||||
JAVA_HOME: /Library/Java/JavaVirtualMachines/liberica-jdk-8.jdk/Contents/Home
|
||||
steps:
|
||||
- increase-max-open-files-on-macos
|
||||
- install-gflags-on-macos
|
||||
- install-cmake-on-macos
|
||||
- install-jdk8-on-macos
|
||||
- pre-steps-macos
|
||||
- run:
|
||||
name: "Set Java Environment"
|
||||
command: |
|
||||
echo "JAVA_HOME=${JAVA_HOME}"
|
||||
echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV
|
||||
which java && java -version
|
||||
which javac && javac -version
|
||||
- run:
|
||||
name: "Build RocksDBJava x86 and ARM Static Libraries"
|
||||
command: make V=1 J=16 -j16 rocksdbjavastaticosx
|
||||
no_output_timeout: 20m
|
||||
- post-steps
|
||||
|
||||
build-macos-java-static-universal:
|
||||
macos:
|
||||
xcode: 14.3.1
|
||||
resource_class: macos.m1.medium.gen1
|
||||
environment:
|
||||
JAVA_HOME: /Library/Java/JavaVirtualMachines/liberica-jdk-8.jdk/Contents/Home
|
||||
steps:
|
||||
- increase-max-open-files-on-macos
|
||||
- install-gflags-on-macos
|
||||
- install-cmake-on-macos
|
||||
- install-jdk8-on-macos
|
||||
- pre-steps-macos
|
||||
- run:
|
||||
name: "Set Java Environment"
|
||||
command: |
|
||||
echo "JAVA_HOME=${JAVA_HOME}"
|
||||
echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV
|
||||
which java && java -version
|
||||
which javac && javac -version
|
||||
- run:
|
||||
name: "Build RocksDBJava Universal Binary Static Library"
|
||||
command: make V=1 J=16 -j16 rocksdbjavastaticosx_ub
|
||||
no_output_timeout: 20m
|
||||
- post-steps
|
||||
|
||||
build-examples:
|
||||
executor: linux-docker
|
||||
resource_class: large
|
||||
steps:
|
||||
- pre-steps
|
||||
- run:
|
||||
name: "Build examples"
|
||||
command: |
|
||||
make V=1 -j4 static_lib && cd examples && make V=1 -j4
|
||||
- post-steps
|
||||
|
||||
build-cmake-mingw:
|
||||
executor: linux-docker
|
||||
resource_class: large
|
||||
steps:
|
||||
- pre-steps
|
||||
- run: update-alternatives --set x86_64-w64-mingw32-g++ /usr/bin/x86_64-w64-mingw32-g++-posix
|
||||
- run:
|
||||
name: "Build cmake-mingw"
|
||||
command: |
|
||||
export PATH=$JAVA_HOME/bin:$PATH
|
||||
echo "JAVA_HOME=${JAVA_HOME}"
|
||||
which java && java -version
|
||||
which javac && javac -version
|
||||
mkdir build && cd build && cmake -DJNI=1 -DWITH_GFLAGS=OFF .. -DCMAKE_C_COMPILER=x86_64-w64-mingw32-gcc -DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-g++ -DCMAKE_SYSTEM_NAME=Windows && make -j4 rocksdb rocksdbjni
|
||||
- post-steps
|
||||
|
||||
build-linux-non-shm:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
environment:
|
||||
TEST_TMPDIR: /tmp/rocksdb_test_tmp
|
||||
steps:
|
||||
- pre-steps
|
||||
- run: make V=1 -j32 check
|
||||
- post-steps
|
||||
|
||||
build-linux-arm-test-full:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: arm.large
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- run: make V=1 J=4 -j4 check
|
||||
- post-steps
|
||||
|
||||
build-linux-arm:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: arm.large
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- run: ROCKSDBTESTS_PLATFORM_DEPENDENT=only make V=1 J=4 -j4 all_but_some_tests check_some
|
||||
- post-steps
|
||||
|
||||
build-linux-arm-cmake-no_test_run:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
resource_class: arm.large
|
||||
environment:
|
||||
JAVA_HOME: /usr/lib/jvm/java-8-openjdk-arm64
|
||||
steps:
|
||||
- pre-steps
|
||||
- install-gflags
|
||||
- run:
|
||||
name: "Set Java Environment"
|
||||
command: |
|
||||
echo "JAVA_HOME=${JAVA_HOME}"
|
||||
echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV
|
||||
which java && java -version
|
||||
which javac && javac -version
|
||||
- run:
|
||||
name: "Build with cmake"
|
||||
command: |
|
||||
mkdir build
|
||||
cd build
|
||||
cmake -DCMAKE_BUILD_TYPE=Release -DWITH_TESTS=0 -DWITH_GFLAGS=1 -DWITH_BENCHMARK_TOOLS=0 -DWITH_TOOLS=0 -DWITH_CORE_TOOLS=1 ..
|
||||
make -j4
|
||||
- run:
|
||||
name: "Build Java with cmake"
|
||||
command: |
|
||||
rm -rf build
|
||||
mkdir build
|
||||
cd build
|
||||
cmake -DJNI=1 -DCMAKE_BUILD_TYPE=Release -DWITH_GFLAGS=1 ..
|
||||
make -j4 rocksdb rocksdbjni
|
||||
- post-steps
|
||||
|
||||
build-format-compatible:
|
||||
executor: linux-docker
|
||||
resource_class: 2xlarge
|
||||
steps:
|
||||
- pre-steps
|
||||
- run:
|
||||
name: "test"
|
||||
command: |
|
||||
export TEST_TMPDIR=/dev/shm/rocksdb
|
||||
rm -rf /dev/shm/rocksdb
|
||||
mkdir /dev/shm/rocksdb
|
||||
tools/check_format_compatible.sh
|
||||
- post-steps
|
||||
|
||||
build-fuzzers:
|
||||
executor: linux-docker
|
||||
resource_class: large
|
||||
steps:
|
||||
- pre-steps
|
||||
- run:
|
||||
name: "Build rocksdb lib"
|
||||
command: CC=clang-13 CXX=clang++-13 USE_CLANG=1 make -j4 static_lib
|
||||
- run:
|
||||
name: "Build fuzzers"
|
||||
command: cd fuzz && make sst_file_writer_fuzzer db_fuzzer db_map_fuzzer
|
||||
- post-steps
|
||||
|
||||
benchmark-linux: #use a private Circle CI runner (resource_class) to run the job
|
||||
machine: true
|
||||
resource_class: facebook/rocksdb-benchmark-sys1
|
||||
steps:
|
||||
- build-for-benchmarks
|
||||
- perform-benchmarks
|
||||
- post-benchmarks
|
||||
|
||||
workflows:
|
||||
version: 2
|
||||
jobs-linux-run-tests:
|
||||
jobs:
|
||||
- build-linux
|
||||
- build-linux-cmake-with-folly
|
||||
- build-linux-cmake-with-folly-lite-no-test
|
||||
- build-linux-gcc-7-with-folly
|
||||
- build-linux-gcc-7-with-folly-lite-no-test
|
||||
- build-linux-cmake-with-folly-coroutines
|
||||
- build-linux-cmake-with-benchmark
|
||||
- build-linux-encrypted_env-no_compression
|
||||
jobs-linux-run-tests-san:
|
||||
jobs:
|
||||
- build-linux-clang10-asan
|
||||
- build-linux-clang10-ubsan
|
||||
- build-linux-clang10-mini-tsan
|
||||
- build-linux-static_lib-alt_namespace-status_checked
|
||||
jobs-linux-no-test-run:
|
||||
jobs:
|
||||
- build-linux-release
|
||||
- build-linux-release-rtti
|
||||
- build-examples
|
||||
- build-fuzzers
|
||||
- build-linux-clang-no_test_run
|
||||
- build-linux-clang-13-no_test_run
|
||||
- build-linux-gcc-8-no_test_run
|
||||
- build-linux-gcc-10-cxx20-no_test_run
|
||||
- build-linux-gcc-11-no_test_run
|
||||
- build-linux-arm-cmake-no_test_run
|
||||
jobs-linux-other-checks:
|
||||
jobs:
|
||||
- build-linux-clang10-clang-analyze
|
||||
- build-linux-unity-and-headers
|
||||
- build-linux-mini-crashtest
|
||||
jobs-windows:
|
||||
jobs:
|
||||
- build-windows-vs2019
|
||||
- build-cmake-mingw
|
||||
jobs-java:
|
||||
jobs:
|
||||
- build-linux-java
|
||||
- build-linux-java-static
|
||||
- build-macos-java
|
||||
- build-macos-java-static
|
||||
- build-macos-java-static-universal
|
||||
- build-linux-java-pmd
|
||||
jobs-macos:
|
||||
jobs:
|
||||
- build-macos
|
||||
- build-macos-cmake:
|
||||
run_even_tests: true
|
||||
- build-macos-cmake:
|
||||
run_even_tests: false
|
||||
jobs-linux-arm:
|
||||
jobs:
|
||||
- build-linux-arm
|
||||
build-fuzzers:
|
||||
jobs:
|
||||
- build-fuzzers
|
||||
benchmark-linux:
|
||||
triggers:
|
||||
- schedule:
|
||||
cron: "0 * * * *"
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- main
|
||||
jobs:
|
||||
- benchmark-linux
|
||||
nightly:
|
||||
triggers:
|
||||
- schedule:
|
||||
cron: "0 9 * * *"
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- main
|
||||
jobs:
|
||||
- build-format-compatible
|
||||
- build-linux-arm-test-full
|
||||
- build-linux-run-microbench
|
||||
- build-linux-non-shm
|
||||
- build-linux-clang-13-asan-ubsan-with-folly
|
||||
- build-linux-valgrind
|
||||
- build-windows-vs2022-avx2
|
||||
- build-windows-vs2022
|
6
rocksdb/vendor/rocksdb/.circleci/ubsan_suppression_list.txt
vendored
Normal file
6
rocksdb/vendor/rocksdb/.circleci/ubsan_suppression_list.txt
vendored
Normal file
@ -0,0 +1,6 @@
# Suppress UBSAN warnings related to stl_tree.h, e.g.
# UndefinedBehaviorSanitizer: undefined-behavior /usr/bin/../lib/gcc/x86_64-linux-gnu/5.4.0/../../../../include/c++/5.4.0/bits/stl_tree.h:1505:43 in
# /usr/bin/../lib/gcc/x86_64-linux-gnu/5.4.0/../../../../include/c++/5.4.0/bits/stl_tree.h:1505:43:
# runtime error: upcast of address 0x000001fa8820 with insufficient space for an object of type
# 'std::_Rb_tree_node<std::pair<const std::__cxx11::basic_string<char>, rocksdb::(anonymous namespace)::LockHoldingInfo> >'
src:*bits/stl_tree.h
5 rocksdb/vendor/rocksdb/.clang-format vendored Normal file
@ -0,0 +1,5 @@
# Complete list of style options can be found at:
# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
---
BasedOnStyle: Google
...
45 rocksdb/vendor/rocksdb/.github/workflows/sanity_check.yml vendored Normal file
@ -0,0 +1,45 @@
name: Check buck targets and code format
on: [push, pull_request]
permissions:
  contents: read

jobs:
  check:
    name: Check TARGETS file and code format
    runs-on: ubuntu-latest
    steps:
      - name: Checkout feature branch
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Fetch from upstream
        run: |
          git remote add upstream https://github.com/facebook/rocksdb.git && git fetch upstream

      - name: Where am I
        run: |
          echo git status && git status
          echo "git remote -v" && git remote -v
          echo git branch && git branch

      - name: Setup Python
        uses: actions/setup-python@v1

      - name: Install Dependencies
        run: python -m pip install --upgrade pip

      - name: Install argparse
        run: pip install argparse

      - name: Download clang-format-diff.py
        run: wget https://raw.githubusercontent.com/llvm/llvm-project/release/12.x/clang/tools/clang-format/clang-format-diff.py

      - name: Check format
        run: VERBOSE_CHECK=1 make check-format

      - name: Compare buckify output
        run: make check-buck-targets

      - name: Simple source code checks
        run: make check-sources
100 rocksdb/vendor/rocksdb/.gitignore vendored Normal file
@ -0,0 +1,100 @@
make_config.mk
rocksdb.pc

*.a
*.arc
*.d
*.dylib*
*.gcda
*.gcno
*.o
*.o.tmp
*.so
*.so.*
*_test
*_bench
*_stress
*.out
*.class
*.jar
*.*jnilib*
*.d-e
*.o-*
*.swp
*~
*.vcxproj
*.vcxproj.filters
*.sln
*.cmake
.watchmanconfig
CMakeCache.txt
CMakeFiles/
build/

ldb
manifest_dump
sst_dump
blob_dump
block_cache_trace_analyzer
tools/block_cache_analyzer/*.pyc
column_aware_encoding_exp
util/build_version.cc
build_tools/VALGRIND_LOGS/
coverage/COVERAGE_REPORT
.gdbhistory
.gdb_history
package/
unity.a
tags
etags
rocksdb_dump
rocksdb_undump
db_test2
trace_analyzer
block_cache_trace_analyzer
io_tracer_parser
.DS_Store
.vs
.vscode
.clangd

java/out
java/target
java/test-libs
java/*.log
java/include/org_rocksdb_*.h

.idea/
*.iml

rocksdb.cc
rocksdb.h
unity.cc
java/crossbuild/.vagrant
.vagrant/
java/**/*.asc
java/javadoc

scan_build_report/
t
LOG

db_logs/
tp2/
fbcode/
fbcode
buckifier/*.pyc
buckifier/__pycache__
.arcconfig

compile_commands.json
clang-format-diff.py
.py3/

fuzz/proto/gen/
fuzz/crash-*

cmake-build-*
third-party/folly/
.cache
*.sublime-*
4 rocksdb/vendor/rocksdb/.lgtm.yml vendored Normal file
@ -0,0 +1,4 @@
extraction:
  cpp:
    index:
      build_command: make static_lib
1603 rocksdb/vendor/rocksdb/CMakeLists.txt vendored Normal file
File diff suppressed because it is too large
77 rocksdb/vendor/rocksdb/CODE_OF_CONDUCT.md vendored Normal file
@ -0,0 +1,77 @@
# Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to make participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment
include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.

## Scope

This Code of Conduct applies within all project spaces, and it also applies when
an individual is representing the project or its community in public spaces.
Examples of representing a project or community include using an official
project e-mail address, posting via an official social media account, or acting
as an appointed representative at an online or offline event. Representation of
a project may be further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at <opensource-conduct@fb.com>. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq
17 rocksdb/vendor/rocksdb/CONTRIBUTING.md vendored Normal file
@ -0,0 +1,17 @@
# Contributing to RocksDB

## Code of Conduct
The code of conduct is described in [`CODE_OF_CONDUCT.md`](CODE_OF_CONDUCT.md)

## Contributor License Agreement ("CLA")

In order to accept your pull request, we need you to submit a CLA. You
only need to do this once, so if you've done this for another Facebook
open source project, you're good to go. If you are submitting a pull
request for the first time, just let us know that you have completed
the CLA and we can cross-check with your GitHub username.

Complete your CLA here: <https://code.facebook.com/cla>

If you prefer to sign a paper copy, we can send you a PDF. Send us an
e-mail or create a new github issue to request the CLA in PDF format.
24 rocksdb/vendor/rocksdb/DEFAULT_OPTIONS_HISTORY.md vendored Normal file
@ -0,0 +1,24 @@
# RocksDB default options change log (NO LONGER MAINTAINED)
## Unreleased
* delayed_write_rate takes the rate given by rate_limiter if not specified.

## 5.2
* Change the default delayed-write slowdown value to 16MB/s and further increase the L0 stop condition to 36 files.

## 5.0 (11/17/2016)
* Options::allow_concurrent_memtable_write and Options::enable_write_thread_adaptive_yield are now true by default.
* Options.level0_stop_writes_trigger default value changes from 24 to 32.

## 4.8.0 (5/2/2016)
* options.max_open_files changes from 5000 to -1. This improves performance, but users need to set the file descriptor limit high enough and watch memory usage for indexes and bloom filters.
* options.base_background_compactions changes from max_background_compactions to 1. When users set a higher max_background_compactions but the write throughput is not high, writes are less spiky to disks.
* options.wal_recovery_mode changes from kTolerateCorruptedTailRecords to kPointInTimeRecovery. This avoids some false positives when the file system or hardware reorders the writes for file data and metadata.

## 4.7.0 (4/8/2016)
* options.write_buffer_size changes from 4MB to 64MB.
* options.target_file_size_base changes from 2MB to 64MB.
* options.max_bytes_for_level_base changes from 10MB to 256MB.
* options.soft_pending_compaction_bytes_limit changes from 0 (disabled) to 64GB.
* options.hard_pending_compaction_bytes_limit changes from 0 (disabled) to 256GB.
* table_cache_numshardbits changes from 4 to 6.
* max_file_opening_threads changes from 1 to 16.
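Any of these defaults can also be overridden per database through the librocksdb C API. Below is a minimal, hedged sketch in Nim that sets two of the options mentioned above; the FFI declarations are written inline against `rocksdb/c.h`, and the `librocksdb.so` library name is a platform-dependent assumption.

```nim
# A hedged sketch, not part of this change log: overriding two of the
# defaults listed above via the librocksdb C API.
const rocksdbLib = "librocksdb.so"  # assumed library name; differs per platform
type RocksOptions = distinct pointer

proc rocksdb_options_create(): RocksOptions {.importc, cdecl, dynlib: rocksdbLib.}
proc rocksdb_options_set_write_buffer_size(o: RocksOptions; size: csize_t) {.importc, cdecl, dynlib: rocksdbLib.}
proc rocksdb_options_set_max_open_files(o: RocksOptions; n: cint) {.importc, cdecl, dynlib: rocksdbLib.}
proc rocksdb_options_destroy(o: RocksOptions) {.importc, cdecl, dynlib: rocksdbLib.}

let opts = rocksdb_options_create()
rocksdb_options_set_write_buffer_size(opts, csize_t(64 shl 20)) # 64MB, the 4.7.0 default
rocksdb_options_set_max_open_files(opts, cint(-1))              # the 4.8.0 default
rocksdb_options_destroy(opts)
```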
16 rocksdb/vendor/rocksdb/DUMP_FORMAT.md vendored Normal file
@ -0,0 +1,16 @@
## RocksDB dump format

The version 1 RocksDB dump format is fairly simple:

1) The dump starts with the magic 8 byte identifier "ROCKDUMP"

2) The magic is followed by an 8 byte big-endian version, which is 0x00000001.

3) Next are arbitrarily sized chunks of bytes, each prefixed by a 4 byte little-endian number indicating how large the chunk is.

4) The first chunk is special: it is a JSON string describing the creation of this dump. It contains the following keys:
* database-path: The path of the database this dump was created from.
* hostname: The hostname of the machine where the dump was created.
* creation-time: Unix seconds since epoch when this dump was created.

5) Following the info chunk, the remaining chunks form key/value pairs: each key chunk is immediately followed by its value chunk.
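The format is simple enough to walk in a few lines of code. The following is a minimal reader sketch in Nim, assuming a well-formed version-1 dump; the file name `backup.rockdump` is hypothetical.

```nim
# A minimal reader sketch for the version-1 dump format described above.
import std/streams

proc readBE64(s: Stream): uint64 =
  for _ in 0 ..< 8:                       # 8-byte big-endian integer
    result = (result shl 8) or uint64(s.readUint8())

proc readLE32(s: Stream): uint32 =
  for i in 0 ..< 4:                       # 4-byte little-endian integer
    result = result or (uint32(s.readUint8()) shl (8 * i))

let s = newFileStream("backup.rockdump", fmRead)
doAssert s.readStr(8) == "ROCKDUMP"       # (1) magic identifier
doAssert s.readBE64() == 1'u64            # (2) big-endian version
let metaLen = s.readLE32()                # (3) length prefix of first chunk
echo s.readStr(int(metaLen))              # (4) first chunk: JSON metadata
while not s.atEnd():                      # (5) remaining chunks: key, then value
  let key = s.readStr(int(s.readLE32()))
  let value = s.readStr(int(s.readLE32()))
  echo key, " => ", value.len, " bytes"
s.close()
```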
2603 rocksdb/vendor/rocksdb/HISTORY.md vendored Normal file
File diff suppressed because it is too large
220 rocksdb/vendor/rocksdb/INSTALL.md vendored Normal file
@ -0,0 +1,220 @@
## Compilation

**Important**: If you plan to run RocksDB in production, don't compile using default
`make` or `make all`. That will compile RocksDB in debug mode, which is much slower
than release mode.

RocksDB's library should be able to compile without any dependency installed,
although we recommend installing some compression libraries (see below).
We do depend on newer gcc/clang with C++17 support (GCC >= 7, Clang >= 5).

There are a few options when compiling RocksDB:

* [recommended] `make static_lib` will compile librocksdb.a, the RocksDB static library, in release mode.

* `make shared_lib` will compile librocksdb.so, the RocksDB shared library, in release mode.

* `make check` will compile and run all the unit tests. `make check` will compile RocksDB in debug mode.

* `make all` will compile our static library, and all our tools and unit tests. Our tools
depend on gflags 2.2.0 or newer. You will need to have gflags installed to run `make all`. This will compile RocksDB in debug mode. Don't
use binaries compiled by `make all` in production.

* By default the binary we produce is optimized for the CPU you're compiling on
(`-march=native` or the equivalent). To build a binary compatible with the most
general architecture supported by your CPU and compiler, set `PORTABLE=1` for
the build, but performance will suffer as many operations benefit from newer
and wider instructions. In addition to `PORTABLE=0` (default) and `PORTABLE=1`,
it can be set to an architecture name recognized by your compiler. For example,
on 64-bit x86, a reasonable compromise is `PORTABLE=haswell`, which supports
many or most of the available optimizations while still being compatible with
most processors made since roughly 2013.
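For consumers embedding RocksDB, the product of `make static_lib` can be linked straight into a host program. Below is a hedged sketch of doing that from a Nim program; the paths and the list of compression dependencies are assumptions that depend on how the library was built.

```nim
# Hypothetical link configuration for a Nim program using a locally built
# librocksdb.a; adjust the path and dependency list to match your build.
{.passL: "/path/to/rocksdb/librocksdb.a".}
{.passL: "-lstdc++".}                          # RocksDB is C++, so the C++ runtime is needed
{.passL: "-lz -lbz2 -llz4 -lzstd -lsnappy".}   # only if these were compiled in (see below)
```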
## Dependencies

* You can link RocksDB with the following compression libraries:
  - [zlib](http://www.zlib.net/) - a library for data compression.
  - [bzip2](http://www.bzip.org/) - a library for data compression.
  - [lz4](https://github.com/lz4/lz4) - a library for extremely fast data compression.
  - [snappy](http://google.github.io/snappy/) - a library for fast data compression.
  - [zstandard](http://www.zstd.net) - a fast real-time compression algorithm.

* All our tools depend on:
  - [gflags](https://gflags.github.io/gflags/) - a library that handles command line flags processing. You can compile the rocksdb library even if you don't have gflags installed.

* `make check` will also check code formatting, which requires [clang-format](https://clang.llvm.org/docs/ClangFormat.html)

* If you wish to build the RocksJava static target, then cmake is required for building Snappy.

* If you wish to run microbench (e.g. `make microbench`, `make ribbon_bench` or `cmake -DWITH_BENCHMARK=1`), Google benchmark >= 1.6.0 is needed.
  * You can do the following to install Google benchmark. These commands are copied from `./build_tools/ubuntu20_image/Dockerfile`:

    `$ git clone --depth 1 --branch v1.7.0 https://github.com/google/benchmark.git ~/benchmark`

    `$ cd ~/benchmark && mkdir build && cd build && cmake .. -GNinja -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_GTEST_TESTS=0 && ninja && ninja install`

## Supported platforms

* **Linux - Ubuntu**
  * Upgrade your gcc to version at least 7 to get C++17 support.
  * Install gflags. First, try: `sudo apt-get install libgflags-dev`.
    If this doesn't work and you're using Ubuntu, here's a nice tutorial:
    http://askubuntu.com/questions/312173/installing-gflags-12-04
  * Install snappy. This is usually as easy as:
    `sudo apt-get install libsnappy-dev`.
  * Install zlib. Try: `sudo apt-get install zlib1g-dev`.
  * Install bzip2: `sudo apt-get install libbz2-dev`.
  * Install lz4: `sudo apt-get install liblz4-dev`.
  * Install zstandard: `sudo apt-get install libzstd-dev`.

* **Linux - CentOS / RHEL**
  * Upgrade your gcc to version at least 7 to get C++17 support.
  * Install gflags:

        git clone https://github.com/gflags/gflags.git
        cd gflags
        git checkout v2.2.0
        ./configure && make && sudo make install

    **Notice**: Once installed, please add the include path for gflags to your `CPATH` environment variable and the
    lib path to `LIBRARY_PATH`. If installed with default settings, the include path will be `/usr/local/include`
    and the lib path will be `/usr/local/lib`.

  * Install snappy:

        sudo yum install snappy snappy-devel

  * Install zlib:

        sudo yum install zlib zlib-devel

  * Install bzip2:

        sudo yum install bzip2 bzip2-devel

  * Install lz4:

        sudo yum install lz4-devel

  * Install ASAN (optional for debugging):

        sudo yum install libasan

  * Install zstandard:
    * With [EPEL](https://fedoraproject.org/wiki/EPEL):

          sudo yum install libzstd-devel

    * With CentOS 8:

          sudo dnf install libzstd-devel

    * From source:

          wget https://github.com/facebook/zstd/archive/v1.1.3.tar.gz
          mv v1.1.3.tar.gz zstd-1.1.3.tar.gz
          tar zxvf zstd-1.1.3.tar.gz
          cd zstd-1.1.3
          make && sudo make install

* **OS X**:
  * Install the latest C++ compiler that supports C++17:
    * Update XCode: run `xcode-select --install` (or install it from the XCode app's settings).
    * Install via [homebrew](http://brew.sh/).
      * If you're a first-time developer on macOS, you still need to run `xcode-select --install` in your command line.
      * Run `brew tap homebrew/versions; brew install gcc7 --use-llvm` to install gcc 7 (or higher).
  * Run `brew install rocksdb`.

* **FreeBSD** (11.01):

  * You can either install RocksDB from the Ports system using `cd /usr/ports/databases/rocksdb && make install`, or you can follow the details below to install the dependencies and compile from source code:

  * Install the dependencies for RocksDB:

        export BATCH=YES
        cd /usr/ports/devel/gmake && make install
        cd /usr/ports/devel/gflags && make install

        cd /usr/ports/archivers/snappy && make install
        cd /usr/ports/archivers/bzip2 && make install
        cd /usr/ports/archivers/liblz4 && make install
        cd /usr/ports/archivers/zstd && make install

        cd /usr/ports/devel/git && make install

  * Install the dependencies for RocksJava (optional):

        export BATCH=yes
        cd /usr/ports/java/openjdk7 && make install

  * Build RocksDB from source:

        cd ~
        git clone https://github.com/facebook/rocksdb.git
        cd rocksdb
        gmake static_lib

  * Build RocksJava from source (optional):

        cd rocksdb
        export JAVA_HOME=/usr/local/openjdk7
        gmake rocksdbjava

* **OpenBSD** (6.3/-current):

  * As RocksDB is not available in the ports yet, you have to build it on your own:

  * Install the dependencies for RocksDB:

        pkg_add gmake gflags snappy bzip2 lz4 zstd git jdk bash findutils gnuwatch

  * Build RocksDB from source:

        cd ~
        git clone https://github.com/facebook/rocksdb.git
        cd rocksdb
        gmake static_lib

  * Build RocksJava from source (optional):

        cd rocksdb
        export JAVA_HOME=/usr/local/jdk-1.8.0
        export PATH=$PATH:/usr/local/jdk-1.8.0/bin
        gmake rocksdbjava

* **iOS**:
  * Run: `TARGET_OS=IOS make static_lib`. When building a project which uses the rocksdb iOS library, make sure to define the important pre-processing macro `IOS_CROSS_COMPILE`.

* **Windows** (Visual Studio 2017 and up):
  * Read and follow the instructions at CMakeLists.txt.
  * Or install via [vcpkg](https://github.com/microsoft/vcpkg): run `vcpkg install rocksdb:x64-windows`.

* **AIX 6.1**
  * Install AIX Toolbox rpms with gcc.
  * Use these environment variables:

        export PORTABLE=1
        export CC=gcc
        export AR="ar -X64"
        export EXTRA_ARFLAGS=-X64
        export EXTRA_CFLAGS=-maix64
        export EXTRA_CXXFLAGS=-maix64
        export PLATFORM_LDFLAGS="-static-libstdc++ -static-libgcc"
        export LIBPATH=/opt/freeware/lib
        export JAVA_HOME=/usr/java8_64
        export PATH=/opt/freeware/bin:$PATH

* **Solaris Sparc**
  * Install GCC 7 or higher.
  * Use these environment variables:

        export CC=gcc
        export EXTRA_CFLAGS=-m64
        export EXTRA_CXXFLAGS=-m64
        export EXTRA_LDFLAGS=-m64
        export PORTABLE=1
        export PLATFORM_LDFLAGS="-static-libstdc++ -static-libgcc"
26 rocksdb/vendor/rocksdb/LANGUAGE-BINDINGS.md vendored Normal file
@ -0,0 +1,26 @@
This is the list of all known third-party language bindings for RocksDB. If something is missing, please open a pull request to add it.

* Java - https://github.com/facebook/rocksdb/tree/main/java
* Python
  * http://python-rocksdb.readthedocs.io/en/latest/
  * http://pyrocksdb.readthedocs.org/en/latest/ (unmaintained)
* Perl - https://metacpan.org/pod/RocksDB
* Node.js - https://npmjs.org/package/rocksdb
* Go
  * https://github.com/linxGnu/grocksdb
  * https://github.com/tecbot/gorocksdb (unmaintained)
* Ruby - http://rubygems.org/gems/rocksdb-ruby
* Haskell - https://hackage.haskell.org/package/rocksdb-haskell
* PHP - https://github.com/Photonios/rocksdb-php
* C#
  * https://github.com/warrenfalk/rocksdb-sharp
  * https://github.com/curiosity-ai/rocksdb-sharp
* Rust
  * https://github.com/pingcap/rust-rocksdb (used in production fork of https://github.com/spacejam/rust-rocksdb)
  * https://github.com/spacejam/rust-rocksdb
  * https://github.com/bh1xuw/rust-rocks
* D programming language - https://github.com/b1naryth1ef/rocksdb
* Erlang - https://gitlab.com/barrel-db/erlang-rocksdb
* Elixir - https://github.com/urbint/rox
* Nim - https://github.com/status-im/nim-rocksdb
* Swift and Objective-C (iOS/OSX) - https://github.com/iabudiab/ObjectiveRocks
202 rocksdb/vendor/rocksdb/LICENSE.Apache vendored Normal file
@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
29 rocksdb/vendor/rocksdb/LICENSE.leveldb vendored Normal file
@ -0,0 +1,29 @@
This contains code that is from LevelDB, and that code is under the following license:

Copyright (c) 2011 The LevelDB Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2595 rocksdb/vendor/rocksdb/Makefile vendored Normal file
File diff suppressed because it is too large
9 rocksdb/vendor/rocksdb/PLUGINS.md vendored Normal file
@ -0,0 +1,9 @@
This is the list of all known third-party plugins for RocksDB. If something is missing, please open a pull request to add it.

* [Dedupfs](https://github.com/ajkr/dedupfs): an example for plugin developers to reference
* [HDFS](https://github.com/riversand963/rocksdb-hdfs-env): an Env used for interacting with HDFS. Migrated from the main RocksDB repo.
* [ZenFS](https://github.com/westerndigitalcorporation/zenfs): a file system for zoned block devices
* [RADOS](https://github.com/riversand963/rocksdb-rados-env): an Env used for interacting with RADOS. Migrated from the main RocksDB repo.
* [PMEM](https://github.com/pmem/pmem-rocksdb-plugin): a collection of plugins to enable Persistent Memory on RocksDB.
* [IPPCP](https://github.com/intel/ippcp-plugin-rocksdb): a plugin to enable encryption on RocksDB based on the Intel-optimized open source IPP-Crypto library.
* [encfs](https://github.com/pegasus-kv/encfs): a plugin to enable encryption on RocksDB based on the OpenSSL library.
29 rocksdb/vendor/rocksdb/README.md vendored Normal file
@ -0,0 +1,29 @@
## RocksDB: A Persistent Key-Value Store for Flash and RAM Storage

[CircleCI Status](https://circleci.com/gh/facebook/rocksdb)

RocksDB is developed and maintained by Facebook Database Engineering Team.
It is built on earlier work on [LevelDB](https://github.com/google/leveldb) by Sanjay Ghemawat (sanjay@google.com)
and Jeff Dean (jeff@google.com)

This code is a library that forms the core building block for a fast
key-value server, especially suited for storing data on flash drives.
It has a Log-Structured-Merge-Database (LSM) design with flexible tradeoffs
between Write-Amplification-Factor (WAF), Read-Amplification-Factor (RAF)
and Space-Amplification-Factor (SAF). It has multi-threaded compactions,
making it especially suitable for storing multiple terabytes of data in a
single database.

Start with example usage here: https://github.com/facebook/rocksdb/tree/main/examples
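From the surrounding Nim wrapper's point of view, basic usage boils down to a handful of calls into the C API in `include/rocksdb/c.h`. Below is a minimal, hedged open/put/get sketch; the FFI declarations are inlined here rather than taken from any particular wrapper module, and the library name and database path are assumptions.

```nim
# A hedged sketch of basic usage via the librocksdb C API; "librocksdb.so"
# and "/tmp/rocksdb_demo" are assumptions.
const rocksdbLib = "librocksdb.so"
type
  RocksDb = distinct pointer
  Options = distinct pointer
  WriteOpts = distinct pointer
  ReadOpts = distinct pointer

proc rocksdb_options_create(): Options {.importc, cdecl, dynlib: rocksdbLib.}
proc rocksdb_options_set_create_if_missing(o: Options; v: uint8) {.importc, cdecl, dynlib: rocksdbLib.}
proc rocksdb_open(o: Options; path: cstring; err: ptr cstring): RocksDb {.importc, cdecl, dynlib: rocksdbLib.}
proc rocksdb_writeoptions_create(): WriteOpts {.importc, cdecl, dynlib: rocksdbLib.}
proc rocksdb_readoptions_create(): ReadOpts {.importc, cdecl, dynlib: rocksdbLib.}
proc rocksdb_put(db: RocksDb; o: WriteOpts; key: cstring; klen: csize_t;
                 val: cstring; vlen: csize_t; err: ptr cstring) {.importc, cdecl, dynlib: rocksdbLib.}
proc rocksdb_get(db: RocksDb; o: ReadOpts; key: cstring; klen: csize_t;
                 vlen: ptr csize_t; err: ptr cstring): cstring {.importc, cdecl, dynlib: rocksdbLib.}
proc rocksdb_close(db: RocksDb) {.importc, cdecl, dynlib: rocksdbLib.}

var err: cstring = nil
let opts = rocksdb_options_create()
rocksdb_options_set_create_if_missing(opts, 1)
let db = rocksdb_open(opts, "/tmp/rocksdb_demo", addr err)
doAssert err.isNil, $err
rocksdb_put(db, rocksdb_writeoptions_create(), "key", csize_t(3), "value", csize_t(5), addr err)
var vlen: csize_t
let v = rocksdb_get(db, rocksdb_readoptions_create(), "key", csize_t(3), addr vlen, addr err)
echo v   # prints "value"; a real program would free the buffer (rocksdb_free)
rocksdb_close(db)
```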
See the [github wiki](https://github.com/facebook/rocksdb/wiki) for more explanation.

The public interface is in `include/`. Callers should not include or
rely on the details of any other header files in this package. Those
internal APIs may be changed without warning.

Questions and discussions are welcome on the [RocksDB Developers Public](https://www.facebook.com/groups/rocksdb.dev/) Facebook group and [email list](https://groups.google.com/g/rocksdb) on Google Groups.

## License

RocksDB is dual-licensed under both the GPLv2 (found in the COPYING file in the root directory) and Apache 2.0 License (found in the LICENSE.Apache file in the root directory). You may select, at your option, one of the above-listed licenses.
172 rocksdb/vendor/rocksdb/USERS.md vendored Normal file
@ -0,0 +1,172 @@
This document lists users of RocksDB and their use cases. If you are using RocksDB, please open a pull request and add yourself to the list.

## Facebook
At Facebook, we use RocksDB as a storage engine in multiple data management services and as a backend for many different stateful services, including:

1. MyRocks -- https://github.com/MySQLOnRocksDB/mysql-5.6
2. MongoRocks -- https://github.com/mongodb-partners/mongo-rocks
3. ZippyDB -- Facebook's distributed key-value store with Paxos-style replication, built on top of RocksDB.[1] https://www.youtube.com/watch?v=DfiN7pG0D0k
4. Laser -- a high query throughput, low (millisecond) latency, key-value storage service built on top of RocksDB.[1]
5. Dragon -- a distributed graph query engine. https://code.facebook.com/posts/1737605303120405/dragon-a-distributed-graph-query-engine/
6. Stylus -- a low-level stream processing framework written in C++.[1]
7. LogDevice -- a distributed data store for logs [2]

[1] https://research.facebook.com/publications/realtime-data-processing-at-facebook/

[2] https://code.facebook.com/posts/357056558062811/logdevice-a-distributed-data-store-for-logs/

## Bilibili
[Bilibili](bilibili.com) [uses](https://www.alluxio.io/blog/when-ai-meets-alluxio-at-bilibili-building-an-efficient-ai-platform-for-data-preprocessing-and-model-training/) Alluxio to speed up its ML training workloads, and Alluxio uses RocksDB to store its filesystem metadata, so Bilibili uses RocksDB.

Bilibili's [real-time platform](https://www.alibabacloud.com/blog/architecture-and-practices-of-bilibilis-real-time-platform_596676) uses Flink, and uses RocksDB as Flink's state store.

## TikTok
TikTok, or its parent company ByteDance, uses RocksDB as the storage engine for some storage systems, such as its distributed graph database [ByteGraph](https://vldb.org/pvldb/vol15/p3306-li.pdf).

Also, TikTok uses [Alluxio](alluxio.io) to [speed up Presto queries](https://www.alluxio.io/resources/videos/improving-presto-performance-with-alluxio-at-tiktok/), and Alluxio stores the files' metadata in RocksDB.

## FoundationDB
[FoundationDB](https://www.foundationdb.org/) [uses](https://github.com/apple/foundationdb/blob/377f1f692da6ab2fe5bdac57035651db3e5fb66d/fdbserver/KeyValueStoreRocksDB.actor.cpp) RocksDB to implement a [key-value store interface](https://github.com/apple/foundationdb/blob/377f1f692da6ab2fe5bdac57035651db3e5fb66d/fdbserver/KeyValueStoreRocksDB.actor.cpp#L1127) in its server backend.

## Apple
Apple [uses](https://opensource.apple.com/projects/foundationdb/) FoundationDB, so it also uses RocksDB.

## Snowflake
Snowflake [uses](https://www.snowflake.com/blog/how-foundationdb-powers-snowflake-metadata-forward/) FoundationDB, so it also uses RocksDB.

## Microsoft
The Bing search engine from Microsoft uses RocksDB as the storage engine for its web data platform: https://blogs.bing.com/Engineering-Blog/october-2021/RocksDB-in-Microsoft-Bing

## LinkedIn
Two different use cases at LinkedIn are using RocksDB as a storage engine:

1. LinkedIn's follow feed for storing users' activities. Check out the blog post: https://engineering.linkedin.com/blog/2016/03/followfeed--linkedin-s-feed-made-faster-and-smarter
2. Apache Samza, an open source framework for stream processing

Learn more about those use cases in a Tech Talk by Ankit Gupta and Naveen Somasundaram: http://www.youtube.com/watch?v=plqVp_OnSzg

## Yahoo
Yahoo is using RocksDB as a storage engine for their biggest distributed data store, Sherpa. Learn more about it here: http://yahooeng.tumblr.com/post/120730204806/sherpa-scales-new-heights

## Tencent
[PaxosStore](https://github.com/Tencent/paxosstore) is a distributed database supporting WeChat. It uses RocksDB as its storage engine.

## Baidu
[Apache Doris](http://doris.apache.org/master/en/) is an MPP analytical database engine released by Baidu. It [uses RocksDB](http://doris.apache.org/master/en/administrator-guide/operation/tablet-meta-tool.html) to manage its tablets' metadata.

## CockroachDB
CockroachDB is an open-source geo-replicated transactional database. They are using RocksDB as their storage engine. Check out their github: https://github.com/cockroachdb/cockroach

## DNANexus
DNANexus is using RocksDB to speed up processing of genomics data.
You can learn more from this great blog post by Mike Lin: http://devblog.dnanexus.com/faster-bam-sorting-with-samtools-and-rocksdb/

## Iron.io
Iron.io is using RocksDB as a storage engine for their distributed queueing system.
Learn more from this Tech Talk by Reed Allman: http://www.youtube.com/watch?v=HTjt6oj-RL4

## Tango Me
Tango is using RocksDB as graph storage to store all users' connection data and other social activity data.

## Turn
Turn is using RocksDB as a storage layer for their key/value store, serving at peak 2.4MM QPS out of different datacenters.
Check out our RocksDB Protobuf merge operator at: https://github.com/vladb38/rocksdb_protobuf

## Santander UK/Cloudera Professional Services
Check out their blog post: http://blog.cloudera.com/blog/2015/08/inside-santanders-near-real-time-data-ingest-architecture/

## Airbnb
Airbnb is using RocksDB as a storage engine for their personalized search service. You can learn more about it here: https://www.youtube.com/watch?v=ASQ6XMtogMs

## Alluxio
[Alluxio](https://www.alluxio.io) uses RocksDB to serve and scale file system metadata to beyond 1 billion files. The detailed design and implementation are described in this engineering blog:
https://www.alluxio.io/blog/scalable-metadata-service-in-alluxio-storing-billions-of-files/

## Pinterest
Pinterest's Object Retrieval System uses RocksDB for storage: https://www.youtube.com/watch?v=MtFEVEs_2Vo

## Smyte
[Smyte](https://www.smyte.com/) uses RocksDB as the storage layer for their core key-value storage, high-performance counters and time-windowed HyperLogLog services.

## Rakuten Marketing
[Rakuten Marketing](https://marketing.rakuten.com/) uses RocksDB as the disk cache layer for the real-time bidding service in their Performance DSP.

## VWO, Wingify
[VWO's](https://vwo.com/) Smart Code checker and URL helper uses RocksDB to store all the URLs where VWO's Smart Code is installed.

## quasardb
[quasardb](https://www.quasardb.net) is a high-performance, distributed, transactional key-value database that integrates well with in-memory analytics engines such as Apache Spark.
quasardb uses a heavily tuned RocksDB as its persistence layer.

## Netflix
[Netflix](http://techblog.netflix.com/2016/05/application-data-caching-using-ssds.html) uses RocksDB on AWS EC2 instances with local SSD drives to cache application data.

## TiKV
[TiKV](https://github.com/pingcap/tikv) is a geo-replicated, high-performance, distributed, transactional key-value database. TiKV is powered by Rust and Raft. TiKV uses RocksDB as its persistence layer.

## TiDB
[TiDB](https://github.com/pingcap/tidb) uses the TiKV distributed key-value database, so it uses RocksDB.

## PingCAP
[PingCAP](https://www.pingcap.com/) is the company behind TiDB; its cloud database service uses RocksDB.

## Apache Spark
[Spark Structured Streaming](https://docs.databricks.com/structured-streaming/rocksdb-state-store.html) uses RocksDB as the local state store.

## Databricks
[Databricks](https://www.databricks.com/) [replaces AWS RDS with TiDB](https://www.pingcap.com/case-study/how-databricks-tackles-the-scalability-limit-with-a-mysql-alternative/) for scalability, so it uses RocksDB.

## Apache Flink
[Apache Flink](https://flink.apache.org/news/2016/03/08/release-1.0.0.html) uses RocksDB to store state locally on a machine.

## Dgraph
[Dgraph](https://github.com/dgraph-io/dgraph) is an open-source, scalable, distributed, low-latency, high-throughput graph database. They use RocksDB to store state locally on a machine.

## Uber
[Uber](http://eng.uber.com/cherami/) uses RocksDB as a durable and scalable task queue.

## 360 Pika
[360](http://www.360.cn/) [Pika](https://github.com/Qihoo360/pika) is a NoSQL store compatible with Redis. With huge amounts of data stored, Redis may hit a capacity bottleneck; Pika was born to solve this. It has been widely used in many companies.

## LzLabs
LzLabs is using RocksDB as a storage engine in their multi-database distributed framework to store application configuration and user data.

## ProfaneDB
[ProfaneDB](https://profanedb.gitlab.io/) is a database for Protocol Buffers, and uses RocksDB for storage. It is accessible via gRPC, and the schema is defined directly using `.proto` files.

## IOTA Foundation
[IOTA Foundation](https://www.iota.org/) is using RocksDB in the [IOTA Reference Implementation (IRI)](https://github.com/iotaledger/iri) to store the local state of the Tangle. The Tangle is the first open-source distributed ledger powering the future of the Internet of Things.

## Avrio Project
[Avrio Project](http://avrio-project.github.io/avrio.network/) is using RocksDB in [Avrio](https://github.com/avrio-project/avrio) to store blocks, account balances and other blockchain-related data. Avrio is a multi-blockchain decentralized cryptocurrency empowering monetary transactions.

## Crux
[Crux](https://github.com/juxt/crux) is a document database that uses RocksDB for local [EAV](https://en.wikipedia.org/wiki/Entity%E2%80%93attribute%E2%80%93value_model) index storage to enable point-in-time bitemporal Datalog queries. The "unbundled" architecture uses Kafka to provide horizontal scalability.

## Nebula Graph
[Nebula Graph](https://github.com/vesoft-inc/nebula) is a distributed, scalable, lightning-fast, open source graph database capable of hosting super large scale graphs with dozens of billions of vertices (nodes) and trillions of edges, with millisecond latency.

## YugabyteDB
[YugabyteDB](https://www.yugabyte.com/) is an open source, high performance, distributed SQL database that uses RocksDB as its storage layer. For more information, please see https://github.com/yugabyte/yugabyte-db/.

## ArangoDB
[ArangoDB](https://www.arangodb.com/) is a native multi-model database with flexible data models for documents, graphs, and key-values, for building high performance applications using a convenient SQL-like query language or JavaScript extensions. It uses RocksDB as its storage engine.

## Qdrant
[Qdrant](https://qdrant.tech/) is an open source vector database; it [uses](https://qdrant.tech/documentation/concepts/storage/) RocksDB as its persistent storage.

## Milvus
[Milvus](https://milvus.io/) is an open source vector database for unstructured data. It uses RocksDB not only as one of the supported kv storage engines, but also as a message queue.

## Kafka
[Kafka](https://kafka.apache.org/) is an open-source distributed event streaming platform; it uses RocksDB to store state in Kafka Streams: https://www.confluent.io/blog/how-to-tune-rocksdb-kafka-streams-state-stores-performance/.

## Solana Labs
[Solana](https://github.com/solana-labs/solana) is a fast, secure, scalable, and decentralized blockchain. It uses RocksDB as the underlying storage for its ledger store.

## Apache Kvrocks

[Apache Kvrocks](https://github.com/apache/kvrocks) is an open-source distributed key-value NoSQL database built on top of RocksDB. It serves as a cost-saving and capacity-increasing alternative drop-in replacement for Redis.

## Others
More databases using RocksDB can be found at [dbdb.io](https://dbdb.io/browse?embeds=rocksdb).
228 rocksdb/vendor/rocksdb/WINDOWS_PORT.md vendored Normal file
@ -0,0 +1,228 @@
|
||||
# Microsoft Contribution Notes
|
||||
|
||||
## Contributors
|
||||
* Alexander Zinoviev https://github.com/zinoale
|
||||
* Dmitri Smirnov https://github.com/yuslepukhin
|
||||
* Praveen Rao https://github.com/PraveenSinghRao
|
||||
* Sherlock Huang https://github.com/SherlockNoMad
|
||||
|
||||
## Introduction
|
||||
RocksDB is a well proven open source key-value persistent store, optimized for fast storage. It provides scalability with number of CPUs and storage IOPS, to support IO-bound, in-memory and write-once workloads, most importantly, to be flexible to allow for innovation.
|
||||
|
||||
As Microsoft Bing team we have been continuously pushing hard to improve the scalability, efficiency of platform and eventually benefit Bing end-user satisfaction. We would like to explore the opportunity to embrace open source, RocksDB here, to use, enhance and customize for our usage, and also contribute back to the RocksDB community. Herein, we are pleased to offer this RocksDB port for Windows platform.
|
||||
|
||||
These notes describe some decisions and changes we had to make with regards to porting RocksDB on Windows. We hope this will help both reviewers and users of the Windows port.
|
||||
We are open for comments and improvements.
|
||||
|
||||
## OS specifics
|
||||
All of the porting, testing and benchmarking was done on Windows Server 2012 R2 Datacenter 64-bit but to the best of our knowledge there is not a specific API we used during porting that is unsupported on other Windows OS after Vista.
|
||||
|
||||
## Porting goals
|
||||
We strive to achieve the following goals:
|
||||
* make use of the existing porting interface of RocksDB
|
||||
* make minimum [WY2]modifications within platform independent code.
|
||||
* make all unit test pass both in debug and release builds.
|
||||
* Note: latest introduction of SyncPoint seems to disable running db_test in Release.
|
||||
* make performance on par with published benchmarks accounting for HW differences
|
||||
* we would like to keep the port code inline with the main branch with no forking
|
||||
|
||||
## Build system
|
||||
We have chosen CMake as a widely accepted build system to build the Windows port. It is very fast and convenient.
|
||||
|
||||
At the same time it generates Visual Studio projects that are both usable from a command line and IDE.
|
||||
|
||||
The top-level CMakeLists.txt file contains description of all targets and build rules. It also provides brief instructions on how to build the software for Windows. One more build related file is thirdparty.inc that also resides on the top level. This file must be edited to point to actual third party libraries location.
|
||||
We think that it would be beneficial to merge the existing make-based build system and the new cmake-based build system into a single one to use on all platforms.
|
||||
|
||||
All building and testing was done for 64-bit. We have not conducted any testing for 32-bit and early reports indicate that it will not run on 32-bit.
|
||||
|
||||
## C++ and STL notes
|
||||
We had to make some minimum changes within the portable files that either account for OS differences or the shortcomings of C++11 support in the current version of the MS compiler. Most or all of them are expected to be fixed in the upcoming compiler releases.
|
||||
|
||||
We plan to use this port for our business purposes here at Bing and this provided business justification for this port. This also means, we do not have at present to choose the compiler version at will.
|
||||
|
||||
* Certain headers that are not present and not necessary on Windows were simply wrapped in `#ifndef OS_WIN` in a few places (`unistd.h`)
* All posix-specific headers were replaced with `port/port.h`, which worked well
* Replaced `dirent.h` with `port/port_dirent.h` (very few places), with the implementation of the relevant interfaces within the `rocksdb::port` namespace
* Replaced `sys/time.h` with `port/sys_time.h` (few places) and implemented equivalents within `rocksdb::port`
* The `printf %z` specification is not supported on Windows. To imitate existing standards we came up with a string macro `ROCKSDB_PRIszt` which expands to `zu` on posix systems and to `Iu` on windows.
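
A minimal sketch of how such a format-specifier macro can be defined and used (the surrounding guard and the `report` helper are illustrative, not the port's actual code):

```cpp
#include <cstdio>
#include <cstddef>

// Illustrative sketch of a size_t format-specifier macro, as described above.
#ifdef OS_WIN
#define ROCKSDB_PRIszt "Iu"  // MSVC's size_t length modifier
#else
#define ROCKSDB_PRIszt "zu"  // C99 size_t length modifier
#endif

void report(std::size_t n) {
  // Expands to "entries: %Iu\n" on Windows and "entries: %zu\n" elsewhere.
  std::printf("entries: %" ROCKSDB_PRIszt "\n", n);
}
```
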
* In-class member initializations were moved into constructors in some cases
* `constexpr` is not supported. We had to replace `std::numeric_limits<>::max/min()` with the corresponding C macros for constants. Sometimes we had to make class members `static const` and place a definition within a .cc file.
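
A sketch of that workaround pattern (the class and member names here are illustrative):

```cpp
#include <climits>

// Instead of: static constexpr size_t kMaxKeySize = std::numeric_limits<size_t>::max();
// use the C limits macro, and give the static const integral member an
// out-of-line definition so older MSVC emits storage for it when it is ODR-used.

// header
class KeyLimits {  // illustrative name
 public:
  static const unsigned long long kMaxKeySize = ULLONG_MAX;
};

// .cc file
const unsigned long long KeyLimits::kMaxKeySize;
```
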
* `constexpr` for functions was replaced with a template specialization (1 place)
* Union members that have non-trivial constructors were replaced with `char[]` in one place, along with bug fixes (spatial experimental feature)
* Zero-sized arrays are deemed a non-standard extension, so we converted them to size-1 arrays, which should work well for the purposes of these classes.
* `std::chrono` lacks nanoseconds support (fixed in the upcoming release of the STL) and we had to use `QueryPerformanceCounter()` within env_win.cc
* Function-local statics initialization is still not thread-safe. We used `std::once_flag`/`std::call_once` to mitigate this within WinEnv.

## Windows Environment notes

We endeavored to make it functionally on par with posix_env. This means we replicated the functionality of the thread pool and other components as precisely as possible, including:

* Replicating posix logic using `std::thread` primitives.
* Implementing all posix_env disk access functionality.
* Setting `use_os_buffer=false` to disable OS disk buffering for WinWritableFile and WinRandomAccessFile.
* Replacing `pread/pwrite` with `WriteFile/ReadFile` plus an `OVERLAPPED` structure.
* Using `SetFileInformationByHandle` to compensate for the absence of `fallocate`.

### In detail

Even though Windows provides its own efficient thread-pool implementation, we chose to replicate the posix logic using `std::thread` primitives. This allows anyone to quickly detect changes within the posix source code and replicate them within the windows env. This has proven to work very well. At the same time, anyone who wishes to replace the built-in thread pool can do so using RocksDB stackable environments.

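As a rough illustration of the replicated approach (not the actual port code), a worker loop built from `std::thread` primitives looks like this:

```cpp
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

// Minimal sketch of a posix_env-style thread pool built on std::thread;
// the real port mirrors the posix scheduling logic far more closely.
class SimplePool {
 public:
  explicit SimplePool(size_t n) {
    for (size_t i = 0; i < n; ++i) {
      workers_.emplace_back([this] {
        for (;;) {
          std::function<void()> job;
          {
            std::unique_lock<std::mutex> lk(mu_);
            cv_.wait(lk, [this] { return stop_ || !jobs_.empty(); });
            if (stop_ && jobs_.empty()) return;
            job = std::move(jobs_.front());
            jobs_.pop();
          }
          job();  // run outside the lock
        }
      });
    }
  }
  void Schedule(std::function<void()> job) {
    { std::lock_guard<std::mutex> lk(mu_); jobs_.push(std::move(job)); }
    cv_.notify_one();
  }
  ~SimplePool() {
    { std::lock_guard<std::mutex> lk(mu_); stop_ = true; }
    cv_.notify_all();
    for (auto& t : workers_) t.join();
  }
 private:
  std::mutex mu_;
  std::condition_variable cv_;
  std::queue<std::function<void()>> jobs_;
  std::vector<std::thread> workers_;
  bool stop_ = false;
};
```
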
For disk access we implemented all of the functionality present within the posix_env, which includes memory-mapped files, random access, rate-limiter support, etc.

The `use_os_buffer` flag on Posix platforms currently denotes disabling read-ahead logic via the `fadvise` mechanism. Windows does not have an `fadvise` system call; what is more, it implements the disk cache in a way that differs greatly from Linux. It is not an uncommon practice on Windows to perform un-buffered disk access to gain control over memory consumption. We think that in our use case this may also be a good configuration option, at the expense of disk throughput; to compensate, one may increase the configured in-memory cache size instead. Thus we have chosen `use_os_buffer=false` to disable OS disk buffering for `WinWritableFile` and `WinRandomAccessFile`. The OS imposes restrictions on the alignment of the disk offsets, the buffers used and the amount of data that is read/written when accessing files in un-buffered mode. When the option is true, the classes behave in a standard way. This makes it possible to perform writes and reads in cases where un-buffered access does not make sense, such as WAL and MANIFEST.

We have replaced `pread/pwrite` with `WriteFile/ReadFile` plus an `OVERLAPPED` structure, so that we can atomically seek to the position of the disk operation while still performing the operation synchronously. Thus we are able to emulate the functionality of `pread/pwrite` reasonably well. The only difference is that the file pointer is not returned to its original position, but that hardly matters given the random nature of access.

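A minimal sketch of this emulation (error handling trimmed; the helper name is ours, not the port's):

```cpp
#include <windows.h>

// Illustrative pread-style helper: the OVERLAPPED offset supplies the
// position atomically, while ReadFile itself runs synchronously because
// the handle was opened without FILE_FLAG_OVERLAPPED.
SSIZE_T PositionalRead(HANDLE file, void* buf, DWORD count, ULONGLONG offset) {
  OVERLAPPED ov = {};
  ov.Offset = static_cast<DWORD>(offset & 0xFFFFFFFF);
  ov.OffsetHigh = static_cast<DWORD>(offset >> 32);
  DWORD bytes_read = 0;
  if (!ReadFile(file, buf, count, &bytes_read, &ov)) {
    return -1;  // caller inspects GetLastError()
  }
  return static_cast<SSIZE_T>(bytes_read);
}
```
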
We used `SetFileInformationByHandle` both to truncate files after writing a full final page to disk and to pre-allocate disk space for faster I/O, thus compensating for the absence of `fallocate`, although some differences remain. For example, the pre-allocated space is not filled with zeros as on Linux; however, on a positive note, the end-of-file position is also not modified after pre-allocation.

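A sketch of both uses (helper names are ours; error handling trimmed):

```cpp
#include <windows.h>

// Illustrative: truncate a file to `size` bytes via FileEndOfFileInfo,
// standing in for ftruncate on Windows.
bool TruncateTo(HANDLE file, LONGLONG size) {
  FILE_END_OF_FILE_INFO eof;
  eof.EndOfFile.QuadPart = size;
  return SetFileInformationByHandle(file, FileEndOfFileInfo, &eof,
                                    sizeof(eof)) != FALSE;
}

// Illustrative: reserve `size` bytes of disk space via FileAllocationInfo;
// unlike Linux fallocate, the space is not zero-filled and the end-of-file
// position is left unchanged.
bool Preallocate(HANDLE file, LONGLONG size) {
  FILE_ALLOCATION_INFO alloc;
  alloc.AllocationSize.QuadPart = size;
  return SetFileInformationByHandle(file, FileAllocationInfo, &alloc,
                                    sizeof(alloc)) != FALSE;
}
```
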

RocksDB renames, copies and deletes files at will, even though they may be opened with another handle at the same time. We had to relax the restrictions and allow nearly all the concurrent access permissions possible.

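Concretely, this amounts to opening handles with the most permissive sharing mode (a sketch; the wrapper name is ours):

```cpp
#include <windows.h>

// Illustrative: open with full sharing so that other handles may read,
// write, rename or delete the file while it is open, mirroring the POSIX
// behavior RocksDB relies on.
HANDLE OpenShared(const wchar_t* path) {
  return CreateFileW(path, GENERIC_READ,
                     FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                     nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
}
```
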

## Thread-Local Storage

Thread-local storage plays a significant role in RocksDB performance. Rather than creating a separate implementation, we chose to create inline wrappers that forward `pthread_specific` calls to the Windows `Tls*` interfaces within the `rocksdb::port` namespace. This leaves the existing meat of the logic intact and just as maintainable.
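
A sketch of such forwarding wrappers (simplified: `TlsAlloc` cannot register the destructor, which is exactly what the `".CRT$XLB"` callback described below is for):

```cpp
#include <windows.h>

// Illustrative inline wrappers forwarding the pthread TLS API to Win32 Tls*.
using pthread_key_t = DWORD;

inline int pthread_key_create(pthread_key_t* key, void (*)(void*)) {
  *key = TlsAlloc();
  return (*key == TLS_OUT_OF_INDEXES) ? GetLastError() : 0;
}

inline void* pthread_getspecific(pthread_key_t key) {
  return TlsGetValue(key);
}

inline int pthread_setspecific(pthread_key_t key, const void* value) {
  return TlsSetValue(key, const_cast<void*>(value)) ? 0 : GetLastError();
}
```
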
To mitigate the lack of thread-local storage cleanup on thread exit, we added a limited amount of windows-specific code within the same thread_local.cc file that injects a cleanup callback into a `"__tls"` structure within the `".CRT$XLB"` data segment. This approach guarantees that the callback is invoked regardless of whether RocksDB is used within an executable, a standalone DLL, or within another DLL.
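
A minimal sketch of registering a TLS callback via `".CRT$XLB"` (additional linker plumbing, such as referencing `_tls_used` for EXEs, is omitted):

```cpp
#include <windows.h>

// Illustrative TLS callback: fires on every thread exit, whether the code
// lives in an EXE or a DLL.
static void NTAPI OnThreadEvent(PVOID, DWORD reason, PVOID) {
  if (reason == DLL_THREAD_DETACH) {
    // run per-thread TLS destructors here
  }
}

#pragma section(".CRT$XLB", read)
extern "C" __declspec(allocate(".CRT$XLB"))
PIMAGE_TLS_CALLBACK p_thread_callback = OnThreadEvent;
```
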

## Jemalloc usage

When RocksDB is used with jemalloc, the latter needs to be initialized before any of the C++ globals or statics. To accomplish that, we injected an initialization routine into `".CRT$XCT"`, which is automatically invoked by the runtime before static objects are initialized. The jemalloc uninitialization is queued via `atexit()`.
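
A sketch of injecting such an early initializer (the initializer body and names are hypothetical):

```cpp
#include <cstdlib>

// Illustrative: run an initializer from the ".CRT$XCT" section so it executes
// before C++ static constructors; teardown is queued via atexit().
static void my_early_init() {  // hypothetical initializer
  // initialize the allocator here
  std::atexit([] { /* uninitialize the allocator */ });
}

typedef void(__cdecl* CrtInitializer)();
#pragma section(".CRT$XCT", read)
__declspec(allocate(".CRT$XCT"))
static CrtInitializer p_early_init = my_early_init;
```
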

The jemalloc redirecting `new/delete` global operators are used by the linker provided certain conditions are met. See the build section in these notes.

## Stack Trace and Unhandled Exception Handler

We decided not to implement these two features because the hosting program, as a rule, already has them. We experienced no inconvenience debugging issues in the debugger or analyzing process dumps when needed, and thus did not see this as a priority.

## Performance results

### Setup

All of the benchmarks were run on the same set of machines. Here are the details of the test setup:

* 2 Intel(R) Xeon(R) E5 2450 0 @ 2.10 GHz (16 cores total)
* 2 XK0480GDQPH SSD devices, 894 GB free disk in total
* 128 GB of RAM
* Operating System: Windows Server 2012 R2 Datacenter
* 100 million keys; each key is 10 bytes, each value is 800 bytes
* total database size is ~76 GB
* The performance results are based on RocksDB 3.11.
* The parameters used, unless specified, were exactly the same as published in the GitHub Wiki page.

### RocksDB on flash storage

#### Test 1. Bulk Load of keys in Random Order

Version 3.11

* Total Run Time: 17.6 min
* Fillrandom: 5.480 micros/op 182465 ops/sec; 142.0 MB/s
* Compact: 486056544.000 micros/op 0 ops/sec

Version 3.10

* Total Run Time: 16.2 min
* Fillrandom: 5.018 micros/op 199269 ops/sec; 155.1 MB/s
* Compact: 441313173.000 micros/op 0 ops/sec

#### Test 2. Bulk Load of keys in Sequential Order

Version 3.11

* Fillseq: 4.944 micros/op 202k ops/sec; 157.4 MB/s

Version 3.10

* Fillseq: 4.105 micros/op 243.6k ops/sec; 189.6 MB/s

#### Test 3. Random Write

Version 3.11

* Unbuffered I/O enabled
* Overwrite: 52.661 micros/op 18.9k ops/sec; 14.8 MB/s

Version 3.10

* Unbuffered I/O enabled
* Overwrite: 52.661 micros/op 18.9k ops/sec

#### Test 4. Random Read

Version 3.11

* Unbuffered I/O enabled
* Readrandom: 15.716 micros/op 63.6k ops/sec; 49.5 MB/s

Version 3.10

* Unbuffered I/O enabled
* Readrandom: 15.548 micros/op 64.3k ops/sec

#### Test 5. Multi-threaded read and single-threaded write

Version 3.11

* Unbuffered I/O enabled
* Readwhilewriting: 25.128 micros/op 39.7k ops/sec

Version 3.10

* Unbuffered I/O enabled
* Readwhilewriting: 24.854 micros/op 40.2k ops/sec

### RocksDB In Memory

#### Test 1. Point Lookup

*80K writes/sec*

Version 3.11

* Write Rate Achieved: 40.5k writes/sec
* Readwhilewriting: 0.314 micros/op 3187455 ops/sec; 364.8 MB/s (715454999 of 715454999 found)

Version 3.10

* Write Rate Achieved: 50.6k writes/sec
* Readwhilewriting: 0.316 micros/op 3162028 ops/sec; (719576999 of 719576999 found)

*10K writes/sec*

Version 3.11

* Write Rate Achieved: 5.8k writes/sec
* Readwhilewriting: 0.246 micros/op 4062669 ops/sec; 464.9 MB/s (915481999 of 915481999 found)

Version 3.10

* Write Rate Achieved: 5.8k writes/sec
* Readwhilewriting: 0.244 micros/op 4106253 ops/sec; (927986999 of 927986999 found)

#### Test 2. Prefix Range Query

*80K writes/sec*

Version 3.11

* Write Rate Achieved: 46.3k writes/sec
* Readwhilewriting: 0.362 micros/op 2765052 ops/sec; 316.4 MB/s (611549999 of 611549999 found)

Version 3.10

* Write Rate Achieved: 45.8k writes/sec
* Readwhilewriting: 0.317 micros/op 3154941 ops/sec; (708158999 of 708158999 found)

*10K writes/sec*

Version 3.11

* Write Rate Achieved: 5.78k writes/sec
* Readwhilewriting: 0.269 micros/op 3716692 ops/sec; 425.3 MB/s (837401999 of 837401999 found)

Version 3.10

* Write Rate Achieved: 5.7k writes/sec
* Readwhilewriting: 0.261 micros/op 3830152 ops/sec; (863482999 of 863482999 found)

We think that there is still significant room to improve the performance, which will be an ongoing effort for us.

6163
rocksdb/vendor/rocksdb/buckifier/bench-slow.json
vendored
Normal file
File diff suppressed because it is too large

1594
rocksdb/vendor/rocksdb/buckifier/bench.json
vendored
Normal file
File diff suppressed because it is too large

338
rocksdb/vendor/rocksdb/buckifier/buckify_rocksdb.py
vendored
Normal file
@ -0,0 +1,338 @@
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from __future__ import absolute_import, division, print_function, unicode_literals

try:
    from builtins import str
except ImportError:
    from __builtin__ import str
import fnmatch
import json
import os
import sys

from targets_builder import TARGETSBuilder

from util import ColorString

# This script generates a TARGETS file for Buck.
# Buck is a build tool specifying dependencies among different build targets.
# Users can pass extra dependencies as a JSON object via the command line, and this
# script can include these dependencies in the generated TARGETS file.
# Usage:
# $python3 buckifier/buckify_rocksdb.py
# (This generates a TARGETS file without user-specified dependencies for unit
# tests.)
# $python3 buckifier/buckify_rocksdb.py \
#     '{"fake": {
#         "extra_deps": [":test_dep", "//fakes/module:mock1"],
#         "extra_compiler_flags": ["-DFOO_BAR", "-Os"]
#     }
#     }'
# (The generated TARGETS file has test_dep and mock1 as dependencies for RocksDB
# unit tests, and will use the extra_compiler_flags to compile the unit test
# source.)

# tests to export as libraries for inclusion in other projects
_EXPORTED_TEST_LIBS = ["env_basic_test"]


# Parse src.mk files as a Dictionary of
# VAR_NAME => list of files
def parse_src_mk(repo_path):
    src_mk = repo_path + "/src.mk"
    src_files = {}
    for line in open(src_mk):
        line = line.strip()
        if len(line) == 0 or line[0] == "#":
            continue
        if "=" in line:
            current_src = line.split("=")[0].strip()
            src_files[current_src] = []
        elif ".c" in line:
            src_path = line.split("\\")[0].strip()
            src_files[current_src].append(src_path)
    return src_files


# get all .cc / .c files
def get_cc_files(repo_path):
    cc_files = []
    for root, _dirnames, filenames in os.walk(
        repo_path
    ):  # noqa: B007 T25377293 Grandfathered in
        root = root[(len(repo_path) + 1) :]
        if "java" in root:
            # Skip java
            continue
        for filename in fnmatch.filter(filenames, "*.cc"):
            cc_files.append(os.path.join(root, filename))
        for filename in fnmatch.filter(filenames, "*.c"):
            cc_files.append(os.path.join(root, filename))
    return cc_files


# Get non_parallel tests from Makefile
def get_non_parallel_tests(repo_path):
    Makefile = repo_path + "/Makefile"

    s = set({})

    found_non_parallel_tests = False
    for line in open(Makefile):
        line = line.strip()
        if line.startswith("NON_PARALLEL_TEST ="):
            found_non_parallel_tests = True
        elif found_non_parallel_tests:
            if line.endswith("\\"):
                # remove the trailing \
                line = line[:-1]
                line = line.strip()
                s.add(line)
            else:
                # we consumed all the non_parallel tests
                break

    return s


# Parse extra dependencies passed by user from command line
def get_dependencies():
    deps_map = {"": {"extra_deps": [], "extra_compiler_flags": []}}
    if len(sys.argv) < 2:
        return deps_map

    def encode_dict(data):
        rv = {}
        for k, v in data.items():
            if isinstance(v, dict):
                v = encode_dict(v)
            rv[k] = v
        return rv

    extra_deps = json.loads(sys.argv[1], object_hook=encode_dict)
    for target_alias, deps in extra_deps.items():
        deps_map[target_alias] = deps
    return deps_map


# Prepare TARGETS file for buck
def generate_targets(repo_path, deps_map):
    print(ColorString.info("Generating TARGETS"))
    # parsed src.mk file
    src_mk = parse_src_mk(repo_path)
    # get all .cc files
    cc_files = get_cc_files(repo_path)
    # get non_parallel tests from Makefile
    non_parallel_tests = get_non_parallel_tests(repo_path)

    if src_mk is None or cc_files is None or non_parallel_tests is None:
        return False

    extra_argv = ""
    if len(sys.argv) >= 2:
        # Heuristically quote and canonicalize whitespace for inclusion
        # in how the file was generated.
        extra_argv = " '{0}'".format(" ".join(sys.argv[1].split()))

    TARGETS = TARGETSBuilder("%s/TARGETS" % repo_path, extra_argv)

    # rocksdb_lib
    TARGETS.add_library(
        "rocksdb_lib",
        src_mk["LIB_SOURCES"] +
        # always add range_tree, it's only excluded on ppc64, which we don't use internally
        src_mk["RANGE_TREE_SOURCES"] + src_mk["TOOL_LIB_SOURCES"],
        deps=[
            "//folly/container:f14_hash",
            "//folly/experimental/coro:blocking_wait",
            "//folly/experimental/coro:collect",
            "//folly/experimental/coro:coroutine",
            "//folly/experimental/coro:task",
            "//folly/synchronization:distributed_mutex",
        ],
    )
    # rocksdb_whole_archive_lib
    TARGETS.add_library(
        "rocksdb_whole_archive_lib",
        [],
        deps=[
            ":rocksdb_lib",
        ],
        headers=None,
        extra_external_deps="",
        link_whole=True,
    )
    # rocksdb_test_lib
    TARGETS.add_library(
        "rocksdb_test_lib",
        src_mk.get("MOCK_LIB_SOURCES", [])
        + src_mk.get("TEST_LIB_SOURCES", [])
        + src_mk.get("EXP_LIB_SOURCES", [])
        + src_mk.get("ANALYZER_LIB_SOURCES", []),
        [":rocksdb_lib"],
        extra_test_libs=True,
    )
    # rocksdb_tools_lib
    TARGETS.add_library(
        "rocksdb_tools_lib",
        src_mk.get("BENCH_LIB_SOURCES", [])
        + src_mk.get("ANALYZER_LIB_SOURCES", [])
        + ["test_util/testutil.cc"],
        [":rocksdb_lib"],
    )
    # rocksdb_cache_bench_tools_lib
    TARGETS.add_library(
        "rocksdb_cache_bench_tools_lib",
        src_mk.get("CACHE_BENCH_LIB_SOURCES", []),
        [":rocksdb_lib"],
    )
    # rocksdb_stress_lib
    TARGETS.add_rocksdb_library(
        "rocksdb_stress_lib",
        src_mk.get("ANALYZER_LIB_SOURCES", [])
        + src_mk.get("STRESS_LIB_SOURCES", [])
        + ["test_util/testutil.cc"],
    )
    # db_stress binary
    TARGETS.add_binary(
        "db_stress", ["db_stress_tool/db_stress.cc"], [":rocksdb_stress_lib"]
    )
    # cache_bench binary
    TARGETS.add_binary(
        "cache_bench", ["cache/cache_bench.cc"], [":rocksdb_cache_bench_tools_lib"]
    )
    # bench binaries
    for src in src_mk.get("MICROBENCH_SOURCES", []):
        name = src.rsplit("/", 1)[1].split(".")[0] if "/" in src else src.split(".")[0]
        TARGETS.add_binary(name, [src], [], extra_bench_libs=True)
    print("Extra dependencies:\n{0}".format(json.dumps(deps_map)))

    # Dictionary test executable name -> relative source file path
    test_source_map = {}

    # c_test.c is added through TARGETS.add_c_test(). If there
    # are more than one .c test file, we need to extend
    # TARGETS.add_c_test() to include other C tests too.
    for test_src in src_mk.get("TEST_MAIN_SOURCES_C", []):
        if test_src != "db/c_test.c":
            print("Don't know how to deal with " + test_src)
            return False
    TARGETS.add_c_test()

    try:
        with open(f"{repo_path}/buckifier/bench.json") as json_file:
            fast_fancy_bench_config_list = json.load(json_file)
            for config_dict in fast_fancy_bench_config_list:
                clean_benchmarks = {}
                benchmarks = config_dict["benchmarks"]
                for binary, benchmark_dict in benchmarks.items():
                    clean_benchmarks[binary] = {}
                    for benchmark, overloaded_metric_list in benchmark_dict.items():
                        clean_benchmarks[binary][benchmark] = []
                        for metric in overloaded_metric_list:
                            if not isinstance(metric, dict):
                                clean_benchmarks[binary][benchmark].append(metric)
                TARGETS.add_fancy_bench_config(
                    config_dict["name"],
                    clean_benchmarks,
                    False,
                    config_dict["expected_runtime_one_iter"],
                    config_dict["sl_iterations"],
                    config_dict["regression_threshold"],
                )

        with open(f"{repo_path}/buckifier/bench-slow.json") as json_file:
            slow_fancy_bench_config_list = json.load(json_file)
            for config_dict in slow_fancy_bench_config_list:
                clean_benchmarks = {}
                benchmarks = config_dict["benchmarks"]
                for binary, benchmark_dict in benchmarks.items():
                    clean_benchmarks[binary] = {}
                    for benchmark, overloaded_metric_list in benchmark_dict.items():
                        clean_benchmarks[binary][benchmark] = []
                        for metric in overloaded_metric_list:
                            if not isinstance(metric, dict):
                                clean_benchmarks[binary][benchmark].append(metric)
            for config_dict in slow_fancy_bench_config_list:
                TARGETS.add_fancy_bench_config(
                    config_dict["name"] + "_slow",
                    clean_benchmarks,
                    True,
                    config_dict["expected_runtime_one_iter"],
                    config_dict["sl_iterations"],
                    config_dict["regression_threshold"],
                )
    # it is better servicelab experiments break
    # than rocksdb github ci
    except Exception:
        pass

    TARGETS.add_test_header()

    for test_src in src_mk.get("TEST_MAIN_SOURCES", []):
        test = test_src.split(".c")[0].strip().split("/")[-1].strip()
        test_source_map[test] = test_src
        print("" + test + " " + test_src)

    for target_alias, deps in deps_map.items():
        for test, test_src in sorted(test_source_map.items()):
            if len(test) == 0:
                print(ColorString.warning("Failed to get test name for %s" % test_src))
                continue

            test_target_name = test if not target_alias else test + "_" + target_alias

            if test in _EXPORTED_TEST_LIBS:
                test_library = "%s_lib" % test_target_name
                TARGETS.add_library(
                    test_library,
                    [test_src],
                    deps=[":rocksdb_test_lib"],
                    extra_test_libs=True,
                )
                TARGETS.register_test(
                    test_target_name,
                    test_src,
                    deps=json.dumps(deps["extra_deps"] + [":" + test_library]),
                    extra_compiler_flags=json.dumps(deps["extra_compiler_flags"]),
                )
            else:
                TARGETS.register_test(
                    test_target_name,
                    test_src,
                    deps=json.dumps(deps["extra_deps"] + [":rocksdb_test_lib"]),
                    extra_compiler_flags=json.dumps(deps["extra_compiler_flags"]),
                )
    TARGETS.export_file("tools/db_crashtest.py")

    print(ColorString.info("Generated TARGETS Summary:"))
    print(ColorString.info("- %d libs" % TARGETS.total_lib))
    print(ColorString.info("- %d binaries" % TARGETS.total_bin))
    print(ColorString.info("- %d tests" % TARGETS.total_test))
    return True


def get_rocksdb_path():
    # rocksdb = {script_dir}/..
    script_dir = os.path.dirname(sys.argv[0])
    script_dir = os.path.abspath(script_dir)
    rocksdb_path = os.path.abspath(os.path.join(script_dir, "../"))

    return rocksdb_path


def exit_with_error(msg):
    print(ColorString.error(msg))
    sys.exit(1)


def main():
    deps_map = get_dependencies()
    # Generate TARGETS file for buck
    ok = generate_targets(get_rocksdb_path(), deps_map)
    if not ok:
        exit_with_error("Failed to generate TARGETS files")


if __name__ == "__main__":
    main()
32
rocksdb/vendor/rocksdb/buckifier/check_buck_targets.sh
vendored
Normal file
@ -0,0 +1,32 @@
#!/usr/bin/env bash
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# If the TARGETS file has uncommitted changes, skip the check; otherwise
# regenerate it and fail if the regenerated file differs from the committed one.

TGT_DIFF=`git diff TARGETS | head -n 1`

if [ ! -z "$TGT_DIFF" ]
then
  echo "TARGETS file has uncommitted changes. Skip this check."
  exit 0
fi

echo Backup original TARGETS file.

cp TARGETS TARGETS.bkp

${PYTHON:-python3} buckifier/buckify_rocksdb.py

TGT_DIFF=`git diff TARGETS | head -n 1`

if [ -z "$TGT_DIFF" ]
then
  mv TARGETS.bkp TARGETS
  exit 0
else
  echo "Please run '${PYTHON:-python3} buckifier/buckify_rocksdb.py' to update TARGETS file."
  echo "Do not manually update TARGETS file."
  ${PYTHON:-python3} --version
  mv TARGETS.bkp TARGETS
  exit 1
fi
6
rocksdb/vendor/rocksdb/buckifier/rocks_test_runner.sh
vendored
Normal file
@ -0,0 +1,6 @@
#!/usr/bin/env bash
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Create a tmp directory for the test to use
TEST_DIR=$(mktemp -d /dev/shm/fbcode_rocksdb_XXXXXXX)
# shellcheck disable=SC2068
TEST_TMPDIR="$TEST_DIR" $@ && rm -rf "$TEST_DIR"
156
rocksdb/vendor/rocksdb/buckifier/targets_builder.py
vendored
Normal file
@ -0,0 +1,156 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from __future__ import absolute_import, division, print_function, unicode_literals

try:
    from builtins import object, str
except ImportError:
    from __builtin__ import object, str
import pprint

import targets_cfg


def pretty_list(lst, indent=8):
    if lst is None or len(lst) == 0:
        return ""

    if len(lst) == 1:
        return '"%s"' % lst[0]

    separator = '",\n%s"' % (" " * indent)
    res = separator.join(sorted(lst))
    res = "\n" + (" " * indent) + '"' + res + '",\n' + (" " * (indent - 4))
    return res


class TARGETSBuilder:
    def __init__(self, path, extra_argv):
        self.path = path
        header = targets_cfg.rocksdb_target_header_template.format(
            extra_argv=extra_argv
        )
        with open(path, "wb") as targets_file:
            targets_file.write(header.encode("utf-8"))
        self.total_lib = 0
        self.total_bin = 0
        self.total_test = 0
        self.tests_cfg = ""

    def add_library(
        self,
        name,
        srcs,
        deps=None,
        headers=None,
        extra_external_deps="",
        link_whole=False,
        external_dependencies=None,
        extra_test_libs=False,
    ):
        if headers is not None:
            headers = "[" + pretty_list(headers) + "]"
        with open(self.path, "ab") as targets_file:
            targets_file.write(
                targets_cfg.library_template.format(
                    name=name,
                    srcs=pretty_list(srcs),
                    headers=headers,
                    deps=pretty_list(deps),
                    extra_external_deps=extra_external_deps,
                    link_whole=link_whole,
                    external_dependencies=pretty_list(external_dependencies),
                    extra_test_libs=extra_test_libs,
                ).encode("utf-8")
            )
        self.total_lib = self.total_lib + 1

    def add_rocksdb_library(self, name, srcs, headers=None, external_dependencies=None):
        if headers is not None:
            headers = "[" + pretty_list(headers) + "]"
        with open(self.path, "ab") as targets_file:
            targets_file.write(
                targets_cfg.rocksdb_library_template.format(
                    name=name,
                    srcs=pretty_list(srcs),
                    headers=headers,
                    external_dependencies=pretty_list(external_dependencies),
                ).encode("utf-8")
            )
        self.total_lib = self.total_lib + 1

    def add_binary(
        self,
        name,
        srcs,
        deps=None,
        extra_preprocessor_flags=None,
        extra_bench_libs=False,
    ):
        with open(self.path, "ab") as targets_file:
            targets_file.write(
                targets_cfg.binary_template.format(
                    name=name,
                    srcs=pretty_list(srcs),
                    deps=pretty_list(deps),
                    extra_preprocessor_flags=pretty_list(extra_preprocessor_flags),
                    extra_bench_libs=extra_bench_libs,
                ).encode("utf-8")
            )
        self.total_bin = self.total_bin + 1

    def add_c_test(self):
        with open(self.path, "ab") as targets_file:
            targets_file.write(
                b"""
add_c_test_wrapper()
"""
            )

    def add_test_header(self):
        with open(self.path, "ab") as targets_file:
            targets_file.write(
                b"""
# Generate a test rule for each entry in ROCKS_TESTS
# Do not build the tests in opt mode, since SyncPoint and other test code
# will not be included.
"""
            )

    def add_fancy_bench_config(
        self,
        name,
        bench_config,
        slow,
        expected_runtime,
        sl_iterations,
        regression_threshold,
    ):
        with open(self.path, "ab") as targets_file:
            targets_file.write(
                targets_cfg.fancy_bench_template.format(
                    name=name,
                    bench_config=pprint.pformat(bench_config),
                    slow=slow,
                    expected_runtime=expected_runtime,
                    sl_iterations=sl_iterations,
                    regression_threshold=regression_threshold,
                ).encode("utf-8")
            )

    def register_test(self, test_name, src, deps, extra_compiler_flags):
        with open(self.path, "ab") as targets_file:
            targets_file.write(
                targets_cfg.unittests_template.format(
                    test_name=test_name,
                    test_cc=str(src),
                    deps=deps,
                    extra_compiler_flags=extra_compiler_flags,
                ).encode("utf-8")
            )
        self.total_test = self.total_test + 1

    def export_file(self, name):
        with open(self.path, "a") as targets_file:
            targets_file.write(
                targets_cfg.export_file_template.format(name=name)
            )
43
rocksdb/vendor/rocksdb/buckifier/targets_cfg.py
vendored
Normal file
@ -0,0 +1,43 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from __future__ import absolute_import, division, print_function, unicode_literals

rocksdb_target_header_template = """# This file \100generated by:
#$ python3 buckifier/buckify_rocksdb.py{extra_argv}
# --> DO NOT EDIT MANUALLY <--
# This file is a Facebook-specific integration for buck builds, so can
# only be validated by Facebook employees.
load("//rocks/buckifier:defs.bzl", "cpp_library_wrapper","rocks_cpp_library_wrapper","cpp_binary_wrapper","cpp_unittest_wrapper","fancy_bench_wrapper","add_c_test_wrapper")

"""


library_template = """
cpp_library_wrapper(name="{name}", srcs=[{srcs}], deps=[{deps}], headers={headers}, link_whole={link_whole}, extra_test_libs={extra_test_libs})
"""

rocksdb_library_template = """
rocks_cpp_library_wrapper(name="{name}", srcs=[{srcs}], headers={headers})

"""


binary_template = """
cpp_binary_wrapper(name="{name}", srcs=[{srcs}], deps=[{deps}], extra_preprocessor_flags=[{extra_preprocessor_flags}], extra_bench_libs={extra_bench_libs})
"""

unittests_template = """
cpp_unittest_wrapper(name="{test_name}",
                     srcs=["{test_cc}"],
                     deps={deps},
                     extra_compiler_flags={extra_compiler_flags})

"""

fancy_bench_template = """
fancy_bench_wrapper(suite_name="{name}", binary_to_bench_to_metric_list_map={bench_config}, slow={slow}, expected_runtime={expected_runtime}, sl_iterations={sl_iterations}, regression_threshold={regression_threshold})

"""

export_file_template = """
export_file(name = "{name}")
"""
118
rocksdb/vendor/rocksdb/buckifier/util.py
vendored
Normal file
@ -0,0 +1,118 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
This module keeps commonly used components.
"""
from __future__ import absolute_import, division, print_function, unicode_literals

try:
    from builtins import object
except ImportError:
    from __builtin__ import object
import os
import subprocess
import sys
import time


class ColorString:
    """Generate colorful strings on terminal"""

    HEADER = "\033[95m"
    BLUE = "\033[94m"
    GREEN = "\033[92m"
    WARNING = "\033[93m"
    FAIL = "\033[91m"
    ENDC = "\033[0m"

    @staticmethod
    def _make_color_str(text, color):
        # In Python2, default encoding for unicode string is ASCII
        if sys.version_info.major <= 2:
            return "".join([color, text.encode("utf-8"), ColorString.ENDC])
        # From Python3, default encoding for unicode string is UTF-8
        return "".join([color, text, ColorString.ENDC])

    @staticmethod
    def ok(text):
        if ColorString.is_disabled:
            return text
        return ColorString._make_color_str(text, ColorString.GREEN)

    @staticmethod
    def info(text):
        if ColorString.is_disabled:
            return text
        return ColorString._make_color_str(text, ColorString.BLUE)

    @staticmethod
    def header(text):
        if ColorString.is_disabled:
            return text
        return ColorString._make_color_str(text, ColorString.HEADER)

    @staticmethod
    def error(text):
        if ColorString.is_disabled:
            return text
        return ColorString._make_color_str(text, ColorString.FAIL)

    @staticmethod
    def warning(text):
        if ColorString.is_disabled:
            return text
        return ColorString._make_color_str(text, ColorString.WARNING)

    is_disabled = False


def run_shell_command(shell_cmd, cmd_dir=None):
    """Run a single shell command.
    @returns a tuple of shell command return code, stdout, stderr"""

    if cmd_dir is not None and not os.path.exists(cmd_dir):
        run_shell_command("mkdir -p %s" % cmd_dir)

    start = time.time()
    print("\t>>> Running: " + shell_cmd)
    p = subprocess.Popen(  # noqa
        shell_cmd,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=cmd_dir,
    )
    stdout, stderr = p.communicate()
    end = time.time()

    # Report time if we spent more than 5 minutes executing a command
    execution_time = end - start
    if execution_time > (60 * 5):
        mins = execution_time / 60
        secs = execution_time % 60
        print("\t>time spent: %d minutes %d seconds" % (mins, secs))

    return p.returncode, stdout, stderr


def run_shell_commands(shell_cmds, cmd_dir=None, verbose=False):
    """Execute a sequence of shell commands, which is equivalent to
    running `cmd1 && cmd2 && cmd3`
    @returns boolean indication if all commands succeeds.
    """

    if cmd_dir:
        print("\t=== Set current working directory => %s" % cmd_dir)

    for shell_cmd in shell_cmds:
        ret_code, stdout, stderr = run_shell_command(shell_cmd, cmd_dir)
        if stdout:
            if verbose or ret_code != 0:
                print(ColorString.info("stdout: \n"), stdout)
        if stderr:
            # contents in stderr is not necessarily to be error messages.
            if verbose or ret_code != 0:
                print(ColorString.error("stderr: \n"), stderr)
        if ret_code != 0:
            return False

    return True
168
rocksdb/vendor/rocksdb/build_tools/amalgamate.py
vendored
Normal file
@ -0,0 +1,168 @@
#!/usr/bin/python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

# amalgamate.py creates an amalgamation from a unity build.
# It can be run with either Python 2 or 3.
# An amalgamation consists of a header that includes the contents of all public
# headers and a source file that includes the contents of all source files and
# private headers.
#
# This script works by starting with the unity build file and recursively expanding
# #include directives. If the #include is found in a public include directory,
# that header is expanded into the amalgamation header.
#
# A particular header is only expanded once, so this script will
# break if there are multiple inclusions of the same header that are expected to
# expand differently. Similarly, this type of code causes issues:
#
# #ifdef FOO
#   #include "bar.h"
#   // code here
# #else
#   #include "bar.h" // oops, doesn't get expanded
#   // different code here
# #endif
#
# The solution is to move the include out of the #ifdef.

from __future__ import print_function

import argparse
import re
import sys
from os import path

include_re = re.compile('^[ \t]*#include[ \t]+"(.*)"[ \t]*$')
included = set()
excluded = set()


def find_header(name, abs_path, include_paths):
    samedir = path.join(path.dirname(abs_path), name)
    if path.exists(samedir):
        return samedir
    for include_path in include_paths:
        include_path = path.join(include_path, name)
        if path.exists(include_path):
            return include_path
    return None


def expand_include(
    include_path,
    f,
    abs_path,
    source_out,
    header_out,
    include_paths,
    public_include_paths,
):
    if include_path in included:
        return False

    included.add(include_path)
    with open(include_path) as f:
        print('#line 1 "{}"'.format(include_path), file=source_out)
        process_file(
            f, include_path, source_out, header_out, include_paths, public_include_paths
        )
    return True


def process_file(
    f, abs_path, source_out, header_out, include_paths, public_include_paths
):
    for (line, text) in enumerate(f):
        m = include_re.match(text)
        if m:
            filename = m.groups()[0]
            # first check private headers
            include_path = find_header(filename, abs_path, include_paths)
            if include_path:
                if include_path in excluded:
                    source_out.write(text)
                    expanded = False
                else:
                    expanded = expand_include(
                        include_path,
                        f,
                        abs_path,
                        source_out,
                        header_out,
                        include_paths,
                        public_include_paths,
                    )
            else:
                # now try public headers
                include_path = find_header(filename, abs_path, public_include_paths)
                if include_path:
                    # found public header
                    expanded = False
                    if include_path in excluded:
                        source_out.write(text)
                    else:
                        expand_include(
                            include_path,
                            f,
                            abs_path,
                            header_out,
                            None,
                            public_include_paths,
                            [],
                        )
                else:
                    sys.exit(
                        "unable to find {}, included in {} on line {}".format(
                            filename, abs_path, line
                        )
                    )

            if expanded:
                print('#line {} "{}"'.format(line + 1, abs_path), file=source_out)
        elif text != "#pragma once\n":
            source_out.write(text)


def main():
    parser = argparse.ArgumentParser(
        description="Transform a unity build into an amalgamation"
    )
    parser.add_argument("source", help="source file")
    parser.add_argument(
        "-I",
        action="append",
        dest="include_paths",
        help="include paths for private headers",
    )
    parser.add_argument(
        "-i",
        action="append",
        dest="public_include_paths",
        help="include paths for public headers",
    )
    parser.add_argument(
        "-x", action="append", dest="excluded", help="excluded header files"
    )
    parser.add_argument("-o", dest="source_out", help="output C++ file", required=True)
    parser.add_argument(
        "-H", dest="header_out", help="output C++ header file", required=True
    )
    args = parser.parse_args()

    include_paths = list(map(path.abspath, args.include_paths or []))
    public_include_paths = list(map(path.abspath, args.public_include_paths or []))
    excluded.update(map(path.abspath, args.excluded or []))
    filename = args.source
    abs_path = path.abspath(filename)
    with open(filename) as f, open(args.source_out, "w") as source_out, open(
        args.header_out, "w"
    ) as header_out:
        print('#line 1 "{}"'.format(filename), file=source_out)
        print('#include "{}"'.format(header_out.name), file=source_out)
        process_file(
            f, abs_path, source_out, header_out, include_paths, public_include_paths
        )


if __name__ == "__main__":
    main()
238
rocksdb/vendor/rocksdb/build_tools/benchmark_log_tool.py
vendored
Normal file
@ -0,0 +1,238 @@
#!/usr/bin/env python3
# Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
# This source code is licensed under both the GPLv2 (found in the
# COPYING file in the root directory) and Apache 2.0 License
# (found in the LICENSE.Apache file in the root directory).

"""Access the results of benchmark runs
Send these results on to OpenSearch graphing service
"""

import argparse
import itertools
import logging
import os
import re
import sys

import requests
from dateutil import parser

logging.basicConfig(level=logging.DEBUG)


class Configuration:
    opensearch_user = os.environ["ES_USER"]
    opensearch_pass = os.environ["ES_PASS"]


class BenchmarkResultException(Exception):
    def __init__(self, message, content):
        super().__init__(self, message)
        self.content = content


class BenchmarkUtils:

    expected_keys = [
        "ops_sec",
        "mb_sec",
        "lsm_sz",
        "blob_sz",
        "c_wgb",
        "w_amp",
        "c_mbps",
        "c_wsecs",
        "c_csecs",
        "b_rgb",
        "b_wgb",
        "usec_op",
        "p50",
        "p99",
        "p99.9",
        "p99.99",
        "pmax",
        "uptime",
        "stall%",
        "Nstall",
        "u_cpu",
        "s_cpu",
        "rss",
        "test",
        "date",
        "version",
        "job_id",
    ]

    def sanity_check(row):
        if "test" not in row:
            logging.debug(f"not 'test' in row: {row}")
            return False
        if row["test"] == "":
            logging.debug(f"row['test'] == '': {row}")
            return False
        if "date" not in row:
            logging.debug(f"not 'date' in row: {row}")
            return False
        if "ops_sec" not in row:
            logging.debug(f"not 'ops_sec' in row: {row}")
            return False
        try:
            _ = int(row["ops_sec"])
        except (ValueError, TypeError):
            logging.debug(f"int(row['ops_sec']): {row}")
            return False
        try:
            (_, _) = parser.parse(row["date"], fuzzy_with_tokens=True)
        except (parser.ParserError):
            logging.error(
                f"parser.parse((row['date']): not a valid format for date in row: {row}"
            )
            return False
        return True

    def conform_opensearch(row):
        (dt, _) = parser.parse(row["date"], fuzzy_with_tokens=True)
        # create a test_date field, which was previously what was expected
        # repair the date field, which has what can be a WRONG ISO FORMAT, (no leading 0 on single-digit day-of-month)
        # e.g. 2022-07-1T00:14:55 should be 2022-07-01T00:14:55
        row["test_date"] = dt.isoformat()
        row["date"] = dt.isoformat()
        return {key.replace(".", "_"): value for key, value in row.items()}


class ResultParser:
    def __init__(self, field="(\w|[+-:.%])+", intrafield="(\s)+", separator="\t"):
        self.field = re.compile(field)
        self.intra = re.compile(intrafield)
        self.sep = re.compile(separator)

    def ignore(self, l_in: str):
        if len(l_in) == 0:
            return True
        if l_in[0:1] == "#":
            return True
        return False

    def line(self, line_in: str):
        """Parse a line into items
        Being clever about separators
        """
        line = line_in
        row = []
        while line != "":
            match_item = self.field.match(line)
            if match_item:
                item = match_item.group(0)
                row.append(item)
                line = line[len(item) :]
            else:
                match_intra = self.intra.match(line)
                if match_intra:
                    intra = match_intra.group(0)
                    # Count the separators
                    # If there are >1 then generate extra blank fields
                    # White space with no true separators fakes up a single separator
                    tabbed = self.sep.split(intra)
                    sep_count = len(tabbed) - 1
                    if sep_count == 0:
                        sep_count = 1
                    for _ in range(sep_count - 1):
                        row.append("")
                    line = line[len(intra) :]
                else:
                    raise BenchmarkResultException(
                        "Invalid TSV line", f"{line_in} at {line}"
                    )
        return row

    def parse(self, lines):
        """Parse something that iterates lines"""
        rows = [self.line(line) for line in lines if not self.ignore(line)]
        header = rows[0]
        width = len(header)
        records = [
            {k: v for (k, v) in itertools.zip_longest(header, row[:width])}
            for row in rows[1:]
        ]
        return records


def load_report_from_tsv(filename: str):
    file = open(filename, "r")
    contents = file.readlines()
    file.close()
    parser = ResultParser()
    report = parser.parse(contents)
    logging.debug(f"Loaded TSV Report: {report}")
    return report


def push_report_to_opensearch(report, esdocument):
    sanitized = [
        BenchmarkUtils.conform_opensearch(row)
        for row in report
        if BenchmarkUtils.sanity_check(row)
    ]
    logging.debug(
        f"upload {len(sanitized)} sane of {len(report)} benchmarks to opensearch"
    )
    for single_benchmark in sanitized:
        logging.debug(f"upload benchmark: {single_benchmark}")
        response = requests.post(
            esdocument,
            json=single_benchmark,
            auth=(os.environ["ES_USER"], os.environ["ES_PASS"]),
        )
        logging.debug(
            f"Sent to OpenSearch, status: {response.status_code}, result: {response.text}"
        )
        response.raise_for_status()


def push_report_to_null(report):

    for row in report:
        if BenchmarkUtils.sanity_check(row):
            logging.debug(f"row {row}")
            conformed = BenchmarkUtils.conform_opensearch(row)
            logging.debug(f"conformed row {conformed}")


def main():
    """Tool for fetching, parsing and uploading benchmark results to OpenSearch / ElasticSearch
    This tool will

    (1) Open a local tsv benchmark report file
    (2) Upload to OpenSearch document, via https/JSON
    """

    parser = argparse.ArgumentParser(description="CircleCI benchmark scraper.")

    # --tsvfile is the name of the file to read results from
    # --esdocument is the ElasticSearch document to push these results into
    #
    parser.add_argument(
        "--tsvfile",
        default="build_tools/circle_api_scraper_input.txt",
        help="File from which to read tsv report",
    )
    parser.add_argument(
        "--esdocument",
        help="ElasticSearch/OpenSearch document URL to upload report into",
    )
    parser.add_argument(
        "--upload", choices=["opensearch", "none"], default="opensearch"
    )

    args = parser.parse_args()
    logging.debug(f"Arguments: {args}")
    reports = load_report_from_tsv(args.tsvfile)
    if args.upload == "opensearch":
        push_report_to_opensearch(reports, args.esdocument)
    else:
        push_report_to_null(reports)


if __name__ == "__main__":
    sys.exit(main())
48
rocksdb/vendor/rocksdb/build_tools/check-sources.sh
vendored
Normal file
@ -0,0 +1,48 @@
#!/usr/bin/env bash
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# Check for some simple mistakes that should prevent commit or push

BAD=""

git grep -n 'namespace rocksdb' -- '*.[ch]*'
if [ "$?" != "1" ]; then
  echo "^^^^^ Do not hardcode namespace rocksdb. Use ROCKSDB_NAMESPACE"
  BAD=1
fi

git grep -n -i 'nocommit' -- ':!build_tools/check-sources.sh'
if [ "$?" != "1" ]; then
  echo "^^^^^ Code was not intended to be committed"
  BAD=1
fi

git grep -n 'include <rocksdb/' -- ':!build_tools/check-sources.sh'
if [ "$?" != "1" ]; then
  echo '^^^^^ Use double-quotes as in #include "rocksdb/something.h"'
  BAD=1
fi

git grep -n 'include "include/rocksdb/' -- ':!build_tools/check-sources.sh'
if [ "$?" != "1" ]; then
  echo '^^^^^ Use #include "rocksdb/something.h" instead of #include "include/rocksdb/something.h"'
  BAD=1
fi

git grep -n 'using namespace' -- ':!build_tools' ':!docs' \
  ':!third-party/folly/folly/lang/Align.h' \
  ':!third-party/gtest-1.8.1/fused-src/gtest/gtest.h'
if [ "$?" != "1" ]; then
  echo '^^^^ Do not use "using namespace"'
  BAD=1
fi

git grep -n -P "[\x80-\xFF]" -- ':!docs' ':!*.md'
if [ "$?" != "1" ]; then
  echo '^^^^ Use only ASCII characters in source files'
  BAD=1
fi

if [ "$BAD" ]; then
  exit 1
fi
22
rocksdb/vendor/rocksdb/build_tools/dependencies_platform010.sh
vendored
Normal file
@ -0,0 +1,22 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# The file is generated using update_dependencies.sh.
GCC_BASE=/mnt/gvfs/third-party2/gcc/e40bde78650fa91b8405a857e3f10bf336633fb0/11.x/centos7-native/886b5eb
CLANG_BASE=/mnt/gvfs/third-party2/llvm-fb/2043340983c032915adbb6f78903dc855b65aee8/12/platform010/9520e0f
LIBGCC_BASE=/mnt/gvfs/third-party2/libgcc/c00dcc6a3e4125c7e8b248e9a79c14b78ac9e0ca/11.x/platform010/5684a5a
GLIBC_BASE=/mnt/gvfs/third-party2/glibc/0b9c8e4b060eda62f3bc1c6127bbe1256697569b/2.34/platform010/f259413
SNAPPY_BASE=/mnt/gvfs/third-party2/snappy/bc9647f7912b131315827d65cb6189c21f381d05/1.1.3/platform010/76ebdda
ZLIB_BASE=/mnt/gvfs/third-party2/zlib/a6f5f3f1d063d2d00cd02fc12f0f05fc3ab3a994/1.2.11/platform010/76ebdda
BZIP2_BASE=/mnt/gvfs/third-party2/bzip2/09703139cfc376bd8a82642385a0e97726b28287/1.0.6/platform010/76ebdda
LZ4_BASE=/mnt/gvfs/third-party2/lz4/60220d6a5bf7722b9cc239a1368c596619b12060/1.9.1/platform010/76ebdda
ZSTD_BASE=/mnt/gvfs/third-party2/zstd/50eace8143eaaea9473deae1f3283e0049e05633/1.4.x/platform010/64091f4
GFLAGS_BASE=/mnt/gvfs/third-party2/gflags/5d27e5919771603da06000a027b12f799e58a4f7/2.2.0/platform010/76ebdda
JEMALLOC_BASE=/mnt/gvfs/third-party2/jemalloc/b62912d333ef33f9760efa6219dbe3fe6abb3b0e/master/platform010/f57cc4a
NUMA_BASE=/mnt/gvfs/third-party2/numa/6b412770957aa3c8a87e5e0dcd8cc2f45f393bc0/2.0.11/platform010/76ebdda
LIBUNWIND_BASE=/mnt/gvfs/third-party2/libunwind/52f69816e936e147664ad717eb71a1a0e9dc973a/1.4/platform010/5074a48
TBB_BASE=/mnt/gvfs/third-party2/tbb/c9cc192099fa84c0dcd0ffeedd44a373ad6e4925/2018_U5/platform010/76ebdda
LIBURING_BASE=/mnt/gvfs/third-party2/liburing/a98e2d137007e3ebf7f33bd6f99c2c56bdaf8488/20210212/platform010/76ebdda
BENCHMARK_BASE=/mnt/gvfs/third-party2/benchmark/780c7a0f9cf0967961e69ad08e61cddd85d61821/trunk/platform010/76ebdda
KERNEL_HEADERS_BASE=/mnt/gvfs/third-party2/kernel-headers/02d9f76aaaba580611cf75e741753c800c7fdc12/fb/platform010/da39a3e
BINUTILS_BASE=/mnt/gvfs/third-party2/binutils/938dc3f064ef3a48c0446f5b11d788d50b3eb5ee/2.37/centos7-native/da39a3e
VALGRIND_BASE=/mnt/gvfs/third-party2/valgrind/429a6b3203eb415f1599bd15183659153129188e/3.15.0/platform010/76ebdda
LUA_BASE=/mnt/gvfs/third-party2/lua/363787fa5cac2a8aa20638909210443278fa138e/5.3.4/platform010/9079c97
3
rocksdb/vendor/rocksdb/build_tools/dockerbuild.sh
vendored
Normal file
@ -0,0 +1,3 @@
#!/usr/bin/env bash
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
docker run -v $PWD:/rocks -w /rocks buildpack-deps make
181
rocksdb/vendor/rocksdb/build_tools/error_filter.py
vendored
Normal file
@ -0,0 +1,181 @@
|
||||
# Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
||||
# This source code is licensed under both the GPLv2 (found in the
|
||||
# COPYING file in the root directory) and Apache 2.0 License
# (found in the LICENSE.Apache file in the root directory).

"""Filter for error messages in test output:
- Receives merged stdout/stderr from test on stdin
- Finds patterns of known error messages for test name (first argument)
- Prints those error messages to stdout
"""

from __future__ import absolute_import, division, print_function, unicode_literals

import re
import sys


class ErrorParserBase:
    def parse_error(self, line):
        """Parses a line of test output. If it contains an error, returns a
        formatted message describing the error; otherwise, returns None.
        Subclasses must override this method.
        """
        raise NotImplementedError


class GTestErrorParser(ErrorParserBase):
    """A parser that remembers the last test that began running so it can print
    that test's name upon detecting failure.
    """

    _GTEST_NAME_PATTERN = re.compile(r"\[ RUN      \] (\S+)$")
    # format: '<filename or "unknown file">:<line #>: Failure'
    _GTEST_FAIL_PATTERN = re.compile(r"(unknown file|\S+:\d+): Failure$")

    def __init__(self):
        self._last_gtest_name = "Unknown test"

    def parse_error(self, line):
        gtest_name_match = self._GTEST_NAME_PATTERN.match(line)
        if gtest_name_match:
            self._last_gtest_name = gtest_name_match.group(1)
            return None
        gtest_fail_match = self._GTEST_FAIL_PATTERN.match(line)
        if gtest_fail_match:
            return "%s failed: %s" % (self._last_gtest_name, gtest_fail_match.group(1))
        return None


class MatchErrorParser(ErrorParserBase):
    """A simple parser that returns the whole line if it matches the pattern."""

    def __init__(self, pattern):
        self._pattern = re.compile(pattern)

    def parse_error(self, line):
        if self._pattern.match(line):
            return line
        return None


class CompilerErrorParser(MatchErrorParser):
    def __init__(self):
        # format (compile error):
        #   '<filename>:<line #>:<column #>: error: <error msg>'
        # format (link error):
        #   '<filename>:<line #>: error: <error msg>'
        # The below regex catches both
        super(CompilerErrorParser, self).__init__(r"\S+:\d+: error:")


class ScanBuildErrorParser(MatchErrorParser):
    def __init__(self):
        super(ScanBuildErrorParser, self).__init__(r"scan-build: \d+ bugs found.$")


class DbCrashErrorParser(MatchErrorParser):
    def __init__(self):
        super(DbCrashErrorParser, self).__init__(r"\*\*\*.*\^$|TEST FAILED.")


class WriteStressErrorParser(MatchErrorParser):
    def __init__(self):
        super(WriteStressErrorParser, self).__init__(
            r"ERROR: write_stress died with exitcode=\d+"
        )


class AsanErrorParser(MatchErrorParser):
    def __init__(self):
        super(AsanErrorParser, self).__init__(r"==\d+==ERROR: AddressSanitizer:")


class UbsanErrorParser(MatchErrorParser):
    def __init__(self):
        # format: '<filename>:<line #>:<column #>: runtime error: <error msg>'
        super(UbsanErrorParser, self).__init__(r"\S+:\d+:\d+: runtime error:")


class ValgrindErrorParser(MatchErrorParser):
    def __init__(self):
        # just grab the summary, valgrind doesn't clearly distinguish errors
        # from other log messages.
        super(ValgrindErrorParser, self).__init__(r"==\d+== ERROR SUMMARY:")


class CompatErrorParser(MatchErrorParser):
    def __init__(self):
        super(CompatErrorParser, self).__init__(r"==== .*[Ee]rror.* ====$")


class TsanErrorParser(MatchErrorParser):
    def __init__(self):
        super(TsanErrorParser, self).__init__(r"WARNING: ThreadSanitizer:")


_TEST_NAME_TO_PARSERS = {
    "punit": [CompilerErrorParser, GTestErrorParser],
    "unit": [CompilerErrorParser, GTestErrorParser],
    "release": [CompilerErrorParser, GTestErrorParser],
    "unit_481": [CompilerErrorParser, GTestErrorParser],
    "release_481": [CompilerErrorParser, GTestErrorParser],
    "clang_unit": [CompilerErrorParser, GTestErrorParser],
    "clang_release": [CompilerErrorParser, GTestErrorParser],
    "clang_analyze": [CompilerErrorParser, ScanBuildErrorParser],
    "code_cov": [CompilerErrorParser, GTestErrorParser],
    "unity": [CompilerErrorParser, GTestErrorParser],
    "lite": [CompilerErrorParser],
    "lite_test": [CompilerErrorParser, GTestErrorParser],
    "stress_crash": [CompilerErrorParser, DbCrashErrorParser],
    "stress_crash_with_atomic_flush": [CompilerErrorParser, DbCrashErrorParser],
    "stress_crash_with_txn": [CompilerErrorParser, DbCrashErrorParser],
    "write_stress": [CompilerErrorParser, WriteStressErrorParser],
    "asan": [CompilerErrorParser, GTestErrorParser, AsanErrorParser],
    "asan_crash": [CompilerErrorParser, AsanErrorParser, DbCrashErrorParser],
    "asan_crash_with_atomic_flush": [
        CompilerErrorParser,
        AsanErrorParser,
        DbCrashErrorParser,
    ],
    "asan_crash_with_txn": [CompilerErrorParser, AsanErrorParser, DbCrashErrorParser],
    "ubsan": [CompilerErrorParser, GTestErrorParser, UbsanErrorParser],
    "ubsan_crash": [CompilerErrorParser, UbsanErrorParser, DbCrashErrorParser],
    "ubsan_crash_with_atomic_flush": [
        CompilerErrorParser,
        UbsanErrorParser,
        DbCrashErrorParser,
    ],
    "ubsan_crash_with_txn": [CompilerErrorParser, UbsanErrorParser, DbCrashErrorParser],
    "valgrind": [CompilerErrorParser, GTestErrorParser, ValgrindErrorParser],
    "tsan": [CompilerErrorParser, GTestErrorParser, TsanErrorParser],
    "format_compatible": [CompilerErrorParser, CompatErrorParser],
    "run_format_compatible": [CompilerErrorParser, CompatErrorParser],
    "no_compression": [CompilerErrorParser, GTestErrorParser],
    "run_no_compression": [CompilerErrorParser, GTestErrorParser],
    "regression": [CompilerErrorParser],
    "run_regression": [CompilerErrorParser],
}


def main():
    if len(sys.argv) != 2:
        return "Usage: %s <test name>" % sys.argv[0]
    test_name = sys.argv[1]
    if test_name not in _TEST_NAME_TO_PARSERS:
        return "Unknown test name: %s" % test_name

    error_parsers = []
    for parser_cls in _TEST_NAME_TO_PARSERS[test_name]:
        error_parsers.append(parser_cls())

    for line in sys.stdin:
        line = line.strip()
        for error_parser in error_parsers:
            error_msg = error_parser.parse_error(line)
            if error_msg is not None:
                print(error_msg)


if __name__ == "__main__":
    sys.exit(main())
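Taken together, the filter is meant to sit at the end of a pipe. A minimal sketch of an invocation (the `db_test` binary name and the script's path are assumptions; the script itself only requires merged output on stdin and a known test name as its single argument):

```sh
# Hypothetical invocation: merge stderr into stdout and keep only the lines
# that the "asan" parser set recognizes as errors.
./db_test 2>&1 | python3 build_tools/error_filter.py asan
```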
55
rocksdb/vendor/rocksdb/build_tools/fb_compile_mongo.sh
vendored
Normal file
@ -0,0 +1,55 @@
#!/bin/sh
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# fail early
set -e

if test -z $ROCKSDB_PATH; then
  ROCKSDB_PATH=~/rocksdb
fi
source $ROCKSDB_PATH/build_tools/fbcode_config4.8.1.sh

EXTRA_LDFLAGS=""

if test -z $ALLOC; then
  # default
  ALLOC=tcmalloc
elif [[ $ALLOC == "jemalloc" ]]; then
  ALLOC=system
  EXTRA_LDFLAGS+=" -Wl,--whole-archive $JEMALLOC_LIB -Wl,--no-whole-archive"
fi

# we need to force mongo to use static library, not shared
STATIC_LIB_DEP_DIR='build/static_library_dependencies'
test -d $STATIC_LIB_DEP_DIR || mkdir $STATIC_LIB_DEP_DIR
test -h $STATIC_LIB_DEP_DIR/`basename $SNAPPY_LIBS` || ln -s $SNAPPY_LIBS $STATIC_LIB_DEP_DIR
test -h $STATIC_LIB_DEP_DIR/`basename $LZ4_LIBS` || ln -s $LZ4_LIBS $STATIC_LIB_DEP_DIR

EXTRA_LDFLAGS+=" -L $STATIC_LIB_DEP_DIR"

set -x

EXTRA_CMD=""
if ! test -e version.json; then
  # this is Mongo 3.0
  EXTRA_CMD="--rocksdb \
    --variant-dir=linux2/norm
    --cxx=${CXX} \
    --cc=${CC} \
    --use-system-zlib"  # add this line back to normal code path
                        # when https://jira.mongodb.org/browse/SERVER-19123 is resolved
fi

scons \
  LINKFLAGS="$EXTRA_LDFLAGS $EXEC_LDFLAGS $PLATFORM_LDFLAGS" \
  CCFLAGS="$CXXFLAGS -L $STATIC_LIB_DEP_DIR" \
  LIBS="lz4 gcc stdc++" \
  LIBPATH="$ROCKSDB_PATH" \
  CPPPATH="$ROCKSDB_PATH/include" \
  -j32 \
  --allocator=$ALLOC \
  --nostrip \
  --opt=on \
  --disable-minimum-compiler-version-enforcement \
  --use-system-snappy \
  --disable-warnings-as-errors \
  $EXTRA_CMD $*
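A sketch of how this wrapper might be driven (the MongoDB checkout location and the `mongod` scons target are assumptions; `ROCKSDB_PATH` and `ALLOC` are the knobs the script actually reads, and trailing arguments are passed straight through to scons):

```sh
# Hypothetical invocation from a MongoDB source tree: build mongod against a
# statically linked RocksDB, swapping the default tcmalloc for jemalloc.
cd ~/mongo
ROCKSDB_PATH=~/rocksdb ALLOC=jemalloc \
  ~/rocksdb/build_tools/fb_compile_mongo.sh mongod
```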
175
rocksdb/vendor/rocksdb/build_tools/fbcode_config.sh
vendored
Normal file
@ -0,0 +1,175 @@
#!/bin/sh
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# Set environment variables so that we can compile rocksdb using
# fbcode settings. It uses the latest g++ and clang compilers and also
# uses jemalloc
# Environment variables that change the behavior of this script:
# PIC_BUILD -- if true, it will only take pic versions of libraries from fbcode. libraries that don't have pic variant will not be included


BASEDIR=`dirname $BASH_SOURCE`
source "$BASEDIR/dependencies.sh"

CFLAGS=""

# libgcc
LIBGCC_INCLUDE="$LIBGCC_BASE/include"
LIBGCC_LIBS=" -L $LIBGCC_BASE/lib"

# glibc
GLIBC_INCLUDE="$GLIBC_BASE/include"
GLIBC_LIBS=" -L $GLIBC_BASE/lib"

if ! test $ROCKSDB_DISABLE_SNAPPY; then
  # snappy
  SNAPPY_INCLUDE=" -I $SNAPPY_BASE/include/"
  if test -z $PIC_BUILD; then
    SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy.a"
  else
    SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy_pic.a"
  fi
  CFLAGS+=" -DSNAPPY"
fi

if test -z $PIC_BUILD; then
  if ! test $ROCKSDB_DISABLE_ZLIB; then
    # location of zlib headers and libraries
    ZLIB_INCLUDE=" -I $ZLIB_BASE/include/"
    ZLIB_LIBS=" $ZLIB_BASE/lib/libz.a"
    CFLAGS+=" -DZLIB"
  fi

  if ! test $ROCKSDB_DISABLE_BZIP; then
    # location of bzip headers and libraries
    BZIP_INCLUDE=" -I $BZIP2_BASE/include/"
    BZIP_LIBS=" $BZIP2_BASE/lib/libbz2.a"
    CFLAGS+=" -DBZIP2"
  fi

  if ! test $ROCKSDB_DISABLE_LZ4; then
    LZ4_INCLUDE=" -I $LZ4_BASE/include/"
    LZ4_LIBS=" $LZ4_BASE/lib/liblz4.a"
    CFLAGS+=" -DLZ4"
  fi
fi

if ! test $ROCKSDB_DISABLE_ZSTD; then
  ZSTD_INCLUDE=" -I $ZSTD_BASE/include/"
  if test -z $PIC_BUILD; then
    ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd.a"
  else
    ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd_pic.a"
  fi
  CFLAGS+=" -DZSTD -DZSTD_STATIC_LINKING_ONLY"
fi

# location of gflags headers and libraries
GFLAGS_INCLUDE=" -I $GFLAGS_BASE/include/"
if test -z $PIC_BUILD; then
  GFLAGS_LIBS=" $GFLAGS_BASE/lib/libgflags.a"
else
  GFLAGS_LIBS=" $GFLAGS_BASE/lib/libgflags_pic.a"
fi
CFLAGS+=" -DGFLAGS=gflags"

# location of jemalloc
JEMALLOC_INCLUDE=" -I $JEMALLOC_BASE/include/"
JEMALLOC_LIB=" $JEMALLOC_BASE/lib/libjemalloc.a"

if test -z $PIC_BUILD; then
  # location of numa
  NUMA_INCLUDE=" -I $NUMA_BASE/include/"
  NUMA_LIB=" $NUMA_BASE/lib/libnuma.a"
  CFLAGS+=" -DNUMA"

  # location of libunwind
  LIBUNWIND="$LIBUNWIND_BASE/lib/libunwind.a"
fi

# location of TBB
TBB_INCLUDE=" -isystem $TBB_BASE/include/"
if test -z $PIC_BUILD; then
  TBB_LIBS="$TBB_BASE/lib/libtbb.a"
else
  TBB_LIBS="$TBB_BASE/lib/libtbb_pic.a"
fi
CFLAGS+=" -DTBB"

test "$USE_SSE" || USE_SSE=1
export USE_SSE
test "$PORTABLE" || PORTABLE=1
export PORTABLE

BINUTILS="$BINUTILS_BASE/bin"
AR="$BINUTILS/ar"

DEPS_INCLUDE="$SNAPPY_INCLUDE $ZLIB_INCLUDE $BZIP_INCLUDE $LZ4_INCLUDE $ZSTD_INCLUDE $GFLAGS_INCLUDE $NUMA_INCLUDE $TBB_INCLUDE"

STDLIBS="-L $GCC_BASE/lib64"

CLANG_BIN="$CLANG_BASE/bin"
CLANG_LIB="$CLANG_BASE/lib"
CLANG_SRC="$CLANG_BASE/../../src"

CLANG_ANALYZER="$CLANG_BIN/clang++"
CLANG_SCAN_BUILD="$CLANG_SRC/llvm/tools/clang/tools/scan-build/bin/scan-build"

if [ -z "$USE_CLANG" ]; then
  # gcc
  CC="$GCC_BASE/bin/gcc"
  CXX="$GCC_BASE/bin/g++"
  AR="$GCC_BASE/bin/gcc-ar"

  CFLAGS+=" -B$BINUTILS/gold"
  CFLAGS+=" -isystem $GLIBC_INCLUDE"
  CFLAGS+=" -isystem $LIBGCC_INCLUDE"
  JEMALLOC=1
else
  # clang
  CLANG_INCLUDE="$CLANG_LIB/clang/stable/include"
  CC="$CLANG_BIN/clang"
  CXX="$CLANG_BIN/clang++"
  AR="$CLANG_BIN/llvm-ar"

  KERNEL_HEADERS_INCLUDE="$KERNEL_HEADERS_BASE/include"

  CFLAGS+=" -B$BINUTILS/gold -nostdinc -nostdlib"
  CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/5.x "
  CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/5.x/x86_64-facebook-linux "
  CFLAGS+=" -isystem $GLIBC_INCLUDE"
  CFLAGS+=" -isystem $LIBGCC_INCLUDE"
  CFLAGS+=" -isystem $CLANG_INCLUDE"
  CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE/linux "
  CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE "
  CFLAGS+=" -Wno-expansion-to-defined "
  CXXFLAGS="-nostdinc++"
fi

CFLAGS+=" $DEPS_INCLUDE"
CFLAGS+=" -DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DROCKSDB_FALLOCATE_PRESENT -DROCKSDB_MALLOC_USABLE_SIZE -DROCKSDB_RANGESYNC_PRESENT -DROCKSDB_SCHED_GETCPU_PRESENT"
CXXFLAGS+=" $CFLAGS"

EXEC_LDFLAGS=" $SNAPPY_LIBS $ZLIB_LIBS $BZIP_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS $NUMA_LIB $TBB_LIBS"
EXEC_LDFLAGS+=" -B$BINUTILS/gold"
EXEC_LDFLAGS+=" -Wl,--dynamic-linker,/usr/local/fbcode/gcc-5-glibc-2.23/lib/ld.so"
EXEC_LDFLAGS+=" $LIBUNWIND"
EXEC_LDFLAGS+=" -Wl,-rpath=/usr/local/fbcode/gcc-5-glibc-2.23/lib"
# required by libtbb
EXEC_LDFLAGS+=" -ldl"

PLATFORM_LDFLAGS="$LIBGCC_LIBS $GLIBC_LIBS $STDLIBS -lgcc -lstdc++"

EXEC_LDFLAGS_SHARED="$SNAPPY_LIBS $ZLIB_LIBS $BZIP_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS $TBB_LIBS"

VALGRIND_VER="$VALGRIND_BASE/bin/"

LUA_PATH="$LUA_BASE"

if test -z $PIC_BUILD; then
  LUA_LIB=" $LUA_PATH/lib/liblua.a"
else
  LUA_LIB=" $LUA_PATH/lib/liblua_pic.a"
fi

export CC CXX AR CFLAGS CXXFLAGS EXEC_LDFLAGS EXEC_LDFLAGS_SHARED VALGRIND_VER JEMALLOC_LIB JEMALLOC_INCLUDE CLANG_ANALYZER CLANG_SCAN_BUILD LUA_PATH LUA_LIB
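Because this script only assembles and exports environment variables, a consumer sources it before invoking make. A minimal sketch, assuming a RocksDB checkout with the standard `static_lib` target (set `PIC_BUILD=1` beforehand to select the `*_pic.a` archives instead):

```sh
# Hypothetical usage: adopt the fbcode toolchain, then build as usual.
source build_tools/fbcode_config.sh
make static_lib -j"$(nproc)"
```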
175
rocksdb/vendor/rocksdb/build_tools/fbcode_config_platform010.sh
vendored
Normal file
@ -0,0 +1,175 @@
#!/bin/sh
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# Set environment variables so that we can compile rocksdb using
# fbcode settings. It uses the latest g++ and clang compilers and also
# uses jemalloc
# Environment variables that change the behavior of this script:
# PIC_BUILD -- if true, it will only take pic versions of libraries from fbcode. libraries that don't have pic variant will not be included


BASEDIR=`dirname $BASH_SOURCE`
source "$BASEDIR/dependencies_platform010.sh"

# Disallow using libraries from default locations as they might not be compatible with platform010 libraries.
CFLAGS=" --sysroot=/DOES/NOT/EXIST"

# libgcc
LIBGCC_INCLUDE="$LIBGCC_BASE/include/c++/trunk"
LIBGCC_LIBS=" -L $LIBGCC_BASE/lib -B$LIBGCC_BASE/lib/gcc/x86_64-facebook-linux/trunk/"

# glibc
GLIBC_INCLUDE="$GLIBC_BASE/include"
GLIBC_LIBS=" -L $GLIBC_BASE/lib"
GLIBC_LIBS+=" -B$GLIBC_BASE/lib"

if test -z $PIC_BUILD; then
  MAYBE_PIC=
else
  MAYBE_PIC=_pic
fi

if ! test $ROCKSDB_DISABLE_SNAPPY; then
  # snappy
  SNAPPY_INCLUDE=" -I $SNAPPY_BASE/include/"
  SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy${MAYBE_PIC}.a"
  CFLAGS+=" -DSNAPPY"
fi

if ! test $ROCKSDB_DISABLE_ZLIB; then
  # location of zlib headers and libraries
  ZLIB_INCLUDE=" -I $ZLIB_BASE/include/"
  ZLIB_LIBS=" $ZLIB_BASE/lib/libz${MAYBE_PIC}.a"
  CFLAGS+=" -DZLIB"
fi

if ! test $ROCKSDB_DISABLE_BZIP; then
  # location of bzip headers and libraries
  BZIP_INCLUDE=" -I $BZIP2_BASE/include/"
  BZIP_LIBS=" $BZIP2_BASE/lib/libbz2${MAYBE_PIC}.a"
  CFLAGS+=" -DBZIP2"
fi

if ! test $ROCKSDB_DISABLE_LZ4; then
  LZ4_INCLUDE=" -I $LZ4_BASE/include/"
  LZ4_LIBS=" $LZ4_BASE/lib/liblz4${MAYBE_PIC}.a"
  CFLAGS+=" -DLZ4"
fi

if ! test $ROCKSDB_DISABLE_ZSTD; then
  ZSTD_INCLUDE=" -I $ZSTD_BASE/include/"
  ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd${MAYBE_PIC}.a"
  CFLAGS+=" -DZSTD"
fi

# location of gflags headers and libraries
GFLAGS_INCLUDE=" -I $GFLAGS_BASE/include/"
GFLAGS_LIBS=" $GFLAGS_BASE/lib/libgflags${MAYBE_PIC}.a"
CFLAGS+=" -DGFLAGS=gflags"

BENCHMARK_INCLUDE=" -I $BENCHMARK_BASE/include/"
BENCHMARK_LIBS=" $BENCHMARK_BASE/lib/libbenchmark${MAYBE_PIC}.a"

# location of jemalloc
JEMALLOC_INCLUDE=" -I $JEMALLOC_BASE/include/"
JEMALLOC_LIB=" $JEMALLOC_BASE/lib/libjemalloc${MAYBE_PIC}.a"

# location of numa
NUMA_INCLUDE=" -I $NUMA_BASE/include/"
NUMA_LIB=" $NUMA_BASE/lib/libnuma${MAYBE_PIC}.a"
CFLAGS+=" -DNUMA"

# location of libunwind
LIBUNWIND="$LIBUNWIND_BASE/lib/libunwind${MAYBE_PIC}.a"

# location of TBB
TBB_INCLUDE=" -isystem $TBB_BASE/include/"
TBB_LIBS="$TBB_BASE/lib/libtbb${MAYBE_PIC}.a"
CFLAGS+=" -DTBB"

# location of LIBURING
LIBURING_INCLUDE=" -isystem $LIBURING_BASE/include/"
LIBURING_LIBS="$LIBURING_BASE/lib/liburing${MAYBE_PIC}.a"
CFLAGS+=" -DLIBURING"

test "$USE_SSE" || USE_SSE=1
export USE_SSE
test "$PORTABLE" || PORTABLE=1
export PORTABLE

BINUTILS="$BINUTILS_BASE/bin"
AR="$BINUTILS/ar"
AS="$BINUTILS/as"

DEPS_INCLUDE="$SNAPPY_INCLUDE $ZLIB_INCLUDE $BZIP_INCLUDE $LZ4_INCLUDE $ZSTD_INCLUDE $GFLAGS_INCLUDE $NUMA_INCLUDE $TBB_INCLUDE $LIBURING_INCLUDE $BENCHMARK_INCLUDE"

STDLIBS="-L $GCC_BASE/lib64"

CLANG_BIN="$CLANG_BASE/bin"
CLANG_LIB="$CLANG_BASE/lib"
CLANG_SRC="$CLANG_BASE/../../src"

CLANG_ANALYZER="$CLANG_BIN/clang++"
CLANG_SCAN_BUILD="$CLANG_SRC/llvm/clang/tools/scan-build/bin/scan-build"

if [ -z "$USE_CLANG" ]; then
  # gcc
  CC="$GCC_BASE/bin/gcc"
  CXX="$GCC_BASE/bin/g++"
  AR="$GCC_BASE/bin/gcc-ar"

  CFLAGS+=" -B$BINUTILS -nostdinc -nostdlib"
  CFLAGS+=" -I$GCC_BASE/include"
  CFLAGS+=" -isystem $GCC_BASE/lib/gcc/x86_64-redhat-linux-gnu/11.2.1/include"
  CFLAGS+=" -isystem $GCC_BASE/lib/gcc/x86_64-redhat-linux-gnu/11.2.1/install-tools/include"
  CFLAGS+=" -isystem $GCC_BASE/lib/gcc/x86_64-redhat-linux-gnu/11.2.1/include-fixed/"
  CFLAGS+=" -isystem $LIBGCC_INCLUDE"
  CFLAGS+=" -isystem $GLIBC_INCLUDE"
  CFLAGS+=" -I$GLIBC_INCLUDE"
  CFLAGS+=" -I$LIBGCC_BASE/include"
  CFLAGS+=" -I$LIBGCC_BASE/include/c++/11.x/"
  CFLAGS+=" -I$LIBGCC_BASE/include/c++/11.x/x86_64-facebook-linux/"
  CFLAGS+=" -I$LIBGCC_BASE/include/c++/11.x/backward"
  CFLAGS+=" -isystem $GLIBC_INCLUDE -I$GLIBC_INCLUDE"
  JEMALLOC=1
else
  # clang
  CLANG_INCLUDE="$CLANG_LIB/clang/stable/include"
  CC="$CLANG_BIN/clang"
  CXX="$CLANG_BIN/clang++"
  AR="$CLANG_BIN/llvm-ar"

  CFLAGS+=" -B$BINUTILS -nostdinc -nostdlib"
  CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/trunk "
  CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/trunk/x86_64-facebook-linux "
  CFLAGS+=" -isystem $GLIBC_INCLUDE"
  CFLAGS+=" -isystem $LIBGCC_INCLUDE"
  CFLAGS+=" -isystem $CLANG_INCLUDE"
  CFLAGS+=" -Wno-expansion-to-defined "
  CXXFLAGS="-nostdinc++"
fi

KERNEL_HEADERS_INCLUDE="$KERNEL_HEADERS_BASE/include"
CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE/linux "
CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE "

CFLAGS+=" $DEPS_INCLUDE"
CFLAGS+=" -DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DROCKSDB_FALLOCATE_PRESENT -DROCKSDB_MALLOC_USABLE_SIZE -DROCKSDB_RANGESYNC_PRESENT -DROCKSDB_SCHED_GETCPU_PRESENT -DROCKSDB_IOURING_PRESENT"
CXXFLAGS+=" $CFLAGS"

EXEC_LDFLAGS=" $SNAPPY_LIBS $ZLIB_LIBS $BZIP_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS $NUMA_LIB $TBB_LIBS $LIBURING_LIBS $BENCHMARK_LIBS"
EXEC_LDFLAGS+=" -Wl,--dynamic-linker,/usr/local/fbcode/platform010/lib/ld.so"
EXEC_LDFLAGS+=" $LIBUNWIND"
EXEC_LDFLAGS+=" -Wl,-rpath=/usr/local/fbcode/platform010/lib"
EXEC_LDFLAGS+=" -Wl,-rpath=$GCC_BASE/lib64"
# required by libtbb
EXEC_LDFLAGS+=" -ldl"

PLATFORM_LDFLAGS="$LIBGCC_LIBS $GLIBC_LIBS $STDLIBS -lgcc -lstdc++"
PLATFORM_LDFLAGS+=" -B$BINUTILS"

EXEC_LDFLAGS_SHARED="$SNAPPY_LIBS $ZLIB_LIBS $BZIP_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS $TBB_LIBS $LIBURING_LIBS $BENCHMARK_LIBS"

VALGRIND_VER="$VALGRIND_BASE/bin/"

export CC CXX AR AS CFLAGS CXXFLAGS EXEC_LDFLAGS EXEC_LDFLAGS_SHARED VALGRIND_VER JEMALLOC_LIB JEMALLOC_INCLUDE CLANG_ANALYZER CLANG_SCAN_BUILD LUA_PATH LUA_LIB
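Compared with the older config above, this variant collapses the per-library PIC branches into a single `MAYBE_PIC` suffix that is interpolated into every archive name. The pattern in isolation, for reference (a sketch; `SNAPPY_BASE` would come from the sourced dependencies file):

```sh
# One variable selects libfoo.a vs. libfoo_pic.a everywhere, replacing an
# if/else per dependency.
if test -z "$PIC_BUILD"; then MAYBE_PIC=; else MAYBE_PIC=_pic; fi
echo "would link: $SNAPPY_BASE/lib/libsnappy${MAYBE_PIC}.a"
```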
203
rocksdb/vendor/rocksdb/build_tools/format-diff.sh
vendored
Normal file
@ -0,0 +1,203 @@
#!/usr/bin/env bash
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# If clang_format_diff.py command is not specified, we assume we are able to
# access directly without any path.

print_usage () {
  echo "Usage:"
  echo "format-diff.sh [OPTIONS]"
  echo "-c: check only."
  echo "-h: print this message."
}

while getopts ':ch' OPTION; do
  case "$OPTION" in
    c)
      CHECK_ONLY=1
      ;;
    h)
      print_usage
      exit 1
      ;;
    ?)
      print_usage
      exit 1
      ;;
  esac
done

REPO_ROOT="$(git rev-parse --show-toplevel)"

if [ "$CLANG_FORMAT_DIFF" ]; then
  echo "Note: CLANG_FORMAT_DIFF='$CLANG_FORMAT_DIFF'"
  # Dry run to confirm dependencies like argparse
  if $CLANG_FORMAT_DIFF --help >/dev/null < /dev/null; then
    true #Good
  else
    exit 128
  fi
else
  # First try directly executing the possibilities
  if clang-format-diff --help &> /dev/null < /dev/null; then
    CLANG_FORMAT_DIFF=clang-format-diff
  elif clang-format-diff.py --help &> /dev/null < /dev/null; then
    CLANG_FORMAT_DIFF=clang-format-diff.py
  elif $REPO_ROOT/clang-format-diff.py --help &> /dev/null < /dev/null; then
    CLANG_FORMAT_DIFF=$REPO_ROOT/clang-format-diff.py
  else
    # This probably means we need to directly invoke the interpreter.
    # But first find clang-format-diff.py
    if [ -f "$REPO_ROOT/clang-format-diff.py" ]; then
      CFD_PATH="$REPO_ROOT/clang-format-diff.py"
    elif which clang-format-diff.py &> /dev/null; then
      CFD_PATH="$(which clang-format-diff.py)"
    else
      echo "You don't have clang-format-diff.py and/or clang-format available on your computer!"
      echo "You can download clang-format-diff.py by running: "
      echo "    curl --location https://raw.githubusercontent.com/llvm/llvm-project/main/clang/tools/clang-format/clang-format-diff.py -o ${REPO_ROOT}/clang-format-diff.py"
      echo "You should make sure the downloaded script is not compromised."
      echo "You can download clang-format by running:"
      echo "    brew install clang-format"
      echo "  Or"
      echo "    apt install clang-format"
      echo "  This might work too:"
      echo "    yum install git-clang-format"
      echo "Then make sure clang-format is available and executable from \$PATH:"
      echo "    clang-format --version"
      exit 128
    fi
    # Check argparse pre-req on interpreter, or it will fail
    if echo import argparse | ${PYTHON:-python3}; then
      true # Good
    else
      echo "To run clang-format-diff.py, we'll need the library \"argparse\" to be"
      echo "installed. You can try either of the following ways to install it:"
      echo "  1. Manually download argparse: https://pypi.python.org/pypi/argparse"
      echo "  2. easy_install argparse (if you have easy_install)"
      echo "  3. pip install argparse (if you have pip)"
      exit 129
    fi
    # Unfortunately, some machines have a Python2 clang-format-diff.py
    # installed but only a Python3 interpreter installed. Unfortunately,
    # automatic 2to3 migration is insufficient, so suggest downloading latest.
    if grep -q "print '" "$CFD_PATH" && \
       ${PYTHON:-python3} --version | grep -q 'ython 3'; then
      echo "You have clang-format-diff.py for Python 2 but are using a Python 3"
      echo "interpreter (${PYTHON:-python3})."
      echo "You can download clang-format-diff.py for Python 3 by running: "
      echo "    curl --location https://raw.githubusercontent.com/llvm/llvm-project/main/clang/tools/clang-format/clang-format-diff.py -o ${REPO_ROOT}/clang-format-diff.py"
      echo "You should make sure the downloaded script is not compromised."
      exit 130
    fi
    CLANG_FORMAT_DIFF="${PYTHON:-python3} $CFD_PATH"
    # This had better work after all those checks
    if $CLANG_FORMAT_DIFF --help >/dev/null < /dev/null; then
      true #Good
    else
      exit 128
    fi
  fi
fi

# TODO(kailiu) following work is not complete since we still need to figure
# out how to add the modified files done pre-commit hook to git's commit index.
#
# Check if this script has already been added to pre-commit hook.
# Will suggest user to add this script to pre-commit hook if their pre-commit
# is empty.
# PRE_COMMIT_SCRIPT_PATH="`git rev-parse --show-toplevel`/.git/hooks/pre-commit"
# if ! ls $PRE_COMMIT_SCRIPT_PATH &> /dev/null
# then
#   echo "Would you like to add this script to pre-commit hook, which will do "
#   echo -n "the format check for all the affected lines before you check in (y/n):"
#   read add_to_hook
#   if [ "$add_to_hook" == "y" ]
#   then
#     ln -s `git rev-parse --show-toplevel`/build_tools/format-diff.sh $PRE_COMMIT_SCRIPT_PATH
#   fi
# fi
set -e

uncommitted_code=`git diff HEAD`

# If there are no uncommitted changes, we assume the user is doing a
# post-commit format check, in which case we'll check the modified lines
# against the facebook/rocksdb.git main branch. Otherwise, we'll check the
# format of the uncommitted code only.
if [ -z "$uncommitted_code" ]
then
  # Attempt to get name of facebook/rocksdb.git remote.
  [ "$FORMAT_REMOTE" ] || FORMAT_REMOTE="$(LC_ALL=POSIX LANG=POSIX git remote -v | grep 'facebook/rocksdb.git' | head -n 1 | cut -f 1)"
  # Fall back on 'origin' if that fails
  [ "$FORMAT_REMOTE" ] || FORMAT_REMOTE=origin
  # Use main branch from that remote
  [ "$FORMAT_UPSTREAM" ] || FORMAT_UPSTREAM="$FORMAT_REMOTE/$(LC_ALL=POSIX LANG=POSIX git remote show $FORMAT_REMOTE | sed -n '/HEAD branch/s/.*: //p')"
  # Get the common ancestor with that remote branch. Everything after that
  # common ancestor would be considered the contents of a pull request, so
  # should be relevant for formatting fixes.
  FORMAT_UPSTREAM_MERGE_BASE="$(git merge-base "$FORMAT_UPSTREAM" HEAD)"
  # Get the differences
  diffs=$(git diff -U0 "$FORMAT_UPSTREAM_MERGE_BASE" | $CLANG_FORMAT_DIFF -p 1)
  echo "Checking format of changes not yet in $FORMAT_UPSTREAM..."
else
  # Check the format of uncommitted lines,
  diffs=$(git diff -U0 HEAD | $CLANG_FORMAT_DIFF -p 1)
  echo "Checking format of uncommitted changes..."
fi

if [ -z "$diffs" ]
then
  echo "Nothing needs to be reformatted!"
  exit 0
elif [ $CHECK_ONLY ]
then
  echo "Your change has unformatted code. Please run make format!"
  if [ $VERBOSE_CHECK ]; then
    clang-format --version
    echo "$diffs"
  fi
  exit 1
fi

# Highlight the insertion/deletion from the clang-format-diff.py's output
COLOR_END="\033[0m"
COLOR_RED="\033[0;31m"
COLOR_GREEN="\033[0;32m"

echo -e "Detected lines that don't follow the format rules:\r"
# Add the color to the diff. lines added will be green; lines removed will be red.
echo "$diffs" |
  sed -e "s/\(^-.*$\)/`echo -e \"$COLOR_RED\1$COLOR_END\"`/" |
  sed -e "s/\(^+.*$\)/`echo -e \"$COLOR_GREEN\1$COLOR_END\"`/"

echo -e "Would you like to fix the format automatically (y/n): \c"

# Make sure under any mode, we can read user input.
exec < /dev/tty
read to_fix

if [ "$to_fix" != "y" ]
then
  exit 1
fi

# Do in-place format adjustment.
if [ -z "$uncommitted_code" ]
then
  git diff -U0 "$FORMAT_UPSTREAM_MERGE_BASE" | $CLANG_FORMAT_DIFF -i -p 1
else
  git diff -U0 HEAD | $CLANG_FORMAT_DIFF -i -p 1
fi
echo "Files reformatted!"

# Amend to last commit if the user does the post-commit format check
if [ -z "$uncommitted_code" ]; then
  echo -e "Would you like to amend the changes to last commit (`git log HEAD --oneline | head -1`)? (y/n): \c"
  read to_amend

  if [ "$to_amend" == "y" ]
  then
    git commit -a --amend --reuse-message HEAD
    echo "Amended to last commit"
  fi
fi
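For CI, the check-only mode is the useful entry point; a minimal sketch (both knobs come straight from the script above, so the only assumption is that it runs from the repository root):

```sh
# Hypothetical CI step: exit non-zero on formatting drift and dump the
# offending diff into the build log.
VERBOSE_CHECK=1 build_tools/format-diff.sh -c
```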
129
rocksdb/vendor/rocksdb/build_tools/make_package.sh
vendored
Normal file
@ -0,0 +1,129 @@
# shellcheck disable=SC1113
#/usr/bin/env bash
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

set -e

function log() {
  echo "[+] $1"
}

function fatal() {
  echo "[!] $1"
  exit 1
}

function platform() {
  local __resultvar=$1
  if [[ -f "/etc/yum.conf" ]]; then
    eval $__resultvar="centos"
  elif [[ -f "/etc/dpkg/dpkg.cfg" ]]; then
    eval $__resultvar="ubuntu"
  else
    fatal "Unknown operating system"
  fi
}
platform OS

function package() {
  if [[ $OS = "ubuntu" ]]; then
    if dpkg --get-selections | grep --quiet $1; then
      log "$1 is already installed. skipping."
    else
      # shellcheck disable=SC2068
      apt-get install $@ -y
    fi
  elif [[ $OS = "centos" ]]; then
    if rpm -qa | grep --quiet $1; then
      log "$1 is already installed. skipping."
    else
      # shellcheck disable=SC2068
      yum install $@ -y
    fi
  fi
}

function detect_fpm_output() {
  if [[ $OS = "ubuntu" ]]; then
    export FPM_OUTPUT=deb
  elif [[ $OS = "centos" ]]; then
    export FPM_OUTPUT=rpm
  fi
}
detect_fpm_output

function gem_install() {
  if gem list | grep --quiet $1; then
    log "$1 is already installed. skipping."
  else
    # shellcheck disable=SC2068
    gem install $@
  fi
}

function main() {
  if [[ $# -ne 1 ]]; then
    fatal "Usage: $0 <rocksdb_version>"
  else
    log "using rocksdb version: $1"
  fi

  if [[ -d /vagrant ]]; then
    if [[ $OS = "ubuntu" ]]; then
      package g++-4.8
      export CXX=g++-4.8

      # the deb would depend on libgflags2, but the static lib is the only thing
      # installed by make install
      package libgflags-dev

      package ruby-all-dev
    elif [[ $OS = "centos" ]]; then
      pushd /etc/yum.repos.d
      if [[ ! -f /etc/yum.repos.d/devtools-1.1.repo ]]; then
        wget http://people.centos.org/tru/devtools-1.1/devtools-1.1.repo
      fi
      package devtoolset-1.1-gcc --enablerepo=testing-1.1-devtools-6
      package devtoolset-1.1-gcc-c++ --enablerepo=testing-1.1-devtools-6
      export CC=/opt/centos/devtoolset-1.1/root/usr/bin/gcc
      export CPP=/opt/centos/devtoolset-1.1/root/usr/bin/cpp
      export CXX=/opt/centos/devtoolset-1.1/root/usr/bin/c++
      export PATH=$PATH:/opt/centos/devtoolset-1.1/root/usr/bin
      popd
      if ! rpm -qa | grep --quiet gflags; then
        rpm -i https://github.com/schuhschuh/gflags/releases/download/v2.1.0/gflags-devel-2.1.0-1.amd64.rpm
      fi

      package ruby
      package ruby-devel
      package rubygems
      package rpm-build
    fi
  fi
  gem_install fpm

  make static_lib
  LIBDIR=/usr/lib
  if [[ $FPM_OUTPUT = "rpm" ]]; then
    LIBDIR=$(rpm --eval '%_libdir')
  fi

  rm -rf package
  make install DESTDIR=package PREFIX=/usr LIBDIR=$LIBDIR

  fpm \
    -s dir \
    -t $FPM_OUTPUT \
    -C package \
    -n rocksdb \
    -v $1 \
    --url http://rocksdb.org/ \
    -m rocksdb@fb.com \
    --license BSD \
    --vendor Facebook \
    --description "RocksDB is an embeddable persistent key-value store for fast storage." \
    usr
}

# shellcheck disable=SC2068
main $@
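A sketch of a packaging run (the version number is made up; the script requires exactly one argument and picks .deb or .rpm output from the detected OS):

```sh
# Hypothetical invocation on an Ubuntu builder: builds the static library,
# stages `make install` into ./package, and wraps it as a .deb via fpm.
build_tools/make_package.sh 5.8.0
```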
396
rocksdb/vendor/rocksdb/build_tools/regression_build_test.sh
vendored
Normal file
@ -0,0 +1,396 @@
#!/usr/bin/env bash
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

set -e

NUM=10000000

if [ $# -eq 1 ];then
  DATA_DIR=$1
elif [ $# -eq 2 ];then
  DATA_DIR=$1
  STAT_FILE=$2
fi

# On the production build servers, set data and stat
# files/directories not in /tmp or else the tempdir cleaning
# scripts will make you very unhappy.
DATA_DIR=${DATA_DIR:-$(mktemp -t -d rocksdb_XXXX)}
STAT_FILE=${STAT_FILE:-$(mktemp -t -u rocksdb_test_stats_XXXX)}

function cleanup {
  rm -rf $DATA_DIR
  rm -f $STAT_FILE.*
}

trap cleanup EXIT

make release

# measure fillseq + fill up the DB for overwrite benchmark
./db_bench \
    --benchmarks=fillseq \
    --db=$DATA_DIR \
    --use_existing_db=0 \
    --bloom_bits=10 \
    --num=$NUM \
    --writes=$NUM \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 > ${STAT_FILE}.fillseq

# measure overwrite performance
./db_bench \
    --benchmarks=overwrite \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$NUM \
    --writes=$((NUM / 10)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=8 > ${STAT_FILE}.overwrite

# fill up the db for readrandom benchmark (1GB total size)
./db_bench \
    --benchmarks=fillseq \
    --db=$DATA_DIR \
    --use_existing_db=0 \
    --bloom_bits=10 \
    --num=$NUM \
    --writes=$NUM \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=1 > /dev/null

# measure readrandom with 6GB block cache
./db_bench \
    --benchmarks=readrandom \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$NUM \
    --reads=$((NUM / 5)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > ${STAT_FILE}.readrandom

# measure readrandom with 6GB block cache and tailing iterator
./db_bench \
    --benchmarks=readrandom \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$NUM \
    --reads=$((NUM / 5)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --use_tailing_iterator=1 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > ${STAT_FILE}.readrandomtailing

# measure readrandom with 100MB block cache
./db_bench \
    --benchmarks=readrandom \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$NUM \
    --reads=$((NUM / 5)) \
    --cache_size=104857600 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > ${STAT_FILE}.readrandomsmallblockcache

# measure readrandom with 8k data in memtable
./db_bench \
    --benchmarks=overwrite,readrandom \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$NUM \
    --reads=$((NUM / 5)) \
    --writes=512 \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --write_buffer_size=1000000000 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > ${STAT_FILE}.readrandom_mem_sst


# fill up the db for readrandom benchmark with filluniquerandom (1GB total size)
./db_bench \
    --benchmarks=filluniquerandom \
    --db=$DATA_DIR \
    --use_existing_db=0 \
    --bloom_bits=10 \
    --num=$((NUM / 4)) \
    --writes=$((NUM / 4)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=1 > /dev/null

# dummy test just to compact the data
./db_bench \
    --benchmarks=readrandom \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$((NUM / 1000)) \
    --reads=$((NUM / 1000)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > /dev/null

# measure readrandom after load with filluniquerandom with 6GB block cache
./db_bench \
    --benchmarks=readrandom \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$((NUM / 4)) \
    --reads=$((NUM / 4)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --disable_auto_compactions=1 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > ${STAT_FILE}.readrandom_filluniquerandom

# measure readwhilewriting after load with filluniquerandom with 6GB block cache
./db_bench \
    --benchmarks=readwhilewriting \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$((NUM / 4)) \
    --reads=$((NUM / 4)) \
    --benchmark_write_rate_limit=$(( 110 * 1024 )) \
    --write_buffer_size=100000000 \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > ${STAT_FILE}.readwhilewriting

# measure memtable performance -- none of the data gets flushed to disk
./db_bench \
    --benchmarks=fillrandom,readrandom \
    --db=$DATA_DIR \
    --use_existing_db=0 \
    --num=$((NUM / 10)) \
    --reads=$NUM \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --write_buffer_size=1000000000 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --value_size=10 \
    --threads=16 > ${STAT_FILE}.memtablefillreadrandom

common_in_mem_args="--db=/dev/shm/rocksdb \
    --num_levels=6 \
    --key_size=20 \
    --prefix_size=12 \
    --keys_per_prefix=10 \
    --value_size=100 \
    --compression_type=none \
    --compression_ratio=1 \
    --write_buffer_size=134217728 \
    --max_write_buffer_number=4 \
    --level0_file_num_compaction_trigger=8 \
    --level0_slowdown_writes_trigger=16 \
    --level0_stop_writes_trigger=24 \
    --target_file_size_base=134217728 \
    --max_bytes_for_level_base=1073741824 \
    --disable_wal=0 \
    --wal_dir=/dev/shm/rocksdb \
    --sync=0 \
    --verify_checksum=1 \
    --delete_obsolete_files_period_micros=314572800 \
    --use_plain_table=1 \
    --open_files=-1 \
    --mmap_read=1 \
    --mmap_write=0 \
    --bloom_bits=10 \
    --bloom_locality=1 \
    --perf_level=0"

# prepare an in-memory DB with 50M keys, total DB size is ~6G
./db_bench \
    $common_in_mem_args \
    --statistics=0 \
    --max_background_compactions=16 \
    --max_background_flushes=16 \
    --benchmarks=filluniquerandom \
    --use_existing_db=0 \
    --num=52428800 \
    --threads=1 > /dev/null

# Readwhilewriting
./db_bench \
    $common_in_mem_args \
    --statistics=1 \
    --max_background_compactions=4 \
    --max_background_flushes=0 \
    --benchmarks=readwhilewriting \
    --use_existing_db=1 \
    --duration=600 \
    --threads=32 \
    --benchmark_write_rate_limit=9502720 > ${STAT_FILE}.readwhilewriting_in_ram

# Seekrandomwhilewriting
./db_bench \
    $common_in_mem_args \
    --statistics=1 \
    --max_background_compactions=4 \
    --max_background_flushes=0 \
    --benchmarks=seekrandomwhilewriting \
    --use_existing_db=1 \
    --use_tailing_iterator=1 \
    --duration=600 \
    --threads=32 \
    --benchmark_write_rate_limit=9502720 > ${STAT_FILE}.seekwhilewriting_in_ram

# measure fillseq with bunch of column families
./db_bench \
    --benchmarks=fillseq \
    --num_column_families=500 \
    --write_buffer_size=1048576 \
    --db=$DATA_DIR \
    --use_existing_db=0 \
    --num=$NUM \
    --writes=$NUM \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 > ${STAT_FILE}.fillseq_lots_column_families

# measure overwrite performance with bunch of column families
./db_bench \
    --benchmarks=overwrite \
    --num_column_families=500 \
    --write_buffer_size=1048576 \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --num=$NUM \
    --writes=$((NUM / 10)) \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=8 > ${STAT_FILE}.overwrite_lots_column_families

# send data to ods
function send_to_ods {
  key="$1"
  value="$2"

  if [ -z $JENKINS_HOME ]; then
    # running on devbox, just print out the values
    echo $1 $2
    return
  fi

  if [ -z "$value" ];then
    echo >&2 "ERROR: Key $key doesn't have a value."
    return
  fi
  curl --silent "https://www.facebook.com/intern/agent/ods_set.php?entity=rocksdb_build&key=$key&value=$value" \
    --connect-timeout 60
}

function send_benchmark_to_ods {
  bench="$1"
  bench_key="$2"
  file="$3"

  QPS=$(grep $bench $file | awk '{print $5}')
  P50_MICROS=$(grep $bench $file -A 6 | grep "Percentiles" | awk '{print $3}' )
  P75_MICROS=$(grep $bench $file -A 6 | grep "Percentiles" | awk '{print $5}' )
  P99_MICROS=$(grep $bench $file -A 6 | grep "Percentiles" | awk '{print $7}' )

  send_to_ods rocksdb.build.$bench_key.qps $QPS
  send_to_ods rocksdb.build.$bench_key.p50_micros $P50_MICROS
  send_to_ods rocksdb.build.$bench_key.p75_micros $P75_MICROS
  send_to_ods rocksdb.build.$bench_key.p99_micros $P99_MICROS
}

send_benchmark_to_ods overwrite overwrite $STAT_FILE.overwrite
send_benchmark_to_ods fillseq fillseq $STAT_FILE.fillseq
send_benchmark_to_ods readrandom readrandom $STAT_FILE.readrandom
send_benchmark_to_ods readrandom readrandom_tailing $STAT_FILE.readrandomtailing
send_benchmark_to_ods readrandom readrandom_smallblockcache $STAT_FILE.readrandomsmallblockcache
send_benchmark_to_ods readrandom readrandom_memtable_sst $STAT_FILE.readrandom_mem_sst
send_benchmark_to_ods readrandom readrandom_fillunique_random $STAT_FILE.readrandom_filluniquerandom
send_benchmark_to_ods fillrandom memtablefillrandom $STAT_FILE.memtablefillreadrandom
send_benchmark_to_ods readrandom memtablereadrandom $STAT_FILE.memtablefillreadrandom
send_benchmark_to_ods readwhilewriting readwhilewriting $STAT_FILE.readwhilewriting
send_benchmark_to_ods readwhilewriting readwhilewriting_in_ram ${STAT_FILE}.readwhilewriting_in_ram
send_benchmark_to_ods seekrandomwhilewriting seekwhilewriting_in_ram ${STAT_FILE}.seekwhilewriting_in_ram
send_benchmark_to_ods fillseq fillseq_lots_column_families ${STAT_FILE}.fillseq_lots_column_families
send_benchmark_to_ods overwrite overwrite_lots_column_families ${STAT_FILE}.overwrite_lots_column_families
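The ODS upload helpers scrape db_bench's human-readable report with grep/awk, so the field positions matter. A standalone sketch of the same QPS extraction (the sample line imitates db_bench output and is made up):

```sh
# Field 5 of the summary line is ops/sec, which send_benchmark_to_ods
# reports as QPS.
echo "readrandom   : 2.178 micros/op 459211 ops/sec;" > stats.txt
grep readrandom stats.txt | awk '{print $5}'   # prints 459211
```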
493
rocksdb/vendor/rocksdb/build_tools/run_ci_db_test.ps1
vendored
Normal file
@ -0,0 +1,493 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# This script enables you to run RocksDB tests by running
# all the tests concurrently, utilizing all the cores
Param(
  [switch]$EnableJE = $false,  # Look for and use test executable, append _je to listed exclusions
  [switch]$RunAll = $false,    # Will attempt to discover all *_test[_je].exe binaries and run all
                               # of them as Google suites. I.e. It will run test cases concurrently
                               # except those mentioned as $Run, those will run as individual test cases
                               # And any excluded with $ExcludeExes or $ExcludeCases
                               # It will also not run any individual test cases
                               # excluded by $ExcludeCases
  [switch]$RunAllExe = $false, # Look for and use test executables, append _je to exclusions automatically
                               # It will attempt to run them in parallel w/o breaking them up on individual
                               # test cases. Those listed with $ExcludeExes will be excluded
  [string]$SuiteRun = "",      # Split test suites in test cases and run in parallel, not compatible with $RunAll
  [string]$Run = "",           # Run specified executables in parallel but do not split to test cases
  [string]$ExcludeCases = "",  # Exclude test cases, expects a comma separated list, no spaces
                               # Takes effect when $RunAll or $SuiteRun is specified. Must have full
                               # test case name including a group and a parameter if any
  [string]$ExcludeExes = "",   # Exclude exes from consideration, expects a comma separated list,
                               # no spaces. Takes effect only when $RunAll is specified
  [string]$WorkFolder = "",    # Direct tests to use that folder. SSD or Ram drive are better options.
  # Number of async tasks that would run concurrently. Recommend a number below 64.
  # However, CPU utilization really depends on the storage media. Recommend ram based disk.
  # a value of 1 will run everything serially
  [int]$Concurrency = 8,
  [int]$Limit = -1 # -1 means do not limit for test purposes
)

# Folders and commands must be fullpath to run assuming
# the current folder is at the root of the git enlistment
$StartDate = (Get-Date)
$StartDate


$DebugPreference = "Continue"

# These tests are not google test suites and we should guard
# against running them as suites
$RunOnly = New-Object System.Collections.Generic.HashSet[string]
$RunOnly.Add("c_test") | Out-Null
$RunOnly.Add("compact_on_deletion_collector_test") | Out-Null
$RunOnly.Add("merge_test") | Out-Null
$RunOnly.Add("stringappend_test") | Out-Null # Apparently incorrectly written
$RunOnly.Add("backup_engine_test") | Out-Null # Disabled
$RunOnly.Add("timer_queue_test") | Out-Null # Not a gtest

if($RunAll -and $SuiteRun -ne "") {
    Write-Error "$RunAll and $SuiteRun are not compatible"
    exit 1
}

if($RunAllExe -and $Run -ne "") {
    Write-Error "$RunAllExe and $Run are not compatible"
    exit 1
}

# If running under Appveyor assume that is the root
[string]$Appveyor = $Env:APPVEYOR_BUILD_FOLDER
if($Appveyor -ne "") {
    $RootFolder = $Appveyor
} else {
    $RootFolder = $PSScriptRoot -replace '\\build_tools', ''
}

$LogFolder = -Join($RootFolder, "\db_logs\")
$BinariesFolder = -Join($RootFolder, "\build\Debug\")

if($WorkFolder -eq "") {

    # If TEST_TMPDIR is set use it
    [string]$var = $Env:TEST_TMPDIR
    if($var -eq "") {
        $WorkFolder = -Join($RootFolder, "\db_tests\")
        $Env:TEST_TMPDIR = $WorkFolder
    } else {
        $WorkFolder = $var
    }
} else {
    # Override from a command line
    $Env:TEST_TMPDIR = $WorkFolder
}

Write-Output "Root: $RootFolder, WorkFolder: $WorkFolder"
Write-Output "BinariesFolder: $BinariesFolder, LogFolder: $LogFolder"

# Create test directories in the current folder
md -Path $WorkFolder -ErrorAction Ignore | Out-Null
md -Path $LogFolder -ErrorAction Ignore | Out-Null


$ExcludeCasesSet = New-Object System.Collections.Generic.HashSet[string]
if($ExcludeCases -ne "") {
    Write-Host "ExcludeCases: $ExcludeCases"
    $l = $ExcludeCases -split ' '
    ForEach($t in $l) {
        $ExcludeCasesSet.Add($t) | Out-Null
    }
}

$ExcludeExesSet = New-Object System.Collections.Generic.HashSet[string]
if($ExcludeExes -ne "") {
    Write-Host "ExcludeExe: $ExcludeExes"
    $l = $ExcludeExes -split ' '
    ForEach($t in $l) {
        $ExcludeExesSet.Add($t) | Out-Null
    }
}


# Extract the names of its tests by running db_test with --gtest_list_tests.
# This filter removes the "#"-introduced comments, and expands to
# fully-qualified names by changing input like this:
#
#   DBTest.
#     Empty
#     WriteEmptyBatch
#   MultiThreaded/MultiThreadedDBTest.
#     MultiThreaded/0  # GetParam() = 0
#     MultiThreaded/1  # GetParam() = 1
#   RibbonTypeParamTest/0.  # TypeParam = struct DefaultTypesAndSettings
#     CompactnessAndBacktrackAndFpRate
#     Extremes
#     FindOccupancyForSuccessRate
#
# into this:
#
#   DBTest.Empty
#   DBTest.WriteEmptyBatch
#   MultiThreaded/MultiThreadedDBTest.MultiThreaded/0
#   MultiThreaded/MultiThreadedDBTest.MultiThreaded/1
#   RibbonTypeParamTest/0.CompactnessAndBacktrackAndFpRate
#   RibbonTypeParamTest/0.Extremes
#   RibbonTypeParamTest/0.FindOccupancyForSuccessRate
#
# Output into the parameter in a form TestName -> Log File Name
function ExtractTestCases([string]$GTestExe, $HashTable) {

    $Tests = @()
    # Run db_test to get a list of tests and store it into $a array
    &$GTestExe --gtest_list_tests | tee -Variable Tests | Out-Null

    # Current group
    $Group=""

    ForEach( $l in $Tests) {

      # remove trailing comment if any
      $l = $l -replace '\s+\#.*',''
      # Leading whitespace is fine
      $l = $l -replace '^\s+',''
      # Trailing dot is a test group but no whitespace
      if ($l -match "\.$" -and $l -notmatch "\s+") {
        $Group = $l
      } else {
        # Otherwise it is a test name, remove leading space
        $test = $l
        # create a log name
        $test = "$Group$test"

        if($ExcludeCasesSet.Contains($test)) {
            Write-Warning "$test case is excluded"
            continue
        }

        $test_log = $test -replace '[\./]','_'
        $test_log += ".log"
        $log_path = -join ($LogFolder, $test_log)

        # Add to a hashtable
        $HashTable.Add($test, $log_path);
      }
    }
}

# The function removes the trailing .exe suffix if any,
# creates a name for the log file
# Then adds the test name if it was not excluded into
# a HashTable in a form of test_name -> log_path
function MakeAndAdd([string]$token, $HashTable) {

    $test_name = $token -replace '.exe$', ''
    $log_name = -join ($test_name, ".log")
    $log_path = -join ($LogFolder, $log_name)
    $HashTable.Add($test_name, $log_path)
}

# This function takes a list of Suites to run
# Lists all the test cases in each of the suites
# and populates HashOfHashes
# Ordered by suite(exe) @{ Exe = @{ TestCase = LogName }}
function ProcessSuites($ListOfSuites, $HashOfHashes) {

  $suite_list = $ListOfSuites
  # Problem: if you run --gtest_list_tests on
  # a non Google Test executable then it will start executing
  # and we will get nowhere
  ForEach($suite in $suite_list) {

    if($RunOnly.Contains($suite)) {
      Write-Warning "$suite is excluded from running as Google test suite"
      continue
    }

    if($EnableJE) {
      $suite += "_je"
    }

    $Cases = [ordered]@{}
    $Cases.Clear()
    $suite_exe = -Join ($BinariesFolder, $suite)
    ExtractTestCases -GTestExe $suite_exe -HashTable $Cases
    if($Cases.Count -gt 0) {
      $HashOfHashes.Add($suite, $Cases);
    }
  }

  # Make logs and run
  if($CasesToRun.Count -lt 1) {
     Write-Error "Failed to extract tests from $SuiteRun"
     exit 1
  }

}

# This will contain all test executables to run

# Hash table that contains all non suite
# test executables to run
$TestExes = [ordered]@{}

# Check for test exes that are not
# Google Test Suites
# Since this is explicitly mentioned it is not subject
# to exclusions
if($Run -ne "") {

    $test_list = $Run -split ' '
    ForEach($t in $test_list) {

      if($EnableJE) {
        $t += "_je"
      }
      MakeAndAdd -token $t -HashTable $TestExes
    }

    if($TestExes.Count -lt 1) {
       Write-Error "Failed to extract tests from $Run"
       exit 1
    }
} elseif($RunAllExe) {
    # Discover all the test binaries
    if($EnableJE) {
      $pattern = "*_test_je.exe"
    } else {
      $pattern = "*_test.exe"
    }

    $search_path = -join ($BinariesFolder, $pattern)
    Write-Host "Binaries Search Path: $search_path"

    $DiscoveredExe = @()
    dir -Path $search_path | ForEach-Object {
      $DiscoveredExe += ($_.Name)
    }

    # Remove exclusions
    ForEach($e in $DiscoveredExe) {
      $e = $e -replace '.exe$', ''
      $bare_name = $e -replace '_je$', ''

      if($ExcludeExesSet.Contains($bare_name)) {
        Write-Warning "Test $e is excluded"
        continue
      }
      MakeAndAdd -token $e -HashTable $TestExes
    }

    if($TestExes.Count -lt 1) {
       Write-Error "Failed to discover test executables"
       exit 1
    }
}

# Ordered by exe @{ Exe = @{ TestCase = LogName }}
$CasesToRun = [ordered]@{}

if($SuiteRun -ne "") {
    $suite_list = $SuiteRun -split ' '
    ProcessSuites -ListOfSuites $suite_list -HashOfHashes $CasesToRun
} elseif ($RunAll) {
    # Discover all the test binaries
    if($EnableJE) {
      $pattern = "*_test_je.exe"
    } else {
      $pattern = "*_test.exe"
    }

    $search_path = -join ($BinariesFolder, $pattern)
    Write-Host "Binaries Search Path: $search_path"

    $ListOfExe = @()
    dir -Path $search_path | ForEach-Object {
      $ListOfExe += ($_.Name)
    }

    # Exclude those in RunOnly from running as suites
    $ListOfSuites = @()
    ForEach($e in $ListOfExe) {

      $e = $e -replace '.exe$', ''
      $bare_name = $e -replace '_je$', ''

      if($ExcludeExesSet.Contains($bare_name)) {
        Write-Warning "Test $e is excluded"
        continue
      }

      if($RunOnly.Contains($bare_name)) {
        MakeAndAdd -token $e -HashTable $TestExes
      } else {
        $ListOfSuites += $bare_name
      }
    }

    ProcessSuites -ListOfSuites $ListOfSuites -HashOfHashes $CasesToRun
}


# Invoke a test with a filter and redirect all output
$InvokeTestCase = {
    param($exe, $test, $log);
    &$exe --gtest_filter=$test > $log 2>&1
}

# Invoke all tests and redirect output
$InvokeTestAsync = {
    param($exe, $log)
    &$exe > $log 2>&1
}

# Hash that contains tests to rerun if any failed
# Those tests will be rerun sequentially
# $Rerun = [ordered]@{}
# Test limiting factor here
[int]$count = 0
# Overall status
[bool]$script:success = $true;

function RunJobs($Suites, $TestCmds, [int]$ConcurrencyVal)
{
    # Array to wait for any of the running jobs
    $jobs = @()
    # Hash JobToLog
    $JobToLog = @{}

    # Wait for all to finish and get the results
    while(($JobToLog.Count -gt 0) -or
          ($TestCmds.Count -gt 0) -or
          ($Suites.Count -gt 0)) {

        # Make sure we have maximum concurrent jobs running if anything
        # and the $Limit either not set or allows to proceed
        while(($JobToLog.Count -lt $ConcurrencyVal) -and
              ((($TestCmds.Count -gt 0) -or ($Suites.Count -gt 0)) -and
              (($Limit -lt 0) -or ($count -lt $Limit)))) {

            # We always favor suites to run if available
            [string]$exe_name = ""
            [string]$log_path = ""
            $Cases = @{}

            if($Suites.Count -gt 0) {
              # Take the first one
              ForEach($e in $Suites.Keys) {
                $exe_name = $e
                $Cases = $Suites[$e]
                break
              }
              [string]$test_case = ""
              [string]$log_path = ""
              ForEach($c in $Cases.Keys) {
                 $test_case = $c
                 $log_path = $Cases[$c]
                 break
              }

              Write-Host "Starting $exe_name::$test_case"
              [string]$Exe = -Join ($BinariesFolder, $exe_name)
              $job = Start-Job -Name "$exe_name::$test_case" -ArgumentList @($Exe,$test_case,$log_path) -ScriptBlock $InvokeTestCase
              $JobToLog.Add($job, $log_path)

              $Cases.Remove($test_case)
              if($Cases.Count -lt 1) {
                $Suites.Remove($exe_name)
              }

            } elseif ($TestCmds.Count -gt 0) {

               ForEach($e in $TestCmds.Keys) {
                 $exe_name = $e
                 $log_path = $TestCmds[$e]
                 break
               }

               Write-Host "Starting $exe_name"
               [string]$Exe = -Join ($BinariesFolder, $exe_name)
               $job = Start-Job -Name $exe_name -ScriptBlock $InvokeTestAsync -ArgumentList @($Exe,$log_path)
               $JobToLog.Add($job, $log_path)

               $TestCmds.Remove($exe_name)

            } else {
                Write-Error "In the job loop but nothing to run"
                exit 1
            }

            ++$count
        } # End of Job starting loop

        if($JobToLog.Count -lt 1) {
          break
        }

        $jobs = @()
        foreach($k in $JobToLog.Keys) { $jobs += $k }

        $completed = Wait-Job -Job $jobs -Any
        $log = $JobToLog[$completed]
        $JobToLog.Remove($completed)

        $message = -join @($completed.Name, " State: ", ($completed.State))

        $log_content = @(Get-Content $log)

        if($completed.State -ne "Completed") {
            $script:success = $false
            Write-Warning $message
            $log_content | Write-Warning
        } else {
            # Scan the log. If we find PASSED and no occurrence of FAILED
            # then it is a success
            [bool]$pass_found = $false
            ForEach($l in $log_content) {

                if(($l -match "^\[\s+FAILED") -or
                   ($l -match "Assertion failed:")) {
                    $pass_found = $false
                    break
                }

                if(($l -match "^\[\s+PASSED") -or
                   ($l -match " : PASSED$") -or
                   ($l -match "^PASS$") -or   # Special c_test case
                   ($l -match "Passed all tests!") ) {
                    $pass_found = $true
                }
            }

            if(!$pass_found) {
                $script:success = $false;
                Write-Warning $message
                $log_content | Write-Warning
            } else {
                Write-Host $message
            }
        }

        # Remove cached job info from the system
        # Should be no output
        Receive-Job -Job $completed | Out-Null
    }
}

RunJobs -Suites $CasesToRun -TestCmds $TestExes -ConcurrencyVal $Concurrency

$EndDate = (Get-Date)

New-TimeSpan -Start $StartDate -End $EndDate |
  ForEach-Object {
    "Elapsed time: {0:g}" -f $_
  }


if(!$script:success) {
# This does not succeed in killing off jobs quickly
# So we simply exit
# Remove-Job -Job $jobs -Force
|
||||
# indicate failure using this exit code
|
||||
exit 1
|
||||
}
|
||||
|
||||
exit 0
|
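For context: the runner above launches Google Test binaries one case at a time via `--gtest_filter` and judges each log purely by the gtest summary markers it scans for (`[  PASSED`, `[  FAILED`). A minimal sketch of a binary producing exactly those markers; the `Demo` suite and file names here are hypothetical, not part of this diff:

```cpp
// Build (assuming gtest is installed): g++ -std=c++17 demo_test.cc -lgtest -lgtest_main -o demo_test
#include <gtest/gtest.h>

// One trivial case. `demo_test --gtest_filter=Demo.Adds` runs just this case,
// mirroring how $InvokeTestCase starts one job per suite case.
TEST(Demo, Adds) { EXPECT_EQ(2 + 2, 4); }

// On success gtest prints a line starting with "[  PASSED  ]", which the
// "^\[\s+PASSED" regex above matches; a failure prints "[  FAILED  ]" and
// trips the "^\[\s+FAILED" check instead.
```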
45
rocksdb/vendor/rocksdb/build_tools/setup_centos7.sh
vendored
Normal file
@ -0,0 +1,45 @@
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
set -ex

ROCKSDB_VERSION="6.7.3"
ZSTD_VERSION="1.4.4"

echo "This script configures CentOS with everything needed to build and run RocksDB"

yum update -y && yum install epel-release -y

yum install -y \
  wget \
  gcc-c++ \
  snappy snappy-devel \
  zlib zlib-devel \
  bzip2 bzip2-devel \
  lz4-devel \
  libasan \
  gflags

mkdir -pv /usr/local/rocksdb-${ROCKSDB_VERSION}
ln -sfT /usr/local/rocksdb-${ROCKSDB_VERSION} /usr/local/rocksdb

wget -qO /tmp/zstd-${ZSTD_VERSION}.tar.gz https://github.com/facebook/zstd/archive/v${ZSTD_VERSION}.tar.gz
wget -qO /tmp/rocksdb-${ROCKSDB_VERSION}.tar.gz https://github.com/facebook/rocksdb/archive/v${ROCKSDB_VERSION}.tar.gz

cd /tmp

tar xzvf zstd-${ZSTD_VERSION}.tar.gz
tar xzvf rocksdb-${ROCKSDB_VERSION}.tar.gz -C /usr/local/

echo "Installing ZSTD..."
pushd zstd-${ZSTD_VERSION}
make && make install
popd

echo "Compiling RocksDB..."
cd /usr/local/rocksdb
chown -R vagrant:vagrant /usr/local/rocksdb/
sudo -u vagrant make static_lib
cd examples/
sudo -u vagrant LD_LIBRARY_PATH=/usr/local/lib/ make all
sudo -u vagrant LD_LIBRARY_PATH=/usr/local/lib/ ./c_simple_example
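Once `make static_lib` has produced `librocksdb.a`, a quick way to verify the install beyond `c_simple_example` is an open/put/get round trip against the library. A minimal sketch, not part of this diff; the include path, link line (which compression libraries are needed depends on what the build detected), and the `/tmp/rocksdb_smoke` path are all assumptions:

```cpp
// g++ -std=c++17 smoke.cc -I/usr/local/rocksdb/include \
//     /usr/local/rocksdb/librocksdb.a -lsnappy -lz -lbz2 -llz4 -lzstd -lpthread
#include <cassert>
#include <iostream>
#include <string>
#include "rocksdb/db.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;  // create the test DB on first run

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/rocksdb_smoke", &db);
  assert(s.ok());

  s = db->Put(rocksdb::WriteOptions(), "key", "value");
  assert(s.ok());

  std::string value;
  s = db->Get(rocksdb::ReadOptions(), "key", &value);
  assert(s.ok() && value == "value");

  std::cout << "rocksdb round trip ok" << std::endl;
  delete db;
  return 0;
}
```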
106
rocksdb/vendor/rocksdb/build_tools/update_dependencies.sh
vendored
Normal file
@ -0,0 +1,106 @@
#!/bin/sh
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# Update dependencies.sh file with the latest available versions

BASEDIR=$(dirname $0)
OUTPUT=""

function log_header()
{
  echo "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved." >> "$OUTPUT"
  echo "# The file is generated using update_dependencies.sh." >> "$OUTPUT"
}


function log_variable()
{
  echo "$1=${!1}" >> "$OUTPUT"
}


TP2_LATEST="/data/users/$USER/fbsource/fbcode/third-party2/"
## $1 => lib name
## $2 => lib version (if not provided, will try to pick latest)
## $3 => platform (if not provided, will try to pick latest gcc)
##
## get_lib_base will set a variable named ${LIB_NAME}_BASE to the lib location
function get_lib_base()
{
  local lib_name=$1
  local lib_version=$2
  local lib_platform=$3

  local result="$TP2_LATEST/$lib_name/"

  # Lib Version
  if [ -z "$lib_version" ] || [ "$lib_version" = "LATEST" ]; then
    # version is not provided, use latest
    result=`ls -dr1v $result/*/ | head -n1`
  else
    result="$result/$lib_version/"
  fi

  # Lib Platform
  if [ -z "$lib_platform" ]; then
    # platform is not provided, use latest gcc
    result=`ls -dr1v $result/gcc-*[^fb]/ | head -n1`
  else
    echo $lib_platform
    result="$result/$lib_platform/"
  fi

  result=`ls -1d $result/*/ | head -n1`

  echo Finding link $result

  # lib_name => LIB_NAME_BASE
  local __res_var=${lib_name^^}"_BASE"
  __res_var=`echo $__res_var | tr - _`
  # LIB_NAME_BASE=$result
  eval $__res_var=`readlink -f $result`

  log_variable $__res_var
}

###########################################################
#                platform010 dependencies                 #
###########################################################

OUTPUT="$BASEDIR/dependencies_platform010.sh"

rm -f "$OUTPUT"
touch "$OUTPUT"

echo "Writing dependencies to $OUTPUT"

# Compilers locations
GCC_BASE=`readlink -f $TP2_LATEST/gcc/11.x/centos7-native/*/`
CLANG_BASE=`readlink -f $TP2_LATEST/llvm-fb/12/platform010/*/`

log_header
log_variable GCC_BASE
log_variable CLANG_BASE

# Libraries locations
get_lib_base libgcc 11.x platform010
get_lib_base glibc 2.34 platform010
get_lib_base snappy LATEST platform010
get_lib_base zlib LATEST platform010
get_lib_base bzip2 LATEST platform010
get_lib_base lz4 LATEST platform010
get_lib_base zstd LATEST platform010
get_lib_base gflags LATEST platform010
get_lib_base jemalloc LATEST platform010
get_lib_base numa LATEST platform010
get_lib_base libunwind LATEST platform010
get_lib_base tbb 2018_U5 platform010
get_lib_base liburing LATEST platform010
get_lib_base benchmark LATEST platform010

get_lib_base kernel-headers fb platform010
get_lib_base binutils LATEST centos7-native
get_lib_base valgrind LATEST platform010
get_lib_base lua 5.3.4 platform010

git diff $OUTPUT
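The core of `get_lib_base`'s "LATEST" resolution is listing versioned subdirectories in reverse natural order and taking the first (`ls -dr1v ... | head -n1`). A rough C++17 analogue of that one step, as a sketch with illustrative names only; note plain string comparison only approximates `ls -v` natural-version ordering (it sorts "10" before "9" unless names are zero-padded):

```cpp
// g++ -std=c++17 newest_subdir.cc -o newest_subdir
#include <algorithm>
#include <filesystem>
#include <iostream>
#include <string>
#include <vector>

namespace fs = std::filesystem;

// Collect immediate subdirectory names of `base` and return the maximum by
// name, i.e. what `ls -dr1v base/*/ | head -n1` does modulo natural sorting.
std::string newest_subdir(const fs::path& base) {
  std::vector<std::string> names;
  for (const auto& entry : fs::directory_iterator(base)) {
    if (entry.is_directory()) names.push_back(entry.path().filename().string());
  }
  if (names.empty()) return {};
  return *std::max_element(names.begin(), names.end());
}

int main(int argc, char** argv) {
  if (argc != 2) {
    std::cerr << "usage: " << argv[0] << " <dir>\n";
    return 1;
  }
  std::cout << newest_subdir(argv[1]) << "\n";
  return 0;
}
```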
23
rocksdb/vendor/rocksdb/build_tools/version.sh
vendored
Normal file
@ -0,0 +1,23 @@
#!/usr/bin/env bash
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
if [ "$#" = "0" ]; then
  echo "Usage: $0 major|minor|patch|full"
  exit 1
fi

if [ "$1" = "major" ]; then
  cat include/rocksdb/version.h | grep MAJOR | head -n1 | awk '{print $3}'
fi
if [ "$1" = "minor" ]; then
  cat include/rocksdb/version.h | grep MINOR | head -n1 | awk '{print $3}'
fi
if [ "$1" = "patch" ]; then
  cat include/rocksdb/version.h | grep PATCH | head -n1 | awk '{print $3}'
fi
if [ "$1" = "full" ]; then
  awk '/#define ROCKSDB/ { env[$2] = $3 }
       END { printf "%s.%s.%s\n", env["ROCKSDB_MAJOR"],
                                  env["ROCKSDB_MINOR"],
                                  env["ROCKSDB_PATCH"] }' \
    include/rocksdb/version.h
fi
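The awk above keys off the `#define ROCKSDB_MAJOR/MINOR/PATCH` lines in `include/rocksdb/version.h`; the same numbers are available to C++ as preprocessor macros. A minimal sketch (the include path is an assumption about where the vendored tree is checked out):

```cpp
// g++ -std=c++17 -Irocksdb/vendor/rocksdb/include print_version.cc
#include <iostream>
#include "rocksdb/version.h"

int main() {
  // These are the macros build_tools/version.sh greps out of version.h.
  std::cout << ROCKSDB_MAJOR << "." << ROCKSDB_MINOR << "."
            << ROCKSDB_PATCH << std::endl;
  return 0;
}
```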
193
rocksdb/vendor/rocksdb/cache/cache.cc
vendored
Normal file
@ -0,0 +1,193 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "rocksdb/cache.h"

#include "cache/lru_cache.h"
#include "rocksdb/secondary_cache.h"
#include "rocksdb/utilities/customizable_util.h"
#include "rocksdb/utilities/options_type.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {
const Cache::CacheItemHelper kNoopCacheItemHelper{};

static std::unordered_map<std::string, OptionTypeInfo>
    lru_cache_options_type_info = {
        {"capacity",
         {offsetof(struct LRUCacheOptions, capacity), OptionType::kSizeT,
          OptionVerificationType::kNormal, OptionTypeFlags::kMutable}},
        {"num_shard_bits",
         {offsetof(struct LRUCacheOptions, num_shard_bits), OptionType::kInt,
          OptionVerificationType::kNormal, OptionTypeFlags::kMutable}},
        {"strict_capacity_limit",
         {offsetof(struct LRUCacheOptions, strict_capacity_limit),
          OptionType::kBoolean, OptionVerificationType::kNormal,
          OptionTypeFlags::kMutable}},
        {"high_pri_pool_ratio",
         {offsetof(struct LRUCacheOptions, high_pri_pool_ratio),
          OptionType::kDouble, OptionVerificationType::kNormal,
          OptionTypeFlags::kMutable}},
        {"low_pri_pool_ratio",
         {offsetof(struct LRUCacheOptions, low_pri_pool_ratio),
          OptionType::kDouble, OptionVerificationType::kNormal,
          OptionTypeFlags::kMutable}},
};

static std::unordered_map<std::string, OptionTypeInfo>
    comp_sec_cache_options_type_info = {
        {"capacity",
         {offsetof(struct CompressedSecondaryCacheOptions, capacity),
          OptionType::kSizeT, OptionVerificationType::kNormal,
          OptionTypeFlags::kMutable}},
        {"num_shard_bits",
         {offsetof(struct CompressedSecondaryCacheOptions, num_shard_bits),
          OptionType::kInt, OptionVerificationType::kNormal,
          OptionTypeFlags::kMutable}},
        {"compression_type",
         {offsetof(struct CompressedSecondaryCacheOptions, compression_type),
          OptionType::kCompressionType, OptionVerificationType::kNormal,
          OptionTypeFlags::kMutable}},
        {"compress_format_version",
         {offsetof(struct CompressedSecondaryCacheOptions,
                   compress_format_version),
          OptionType::kUInt32T, OptionVerificationType::kNormal,
          OptionTypeFlags::kMutable}},
        {"enable_custom_split_merge",
         {offsetof(struct CompressedSecondaryCacheOptions,
                   enable_custom_split_merge),
          OptionType::kBoolean, OptionVerificationType::kNormal,
          OptionTypeFlags::kMutable}},
};

namespace {
static void NoopDelete(Cache::ObjectPtr /*obj*/,
                       MemoryAllocator* /*allocator*/) {
  assert(false);
}

static size_t SliceSize(Cache::ObjectPtr obj) {
  return static_cast<Slice*>(obj)->size();
}

static Status SliceSaveTo(Cache::ObjectPtr from_obj, size_t from_offset,
                          size_t length, char* out) {
  const Slice& slice = *static_cast<Slice*>(from_obj);
  std::memcpy(out, slice.data() + from_offset, length);
  return Status::OK();
}

static Status NoopCreate(const Slice& /*data*/, CompressionType /*type*/,
                         CacheTier /*source*/, Cache::CreateContext* /*ctx*/,
                         MemoryAllocator* /*allocator*/,
                         Cache::ObjectPtr* /*out_obj*/,
                         size_t* /*out_charge*/) {
  assert(false);
  return Status::NotSupported();
}

static Cache::CacheItemHelper kBasicCacheItemHelper(CacheEntryRole::kMisc,
                                                    &NoopDelete);
}  // namespace

const Cache::CacheItemHelper kSliceCacheItemHelper{
    CacheEntryRole::kMisc, &NoopDelete, &SliceSize,
    &SliceSaveTo,          &NoopCreate, &kBasicCacheItemHelper,
};

Status SecondaryCache::CreateFromString(
    const ConfigOptions& config_options, const std::string& value,
    std::shared_ptr<SecondaryCache>* result) {
  if (value.find("compressed_secondary_cache://") == 0) {
    std::string args = value;
    args.erase(0, std::strlen("compressed_secondary_cache://"));
    Status status;
    std::shared_ptr<SecondaryCache> sec_cache;

    CompressedSecondaryCacheOptions sec_cache_opts;
    status = OptionTypeInfo::ParseStruct(config_options, "",
                                         &comp_sec_cache_options_type_info, "",
                                         args, &sec_cache_opts);
    if (status.ok()) {
      sec_cache = NewCompressedSecondaryCache(sec_cache_opts);
    }

    if (status.ok()) {
      result->swap(sec_cache);
    }
    return status;
  } else {
    return LoadSharedObject<SecondaryCache>(config_options, value, result);
  }
}

Status Cache::CreateFromString(const ConfigOptions& config_options,
                               const std::string& value,
                               std::shared_ptr<Cache>* result) {
  Status status;
  std::shared_ptr<Cache> cache;
  if (value.find('=') == std::string::npos) {
    cache = NewLRUCache(ParseSizeT(value));
  } else {
    LRUCacheOptions cache_opts;
    status = OptionTypeInfo::ParseStruct(config_options, "",
                                         &lru_cache_options_type_info, "",
                                         value, &cache_opts);
    if (status.ok()) {
      cache = NewLRUCache(cache_opts);
    }
  }
  if (status.ok()) {
    result->swap(cache);
  }
  return status;
}

bool Cache::AsyncLookupHandle::IsReady() {
  return pending_handle == nullptr || pending_handle->IsReady();
}

bool Cache::AsyncLookupHandle::IsPending() { return pending_handle != nullptr; }

Cache::Handle* Cache::AsyncLookupHandle::Result() {
  assert(!IsPending());
  return result_handle;
}

void Cache::StartAsyncLookup(AsyncLookupHandle& async_handle) {
  async_handle.found_dummy_entry = false;  // in case re-used
  assert(!async_handle.IsPending());
  async_handle.result_handle =
      Lookup(async_handle.key, async_handle.helper, async_handle.create_context,
             async_handle.priority, async_handle.stats);
}

Cache::Handle* Cache::Wait(AsyncLookupHandle& async_handle) {
  WaitAll(&async_handle, 1);
  return async_handle.Result();
}

void Cache::WaitAll(AsyncLookupHandle* async_handles, size_t count) {
  for (size_t i = 0; i < count; ++i) {
    if (async_handles[i].IsPending()) {
      // If a pending handle gets here, it should be marked as "to be handled
      // by a caller" by that caller erasing the pending_cache on it.
      assert(async_handles[i].pending_cache == nullptr);
    }
  }
}

void Cache::SetEvictionCallback(EvictionCallback&& fn) {
  // Overwriting non-empty with non-empty could indicate a bug
  assert(!eviction_callback_ || !fn);
  eviction_callback_ = std::move(fn);
}

}  // namespace ROCKSDB_NAMESPACE
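The option maps above define the `key=value;key=value` grammar that `Cache::CreateFromString` accepts; a string with no `=` falls through to `NewLRUCache(ParseSizeT(value))`, and `ParseSizeT` understands size suffixes like `M`. A usage sketch, not part of this diff, assuming default `ConfigOptions` are sufficient and that `ConfigOptions` comes from `rocksdb/convenience.h`:

```cpp
#include <cassert>
#include <memory>
#include "rocksdb/cache.h"
#include "rocksdb/convenience.h"  // rocksdb::ConfigOptions

int main() {
  rocksdb::ConfigOptions config_options;
  std::shared_ptr<rocksdb::Cache> cache;

  // No '=' in the string: parsed as a size and handed to NewLRUCache().
  rocksdb::Status s =
      rocksdb::Cache::CreateFromString(config_options, "1M", &cache);
  assert(s.ok() && cache != nullptr);

  // With '=': parsed against lru_cache_options_type_info field by field.
  s = rocksdb::Cache::CreateFromString(
      config_options, "capacity=16777216;num_shard_bits=4", &cache);
  assert(s.ok() && cache->GetCapacity() == 16777216);
  return 0;
}
```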
20
rocksdb/vendor/rocksdb/cache/cache_bench.cc
vendored
Normal file
@ -0,0 +1,20 @@
// Copyright (c) 2013-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef GFLAGS
#include <cstdio>
int main() {
  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
  return 1;
}
#else
#include "rocksdb/cache_bench_tool.h"
int main(int argc, char** argv) {
  return ROCKSDB_NAMESPACE::cache_bench_tool(argc, argv);
}
#endif  // GFLAGS
1177
rocksdb/vendor/rocksdb/cache/cache_bench_tool.cc
vendored
Normal file
File diff suppressed because it is too large
104
rocksdb/vendor/rocksdb/cache/cache_entry_roles.cc
vendored
Normal file
@ -0,0 +1,104 @@
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "cache/cache_entry_roles.h"

#include <mutex>

#include "port/lang.h"

namespace ROCKSDB_NAMESPACE {

std::array<std::string, kNumCacheEntryRoles> kCacheEntryRoleToCamelString{{
    "DataBlock",
    "FilterBlock",
    "FilterMetaBlock",
    "DeprecatedFilterBlock",
    "IndexBlock",
    "OtherBlock",
    "WriteBuffer",
    "CompressionDictionaryBuildingBuffer",
    "FilterConstruction",
    "BlockBasedTableReader",
    "FileMetadata",
    "BlobValue",
    "BlobCache",
    "Misc",
}};

std::array<std::string, kNumCacheEntryRoles> kCacheEntryRoleToHyphenString{{
    "data-block",
    "filter-block",
    "filter-meta-block",
    "deprecated-filter-block",
    "index-block",
    "other-block",
    "write-buffer",
    "compression-dictionary-building-buffer",
    "filter-construction",
    "block-based-table-reader",
    "file-metadata",
    "blob-value",
    "blob-cache",
    "misc",
}};

const std::string& GetCacheEntryRoleName(CacheEntryRole role) {
  return kCacheEntryRoleToHyphenString[static_cast<size_t>(role)];
}

const std::string& BlockCacheEntryStatsMapKeys::CacheId() {
  static const std::string kCacheId = "id";
  return kCacheId;
}

const std::string& BlockCacheEntryStatsMapKeys::CacheCapacityBytes() {
  static const std::string kCacheCapacityBytes = "capacity";
  return kCacheCapacityBytes;
}

const std::string&
BlockCacheEntryStatsMapKeys::LastCollectionDurationSeconds() {
  static const std::string kLastCollectionDurationSeconds =
      "secs_for_last_collection";
  return kLastCollectionDurationSeconds;
}

const std::string& BlockCacheEntryStatsMapKeys::LastCollectionAgeSeconds() {
  static const std::string kLastCollectionAgeSeconds =
      "secs_since_last_collection";
  return kLastCollectionAgeSeconds;
}

namespace {

std::string GetPrefixedCacheEntryRoleName(const std::string& prefix,
                                          CacheEntryRole role) {
  const std::string& role_name = GetCacheEntryRoleName(role);
  std::string prefixed_role_name;
  prefixed_role_name.reserve(prefix.size() + role_name.size());
  prefixed_role_name.append(prefix);
  prefixed_role_name.append(role_name);
  return prefixed_role_name;
}

}  // namespace

std::string BlockCacheEntryStatsMapKeys::EntryCount(CacheEntryRole role) {
  const static std::string kPrefix = "count.";
  return GetPrefixedCacheEntryRoleName(kPrefix, role);
}

std::string BlockCacheEntryStatsMapKeys::UsedBytes(CacheEntryRole role) {
  const static std::string kPrefix = "bytes.";
  return GetPrefixedCacheEntryRoleName(kPrefix, role);
}

std::string BlockCacheEntryStatsMapKeys::UsedPercent(CacheEntryRole role) {
  const static std::string kPrefix = "percent.";
  return GetPrefixedCacheEntryRoleName(kPrefix, role);
}

}  // namespace ROCKSDB_NAMESPACE
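These helpers build the per-role keys of the block-cache entry-stats map, e.g. `count.data-block` or `bytes.filter-block`. A short sketch of how a consumer might enumerate them, assuming (as in this vintage of RocksDB) that `CacheEntryRole`, `kNumCacheEntryRoles`, and `BlockCacheEntryStatsMapKeys` are declared in the public `rocksdb/cache.h`; iterating roles by index is purely for illustration:

```cpp
#include <cstddef>
#include <iostream>
#include "rocksdb/cache.h"  // CacheEntryRole, kNumCacheEntryRoles

int main() {
  using rocksdb::BlockCacheEntryStatsMapKeys;
  using rocksdb::CacheEntryRole;

  // Print each role's stat keys, e.g. "count.data-block  bytes.data-block".
  for (size_t i = 0; i < rocksdb::kNumCacheEntryRoles; ++i) {
    auto role = static_cast<CacheEntryRole>(i);
    std::cout << BlockCacheEntryStatsMapKeys::EntryCount(role) << "  "
              << BlockCacheEntryStatsMapKeys::UsedBytes(role) << "  "
              << BlockCacheEntryStatsMapKeys::UsedPercent(role) << "\n";
  }
  return 0;
}
```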
Some files were not shown because too many files have changed in this diff.